source: trunk/packages/xen-3.1/xen-3.1/tools/vnet/vnetd/skbuff.c @ 34

Last change on this file since 34 was 34, checked in by hartmans, 18 years ago

Add xen and xen-common

File size: 12.9 KB
/*
 *      Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *      Authors:        Alan Cox <iiitac@pyr.swan.ac.uk>
 *                      Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *      Fixes:
 *              Alan Cox        :       Fixed the worst of the load
 *                                      balancer bugs.
 *              Dave Platt      :       Interrupt stacking fix.
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Changed buffer format.
 *              Alan Cox        :       destructor hook for AF_UNIX etc.
 *              Linus Torvalds  :       Better skb_clone.
 *              Alan Cox        :       Added skb_copy.
 *              Alan Cox        :       Added all the changed routines Linus
 *                                      only put in the headers.
 *              Ray VanTassle   :       Fixed --skb->lock in free.
 *              Alan Cox        :       skb_copy copy arp field.
 *              Andi Kleen      :       slabified it.
 *              Robert Olsson   :       Removed skb_head_pool.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include "allocate.h"
#include "debug.h"
#include "skbuff.h"

#define SKB_DATA_ALIGN(size) ((((size) + 7) >> 3) << 3)
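
/*
 * Note (added for illustration): rounds a size up to the next multiple of
 * 8 bytes, e.g. SKB_DATA_ALIGN(13) == (((13 + 7) >> 3) << 3) == 16;
 * sizes that are already 8-byte aligned pass through unchanged.
 */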

/**
 *      skb_over_panic  -       private function
 *      @skb: buffer
 *      @sz: size
 *      @here: address
 *
 *      Out of line support code for skb_put(). Not user callable.
 */
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
        eprintf("skput:over: %p:%d put:%d\n", here, skb->len, sz);
        BUG();
}

/**
 *      skb_under_panic -       private function
 *      @skb: buffer
 *      @sz: size
 *      @here: address
 *
 *      Out of line support code for skb_push(). Not user callable.
 */

void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
        eprintf("skput:under: %p:%d put:%d\n", here, skb->len, sz);
        BUG();
}

/**
 *      alloc_skb       -       allocate a network buffer
 *      @size: size to allocate
 *      @gfp_mask: allocation mask
 *
 *      Allocate a new &sk_buff. The returned buffer has no headroom and a
 *      tail room of size bytes. The object has a reference count of one.
 *      The return is the buffer. On a failure the return is %NULL.
 */
struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
{
        struct sk_buff *skb;
        u8 *data;

        /* Get the HEAD */
        skb = ALLOCATE(struct sk_buff);
        if (!skb)
                goto out;

        /* Get the DATA. Size must match skb_add_mtu(). */
        size = SKB_DATA_ALIGN(size);
        data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
        if (!data)
                goto nodata;

        memset(skb, 0, offsetof(struct sk_buff, truesize));
        skb->truesize = size + sizeof(struct sk_buff);
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb->tail = data;
        skb->end  = data + size;
        skb->list = NULL;

        atomic_set(&(skb_shinfo(skb)->dataref), 1);
        skb_shinfo(skb)->nr_frags  = 0;
        skb_shinfo(skb)->tso_size = 0;
        skb_shinfo(skb)->tso_segs = 0;
        skb_shinfo(skb)->frag_list = NULL;
out:
        return skb;
nodata:
        kfree(skb);
        skb = NULL;
        goto out;
}
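
/*
 * Usage sketch (illustrative, not part of the original file): allocate a
 * buffer with 128 bytes of tail room, append four bytes with skb_put()
 * from skbuff.h, then drop the reference with kfree_skb().
 *
 *      struct sk_buff *skb = alloc_skb(128, GFP_ATOMIC);
 *      if (skb) {
 *              unsigned char *p = skb_put(skb, 4);
 *              memcpy(p, "\xde\xad\xbe\xef", 4);
 *              kfree_skb(skb);
 *      }
 */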

void skb_release_data(struct sk_buff *skb)
{
        kfree(skb->head);
}

/*
 *      Free the memory of an skbuff without cleaning its state.
 */
void kfree_skbmem(struct sk_buff *skb)
{
        skb_release_data(skb);
        kfree(skb);
}

/**
 *      __kfree_skb - private function
 *      @skb: buffer
 *
 *      Free an sk_buff. Release anything attached to the buffer.
 *      Clean the state. This is an internal helper function. Users should
 *      always call kfree_skb.
 */

void __kfree_skb(struct sk_buff *skb)
{
        if (skb->list) {
                wprintf("Warning: kfree_skb passed an skb still "
                        "on a list.\n");
                //BUG();
        }

        if (skb->destructor) {
                skb->destructor(skb);
        }
        kfree_skbmem(skb);
}


/**
 *      skb_clone       -       duplicate an sk_buff
 *      @skb: buffer to clone
 *      @gfp_mask: allocation priority
 *
 *      Duplicate an &sk_buff. The new one is not owned by a socket. In the
 *      stock kernel the two copies share the same packet data; this
 *      userspace port implements the clone as pskb_copy(), so the data is
 *      copied as well. The new buffer has a reference count of 1. If the
 *      allocation fails the function returns %NULL, otherwise the new
 *      buffer is returned.
 *
 *      If this function is called from an interrupt @gfp_mask must be
 *      %GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
{
        return pskb_copy(skb, gfp_mask);
}
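
/*
 * Usage sketch (illustrative): clone before modifying, leaving the
 * original untouched. Because this port copies the data too, writes
 * through the clone never alias the original buffer.
 *
 *      struct sk_buff *copy = skb_clone(skb, GFP_ATOMIC);
 *      if (copy) {
 *              copy->data[0] = 0xff;   the original skb is unchanged
 *              kfree_skb(copy);
 *      }
 */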

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
        /*
         *      Shift between the two data areas in bytes
         */
        unsigned long offset = new->data - old->data;

        new->list       = NULL;
        new->protocol   = old->protocol;
        new->h.raw      = old->h.raw + offset;
        new->nh.raw     = old->nh.raw + offset;
        new->mac.raw    = old->mac.raw + offset;
        new->pkt_type   = old->pkt_type;
        new->destructor = NULL;
        atomic_set(&new->users, 1);
}


/**
 *      pskb_expand_head - reallocate header of &sk_buff
 *      @skb: buffer to reallocate
 *      @nhead: room to add at head
 *      @ntail: room to add at tail
 *      @gfp_mask: allocation priority
 *
 *      Expands (or creates an identical copy, if @nhead and @ntail are zero)
 *      the header of @skb. The &sk_buff itself is not changed and MUST have
 *      a reference count of 1. Returns zero on success or a negative error
 *      code if expansion failed; on failure the &sk_buff is not changed.
 *
 *      All the pointers pointing into the skb header may change and must be
 *      reloaded after a call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
{
        u8 *data;
        int size = nhead + (skb->end - skb->head) + ntail;
        long off;

        if (skb_shared(skb))
                BUG();

        size = SKB_DATA_ALIGN(size);

        data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
        if (!data)
                goto nodata;

        /* Copy only real data... and, alas, header. This should be
         * optimized for the cases when header is void. */
        memcpy(data + nhead, skb->head, skb->tail - skb->head);
        memcpy(data + size, skb->end, sizeof(struct skb_shared_info));

        skb_release_data(skb);

        off = (data + nhead) - skb->head;

        skb->head     = data;
        skb->end      = data + size;
        skb->data    += off;
        skb->tail    += off;
        skb->mac.raw += off;
        skb->h.raw   += off;
        skb->nh.raw  += off;
        return 0;

nodata:
        return -ENOMEM;
}
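
/*
 * Usage sketch (illustrative; the 14-byte size and ether_header buffer
 * are hypothetical): grow the headroom before prepending a link-layer
 * header, reloading any cached pointers afterwards. skb_headroom() and
 * skb_push() are assumed to be provided by skbuff.h.
 *
 *      if (skb_headroom(skb) < 14 &&
 *          pskb_expand_head(skb, 14 - skb_headroom(skb), 0, GFP_ATOMIC) < 0)
 *              return -ENOMEM;
 *      memcpy(skb_push(skb, 14), ether_header, 14);
 */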

struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
{
        /*
         *      Allocate the copy buffer
         */
        struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask);

        if (!n)
                goto out;

        /* Set the data pointer */
        skb_reserve(n, skb->data - skb->head);
        /* Set the tail pointer and length */
        skb_put(n, skb_headlen(skb));
        /* Copy the bytes */
        memcpy(n->data, skb->data, n->len);

        n->data_len  = skb->data_len;
        n->len       = skb->len;

        copy_skb_header(n, skb);
out:
        return n;
}

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
        struct sk_buff *skb2;
        int delta = headroom - skb_headroom(skb);

        if (delta <= 0)
                skb2 = pskb_copy(skb, GFP_ATOMIC);
        else
                skb2 = skb_copy_expand(skb, headroom, 0, GFP_ATOMIC);
        return skb2;
}
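
/*
 * Usage sketch (illustrative; the 16-byte outer_header buffer is
 * hypothetical): obtain a private, writable copy with at least 16 bytes
 * of headroom before encapsulating, leaving the original buffer intact.
 *
 *      struct sk_buff *priv = skb_realloc_headroom(skb, 16);
 *      if (!priv)
 *              return -ENOMEM;
 *      memcpy(skb_push(priv, 16), outer_header, 16);
 */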


/**
 *      skb_copy_expand -       copy and expand sk_buff
 *      @skb: buffer to copy
 *      @newheadroom: new free bytes at head
 *      @newtailroom: new free bytes at tail
 *      @gfp_mask: allocation priority
 *
 *      Make a copy of both an &sk_buff and its data and while doing so
 *      allocate additional space.
 *
 *      This is used when the caller wishes to modify the data and needs a
 *      private copy of the data to alter as well as more space for new fields.
 *      Returns %NULL on failure or the pointer to the buffer
 *      on success. The returned buffer has a reference count of 1.
 *
 *      You must pass %GFP_ATOMIC as the allocation priority if this function
 *      is called from an interrupt.
 *
 *      BUG ALERT: ip_summed is not copied. Why does this work? Is it used
 *      only by netfilter in the cases when checksum is recalculated? --ANK
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                                int newheadroom, int newtailroom, int gfp_mask)
{
        /*
         *      Allocate the copy buffer
         */
        struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
                                      gfp_mask);
        int head_copy_len, head_copy_off;

        if (!n)
                return NULL;

        skb_reserve(n, newheadroom);

        /* Set the tail pointer and length */
        skb_put(n, skb->len);

        head_copy_len = skb_headroom(skb);
        head_copy_off = 0;
        if (newheadroom <= head_copy_len)
                head_copy_len = newheadroom;
        else
                head_copy_off = newheadroom - head_copy_len;

        /* Copy the linear header and data. */
        if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
                          skb->len + head_copy_len))
                BUG();

        copy_skb_header(n, skb);

        return n;
}
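
/*
 * Usage sketch (illustrative): copy a buffer while adding 32 bytes of
 * headroom and 8 bytes of tailroom in one allocation, for a case where
 * both an outer header and a trailer will be added later.
 *
 *      struct sk_buff *n = skb_copy_expand(skb, 32, 8, GFP_ATOMIC);
 *      if (!n)
 *              return -ENOMEM;
 */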


/* Copy some data bits from skb to a caller-supplied buffer. Only the
 * linear header is handled; this port carries no paged fragments, so a
 * request extending past the linear data fails with -EFAULT. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
        int copy;
        int start = skb_headlen(skb);

        if (offset > (int)skb->len - len)
                goto fault;

        /* Copy header. */
        if ((copy = start - offset) > 0) {
                if (copy > len)
                        copy = len;
                memcpy(to, skb->data + offset, copy);
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                to     += copy;
        }

        if (!len)
                return 0;

fault:
        return -EFAULT;
}
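
/*
 * Usage sketch (illustrative): copy up to the first 64 bytes of packet
 * data into a local buffer; a negative offset reaches back into the
 * headroom, as skb_copy_expand() exploits above.
 *
 *      char buf[64];
 *      int want = skb->len < 64 ? (int)skb->len : 64;
 *      if (skb_copy_bits(skb, 0, buf, want) < 0)
 *              return -EFAULT;
 */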


/**
 *      skb_dequeue - remove from the head of the queue
 *      @list: list to dequeue from
 *
 *      Remove the head of the list. The list lock is taken so the function
 *      may be used safely with other locking list functions. The head item is
 *      returned or %NULL if the list is empty.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
        unsigned long flags;
        struct sk_buff *result;

        spin_lock_irqsave(&list->lock, flags);
        result = __skb_dequeue(list);
        spin_unlock_irqrestore(&list->lock, flags);
        return result;
}

/**
 *      skb_dequeue_tail - remove from the tail of the queue
 *      @list: list to dequeue from
 *
 *      Remove the tail of the list. The list lock is taken so the function
 *      may be used safely with other locking list functions. The tail item is
 *      returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
        unsigned long flags;
        struct sk_buff *result;

        spin_lock_irqsave(&list->lock, flags);
        result = __skb_dequeue_tail(list);
        spin_unlock_irqrestore(&list->lock, flags);
        return result;
}

/**
 *      skb_queue_purge - empty a list
 *      @list: list to empty
 *
 *      Delete all buffers on an &sk_buff list. Each buffer is removed from
 *      the list and one reference dropped. This function takes the list
 *      lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
        struct sk_buff *skb;
        while ((skb = skb_dequeue(list)) != NULL)
                kfree_skb(skb);
}

/**
 *      skb_queue_head - queue a buffer at the list head
 *      @list: list to use
 *      @newsk: buffer to queue
 *
 *      Queue a buffer at the start of the list. This function takes the
 *      list lock and can be used safely with other locking &sk_buff
 *      functions.
 *
 *      A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);
        __skb_queue_head(list, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *      skb_queue_tail - queue a buffer at the list tail
 *      @list: list to use
 *      @newsk: buffer to queue
 *
 *      Queue a buffer at the tail of the list. This function takes the
 *      list lock and can be used safely with other locking &sk_buff
 *      functions.
 *
 *      A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);
        __skb_queue_tail(list, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
}
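
/*
 * Usage sketch (illustrative): a simple FIFO built from the locked queue
 * operations. skb_queue_head_init() is assumed to be provided by
 * skbuff.h, as in the kernel API.
 *
 *      struct sk_buff_head q;
 *      struct sk_buff *next;
 *
 *      skb_queue_head_init(&q);
 *      skb_queue_tail(&q, skb);        producer side
 *      next = skb_dequeue(&q);         consumer side; NULL when empty
 */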

/**
 *      skb_unlink      -       remove a buffer from a list
 *      @skb: buffer to remove
 *
 *      Remove a packet from the list it is on. The list locks are taken
 *      and this function is atomic with respect to other list locked calls.
 *
 *      Works even without knowing the list it is sitting on, which can be
 *      handy at times. It also means that THE LIST MUST EXIST when you
 *      unlink. Thus a list must have its contents unlinked before it is
 *      destroyed.
 */
void skb_unlink(struct sk_buff *skb)
{
        struct sk_buff_head *list = skb->list;

        if (list) {
                unsigned long flags;

                spin_lock_irqsave(&list->lock, flags);
                if (skb->list == list)
                        __skb_unlink(skb, skb->list);
                spin_unlock_irqrestore(&list->lock, flags);
        }
}


/**
 *      skb_append      -       append a buffer
 *      @old: buffer to insert after
 *      @newsk: buffer to insert
 *
 *      Place a packet after a given packet in a list. The list locks are taken
 *      and this function is atomic with respect to other list locked calls.
 *      A buffer cannot be placed on two lists at the same time.
 */

void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
        unsigned long flags;

        spin_lock_irqsave(&old->list->lock, flags);
        __skb_append(old, newsk);
        spin_unlock_irqrestore(&old->list->lock, flags);
}


/**
 *      skb_insert      -       insert a buffer
 *      @old: buffer to insert before
 *      @newsk: buffer to insert
 *
 *      Place a packet before a given packet in a list. The list locks are
 *      taken and this function is atomic with respect to other list locked
 *      calls. A buffer cannot be placed on two lists at the same time.
 */

void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
        unsigned long flags;

        spin_lock_irqsave(&old->list->lock, flags);
        __skb_insert(newsk, old->prev, old, old->list);
        spin_unlock_irqrestore(&old->list->lock, flags);
}
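
/*
 * Usage sketch (illustrative; pos is a hypothetical cursor found by
 * walking the queue): keep a queue ordered by inserting the new buffer
 * before the first element that should follow it, falling back to the
 * tail when no such element exists.
 *
 *      if (pos)
 *              skb_insert(pos, newsk);
 *      else
 *              skb_queue_tail(&q, newsk);
 */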