1 | /* |
---|
2 | * Definitions for the 'struct sk_buff' memory handlers. |
---|
3 | * |
---|
4 | * Authors: |
---|
5 | * Alan Cox, <gw4pts@gw4pts.ampr.org> |
---|
6 | * Florian La Roche, <rzsfl@rz.uni-sb.de> |
---|
7 | * |
---|
8 | * This program is free software; you can redistribute it and/or |
---|
9 | * modify it under the terms of the GNU General Public License |
---|
10 | * as published by the Free Software Foundation; either version |
---|
11 | * 2 of the License, or (at your option) any later version. |
---|
12 | */ |
---|
13 | |
---|
14 | #ifndef _LINUX_SKBUFF_H |
---|
15 | #define _LINUX_SKBUFF_H |
---|
16 | |
---|
17 | #include <linux/kernel.h> |
---|
18 | #include <linux/compiler.h> |
---|
19 | #include <linux/time.h> |
---|
20 | #include <linux/cache.h> |
---|
21 | |
---|
22 | #include <asm/atomic.h> |
---|
23 | #include <asm/types.h> |
---|
24 | #include <linux/spinlock.h> |
---|
25 | #include <linux/mm.h> |
---|
26 | #include <linux/highmem.h> |
---|
27 | #include <linux/poll.h> |
---|
28 | #include <linux/net.h> |
---|
29 | #include <linux/textsearch.h> |
---|
30 | #include <net/checksum.h> |
---|
31 | #include <linux/dmaengine.h> |
---|
32 | |
---|
#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8) */

/* Values for skb->ip_summed; see the checksumming comment block below. */
#define CHECKSUM_NONE 0
#define CHECKSUM_HW 1
#define CHECKSUM_UNNECESSARY 2

/* Round X up to the next SMP_CACHE_BYTES boundary. */
#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
/*
 * Largest cache-aligned data size that fits in a (PAGE_SIZE << ORDER)
 * allocation once X bytes of overhead and the trailing skb_shared_info
 * are subtracted.
 */
#define SKB_MAX_ORDER(X, ORDER)	(((PAGE_SIZE << (ORDER)) - (X) - \
				  sizeof(struct skb_shared_info)) & \
				  ~(SMP_CACHE_BYTES - 1))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))
---|
47 | |
---|
48 | /* A. Checksumming of received packets by device. |
---|
49 | * |
---|
50 | * NONE: device failed to checksum this packet. |
---|
51 | * skb->csum is undefined. |
---|
52 | * |
---|
 *	UNNECESSARY: device parsed packet and would have verified checksum.
---|
54 | * skb->csum is undefined. |
---|
55 | * It is bad option, but, unfortunately, many of vendors do this. |
---|
56 | * Apparently with secret goal to sell you new device, when you |
---|
57 | * will add new protocol to your host. F.e. IPv6. 8) |
---|
58 | * |
---|
59 | * HW: the most generic way. Device supplied checksum of _all_ |
---|
60 | * the packet as seen by netif_rx in skb->csum. |
---|
61 | * NOTE: Even if device supports only some protocols, but |
---|
62 | * is able to produce some skb->csum, it MUST use HW, |
---|
63 | * not UNNECESSARY. |
---|
64 | * |
---|
65 | * B. Checksumming on output. |
---|
66 | * |
---|
67 | * NONE: skb is checksummed by protocol or csum is not required. |
---|
68 | * |
---|
69 | * HW: device is required to csum packet as seen by hard_start_xmit |
---|
70 | * from skb->h.raw to the end and to record the checksum |
---|
71 | * at skb->h.raw+skb->csum. |
---|
72 | * |
---|
73 | * Device must show its capabilities in dev->features, set |
---|
74 | * at device setup time. |
---|
75 | * NETIF_F_HW_CSUM - it is clever device, it is able to checksum |
---|
76 | * everything. |
---|
77 | * NETIF_F_NO_CSUM - loopback or reliable single hop media. |
---|
78 | * NETIF_F_IP_CSUM - device is dumb. It is able to csum only |
---|
79 | * TCP/UDP over IPv4. Sigh. Vendors like this |
---|
80 | * way by an unknown reason. Though, see comment above |
---|
81 | * about CHECKSUM_UNNECESSARY. 8) |
---|
82 | * |
---|
83 | * Any questions? No questions, good. --ANK |
---|
84 | */ |
---|
85 | |
---|
86 | struct net_device; |
---|
87 | |
---|
88 | #ifdef CONFIG_NETFILTER |
---|
/*
 * Minimal conntrack handle referenced from skb->nfct: a reference count
 * plus a destructor called when the last reference is dropped.
 */
struct nf_conntrack {
	atomic_t use;
	void (*destroy)(struct nf_conntrack *);
};
---|
93 | |
---|
94 | #ifdef CONFIG_BRIDGE_NETFILTER |
---|
/* Per-skb state kept by the bridge netfilter code (see br_netfilter.c). */
struct nf_bridge_info {
	atomic_t use;			/* reference count */
	struct net_device *physindev;	/* physical bridge port frame came in on */
	struct net_device *physoutdev;	/* physical bridge port frame goes out on */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	struct net_device *netoutdev;
#endif
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
---|
105 | #endif |
---|
106 | |
---|
107 | #endif |
---|
108 | |
---|
/*
 * Head of a doubly linked circular list of sk_buffs, with its own
 * spinlock and element count.
 */
struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;	/* number of buffers on the list */
	spinlock_t	lock;
};
---|
117 | |
---|
118 | struct sk_buff; |
---|
119 | |
---|
/* To allow 64K frame to be packed as single skb without frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

/* One page-based fragment of a nonlinear skb. */
struct skb_frag_struct {
	struct page *page;	/* page holding the fragment data */
	__u16 page_offset;	/* byte offset of the data within the page */
	__u16 size;		/* number of bytes in this fragment */
};
---|
130 | |
---|
/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	atomic_t	dataref;	/* split reference count, see SKB_DATAREF_SHIFT */
	unsigned short	nr_frags;	/* number of entries used in frags[] */
	unsigned short	gso_size;	/* payload size of each GSO segment */
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short  gso_type;	/* SKB_GSO_* flags */
	unsigned int    ip6_frag_id;
	struct sk_buff	*frag_list;	/* chain of additional buffers */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};
---|
145 | |
---|
/* We divide dataref into two halves. The higher 16 bits hold references
 * to the payload part of skb->data. The lower 16 bits hold references to
 * the entire skb->data. It is up to the users of the skb to agree on
 * where the payload starts.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
---|
159 | |
---|
/* Packet timestamp, split into whole seconds and microseconds. */
struct skb_timeval {
	u32	off_sec;
	u32	off_usec;
};
---|
164 | |
---|
165 | |
---|
/* States for skb->fclone (fast-clone allocation). */
enum {
	SKB_FCLONE_UNAVAILABLE,	/* not allocated from the fclone cache */
	SKB_FCLONE_ORIG,	/* original half of a fast-clone pair */
	SKB_FCLONE_CLONE,	/* clone half of a fast-clone pair */
};
---|
171 | |
---|
/* Bit flags for skb_shinfo(skb)->gso_type. */
enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,
};
---|
184 | |
---|
/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@input_dev: Device we arrived on
 *	@h: Transport layer header
 *	@nh: Network layer header
 *	@mac: Link layer header
 *	@dst: destination entry
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@csum: Checksum
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@nohdr: Payload reference only, must not modify header
 *	@proto_data_valid: Protocol data validated since arriving at localhost
 *	@proto_csum_blank: Protocol csum must be added before leaving localhost
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ip_summed: Driver fed us an IP checksum
 *	@priority: Packet queueing priority
 *	@users: User count - see {datagram,tcp}.c
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@destructor: Destruct function
 *	@nfmark: Can be used for communication between hooks
 *	@nfct: Associated connection, if any
 *	@ipvs_property: skbuff is owned by ipvs
 *	@nfctinfo: Relationship of this skb to the connection
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	struct sock		*sk;
	struct skb_timeval	tstamp;
	struct net_device	*dev;
	struct net_device	*input_dev;

	/* Transport layer header: one pointer, many protocol views. */
	union {
		struct tcphdr	*th;
		struct udphdr	*uh;
		struct icmphdr	*icmph;
		struct igmphdr	*igmph;
		struct iphdr	*ipiph;
		struct ipv6hdr	*ipv6h;
		unsigned char	*raw;
	} h;

	/* Network layer header. */
	union {
		struct iphdr	*iph;
		struct ipv6hdr	*ipv6h;
		struct arphdr	*arph;
		unsigned char	*raw;
	} nh;

	/* Link layer header. */
	union {
		unsigned char	*raw;
	} mac;

	struct dst_entry	*dst;
	struct sec_path		*sp;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48];

	unsigned int		len,
				data_len,
				mac_len,
				csum;
	__u32			priority;
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	/* Under Xen two extra checksum-offload tracking bits are carried. */
	__u8			pkt_type:3,
				fclone:2,
#ifndef CONFIG_XEN
				ipvs_property:1;
#else
				ipvs_property:1,
				proto_data_valid:1,
				proto_csum_blank:1;
#endif
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#ifdef CONFIG_NETFILTER
	struct nf_conntrack	*nfct;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif
	__u32			nfmark;
#endif /* CONFIG_NETFILTER */
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif
#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif


	/* These elements must be at the end, see alloc_skb() for details. */
	unsigned int		truesize;
	atomic_t		users;
	unsigned char		*head,
				*data,
				*tail,
				*end;
};
---|
329 | |
---|
330 | #ifdef __KERNEL__ |
---|
331 | /* |
---|
332 | * Handling routines are only of interest to the kernel |
---|
333 | */ |
---|
334 | #include <linux/slab.h> |
---|
335 | |
---|
336 | #include <asm/system.h> |
---|
337 | |
---|
338 | extern void kfree_skb(struct sk_buff *skb); |
---|
339 | extern void __kfree_skb(struct sk_buff *skb); |
---|
340 | extern struct sk_buff *__alloc_skb(unsigned int size, |
---|
341 | gfp_t priority, int fclone); |
---|
/**
 *	alloc_skb - allocate a network buffer
 *	@size: data buffer size to allocate
 *	@priority: allocation mask (%GFP_ATOMIC from interrupt context)
 *
 *	Allocate a plain (non fast-clone) sk_buff via __alloc_skb().
 */
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0);
}
---|
347 | |
---|
/**
 *	alloc_skb_fclone - allocate a network buffer from the fclone cache
 *	@size: data buffer size to allocate
 *	@priority: allocation mask (%GFP_ATOMIC from interrupt context)
 *
 *	Allocate an sk_buff with the fast-clone flag set so that a later
 *	skb_clone() can be satisfied cheaply (see the SKB_FCLONE_* states).
 */
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1);
}
---|
353 | |
---|
354 | extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, |
---|
355 | unsigned int size, |
---|
356 | gfp_t priority); |
---|
357 | extern void kfree_skbmem(struct sk_buff *skb); |
---|
358 | extern struct sk_buff *skb_clone(struct sk_buff *skb, |
---|
359 | gfp_t priority); |
---|
360 | extern struct sk_buff *skb_copy(const struct sk_buff *skb, |
---|
361 | gfp_t priority); |
---|
362 | extern struct sk_buff *pskb_copy(struct sk_buff *skb, |
---|
363 | gfp_t gfp_mask); |
---|
364 | extern int pskb_expand_head(struct sk_buff *skb, |
---|
365 | int nhead, int ntail, |
---|
366 | gfp_t gfp_mask); |
---|
367 | extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, |
---|
368 | unsigned int headroom); |
---|
369 | extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb, |
---|
370 | int newheadroom, int newtailroom, |
---|
371 | gfp_t priority); |
---|
372 | extern int skb_pad(struct sk_buff *skb, int pad); |
---|
373 | #define dev_kfree_skb(a) kfree_skb(a) |
---|
374 | extern void skb_over_panic(struct sk_buff *skb, int len, |
---|
375 | void *here); |
---|
376 | extern void skb_under_panic(struct sk_buff *skb, int len, |
---|
377 | void *here); |
---|
378 | extern void skb_truesize_bug(struct sk_buff *skb); |
---|
379 | |
---|
380 | static inline void skb_truesize_check(struct sk_buff *skb) |
---|
381 | { |
---|
382 | if (unlikely((int)skb->truesize < sizeof(struct sk_buff) + skb->len)) |
---|
383 | skb_truesize_bug(skb); |
---|
384 | } |
---|
385 | |
---|
386 | extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, |
---|
387 | int getfrag(void *from, char *to, int offset, |
---|
388 | int len,int odd, struct sk_buff *skb), |
---|
389 | void *from, int length); |
---|
390 | |
---|
/* Cursor state for the sequential skb reader (skb_prepare_seq_read(),
 * skb_seq_read(), skb_abort_seq_read()). */
struct skb_seq_state
{
	__u32		lower_offset;	/* first byte offset of interest */
	__u32		upper_offset;	/* one past the last byte offset */
	__u32		frag_idx;	/* index of fragment being walked */
	__u32		stepped_offset;	/* bytes already stepped past */
	struct sk_buff	*root_skb;	/* skb the read was prepared on */
	struct sk_buff	*cur_skb;	/* skb currently being walked */
	__u8		*frag_data;	/* mapped fragment data, if any -- see skb_seq_read() */
};
---|
401 | |
---|
402 | extern void skb_prepare_seq_read(struct sk_buff *skb, |
---|
403 | unsigned int from, unsigned int to, |
---|
404 | struct skb_seq_state *st); |
---|
405 | extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data, |
---|
406 | struct skb_seq_state *st); |
---|
407 | extern void skb_abort_seq_read(struct skb_seq_state *st); |
---|
408 | |
---|
409 | extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, |
---|
410 | unsigned int to, struct ts_config *config, |
---|
411 | struct ts_state *state); |
---|
412 | |
---|
/* Internal: the shared info block sits immediately past the data area. */
#define skb_shinfo(SKB)		((struct skb_shared_info *)((SKB)->end))
---|
415 | |
---|
416 | /** |
---|
417 | * skb_queue_empty - check if a queue is empty |
---|
418 | * @list: queue head |
---|
419 | * |
---|
420 | * Returns true if the queue is empty, false otherwise. |
---|
421 | */ |
---|
422 | static inline int skb_queue_empty(const struct sk_buff_head *list) |
---|
423 | { |
---|
424 | return list->next == (struct sk_buff *)list; |
---|
425 | } |
---|
426 | |
---|
/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}
---|
439 | |
---|
440 | /* |
---|
441 | * If users == 1, we are the only owner and are can avoid redundant |
---|
442 | * atomic change. |
---|
443 | */ |
---|
444 | |
---|
445 | /** |
---|
446 | * skb_cloned - is the buffer a clone |
---|
447 | * @skb: buffer to check |
---|
448 | * |
---|
449 | * Returns true if the buffer was generated with skb_clone() and is |
---|
450 | * one of multiple shared copies of the buffer. Cloned buffers are |
---|
451 | * shared data so must not be written to under normal circumstances. |
---|
452 | */ |
---|
453 | static inline int skb_cloned(const struct sk_buff *skb) |
---|
454 | { |
---|
455 | return skb->cloned && |
---|
456 | (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1; |
---|
457 | } |
---|
458 | |
---|
459 | /** |
---|
460 | * skb_header_cloned - is the header a clone |
---|
461 | * @skb: buffer to check |
---|
462 | * |
---|
463 | * Returns true if modifying the header part of the buffer requires |
---|
464 | * the data to be copied. |
---|
465 | */ |
---|
466 | static inline int skb_header_cloned(const struct sk_buff *skb) |
---|
467 | { |
---|
468 | int dataref; |
---|
469 | |
---|
470 | if (!skb->cloned) |
---|
471 | return 0; |
---|
472 | |
---|
473 | dataref = atomic_read(&skb_shinfo(skb)->dataref); |
---|
474 | dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT); |
---|
475 | return dataref != 1; |
---|
476 | } |
---|
477 | |
---|
/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	/* May only happen once per skb; nohdr records that it did. */
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	/* Take a payload-only reference (upper half of dataref). */
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}
---|
492 | |
---|
/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}
---|
504 | |
---|
505 | /** |
---|
506 | * skb_share_check - check if buffer is shared and if so clone it |
---|
507 | * @skb: buffer to check |
---|
508 | * @pri: priority for memory allocation |
---|
509 | * |
---|
510 | * If the buffer is shared the buffer is cloned and the old copy |
---|
511 | * drops a reference. A new clone with a single reference is returned. |
---|
512 | * If the buffer is not shared the original buffer is returned. When |
---|
513 | * being called from interrupt status or with spinlocks held pri must |
---|
514 | * be GFP_ATOMIC. |
---|
515 | * |
---|
516 | * NULL is returned on a memory allocation failure. |
---|
517 | */ |
---|
518 | static inline struct sk_buff *skb_share_check(struct sk_buff *skb, |
---|
519 | gfp_t pri) |
---|
520 | { |
---|
521 | might_sleep_if(pri & __GFP_WAIT); |
---|
522 | if (skb_shared(skb)) { |
---|
523 | struct sk_buff *nskb = skb_clone(skb, pri); |
---|
524 | kfree_skb(skb); |
---|
525 | skb = nskb; |
---|
526 | } |
---|
527 | return skb; |
---|
528 | } |
---|
529 | |
---|
530 | /* |
---|
531 | * Copy shared buffers into a new sk_buff. We effectively do COW on |
---|
532 | * packets to handle cases where we have a local reader and forward |
---|
533 | * and a couple of other messy ones. The normal one is tcpdumping |
---|
 *	a packet that's being forwarded.
---|
535 | */ |
---|
536 | |
---|
537 | /** |
---|
538 | * skb_unshare - make a copy of a shared buffer |
---|
539 | * @skb: buffer to check |
---|
540 | * @pri: priority for memory allocation |
---|
541 | * |
---|
542 | * If the socket buffer is a clone then this function creates a new |
---|
543 | * copy of the data, drops a reference count on the old copy and returns |
---|
544 | * the new copy with the reference count at 1. If the buffer is not a clone |
---|
545 | * the original buffer is returned. When called with a spinlock held or |
---|
546 | * from interrupt state @pri must be %GFP_ATOMIC |
---|
547 | * |
---|
548 | * %NULL is returned on a memory allocation failure. |
---|
549 | */ |
---|
550 | static inline struct sk_buff *skb_unshare(struct sk_buff *skb, |
---|
551 | gfp_t pri) |
---|
552 | { |
---|
553 | might_sleep_if(pri & __GFP_WAIT); |
---|
554 | if (skb_cloned(skb)) { |
---|
555 | struct sk_buff *nskb = skb_copy(skb, pri); |
---|
556 | kfree_skb(skb); /* Free our shared copy */ |
---|
557 | skb = nskb; |
---|
558 | } |
---|
559 | return skb; |
---|
560 | } |
---|
561 | |
---|
562 | /** |
---|
563 | * skb_peek |
---|
564 | * @list_: list to peek at |
---|
565 | * |
---|
566 | * Peek an &sk_buff. Unlike most other operations you _MUST_ |
---|
567 | * be careful with this one. A peek leaves the buffer on the |
---|
568 | * list and someone else may run off with it. You must hold |
---|
569 | * the appropriate locks or have a private queue to do this. |
---|
570 | * |
---|
571 | * Returns %NULL for an empty list or a pointer to the head element. |
---|
572 | * The reference count is not incremented and the reference is therefore |
---|
573 | * volatile. Use with caution. |
---|
574 | */ |
---|
575 | static inline struct sk_buff *skb_peek(struct sk_buff_head *list_) |
---|
576 | { |
---|
577 | struct sk_buff *list = ((struct sk_buff *)list_)->next; |
---|
578 | if (list == (struct sk_buff *)list_) |
---|
579 | list = NULL; |
---|
580 | return list; |
---|
581 | } |
---|
582 | |
---|
583 | /** |
---|
584 | * skb_peek_tail |
---|
585 | * @list_: list to peek at |
---|
586 | * |
---|
587 | * Peek an &sk_buff. Unlike most other operations you _MUST_ |
---|
588 | * be careful with this one. A peek leaves the buffer on the |
---|
589 | * list and someone else may run off with it. You must hold |
---|
590 | * the appropriate locks or have a private queue to do this. |
---|
591 | * |
---|
592 | * Returns %NULL for an empty list or a pointer to the tail element. |
---|
593 | * The reference count is not incremented and the reference is therefore |
---|
594 | * volatile. Use with caution. |
---|
595 | */ |
---|
596 | static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_) |
---|
597 | { |
---|
598 | struct sk_buff *list = ((struct sk_buff *)list_)->prev; |
---|
599 | if (list == (struct sk_buff *)list_) |
---|
600 | list = NULL; |
---|
601 | return list; |
---|
602 | } |
---|
603 | |
---|
/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}
---|
614 | |
---|
615 | /* |
---|
616 | * This function creates a split out lock class for each invocation; |
---|
617 | * this is needed for now since a whole lot of users of the skb-queue |
---|
618 | * infrastructure in drivers have different locking usage (in hardirq) |
---|
619 | * than the networking core (in softirq only). In the long run either the |
---|
620 | * network layer or drivers should need annotation to consolidate the |
---|
621 | * main types of usage into 3 classes. |
---|
622 | */ |
---|
623 | static inline void skb_queue_head_init(struct sk_buff_head *list) |
---|
624 | { |
---|
625 | spin_lock_init(&list->lock); |
---|
626 | list->prev = list->next = (struct sk_buff *)list; |
---|
627 | list->qlen = 0; |
---|
628 | } |
---|
629 | |
---|
630 | /* |
---|
631 | * Insert an sk_buff at the start of a list. |
---|
632 | * |
---|
633 | * The "__skb_xxxx()" functions are the non-atomic ones that |
---|
634 | * can only be called with interrupts disabled. |
---|
635 | */ |
---|
636 | |
---|
637 | /** |
---|
638 | * __skb_queue_after - queue a buffer at the list head |
---|
639 | * @list: list to use |
---|
640 | * @prev: place after this buffer |
---|
641 | * @newsk: buffer to queue |
---|
642 | * |
---|
643 | * Queue a buffer int the middle of a list. This function takes no locks |
---|
644 | * and you must therefore hold required locks before calling it. |
---|
645 | * |
---|
646 | * A buffer cannot be placed on two lists at the same time. |
---|
647 | */ |
---|
648 | static inline void __skb_queue_after(struct sk_buff_head *list, |
---|
649 | struct sk_buff *prev, |
---|
650 | struct sk_buff *newsk) |
---|
651 | { |
---|
652 | struct sk_buff *next; |
---|
653 | list->qlen++; |
---|
654 | |
---|
655 | next = prev->next; |
---|
656 | newsk->next = next; |
---|
657 | newsk->prev = prev; |
---|
658 | next->prev = prev->next = newsk; |
---|
659 | } |
---|
660 | |
---|
/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	/* Inserting after the head sentinel is inserting at the front. */
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}
---|
677 | |
---|
678 | /** |
---|
679 | * __skb_queue_tail - queue a buffer at the list tail |
---|
680 | * @list: list to use |
---|
681 | * @newsk: buffer to queue |
---|
682 | * |
---|
683 | * Queue a buffer at the end of a list. This function takes no locks |
---|
684 | * and you must therefore hold required locks before calling it. |
---|
685 | * |
---|
686 | * A buffer cannot be placed on two lists at the same time. |
---|
687 | */ |
---|
688 | extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); |
---|
689 | static inline void __skb_queue_tail(struct sk_buff_head *list, |
---|
690 | struct sk_buff *newsk) |
---|
691 | { |
---|
692 | struct sk_buff *prev, *next; |
---|
693 | |
---|
694 | list->qlen++; |
---|
695 | next = (struct sk_buff *)list; |
---|
696 | prev = next->prev; |
---|
697 | newsk->next = next; |
---|
698 | newsk->prev = prev; |
---|
699 | next->prev = prev->next = newsk; |
---|
700 | } |
---|
701 | |
---|
702 | |
---|
703 | /** |
---|
704 | * __skb_dequeue - remove from the head of the queue |
---|
705 | * @list: list to dequeue from |
---|
706 | * |
---|
707 | * Remove the head of the list. This function does not take any locks |
---|
708 | * so must be used with appropriate locks held only. The head item is |
---|
709 | * returned or %NULL if the list is empty. |
---|
710 | */ |
---|
711 | extern struct sk_buff *skb_dequeue(struct sk_buff_head *list); |
---|
712 | static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) |
---|
713 | { |
---|
714 | struct sk_buff *next, *prev, *result; |
---|
715 | |
---|
716 | prev = (struct sk_buff *) list; |
---|
717 | next = prev->next; |
---|
718 | result = NULL; |
---|
719 | if (next != prev) { |
---|
720 | result = next; |
---|
721 | next = next->next; |
---|
722 | list->qlen--; |
---|
723 | next->prev = prev; |
---|
724 | prev->next = next; |
---|
725 | result->next = result->prev = NULL; |
---|
726 | } |
---|
727 | return result; |
---|
728 | } |
---|
729 | |
---|
730 | |
---|
731 | /* |
---|
732 | * Insert a packet on a list. |
---|
733 | */ |
---|
734 | extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list); |
---|
735 | static inline void __skb_insert(struct sk_buff *newsk, |
---|
736 | struct sk_buff *prev, struct sk_buff *next, |
---|
737 | struct sk_buff_head *list) |
---|
738 | { |
---|
739 | newsk->next = next; |
---|
740 | newsk->prev = prev; |
---|
741 | next->prev = prev->next = newsk; |
---|
742 | list->qlen++; |
---|
743 | } |
---|
744 | |
---|
/*
 * Place a packet after a given packet in a list.
 */
extern void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	/* Link @newsk between @old and @old's successor; bumps list->qlen. */
	__skb_insert(newsk, old, old->next, list);
}
---|
753 | |
---|
754 | /* |
---|
755 | * remove sk_buff from list. _Must_ be called atomically, and with |
---|
756 | * the list known.. |
---|
757 | */ |
---|
758 | extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list); |
---|
759 | static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) |
---|
760 | { |
---|
761 | struct sk_buff *next, *prev; |
---|
762 | |
---|
763 | list->qlen--; |
---|
764 | next = skb->next; |
---|
765 | prev = skb->prev; |
---|
766 | skb->next = skb->prev = NULL; |
---|
767 | next->prev = prev; |
---|
768 | prev->next = next; |
---|
769 | } |
---|
770 | |
---|
771 | |
---|
772 | /* XXX: more streamlined implementation */ |
---|
773 | |
---|
774 | /** |
---|
775 | * __skb_dequeue_tail - remove from the tail of the queue |
---|
776 | * @list: list to dequeue from |
---|
777 | * |
---|
778 | * Remove the tail of the list. This function does not take any locks |
---|
779 | * so must be used with appropriate locks held only. The tail item is |
---|
780 | * returned or %NULL if the list is empty. |
---|
781 | */ |
---|
782 | extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); |
---|
783 | static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) |
---|
784 | { |
---|
785 | struct sk_buff *skb = skb_peek_tail(list); |
---|
786 | if (skb) |
---|
787 | __skb_unlink(skb, list); |
---|
788 | return skb; |
---|
789 | } |
---|
790 | |
---|
791 | |
---|
/* Nonzero when some of the data lives in fragments (skb->data_len != 0). */
static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}
---|
796 | |
---|
/* Number of bytes in the linear (non-fragment) part of the buffer. */
static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}
---|
801 | |
---|
802 | static inline int skb_pagelen(const struct sk_buff *skb) |
---|
803 | { |
---|
804 | int i, len = 0; |
---|
805 | |
---|
806 | for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) |
---|
807 | len += skb_shinfo(skb)->frags[i].size; |
---|
808 | return len + skb_headlen(skb); |
---|
809 | } |
---|
810 | |
---|
811 | static inline void skb_fill_page_desc(struct sk_buff *skb, int i, |
---|
812 | struct page *page, int off, int size) |
---|
813 | { |
---|
814 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
---|
815 | |
---|
816 | frag->page = page; |
---|
817 | frag->page_offset = off; |
---|
818 | frag->size = size; |
---|
819 | skb_shinfo(skb)->nr_frags = i + 1; |
---|
820 | } |
---|
821 | |
---|
/* Layout sanity checks: BUG if the skb unexpectedly carries page
 * fragments, a fragment list, or any non-linear data, respectively. */
#define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))
---|
825 | |
---|
826 | /* |
---|
827 | * Add data to an sk_buff |
---|
828 | */ |
---|
829 | static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) |
---|
830 | { |
---|
831 | unsigned char *tmp = skb->tail; |
---|
832 | SKB_LINEAR_ASSERT(skb); |
---|
833 | skb->tail += len; |
---|
834 | skb->len += len; |
---|
835 | return tmp; |
---|
836 | } |
---|
837 | |
---|
838 | /** |
---|
839 | * skb_put - add data to a buffer |
---|
840 | * @skb: buffer to use |
---|
841 | * @len: amount of data to add |
---|
842 | * |
---|
843 | * This function extends the used data area of the buffer. If this would |
---|
844 | * exceed the total buffer size the kernel will panic. A pointer to the |
---|
845 | * first byte of the extra data is returned. |
---|
846 | */ |
---|
847 | static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len) |
---|
848 | { |
---|
849 | unsigned char *tmp = skb->tail; |
---|
850 | SKB_LINEAR_ASSERT(skb); |
---|
851 | skb->tail += len; |
---|
852 | skb->len += len; |
---|
853 | if (unlikely(skb->tail>skb->end)) |
---|
854 | skb_over_panic(skb, len, current_text_addr()); |
---|
855 | return tmp; |
---|
856 | } |
---|
857 | |
---|
858 | static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len) |
---|
859 | { |
---|
860 | skb->data -= len; |
---|
861 | skb->len += len; |
---|
862 | return skb->data; |
---|
863 | } |
---|
864 | |
---|
865 | /** |
---|
866 | * skb_push - add data to the start of a buffer |
---|
867 | * @skb: buffer to use |
---|
868 | * @len: amount of data to add |
---|
869 | * |
---|
870 | * This function extends the used data area of the buffer at the buffer |
---|
871 | * start. If this would exceed the total buffer headroom the kernel will |
---|
872 | * panic. A pointer to the first byte of the extra data is returned. |
---|
873 | */ |
---|
874 | static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len) |
---|
875 | { |
---|
876 | skb->data -= len; |
---|
877 | skb->len += len; |
---|
878 | if (unlikely(skb->data<skb->head)) |
---|
879 | skb_under_panic(skb, len, current_text_addr()); |
---|
880 | return skb->data; |
---|
881 | } |
---|
882 | |
---|
883 | static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len) |
---|
884 | { |
---|
885 | skb->len -= len; |
---|
886 | BUG_ON(skb->len < skb->data_len); |
---|
887 | return skb->data += len; |
---|
888 | } |
---|
889 | |
---|
890 | /** |
---|
891 | * skb_pull - remove data from the start of a buffer |
---|
892 | * @skb: buffer to use |
---|
893 | * @len: amount of data to remove |
---|
894 | * |
---|
895 | * This function removes data from the start of a buffer, returning |
---|
896 | * the memory to the headroom. A pointer to the next data in the buffer |
---|
897 | * is returned. Once the data has been pulled future pushes will overwrite |
---|
898 | * the old data. |
---|
899 | */ |
---|
900 | static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) |
---|
901 | { |
---|
902 | return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); |
---|
903 | } |
---|
904 | |
---|
905 | extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); |
---|
906 | |
---|
907 | static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) |
---|
908 | { |
---|
909 | if (len > skb_headlen(skb) && |
---|
910 | !__pskb_pull_tail(skb, len-skb_headlen(skb))) |
---|
911 | return NULL; |
---|
912 | skb->len -= len; |
---|
913 | return skb->data += len; |
---|
914 | } |
---|
915 | |
---|
916 | static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len) |
---|
917 | { |
---|
918 | return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len); |
---|
919 | } |
---|
920 | |
---|
921 | static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) |
---|
922 | { |
---|
923 | if (likely(len <= skb_headlen(skb))) |
---|
924 | return 1; |
---|
925 | if (unlikely(len > skb->len)) |
---|
926 | return 0; |
---|
927 | return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL; |
---|
928 | } |
---|
929 | |
---|
/**
 * skb_headroom - bytes at buffer head
 * @skb: buffer to check
 *
 * Return the number of bytes of free space at the head of an
 * &sk_buff: the gap between the start of the allocated buffer
 * and the current data pointer.
 */
static inline int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}
---|
940 | |
---|
941 | /** |
---|
942 | * skb_tailroom - bytes at buffer end |
---|
943 | * @skb: buffer to check |
---|
944 | * |
---|
945 | * Return the number of bytes of free space at the tail of an sk_buff |
---|
946 | */ |
---|
947 | static inline int skb_tailroom(const struct sk_buff *skb) |
---|
948 | { |
---|
949 | return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail; |
---|
950 | } |
---|
951 | |
---|
952 | /** |
---|
953 | * skb_reserve - adjust headroom |
---|
954 | * @skb: buffer to alter |
---|
955 | * @len: bytes to move |
---|
956 | * |
---|
957 | * Increase the headroom of an empty &sk_buff by reducing the tail |
---|
958 | * room. This is only allowed for an empty buffer. |
---|
959 | */ |
---|
960 | static inline void skb_reserve(struct sk_buff *skb, int len) |
---|
961 | { |
---|
962 | skb->data += len; |
---|
963 | skb->tail += len; |
---|
964 | } |
---|
965 | |
---|
966 | /* |
---|
967 | * CPUs often take a performance hit when accessing unaligned memory |
---|
968 | * locations. The actual performance hit varies, it can be small if the |
---|
969 | * hardware handles it or large if we have to take an exception and fix it |
---|
970 | * in software. |
---|
971 | * |
---|
972 | * Since an ethernet header is 14 bytes network drivers often end up with |
---|
973 | * the IP header at an unaligned offset. The IP header can be aligned by |
---|
974 | * shifting the start of the packet by 2 bytes. Drivers should do this |
---|
975 | * with: |
---|
976 | * |
---|
 *	skb_reserve(skb, NET_IP_ALIGN);
---|
978 | * |
---|
979 | * The downside to this alignment of the IP header is that the DMA is now |
---|
980 | * unaligned. On some architectures the cost of an unaligned DMA is high |
---|
981 | * and this cost outweighs the gains made by aligning the IP header. |
---|
982 | * |
---|
983 | * Since this trade off varies between architectures, we allow NET_IP_ALIGN |
---|
984 | * to be overridden. |
---|
985 | */ |
---|
986 | #ifndef NET_IP_ALIGN |
---|
987 | #define NET_IP_ALIGN 2 |
---|
988 | #endif |
---|
989 | |
---|
990 | /* |
---|
991 | * The networking layer reserves some headroom in skb data (via |
---|
992 | * dev_alloc_skb). This is used to avoid having to reallocate skb data when |
---|
993 | * the header has to grow. In the default case, if the header has to grow |
---|
994 | * 16 bytes or less we avoid the reallocation. |
---|
995 | * |
---|
996 | * Unfortunately this headroom changes the DMA alignment of the resulting |
---|
997 | * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive |
---|
998 | * on some architectures. An architecture can override this value, |
---|
999 | * perhaps setting it to a cacheline in size (since that will maintain |
---|
1000 | * cacheline alignment of the DMA). It must be a power of 2. |
---|
1001 | * |
---|
1002 | * Various parts of the networking layer expect at least 16 bytes of |
---|
1003 | * headroom, you should not reduce this. |
---|
1004 | */ |
---|
1005 | #ifndef NET_SKB_PAD |
---|
1006 | #define NET_SKB_PAD 16 |
---|
1007 | #endif |
---|
1008 | |
---|
1009 | extern int ___pskb_trim(struct sk_buff *skb, unsigned int len); |
---|
1010 | |
---|
1011 | static inline void __skb_trim(struct sk_buff *skb, unsigned int len) |
---|
1012 | { |
---|
1013 | if (unlikely(skb->data_len)) { |
---|
1014 | WARN_ON(1); |
---|
1015 | return; |
---|
1016 | } |
---|
1017 | skb->len = len; |
---|
1018 | skb->tail = skb->data + len; |
---|
1019 | } |
---|
1020 | |
---|
/**
 * skb_trim - remove end from a buffer
 * @skb: buffer to alter
 * @len: new length
 *
 * Cut the length of a buffer down by removing data from the tail.  If
 * the buffer is already under the length specified it is not modified.
 * The skb must be linear.
 */
static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
---|
1035 | |
---|
1036 | |
---|
1037 | static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) |
---|
1038 | { |
---|
1039 | if (skb->data_len) |
---|
1040 | return ___pskb_trim(skb, len); |
---|
1041 | __skb_trim(skb, len); |
---|
1042 | return 0; |
---|
1043 | } |
---|
1044 | |
---|
1045 | static inline int pskb_trim(struct sk_buff *skb, unsigned int len) |
---|
1046 | { |
---|
1047 | return (len < skb->len) ? __pskb_trim(skb, len) : 0; |
---|
1048 | } |
---|
1049 | |
---|
/**
 * pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 * @skb: buffer to alter
 * @len: new length
 *
 * This is identical to pskb_trim except that the caller knows that
 * the skb is not cloned so we should never get an error due to out-
 * of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	/* Trimming an uncloned skb never needs to allocate, so a failure
	 * here indicates a caller bug. */
	BUG_ON(err);
}
---|
1064 | |
---|
/**
 * skb_orphan - orphan a buffer
 * @skb: buffer to orphan
 *
 * If a buffer currently has an owner then we call the owner's
 * destructor function and make the @skb unowned.  The buffer continues
 * to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	/* Run the destructor before clearing skb->sk: the destructor
	 * may still need the owning socket. */
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}
---|
1080 | |
---|
/**
 * __skb_queue_purge - empty a list
 * @list: list to empty
 *
 * Delete all buffers on an &sk_buff list.  Each buffer is removed
 * from the list and one reference dropped.  This function does not
 * take the list lock; the caller must hold the relevant locks.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	for (;;) {
		skb = __skb_dequeue(list);
		if (!skb)
			break;
		kfree_skb(skb);
	}
}
---|
1096 | |
---|
1097 | /** |
---|
1098 | * __dev_alloc_skb - allocate an skbuff for receiving |
---|
1099 | * @length: length to allocate |
---|
1100 | * @gfp_mask: get_free_pages mask, passed to alloc_skb |
---|
1101 | * |
---|
1102 | * Allocate a new &sk_buff and assign it a usage count of one. The |
---|
1103 | * buffer has unspecified headroom built in. Users should allocate |
---|
1104 | * the headroom they think they need without accounting for the |
---|
1105 | * built in space. The built in space is used for optimisations. |
---|
1106 | * |
---|
1107 | * %NULL is returned if there is no free memory. |
---|
1108 | */ |
---|
1109 | static inline struct sk_buff *__dev_alloc_skb(unsigned int length, |
---|
1110 | gfp_t gfp_mask) |
---|
1111 | { |
---|
1112 | struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask); |
---|
1113 | if (likely(skb)) |
---|
1114 | skb_reserve(skb, NET_SKB_PAD); |
---|
1115 | return skb; |
---|
1116 | } |
---|
1117 | |
---|
/**
 * dev_alloc_skb - allocate an skbuff for receiving
 * @length: length to allocate
 *
 * Allocate a new &sk_buff and assign it a usage count of one.  The
 * buffer has unspecified headroom built in.  Users should allocate
 * the headroom they think they need without accounting for the
 * built in space.  The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory.  Although this function
 * allocates memory it can be called from an interrupt (GFP_ATOMIC).
 */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
---|
1134 | |
---|
extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask);

/**
 * netdev_alloc_skb - allocate an skbuff for rx on a specific device
 * @dev: network device to receive on
 * @length: length to allocate
 *
 * Allocate a new &sk_buff and assign it a usage count of one.  The
 * buffer has unspecified headroom built in.  Users should allocate
 * the headroom they think they need without accounting for the
 * built in space.  The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory.  Although this function
 * allocates memory it can be called from an interrupt (GFP_ATOMIC).
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}
---|
1156 | |
---|
/**
 * skb_cow - copy header of skb when it is required
 * @skb: buffer to cow
 * @headroom: needed headroom
 *
 * If the skb passed lacks sufficient headroom or its data part
 * is shared, data is reallocated.  If reallocation fails, an error
 * is returned and original skb is not changed.
 *
 * The result is skb with writable area skb->head...skb->tail
 * and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	/* Missing headroom relative to max(@headroom, NET_SKB_PAD). */
	int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) -
			skb_headroom(skb);

	if (delta < 0)
		delta = 0;

	/* Reallocate when headroom is short or the data is shared; the
	 * extra headroom is rounded up to a multiple of NET_SKB_PAD
	 * (a power of 2) to preserve alignment. */
	if (delta || skb_cloned(skb))
		return pskb_expand_head(skb, (delta + (NET_SKB_PAD-1)) &
				~(NET_SKB_PAD-1), 0, GFP_ATOMIC);
	return 0;
}
---|
1182 | |
---|
1183 | /** |
---|
1184 | * skb_padto - pad an skbuff up to a minimal size |
---|
1185 | * @skb: buffer to pad |
---|
1186 | * @len: minimal length |
---|
1187 | * |
---|
1188 | * Pads up a buffer to ensure the trailing bytes exist and are |
---|
1189 | * blanked. If the buffer already contains sufficient data it |
---|
1190 | * is untouched. Otherwise it is extended. Returns zero on |
---|
1191 | * success. The skb is freed on error. |
---|
1192 | */ |
---|
1193 | |
---|
1194 | static inline int skb_padto(struct sk_buff *skb, unsigned int len) |
---|
1195 | { |
---|
1196 | unsigned int size = skb->len; |
---|
1197 | if (likely(size >= len)) |
---|
1198 | return 0; |
---|
1199 | return skb_pad(skb, len-size); |
---|
1200 | } |
---|
1201 | |
---|
/* Copy @copy bytes of user data to the tail of @skb.  For
 * CHECKSUM_NONE buffers the software checksum is folded into
 * skb->csum while copying.  On a fault the skb is trimmed back to its
 * original length and -EFAULT is returned; on success, 0. */
static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;	/* length before the copy, for rollback */

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		unsigned int csum = csum_and_copy_from_user(from,
							    skb_put(skb, copy),
							    copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	/* Fault: undo the skb_put() so the skb is unchanged. */
	__skb_trim(skb, off);
	return -EFAULT;
}
---|
1222 | |
---|
1223 | static inline int skb_can_coalesce(struct sk_buff *skb, int i, |
---|
1224 | struct page *page, int off) |
---|
1225 | { |
---|
1226 | if (i) { |
---|
1227 | struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; |
---|
1228 | |
---|
1229 | return page == frag->page && |
---|
1230 | off == frag->page_offset + frag->size; |
---|
1231 | } |
---|
1232 | return 0; |
---|
1233 | } |
---|
1234 | |
---|
1235 | static inline int __skb_linearize(struct sk_buff *skb) |
---|
1236 | { |
---|
1237 | return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM; |
---|
1238 | } |
---|
1239 | |
---|
/**
 * skb_linearize - convert paged skb to linear one
 * @skb: buffer to linearize
 *
 * If there is no free memory -ENOMEM is returned, otherwise zero
 * is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	if (!skb_is_nonlinear(skb))
		return 0;
	return __skb_linearize(skb);
}
---|
1251 | |
---|
/**
 * skb_linearize_cow - make sure skb is linear and writable
 * @skb: buffer to process
 *
 * If there is no free memory -ENOMEM is returned, otherwise zero
 * is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb) || skb_cloned(skb))
		return __skb_linearize(skb);
	return 0;
}
---|
1264 | |
---|
/**
 * skb_postpull_rcsum - update checksum for received skb after pull
 * @skb: buffer to update
 * @start: start of data before pull
 * @len: length of data pulled
 *
 * After doing a pull on a received packet, you need to call this to
 * update the CHECKSUM_HW checksum, or set ip_summed to CHECKSUM_NONE
 * so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	/* Subtract the checksum of the pulled bytes from the hardware
	 * checksum so it still covers exactly the remaining data. */
	if (skb->ip_summed == CHECKSUM_HW)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}
---|
1282 | |
---|
1283 | unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); |
---|
1284 | |
---|
/**
 * pskb_trim_rcsum - trim received skb and update checksum
 * @skb: buffer to trim
 * @len: new length
 *
 * This is exactly the same as pskb_trim except that it ensures the
 * checksum of received packets are still valid after the operation.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	/* The hardware checksum covered the trimmed bytes too, so it is
	 * no longer valid; force a software recompute. */
	if (skb->ip_summed == CHECKSUM_HW)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}
---|
1302 | |
---|
/* Map a page fragment for CPU access via the KM_SKB_DATA_SOFTIRQ
 * atomic kmap slot.  With highmem, bottom halves are disabled first
 * so softirq networking code cannot reuse the same slot underneath
 * us; pair with kunmap_skb_frag(). */
static inline void *kmap_skb_frag(const skb_frag_t *frag)
{
#ifdef CONFIG_HIGHMEM
	BUG_ON(in_irq());	/* slot is not safe from hard IRQ context */

	local_bh_disable();
#endif
	return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
}
---|
1312 | |
---|
/* Undo kmap_skb_frag(): release the atomic kmap first, then re-enable
 * bottom halves (highmem only), mirroring the acquire order. */
static inline void kunmap_skb_frag(void *vaddr)
{
	kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
#ifdef CONFIG_HIGHMEM
	local_bh_enable();
#endif
}
---|
1320 | |
---|
/* Iterate @skb over every buffer in @queue, front to back.  The queue
 * head itself is the list terminator; the next element is prefetched
 * each step.  Not safe against removal of the current element. */
#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

/* As skb_queue_walk(), but back to front. */
#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->prev)
---|
1330 | |
---|
1331 | |
---|
1332 | extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, |
---|
1333 | int noblock, int *err); |
---|
1334 | extern unsigned int datagram_poll(struct file *file, struct socket *sock, |
---|
1335 | struct poll_table_struct *wait); |
---|
1336 | extern int skb_copy_datagram_iovec(const struct sk_buff *from, |
---|
1337 | int offset, struct iovec *to, |
---|
1338 | int size); |
---|
1339 | extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, |
---|
1340 | int hlen, |
---|
1341 | struct iovec *iov); |
---|
1342 | extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb); |
---|
1343 | extern void skb_kill_datagram(struct sock *sk, struct sk_buff *skb, |
---|
1344 | unsigned int flags); |
---|
1345 | extern unsigned int skb_checksum(const struct sk_buff *skb, int offset, |
---|
1346 | int len, unsigned int csum); |
---|
1347 | extern int skb_copy_bits(const struct sk_buff *skb, int offset, |
---|
1348 | void *to, int len); |
---|
1349 | extern int skb_store_bits(const struct sk_buff *skb, int offset, |
---|
1350 | void *from, int len); |
---|
1351 | extern unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, |
---|
1352 | int offset, u8 *to, int len, |
---|
1353 | unsigned int csum); |
---|
1354 | extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); |
---|
1355 | extern void skb_split(struct sk_buff *skb, |
---|
1356 | struct sk_buff *skb1, const u32 len); |
---|
1357 | |
---|
1358 | extern struct sk_buff *skb_segment(struct sk_buff *skb, int features); |
---|
1359 | |
---|
1360 | static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, |
---|
1361 | int len, void *buffer) |
---|
1362 | { |
---|
1363 | int hlen = skb_headlen(skb); |
---|
1364 | |
---|
1365 | if (hlen - offset >= len) |
---|
1366 | return skb->data + offset; |
---|
1367 | |
---|
1368 | if (skb_copy_bits(skb, offset, buffer, len) < 0) |
---|
1369 | return NULL; |
---|
1370 | |
---|
1371 | return buffer; |
---|
1372 | } |
---|
1373 | |
---|
1374 | extern void skb_init(void); |
---|
1375 | extern void skb_add_mtu(int mtu); |
---|
1376 | |
---|
/**
 * skb_get_timestamp - get timestamp from a skb
 * @skb: skb to get stamp from
 * @stamp: pointer to struct timeval to store stamp in
 *
 * Timestamps are stored in the skb as offsets to a base timestamp.
 * This function converts the offset back to a struct timeval and
 * stores it in @stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
{
	stamp->tv_sec = skb->tstamp.off_sec;
	stamp->tv_usec = skb->tstamp.off_usec;
}
---|
1391 | |
---|
/**
 * skb_set_timestamp - set timestamp of a skb
 * @skb: skb to set stamp of
 * @stamp: pointer to struct timeval to get stamp from
 *
 * Timestamps are stored in the skb as offsets to a base timestamp.
 * This function converts a struct timeval to an offset and stores
 * it in the skb.
 */
static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp)
{
	skb->tstamp.off_sec = stamp->tv_sec;
	skb->tstamp.off_usec = stamp->tv_usec;
}
---|
1406 | |
---|
1407 | extern void __net_timestamp(struct sk_buff *skb); |
---|
1408 | |
---|
1409 | extern unsigned int __skb_checksum_complete(struct sk_buff *skb); |
---|
1410 | |
---|
/**
 * skb_checksum_complete - Calculate checksum of an entire packet
 * @skb: packet to process
 *
 * This function calculates the checksum over the entire packet plus
 * the value of skb->csum.  The latter can be used to supply the
 * checksum of a pseudo header as used by TCP/UDP.
 *
 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
 * this function can be used to verify that checksum on received
 * packets.  In that case the function should return zero if the
 * checksum is correct.  In particular, this function will return zero
 * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
 * hardware has already verified the correctness of the checksum.
 */
static inline unsigned int skb_checksum_complete(struct sk_buff *skb)
{
	/* NOTE(review): the && collapses the result to 0/1, so callers
	 * actually get a "checksum bad?" flag rather than the raw
	 * checksum value — zero still means "verified ok". */
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__skb_checksum_complete(skb);
}
---|
1432 | |
---|
#ifdef CONFIG_NETFILTER
/* Drop one reference on a conntrack entry, destroying it when the
 * count hits zero.  NULL-safe. */
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nfct->destroy(nfct);
}
/* Take a reference on a conntrack entry.  NULL-safe. */
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
/* Hold an extra user reference on a reassembly skb.  NULL-safe. */
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
/* Drop a reference on a reassembly skb.  NULL-safe. */
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
/* Drop one reference on bridge-netfilter state, freeing it when the
 * count hits zero.  NULL-safe. */
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
/* Take a reference on bridge-netfilter state.  NULL-safe. */
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
/* Release and clear all netfilter state attached to @skb. */
static inline void nf_reset(struct sk_buff *skb)
{
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

#else /* CONFIG_NETFILTER */
static inline void nf_reset(struct sk_buff *skb) {}
#endif /* CONFIG_NETFILTER */
---|
1485 | |
---|
#ifdef CONFIG_NETWORK_SECMARK
/* Propagate the security mark from @from to @to. */
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

/* Clear the security mark on @skb. */
static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
/* No-op stubs when CONFIG_NETWORK_SECMARK is disabled. */
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif
---|
1503 | |
---|
/* Nonzero (the GSO segment size) when the skb is a generic
 * segmentation offload packet; zero otherwise. */
static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}
---|
1508 | |
---|
1509 | #endif /* __KERNEL__ */ |
---|
1510 | #endif /* _LINUX_SKBUFF_H */ |
---|