Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Routines having to do with the 'struct sk_buff' memory handlers. | |
3 | * | |
4 | * Authors: Alan Cox <[email protected]> | |
5 | * Florian La Roche <[email protected]> | |
6 | * | |
7 | * Version: $Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $ | |
8 | * | |
9 | * Fixes: | |
10 | * Alan Cox : Fixed the worst of the load | |
11 | * balancer bugs. | |
12 | * Dave Platt : Interrupt stacking fix. | |
13 | * Richard Kooijman : Timestamp fixes. | |
14 | * Alan Cox : Changed buffer format. | |
15 | * Alan Cox : destructor hook for AF_UNIX etc. | |
16 | * Linus Torvalds : Better skb_clone. | |
17 | * Alan Cox : Added skb_copy. | |
18 | * Alan Cox : Added all the changed routines Linus | |
19 | * only put in the headers | |
20 | * Ray VanTassle : Fixed --skb->lock in free | |
21 | * Alan Cox : skb_copy copy arp field | |
22 | * Andi Kleen : slabified it. | |
23 | * Robert Olsson : Removed skb_head_pool | |
24 | * | |
25 | * NOTE: | |
26 | * The __skb_ routines should be called with interrupts | |
27 | * disabled, or you better be *real* sure that the operation is atomic | |
28 | * with respect to whatever list is being frobbed (e.g. via lock_sock() | |
29 | * or via disabling bottom half handlers, etc). | |
30 | * | |
31 | * This program is free software; you can redistribute it and/or | |
32 | * modify it under the terms of the GNU General Public License | |
33 | * as published by the Free Software Foundation; either version | |
34 | * 2 of the License, or (at your option) any later version. | |
35 | */ | |
36 | ||
37 | /* | |
38 | * The functions in this file will not compile correctly with gcc 2.4.x | |
39 | */ | |
40 | ||
41 | #include <linux/config.h> | |
42 | #include <linux/module.h> | |
43 | #include <linux/types.h> | |
44 | #include <linux/kernel.h> | |
45 | #include <linux/sched.h> | |
46 | #include <linux/mm.h> | |
47 | #include <linux/interrupt.h> | |
48 | #include <linux/in.h> | |
49 | #include <linux/inet.h> | |
50 | #include <linux/slab.h> | |
51 | #include <linux/netdevice.h> | |
52 | #ifdef CONFIG_NET_CLS_ACT | |
53 | #include <net/pkt_sched.h> | |
54 | #endif | |
55 | #include <linux/string.h> | |
56 | #include <linux/skbuff.h> | |
57 | #include <linux/cache.h> | |
58 | #include <linux/rtnetlink.h> | |
59 | #include <linux/init.h> | |
60 | #include <linux/highmem.h> | |
61 | ||
62 | #include <net/protocol.h> | |
63 | #include <net/dst.h> | |
64 | #include <net/sock.h> | |
65 | #include <net/checksum.h> | |
66 | #include <net/xfrm.h> | |
67 | ||
68 | #include <asm/uaccess.h> | |
69 | #include <asm/system.h> | |
70 | ||
ba89966c ED |
71 | static kmem_cache_t *skbuff_head_cache __read_mostly; |
72 | static kmem_cache_t *skbuff_fclone_cache __read_mostly; | |
1da177e4 LT |
73 | |
74 | /* | |
75 | * Keep out-of-line to prevent kernel bloat. | |
76 | * __builtin_return_address is not used because it is not always | |
77 | * reliable. | |
78 | */ | |
79 | ||
80 | /** | |
81 | * skb_over_panic - private function | |
82 | * @skb: buffer | |
83 | * @sz: size | |
84 | * @here: address | |
85 | * | |
86 | * Out of line support code for skb_put(). Not user callable. | |
87 | */ | |
88 | void skb_over_panic(struct sk_buff *skb, int sz, void *here) | |
89 | { | |
26095455 PM |
90 | printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p " |
91 | "data:%p tail:%p end:%p dev:%s\n", | |
92 | here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end, | |
93 | skb->dev ? skb->dev->name : "<NULL>"); | |
1da177e4 LT |
94 | BUG(); |
95 | } | |
96 | ||
97 | /** | |
98 | * skb_under_panic - private function | |
99 | * @skb: buffer | |
100 | * @sz: size | |
101 | * @here: address | |
102 | * | |
103 | * Out of line support code for skb_push(). Not user callable. | |
104 | */ | |
105 | ||
106 | void skb_under_panic(struct sk_buff *skb, int sz, void *here) | |
107 | { | |
26095455 PM |
108 | printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p " |
109 | "data:%p tail:%p end:%p dev:%s\n", | |
110 | here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end, | |
111 | skb->dev ? skb->dev->name : "<NULL>"); | |
1da177e4 LT |
112 | BUG(); |
113 | } | |
114 | ||
115 | /* Allocate a new skbuff. We do this ourselves so we can fill in a few | |
116 | * 'private' fields and also do memory statistics to find all the | |
117 | * [BEEP] leaks. | |
118 | * | |
119 | */ | |
120 | ||
121 | /** | |
d179cd12 | 122 | * __alloc_skb - allocate a network buffer |
1da177e4 LT |
123 | * @size: size to allocate |
124 | * @gfp_mask: allocation mask | |
c83c2486 RD |
125 | * @fclone: allocate from fclone cache instead of head cache |
126 | * and allocate a cloned (child) skb | |
1da177e4 LT |
127 | * |
128 | * Allocate a new &sk_buff. The returned buffer has no headroom and | |
129 | * tail room of @size bytes. The object has a reference count of one. | |
130 | * Returns the buffer on success or %NULL on failure. | |
131 | * | |
132 | * Buffers may only be allocated from interrupts using a @gfp_mask of | |
133 | * %GFP_ATOMIC. | |
134 | */ | |
dd0fc66f | 135 | struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, |
d179cd12 | 136 | int fclone) |
1da177e4 LT |
137 | { |
138 | struct sk_buff *skb; | |
139 | u8 *data; | |
140 | ||
141 | /* Get the HEAD */ | |
d179cd12 DM |
142 | if (fclone) |
143 | skb = kmem_cache_alloc(skbuff_fclone_cache, | |
144 | gfp_mask & ~__GFP_DMA); | |
145 | else | |
146 | skb = kmem_cache_alloc(skbuff_head_cache, | |
147 | gfp_mask & ~__GFP_DMA); | |
148 | ||
1da177e4 LT |
149 | if (!skb) |
150 | goto out; | |
151 | ||
152 | /* Get the DATA. Size must match skb_add_mtu(). */ | |
153 | size = SKB_DATA_ALIGN(size); | |
154 | data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); | |
155 | if (!data) | |
156 | goto nodata; | |
157 | ||
158 | memset(skb, 0, offsetof(struct sk_buff, truesize)); | |
159 | skb->truesize = size + sizeof(struct sk_buff); | |
160 | atomic_set(&skb->users, 1); | |
161 | skb->head = data; | |
162 | skb->data = data; | |
163 | skb->tail = data; | |
164 | skb->end = data + size; | |
d179cd12 DM |
165 | if (fclone) { |
166 | struct sk_buff *child = skb + 1; | |
167 | atomic_t *fclone_ref = (atomic_t *) (child + 1); | |
1da177e4 | 168 | |
d179cd12 DM |
169 | skb->fclone = SKB_FCLONE_ORIG; |
170 | atomic_set(fclone_ref, 1); | |
171 | ||
172 | child->fclone = SKB_FCLONE_UNAVAILABLE; | |
173 | } | |
1da177e4 LT |
174 | atomic_set(&(skb_shinfo(skb)->dataref), 1); |
175 | skb_shinfo(skb)->nr_frags = 0; | |
176 | skb_shinfo(skb)->tso_size = 0; | |
177 | skb_shinfo(skb)->tso_segs = 0; | |
178 | skb_shinfo(skb)->frag_list = NULL; | |
179 | out: | |
180 | return skb; | |
181 | nodata: | |
182 | kmem_cache_free(skbuff_head_cache, skb); | |
183 | skb = NULL; | |
184 | goto out; | |
185 | } | |
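
A minimal usage sketch of the allocator above, via the alloc_skb() wrapper: callers reserve headroom for the protocol headers they will push later, then append payload with skb_put(). The 16-byte headroom value and the build_packet() helper are illustrative assumptions, not part of this file.

```c
#include <linux/skbuff.h>
#include <linux/string.h>

/* Sketch: allocate a buffer with 16 bytes of headroom, then copy a
 * payload into the tail room.  GFP_ATOMIC is assumed because skbs
 * are frequently allocated in softirq context. */
static struct sk_buff *build_packet(const void *payload, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(16 + len, GFP_ATOMIC);

	if (!skb)
		return NULL;

	skb_reserve(skb, 16);                     /* headroom for headers */
	memcpy(skb_put(skb, len), payload, len);  /* advance tail, fill data */
	return skb;
}
```
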
186 | ||
187 | /** | |
188 | * alloc_skb_from_cache - allocate a network buffer | |
189 | * @cp: kmem_cache from which to allocate the data area | |
190 | * (object size must be big enough for @size bytes + skb overheads) | |
191 | * @size: size to allocate | |
192 | * @gfp_mask: allocation mask | |
193 | * | |
194 | * Allocate a new &sk_buff. The returned buffer has no headroom and | |
195 | * tail room of @size bytes. The object has a reference count of one. | |
196 | * Returns the buffer on success or %NULL on failure. | |
197 | * | |
198 | * Buffers may only be allocated from interrupts using a @gfp_mask of | |
199 | * %GFP_ATOMIC. | |
200 | */ | |
201 | struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, | |
86a76caf | 202 | unsigned int size, |
dd0fc66f | 203 | gfp_t gfp_mask) |
1da177e4 LT |
204 | { |
205 | struct sk_buff *skb; | |
206 | u8 *data; | |
207 | ||
208 | /* Get the HEAD */ | |
209 | skb = kmem_cache_alloc(skbuff_head_cache, | |
210 | gfp_mask & ~__GFP_DMA); | |
211 | if (!skb) | |
212 | goto out; | |
213 | ||
214 | /* Get the DATA. */ | |
215 | size = SKB_DATA_ALIGN(size); | |
216 | data = kmem_cache_alloc(cp, gfp_mask); | |
217 | if (!data) | |
218 | goto nodata; | |
219 | ||
220 | memset(skb, 0, offsetof(struct sk_buff, truesize)); | |
221 | skb->truesize = size + sizeof(struct sk_buff); | |
222 | atomic_set(&skb->users, 1); | |
223 | skb->head = data; | |
224 | skb->data = data; | |
225 | skb->tail = data; | |
226 | skb->end = data + size; | |
227 | ||
228 | atomic_set(&(skb_shinfo(skb)->dataref), 1); | |
229 | skb_shinfo(skb)->nr_frags = 0; | |
230 | skb_shinfo(skb)->tso_size = 0; | |
231 | skb_shinfo(skb)->tso_segs = 0; | |
232 | skb_shinfo(skb)->frag_list = NULL; | |
233 | out: | |
234 | return skb; | |
235 | nodata: | |
236 | kmem_cache_free(skbuff_head_cache, skb); | |
237 | skb = NULL; | |
238 | goto out; | |
239 | } | |
240 | ||
241 | ||
242 | static void skb_drop_fraglist(struct sk_buff *skb) | |
243 | { | |
244 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | |
245 | ||
246 | skb_shinfo(skb)->frag_list = NULL; | |
247 | ||
248 | do { | |
249 | struct sk_buff *this = list; | |
250 | list = list->next; | |
251 | kfree_skb(this); | |
252 | } while (list); | |
253 | } | |
254 | ||
255 | static void skb_clone_fraglist(struct sk_buff *skb) | |
256 | { | |
257 | struct sk_buff *list; | |
258 | ||
259 | for (list = skb_shinfo(skb)->frag_list; list; list = list->next) | |
260 | skb_get(list); | |
261 | } | |
262 | ||
263 | void skb_release_data(struct sk_buff *skb) | |
264 | { | |
265 | if (!skb->cloned || | |
266 | !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, | |
267 | &skb_shinfo(skb)->dataref)) { | |
268 | if (skb_shinfo(skb)->nr_frags) { | |
269 | int i; | |
270 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | |
271 | put_page(skb_shinfo(skb)->frags[i].page); | |
272 | } | |
273 | ||
274 | if (skb_shinfo(skb)->frag_list) | |
275 | skb_drop_fraglist(skb); | |
276 | ||
277 | kfree(skb->head); | |
278 | } | |
279 | } | |
280 | ||
281 | /* | |
282 | * Free the memory of an skbuff without cleaning its state. | |
283 | */ | |
284 | void kfree_skbmem(struct sk_buff *skb) | |
285 | { | |
d179cd12 DM |
286 | struct sk_buff *other; |
287 | atomic_t *fclone_ref; | |
288 | ||
1da177e4 | 289 | skb_release_data(skb); |
d179cd12 DM |
290 | switch (skb->fclone) { |
291 | case SKB_FCLONE_UNAVAILABLE: | |
292 | kmem_cache_free(skbuff_head_cache, skb); | |
293 | break; | |
294 | ||
295 | case SKB_FCLONE_ORIG: | |
296 | fclone_ref = (atomic_t *) (skb + 2); | |
297 | if (atomic_dec_and_test(fclone_ref)) | |
298 | kmem_cache_free(skbuff_fclone_cache, skb); | |
299 | break; | |
300 | ||
301 | case SKB_FCLONE_CLONE: | |
302 | fclone_ref = (atomic_t *) (skb + 1); | |
303 | other = skb - 1; | |
304 | ||
305 | /* The clone portion is available for | |
306 | * fast-cloning again. | |
307 | */ | |
308 | skb->fclone = SKB_FCLONE_UNAVAILABLE; | |
309 | ||
310 | if (atomic_dec_and_test(fclone_ref)) | |
311 | kmem_cache_free(skbuff_fclone_cache, other); | |
312 | break; | |
313 | } | |
1da177e4 LT |
314 | } |
315 | ||
316 | /** | |
317 | * __kfree_skb - private function | |
318 | * @skb: buffer | |
319 | * | |
320 | * Free an sk_buff. Release anything attached to the buffer. | |
321 | * Clean the state. This is an internal helper function. Users should | |
322 | * always call kfree_skb(). | |
323 | */ | |
324 | ||
325 | void __kfree_skb(struct sk_buff *skb) | |
326 | { | |
1da177e4 LT |
327 | dst_release(skb->dst); |
328 | #ifdef CONFIG_XFRM | |
329 | secpath_put(skb->sp); | |
330 | #endif | |
9c2b3328 SH |
331 | if (skb->destructor) { |
332 | WARN_ON(in_irq()); | |
1da177e4 LT |
333 | skb->destructor(skb); |
334 | } | |
335 | #ifdef CONFIG_NETFILTER | |
336 | nf_conntrack_put(skb->nfct); | |
337 | #ifdef CONFIG_BRIDGE_NETFILTER | |
338 | nf_bridge_put(skb->nf_bridge); | |
339 | #endif | |
340 | #endif | |
341 | /* XXX: IS this still necessary? - JHS */ | |
342 | #ifdef CONFIG_NET_SCHED | |
343 | skb->tc_index = 0; | |
344 | #ifdef CONFIG_NET_CLS_ACT | |
345 | skb->tc_verd = 0; | |
1da177e4 LT |
346 | #endif |
347 | #endif | |
348 | ||
349 | kfree_skbmem(skb); | |
350 | } | |
351 | ||
352 | /** | |
353 | * skb_clone - duplicate an sk_buff | |
354 | * @skb: buffer to clone | |
355 | * @gfp_mask: allocation priority | |
356 | * | |
357 | * Duplicate an &sk_buff. The new one is not owned by a socket. Both | |
358 | * copies share the same packet data but not structure. The new | |
359 | * buffer has a reference count of 1. If the allocation fails the | |
360 | * function returns %NULL otherwise the new buffer is returned. | |
361 | * | |
362 | * If this function is called from an interrupt, @gfp_mask must be | |
363 | * %GFP_ATOMIC. | |
364 | */ | |
365 | ||
dd0fc66f | 366 | struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) |
1da177e4 | 367 | { |
d179cd12 DM |
368 | struct sk_buff *n; |
369 | ||
370 | n = skb + 1; | |
371 | if (skb->fclone == SKB_FCLONE_ORIG && | |
372 | n->fclone == SKB_FCLONE_UNAVAILABLE) { | |
373 | atomic_t *fclone_ref = (atomic_t *) (n + 1); | |
374 | n->fclone = SKB_FCLONE_CLONE; | |
375 | atomic_inc(fclone_ref); | |
376 | } else { | |
377 | n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); | |
378 | if (!n) | |
379 | return NULL; | |
380 | n->fclone = SKB_FCLONE_UNAVAILABLE; | |
381 | } | |
1da177e4 LT |
382 | |
383 | #define C(x) n->x = skb->x | |
384 | ||
385 | n->next = n->prev = NULL; | |
1da177e4 | 386 | n->sk = NULL; |
a61bbcf2 | 387 | C(tstamp); |
1da177e4 | 388 | C(dev); |
1da177e4 LT |
389 | C(h); |
390 | C(nh); | |
391 | C(mac); | |
392 | C(dst); | |
393 | dst_clone(skb->dst); | |
394 | C(sp); | |
395 | #ifdef CONFIG_INET | |
396 | secpath_get(skb->sp); | |
397 | #endif | |
398 | memcpy(n->cb, skb->cb, sizeof(skb->cb)); | |
399 | C(len); | |
400 | C(data_len); | |
401 | C(csum); | |
402 | C(local_df); | |
403 | n->cloned = 1; | |
404 | n->nohdr = 0; | |
405 | C(pkt_type); | |
406 | C(ip_summed); | |
407 | C(priority); | |
408 | C(protocol); | |
1da177e4 LT |
409 | n->destructor = NULL; |
410 | #ifdef CONFIG_NETFILTER | |
411 | C(nfmark); | |
1da177e4 LT |
412 | C(nfct); |
413 | nf_conntrack_get(skb->nfct); | |
414 | C(nfctinfo); | |
c98d80ed JA |
415 | #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) |
416 | C(ipvs_property); | |
417 | #endif | |
1da177e4 LT |
418 | #ifdef CONFIG_BRIDGE_NETFILTER |
419 | C(nf_bridge); | |
420 | nf_bridge_get(skb->nf_bridge); | |
421 | #endif | |
422 | #endif /*CONFIG_NETFILTER*/ | |
1da177e4 LT |
423 | #ifdef CONFIG_NET_SCHED |
424 | C(tc_index); | |
425 | #ifdef CONFIG_NET_CLS_ACT | |
426 | n->tc_verd = SET_TC_VERD(skb->tc_verd,0); | |
b72f6ecc DM |
427 | n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd); |
428 | n->tc_verd = CLR_TC_MUNGED(n->tc_verd); | |
1da177e4 | 429 | C(input_dev); |
1da177e4 LT |
430 | #endif |
431 | ||
432 | #endif | |
433 | C(truesize); | |
434 | atomic_set(&n->users, 1); | |
435 | C(head); | |
436 | C(data); | |
437 | C(tail); | |
438 | C(end); | |
439 | ||
440 | atomic_inc(&(skb_shinfo(skb)->dataref)); | |
441 | skb->cloned = 1; | |
442 | ||
443 | return n; | |
444 | } | |
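
For illustration, a hedged sketch of the classic skb_clone() pattern: delivering the same packet to a second consumer without copying the payload. deliver_to_tap() is a hypothetical consumer, not a kernel API; because the data is shared, it must treat the buffer as read-only (or take a private copy first).

```c
#include <linux/skbuff.h>

/* Hypothetical read-only consumer; it calls kfree_skb() when done. */
extern void deliver_to_tap(struct sk_buff *skb);

/* Sketch: mirror @skb to a tap.  The clone shares the packet data
 * with the original, so only the sk_buff structure is duplicated. */
static void mirror_to_tap(struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (clone)
		deliver_to_tap(clone);
}
```
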
445 | ||
446 | static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |
447 | { | |
448 | /* | |
449 | * Shift between the two data areas in bytes | |
450 | */ | |
451 | unsigned long offset = new->data - old->data; | |
452 | ||
1da177e4 LT |
453 | new->sk = NULL; |
454 | new->dev = old->dev; | |
1da177e4 LT |
455 | new->priority = old->priority; |
456 | new->protocol = old->protocol; | |
457 | new->dst = dst_clone(old->dst); | |
458 | #ifdef CONFIG_INET | |
459 | new->sp = secpath_get(old->sp); | |
460 | #endif | |
461 | new->h.raw = old->h.raw + offset; | |
462 | new->nh.raw = old->nh.raw + offset; | |
463 | new->mac.raw = old->mac.raw + offset; | |
464 | memcpy(new->cb, old->cb, sizeof(old->cb)); | |
465 | new->local_df = old->local_df; | |
d179cd12 | 466 | new->fclone = SKB_FCLONE_UNAVAILABLE; |
1da177e4 | 467 | new->pkt_type = old->pkt_type; |
a61bbcf2 | 468 | new->tstamp = old->tstamp; |
1da177e4 | 469 | new->destructor = NULL; |
1da177e4 LT |
470 | #ifdef CONFIG_NETFILTER |
471 | new->nfmark = old->nfmark; | |
1da177e4 LT |
472 | new->nfct = old->nfct; |
473 | nf_conntrack_get(old->nfct); | |
474 | new->nfctinfo = old->nfctinfo; | |
c98d80ed JA |
475 | #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) |
476 | new->ipvs_property = old->ipvs_property; | |
477 | #endif | |
1da177e4 LT |
478 | #ifdef CONFIG_BRIDGE_NETFILTER |
479 | new->nf_bridge = old->nf_bridge; | |
480 | nf_bridge_get(old->nf_bridge); | |
481 | #endif | |
482 | #endif | |
483 | #ifdef CONFIG_NET_SCHED | |
484 | #ifdef CONFIG_NET_CLS_ACT | |
485 | new->tc_verd = old->tc_verd; | |
486 | #endif | |
487 | new->tc_index = old->tc_index; | |
488 | #endif | |
489 | atomic_set(&new->users, 1); | |
490 | skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size; | |
491 | skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs; | |
492 | } | |
493 | ||
494 | /** | |
495 | * skb_copy - create private copy of an sk_buff | |
496 | * @skb: buffer to copy | |
497 | * @gfp_mask: allocation priority | |
498 | * | |
499 | * Make a copy of both an &sk_buff and its data. This is used when the | |
500 | * caller wishes to modify the data and needs a private copy of the | |
501 | * data to alter. Returns %NULL on failure or the pointer to the buffer | |
502 | * on success. The returned buffer has a reference count of 1. | |
503 | * | |
504 | * As a by-product this function converts a non-linear &sk_buff to a | |
505 | * linear one, so the &sk_buff becomes completely private and the caller | |
506 | * may modify all the data of the returned buffer. This means that this | |
507 | * function is not recommended for use in circumstances when only the | |
508 | * header is going to be modified. Use pskb_copy() instead. | |
509 | */ | |
510 | ||
dd0fc66f | 511 | struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) |
1da177e4 LT |
512 | { |
513 | int headerlen = skb->data - skb->head; | |
514 | /* | |
515 | * Allocate the copy buffer | |
516 | */ | |
517 | struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len, | |
518 | gfp_mask); | |
519 | if (!n) | |
520 | return NULL; | |
521 | ||
522 | /* Set the data pointer */ | |
523 | skb_reserve(n, headerlen); | |
524 | /* Set the tail pointer and length */ | |
525 | skb_put(n, skb->len); | |
526 | n->csum = skb->csum; | |
527 | n->ip_summed = skb->ip_summed; | |
528 | ||
529 | if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)) | |
530 | BUG(); | |
531 | ||
532 | copy_skb_header(n, skb); | |
533 | return n; | |
534 | } | |
535 | ||
536 | ||
537 | /** | |
538 | * pskb_copy - create copy of an sk_buff with private head. | |
539 | * @skb: buffer to copy | |
540 | * @gfp_mask: allocation priority | |
541 | * | |
542 | * Make a copy of both an &sk_buff and part of its data, located | |
543 | * in the header. Fragmented data remains shared. This is used when | |
544 | * the caller wishes to modify only the header of the &sk_buff and needs | |
545 | * a private copy of the header to alter. Returns %NULL on failure | |
546 | * or the pointer to the buffer on success. | |
547 | * The returned buffer has a reference count of 1. | |
548 | */ | |
549 | ||
dd0fc66f | 550 | struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) |
1da177e4 LT |
551 | { |
552 | /* | |
553 | * Allocate the copy buffer | |
554 | */ | |
555 | struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask); | |
556 | ||
557 | if (!n) | |
558 | goto out; | |
559 | ||
560 | /* Set the data pointer */ | |
561 | skb_reserve(n, skb->data - skb->head); | |
562 | /* Set the tail pointer and length */ | |
563 | skb_put(n, skb_headlen(skb)); | |
564 | /* Copy the bytes */ | |
565 | memcpy(n->data, skb->data, n->len); | |
566 | n->csum = skb->csum; | |
567 | n->ip_summed = skb->ip_summed; | |
568 | ||
569 | n->data_len = skb->data_len; | |
570 | n->len = skb->len; | |
571 | ||
572 | if (skb_shinfo(skb)->nr_frags) { | |
573 | int i; | |
574 | ||
575 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
576 | skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; | |
577 | get_page(skb_shinfo(n)->frags[i].page); | |
578 | } | |
579 | skb_shinfo(n)->nr_frags = i; | |
580 | } | |
581 | ||
582 | if (skb_shinfo(skb)->frag_list) { | |
583 | skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; | |
584 | skb_clone_fraglist(n); | |
585 | } | |
586 | ||
587 | copy_skb_header(n, skb); | |
588 | out: | |
589 | return n; | |
590 | } | |
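
A small decision sketch contrasting the two copy routines documented above: skb_copy() when the payload will be written, pskb_copy() when only the header will be. copy_for_rewrite() is an illustrative helper, not an existing function.

```c
#include <linux/skbuff.h>

/* Sketch: take a private copy before rewriting an skb.  If only the
 * header changes, pskb_copy() is cheaper because the paged fragments
 * stay shared instead of being linearized and copied. */
static struct sk_buff *copy_for_rewrite(struct sk_buff *skb, int payload_too)
{
	return payload_too ? skb_copy(skb, GFP_ATOMIC)
			   : pskb_copy(skb, GFP_ATOMIC);
}
```
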
591 | ||
592 | /** | |
593 | * pskb_expand_head - reallocate header of &sk_buff | |
594 | * @skb: buffer to reallocate | |
595 | * @nhead: room to add at head | |
596 | * @ntail: room to add at tail | |
597 | * @gfp_mask: allocation priority | |
598 | * | |
599 | * Expands (or creates an identical copy, if @nhead and @ntail are zero) | |
600 | * the header of the skb. The &sk_buff itself is not changed. It MUST | |
601 | * have a reference count of 1. Returns zero on success or a negative | |
602 | * error code if expansion failed; in that case, the &sk_buff is not changed. | |
603 | * | |
604 | * All the pointers pointing into skb header may change and must be | |
605 | * reloaded after call to this function. | |
606 | */ | |
607 | ||
86a76caf | 608 | int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, |
dd0fc66f | 609 | gfp_t gfp_mask) |
1da177e4 LT |
610 | { |
611 | int i; | |
612 | u8 *data; | |
613 | int size = nhead + (skb->end - skb->head) + ntail; | |
614 | long off; | |
615 | ||
616 | if (skb_shared(skb)) | |
617 | BUG(); | |
618 | ||
619 | size = SKB_DATA_ALIGN(size); | |
620 | ||
621 | data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); | |
622 | if (!data) | |
623 | goto nodata; | |
624 | ||
625 | /* Copy only real data... and, alas, header. This should be | |
626 | * optimized for the cases when header is void. */ | |
627 | memcpy(data + nhead, skb->head, skb->tail - skb->head); | |
628 | memcpy(data + size, skb->end, sizeof(struct skb_shared_info)); | |
629 | ||
630 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | |
631 | get_page(skb_shinfo(skb)->frags[i].page); | |
632 | ||
633 | if (skb_shinfo(skb)->frag_list) | |
634 | skb_clone_fraglist(skb); | |
635 | ||
636 | skb_release_data(skb); | |
637 | ||
638 | off = (data + nhead) - skb->head; | |
639 | ||
640 | skb->head = data; | |
641 | skb->end = data + size; | |
642 | skb->data += off; | |
643 | skb->tail += off; | |
644 | skb->mac.raw += off; | |
645 | skb->h.raw += off; | |
646 | skb->nh.raw += off; | |
647 | skb->cloned = 0; | |
648 | skb->nohdr = 0; | |
649 | atomic_set(&skb_shinfo(skb)->dataref, 1); | |
650 | return 0; | |
651 | ||
652 | nodata: | |
653 | return -ENOMEM; | |
654 | } | |
655 | ||
656 | /* Make private copy of skb with writable head and some headroom */ | |
657 | ||
658 | struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) | |
659 | { | |
660 | struct sk_buff *skb2; | |
661 | int delta = headroom - skb_headroom(skb); | |
662 | ||
663 | if (delta <= 0) | |
664 | skb2 = pskb_copy(skb, GFP_ATOMIC); | |
665 | else { | |
666 | skb2 = skb_clone(skb, GFP_ATOMIC); | |
667 | if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, | |
668 | GFP_ATOMIC)) { | |
669 | kfree_skb(skb2); | |
670 | skb2 = NULL; | |
671 | } | |
672 | } | |
673 | return skb2; | |
674 | } | |
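
A hedged sketch of how tunnel-style code typically uses skb_realloc_headroom(): check the available headroom, reallocate if the encapsulation header will not fit, then skb_push(). The 8-byte header size is an assumption for illustration.

```c
#include <linux/skbuff.h>
#include <linux/string.h>

/* Sketch: guarantee room for an 8-byte encapsulation header.  On the
 * reallocation path the original buffer is consumed either way. */
static struct sk_buff *push_encap_header(struct sk_buff *skb)
{
	if (skb_headroom(skb) < 8) {
		struct sk_buff *nskb = skb_realloc_headroom(skb, 8);

		kfree_skb(skb);          /* done with the old buffer */
		if (!nskb)
			return NULL;
		skb = nskb;
	}
	memset(skb_push(skb, 8), 0, 8);  /* hypothetical encap header */
	return skb;
}
```
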
675 | ||
676 | ||
677 | /** | |
678 | * skb_copy_expand - copy and expand sk_buff | |
679 | * @skb: buffer to copy | |
680 | * @newheadroom: new free bytes at head | |
681 | * @newtailroom: new free bytes at tail | |
682 | * @gfp_mask: allocation priority | |
683 | * | |
684 | * Make a copy of both an &sk_buff and its data and while doing so | |
685 | * allocate additional space. | |
686 | * | |
687 | * This is used when the caller wishes to modify the data and needs a | |
688 | * private copy of the data to alter as well as more space for new fields. | |
689 | * Returns %NULL on failure or the pointer to the buffer | |
690 | * on success. The returned buffer has a reference count of 1. | |
691 | * | |
692 | * You must pass %GFP_ATOMIC as the allocation priority if this function | |
693 | * is called from an interrupt. | |
694 | * | |
695 | * BUG ALERT: ip_summed is not copied. Why does this work? Is it used | |
696 | * only by netfilter in the cases when checksum is recalculated? --ANK | |
697 | */ | |
698 | struct sk_buff *skb_copy_expand(const struct sk_buff *skb, | |
86a76caf | 699 | int newheadroom, int newtailroom, |
dd0fc66f | 700 | gfp_t gfp_mask) |
1da177e4 LT |
701 | { |
702 | /* | |
703 | * Allocate the copy buffer | |
704 | */ | |
705 | struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom, | |
706 | gfp_mask); | |
707 | int head_copy_len, head_copy_off; | |
708 | ||
709 | if (!n) | |
710 | return NULL; | |
711 | ||
712 | skb_reserve(n, newheadroom); | |
713 | ||
714 | /* Set the tail pointer and length */ | |
715 | skb_put(n, skb->len); | |
716 | ||
717 | head_copy_len = skb_headroom(skb); | |
718 | head_copy_off = 0; | |
719 | if (newheadroom <= head_copy_len) | |
720 | head_copy_len = newheadroom; | |
721 | else | |
722 | head_copy_off = newheadroom - head_copy_len; | |
723 | ||
724 | /* Copy the linear header and data. */ | |
725 | if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, | |
726 | skb->len + head_copy_len)) | |
727 | BUG(); | |
728 | ||
729 | copy_skb_header(n, skb); | |
730 | ||
731 | return n; | |
732 | } | |
733 | ||
734 | /** | |
735 | * skb_pad - zero pad the tail of an skb | |
736 | * @skb: buffer to pad | |
737 | * @pad: space to pad | |
738 | * | |
739 | * Ensure that a buffer is followed by a padding area that is zero | |
740 | * filled. Used by network drivers which may DMA or transfer data | |
741 | * beyond the buffer end onto the wire. | |
742 | * | |
743 | * May return NULL in out-of-memory cases. | |
744 | */ | |
745 | ||
746 | struct sk_buff *skb_pad(struct sk_buff *skb, int pad) | |
747 | { | |
748 | struct sk_buff *nskb; | |
749 | ||
750 | /* If the skbuff is non-linear, tailroom is always zero. */ | |
751 | if (skb_tailroom(skb) >= pad) { | |
752 | memset(skb->data+skb->len, 0, pad); | |
753 | return skb; | |
754 | } | |
755 | ||
756 | nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC); | |
757 | kfree_skb(skb); | |
758 | if (nskb) | |
759 | memset(nskb->data+nskb->len, 0, pad); | |
760 | return nskb; | |
761 | } | |
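
A driver-side sketch of skb_pad() for illustration: pad Ethernet runt frames to the ETH_ZLEN minimum before transmission. Note that on the copy path skb_pad() has already freed the original buffer, so the caller must use the returned pointer.

```c
#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Sketch: make sure a frame carries at least ETH_ZLEN (60) bytes of
 * zero-filled data before it is handed to the hardware.  Returns NULL
 * on allocation failure, in which case the skb is already gone. */
static struct sk_buff *pad_runt_frame(struct sk_buff *skb)
{
	if (skb->len < ETH_ZLEN)
		skb = skb_pad(skb, ETH_ZLEN - skb->len);
	return skb;
}
```
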
762 | ||
763 | /* Trims skb to length len. It can change skb pointers, if "realloc" is 1. | |
764 | * If realloc==0 and trimming is impossible without change of data, | |
765 | * it is BUG(). | |
766 | */ | |
767 | ||
768 | int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc) | |
769 | { | |
770 | int offset = skb_headlen(skb); | |
771 | int nfrags = skb_shinfo(skb)->nr_frags; | |
772 | int i; | |
773 | ||
774 | for (i = 0; i < nfrags; i++) { | |
775 | int end = offset + skb_shinfo(skb)->frags[i].size; | |
776 | if (end > len) { | |
777 | if (skb_cloned(skb)) { | |
778 | if (!realloc) | |
779 | BUG(); | |
780 | if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) | |
781 | return -ENOMEM; | |
782 | } | |
783 | if (len <= offset) { | |
784 | put_page(skb_shinfo(skb)->frags[i].page); | |
785 | skb_shinfo(skb)->nr_frags--; | |
786 | } else { | |
787 | skb_shinfo(skb)->frags[i].size = len - offset; | |
788 | } | |
789 | } | |
790 | offset = end; | |
791 | } | |
792 | ||
793 | if (offset < len) { | |
794 | skb->data_len -= skb->len - len; | |
795 | skb->len = len; | |
796 | } else { | |
797 | if (len <= skb_headlen(skb)) { | |
798 | skb->len = len; | |
799 | skb->data_len = 0; | |
800 | skb->tail = skb->data + len; | |
801 | if (skb_shinfo(skb)->frag_list && !skb_cloned(skb)) | |
802 | skb_drop_fraglist(skb); | |
803 | } else { | |
804 | skb->data_len -= skb->len - len; | |
805 | skb->len = len; | |
806 | } | |
807 | } | |
808 | ||
809 | return 0; | |
810 | } | |
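
In practice callers reach this routine through the pskb_trim() wrapper in <linux/skbuff.h>. As a hedged example, trimming a received packet down to the length claimed by its IP header, the usual way trailing link-layer padding is discarded:

```c
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/errno.h>

/* Sketch: trim trailing link-layer padding.  Assumes skb->nh already
 * points at a valid IPv4 header, as it does after ip_rcv(). */
static int trim_to_ip_len(struct sk_buff *skb)
{
	unsigned int len = ntohs(skb->nh.iph->tot_len);

	if (skb->len < len)
		return -EINVAL;          /* truncated packet */
	return pskb_trim(skb, len);      /* wrapper around ___pskb_trim() */
}
```
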
811 | ||
812 | /** | |
813 | * __pskb_pull_tail - advance tail of skb header | |
814 | * @skb: buffer to reallocate | |
815 | * @delta: number of bytes to advance tail | |
816 | * | |
817 | * The function makes sense only on a fragmented &sk_buff; | |
818 | * it expands the header, moving its tail forward and copying the | |
819 | * necessary data from the fragmented part. | |
820 | * | |
821 | * &sk_buff MUST have reference count of 1. | |
822 | * | |
823 | * Returns %NULL (and &sk_buff does not change) if pull failed | |
824 | * or value of new tail of skb in the case of success. | |
825 | * | |
826 | * All the pointers pointing into skb header may change and must be | |
827 | * reloaded after call to this function. | |
828 | */ | |
829 | ||
830 | /* Moves the tail of the skb head forward, copying data from the | |
831 | * fragmented part when necessary. | |
832 | * 1. It may fail due to malloc failure. | |
833 | * 2. It may change skb pointers. | |
834 | * | |
835 | * It is pretty complicated. Luckily, it is called only in exceptional cases. | |
836 | */ | |
837 | unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) | |
838 | { | |
839 | /* If the skb does not have enough free space at the tail, get a new | |
840 | * one plus 128 bytes for future expansions. If we have enough room | |
841 | * at the tail, reallocate without expansion only if the skb is cloned. | |
842 | */ | |
843 | int i, k, eat = (skb->tail + delta) - skb->end; | |
844 | ||
845 | if (eat > 0 || skb_cloned(skb)) { | |
846 | if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, | |
847 | GFP_ATOMIC)) | |
848 | return NULL; | |
849 | } | |
850 | ||
851 | if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta)) | |
852 | BUG(); | |
853 | ||
854 | /* Optimization: no fragments, no reason to preestimate the | |
855 | * size of pulled pages. Superb. | |
856 | */ | |
857 | if (!skb_shinfo(skb)->frag_list) | |
858 | goto pull_pages; | |
859 | ||
860 | /* Estimate size of pulled pages. */ | |
861 | eat = delta; | |
862 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
863 | if (skb_shinfo(skb)->frags[i].size >= eat) | |
864 | goto pull_pages; | |
865 | eat -= skb_shinfo(skb)->frags[i].size; | |
866 | } | |
867 | ||
868 | /* If we need to update the frag list, we are in trouble. | |
869 | * Certainly, it is possible to add an offset to the skb data, | |
870 | * but taking into account that pulling is expected to | |
871 | * be a very rare operation, it is worth fighting against | |
872 | * further bloating of the skb head and crucifying ourselves here instead. | |
873 | * Pure masochism, indeed. 8)8) | |
874 | */ | |
875 | if (eat) { | |
876 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | |
877 | struct sk_buff *clone = NULL; | |
878 | struct sk_buff *insp = NULL; | |
879 | ||
880 | do { | |
881 | if (!list) | |
882 | BUG(); | |
883 | ||
884 | if (list->len <= eat) { | |
885 | /* Eaten as whole. */ | |
886 | eat -= list->len; | |
887 | list = list->next; | |
888 | insp = list; | |
889 | } else { | |
890 | /* Eaten partially. */ | |
891 | ||
892 | if (skb_shared(list)) { | |
893 | * Sucks! We need to fork the list. :-( | |
894 | clone = skb_clone(list, GFP_ATOMIC); | |
895 | if (!clone) | |
896 | return NULL; | |
897 | insp = list->next; | |
898 | list = clone; | |
899 | } else { | |
900 | /* This may be pulled without | |
901 | * problems. */ | |
902 | insp = list; | |
903 | } | |
904 | if (!pskb_pull(list, eat)) { | |
905 | if (clone) | |
906 | kfree_skb(clone); | |
907 | return NULL; | |
908 | } | |
909 | break; | |
910 | } | |
911 | } while (eat); | |
912 | ||
913 | /* Free pulled out fragments. */ | |
914 | while ((list = skb_shinfo(skb)->frag_list) != insp) { | |
915 | skb_shinfo(skb)->frag_list = list->next; | |
916 | kfree_skb(list); | |
917 | } | |
918 | /* And insert new clone at head. */ | |
919 | if (clone) { | |
920 | clone->next = list; | |
921 | skb_shinfo(skb)->frag_list = clone; | |
922 | } | |
923 | } | |
924 | /* Success! Now we may commit changes to skb data. */ | |
925 | ||
926 | pull_pages: | |
927 | eat = delta; | |
928 | k = 0; | |
929 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
930 | if (skb_shinfo(skb)->frags[i].size <= eat) { | |
931 | put_page(skb_shinfo(skb)->frags[i].page); | |
932 | eat -= skb_shinfo(skb)->frags[i].size; | |
933 | } else { | |
934 | skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; | |
935 | if (eat) { | |
936 | skb_shinfo(skb)->frags[k].page_offset += eat; | |
937 | skb_shinfo(skb)->frags[k].size -= eat; | |
938 | eat = 0; | |
939 | } | |
940 | k++; | |
941 | } | |
942 | } | |
943 | skb_shinfo(skb)->nr_frags = k; | |
944 | ||
945 | skb->tail += delta; | |
946 | skb->data_len -= delta; | |
947 | ||
948 | return skb->tail; | |
949 | } | |
950 | ||
951 | /* Copy some data bits from skb to kernel buffer. */ | |
952 | ||
953 | int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) | |
954 | { | |
955 | int i, copy; | |
956 | int start = skb_headlen(skb); | |
957 | ||
958 | if (offset > (int)skb->len - len) | |
959 | goto fault; | |
960 | ||
961 | /* Copy header. */ | |
962 | if ((copy = start - offset) > 0) { | |
963 | if (copy > len) | |
964 | copy = len; | |
965 | memcpy(to, skb->data + offset, copy); | |
966 | if ((len -= copy) == 0) | |
967 | return 0; | |
968 | offset += copy; | |
969 | to += copy; | |
970 | } | |
971 | ||
972 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
973 | int end; | |
974 | ||
975 | BUG_TRAP(start <= offset + len); | |
976 | ||
977 | end = start + skb_shinfo(skb)->frags[i].size; | |
978 | if ((copy = end - offset) > 0) { | |
979 | u8 *vaddr; | |
980 | ||
981 | if (copy > len) | |
982 | copy = len; | |
983 | ||
984 | vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); | |
985 | memcpy(to, | |
986 | vaddr + skb_shinfo(skb)->frags[i].page_offset+ | |
987 | offset - start, copy); | |
988 | kunmap_skb_frag(vaddr); | |
989 | ||
990 | if ((len -= copy) == 0) | |
991 | return 0; | |
992 | offset += copy; | |
993 | to += copy; | |
994 | } | |
995 | start = end; | |
996 | } | |
997 | ||
998 | if (skb_shinfo(skb)->frag_list) { | |
999 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | |
1000 | ||
1001 | for (; list; list = list->next) { | |
1002 | int end; | |
1003 | ||
1004 | BUG_TRAP(start <= offset + len); | |
1005 | ||
1006 | end = start + list->len; | |
1007 | if ((copy = end - offset) > 0) { | |
1008 | if (copy > len) | |
1009 | copy = len; | |
1010 | if (skb_copy_bits(list, offset - start, | |
1011 | to, copy)) | |
1012 | goto fault; | |
1013 | if ((len -= copy) == 0) | |
1014 | return 0; | |
1015 | offset += copy; | |
1016 | to += copy; | |
1017 | } | |
1018 | start = end; | |
1019 | } | |
1020 | } | |
1021 | if (!len) | |
1022 | return 0; | |
1023 | ||
1024 | fault: | |
1025 | return -EFAULT; | |
1026 | } | |
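
A hedged sketch of the routine above: because skb_copy_bits() walks the linear head, the page fragments, and the frag list transparently, it is the safe way to peek at bytes that may not be linear. peek_udp_header() is illustrative.

```c
#include <linux/skbuff.h>
#include <linux/udp.h>

/* Sketch: fetch a UDP header that may live in a fragment rather than
 * in the linear head.  @offset is the transport header offset. */
static int peek_udp_header(const struct sk_buff *skb, int offset,
			   struct udphdr *uh)
{
	return skb_copy_bits(skb, offset, uh, sizeof(*uh));
}
```
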
1027 | ||
357b40a1 HX |
1028 | /** |
1029 | * skb_store_bits - store bits from kernel buffer to skb | |
1030 | * @skb: destination buffer | |
1031 | * @offset: offset in destination | |
1032 | * @from: source buffer | |
1033 | * @len: number of bytes to copy | |
1034 | * | |
1035 | * Copy the specified number of bytes from the source buffer to the | |
1036 | * destination skb. This function handles all the messy bits of | |
1037 | * traversing fragment lists and such. | |
1038 | */ | |
1039 | ||
1040 | int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len) | |
1041 | { | |
1042 | int i, copy; | |
1043 | int start = skb_headlen(skb); | |
1044 | ||
1045 | if (offset > (int)skb->len - len) | |
1046 | goto fault; | |
1047 | ||
1048 | if ((copy = start - offset) > 0) { | |
1049 | if (copy > len) | |
1050 | copy = len; | |
1051 | memcpy(skb->data + offset, from, copy); | |
1052 | if ((len -= copy) == 0) | |
1053 | return 0; | |
1054 | offset += copy; | |
1055 | from += copy; | |
1056 | } | |
1057 | ||
1058 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
1059 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
1060 | int end; | |
1061 | ||
1062 | BUG_TRAP(start <= offset + len); | |
1063 | ||
1064 | end = start + frag->size; | |
1065 | if ((copy = end - offset) > 0) { | |
1066 | u8 *vaddr; | |
1067 | ||
1068 | if (copy > len) | |
1069 | copy = len; | |
1070 | ||
1071 | vaddr = kmap_skb_frag(frag); | |
1072 | memcpy(vaddr + frag->page_offset + offset - start, | |
1073 | from, copy); | |
1074 | kunmap_skb_frag(vaddr); | |
1075 | ||
1076 | if ((len -= copy) == 0) | |
1077 | return 0; | |
1078 | offset += copy; | |
1079 | from += copy; | |
1080 | } | |
1081 | start = end; | |
1082 | } | |
1083 | ||
1084 | if (skb_shinfo(skb)->frag_list) { | |
1085 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | |
1086 | ||
1087 | for (; list; list = list->next) { | |
1088 | int end; | |
1089 | ||
1090 | BUG_TRAP(start <= offset + len); | |
1091 | ||
1092 | end = start + list->len; | |
1093 | if ((copy = end - offset) > 0) { | |
1094 | if (copy > len) | |
1095 | copy = len; | |
1096 | if (skb_store_bits(list, offset - start, | |
1097 | from, copy)) | |
1098 | goto fault; | |
1099 | if ((len -= copy) == 0) | |
1100 | return 0; | |
1101 | offset += copy; | |
1102 | from += copy; | |
1103 | } | |
1104 | start = end; | |
1105 | } | |
1106 | } | |
1107 | if (!len) | |
1108 | return 0; | |
1109 | ||
1110 | fault: | |
1111 | return -EFAULT; | |
1112 | } | |
1113 | ||
1114 | EXPORT_SYMBOL(skb_store_bits); | |
1115 | ||
1da177e4 LT |
1116 | /* Checksum skb data. */ |
1117 | ||
1118 | unsigned int skb_checksum(const struct sk_buff *skb, int offset, | |
1119 | int len, unsigned int csum) | |
1120 | { | |
1121 | int start = skb_headlen(skb); | |
1122 | int i, copy = start - offset; | |
1123 | int pos = 0; | |
1124 | ||
1125 | /* Checksum header. */ | |
1126 | if (copy > 0) { | |
1127 | if (copy > len) | |
1128 | copy = len; | |
1129 | csum = csum_partial(skb->data + offset, copy, csum); | |
1130 | if ((len -= copy) == 0) | |
1131 | return csum; | |
1132 | offset += copy; | |
1133 | pos = copy; | |
1134 | } | |
1135 | ||
1136 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
1137 | int end; | |
1138 | ||
1139 | BUG_TRAP(start <= offset + len); | |
1140 | ||
1141 | end = start + skb_shinfo(skb)->frags[i].size; | |
1142 | if ((copy = end - offset) > 0) { | |
1143 | unsigned int csum2; | |
1144 | u8 *vaddr; | |
1145 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
1146 | ||
1147 | if (copy > len) | |
1148 | copy = len; | |
1149 | vaddr = kmap_skb_frag(frag); | |
1150 | csum2 = csum_partial(vaddr + frag->page_offset + | |
1151 | offset - start, copy, 0); | |
1152 | kunmap_skb_frag(vaddr); | |
1153 | csum = csum_block_add(csum, csum2, pos); | |
1154 | if (!(len -= copy)) | |
1155 | return csum; | |
1156 | offset += copy; | |
1157 | pos += copy; | |
1158 | } | |
1159 | start = end; | |
1160 | } | |
1161 | ||
1162 | if (skb_shinfo(skb)->frag_list) { | |
1163 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | |
1164 | ||
1165 | for (; list; list = list->next) { | |
1166 | int end; | |
1167 | ||
1168 | BUG_TRAP(start <= offset + len); | |
1169 | ||
1170 | end = start + list->len; | |
1171 | if ((copy = end - offset) > 0) { | |
1172 | unsigned int csum2; | |
1173 | if (copy > len) | |
1174 | copy = len; | |
1175 | csum2 = skb_checksum(list, offset - start, | |
1176 | copy, 0); | |
1177 | csum = csum_block_add(csum, csum2, pos); | |
1178 | if ((len -= copy) == 0) | |
1179 | return csum; | |
1180 | offset += copy; | |
1181 | pos += copy; | |
1182 | } | |
1183 | start = end; | |
1184 | } | |
1185 | } | |
1186 | if (len) | |
1187 | BUG(); | |
1188 | ||
1189 | return csum; | |
1190 | } | |
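
To illustrate, a hedged sketch of how a UDP-style receive path folds skb_checksum() into a full pseudo-header verification; the addresses and protocol are assumed to come from the network header.

```c
#include <linux/skbuff.h>
#include <net/checksum.h>

/* Sketch: software checksum over the whole packet, folded together
 * with the TCP/UDP pseudo-header.  Returns non-zero if it verifies. */
static int l4_csum_ok(struct sk_buff *skb, u32 saddr, u32 daddr, u8 proto)
{
	unsigned int csum = skb_checksum(skb, 0, skb->len, 0);

	return csum_tcpudp_magic(saddr, daddr, skb->len, proto, csum) == 0;
}
```
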
1191 | ||
1192 | /* Both of the above in one bottle. */ | |
1193 | ||
1194 | unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | |
1195 | u8 *to, int len, unsigned int csum) | |
1196 | { | |
1197 | int start = skb_headlen(skb); | |
1198 | int i, copy = start - offset; | |
1199 | int pos = 0; | |
1200 | ||
1201 | /* Copy header. */ | |
1202 | if (copy > 0) { | |
1203 | if (copy > len) | |
1204 | copy = len; | |
1205 | csum = csum_partial_copy_nocheck(skb->data + offset, to, | |
1206 | copy, csum); | |
1207 | if ((len -= copy) == 0) | |
1208 | return csum; | |
1209 | offset += copy; | |
1210 | to += copy; | |
1211 | pos = copy; | |
1212 | } | |
1213 | ||
1214 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
1215 | int end; | |
1216 | ||
1217 | BUG_TRAP(start <= offset + len); | |
1218 | ||
1219 | end = start + skb_shinfo(skb)->frags[i].size; | |
1220 | if ((copy = end - offset) > 0) { | |
1221 | unsigned int csum2; | |
1222 | u8 *vaddr; | |
1223 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
1224 | ||
1225 | if (copy > len) | |
1226 | copy = len; | |
1227 | vaddr = kmap_skb_frag(frag); | |
1228 | csum2 = csum_partial_copy_nocheck(vaddr + | |
1229 | frag->page_offset + | |
1230 | offset - start, to, | |
1231 | copy, 0); | |
1232 | kunmap_skb_frag(vaddr); | |
1233 | csum = csum_block_add(csum, csum2, pos); | |
1234 | if (!(len -= copy)) | |
1235 | return csum; | |
1236 | offset += copy; | |
1237 | to += copy; | |
1238 | pos += copy; | |
1239 | } | |
1240 | start = end; | |
1241 | } | |
1242 | ||
1243 | if (skb_shinfo(skb)->frag_list) { | |
1244 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | |
1245 | ||
1246 | for (; list; list = list->next) { | |
1247 | unsigned int csum2; | |
1248 | int end; | |
1249 | ||
1250 | BUG_TRAP(start <= offset + len); | |
1251 | ||
1252 | end = start + list->len; | |
1253 | if ((copy = end - offset) > 0) { | |
1254 | if (copy > len) | |
1255 | copy = len; | |
1256 | csum2 = skb_copy_and_csum_bits(list, | |
1257 | offset - start, | |
1258 | to, copy, 0); | |
1259 | csum = csum_block_add(csum, csum2, pos); | |
1260 | if ((len -= copy) == 0) | |
1261 | return csum; | |
1262 | offset += copy; | |
1263 | to += copy; | |
1264 | pos += copy; | |
1265 | } | |
1266 | start = end; | |
1267 | } | |
1268 | } | |
1269 | if (len) | |
1270 | BUG(); | |
1271 | return csum; | |
1272 | } | |
1273 | ||
1274 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) | |
1275 | { | |
1276 | unsigned int csum; | |
1277 | long csstart; | |
1278 | ||
1279 | if (skb->ip_summed == CHECKSUM_HW) | |
1280 | csstart = skb->h.raw - skb->data; | |
1281 | else | |
1282 | csstart = skb_headlen(skb); | |
1283 | ||
1284 | if (csstart > skb_headlen(skb)) | |
1285 | BUG(); | |
1286 | ||
1287 | memcpy(to, skb->data, csstart); | |
1288 | ||
1289 | csum = 0; | |
1290 | if (csstart != skb->len) | |
1291 | csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, | |
1292 | skb->len - csstart, 0); | |
1293 | ||
1294 | if (skb->ip_summed == CHECKSUM_HW) { | |
1295 | long csstuff = csstart + skb->csum; | |
1296 | ||
1297 | *((unsigned short *)(to + csstuff)) = csum_fold(csum); | |
1298 | } | |
1299 | } | |
1300 | ||
1301 | /** | |
1302 | * skb_dequeue - remove from the head of the queue | |
1303 | * @list: list to dequeue from | |
1304 | * | |
1305 | * Remove the head of the list. The list lock is taken so the function | |
1306 | * may be used safely with other locking list functions. The head item is | |
1307 | * returned or %NULL if the list is empty. | |
1308 | */ | |
1309 | ||
1310 | struct sk_buff *skb_dequeue(struct sk_buff_head *list) | |
1311 | { | |
1312 | unsigned long flags; | |
1313 | struct sk_buff *result; | |
1314 | ||
1315 | spin_lock_irqsave(&list->lock, flags); | |
1316 | result = __skb_dequeue(list); | |
1317 | spin_unlock_irqrestore(&list->lock, flags); | |
1318 | return result; | |
1319 | } | |
1320 | ||
1321 | /** | |
1322 | * skb_dequeue_tail - remove from the tail of the queue | |
1323 | * @list: list to dequeue from | |
1324 | * | |
1325 | * Remove the tail of the list. The list lock is taken so the function | |
1326 | * may be used safely with other locking list functions. The tail item is | |
1327 | * returned or %NULL if the list is empty. | |
1328 | */ | |
1329 | struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) | |
1330 | { | |
1331 | unsigned long flags; | |
1332 | struct sk_buff *result; | |
1333 | ||
1334 | spin_lock_irqsave(&list->lock, flags); | |
1335 | result = __skb_dequeue_tail(list); | |
1336 | spin_unlock_irqrestore(&list->lock, flags); | |
1337 | return result; | |
1338 | } | |
1339 | ||
1340 | /** | |
1341 | * skb_queue_purge - empty a list | |
1342 | * @list: list to empty | |
1343 | * | |
1344 | * Delete all buffers on an &sk_buff list. Each buffer is removed from | |
1345 | * the list and one reference dropped. This function takes the list | |
1346 | * lock and is atomic with respect to other list locking functions. | |
1347 | */ | |
1348 | void skb_queue_purge(struct sk_buff_head *list) | |
1349 | { | |
1350 | struct sk_buff *skb; | |
1351 | while ((skb = skb_dequeue(list)) != NULL) | |
1352 | kfree_skb(skb); | |
1353 | } | |
1354 | ||
1355 | /** | |
1356 | * skb_queue_head - queue a buffer at the list head | |
1357 | * @list: list to use | |
1358 | * @newsk: buffer to queue | |
1359 | * | |
1360 | * Queue a buffer at the start of the list. This function takes the | |
1361 | * list lock and can be used safely with other locking &sk_buff | |
1362 | * functions. | |
1363 | * | |
1364 | * A buffer cannot be placed on two lists at the same time. | |
1365 | */ | |
1366 | void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) | |
1367 | { | |
1368 | unsigned long flags; | |
1369 | ||
1370 | spin_lock_irqsave(&list->lock, flags); | |
1371 | __skb_queue_head(list, newsk); | |
1372 | spin_unlock_irqrestore(&list->lock, flags); | |
1373 | } | |
1374 | ||
1375 | /** | |
1376 | * skb_queue_tail - queue a buffer at the list tail | |
1377 | * @list: list to use | |
1378 | * @newsk: buffer to queue | |
1379 | * | |
1380 | * Queue a buffer at the tail of the list. This function takes the | |
1381 | * list lock and can be used safely with other locking &sk_buff | |
1382 | * functions. | |
1383 | * | |
1384 | * A buffer cannot be placed on two lists at the same time. | |
1385 | */ | |
1386 | void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) | |
1387 | { | |
1388 | unsigned long flags; | |
1389 | ||
1390 | spin_lock_irqsave(&list->lock, flags); | |
1391 | __skb_queue_tail(list, newsk); | |
1392 | spin_unlock_irqrestore(&list->lock, flags); | |
1393 | } | |
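
A minimal producer/consumer sketch around the locked queue helpers above, the same pattern socket receive queues use; rxq and the two helpers are illustrative assumptions.

```c
#include <linux/skbuff.h>

static struct sk_buff_head rxq;   /* skb_queue_head_init(&rxq) at init */

/* Sketch: the locked helpers take rxq.lock with IRQs disabled, so an
 * interrupt-context producer and a process-context consumer can share
 * the queue without extra locking. */
static void rx_enqueue(struct sk_buff *skb)
{
	skb_queue_tail(&rxq, skb);
}

static struct sk_buff *rx_dequeue(void)
{
	return skb_dequeue(&rxq);     /* NULL when the queue is empty */
}
```
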
8728b834 | 1394 | |
1da177e4 LT |
1395 | /** |
1396 | * skb_unlink - remove a buffer from a list | |
1397 | * @skb: buffer to remove | |
8728b834 | 1398 | * @list: list to use |
1da177e4 | 1399 | * |
8728b834 DM |
1400 | * Remove a packet from a list. The list locks are taken and this |
1401 | * function is atomic with respect to other list locked calls. | |
1da177e4 | 1402 | * |
8728b834 | 1403 | * You must know what list the SKB is on. |
1da177e4 | 1404 | */ |
8728b834 | 1405 | void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) |
1da177e4 | 1406 | { |
8728b834 | 1407 | unsigned long flags; |
1da177e4 | 1408 | |
8728b834 DM |
1409 | spin_lock_irqsave(&list->lock, flags); |
1410 | __skb_unlink(skb, list); | |
1411 | spin_unlock_irqrestore(&list->lock, flags); | |
1da177e4 LT |
1412 | } |
1413 | ||
1da177e4 LT |
1414 | /** |
1415 | * skb_append - append a buffer | |
1416 | * @old: buffer to insert after | |
1417 | * @newsk: buffer to insert | |
8728b834 | 1418 | * @list: list to use |
1da177e4 LT |
1419 | * |
1420 | * Place a packet after a given packet in a list. The list locks are taken | |
1421 | * and this function is atomic with respect to other list locked calls. | |
1422 | * A buffer cannot be placed on two lists at the same time. | |
1423 | */ | |
8728b834 | 1424 | void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) |
1da177e4 LT |
1425 | { |
1426 | unsigned long flags; | |
1427 | ||
8728b834 DM |
1428 | spin_lock_irqsave(&list->lock, flags); |
1429 | __skb_append(old, newsk, list); | |
1430 | spin_unlock_irqrestore(&list->lock, flags); | |
1da177e4 LT |
1431 | } |
1432 | ||
1433 | ||
1434 | /** | |
1435 | * skb_insert - insert a buffer | |
1436 | * @old: buffer to insert before | |
1437 | * @newsk: buffer to insert | |
8728b834 DM |
1438 | * @list: list to use |
1439 | * | |
1440 | * Place a packet before a given packet in a list. The list locks are | |
1441 | * taken and this function is atomic with respect to other list locked | |
1442 | * calls. | |
1da177e4 | 1443 | * |
1da177e4 LT |
1444 | * A buffer cannot be placed on two lists at the same time. |
1445 | */ | |
8728b834 | 1446 | void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) |
1da177e4 LT |
1447 | { |
1448 | unsigned long flags; | |
1449 | ||
8728b834 DM |
1450 | spin_lock_irqsave(&list->lock, flags); |
1451 | __skb_insert(newsk, old->prev, old, list); | |
1452 | spin_unlock_irqrestore(&list->lock, flags); | |
1da177e4 LT |
1453 | } |
1454 | ||
1455 | #if 0 | |
1456 | /* | |
1457 | * Tune the memory allocator for a new MTU size. | |
1458 | */ | |
1459 | void skb_add_mtu(int mtu) | |
1460 | { | |
1461 | /* Must match allocation in alloc_skb */ | |
1462 | mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info); | |
1463 | ||
1464 | kmem_add_cache_size(mtu); | |
1465 | } | |
1466 | #endif | |
1467 | ||
1468 | static inline void skb_split_inside_header(struct sk_buff *skb, | |
1469 | struct sk_buff* skb1, | |
1470 | const u32 len, const int pos) | |
1471 | { | |
1472 | int i; | |
1473 | ||
1474 | memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len); | |
1475 | ||
1476 | /* And move data appendix as is. */ | |
1477 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | |
1478 | skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; | |
1479 | ||
1480 | skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; | |
1481 | skb_shinfo(skb)->nr_frags = 0; | |
1482 | skb1->data_len = skb->data_len; | |
1483 | skb1->len += skb1->data_len; | |
1484 | skb->data_len = 0; | |
1485 | skb->len = len; | |
1486 | skb->tail = skb->data + len; | |
1487 | } | |
1488 | ||
1489 | static inline void skb_split_no_header(struct sk_buff *skb, | |
1490 | struct sk_buff* skb1, | |
1491 | const u32 len, int pos) | |
1492 | { | |
1493 | int i, k = 0; | |
1494 | const int nfrags = skb_shinfo(skb)->nr_frags; | |
1495 | ||
1496 | skb_shinfo(skb)->nr_frags = 0; | |
1497 | skb1->len = skb1->data_len = skb->len - len; | |
1498 | skb->len = len; | |
1499 | skb->data_len = len - pos; | |
1500 | ||
1501 | for (i = 0; i < nfrags; i++) { | |
1502 | int size = skb_shinfo(skb)->frags[i].size; | |
1503 | ||
1504 | if (pos + size > len) { | |
1505 | skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; | |
1506 | ||
1507 | if (pos < len) { | |
1508 | /* Split frag. | |
1509 | * We have two variants in this case: | |
1510 | * 1. Move the whole frag to the second | |
1511 | * part, if it is possible. E.g. | |
1512 | * this approach is mandatory for TUX, | |
1513 | * where splitting is expensive. | |
1514 | * 2. Split accurately. This is what we do. | |
1515 | */ | |
1516 | get_page(skb_shinfo(skb)->frags[i].page); | |
1517 | skb_shinfo(skb1)->frags[0].page_offset += len - pos; | |
1518 | skb_shinfo(skb1)->frags[0].size -= len - pos; | |
1519 | skb_shinfo(skb)->frags[i].size = len - pos; | |
1520 | skb_shinfo(skb)->nr_frags++; | |
1521 | } | |
1522 | k++; | |
1523 | } else | |
1524 | skb_shinfo(skb)->nr_frags++; | |
1525 | pos += size; | |
1526 | } | |
1527 | skb_shinfo(skb1)->nr_frags = k; | |
1528 | } | |
1529 | ||
1530 | /** | |
1531 | * skb_split - Split fragmented skb into two parts at length len. | |
1532 | * @skb: the buffer to split | |
1533 | * @skb1: the buffer to receive the second part | |
1534 | * @len: new length for skb | |
1535 | */ | |
1536 | void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) | |
1537 | { | |
1538 | int pos = skb_headlen(skb); | |
1539 | ||
1540 | if (len < pos) /* Split line is inside header. */ | |
1541 | skb_split_inside_header(skb, skb1, len, pos); | |
1542 | else /* Second chunk has no header, nothing to copy. */ | |
1543 | skb_split_no_header(skb, skb1, len, pos); | |
1544 | } | |
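
A hedged sketch of skb_split() as TCP segmentation paths use it: the caller allocates the second buffer itself, and the split moves everything past @len across. split_at() and the tail-room sizing are illustrative assumptions.

```c
#include <linux/skbuff.h>

/* Sketch: cut @skb at @len.  The new buffer needs enough tail room to
 * receive any linear bytes that cross the split point, so size it by
 * the current linear length. */
static struct sk_buff *split_at(struct sk_buff *skb, u32 len)
{
	struct sk_buff *skb1 = alloc_skb(skb_headlen(skb), GFP_ATOMIC);

	if (!skb1)
		return NULL;

	skb_split(skb, skb1, len);   /* skb keeps [0, len), skb1 the rest */
	return skb1;
}
```
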
1545 | ||
677e90ed TG |
1546 | /** |
1547 | * skb_prepare_seq_read - Prepare a sequential read of skb data | |
1548 | * @skb: the buffer to read | |
1549 | * @from: lower offset of data to be read | |
1550 | * @to: upper offset of data to be read | |
1551 | * @st: state variable | |
1552 | * | |
1553 | * Initializes the specified state variable. Must be called before | |
1554 | * invoking skb_seq_read() for the first time. | |
1555 | */ | |
1556 | void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, | |
1557 | unsigned int to, struct skb_seq_state *st) | |
1558 | { | |
1559 | st->lower_offset = from; | |
1560 | st->upper_offset = to; | |
1561 | st->root_skb = st->cur_skb = skb; | |
1562 | st->frag_idx = st->stepped_offset = 0; | |
1563 | st->frag_data = NULL; | |
1564 | } | |
1565 | ||
1566 | /** | |
1567 | * skb_seq_read - Sequentially read skb data | |
1568 | * @consumed: number of bytes consumed by the caller so far | |
1569 | * @data: destination pointer for data to be returned | |
1570 | * @st: state variable | |
1571 | * | |
1572 | * Reads a block of skb data at @consumed relative to the | |
1573 | * lower offset specified to skb_prepare_seq_read(). Assigns | |
1574 | * the head of the data block to @data and returns the length | |
1575 | * of the block or 0 if the end of the skb data or the upper | |
1576 | * offset has been reached. | |
1577 | * | |
1578 | * The caller is not required to consume all of the data | |
1579 | * returned, i.e. @consumed is typically set to the number | |
1580 | * of bytes already consumed and the next call to | |
1581 | * skb_seq_read() will return the remaining part of the block. | |
1582 | * | |
1583 | * Note: The size of each block of data returned can be arbitrary; | |
1584 | * this limitation is the cost for zerocopy sequential | |
1585 | * reads of potentially non-linear data. | |
1586 | * | |
1587 | * Note: Fragment lists within fragments are not implemented | |
1588 | * at the moment, state->root_skb could be replaced with | |
1589 | * a stack for this purpose. | |
1590 | */ | |
1591 | unsigned int skb_seq_read(unsigned int consumed, const u8 **data, | |
1592 | struct skb_seq_state *st) | |
1593 | { | |
1594 | unsigned int block_limit, abs_offset = consumed + st->lower_offset; | |
1595 | skb_frag_t *frag; | |
1596 | ||
1597 | if (unlikely(abs_offset >= st->upper_offset)) | |
1598 | return 0; | |
1599 | ||
1600 | next_skb: | |
1601 | block_limit = skb_headlen(st->cur_skb); | |
1602 | ||
1603 | if (abs_offset < block_limit) { | |
1604 | *data = st->cur_skb->data + abs_offset; | |
1605 | return block_limit - abs_offset; | |
1606 | } | |
1607 | ||
1608 | if (st->frag_idx == 0 && !st->frag_data) | |
1609 | st->stepped_offset += skb_headlen(st->cur_skb); | |
1610 | ||
1611 | while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { | |
1612 | frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; | |
1613 | block_limit = frag->size + st->stepped_offset; | |
1614 | ||
1615 | if (abs_offset < block_limit) { | |
1616 | if (!st->frag_data) | |
1617 | st->frag_data = kmap_skb_frag(frag); | |
1618 | ||
1619 | *data = (u8 *) st->frag_data + frag->page_offset + | |
1620 | (abs_offset - st->stepped_offset); | |
1621 | ||
1622 | return block_limit - abs_offset; | |
1623 | } | |
1624 | ||
1625 | if (st->frag_data) { | |
1626 | kunmap_skb_frag(st->frag_data); | |
1627 | st->frag_data = NULL; | |
1628 | } | |
1629 | ||
1630 | st->frag_idx++; | |
1631 | st->stepped_offset += frag->size; | |
1632 | } | |
1633 | ||
1634 | if (st->cur_skb->next) { | |
1635 | st->cur_skb = st->cur_skb->next; | |
1636 | st->frag_idx = 0; | |
1637 | goto next_skb; | |
1638 | } else if (st->root_skb == st->cur_skb && | |
1639 | skb_shinfo(st->root_skb)->frag_list) { | |
1640 | st->cur_skb = skb_shinfo(st->root_skb)->frag_list; | |
1641 | goto next_skb; | |
1642 | } | |
1643 | ||
1644 | return 0; | |
1645 | } | |
1646 | ||
1647 | /** | |
1648 | * skb_abort_seq_read - Abort a sequential read of skb data | |
1649 | * @st: state variable | |
1650 | * | |
1651 | * Must be called if the sequential read was aborted before | |
1652 | * skb_seq_read() returned 0. | |
1653 | */ | |
1654 | void skb_abort_seq_read(struct skb_seq_state *st) | |
1655 | { | |
1656 | if (st->frag_data) | |
1657 | kunmap_skb_frag(st->frag_data); | |
1658 | } | |
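
The canonical read loop for the sequential API above, as a hedged sketch; process() is a hypothetical callback. Note the abort rule from the comment: skb_abort_seq_read() is only needed when the loop exits before skb_seq_read() returns 0.

```c
#include <linux/skbuff.h>

/* Sketch: visit every payload byte of a possibly non-linear skb in
 * order, without linearizing it. */
static void walk_skb(struct sk_buff *skb,
		     void (*process)(const u8 *data, unsigned int len))
{
	struct skb_seq_state st;
	const u8 *data;
	unsigned int consumed = 0, len;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		process(data, len);
		consumed += len;
	}
	/* The read ran to completion, so no skb_abort_seq_read() here. */
}
```
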
1659 | ||
3fc7e8a6 TG |
1660 | #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) |
1661 | ||
1662 | static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, | |
1663 | struct ts_config *conf, | |
1664 | struct ts_state *state) | |
1665 | { | |
1666 | return skb_seq_read(offset, text, TS_SKB_CB(state)); | |
1667 | } | |
1668 | ||
1669 | static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) | |
1670 | { | |
1671 | skb_abort_seq_read(TS_SKB_CB(state)); | |
1672 | } | |
1673 | ||
1674 | /** | |
1675 | * skb_find_text - Find a text pattern in skb data | |
1676 | * @skb: the buffer to look in | |
1677 | * @from: search offset | |
1678 | * @to: search limit | |
1679 | * @config: textsearch configuration | |
1680 | * @state: uninitialized textsearch state variable | |
1681 | * | |
1682 | * Finds a pattern in the skb data according to the specified | |
1683 | * textsearch configuration. Use textsearch_next() to retrieve | |
1684 | * subsequent occurrences of the pattern. Returns the offset | |
1685 | * to the first occurrence or UINT_MAX if no match was found. | |
1686 | */ | |
1687 | unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, | |
1688 | unsigned int to, struct ts_config *config, | |
1689 | struct ts_state *state) | |
1690 | { | |
1691 | config->get_next_block = skb_ts_get_next_block; | |
1692 | config->finish = skb_ts_finish; | |
1693 | ||
1694 | skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); | |
1695 | ||
1696 | return textsearch_find(config, state); | |
1697 | } | |
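
A hedged end-to-end sketch of skb_find_text(): prepare a textsearch configuration (the KMP backend is assumed to be available), scan the packet, and tear the configuration down. find_http() is illustrative.

```c
#include <linux/skbuff.h>
#include <linux/textsearch.h>
#include <linux/kernel.h>
#include <linux/err.h>

/* Sketch: search the whole packet for the string "HTTP".  Returns the
 * offset of the first match or UINT_MAX if there is none. */
static unsigned int find_http(struct sk_buff *skb)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", "HTTP", 4, GFP_ATOMIC, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = skb_find_text(skb, 0, skb->len, conf, &state);
	textsearch_destroy(conf);
	return pos;
}
```
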
1698 | ||
1da177e4 LT |
1699 | void __init skb_init(void) |
1700 | { | |
1701 | skbuff_head_cache = kmem_cache_create("skbuff_head_cache", | |
1702 | sizeof(struct sk_buff), | |
1703 | 0, | |
1704 | SLAB_HWCACHE_ALIGN, | |
1705 | NULL, NULL); | |
1706 | if (!skbuff_head_cache) | |
1707 | panic("cannot create skbuff cache"); | |
d179cd12 DM |
1708 | |
1709 | skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", | |
1710 | (2*sizeof(struct sk_buff)) + | |
1711 | sizeof(atomic_t), | |
1712 | 0, | |
1713 | SLAB_HWCACHE_ALIGN, | |
1714 | NULL, NULL); | |
1715 | if (!skbuff_fclone_cache) | |
1716 | panic("cannot create skbuff cache"); | |
1da177e4 LT |
1717 | } |
1718 | ||
1719 | EXPORT_SYMBOL(___pskb_trim); | |
1720 | EXPORT_SYMBOL(__kfree_skb); | |
1721 | EXPORT_SYMBOL(__pskb_pull_tail); | |
d179cd12 | 1722 | EXPORT_SYMBOL(__alloc_skb); |
1da177e4 LT |
1723 | EXPORT_SYMBOL(pskb_copy); |
1724 | EXPORT_SYMBOL(pskb_expand_head); | |
1725 | EXPORT_SYMBOL(skb_checksum); | |
1726 | EXPORT_SYMBOL(skb_clone); | |
1727 | EXPORT_SYMBOL(skb_clone_fraglist); | |
1728 | EXPORT_SYMBOL(skb_copy); | |
1729 | EXPORT_SYMBOL(skb_copy_and_csum_bits); | |
1730 | EXPORT_SYMBOL(skb_copy_and_csum_dev); | |
1731 | EXPORT_SYMBOL(skb_copy_bits); | |
1732 | EXPORT_SYMBOL(skb_copy_expand); | |
1733 | EXPORT_SYMBOL(skb_over_panic); | |
1734 | EXPORT_SYMBOL(skb_pad); | |
1735 | EXPORT_SYMBOL(skb_realloc_headroom); | |
1736 | EXPORT_SYMBOL(skb_under_panic); | |
1737 | EXPORT_SYMBOL(skb_dequeue); | |
1738 | EXPORT_SYMBOL(skb_dequeue_tail); | |
1739 | EXPORT_SYMBOL(skb_insert); | |
1740 | EXPORT_SYMBOL(skb_queue_purge); | |
1741 | EXPORT_SYMBOL(skb_queue_head); | |
1742 | EXPORT_SYMBOL(skb_queue_tail); | |
1743 | EXPORT_SYMBOL(skb_unlink); | |
1744 | EXPORT_SYMBOL(skb_append); | |
1745 | EXPORT_SYMBOL(skb_split); | |
677e90ed TG |
1746 | EXPORT_SYMBOL(skb_prepare_seq_read); |
1747 | EXPORT_SYMBOL(skb_seq_read); | |
1748 | EXPORT_SYMBOL(skb_abort_seq_read); | |
3fc7e8a6 | 1749 | EXPORT_SYMBOL(skb_find_text); |