/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software;	you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
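
/*
 * Usage sketch (added for illustration, not part of the original file):
 * the locked helpers such as skb_queue_tail() follow exactly the
 * discipline the NOTE above demands of direct __skb_ callers, taking
 * the queue lock around the unlocked primitive:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&list->lock, flags);
 *	__skb_queue_tail(list, skb);
 *	spin_unlock_irqrestore(&list->lock, flags);
 */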

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <trace/events/skb.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

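/*
 * Note (added summary, not in the original file): skbuff_head_cache backs
 * ordinary single sk_buff heads.  skbuff_fclone_cache backs "fast clone"
 * allocations, each object holding an ORIG/CLONE sk_buff pair plus a
 * trailing atomic_t reference count; that layout is what the fclone
 * branch of __alloc_skb() and the pointer arithmetic in kfree_skbmem()
 * below both rely on.
 */
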
static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	/* Refuse: a nonzero return tells splice the page cannot be stolen. */
	return 1;
}


/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};
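
/*
 * Note (added summary, not in the original file): these operations back
 * pages handed to a pipe when socket data is spliced out (see
 * skb_splice_bits() later in this file).  .can_merge = 0 and a .steal
 * that always fails keep splice from coalescing or taking ownership of
 * pages the networking stack may still reference; .get and .release
 * simply adjust the page reference count.
 */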

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	- 	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	- 	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	size = SKB_DATA_ALIGN(size);
	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
					 gfp_mask, node);
	if (!data)
		goto nodata;
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
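
/*
 * Usage sketch (added for illustration, not part of the original file;
 * "len" and "headroom" are placeholder values): most code allocates
 * through the alloc_skb() wrapper and then partitions the buffer
 * explicitly, since __alloc_skb() returns it with no headroom:
 *
 *	struct sk_buff *skb = alloc_skb(len + headroom, GFP_ATOMIC);
 *
 *	if (skb) {
 *		skb_reserve(skb, headroom);	// open up headroom
 *		skb_put(skb, len);		// claim len bytes of tailroom
 *	}
 */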

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
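
/*
 * Usage sketch (added for illustration, not part of the original file;
 * "pkt_len" and "rx_buf" are placeholders): RX paths typically go
 * through the netdev_alloc_skb() wrapper, which supplies GFP_ATOMIC,
 * and reserve NET_IP_ALIGN of their own headroom so the IP header ends
 * up naturally aligned:
 *
 *	skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *	if (skb) {
 *		skb_reserve(skb, NET_IP_ALIGN);
 *		memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	}
 */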

/*
 * Append a page fragment to @skb at slot @i and update the length
 * accounting; the new bytes count as nonlinear data.
 */
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
}
EXPORT_SYMBOL(skb_add_rx_frag);
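
/*
 * Usage sketch (added for illustration, not part of the original file):
 * a page-based receive path can hand a freshly DMA-filled page straight
 * to an skb as fragment 0; the bytes then show up as nonlinear data
 * (counted in skb->data_len rather than the linear area):
 *
 *	skb_add_rx_frag(skb, 0, page, 0, length);
 */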

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
	/*
	 * There is more code here than it seems:
	 * __dev_alloc_skb is an inline
	 */
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
EXPORT_SYMBOL(dev_alloc_skb);

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	/*
	 * dataref is split in two halves: the lower bits count
	 * references to the header part of skb->data, the upper bits
	 * (above SKB_DATAREF_SHIFT) references to the payload part.
	 * An skb with ->nohdr set holds one reference in each half,
	 * so both are dropped here.
	 */
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}
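
/*
 * Layout note (added summary, not in the original file): an object in
 * skbuff_fclone_cache is laid out as
 *
 *	[ ORIG sk_buff ][ CLONE sk_buff ][ atomic_t fclone_ref ]
 *
 * which is why kfree_skbmem() below finds the reference count at
 * (skb + 2) from the original, at (skb + 1) from the clone, and the
 * clone's parent at (skb - 1).
 */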

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);
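
/*
 * Note (added summary, not in the original file): the public entry point
 * is kfree_skb(), which drops a reference from skb->users and only falls
 * through to __kfree_skb() once the last reference is gone.
 */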