/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <[email protected]>
 *			Florian La Roche <[email protected]>
 *
 *	Version:	$Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

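/*
 * Illustrative sketch, not part of the original file: the NOTE above means
 * the lockless __skb_ list helpers rely entirely on the caller for
 * serialisation.  A caller that cannot guarantee this either takes the queue
 * lock itself or uses the locked variants (rxq is a hypothetical
 * struct sk_buff_head owned by the caller):
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&rxq.lock, flags);
 *	__skb_queue_tail(&rxq, skb);		<- caller holds the lock
 *	spin_unlock_irqrestore(&rxq.lock, flags);
 *
 * or, equivalently:
 *
 *	skb_queue_tail(&rxq, skb);		<- locked variant does the above itself
 */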
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

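/*
 * Illustrative sketch, not part of the original file: the two panics above
 * fire when skb_put() would move skb->tail past skb->end, or skb_push()
 * would move skb->data below skb->head.  The sizes below are arbitrary:
 *
 *	struct sk_buff *skb = alloc_skb(64, GFP_ATOMIC);
 *	if (!skb)
 *		return;
 *	skb_reserve(skb, 16);		<- leaves 16 bytes of headroom
 *	skb_put(skb, 48);		<- fine, 48 bytes of tailroom remain
 *	skb_push(skb, 16);		<- fine, exactly consumes the headroom
 *	skb_push(skb, 1);		<- underrun, would hit skb_under_panic()
 */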
void skb_truesize_bug(struct sk_buff *skb)
{
	printk(KERN_ERR "SKB BUG: Invalid truesize (%u) "
	       "len=%u, sizeof(sk_buff)=%Zd\n",
	       skb->truesize, skb->len, sizeof(struct sk_buff));
}
EXPORT_SYMBOL(skb_truesize_bug);

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	size = SKB_DATA_ALIGN(size);
	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
			gfp_mask, node);
	if (!data)
		goto nodata;

	/*
	 * See comment in sk_buff definition, just before the 'tail' member
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	atomic_set(&shinfo->dataref, 1);
	shinfo->nr_frags = 0;
	shinfo->gso_size = 0;
	shinfo->gso_segs = 0;
	shinfo->gso_type = 0;
	shinfo->ip6_frag_id = 0;
	shinfo->frag_list = NULL;

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}

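/*
 * Illustrative sketch, not part of the original file: callers normally reach
 * __alloc_skb() through the alloc_skb() wrapper and then carve the buffer up
 * with skb_reserve()/skb_put().  Names such as hdr_len, data_len and payload
 * below are hypothetical:
 *
 *	struct sk_buff *skb = alloc_skb(hdr_len + data_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hdr_len);			<- headroom for headers
 *	memcpy(skb_put(skb, data_len), payload, data_len);
 *	...
 *	kfree_skb(skb);					<- drop our reference
 */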
/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask)
{
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}

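/*
 * Illustrative sketch, not part of the original file: a driver receive path
 * typically calls the netdev_alloc_skb() wrapper (which passes %GFP_ATOMIC),
 * so the buffer comes from the device's NUMA node and already carries the
 * NET_SKB_PAD headroom.  pkt_len and hw_copy_into() are hypothetical:
 *
 *	struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *	if (!skb)
 *		return;				<- packet is dropped
 *	skb_reserve(skb, NET_IP_ALIGN);		<- align the IP header
 *	hw_copy_into(skb_put(skb, pkt_len), pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_receive_skb(skb);
 */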
static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

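/*
 * Descriptive note, not part of the original file: as described alongside
 * SKB_DATAREF_SHIFT in <linux/skbuff.h>, dataref is split in two.  The low
 * SKB_DATAREF_SHIFT bits count references to the entire data buffer, while
 * the bits above count payload-only references added by
 * skb_header_release().  That is why a nohdr skb gives back one count from
 * each half, i.e. (1 << SKB_DATAREF_SHIFT) + 1, while an ordinary holder
 * gives back just 1; the data is actually freed only when the whole counter
 * reaches zero.
 */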
/*
 *	Free an skbuff by memory without cleaning the state.
 */
void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	skb_release_data(skb);
	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

311 | * __kfree_skb - private function | |
312 | * @skb: buffer | |
313 | * | |
314 | * Free an sk_buff. Release anything attached to the buffer. | |
315 | * Clean the state. This is an internal helper function. Users should | |
316 | * always call kfree_skb | |
317 | */ | |
318 | ||
319 | void __kfree_skb(struct sk_buff *skb) | |
320 | { | |
1da177e4 LT |
321 | dst_release(skb->dst); |
322 | #ifdef CONFIG_XFRM | |
323 | secpath_put(skb->sp); | |
324 | #endif | |
9c2b3328 SH |
325 | if (skb->destructor) { |
326 | WARN_ON(in_irq()); | |
1da177e4 LT |
327 | skb->destructor(skb); |
328 | } | |
9fb9cbb1 | 329 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
5f79e0f9 | 330 | nf_conntrack_put(skb->nfct); |
9fb9cbb1 YK |
331 | nf_conntrack_put_reasm(skb->nfct_reasm); |
332 | #endif | |
1da177e4 LT |
333 | #ifdef CONFIG_BRIDGE_NETFILTER |
334 | nf_bridge_put(skb->nf_bridge); | |
335 | #endif | |
1da177e4 LT |
336 | /* XXX: IS this still necessary? - JHS */ |
337 | #ifdef CONFIG_NET_SCHED | |
338 | skb->tc_index = 0; | |
339 | #ifdef CONFIG_NET_CLS_ACT | |
340 | skb->tc_verd = 0; | |
1da177e4 LT |
341 | #endif |
342 | #endif | |
343 | ||
344 | kfree_skbmem(skb); | |
345 | } | |
346 | ||
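/*
 * Illustrative sketch, not part of the original file: as the kerneldoc above
 * says, users never call __kfree_skb() directly.  They drop their reference
 * with kfree_skb(), which only falls through to __kfree_skb() once
 * skb->users reaches zero:
 *
 *	skb = skb_get(skb);		<- take an extra reference
 *	...
 *	kfree_skb(skb);			<- drops one reference, skb still alive
 *	kfree_skb(skb);			<- last reference gone, __kfree_skb() runs
 */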