// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <[email protected]>
 *			Florian La Roche <[email protected]>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

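/*
 *	Illustrative sketch (added for exposition, not part of the original
 *	file): the NOTE above means the lockless __skb_ queue helpers rely on
 *	the caller for atomicity, e.g. by holding the queue lock with IRQs
 *	disabled, whereas the non-underscore variants take the lock themselves.
 *	A hypothetical caller:
 *
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&list->lock, flags);
 *		__skb_queue_tail(list, skb);
 *		spin_unlock_irqrestore(&list->lock, flags);
 *
 *	or, equivalently, just skb_queue_tail(list, skb), which performs the
 *	same locking internally.
 */
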
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>
#include <net/mctp.h>
#include <net/page_pool.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>

#include "datagram.h"
#include "sock_destructor.h"

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

#define NAPI_SKB_CACHE_SIZE	64
#define NAPI_SKB_CACHE_BULK	16
#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)

struct napi_alloc_cache {
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	fragsz = SKB_DATA_ALIGN(fragsz);

	return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
}
EXPORT_SYMBOL(__napi_alloc_frag_align);

void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);
	if (in_hardirq() || irqs_disabled()) {
		struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);

		data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
	} else {
		struct napi_alloc_cache *nc;

		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache);
		data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
		local_bh_enable();
	}
	return data;
}
EXPORT_SYMBOL(__netdev_alloc_frag_align);

static struct sk_buff *napi_skb_cache_get(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;

	if (unlikely(!nc->skb_count))
		nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
						      GFP_ATOMIC,
						      NAPI_SKB_CACHE_BULK,
						      nc->skb_cache);
	if (unlikely(!nc->skb_count))
		return NULL;

	skb = nc->skb_cache[--nc->skb_count];
	kasan_unpoison_object_data(skbuff_head_cache, skb);

	return skb;
}

/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	unsigned int size = frag_size ? : ksize(data);

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes caller memset cleared SKB */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	skb_set_kcov_handle(skb, kcov_common_handle());
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, the driver allocates only the data buffer where the NIC
 *  puts the incoming frame.
 *  The driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 *  After IO, the driver calls build_skb() to allocate the sk_buff and
 *  populate it before giving the packet to the stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/* build_skb() is a wrapper over __build_skb() that specifically
 * takes care of skb->head_frag and skb->pfmemalloc.
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc().
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);

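/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * file): a hypothetical driver RX path that sizes its receive buffer as the
 * notes above describe and then wraps it with build_skb(). The function name
 * and sizing below are assumptions, not existing kernel API.
 */
static __maybe_unused struct sk_buff *example_build_rx_skb(void *rx_buf,
							    unsigned int pkt_len)
{
	/* rx_buf came from the page allocator, with NET_SKB_PAD of headroom
	 * and tailroom for struct skb_shared_info already accounted for.
	 */
	unsigned int buf_size = SKB_DATA_ALIGN(NET_SKB_PAD + pkt_len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct sk_buff *skb;

	skb = build_skb(rx_buf, buf_size);
	if (unlikely(!skb))
		return NULL;		/* rx_buf is not freed on failure */

	skb_reserve(skb, NET_SKB_PAD);	/* skip the reserved headroom */
	skb_put(skb, pkt_len);		/* cover the received frame */
	return skb;
}
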
/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provided by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	__build_skb_around(skb, data, frag_size);

	if (frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);

/**
 * __napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Version of __build_skb() that uses NAPI percpu caches to obtain
 * skbuff_head instead of inplace allocation.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = napi_skb_cache_get();
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/**
 * napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Version of __napi_build_skb() that takes care of skb->head_frag
 * and skb->pfmemalloc when the data is a page or page fragment.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __napi_build_skb(data, frag_size);

	if (likely(skb) && frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}

	return skb;
}
EXPORT_SYMBOL(napi_build_skb);

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
			     bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct sk_buff *skb;
	unsigned int osize;
	bool pfmemalloc;
	u8 *data;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
		skb = napi_skb_cache_get();
	else
		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
	if (unlikely(!skb))
		return NULL;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (unlikely(!data))
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	osize = ksize(data);
	size = SKB_WITH_OVERHEAD(osize);
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, osize);
	skb->pfmemalloc = pfmemalloc;

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}

	return skb;

nodata:
	kmem_cache_free(cache, skb);
	return NULL;
}
EXPORT_SYMBOL(__alloc_skb);

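/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * file): typical use of the allocator above through the alloc_skb() wrapper
 * from <linux/skbuff.h>. Because __alloc_skb() returns a buffer with no
 * headroom, callers normally reserve header space before adding data. The
 * function name below is hypothetical.
 */
static __maybe_unused struct sk_buff *example_alloc_tx_skb(const void *payload,
							    unsigned int len)
{
	struct sk_buff *skb;

	/* Room for protocol headers plus the payload itself. */
	skb = alloc_skb(MAX_HEADER + len, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, MAX_HEADER);		/* headroom for lower layers */
	skb_put_data(skb, payload, len);	/* append the payload */
	return skb;
}
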
/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_hardirq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
	} else {
		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

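/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * file): a hypothetical driver copying a received frame into an skb obtained
 * from netdev_alloc_skb(), the <linux/skbuff.h> wrapper around the function
 * above. NET_SKB_PAD headroom is already reserved by the allocator, so the
 * driver only reserves what it additionally needs (here, NET_IP_ALIGN).
 */
static __maybe_unused struct sk_buff *example_netdev_rx(struct net_device *dev,
							 const void *frame,
							 unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
	skb_put_data(skb, frame, len);	/* copy the frame in */
	/* A real driver would now set skb->protocol (eth_type_trans()) and
	 * hand the skb to the stack via netif_rx() or napi_gro_receive().
	 */
	return skb;
}
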
/**
 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *	Allocate a new sk_buff for use in NAPI receive. This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation. By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc;
	struct sk_buff *skb;
	void *data;

	len += NET_SKB_PAD + NET_IP_ALIGN;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
				  NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	nc = this_cpu_ptr(&napi_alloc_cache);
	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	if (unlikely(!data))
		return NULL;

	skb = __napi_build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (nc->page.pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);

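/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * file): inside a hypothetical NAPI poll handler, napi_alloc_skb() (the
 * <linux/skbuff.h> wrapper around the function above) is the preferred
 * allocator, since poll runs in softirq context and can use the per-CPU
 * NAPI caches without disabling IRQs. Names below are hypothetical.
 */
static __maybe_unused struct sk_buff *example_napi_rx(struct napi_struct *napi,
						       const void *frame,
						       unsigned int len)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, len);
	if (unlikely(!skb))
		return NULL;	/* the driver would count an rx drop here */

	/* NET_SKB_PAD + NET_IP_ALIGN headroom is already reserved. */
	skb_put_data(skb, frame, len);
	/* The driver would then set skb->protocol and call
	 * napi_gro_receive(napi, skb).
	 */
	return skb;
}
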
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag) {
		if (skb_pp_recycle(skb, head))
			return;
		skb_free_frag(head);
	} else {
		kfree(head);
	}
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		goto exit;

	skb_zcopy_clear(skb, true);

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_free_head(skb);
exit:
	/* When we clone an SKB we copy the recycling bit. The pp_recycle
	 * bit is only set on the head though, so in order to avoid races
	 * while trying to recycle fragments on __skb_frag_unref() we need
	 * to make one SKB responsible for triggering the recycle path.
	 * So disable the recycling bit if an SKB is cloned and we have
	 * additional references to the fragmented part of the SKB.
	 * Eventually the last SKB will have the recycling bit set and its
	 * dataref set to 0, which will trigger the recycling.
	 */
	skb->pp_recycle = 0;
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original skb
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		WARN_ON(in_hardirq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

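/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * as the comment above says, users free skbs through kfree_skb() (or
 * consume_skb() for packets that were processed successfully) rather than
 * calling __kfree_skb() directly; those wrappers handle the reference count
 * and only free the skb on the last reference. Names below are hypothetical.
 */
static __maybe_unused void example_free_paths(struct sk_buff *skb, bool delivered)
{
	if (delivered)
		consume_skb(skb);	/* normal completion path */
	else
		kfree_skb(skb);		/* error / drop path */
}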