// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool.h>

#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn    = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}

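/* Descriptive note (added): mem_allocator_disconnect() is the shutdown
 * callback handed to page_pool via page_pool_use_xdp_mem() further below.
 * It walks mem_id_ht and removes every entry still pointing at the
 * allocator that is going away, freeing each entry via RCU so concurrent
 * __xdp_return() lookups remain safe.
 */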
static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}

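/* Descriptive note (added): xdp_unreg_mem_model() resets the xdp_mem_info
 * to defaults. For MEM_TYPE_PAGE_POOL it also looks the allocator up by id
 * and calls page_pool_destroy() on it; the rhashtable entry itself is
 * removed later, from mem_allocator_disconnect(), when page_pool invokes
 * its disconnect callback.
 */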
void xdp_unreg_mem_model(struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	int type = mem->type;
	int id = mem->id;

	/* Reset mem info to defaults */
	mem->id = 0;
	mem->type = 0;

	if (id == 0)
		return;

	if (type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	xdp_unreg_mem_model(&xdp_rxq->mem);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		       struct net_device *dev, u32 queue_index,
		       unsigned int napi_id, u32 frag_size)
{
	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;
	xdp_rxq->napi_id = napi_id;
	xdp_rxq->frag_size = frag_size;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(__xdp_rxq_info_reg);

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
						     enum xdp_mem_type type,
						     void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (!__is_supported_mem_type(type))
		return ERR_PTR(-EOPNOTSUPP);

	mem->type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL)
			return ERR_PTR(-EINVAL); /* Setup time check page_pool req */
		return NULL;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ERR_PTR(ret);
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	mem->id = id;
	xdp_alloc->mem = *mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, mem->id);
		mem->id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect, mem);

	mutex_unlock(&mem_id_lock);

	return xdp_alloc;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return ERR_PTR(errno);
}

int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	xdp_alloc = __xdp_reg_mem_model(mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_reg_mem_model);

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	xdp_alloc = __xdp_reg_mem_model(&xdp_rxq->mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);

	if (trace_mem_connect_enabled() && xdp_alloc)
		trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);

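/* Added usage sketch (not from the original file): a typical driver wires
 * the registration APIs above together roughly as follows, assuming a
 * page_pool backed RX ring ("rxq" and "pp" are placeholder names):
 *
 *	err = xdp_rxq_info_reg(&rxq->xdp_rxq, netdev, rxq->index, napi_id);
 *	if (!err)
 *		err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
 *						 MEM_TYPE_PAGE_POOL, pp);
 *
 *	(and on teardown)
 *	xdp_rxq_info_unreg(&rxq->xdp_rxq);
 */
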
/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites, thus allowing for faster recycling
 * of xdp_frames/pages in those cases.
 */
void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
		  struct xdp_buff *xdp)
{
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		page = virt_to_head_page(data);
		if (napi_direct && xdp_return_frame_no_direct())
			napi_direct = false;
		/* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
		 * as mem->type knows this is a page_pool page
		 */
		page_pool_put_full_page(page->pp, page, napi_direct);
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order0 page */
		put_page(page);
		break;
	case MEM_TYPE_XSK_BUFF_POOL:
		/* NB! Only valid from an xdp_buff! */
		xsk_buff_free(xdp);
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdpf->mem, false, NULL);
	}
out:
	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdpf->mem, true, NULL);
	}
out:
	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

/* XDP bulk APIs introduce a defer/flush mechanism to return
 * pages belonging to the same xdp_mem_allocator object
 * (identified via the mem.id field) in bulk to optimize
 * I-cache and D-cache.
 * The bulk queue size is set to 16 to be aligned to how
 * XDP_REDIRECT bulking works. The bulk is flushed when
 * it is full or when mem.id changes.
 * xdp_frame_bulk is usually stored/allocated on the function
 * call-stack to avoid locking penalties.
 */
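/* Added usage sketch (not part of the original comment): a hypothetical
 * TX-completion loop would typically use the bulk API like this, under
 * rcu_read_lock() as required by xdp_return_frame_bulk():
 *
 *	struct xdp_frame_bulk bq;
 *
 *	xdp_frame_bulk_init(&bq);
 *	rcu_read_lock();
 *	for (... each completed xdp_frame *xdpf ...)
 *		xdp_return_frame_bulk(xdpf, &bq);
 *	xdp_flush_frame_bulk(&bq);
 *	rcu_read_unlock();
 */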
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
{
	struct xdp_mem_allocator *xa = bq->xa;

	if (unlikely(!xa || !bq->count))
		return;

	page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
	/* bq->xa is not cleared to save lookup, if mem.id same in next bulk */
	bq->count = 0;
}
EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);

/* Must be called with rcu_read_lock held */
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq)
{
	struct xdp_mem_info *mem = &xdpf->mem;
	struct xdp_mem_allocator *xa;

	if (mem->type != MEM_TYPE_PAGE_POOL) {
		xdp_return_frame(xdpf);
		return;
	}

	xa = bq->xa;
	if (unlikely(!xa)) {
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		bq->count = 0;
		bq->xa = xa;
	}

	if (bq->count == XDP_BULK_QUEUE_SIZE)
		xdp_flush_frame_bulk(bq);

	if (unlikely(mem->id != xa->mem.id)) {
		xdp_flush_frame_bulk(bq);
		bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	}

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		struct skb_shared_info *sinfo;
		int i;

		sinfo = xdp_get_shared_info_from_frame(xdpf);
		for (i = 0; i < sinfo->nr_frags; i++) {
			skb_frag_t *frag = &sinfo->frags[i];

			bq->q[bq->count++] = skb_frag_address(frag);
			if (bq->count == XDP_BULK_QUEUE_SIZE)
				xdp_flush_frame_bulk(bq);
		}
	}
	bq->q[bq->count++] = xdpf->data;
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);

void xdp_return_buff(struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	sinfo = xdp_get_shared_info_from_buff(xdp);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdp->rxq->mem, true, xdp);
	}
out:
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);

/* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	rcu_read_lock();
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	page = virt_to_head_page(data);
	if (xa)
		page_pool_release_page(xa->page_pool, page);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->frame_sz = PAGE_SIZE;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xsk_buff_free(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);

/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
}
EXPORT_SYMBOL_GPL(xdp_warn);

int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
{
	n_skb = kmem_cache_alloc_bulk(skbuff_cache, gfp, n_skb, skbs);
	if (unlikely(!n_skb))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);

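/* Added note (not in the original): __xdp_build_skb_from_frame() converts
 * an xdp_frame into the caller-provided skb without copying packet data,
 * by building the skb around the memory that already backs the frame
 * (build_skb_around()). The skb must have been allocated from skbuff_cache
 * beforehand, e.g. via xdp_alloc_skb_bulk() above or the
 * xdp_build_skb_from_frame() wrapper below.
 */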
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	unsigned int headroom, frame_size;
	void *hard_start;
	u8 nr_frags;

	/* xdp frags frame */
	if (unlikely(xdp_frame_has_frags(xdpf)))
		nr_frags = sinfo->nr_frags;

	/* Part of headroom was reserved to xdpf */
	headroom = sizeof(*xdpf) + xdpf->headroom;

	/* Memory size backing xdp_frame data already has reserved
	 * room for build_skb to place skb_shared_info in tailroom.
	 */
	frame_size = xdpf->frame_sz;

	hard_start = xdpf->data - headroom;
	skb = build_skb_around(skb, hard_start, frame_size);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	if (unlikely(xdp_frame_has_frags(xdpf)))
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size,
					   nr_frags * xdpf->frame_sz,
					   xdp_frame_is_frag_pfmemalloc(xdpf));

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, dev);

	/* Optional SKB info, currently missing:
	 * - HW checksum info (skb->ip_summed)
	 * - HW RX hash (skb_set_hash)
	 * - RX ring dev queue index (skb_record_rx_queue)
	 */

	/* Until page_pool gets an SKB return path, release DMA here */
	xdp_release_frame(xdpf);

	/* Allow SKB to reuse area used by xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}
EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame);

struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __xdp_build_skb_from_frame(xdpf, skb, dev);
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);

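/* Added note (not in the original): xdpf_clone() deep-copies an xdp_frame,
 * headroom included, into a freshly allocated order-0 page and points the
 * clone's mem info at MEM_TYPE_PAGE_ORDER0, so the copy can be returned
 * independently of the original frame's memory model.
 */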
struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
{
	unsigned int headroom, totalsize;
	struct xdp_frame *nxdpf;
	struct page *page;
	void *addr;

	headroom = xdpf->headroom + sizeof(*xdpf);
	totalsize = headroom + xdpf->len;

	if (unlikely(totalsize > PAGE_SIZE))
		return NULL;
	page = dev_alloc_page();
	if (!page)
		return NULL;
	addr = page_to_virt(page);

	memcpy(addr, xdpf, totalsize);

	nxdpf = addr;
	nxdpf->data = addr + headroom;
	nxdpf->frame_sz = PAGE_SIZE;
	nxdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
	nxdpf->mem.id = 0;

	return nxdpf;
}

__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global functions as their definitions will be in vmlinux BTF");

/**
 * bpf_xdp_metadata_rx_timestamp - Read XDP frame RX timestamp.
 * @ctx: XDP context pointer.
 * @timestamp: Return value pointer.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : means device driver does not implement kfunc
 * * ``-ENODATA``    : means no RX-timestamp available for this frame
 */
__bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
	return -EOPNOTSUPP;
}

/**
 * bpf_xdp_metadata_rx_hash - Read XDP frame RX hash.
 * @ctx: XDP context pointer.
 * @hash: Return value pointer.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : means device driver doesn't implement kfunc
 * * ``-ENODATA``    : means no RX-hash available for this frame
 */
__bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash)
{
	return -EOPNOTSUPP;
}

__diag_pop();

BTF_SET8_START(xdp_metadata_kfunc_ids)
#define XDP_METADATA_KFUNC(_, name) BTF_ID_FLAGS(func, name, 0)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
BTF_SET8_END(xdp_metadata_kfunc_ids)

static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &xdp_metadata_kfunc_ids,
};

BTF_ID_LIST(xdp_metadata_kfunc_ids_unsorted)
#define XDP_METADATA_KFUNC(name, str) BTF_ID(func, str)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

u32 bpf_xdp_metadata_kfunc_id(int id)
{
	/* xdp_metadata_kfunc_ids is sorted and can't be used */
	return xdp_metadata_kfunc_ids_unsorted[id];
}

bool bpf_dev_bound_kfunc_id(u32 btf_id)
{
	return btf_id_set8_contains(&xdp_metadata_kfunc_ids, btf_id);
}

static int __init xdp_metadata_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &xdp_metadata_kfunc_set);
}
late_initcall(xdp_metadata_init);

void xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
{
	val &= NETDEV_XDP_ACT_MASK;
	if (dev->xdp_features == val)
		return;

	dev->xdp_features = val;

	if (dev->reg_state == NETREG_REGISTERED)
		call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL_GPL(xdp_set_features_flag);

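/* Added note (not in the original): the two helpers below let a driver
 * advertise or withdraw support for acting as an XDP_REDIRECT target
 * (NETDEV_XDP_ACT_NDO_XMIT, plus NETDEV_XDP_ACT_NDO_XMIT_SG when
 * multi-buffer frames are supported). Both funnel through
 * xdp_set_features_flag() above, so the feature-change notifier fires.
 */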
void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
{
	xdp_features_t val = (dev->xdp_features | NETDEV_XDP_ACT_NDO_XMIT);

	if (support_sg)
		val |= NETDEV_XDP_ACT_NDO_XMIT_SG;
	xdp_set_features_flag(dev, val);
}
EXPORT_SYMBOL_GPL(xdp_features_set_redirect_target);

void xdp_features_clear_redirect_target(struct net_device *dev)
{
	xdp_features_t val = dev->xdp_features;

	val &= ~(NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_NDO_XMIT_SG);
	xdp_set_features_flag(dev, val);
}
EXPORT_SYMBOL_GPL(xdp_features_clear_redirect_target);