/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 * Released under terms in GPL version 2. See COPYING.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/page_pool.h>

#include <net/xdp.h>
#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3
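
/* Lifecycle of an RX-queue registration: NEW -> REGISTERED ->
 * UNREGISTERED. UNUSED is an opt-out state a driver sets (via
 * xdp_rxq_info_unused()) to promise this RX-queue will never be
 * registered for XDP.
 */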
static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;
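
/* Each registered allocator is keyed by a small integer ID, stored in
 * xdp_rxq->mem.id and carried in every xdp_frame. At return time the ID
 * is looked up in mem_id_ht to find the backing allocator, so frames
 * never need to carry a raw allocator pointer.
 */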
struct xdp_mem_allocator {
	struct xdp_mem_info mem;
	union {
		void *allocator;
		struct page_pool *page_pool;
		struct zero_copy_allocator *zc_alloc;
	};
	struct rhash_head node;
	struct rcu_head rcu;
};
static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}
static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}
static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = FIELD_SIZEOF(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn    = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};
static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	/* Notice, driver is expected to free the *allocator,
	 * e.g. page_pool, and MUST also use RCU free.
	 */

	/* Poison memory */
	xa->mem.id = 0xFFFF;
	xa->mem.type = 0xF0F0;
	xa->allocator = (void *)0xDEAD9001;

	kfree(xa);
}
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	struct xdp_mem_allocator *xa;
	int id = xdp_rxq->mem.id;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	if (xdp_rxq->mem.type != MEM_TYPE_PAGE_POOL &&
	    xdp_rxq->mem.type != MEM_TYPE_ZERO_COPY)
		return;

	if (id == 0)
		return;

	mutex_lock(&mem_id_lock);

	xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
	if (xa && !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);

	mutex_unlock(&mem_id_lock);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;

	/* Reset mem info to defaults */
	xdp_rxq->mem.id = 0;
	xdp_rxq->mem.type = 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);
static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}
/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index)
{
	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);
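
/* Example (sketch): the expected driver pattern during RX-ring setup,
 * assuming a hypothetical driver with a per-queue "ring" structure;
 * names like priv->netdev and ring->q_idx are illustrative only:
 *
 *	err = xdp_rxq_info_reg(&ring->xdp_rxq, priv->netdev, ring->q_idx);
 *	if (err)
 *		goto err_unwind;
 *
 * The matching xdp_rxq_info_unreg() belongs in the ring teardown path.
 */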
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);
static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}
/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}
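
/* Note on the cyclic scheme above: ida_simple_get() hands out the
 * lowest free ID at or above mem_id_next, so freed (lower) IDs are not
 * reused until the ID space wraps at MEM_ID_MAX. This narrows the
 * window in which a stale mem.id carried by an in-flight xdp_frame
 * could alias a newly registered allocator.
 */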
static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	if (!__is_supported_mem_type(type))
		return -EOPNOTSUPP;

	xdp_rxq->mem.type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL || type == MEM_TYPE_ZERO_COPY)
			return -EINVAL; /* Setup time check page_pool req */
		return 0;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ret;
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return -ENOMEM;

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	xdp_rxq->mem.id = id;
	xdp_alloc->mem = xdp_rxq->mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		errno = PTR_ERR(ptr);
		goto err;
	}

	mutex_unlock(&mem_id_lock);

	return 0;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return errno;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
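
/* Example (sketch): after xdp_rxq_info_reg(), a driver picks its memory
 * model. For a page_pool based driver (hypothetical ring->page_pool,
 * created earlier with page_pool_create()):
 *
 *	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL, ring->page_pool);
 *	if (err)
 *		goto err_unreg_rxq;
 *
 * For MEM_TYPE_PAGE_SHARED/MEM_TYPE_PAGE_ORDER0 the allocator argument
 * is NULL and no mem.id is allocated.
 */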
/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites, thus allowing for faster recycling
 * of xdp_frames/pages in those cases.
 */
static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
			 unsigned long handle)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		if (xa) {
			napi_direct &= !xdp_return_frame_no_direct();
			page_pool_put_page(xa->page_pool, page, napi_direct);
		} else {
			put_page(page);
		}
		rcu_read_unlock();
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order0 page */
		put_page(page);
		break;
	case MEM_TYPE_ZERO_COPY:
		/* NB! Only valid from an xdp_buff! */
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		xa->zc_alloc->free(xa->zc_alloc, handle);
		rcu_read_unlock();
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		break;
	}
}
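
/* The wrappers below differ only in the @napi_direct hint passed to
 * __xdp_return(): xdp_return_frame() is safe from any context, while
 * xdp_return_frame_rx_napi() must only be used while still running
 * under the RX NAPI protection the frame was received under, where
 * page_pool may recycle directly into its fast cache.
 */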
void xdp_return_frame(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, false, 0);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, true, 0);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

void xdp_return_buff(struct xdp_buff *xdp)
{
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);
int xdp_attachment_query(struct xdp_attachment_info *info,
			 struct netdev_bpf *bpf)
{
	bpf->prog_id = info->prog ? info->prog->aux->id : 0;
	bpf->prog_flags = info->prog ? info->flags : 0;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_attachment_query);

bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
			     struct netdev_bpf *bpf)
{
	if (info->prog && (bpf->flags ^ info->flags) & XDP_FLAGS_MODES) {
		NL_SET_ERR_MSG(bpf->extack,
			       "program loaded with different flags");
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xdp_attachment_flags_ok);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);
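
/* Example (sketch): typical use of the attachment helpers from a
 * driver's ndo_bpf() XDP_SETUP_PROG handler, assuming a hypothetical
 * priv->xdp_info field of type struct xdp_attachment_info:
 *
 *	if (!xdp_attachment_flags_ok(&priv->xdp_info, bpf))
 *		return -EBUSY;
 *	... install bpf->prog in the driver datapath ...
 *	xdp_attachment_setup(&priv->xdp_info, bpf);
 */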
struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xdp_return_buff(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);
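
/* Example (sketch): a redirect path cannot hold on to a zero-copy
 * xdp_buff, as the underlying umem pages belong to user-space, so a
 * hypothetical caller clones it into a regular frame first:
 *
 *	if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) {
 *		xdpf = xdp_convert_zc_to_xdp_frame(xdp);
 *		if (!xdpf)
 *			return -ENOMEM;
 *	}
 */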