/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#include "xsk.h"

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	/* Hinder the adjacent cache prefetcher from prefetching the consumer
	 * pointer if the producer pointer is touched and vice versa.
	 */
	u32 pad1 ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 pad2 ____cacheline_aligned_in_smp;
	u32 flags;
	u32 pad3 ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
	u32 ring_mask;
	u32 nentries;
	u32 cached_prod;
	u32 cached_cons;
	struct xdp_ring *ring;
	u64 invalid_descs;
	u64 queue_empty_descs;
	size_t ring_vmalloc_size;
};

struct parsed_desc {
	u32 mb;
	u32 valid;
};

/* The structure of the shared state of the rings is a simple
 * circular buffer, as outlined in
 * Documentation/core-api/circular-buffers.rst. For the Rx and
 * completion ring, the kernel is the producer and user space is the
 * consumer. For the Tx and fill rings, the kernel is the consumer and
 * user space is the producer.
 *
 * producer                         consumer
 *
 * if (LOAD ->consumer) {  (A)      LOAD.acq ->producer  (C)
 *    STORE $data                   LOAD $data
 *    STORE.rel ->producer (B)      STORE.rel ->consumer (D)
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it protects the data from being written after
 * the producer pointer. If this barrier was missing, the consumer
 * could observe the producer pointer being set and thus load the data
 * before the producer has written the new data. The consumer would in
 * this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer actually has been read. If we did not have this
 * barrier, some architectures could load old data, as speculative loads
 * are not discarded when the CPU does not know there is a dependency
 * between ->producer and data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. In case ->consumer indicates there is no
 * room in the buffer to store $data, we do not store it. The dependency
 * orders both of the stores after the loads, so no barrier is needed.
 *
 * (D) ensures the load of the data completes before the store of the
 * consumer pointer can be observed. If we did not have this memory
 * barrier, the producer could observe the consumer pointer being set
 * and overwrite the data with a new value before the consumer got the
 * chance to read the old value. The consumer would thus miss reading
 * the old entry and very likely read the new entry twice, once right
 * now and again after circling through the ring.
 */
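
/* As a concrete, purely illustrative sketch of how these pairings appear
 * in the helpers below (ring, cached_cons, mask and entry are made-up
 * names, not part of this header): a consumer draining one entry from a
 * ring shared with another CPU could be written as
 *
 *	u32 prod = smp_load_acquire(&ring->producer);	// C, pairs with B
 *	if (prod != cached_cons) {
 *		entry = ring->desc[cached_cons++ & mask];	// LOAD $data
 *		smp_store_release(&ring->consumer, cached_cons); // D, pairs with A
 *	}
 *
 * which is exactly the shape of __xskq_cons_peek() and
 * __xskq_cons_release() further down, with the ->producer value cached
 * in between.
 */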

/* The operations on the rings are the following:
 *
 * producer                         consumer
 *
 * RESERVE entries                  PEEK in the ring for entries
 * WRITE data into the ring         READ data from the ring
 * SUBMIT entries                   RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and, when it is done reading them, release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations.
 */
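
/* On the producer side, these operations map onto the functions below
 * roughly as follows (illustrative sketch of a kernel Rx path; error
 * handling elided):
 *
 *	if (xskq_prod_reserve_desc(rx, addr, len, 0))	// RESERVE + WRITE
 *		return;					// ring full
 *	xskq_prod_submit(rx);				// SUBMIT
 *
 * The consumer-side PEEK/READ/RELEASE counterpart is sketched after
 * xskq_cons_release() below.
 */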

/* Functions that read and validate content from consumer rings. */

static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = cached_cons & q->ring_mask;

	*addr = ring->desc[idx];
}

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_cons != q->cached_prod) {
		__xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);
		return true;
	}

	return false;
}

static inline bool xp_unused_options_set(u32 options)
{
	return options & ~(XDP_PKT_CONTD | XDP_TX_METADATA);
}

static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
					    struct xdp_desc *desc)
{
	u64 addr = desc->addr - pool->tx_metadata_len;
	u64 len = desc->len + pool->tx_metadata_len;
	u64 offset = addr & (pool->chunk_size - 1);

	if (!desc->len)
		return false;

	if (offset + len > pool->chunk_size)
		return false;

	if (addr >= pool->addrs_cnt)
		return false;

	if (xp_unused_options_set(desc->options))
		return false;
	return true;
}
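
/* Worked example with illustrative numbers: for chunk_size = 0x800 (2048)
 * and tx_metadata_len = 0, a descriptor with addr = 0x1a00 and len = 0x700
 * gives offset = 0x1a00 & 0x7ff = 0x200, so offset + len = 0x900 > 0x800
 * and the descriptor is rejected for crossing a chunk boundary.
 */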

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
					      struct xdp_desc *desc)
{
	u64 addr = xp_unaligned_add_offset_to_addr(desc->addr) - pool->tx_metadata_len;
	u64 len = desc->len + pool->tx_metadata_len;

	if (!desc->len)
		return false;

	if (len > pool->chunk_size)
		return false;

	if (addr >= pool->addrs_cnt || addr + len > pool->addrs_cnt ||
	    xp_desc_crosses_non_contig_pg(pool, addr, len))
		return false;

	if (xp_unused_options_set(desc->options))
		return false;
	return true;
}

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
		xp_aligned_validate_desc(pool, desc);
}

static inline bool xskq_has_descs(struct xsk_queue *q)
{
	return q->cached_cons != q->cached_prod;
}

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xsk_buff_pool *pool)
{
	if (!xp_validate_desc(pool, d)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		return xskq_cons_is_valid_desc(q, desc, pool);
	}

	q->queue_empty_descs++;
	return false;
}

static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons += cnt;
}

static inline void parse_desc(struct xsk_queue *q, struct xsk_buff_pool *pool,
			      struct xdp_desc *desc, struct parsed_desc *parsed)
{
	parsed->valid = xskq_cons_is_valid_desc(q, desc, pool);
	parsed->mb = xp_mb_desc(desc);
}

static inline
u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
			      u32 max)
{
	u32 cached_cons = q->cached_cons, nb_entries = 0;
	struct xdp_desc *descs = pool->tx_descs;
	u32 total_descs = 0, nr_frags = 0;

	/* Track the first entry; if we stumble upon *any* invalid descriptor,
	 * rewind the current packet that consists of frags and stop processing.
	 */
	while (cached_cons != q->cached_prod && nb_entries < max) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = cached_cons & q->ring_mask;
		struct parsed_desc parsed;

		descs[nb_entries] = ring->desc[idx];
		cached_cons++;
		parse_desc(q, pool, &descs[nb_entries], &parsed);
		if (unlikely(!parsed.valid))
			break;

		if (likely(!parsed.mb)) {
			total_descs += (nr_frags + 1);
			nr_frags = 0;
		} else {
			nr_frags++;
			if (nr_frags == pool->xdp_zc_max_segs) {
				nr_frags = 0;
				break;
			}
		}
		nb_entries++;
	}

	cached_cons -= nr_frags;
	/* Release valid plus any invalid entries */
	xskq_cons_release_n(q, cached_cons - q->cached_cons);
	return total_descs;
}
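
/* Illustration of the rewind: with max = 4 and a 3-frag packet followed by
 * the first frag of a second packet, the loop copies four descriptors but
 * exits with nr_frags == 1, since the second packet has not reached its
 * EOP descriptor. cached_cons is pulled back by one, only the three
 * entries of the completed packet are released, and the partial packet is
 * re-read from its first frag on the next call.
 */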

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = smp_load_acquire(&q->ring->producer); /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= max)
		return max;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= max ? max : entries;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, pool);
}

/* To improve performance in the xskq_cons_release functions, only update
 * local state here. Reflect this to global state when we get new entries
 * from the ring in xskq_cons_get_entries() and whenever Rx or Tx
 * processing is completed in the NAPI loop.
 */
static inline void xskq_cons_release(struct xsk_queue *q)
{
	q->cached_cons++;
}
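
/* A typical consumer loop built from these helpers could thus look like
 * (illustrative sketch; process() is a hypothetical callback):
 *
 *	while (xskq_cons_peek_desc(q, &desc, pool)) {	// PEEK + READ
 *		process(&desc);
 *		xskq_cons_release(q);	// RELEASE: bumps cached_cons only
 *	}
 *
 * with the global ->consumer store (D) deferred to __xskq_cons_release(),
 * e.g. via the next xskq_cons_get_entries() call.
 */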

static inline void xskq_cons_cancel_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons -= cnt;
}

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}

/* Functions for producers */

static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries >= max)
		return max;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return free_entries >= max ? max : free_entries;
}

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	return xskq_prod_nb_free(q, 1) ? false : true;
}

static inline void xskq_prod_cancel_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_prod -= cnt;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}
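
/* Usage sketch (illustrative only, not the exact in-tree call sequence):
 * to hand a completed buffer address back to user space on a completion
 * ring cq, a caller could do
 *
 *	if (xskq_prod_reserve_addr(cq, addr))	// fails with -ENOSPC when full
 *		return;				// retry later
 *	xskq_prod_submit(cq);			// B: publish ->producer
 */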

static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
					      u32 nb_entries)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 i, cached_prod;

	/* A, matches D */
	cached_prod = q->cached_prod;
	for (i = 0; i < nb_entries; i++)
		ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
	q->cached_prod = cached_prod;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len, u32 flags)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOBUFS;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;
	ring->desc[idx].options = flags;

	return 0;
}

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_store_release(&q->ring->producer, idx); /* B, matches C */
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}
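
/* Usage sketch: a producer that has written n entries in one go, e.g. with
 * xskq_prod_write_addr_batch() after checking space via xskq_prod_nb_free(),
 * can publish them all with a single release store:
 *
 *	xskq_prod_write_addr_batch(q, descs, n);
 *	xskq_prod_submit_n(q, n);
 */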

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
	return q ? q->queue_empty_descs : 0;
}

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */