/*
 *	Definitions for the 'struct ptr_ring' datastructure.
 *
 *	Copyright (C) 2016 Red Hat, Inc.
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License as published by the
 *	Free Software Foundation; either version 2 of the License, or (at your
 *	option) any later version.
 *
 *	This is a limited-size FIFO maintaining pointers in FIFO order, with
 *	one CPU producing entries and another consuming entries from a FIFO.
 *
 *	This implementation tries to minimize cache-contention when there is a
 *	single producer and a single consumer CPU.
 */

#ifndef _LINUX_PTR_RING_H
#define _LINUX_PTR_RING_H 1

#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/errno.h>

struct ptr_ring {
	int producer ____cacheline_aligned_in_smp;
	spinlock_t producer_lock;
	int consumer_head ____cacheline_aligned_in_smp; /* next valid entry */
	int consumer_tail; /* next entry to invalidate */
	spinlock_t consumer_lock;
	/* Shared consumer/producer data */
	/* Read-only by both the producer and the consumer */
	int size ____cacheline_aligned_in_smp; /* max entries in queue */
	int batch; /* number of entries to consume in a batch */
	void **queue;
};

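/*
 * Layout note: the producer index and its lock share one cache line, the
 * consumer indices and their lock another, and the read-mostly size, batch
 * and queue fields a third, so in the common single-producer/single-consumer
 * case the two CPUs do not bounce the same cache line between them.
 */
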
/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax().
 *
 * NB: this is unlike __ptr_ring_empty in that callers must hold producer_lock:
 * see e.g. ptr_ring_full.
 */
static inline bool __ptr_ring_full(struct ptr_ring *r)
{
	return r->queue[r->producer];
}

static inline bool ptr_ring_full(struct ptr_ring *r)
{
	bool ret;

	spin_lock(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock(&r->producer_lock);
	return ret;
}

static inline bool ptr_ring_full_irq(struct ptr_ring *r)
{
	bool ret;

	spin_lock_irq(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock_irq(&r->producer_lock);
	return ret;
}

static inline bool ptr_ring_full_any(struct ptr_ring *r)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&r->producer_lock, flags);
	ret = __ptr_ring_full(r);
	spin_unlock_irqrestore(&r->producer_lock, flags);
	return ret;
}

static inline bool ptr_ring_full_bh(struct ptr_ring *r)
{
	bool ret;

	spin_lock_bh(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock_bh(&r->producer_lock);
	return ret;
}

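/*
 * Illustrative sketch (hypothetical caller, not part of this header): polling
 * for free space from process context with the locked variant.  As the note
 * above __ptr_ring_full() says, such a loop needs a compiler barrier, for
 * example cpu_relax():
 *
 *	while (ptr_ring_full(r))
 *		cpu_relax();
 */
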
/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must hold producer_lock.
 * Callers are responsible for making sure the pointer that is being queued
 * points to valid data.
 */
static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	if (unlikely(!r->size) || r->queue[r->producer])
		return -ENOSPC;

	/* Make sure the pointer we are storing points to valid data. */
	/* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
	smp_wmb();

	WRITE_ONCE(r->queue[r->producer++], ptr);
	if (unlikely(r->producer >= r->size))
		r->producer = 0;
	return 0;
}

/*
 * Note: resize (below) nests producer lock within consumer lock, so if you
 * consume in interrupt or BH context, you must disable interrupts/BH when
 * producing.
 */
static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock(&r->producer_lock);
	return ret;
}

static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_irq(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irq(&r->producer_lock);
	return ret;
}

static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&r->producer_lock, flags);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irqrestore(&r->producer_lock, flags);
	return ret;
}

static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_bh(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_bh(&r->producer_lock);
	return ret;
}

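/*
 * Illustrative sketch (hypothetical caller, not part of this header): if the
 * entries are consumed from BH context, the producer should use the _bh
 * variant so lock nesting against a concurrent resize stays safe, and it must
 * be prepared for -ENOSPC when the ring is full; "drop" stands in for the
 * caller's cleanup:
 *
 *	if (ptr_ring_produce_bh(r, obj))
 *		drop(obj);
 */
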
static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
	if (likely(r->size))
		return READ_ONCE(r->queue[r->consumer_head]);
	return NULL;
}

/*
 * Test ring empty status without taking any locks.
 *
 * NB: This is only safe to call if ring is never resized.
 *
 * However, if some other CPU consumes ring entries at the same time, the value
 * returned is not guaranteed to be correct.
 *
 * In this case - to avoid incorrectly detecting the ring
 * as empty - the CPU consuming the ring entries is responsible
 * for either consuming all ring entries until the ring is empty,
 * or synchronizing with some other CPU and causing it to
 * re-test __ptr_ring_empty and/or consume the ring entries
 * after the synchronization point.
 *
 * Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax().
 */
static inline bool __ptr_ring_empty(struct ptr_ring *r)
{
	if (likely(r->size))
		return !r->queue[READ_ONCE(r->consumer_head)];
	return true;
}

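/*
 * Illustrative sketch (hypothetical wait loop, not part of this header): the
 * lockless test can decide whether to sleep, provided the ring is never
 * resized and actual consumption still happens under consumer_lock:
 *
 *	while (__ptr_ring_empty(r))
 *		schedule_timeout_interruptible(1);
 *	obj = ptr_ring_consume(r);
 *
 * A real caller would use a waitqueue and re-check the ring after any wakeup,
 * as the comment above requires.
 */
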
static inline bool ptr_ring_empty(struct ptr_ring *r)
{
	bool ret;

	spin_lock(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock(&r->consumer_lock);
	return ret;
}

static inline bool ptr_ring_empty_irq(struct ptr_ring *r)
{
	bool ret;

	spin_lock_irq(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock_irq(&r->consumer_lock);
	return ret;
}

static inline bool ptr_ring_empty_any(struct ptr_ring *r)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ret = __ptr_ring_empty(r);
	spin_unlock_irqrestore(&r->consumer_lock, flags);
	return ret;
}

static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
{
	bool ret;

	spin_lock_bh(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock_bh(&r->consumer_lock);
	return ret;
}

/* Must only be called after __ptr_ring_peek returned !NULL */
static inline void __ptr_ring_discard_one(struct ptr_ring *r)
{
	/* Fundamentally, what we want to do is update consumer
	 * index and zero out the entry so producer can reuse it.
	 * Doing it naively at each consume would be as simple as:
	 *       consumer = r->consumer;
	 *       r->queue[consumer++] = NULL;
	 *       if (unlikely(consumer >= r->size))
	 *               consumer = 0;
	 *       r->consumer = consumer;
	 * but that is suboptimal when the ring is full as the producer is
	 * writing out new entries in the same cache line.  Defer these
	 * updates until a batch of entries has been consumed.
	 */
	/* Note: we must keep consumer_head valid at all times for
	 * __ptr_ring_empty to work correctly.
	 */
	int consumer_head = r->consumer_head;
	int head = consumer_head++;

	/* Once we have processed enough entries invalidate them in
	 * the ring all at once so producer can reuse their space in the ring.
	 * We also do this when we reach the end of the ring - not mandatory
	 * but helps keep the implementation simple.
	 */
	if (unlikely(consumer_head - r->consumer_tail >= r->batch ||
		     consumer_head >= r->size)) {
		/* Zero out entries in reverse order: this way we touch the
		 * cache line that the producer might currently be reading
		 * last; the producer won't make progress and touch other
		 * cache lines besides the first one until we write out all
		 * entries.
		 */
		while (likely(head >= r->consumer_tail))
			r->queue[head--] = NULL;
		r->consumer_tail = consumer_head;
	}
	if (unlikely(consumer_head >= r->size)) {
		consumer_head = 0;
		r->consumer_tail = 0;
	}
	/* matching READ_ONCE in __ptr_ring_empty for lockless tests */
	WRITE_ONCE(r->consumer_head, consumer_head);
}

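/*
 * Worked example (illustrative numbers): with r->size = 16 and r->batch = 8,
 * consuming entries 0..6 only advances consumer_head and nothing is NULLed
 * yet.  Consuming entry 7 makes consumer_head - consumer_tail reach 8, so
 * entries 7 down to 0 are zeroed in reverse order and consumer_tail becomes
 * 8: the producer sees free slots appear a batch at a time rather than one
 * entry at a time.
 */
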
static inline void *__ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	ptr = __ptr_ring_peek(r);
	if (ptr)
		__ptr_ring_discard_one(r);

	/* Make sure anyone accessing data through the pointer is up to date. */
	/* Pairs with smp_wmb in __ptr_ring_produce. */
	smp_read_barrier_depends();

	return ptr;
}

static inline int __ptr_ring_consume_batched(struct ptr_ring *r,
					     void **array, int n)
{
	void *ptr;
	int i;

	for (i = 0; i < n; i++) {
		ptr = __ptr_ring_consume(r);
		if (!ptr)
			break;
		array[i] = ptr;
	}

	return i;
}

/*
 * Note: resize (below) nests producer lock within consumer lock, so if you
 * call this in interrupt or BH context, you must disable interrupts/BH when
 * producing.
 */
static inline void *ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	spin_lock(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock(&r->consumer_lock);
	return ptr;
}

static inline void *ptr_ring_consume_irq(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_irq(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irq(&r->consumer_lock);
	return ptr;
}

static inline void *ptr_ring_consume_any(struct ptr_ring *r)
{
	unsigned long flags;
	void *ptr;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irqrestore(&r->consumer_lock, flags);
	return ptr;
}

static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_bh(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_bh(&r->consumer_lock);
	return ptr;
}

static inline int ptr_ring_consume_batched(struct ptr_ring *r,
					   void **array, int n)
{
	int ret;

	spin_lock(&r->consumer_lock);
	ret = __ptr_ring_consume_batched(r, array, n);
	spin_unlock(&r->consumer_lock);
	return ret;
}

static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r,
					       void **array, int n)
{
	int ret;

	spin_lock_irq(&r->consumer_lock);
	ret = __ptr_ring_consume_batched(r, array, n);
	spin_unlock_irq(&r->consumer_lock);
	return ret;
}

static inline int ptr_ring_consume_batched_any(struct ptr_ring *r,
					       void **array, int n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ret = __ptr_ring_consume_batched(r, array, n);
	spin_unlock_irqrestore(&r->consumer_lock, flags);
	return ret;
}

static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
					      void **array, int n)
{
	int ret;

	spin_lock_bh(&r->consumer_lock);
	ret = __ptr_ring_consume_batched(r, array, n);
	spin_unlock_bh(&r->consumer_lock);
	return ret;
}

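/*
 * Illustrative sketch (hypothetical caller, not part of this header): a
 * consumer running in BH context, e.g. from a tasklet or NAPI poll, can drain
 * up to a fixed budget per pass with the _bh batched variant; "process" is a
 * made-up callback:
 *
 *	void *batch[64];
 *	int i, n;
 *
 *	n = ptr_ring_consume_batched_bh(r, batch, 64);
 *	for (i = 0; i < n; i++)
 *		process(batch[i]);
 */
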
/* Cast to structure type and call a function without discarding from FIFO.
 * Function must return a value.
 * Callers must take consumer_lock.
 */
#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r)))

#define PTR_RING_PEEK_CALL(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_BH(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	unsigned long __PTR_RING_PEEK_CALL_f; \
	\
	spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v; \
})

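/*
 * Illustrative sketch (hypothetical use, not from this header): peek at the
 * head entry and derive a value from it without consuming it.  The callback
 * must tolerate a NULL argument, since __ptr_ring_peek() returns NULL when
 * the ring is empty; "entry_len" is a made-up helper:
 *
 *	int len = PTR_RING_PEEK_CALL(r, entry_len);
 */
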
static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
{
	return kcalloc(size, sizeof(void *), gfp);
}

static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
{
	r->size = size;
	r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue));
	/* We need to set batch at least to 1 to make logic
	 * in __ptr_ring_discard_one work correctly.
	 * Batching too much (because ring is small) would cause a lot of
	 * burstiness. Needs tuning, for now disable batching.
	 */
	if (r->batch > r->size / 2 || !r->batch)
		r->batch = 1;
}

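/*
 * Worked example: on a 64-bit system with SMP_CACHE_BYTES == 64, the default
 * batch is 64 * 2 / 8 = 16 entries (two cache lines worth of pointers).  A
 * ring of 1024 entries keeps batch = 16, while a ring of 16 entries fails the
 * "batch > size / 2" test and falls back to batch = 1, i.e. batching is
 * effectively disabled for small rings.
 */
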
static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
{
	r->queue = __ptr_ring_init_queue_alloc(size, gfp);
	if (!r->queue)
		return -ENOMEM;

	__ptr_ring_set_size(r, size);
	r->producer = r->consumer_head = r->consumer_tail = 0;
	spin_lock_init(&r->producer_lock);
	spin_lock_init(&r->consumer_lock);

	return 0;
}

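/*
 * Illustrative sketch (hypothetical driver code, not part of this header):
 * typical lifecycle of a ring carrying sk_buff pointers.  Error handling and
 * locking context are reduced to a minimum; "deliver" is a made-up consumer
 * callback, and teardown assumes the ring has already been drained.
 *
 *	struct ptr_ring ring;
 *	struct sk_buff *skb;
 *
 *	if (ptr_ring_init(&ring, 256, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	producer:	if (ptr_ring_produce(&ring, skb))
 *				kfree_skb(skb);		ring full, drop
 *
 *	consumer:	while ((skb = ptr_ring_consume(&ring)))
 *				deliver(skb);
 *
 *	teardown:	ptr_ring_cleanup(&ring, NULL);
 */
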
/*
 * Return entries into ring. Destroy entries that don't fit.
 *
 * Note: this is expected to be a rare slow path operation.
 *
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular if you consume ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
				      void (*destroy)(void *))
{
	unsigned long flags;
	int head;

	spin_lock_irqsave(&r->consumer_lock, flags);
	spin_lock(&r->producer_lock);

	if (!r->size)
		goto done;

	/*
	 * Clean out buffered entries (for simplicity). This way the code
	 * below can test entries for NULL and, if non-NULL, assume they
	 * are valid.
	 */
	head = r->consumer_head - 1;
	while (likely(head >= r->consumer_tail))
		r->queue[head--] = NULL;
	r->consumer_tail = r->consumer_head;

	/*
	 * Go over entries in batch, start moving head back and copy entries.
	 * Stop when we run into previously unconsumed entries.
	 */
	while (n) {
		head = r->consumer_head - 1;
		if (head < 0)
			head = r->size - 1;
		if (r->queue[head]) {
			/* This batch entry will have to be destroyed. */
			goto done;
		}
		r->queue[head] = batch[--n];
		r->consumer_tail = head;
		/* matching READ_ONCE in __ptr_ring_empty for lockless tests */
		WRITE_ONCE(r->consumer_head, head);
	}

done:
	/* Destroy all entries left in the batch. */
	while (n)
		destroy(batch[--n]);
	spin_unlock(&r->producer_lock);
	spin_unlock_irqrestore(&r->consumer_lock, flags);
}

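/*
 * Illustrative sketch (hypothetical caller): if a batch obtained with
 * ptr_ring_consume_batched() cannot be processed after all, the unprocessed
 * tail can be pushed back so it will be consumed again in its original order;
 * "free_entry" is a made-up destroy callback for entries that no longer fit:
 *
 *	ptr_ring_unconsume(r, batch + done, n - done, free_entry);
 */
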
static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
					   int size, gfp_t gfp,
					   void (*destroy)(void *))
{
	int producer = 0;
	void **old;
	void *ptr;

	while ((ptr = __ptr_ring_consume(r)))
		if (producer < size)
			queue[producer++] = ptr;
		else if (destroy)
			destroy(ptr);

	__ptr_ring_set_size(r, size);
	r->producer = producer;
	r->consumer_head = 0;
	r->consumer_tail = 0;
	old = r->queue;
	r->queue = queue;

	return old;
}

/*
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular if you consume ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
				  void (*destroy)(void *))
{
	unsigned long flags;
	void **queue = __ptr_ring_init_queue_alloc(size, gfp);
	void **old;

	if (!queue)
		return -ENOMEM;

	spin_lock_irqsave(&(r)->consumer_lock, flags);
	spin_lock(&(r)->producer_lock);

	old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy);

	spin_unlock(&(r)->producer_lock);
	spin_unlock_irqrestore(&(r)->consumer_lock, flags);

	kfree(old);

	return 0;
}

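/*
 * Illustrative sketch (hypothetical caller): growing a ring from process
 * context; entries that do not fit into the new queue are released through
 * the destroy callback (here a made-up "free_entry"):
 *
 *	err = ptr_ring_resize(r, new_size, GFP_KERNEL, free_entry);
 */
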
/*
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular if you consume ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
					   unsigned int nrings,
					   int size, gfp_t gfp,
					   void (*destroy)(void *))
{
	unsigned long flags;
	void ***queues;
	int i;

	queues = kmalloc_array(nrings, sizeof(*queues), gfp);
	if (!queues)
		goto noqueues;

	for (i = 0; i < nrings; ++i) {
		queues[i] = __ptr_ring_init_queue_alloc(size, gfp);
		if (!queues[i])
			goto nomem;
	}

	for (i = 0; i < nrings; ++i) {
		spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
		spin_lock(&(rings[i])->producer_lock);
		queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
						  size, gfp, destroy);
		spin_unlock(&(rings[i])->producer_lock);
		spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
	}

	for (i = 0; i < nrings; ++i)
		kfree(queues[i]);

	kfree(queues);

	return 0;

nomem:
	while (--i >= 0)
		kfree(queues[i]);

	kfree(queues);

noqueues:
	return -ENOMEM;
}

static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
{
	void *ptr;

	if (destroy)
		while ((ptr = ptr_ring_consume(r)))
			destroy(ptr);
	kfree(r->queue);
}

#endif /* _LINUX_PTR_RING_H */