1 /* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU segmented callback lists
 *
 * This seemingly RCU-private file must be available to SRCU users
 * because the size of the TREE SRCU srcu_struct structure depends
 * on these definitions.
 *
 * Copyright IBM Corporation, 2017
 */
14 #ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H
15 #define __INCLUDE_LINUX_RCU_SEGCBLIST_H
17 #include <linux/types.h>
18 #include <linux/atomic.h>
20 /* Simple unsegmented callback lists. */
22 struct rcu_head *head;
23 struct rcu_head **tail;
27 #define RCU_CBLIST_INITIALIZER(n) { .head = NULL, .tail = &n.head }
29 /* Complicated segmented callback lists. ;-) */
/*
 * Index values for segments in rcu_segcblist structure.
 *
 * The segments are as follows:
 *
 * [head, *tails[RCU_DONE_TAIL]):
 *	Callbacks whose grace period has elapsed, and thus can be invoked.
 * [*tails[RCU_DONE_TAIL], *tails[RCU_WAIT_TAIL]):
 *	Callbacks waiting for the current GP from the current CPU's viewpoint.
 * [*tails[RCU_WAIT_TAIL], *tails[RCU_NEXT_READY_TAIL]):
 *	Callbacks that arrived before the next GP started, again from
 *	the current CPU's viewpoint.  These can be handled by the next GP.
 * [*tails[RCU_NEXT_READY_TAIL], *tails[RCU_NEXT_TAIL]):
 *	Callbacks that might have arrived after the next GP started.
 *	There is some uncertainty as to when a given GP starts and
 *	ends, but a CPU knows the exact times if it is the one starting
 *	or ending the GP.  Other CPUs know that the previous GP ends
 *	before the next one starts.
 *
 * Note that RCU_WAIT_TAIL cannot be empty unless RCU_NEXT_READY_TAIL is also
 * empty.
 *
 * The ->gp_seq[] array contains the grace-period number at which the
 * corresponding segment of callbacks will be ready to invoke.  A given
 * element of this array is meaningful only when the corresponding segment
 * is non-empty, and it is never valid for RCU_DONE_TAIL (whose callbacks
 * are already ready to invoke) or for RCU_NEXT_TAIL (whose callbacks have
 * not yet been assigned a grace-period number).
 */
60 #define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
61 #define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */
62 #define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */
63 #define RCU_NEXT_TAIL 3
64 #define RCU_CBLIST_NSEGS 4
/*
 * ==NOCB Offloading state machine==
 *
 * ----------------------------------------------------------------------------
 * | SEGCBLIST_RCU_CORE |
 * | Callbacks processed by rcu_core() from softirqs or local |
 * | rcuc kthread, without holding nocb_lock. |
 * ----------------------------------------------------------------------------
 * ----------------------------------------------------------------------------
 * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
 * | Callbacks processed by rcu_core() from softirqs or local |
 * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads, |
 * | allowing nocb_timer to be armed. |
 * ----------------------------------------------------------------------------
 * -----------------------------------
 * --------------------------------------- ----------------------------------|
 * | SEGCBLIST_RCU_CORE | | | SEGCBLIST_RCU_CORE | |
 * | SEGCBLIST_LOCKING | | | SEGCBLIST_LOCKING | |
 * | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | |
 * | SEGCBLIST_KTHREAD_CB | | SEGCBLIST_KTHREAD_GP |
 * | CB kthread woke up and | | GP kthread woke up and |
 * | acknowledged SEGCBLIST_OFFLOADED. | | acknowledged SEGCBLIST_OFFLOADED|
 * | Processes callbacks concurrently | | |
 * | with rcu_core(), holding | | |
 * --------------------------------------- -----------------------------------
 * -----------------------------------
 * |--------------------------------------------------------------------------|
 * | SEGCBLIST_LOCKING | |
 * | SEGCBLIST_OFFLOADED | |
 * | SEGCBLIST_KTHREAD_GP | |
 * | SEGCBLIST_KTHREAD_CB |
 * | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops |
 * | handling callbacks. Enable bypass queueing. |
 * ----------------------------------------------------------------------------
 *
 * ==NOCB De-Offloading state machine==
 *
 * |--------------------------------------------------------------------------|
 * | SEGCBLIST_LOCKING | |
 * | SEGCBLIST_OFFLOADED | |
 * | SEGCBLIST_KTHREAD_CB | |
 * | SEGCBLIST_KTHREAD_GP |
 * | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
 * | ignores callbacks. Bypass enqueue is enabled. |
 * ----------------------------------------------------------------------------
 * |--------------------------------------------------------------------------|
 * | SEGCBLIST_RCU_CORE | |
 * | SEGCBLIST_LOCKING | |
 * | SEGCBLIST_OFFLOADED | |
 * | SEGCBLIST_KTHREAD_CB | |
 * | SEGCBLIST_KTHREAD_GP |
 * | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
 * | handles callbacks concurrently. Bypass enqueue is enabled. |
 * | Invoke RCU core so we make sure not to preempt it in the middle with |
 * | leaving some urgent work unattended within a jiffy. |
 * ----------------------------------------------------------------------------
 * |--------------------------------------------------------------------------|
 * | SEGCBLIST_RCU_CORE | |
 * | SEGCBLIST_LOCKING | |
 * | SEGCBLIST_KTHREAD_CB | |
 * | SEGCBLIST_KTHREAD_GP |
 * | CB/GP kthreads and local rcu_core() handle callbacks concurrently |
 * | holding nocb_lock. Wake up CB and GP kthreads if necessary. Disable |
 * | bypass enqueue. |
 * ----------------------------------------------------------------------------
 * -----------------------------------
 * ---------------------------------------------------------------------------|
 * | SEGCBLIST_RCU_CORE | | SEGCBLIST_RCU_CORE | |
 * | SEGCBLIST_LOCKING | | SEGCBLIST_LOCKING | |
 * | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP |
 * | GP kthread woke up and | CB kthread woke up and |
 * | acknowledged the fact that | acknowledged the fact that |
 * | SEGCBLIST_OFFLOADED got cleared. | SEGCBLIST_OFFLOADED got cleared. |
 * | | The CB kthread goes to sleep |
 * | The callbacks from the target CPU | until it ever gets re-offloaded. |
 * | will be ignored from the GP kthread | |
 * ----------------------------------------------------------------------------
 * -----------------------------------
 * ----------------------------------------------------------------------------
 * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING |
 * | Callbacks processed by rcu_core() from softirqs or local |
 * | rcuc kthread, while holding nocb_lock. Forbid nocb_timer to be armed. |
 * | Flush pending nocb_timer. Flush nocb bypass callbacks. |
 * ----------------------------------------------------------------------------
 * ----------------------------------------------------------------------------
 * | SEGCBLIST_RCU_CORE |
 * | Callbacks processed by rcu_core() from softirqs or local |
 * | rcuc kthread, without holding nocb_lock. |
 * ----------------------------------------------------------------------------
 */
199 #define SEGCBLIST_ENABLED BIT(0)
200 #define SEGCBLIST_RCU_CORE BIT(1)
201 #define SEGCBLIST_LOCKING BIT(2)
202 #define SEGCBLIST_KTHREAD_CB BIT(3)
203 #define SEGCBLIST_KTHREAD_GP BIT(4)
204 #define SEGCBLIST_OFFLOADED BIT(5)
206 struct rcu_segcblist {
207 struct rcu_head *head;
208 struct rcu_head **tails[RCU_CBLIST_NSEGS];
209 unsigned long gp_seq[RCU_CBLIST_NSEGS];
210 #ifdef CONFIG_RCU_NOCB_CPU
215 long seglen[RCU_CBLIST_NSEGS];
219 #define RCU_SEGCBLIST_INITIALIZER(n) \
222 .tails[RCU_DONE_TAIL] = &n.head, \
223 .tails[RCU_WAIT_TAIL] = &n.head, \
224 .tails[RCU_NEXT_READY_TAIL] = &n.head, \
225 .tails[RCU_NEXT_TAIL] = &n.head, \
228 #endif /* __INCLUDE_LINUX_RCU_SEGCBLIST_H */