/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 *      tree variant.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul McKenney <[email protected]>
 */

#ifndef _LINUX_SRCU_TREE_H
#define _LINUX_SRCU_TREE_H

#include <linux/rcu_node_tree.h>
#include <linux/completion.h>

struct srcu_node;
struct srcu_struct;

/*
 * Per-CPU structure feeding into leaf srcu_node, similar in function
 * to rcu_data.
 */
struct srcu_data {
        /* Read-side state. */
        atomic_long_t srcu_lock_count[2];       /* Locks per CPU. */
        atomic_long_t srcu_unlock_count[2];     /* Unlocks per CPU. */
        int srcu_reader_flavor;                 /* Reader flavor for this srcu_struct structure. */

        /* Update-side state. */
        spinlock_t __private lock ____cacheline_internodealigned_in_smp;
        struct rcu_segcblist srcu_cblist;       /* List of callbacks. */
        unsigned long srcu_gp_seq_needed;       /* Furthest future GP needed. */
        unsigned long srcu_gp_seq_needed_exp;   /* Furthest future exp GP. */
        bool srcu_cblist_invoking;              /* Invoking these CBs? */
        struct timer_list delay_work;           /* Delay for CB invoking. */
        struct work_struct work;                /* Context for CB invoking. */
        struct rcu_head srcu_barrier_head;      /* For srcu_barrier() use. */
        struct srcu_node *mynode;               /* Leaf srcu_node. */
        unsigned long grpmask;                  /* Mask for leaf srcu_node */
                                                /*  ->srcu_data_have_cbs[]. */
        int cpu;                                /* CPU number for this srcu_data. */
        struct srcu_struct *ssp;                /* Back-pointer to enclosing srcu_struct. */
};

/* Values for ->srcu_reader_flavor. */
#define SRCU_READ_FLAVOR_NORMAL 0x1             // srcu_read_lock().
#define SRCU_READ_FLAVOR_NMI    0x2             // srcu_read_lock_nmisafe().
#define SRCU_READ_FLAVOR_LITE   0x4             // srcu_read_lock_lite().

/*
 * Node in SRCU combining tree, similar in function to rcu_node.
 */
struct srcu_node {
        spinlock_t __private lock;
        unsigned long srcu_have_cbs[4];         /* GP seq for children having CBs, but only */
                                                /*  if greater than ->srcu_gp_seq. */
        unsigned long srcu_data_have_cbs[4];    /* Which srcu_data structs have CBs for given GP? */
        unsigned long srcu_gp_seq_needed_exp;   /* Furthest future exp GP. */
        struct srcu_node *srcu_parent;          /* Next up in tree. */
        int grplo;                              /* Least CPU for node. */
        int grphi;                              /* Biggest CPU for node. */
};

/*
 * Per-SRCU-domain structure, update-side data linked from srcu_struct.
 */
struct srcu_usage {
        struct srcu_node *node;                 /* Combining tree. */
        struct srcu_node *level[RCU_NUM_LVLS + 1];
                                                /* First node at each level. */
        int srcu_size_state;                    /* Small-to-big transition state. */
        struct mutex srcu_cb_mutex;             /* Serialize CB preparation. */
        spinlock_t __private lock;              /* Protect counters and size state. */
        struct mutex srcu_gp_mutex;             /* Serialize GP work. */
        unsigned long srcu_gp_seq;              /* Grace-period seq #. */
        unsigned long srcu_gp_seq_needed;       /* Latest gp_seq needed. */
        unsigned long srcu_gp_seq_needed_exp;   /* Furthest future exp GP. */
        unsigned long srcu_gp_start;            /* Last GP start timestamp (jiffies). */
        unsigned long srcu_last_gp_end;         /* Last GP end timestamp (ns). */
        unsigned long srcu_size_jiffies;        /* Current contention-measurement interval. */
        unsigned long srcu_n_lock_retries;      /* Contention events in current interval. */
        unsigned long srcu_n_exp_nodelay;       /* # expedited no-delays in current GP phase. */
        bool sda_is_static;                     /* May ->sda be passed to free_percpu()? */
        unsigned long srcu_barrier_seq;         /* srcu_barrier seq #. */
        struct mutex srcu_barrier_mutex;        /* Serialize barrier ops. */
        struct completion srcu_barrier_completion;
                                                /* Awaken srcu_barrier() requester at end. */
        atomic_t srcu_barrier_cpu_cnt;          /* # CPUs not yet posting a */
                                                /*  callback for the barrier */
                                                /*  operation. */
        unsigned long reschedule_jiffies;
        unsigned long reschedule_count;
        struct delayed_work work;               /* Workqueue context for GP processing. */
        struct srcu_struct *srcu_ssp;           /* Back-pointer to enclosing srcu_struct. */
};

/*
 * Per-SRCU-domain structure, similar in function to rcu_state.
 */
struct srcu_struct {
        unsigned int srcu_idx;                  /* Current rdr array element. */
        struct srcu_data __percpu *sda;         /* Per-CPU srcu_data array. */
        struct lockdep_map dep_map;
        struct srcu_usage *srcu_sup;            /* Update-side data. */
};

// Values for size state variable (->srcu_size_state).  Once the state
// has been set to SRCU_SIZE_ALLOC, the grace-period code advances through
// this state machine one step per grace period until the SRCU_SIZE_BIG state
// is reached.  Otherwise, the state machine remains in the SRCU_SIZE_SMALL
// state indefinitely.
#define SRCU_SIZE_SMALL         0       // No srcu_node combining tree, ->node == NULL.
#define SRCU_SIZE_ALLOC         1       // An srcu_node tree is being allocated, initialized,
                                        //  and then referenced by ->node.  It is not yet used.
#define SRCU_SIZE_WAIT_BARRIER  2       // The srcu_node tree starts being used by everything
                                        //  except call_srcu(), especially by srcu_barrier().
                                        //  By the end of this state, all CPUs and threads
                                        //  are aware of this tree's existence.
#define SRCU_SIZE_WAIT_CALL     3       // The srcu_node tree starts being used by call_srcu().
                                        //  By the end of this state, all of the call_srcu()
                                        //  invocations that were running on a non-boot CPU
                                        //  and using the boot CPU's callback queue will have
                                        //  completed.
#define SRCU_SIZE_WAIT_CBS1     4       // Don't trust the ->srcu_have_cbs[] grace-period
#define SRCU_SIZE_WAIT_CBS2     5       //  sequence elements or the ->srcu_data_have_cbs[]
#define SRCU_SIZE_WAIT_CBS3     6       //  CPU-bitmask elements until all four elements of
#define SRCU_SIZE_WAIT_CBS4     7       //  each array have been initialized.
#define SRCU_SIZE_BIG           8       // The srcu_node combining tree is fully initialized
                                        //  and all aspects of it are being put to use.

/* Values for state variable (bottom bits of ->srcu_gp_seq). */
#define SRCU_STATE_IDLE         0
#define SRCU_STATE_SCAN1        1
#define SRCU_STATE_SCAN2        2

/*
 * Values for initializing the grace-period sequence fields.  Higher values
 * allow wrap-arounds to occur earlier.
 * The second value, which includes state bits, is useful for static
 * initialization of srcu_usage, where srcu_gp_seq_needed is expected to have
 * some state value in its lower bits (otherwise it would appear to be already
 * initialized to check_init_srcu_struct()).
 */
#define SRCU_GP_SEQ_INITIAL_VAL ((0UL - 100UL) << RCU_SEQ_CTR_SHIFT)
#define SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE (SRCU_GP_SEQ_INITIAL_VAL - 1)
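/*
 * Worked example (informational note, not upstream text): the low
 * RCU_SEQ_CTR_SHIFT bits of a gp_seq value hold the grace-period phase and
 * the remaining upper bits hold the grace-period counter, so
 * SRCU_GP_SEQ_INITIAL_VAL starts that counter at -100.  The sequence
 * therefore wraps through zero after roughly 100 grace periods, exercising
 * the wrap-around code paths soon after an srcu_struct is initialized.
 */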

#define __SRCU_USAGE_INIT(name)                                                                 \
{                                                                                               \
        .lock = __SPIN_LOCK_UNLOCKED(name.lock),                                                \
        .srcu_gp_seq = SRCU_GP_SEQ_INITIAL_VAL,                                                 \
        .srcu_gp_seq_needed = SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE,                               \
        .srcu_gp_seq_needed_exp = SRCU_GP_SEQ_INITIAL_VAL,                                      \
        .work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0),                                 \
}

#define __SRCU_STRUCT_INIT_COMMON(name, usage_name)                                             \
        .srcu_sup = &usage_name,                                                                \
        __SRCU_DEP_MAP_INIT(name)

#define __SRCU_STRUCT_INIT_MODULE(name, usage_name)                                             \
{                                                                                               \
        __SRCU_STRUCT_INIT_COMMON(name, usage_name)                                             \
}

#define __SRCU_STRUCT_INIT(name, usage_name, pcpu_name)                                         \
{                                                                                               \
        .sda = &pcpu_name,                                                                      \
        __SRCU_STRUCT_INIT_COMMON(name, usage_name)                                             \
}

/*
 * Define and initialize a srcu struct at build time.
 * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
 *
 * Note that although DEFINE_STATIC_SRCU() hides the name from other
 * files, the per-CPU variable rules nevertheless require that the
 * chosen name be globally unique.  These rules also prohibit use of
 * DEFINE_STATIC_SRCU() within a function.  If these rules are too
 * restrictive, declare the srcu_struct manually.  For example, in
 * each file:
 *
 *      static struct srcu_struct my_srcu;
 *
 * Then, before the first use of each my_srcu, manually initialize it:
 *
 *      init_srcu_struct(&my_srcu);
 *
 * See include/linux/percpu-defs.h for the rules on per-CPU variables.
 */
#ifdef MODULE
# define __DEFINE_SRCU(name, is_static)                                                         \
        static struct srcu_usage name##_srcu_usage = __SRCU_USAGE_INIT(name##_srcu_usage);      \
        is_static struct srcu_struct name = __SRCU_STRUCT_INIT_MODULE(name, name##_srcu_usage); \
        extern struct srcu_struct * const __srcu_struct_##name;                                 \
        struct srcu_struct * const __srcu_struct_##name                                         \
                __section("___srcu_struct_ptrs") = &name
#else
# define __DEFINE_SRCU(name, is_static)                                                         \
        static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data);                              \
        static struct srcu_usage name##_srcu_usage = __SRCU_USAGE_INIT(name##_srcu_usage);      \
        is_static struct srcu_struct name =                                                     \
                __SRCU_STRUCT_INIT(name, name##_srcu_usage, name##_srcu_data)
#endif
#define DEFINE_SRCU(name)               __DEFINE_SRCU(name, /* not static */)
#define DEFINE_STATIC_SRCU(name)        __DEFINE_SRCU(name, static)

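/*
 * Illustrative sketch (informational, not part of this header's API): a
 * typical build-time definition together with the matching reader and
 * updater pattern, using srcu_read_lock(), srcu_dereference(),
 * srcu_read_unlock(), and synchronize_srcu() from include/linux/srcu.h.
 * The names my_srcu, my_data, new_p, and old_p are hypothetical.
 *
 *      DEFINE_STATIC_SRCU(my_srcu);
 *
 *      // Reader: may sleep inside the critical section.
 *      int idx = srcu_read_lock(&my_srcu);
 *      p = srcu_dereference(my_data, &my_srcu);
 *      // ... use p ...
 *      srcu_read_unlock(&my_srcu, idx);
 *
 *      // Updater: publish the new version, wait out pre-existing readers.
 *      rcu_assign_pointer(my_data, new_p);
 *      synchronize_srcu(&my_srcu);
 *      kfree(old_p);
 */
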
void synchronize_srcu_expedited(struct srcu_struct *ssp);
void srcu_barrier(struct srcu_struct *ssp);
void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Returns an index that must be passed to the matching
 * srcu_read_unlock_lite().
 *
 * Note that this_cpu_inc() is an RCU read-side critical section either
 * because it disables interrupts, because it is a single instruction,
 * or because it is a read-modify-write atomic operation, depending on
 * the whims of the architecture.
 */
static inline int __srcu_read_lock_lite(struct srcu_struct *ssp)
{
        int idx;

        RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_lock_lite().");
        idx = READ_ONCE(ssp->srcu_idx) & 0x1;
        this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter); /* Y */
        barrier(); /* Avoid leaking the critical section. */
        return idx;
}

/*
 * Removes the count for the old reader from the appropriate
 * per-CPU element of the srcu_struct.  Note that this may well be a
 * different CPU than that which was incremented by the corresponding
 * srcu_read_lock_lite(), but it must be within the same task.
 *
 * Note that this_cpu_inc() is an RCU read-side critical section either
 * because it disables interrupts, because it is a single instruction,
 * or because it is a read-modify-write atomic operation, depending on
 * the whims of the architecture.
 */
static inline void __srcu_read_unlock_lite(struct srcu_struct *ssp, int idx)
{
        barrier();  /* Avoid leaking the critical section. */
        this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter);  /* Z */
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_unlock_lite().");
}
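
/*
 * Illustrative sketch (informational, with hypothetical names my_srcu and
 * my_data): the public srcu_read_lock_lite() and srcu_read_unlock_lite()
 * wrappers in include/linux/srcu.h end up in the two helpers above.  A
 * lite reader looks like an ordinary SRCU reader but omits the read-side
 * smp_mb(), relying on the update side to supply the needed ordering:
 *
 *      int idx;
 *
 *      idx = srcu_read_lock_lite(&my_srcu);
 *      p = srcu_dereference(my_data, &my_srcu);
 *      // ... sleepable read-side critical section ...
 *      srcu_read_unlock_lite(&my_srcu, idx);
 */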

void __srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor);

// Record _lite() usage even for CONFIG_PROVE_RCU=n kernels.
static inline void srcu_check_read_flavor_lite(struct srcu_struct *ssp)
{
        struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

        if (likely(READ_ONCE(sdp->srcu_reader_flavor) & SRCU_READ_FLAVOR_LITE))
                return;

        // Note that the cmpxchg() in __srcu_check_read_flavor() is fully ordered.
        __srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_LITE);
}

// Record non-_lite() usage only for CONFIG_PROVE_RCU=y kernels.
static inline void srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor)
{
        if (IS_ENABLED(CONFIG_PROVE_RCU))
                __srcu_check_read_flavor(ssp, read_flavor);
}

#endif /* _LINUX_SRCU_TREE_H */