Semantics and Behavior of Local Atomic Operations

Mathieu Desnoyers


This document explains the purpose of the local atomic operations, how
to implement them for any given architecture and shows how they can be used
properly. It also stresses the precautions that must be taken when reading
those local variables across CPUs when the order of memory writes matters.



* Purpose of local atomic operations

Local atomic operations are meant to provide fast and highly reentrant per CPU
counters. They minimize the performance cost of standard atomic operations by
removing the LOCK prefix and memory barriers normally required to synchronize
across CPUs.

Having fast per CPU atomic counters is interesting in many cases: it does not
require disabling interrupts to protect from interrupt handlers and it permits
coherent counters in NMI handlers. It is especially useful for tracing purposes
and for various performance monitoring counters.

Local atomic operations only guarantee variable modification atomicity wrt the
CPU which owns the data. Therefore, care must be taken to make sure that only
one CPU writes to the local_t data. This is done by using per cpu data and
making sure that we modify it from within a preemption safe context. It is
however permitted to read local_t data from any CPU: it will then appear to be
written out of order wrt other memory writes by the owner CPU.


* Implementation for a given architecture

This can be done by slightly modifying the standard atomic operations: only
their UP variant must be kept. It typically means removing the LOCK prefix (on
i386 and x86_64) and any SMP synchronization barrier. If the architecture does
not have a different behavior between SMP and UP, including asm-generic/local.h
in your architecture's local.h is sufficient.

The local_t type is defined as an opaque signed long by embedding an
atomic_long_t inside a structure. This is made so that a cast from this type
to a long fails. The definition looks like:

typedef struct { atomic_long_t a; } local_t;

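For reference, here is a rough sketch of what the generic fallback mentioned
above can look like, modeled on asm-generic/local.h (the exact set of macros
in a given tree may differ, so treat this as an illustration rather than the
reference implementation):

/* Generic local.h sketch: local ops simply map onto atomic_long_t ops.
 * Architectures that can do better (e.g. drop the LOCK prefix because only
 * the owner CPU ever writes the data) provide their own asm/local.h. */
#include <linux/percpu.h>
#include <asm/atomic.h>
#include <asm/types.h>

typedef struct { atomic_long_t a; } local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l, i)	atomic_long_set(&(l)->a, (i))
#define local_inc(l)	atomic_long_inc(&(l)->a)
#define local_dec(l)	atomic_long_dec(&(l)->a)
#define local_add(i, l)	atomic_long_add((i), &(l)->a)
#define local_sub(i, l)	atomic_long_sub((i), &(l)->a)
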

* Rules to follow when using local atomic operations

- Variables touched by local ops must be per cpu variables.
- _Only_ the CPU owner of these variables must write to them.
- This CPU can use local ops from any context (process, irq, softirq, nmi, ...)
  to update its local_t variables.
- Preemption (or interrupts) must be disabled when using local ops in
  process context to make sure the process won't be migrated to a
  different CPU between getting the per-cpu variable and doing the
  actual local op.
- When using local ops in interrupt context, no special care must be
  taken on a mainline kernel, since they will run on the local CPU with
  preemption already disabled. I suggest, however, explicitly disabling
  preemption anyway to make sure it will still work correctly on
  -rt kernels (see the sketch after this list).
- Reading the local cpu variable will provide the current copy of the
  variable.
- Reads of these variables can be done from any CPU, because updates to
  "long", aligned, variables are always atomic. Since no memory
  synchronization is done by the writer CPU, an outdated copy of the
  variable can be read when reading some _other_ cpu's variables.

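As an illustration of the interrupt-context rule above, here is a minimal
sketch (the handler and counter names are made up, not taken from an existing
driver) of an interrupt handler that explicitly disables preemption around the
local op, so the same code stays correct on -rt kernels where handlers may run
in preemptible thread context:

#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, irq_counter) = LOCAL_INIT(0);

/* Hypothetical handler: count interrupts on the CPU that services them. */
static irqreturn_t sample_interrupt(int irq, void *dev_id)
{
	/* Not strictly needed on mainline (hard irq context is already
	 * non-preemptible), but keeps the local op valid if the handler
	 * is ever run as a preemptible thread. */
	preempt_disable();
	local_inc(&__get_cpu_var(irq_counter));
	preempt_enable();

	return IRQ_HANDLED;
}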

* How to use local atomic operations

#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);


* Counting

Counting is done on all the bits of a signed long.

In preemptible context, use get_cpu_var() and put_cpu_var() around local atomic
operations: it makes sure that preemption is disabled around write access to
the per cpu variable. For instance:

	local_inc(&get_cpu_var(counters));
	put_cpu_var(counters);

If you are already in a preemption-safe context, you can directly use
__get_cpu_var() instead.

	local_inc(&__get_cpu_var(counters));

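The same pattern works for the other local operations (local_add(),
local_sub(), local_read(), ...). As a small illustration, assuming a
hypothetical per cpu counter of bytes handled (the variable and function names
below are made up), an arbitrary amount can be added in a single local
operation:

static DEFINE_PER_CPU(local_t, bytes_handled) = LOCAL_INIT(0);

static void account_bytes(unsigned long nr_bytes)
{
	/* get_cpu_var() disables preemption, so we stay on the owner CPU. */
	local_add(nr_bytes, &get_cpu_var(bytes_handled));
	put_cpu_var(bytes_handled);
}
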


* Reading the counters

Those local counters can be read from foreign CPUs to sum the count. Note that
the data seen by local_read across CPUs must be considered to be out of order
relative to other memory writes happening on the CPU that owns the data.

	long sum = 0;
	for_each_online_cpu(cpu)
		sum += local_read(&per_cpu(counters, cpu));

If you want to use a remote local_read to synchronize access to a resource
between CPUs, explicit smp_wmb() and smp_rmb() memory barriers must be used
respectively on the writer and the reader CPUs. This would be the case if you
use the local_t variable as a counter of bytes written in a buffer: there
should be a smp_wmb() between the buffer write and the counter increment and
also a smp_rmb() between the counter read and the buffer read.
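
As a rough sketch of that buffer example (the structure, sizes and function
names below are made up for illustration; only the placement of the barriers
relative to the local op matters):

#include <linux/percpu.h>
#include <asm/local.h>

#define SAMPLE_BUF_SIZE 4096

/* Hypothetical per cpu buffer plus byte counter. */
struct sample_buf {
	char data[SAMPLE_BUF_SIZE];
	local_t bytes;			/* bytes written so far */
};

static DEFINE_PER_CPU(struct sample_buf, sample_buf);

/* Writer side: runs on the CPU owning the buffer, preemption disabled. */
static void buf_write_byte(char c)
{
	struct sample_buf *b = &__get_cpu_var(sample_buf);
	long pos = local_read(&b->bytes);

	b->data[pos % SAMPLE_BUF_SIZE] = c;
	smp_wmb();		/* buffer write before counter increment */
	local_inc(&b->bytes);
}

/* Reader side: may run on any CPU. */
static long buf_bytes_available(int cpu)
{
	long count = local_read(&per_cpu(sample_buf, cpu).bytes);

	smp_rmb();		/* counter read before any buffer read */
	/* It is now safe to read the first 'count' bytes of
	 * per_cpu(sample_buf, cpu).data (modulo buffer wrap-around). */
	return count;
}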


Here is a sample module which implements a basic per cpu counter using local.h.

--- BEGIN ---
/* test-local.c
 *
 * Sample module for local.h usage.
 */


#include <asm/local.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/timer.h>

static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);

static struct timer_list test_timer;

/* IPI called on each CPU. */
static void test_each(void *info)
{
	/* Increment the counter from a non preemptible context */
	printk("Increment on cpu %d\n", smp_processor_id());
	local_inc(&__get_cpu_var(counters));

	/* This is what incrementing the variable would look like within a
	 * preemptible context (it disables preemption):
	 *
	 * local_inc(&get_cpu_var(counters));
	 * put_cpu_var(counters);
	 */
}

static void do_test_timer(unsigned long data)
{
	int cpu;

	/* Increment the counters */
	on_each_cpu(test_each, NULL, 0, 1);
	/* Read all the counters */
	printk("Counters read from CPU %d\n", smp_processor_id());
	for_each_online_cpu(cpu) {
		printk("Read : CPU %d, count %ld\n", cpu,
			local_read(&per_cpu(counters, cpu)));
	}
	del_timer(&test_timer);
	test_timer.expires = jiffies + 1000;
	add_timer(&test_timer);
}

static int __init test_init(void)
{
	/* initialize the timer that will increment the counter */
	init_timer(&test_timer);
	test_timer.function = do_test_timer;
	test_timer.expires = jiffies + 1;
	add_timer(&test_timer);

	return 0;
}

static void __exit test_exit(void)
{
	del_timer_sync(&test_timer);
}

module_init(test_init);
module_exit(test_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Local Atomic Ops");
--- END ---