// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP Support
 *
 * Copyright (C) 1999 Walt Drummond <[email protected]>
 * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <[email protected]>
 *
 * Lots of stuff stolen from arch/alpha/kernel/smp.c
 *
 * 01/05/16 Rohit Seth <[email protected]>	IA64-SMP functions. Reorganized
 *	the existing code (on the lines of x86 port).
 * 00/09/11 David Mosberger <[email protected]> Do loops_per_jiffy
 *	calibration on each CPU.
 * 00/08/23 Asit Mallick <[email protected]> fixed logical processor id
 * 00/03/31 Rohit Seth <[email protected]>	Fixes for Bootstrap Processor
 *	& cpu_online_map now gets done here (instead of setup.c)
 * 99/10/05 davidm	Update to bring it in sync with new command-line processing
 *	scheme.
 * 10/13/00 Goutham Rao <[email protected]> Updated smp_call_function and
 *	smp_call_function_single to resend IPI on timeouts
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/mca.h>
#include <asm/xtp.h>

/*
 * Note: alignment of 4 entries/cacheline was empirically determined
 * to be a good tradeoff between hot cachelines & spreading the array
 * across too many cachelines.
 */
static struct local_tlb_flush_counts {
	unsigned int count;
} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];

static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned short [NR_CPUS],
				     shadow_flush_counts);

#define IPI_CALL_FUNC		0
#define IPI_CPU_STOP		1
#define IPI_CALL_FUNC_SINGLE	2
#define IPI_KDUMP_CPU_STOP	3

/* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, ipi_operation);

extern void cpu_halt (void);

static void
stop_this_cpu(void)
{
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	max_xtp();
	local_irq_disable();
	cpu_halt();
}

void
cpu_die(void)
{
	max_xtp();
	local_irq_disable();
	cpu_halt();
	/* Should never be here */
	BUG();
	for (;;);
}

irqreturn_t
handle_IPI (int irq, void *dev_id)
{
	int this_cpu = get_cpu();
	unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
	unsigned long ops;

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ffz(~ops);
			ops &= ~(1 << which);

			switch (which) {
			case IPI_CPU_STOP:
				stop_this_cpu();
				break;
			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;
			case IPI_CALL_FUNC_SINGLE:
				generic_smp_call_function_single_interrupt();
				break;
#ifdef CONFIG_KEXEC
			case IPI_KDUMP_CPU_STOP:
				unw_init_running(kdump_cpu_freeze, NULL);
				break;
#endif
			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
						this_cpu, which);
				break;
			}
		} while (ops);
		mb();	/* Order data access and bit testing. */
	}
	put_cpu();
	return IRQ_HANDLED;
}


/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_single (int dest_cpu, int op)
{
	set_bit(op, &per_cpu(ipi_operation, dest_cpu));
	ia64_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_allbutself (int op)
{
	unsigned int i;

	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_mask(const struct cpumask *mask, int op)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		send_IPI_single(cpu, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_all (int op)
{
	int i;

	for_each_online_cpu(i) {
		send_IPI_single(i, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_self (int op)
{
	send_IPI_single(smp_processor_id(), op);
}

#ifdef CONFIG_KEXEC
void
kdump_smp_send_stop(void)
{
	send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
}

void
kdump_smp_send_init(void)
{
	unsigned int cpu, self_cpu;
	self_cpu = smp_processor_id();
	for_each_online_cpu(cpu) {
		if (cpu != self_cpu) {
			if(kdump_status[cpu] == 0)
				ia64_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
		}
	}
}
#endif
/*
 * Called with preemption disabled.
 */
void
smp_send_reschedule (int cpu)
{
	ia64_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

/*
 * Called with preemption disabled.
 */
static void
smp_send_local_flush_tlb (int cpu)
{
	ia64_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
}

void
smp_local_flush_tlb(void)
{
	/*
	 * Use atomic ops. Otherwise, the load/increment/store sequence from
	 * a "++" operation can have the line stolen between the load & store.
	 * The overhead of the atomic op is negligible in this case & offers
	 * significant benefit for the brief periods where lots of cpus
	 * are simultaneously flushing TLBs.
	 */
	ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
	local_flush_tlb_all();
}

#define FLUSH_DELAY	5 /* Usec backoff to eliminate excessive cacheline bouncing */

void
smp_flush_tlb_cpumask(cpumask_t xcpumask)
{
	unsigned short *counts = __ia64_per_cpu_var(shadow_flush_counts);
	cpumask_t cpumask = xcpumask;
	int mycpu, cpu, flush_mycpu = 0;

	preempt_disable();
	mycpu = smp_processor_id();

	for_each_cpu(cpu, &cpumask)
		counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff;

	mb();
	for_each_cpu(cpu, &cpumask) {
		if (cpu == mycpu)
			flush_mycpu = 1;
		else
			smp_send_local_flush_tlb(cpu);
	}

	if (flush_mycpu)
		smp_local_flush_tlb();

	for_each_cpu(cpu, &cpumask)
		while(counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff))
			udelay(FLUSH_DELAY);

	preempt_enable();
}

void
smp_flush_tlb_all (void)
{
	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
}

void
smp_flush_tlb_mm (struct mm_struct *mm)
{
	cpumask_var_t cpus;
	preempt_disable();
	/* this happens for the common case of a single-threaded fork():  */
	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
	{
		local_finish_flush_tlb_mm(mm);
		preempt_enable();
		return;
	}
	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
		smp_call_function((void (*)(void *))local_finish_flush_tlb_mm,
			mm, 1);
	} else {
		cpumask_copy(cpus, mm_cpumask(mm));
		smp_call_function_many(cpus,
			(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
		free_cpumask_var(cpus);
	}
	local_irq_disable();
	local_finish_flush_tlb_mm(mm);
	local_irq_enable();
	preempt_enable();
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_IPI_mask(mask, IPI_CALL_FUNC);
}

/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
void
smp_send_stop (void)
{
	send_IPI_allbutself(IPI_CPU_STOP);
}

int
setup_profiling_timer (unsigned int multiplier)
{
	return -EINVAL;
}