[linux.git] / arch / x86 / kernel / apic / ipi.c (Linux 6.14-rc3)
// SPDX-License-Identifier: GPL-2.0
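/*
 * Helpers for sending inter-processor interrupts (IPIs): IPI shorthand
 * handling plus the default implementations for xAPIC physical and
 * logical destination modes.
 */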

#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/smp.h>

#include <asm/io_apic.h>

#include "local.h"

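/*
 * Enabled when IPI broadcasts may use the APIC shorthands instead of
 * addressing each online CPU individually; see apic_smt_update().
 */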
DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);

#ifdef CONFIG_SMP
static int apic_ipi_shorthand_off __ro_after_init;

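/* Parse the "no_ipi_broadcast=" command-line option. */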
static __init int apic_ipi_shorthand(char *str)
{
        get_option(&str, &apic_ipi_shorthand_off);
        return 1;
}
__setup("no_ipi_broadcast=", apic_ipi_shorthand);

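/* Report the resulting IPI broadcast mode once at boot. */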
static int __init print_ipi_mode(void)
{
        pr_info("IPI shorthand broadcast: %s\n",
                apic_ipi_shorthand_off ? "disabled" : "enabled");
        return 0;
}
late_initcall(print_ipi_mode);

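/*
 * Re-evaluate whether the IPI shorthands can be used. Invoked from the
 * CPU hotplug path whenever the set of online CPUs changes.
 */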
void apic_smt_update(void)
{
        /*
         * Do not switch to broadcast mode if:
         * - Disabled on the command line
         * - Only a single CPU is online
         * - Not all present CPUs have been at least booted once
         *
         * The latter is important as the local APIC might be in some
         * random state and a broadcast might cause havoc. That's
         * especially true for NMI broadcasting.
         */
        if (apic_ipi_shorthand_off || num_online_cpus() == 1 ||
            !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) {
                static_branch_disable(&apic_use_ipi_shorthand);
        } else {
                static_branch_enable(&apic_use_ipi_shorthand);
        }
}

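/*
 * Send @vector to all CPUs except the current one, using the shorthand
 * when it is safe (see apic_smt_update()) and the online mask otherwise.
 */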
void apic_send_IPI_allbutself(unsigned int vector)
{
        if (num_online_cpus() < 2)
                return;

        if (static_branch_likely(&apic_use_ipi_shorthand))
                __apic_send_IPI_allbutself(vector);
        else
                __apic_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

/*
 * Send a 'reschedule' IPI to another CPU. It goes straight through and
 * wastes no time serializing anything. Worst case is that we lose a
 * reschedule ...
 */
void native_smp_send_reschedule(int cpu)
{
        if (unlikely(cpu_is_offline(cpu))) {
                WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
                return;
        }
        __apic_send_IPI(cpu, RESCHEDULE_VECTOR);
}

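/* Kick one remote CPU to run its pending SMP function call. */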
void native_send_call_func_single_ipi(int cpu)
{
        __apic_send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}

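/*
 * Send the SMP function-call IPI to all CPUs in @mask. If the mask plus
 * the current CPU covers every online CPU, the cheaper 'all' or
 * 'allbutself' shorthand is used instead of addressing each CPU.
 */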
void native_send_call_func_ipi(const struct cpumask *mask)
{
        if (static_branch_likely(&apic_use_ipi_shorthand)) {
                unsigned int cpu = smp_processor_id();

                if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))
                        goto sendmask;

                if (cpumask_test_cpu(cpu, mask))
                        __apic_send_IPI_all(CALL_FUNCTION_VECTOR);
                else if (num_online_cpus() > 1)
                        __apic_send_IPI_allbutself(CALL_FUNCTION_VECTOR);
                return;
        }

sendmask:
        __apic_send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
}

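/*
 * Send an NMI to an offline CPU. Only valid when the APIC driver
 * supports it and the target CPU has been booted at least once, so its
 * local APIC is in a known state.
 */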
void apic_send_nmi_to_offline_cpu(unsigned int cpu)
{
        if (WARN_ON_ONCE(!apic->nmi_to_offline_cpu))
                return;
        if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, &cpus_booted_once_mask)))
                return;
        apic->send_IPI(cpu, NMI_VECTOR);
}
#endif /* CONFIG_SMP */

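/* Build the ICR2 value: the xAPIC destination field occupies the upper byte. */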
static inline int __prepare_ICR2(unsigned int mask)
{
        return SET_XAPIC_DEST_FIELD(mask);
}

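/*
 * Poll the ICR busy bit, giving up after 1000 * 100us (~100ms).
 * Returns 0 once the ICR is idle, or APIC_ICR_BUSY on timeout.
 */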
u32 apic_mem_wait_icr_idle_timeout(void)
{
        int cnt;

        for (cnt = 0; cnt < 1000; cnt++) {
                if (!(apic_read(APIC_ICR) & APIC_ICR_BUSY))
                        return 0;
                inc_irq_stat(icr_read_retry_count);
                udelay(100);
        }
        return APIC_ICR_BUSY;
}

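/* Wait without a timeout until the previous ICR command has completed. */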
void apic_mem_wait_icr_idle(void)
{
        while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
                cpu_relax();
}

/*
 * This is safe against interruption because it only writes the lower 32
 * bits of the APIC_ICR register. The destination field is ignored for
 * shorthand IPIs.
 *
 *  wait_icr_idle()
 *  write(ICR2, dest)
 *  NMI
 *      wait_icr_idle()
 *      write(ICR)
 *      wait_icr_idle()
 *  write(ICR)
 *
 * This function does not need to disable interrupts as there is no ICR2
 * interaction. The memory write is direct except when the machine is
 * affected by the 11AP Pentium erratum, which turns the plain write into
 * an XCHG operation.
 */
static void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
{
        /*
         * Wait for the previous ICR command to complete.  Use
         * apic_mem_wait_icr_idle_timeout() for the NMI vector as there
         * have been issues where otherwise the system hangs when the
         * panic CPU tries to stop the others before launching the kdump
         * kernel.
         */
        if (unlikely(vector == NMI_VECTOR))
                apic_mem_wait_icr_idle_timeout();
        else
                apic_mem_wait_icr_idle();

        /* Destination field (ICR2) and the destination mode are ignored */
        native_apic_mem_write(APIC_ICR, __prepare_ICR(shortcut, vector, 0));
}

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int dest_mask, int vector,
                                   unsigned int dest_mode)
{
        /* See comment in __default_send_IPI_shortcut() */
        if (unlikely(vector == NMI_VECTOR))
                apic_mem_wait_icr_idle_timeout();
        else
                apic_mem_wait_icr_idle();

        /* Set the IPI destination field in the ICR */
        native_apic_mem_write(APIC_ICR2, __prepare_ICR2(dest_mask));
        /* Send it with the proper destination mode */
        native_apic_mem_write(APIC_ICR, __prepare_ICR(0, vector, dest_mode));
}

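/*
 * Send @vector to one CPU in physical destination mode. Interrupts are
 * disabled so the ICR2 write cannot be clobbered by an IPI sent from an
 * interrupt handler.
 */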
void default_send_IPI_single_phys(int cpu, int vector)
{
        unsigned long flags;

        local_irq_save(flags);
        __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
                                      vector, APIC_DEST_PHYSICAL);
        local_irq_restore(flags);
}

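/*
 * Physical destination mode cannot address multiple CPUs with a single
 * IPI, so iterate the mask and send one IPI per CPU.
 */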
void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
        unsigned long flags;
        unsigned long cpu;

        local_irq_save(flags);
        for_each_cpu(cpu, mask) {
                __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
                                cpu), vector, APIC_DEST_PHYSICAL);
        }
        local_irq_restore(flags);
}

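/* As above, but skip the CPU doing the sending. */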
void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
                                                 int vector)
{
        unsigned int cpu, this_cpu = smp_processor_id();
        unsigned long flags;

        local_irq_save(flags);
        for_each_cpu(cpu, mask) {
                if (cpu == this_cpu)
                        continue;
                __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
                                 cpu), vector, APIC_DEST_PHYSICAL);
        }
        local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
        __apic_send_IPI_mask(cpumask_of(cpu), vector);
}

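/* Shorthand variants: the destination field in ICR2 is ignored. */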
void default_send_IPI_allbutself(int vector)
{
        __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}

void default_send_IPI_all(int vector)
{
        __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}

void default_send_IPI_self(int vector)
{
        __default_send_IPI_shortcut(APIC_DEST_SELF, vector);
}

#ifdef CONFIG_X86_32
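/*
 * Logical destination mode (32-bit only). These helpers assume flat
 * logical addressing, where CPU n's logical APIC ID is 1U << n.
 */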
void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector)
{
        unsigned long flags;
        unsigned int cpu;

        local_irq_save(flags);
        for_each_cpu(cpu, mask)
                __default_send_IPI_dest_field(1U << cpu, vector, APIC_DEST_LOGICAL);
        local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
                                                 int vector)
{
        unsigned int cpu, this_cpu = smp_processor_id();
        unsigned long flags;

        local_irq_save(flags);
        for_each_cpu(cpu, mask) {
                if (cpu == this_cpu)
                        continue;
                __default_send_IPI_dest_field(1U << cpu, vector, APIC_DEST_LOGICAL);
        }
        local_irq_restore(flags);
}

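/*
 * Flat logical mode can multicast: one IPI reaches every CPU whose bit
 * is set in the first word of the mask.
 */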
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
        unsigned long mask = cpumask_bits(cpumask)[0];
        unsigned long flags;

        if (!mask)
                return;

        local_irq_save(flags);
        WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
        __default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
        local_irq_restore(flags);
}

#ifdef CONFIG_SMP
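/* Reverse-map an APIC ID to its Linux CPU number, or -1 if none matches. */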
static int convert_apicid_to_cpu(u32 apic_id)
{
        int i;

        for_each_possible_cpu(i) {
                if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
                        return i;
        }
        return -1;
}

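/*
 * Determine the current CPU number from the hardware APIC ID rather
 * than trusting per-CPU state; falls back to CPU 0 if the APIC is
 * unavailable or the ID is unknown.
 */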
int safe_smp_processor_id(void)
{
        u32 apicid;
        int cpuid;

        if (!boot_cpu_has(X86_FEATURE_APIC))
                return 0;

        apicid = read_apic_id();
        if (apicid == BAD_APICID)
                return 0;

        cpuid = convert_apicid_to_cpu(apicid);

        return cpuid >= 0 ? cpuid : 0;
}
#endif /* CONFIG_SMP */
#endif /* CONFIG_X86_32 */