#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 * Generic SMP support
 *		Alan Cox. <[email protected]>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/llist.h>

typedef void (*smp_call_func_t)(void *info);
struct call_single_data {
	struct llist_node llist;
	smp_call_func_t func;
	void *info;
	u16 flags;
};

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);

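/*
 * Usage sketch (illustrative only, not part of this header; the callback
 * and the target CPU number are made up):
 *
 *	static void remote_hello(void *info)
 *	{
 *		pr_info("hello from CPU %d\n", smp_processor_id());
 *	}
 *
 *	smp_call_function_single(1, remote_hello, NULL, 1);
 *
 * The callback runs on CPU 1 from IPI context and must not sleep; with
 * wait != 0 the caller does not return until it has completed there.
 */
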
/*
 * Call a function on all processors
 */
int on_each_cpu(smp_call_func_t func, void *info, int wait);

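/*
 * Usage sketch (illustrative only; the callback name is hypothetical):
 *
 *	static void drain_local_stats(void *info)
 *	{
 *		...
 *	}
 *
 *	on_each_cpu(drain_local_stats, NULL, 1);
 *
 * This runs the callback on every online CPU, including the calling one,
 * and with wait != 0 returns only after all of them have finished.
 */
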
/*
 * Call a function on processors specified by mask, which might include
 * the local one.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		void *info, bool wait);

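/*
 * Usage sketch (illustrative only; 'dev', its cpumask and the callback
 * are hypothetical):
 *
 *	on_each_cpu_mask(dev->cpus, drain_local_stats, dev, true);
 *
 * Only the CPUs set in the mask run the callback; the calling CPU takes
 * part only if it is included in the mask.
 */
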
/*
 * Call a function on each processor for which the supplied function
 * cond_func returns a positive value. This may include the local
 * processor.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		smp_call_func_t func, void *info, bool wait,
		gfp_t gfp_flags);

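/*
 * Usage sketch (illustrative only; the per-CPU variable and both
 * callbacks are hypothetical):
 *
 *	static bool cpu_has_pending(int cpu, void *info)
 *	{
 *		return per_cpu(pending_work, cpu) != 0;
 *	}
 *
 *	on_each_cpu_cond(cpu_has_pending, process_pending, NULL, true,
 *			 GFP_KERNEL);
 *
 * cond_func is evaluated for every online CPU and the IPI is sent only
 * to those for which it returned true; gfp_flags is used to allocate the
 * temporary cpumask needed for that.
 */
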
int smp_call_function_single_async(int cpu, struct call_single_data *csd);

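/*
 * Usage sketch (illustrative only; the csd, the callback and the CPU
 * number are hypothetical):
 *
 *	static struct call_single_data my_csd = {
 *		.func	= my_callback,
 *		.info	= &my_data,
 *	};
 *
 *	smp_call_function_single_async(3, &my_csd);
 *
 * Unlike the synchronous variants this returns immediately; the caller
 * owns the csd and must not reuse it until my_callback has run on CPU 3.
 */
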
#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);


/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
int smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);

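/*
 * Usage sketch (illustrative only; do_sync() and some_mask are
 * hypothetical):
 *
 *	smp_call_function(do_sync, NULL, 1);
 *
 * runs do_sync() on every other online CPU and waits for completion,
 * while
 *
 *	get_cpu();
 *	smp_call_function_many(some_mask, do_sync, NULL, true);
 *	put_cpu();
 *
 * restricts the IPIs to the CPUs set in some_mask (still excluding the
 * calling CPU); smp_call_function_many() expects preemption to be
 * disabled by the caller, which smp_call_function() does internally.
 */
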
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);

void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);

/*
 * Generic and arch helpers
 */
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt

/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 * These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
static inline int up_smp_call_function(smp_call_func_t func, void *info)
{
	return 0;
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))

static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }

#endif /* !SMP */

/*
 * smp_processor_id(): get the current CPU ID.
 *
 * if DEBUG_PREEMPT is enabled then we check whether it is
 * used in a preemption-safe way. (smp_processor_id() is safe
 * if it's used in a preemption-off critical section, or in
 * a thread that is bound to the current CPU.)
 *
 * NOTE: raw_smp_processor_id() is for internal use only
 * (smp_processor_id() is the preferred variant), but in rare
 * instances it might also be used to turn off false positives
 * (i.e. smp_processor_id() use that the debugging code reports but
 * which use for some reason is legal). Don't use this to hack around
 * the warning message, as your code might not work under PREEMPT.
 */
#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() raw_smp_processor_id()
#endif

#define get_cpu()		({ preempt_disable(); smp_processor_id(); })
#define put_cpu()		preempt_enable()

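/*
 * Usage sketch (illustrative only; my_counter is a hypothetical per-CPU
 * variable):
 *
 *	int cpu = get_cpu();		preemption is now disabled
 *	this_cpu_inc(my_counter);	safe: the task cannot migrate
 *	put_cpu();			preemption enabled again
 *
 * get_cpu()/put_cpu() bracket a region that must stay on one CPU; plain
 * smp_processor_id() is only meaningful inside such a region or in a
 * thread bound to a single CPU.
 */
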
/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

extern void arch_enable_nonboot_cpus_begin(void);
extern void arch_enable_nonboot_cpus_end(void);

void smp_setup_processor_id(void);

#endif /* __LINUX_SMP_H */