/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/sched/mm.h>
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/scs.h>
#include <linux/percpu-rwsem.h>
#include <linux/cpuset.h>
#include <linux/random.h>
#include <linux/cc_platform.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * struct cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:      The current cpu state
 * @target:     The target state
 * @fail:       Current CPU hotplug callback state
 * @thread:     Pointer to the hotplug thread
 * @should_run: Thread should execute
 * @rollback:   Perform a rollback
 * @single:     Single callback invocation
 * @bringup:    Single callback bringup or teardown selector
 * @node:       Remote CPU node; for multi-instance, do a
 *              single entry callback for install/remove
 * @last:       For multi-instance rollback, remember how far we got
 * @cb_state:   The state for a single callback (install/uninstall)
 * @result:     Result of the operation
 * @ap_sync_state:      State for AP synchronization
 * @done_up:    Signal completion to the issuer of the task for cpu-up
 * @done_down:  Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
        enum cpuhp_state        state;
        enum cpuhp_state        target;
        enum cpuhp_state        fail;
#ifdef CONFIG_SMP
        struct task_struct      *thread;
        bool                    should_run;
        bool                    rollback;
        bool                    single;
        bool                    bringup;
        struct hlist_node       *node;
        struct hlist_node       *last;
        enum cpuhp_state        cb_state;
        int                     result;
        atomic_t                ap_sync_state;
        struct completion       done_up;
        struct completion       done_down;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
        .fail = CPUHP_INVALID,
};

#ifdef CONFIG_SMP
cpumask_t cpus_booted_once_mask;
#endif

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
        STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
        STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);

static inline void cpuhp_lock_acquire(bool bringup)
{
        lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
        lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

#endif

/**
 * struct cpuhp_step - Hotplug state machine step
 * @name:       Name of the step
 * @startup:    Startup function of the step
 * @teardown:   Teardown function of the step
 * @cant_stop:  Bringup/teardown can't be stopped at this step
 * @multi_instance:     State has multiple instances which get added afterwards
 */
struct cpuhp_step {
        const char              *name;
        union {
                int             (*single)(unsigned int cpu);
                int             (*multi)(unsigned int cpu,
                                         struct hlist_node *node);
        } startup;
        union {
                int             (*single)(unsigned int cpu);
                int             (*multi)(unsigned int cpu,
                                         struct hlist_node *node);
        } teardown;
        /* private: */
        struct hlist_head       list;
        /* public: */
        bool                    cant_stop;
        bool                    multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
        return cpuhp_hp_states + state;
}

static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
{
        return bringup ? !step->startup.single : !step->teardown.single;
}

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:        The cpu for which the callback should be invoked
 * @state:      The state to do callbacks for
 * @bringup:    True if the bringup callback should be invoked
 * @node:       For multi-instance, do a single entry callback for install/remove
 * @lastp:      For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 *
 * Return: %0 on success or a negative errno code
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
                                 bool bringup, struct hlist_node *node,
                                 struct hlist_node **lastp)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        struct cpuhp_step *step = cpuhp_get_step(state);
        int (*cbm)(unsigned int cpu, struct hlist_node *node);
        int (*cb)(unsigned int cpu);
        int ret, cnt;

        if (st->fail == state) {
                st->fail = CPUHP_INVALID;
                return -EAGAIN;
        }

        if (cpuhp_step_empty(bringup, step)) {
                WARN_ON_ONCE(1);
                return 0;
        }

        if (!step->multi_instance) {
                WARN_ON_ONCE(lastp && *lastp);
                cb = bringup ? step->startup.single : step->teardown.single;

                trace_cpuhp_enter(cpu, st->target, state, cb);
                ret = cb(cpu);
                trace_cpuhp_exit(cpu, st->state, state, ret);
                return ret;
        }
        cbm = bringup ? step->startup.multi : step->teardown.multi;

        /* Single invocation for instance add/remove */
        if (node) {
                WARN_ON_ONCE(lastp && *lastp);
                trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
                ret = cbm(cpu, node);
                trace_cpuhp_exit(cpu, st->state, state, ret);
                return ret;
        }

        /* State transition. Invoke on all instances */
        cnt = 0;
        hlist_for_each(node, &step->list) {
                if (lastp && node == *lastp)
                        break;

                trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
                ret = cbm(cpu, node);
                trace_cpuhp_exit(cpu, st->state, state, ret);
                if (ret) {
                        if (!lastp)
                                goto err;

                        *lastp = node;
                        return ret;
                }
                cnt++;
        }
        if (lastp)
                *lastp = NULL;
        return 0;
err:
        /* Rollback the instances if one failed */
        cbm = !bringup ? step->startup.multi : step->teardown.multi;
        if (!cbm)
                return ret;

        hlist_for_each(node, &step->list) {
                if (!cnt--)
                        break;

                trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
                ret = cbm(cpu, node);
                trace_cpuhp_exit(cpu, st->state, state, ret);
                /*
                 * Rollback must not fail!
                 */
                WARN_ON_ONCE(ret);
        }
        return ret;
}
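
/*
 * Illustrative sketch (not part of the kernel source): the multi-instance
 * handling above backs the public registration API from <linux/cpuhotplug.h>.
 * A driver typically sets up a multi-instance state once and then adds one
 * hlist_node per device instance. For a dynamic state the setup call returns
 * the allocated state number. All "my_*" names below are hypothetical.
 *
 *        static int my_online(unsigned int cpu, struct hlist_node *node)
 *        {
 *                struct my_dev *dev = hlist_entry(node, struct my_dev, node);
 *                ...
 *                return 0;
 *        }
 *
 *        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "my:online",
 *                                      my_online, NULL);
 *        if (ret < 0)
 *                return ret;
 *        my_state = ret;
 *        cpuhp_state_add_instance(my_state, &my_dev->node);
 */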

#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
        /*
         * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
         * purposes as that state is handled explicitly in cpu_down.
         */
        return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
        struct completion *done = bringup ? &st->done_up : &st->done_down;
        wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
        struct completion *done = bringup ? &st->done_up : &st->done_down;
        complete(done);
}

/*
 * The former STARTING/DYING states run with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
        return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}

/* Synchronization state management */
enum cpuhp_sync_state {
        SYNC_STATE_DEAD,
        SYNC_STATE_KICKED,
        SYNC_STATE_SHOULD_DIE,
        SYNC_STATE_ALIVE,
        SYNC_STATE_SHOULD_ONLINE,
        SYNC_STATE_ONLINE,
};

#ifdef CONFIG_HOTPLUG_CORE_SYNC
/**
 * cpuhp_ap_update_sync_state - Update synchronization state during bringup/teardown
 * @state:      The synchronization state to set
 *
 * No synchronization point. Just update of the synchronization state, but implies
 * a full barrier so that the AP changes are visible before the control CPU proceeds.
 */
static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state)
{
        atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);

        (void)atomic_xchg(st, state);
}

void __weak arch_cpuhp_sync_state_poll(void) { cpu_relax(); }

static bool cpuhp_wait_for_sync_state(unsigned int cpu, enum cpuhp_sync_state state,
                                      enum cpuhp_sync_state next_state)
{
        atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
        ktime_t now, end, start = ktime_get();
        int sync;

        end = start + 10ULL * NSEC_PER_SEC;

        sync = atomic_read(st);
        while (1) {
                if (sync == state) {
                        if (!atomic_try_cmpxchg(st, &sync, next_state))
                                continue;
                        return true;
                }

                now = ktime_get();
                if (now > end) {
                        /* Timeout. Leave the state unchanged */
                        return false;
                } else if (now - start < NSEC_PER_MSEC) {
                        /* Poll for one millisecond */
                        arch_cpuhp_sync_state_poll();
                } else {
                        usleep_range_state(USEC_PER_MSEC, 2 * USEC_PER_MSEC, TASK_UNINTERRUPTIBLE);
                }
                sync = atomic_read(st);
        }
        return true;
}
#else  /* CONFIG_HOTPLUG_CORE_SYNC */
static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state) { }
#endif /* !CONFIG_HOTPLUG_CORE_SYNC */

#ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
/**
 * cpuhp_ap_report_dead - Update synchronization state to DEAD
 *
 * No synchronization point. Just update of the synchronization state.
 */
void cpuhp_ap_report_dead(void)
{
        cpuhp_ap_update_sync_state(SYNC_STATE_DEAD);
}

void __weak arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { }

/*
 * Late CPU shutdown synchronization point. Cannot use cpuhp_state::done_down
 * because the AP cannot issue complete() at this stage.
 */
static void cpuhp_bp_sync_dead(unsigned int cpu)
{
        atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
        int sync = atomic_read(st);

        do {
                /* CPU can have reported dead already. Don't overwrite that! */
                if (sync == SYNC_STATE_DEAD)
                        break;
        } while (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_SHOULD_DIE));

        if (cpuhp_wait_for_sync_state(cpu, SYNC_STATE_DEAD, SYNC_STATE_DEAD)) {
                /* CPU reached dead state. Invoke the cleanup function */
                arch_cpuhp_cleanup_dead_cpu(cpu);
                return;
        }

        /* No further action possible. Emit message and give up. */
        pr_err("CPU%u failed to report dead state\n", cpu);
}
#else /* CONFIG_HOTPLUG_CORE_SYNC_DEAD */
static inline void cpuhp_bp_sync_dead(unsigned int cpu) { }
#endif /* !CONFIG_HOTPLUG_CORE_SYNC_DEAD */

#ifdef CONFIG_HOTPLUG_CORE_SYNC_FULL
/**
 * cpuhp_ap_sync_alive - Synchronize AP with the control CPU once it is alive
 *
 * Updates the AP synchronization state to SYNC_STATE_ALIVE and waits
 * for the BP to release it.
 */
void cpuhp_ap_sync_alive(void)
{
        atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);

        cpuhp_ap_update_sync_state(SYNC_STATE_ALIVE);

        /* Wait for the control CPU to release it. */
        while (atomic_read(st) != SYNC_STATE_SHOULD_ONLINE)
                cpu_relax();
}

static bool cpuhp_can_boot_ap(unsigned int cpu)
{
        atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
        int sync = atomic_read(st);

again:
        switch (sync) {
        case SYNC_STATE_DEAD:
                /* CPU is properly dead */
                break;
        case SYNC_STATE_KICKED:
                /* CPU did not come up in previous attempt */
                break;
        case SYNC_STATE_ALIVE:
                /* CPU is stuck in cpuhp_ap_sync_alive(). */
                break;
        default:
                /* CPU failed to report online or dead and is in limbo state. */
                return false;
        }

        /* Prepare for booting */
        if (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_KICKED))
                goto again;

        return true;
}

void __weak arch_cpuhp_cleanup_kick_cpu(unsigned int cpu) { }

/*
 * Early CPU bringup synchronization point. Cannot use cpuhp_state::done_up
 * because the AP cannot issue complete() so early in the bringup.
 */
static int cpuhp_bp_sync_alive(unsigned int cpu)
{
        int ret = 0;

        if (!IS_ENABLED(CONFIG_HOTPLUG_CORE_SYNC_FULL))
                return 0;

        if (!cpuhp_wait_for_sync_state(cpu, SYNC_STATE_ALIVE, SYNC_STATE_SHOULD_ONLINE)) {
                pr_err("CPU%u failed to report alive state\n", cpu);
                ret = -EIO;
        }

        /* Let the architecture clean up the kick alive mechanics. */
        arch_cpuhp_cleanup_kick_cpu(cpu);
        return ret;
}
#else /* CONFIG_HOTPLUG_CORE_SYNC_FULL */
static inline int cpuhp_bp_sync_alive(unsigned int cpu) { return 0; }
static inline bool cpuhp_can_boot_ap(unsigned int cpu) { return true; }
#endif /* !CONFIG_HOTPLUG_CORE_SYNC_FULL */

/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
        percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

int cpus_read_trylock(void)
{
        return percpu_down_read_trylock(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_trylock);

void cpus_read_unlock(void)
{
        percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);
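
/*
 * Illustrative sketch (not part of the kernel source): cpus_read_lock() and
 * cpus_read_unlock() bracket sections that must observe a stable set of
 * online CPUs, e.g. an iteration over cpu_online_mask. The do_something()
 * helper below is hypothetical:
 *
 *        cpus_read_lock();
 *        for_each_online_cpu(cpu)
 *                do_something(cpu);
 *        cpus_read_unlock();
 */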

void cpus_write_lock(void)
{
        percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
        percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
        /*
         * We can't have hotplug operations before userspace starts running,
         * and some init codepaths will knowingly not take the hotplug lock.
         * This is all valid, so mute lockdep until it makes sense to report
         * unheld locks.
         */
        if (system_state < SYSTEM_RUNNING)
                return;

        percpu_rwsem_assert_held(&cpu_hotplug_lock);
}

#ifdef CONFIG_LOCKDEP
int lockdep_is_cpus_held(void)
{
        return percpu_rwsem_is_held(&cpu_hotplug_lock);
}
#endif

static void lockdep_acquire_cpus_lock(void)
{
        rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
}

static void lockdep_release_cpus_lock(void)
{
        rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled++;
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
        if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
                return;
        cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
        cpu_maps_update_begin();
        __cpu_hotplug_enable();
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
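
/*
 * Illustrative sketch (not part of the kernel source): cpu_hotplug_disable()
 * and cpu_hotplug_enable() nest via the cpu_hotplug_disabled counter, so
 * every disable must be paired with exactly one enable. While disabled,
 * attempts to online or offline a CPU fail with -EBUSY:
 *
 *        cpu_hotplug_disable();
 *        ... work that must not race with cpu_up()/cpu_down() ...
 *        cpu_hotplug_enable();
 */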

#else

static void lockdep_acquire_cpus_lock(void)
{
}

static void lockdep_release_cpus_lock(void)
{
}

#endif  /* CONFIG_HOTPLUG_CPU */

/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

#ifdef CONFIG_HOTPLUG_SMT

enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
static unsigned int cpu_smt_max_threads __ro_after_init;
unsigned int cpu_smt_num_threads __read_mostly = UINT_MAX;

void __init cpu_smt_disable(bool force)
{
        if (!cpu_smt_possible())
                return;

        if (force) {
                pr_info("SMT: Force disabled\n");
                cpu_smt_control = CPU_SMT_FORCE_DISABLED;
        } else {
                pr_info("SMT: disabled\n");
                cpu_smt_control = CPU_SMT_DISABLED;
        }
        cpu_smt_num_threads = 1;
}

/*
 * The decision whether SMT is supported can only be made after the full
 * CPU identification. Called from architecture code.
 */
void __init cpu_smt_set_num_threads(unsigned int num_threads,
                                    unsigned int max_threads)
{
        WARN_ON(!num_threads || (num_threads > max_threads));

        if (max_threads == 1)
                cpu_smt_control = CPU_SMT_NOT_SUPPORTED;

        cpu_smt_max_threads = max_threads;

        /*
         * If SMT has been disabled via the kernel command line or SMT is
         * not supported, set cpu_smt_num_threads to 1 for consistency.
         * If enabled, take the architecture-requested number of threads
         * to bring up into account.
         */
        if (cpu_smt_control != CPU_SMT_ENABLED)
                cpu_smt_num_threads = 1;
        else if (num_threads < cpu_smt_num_threads)
                cpu_smt_num_threads = num_threads;
}
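
/*
 * Illustrative sketch (not part of the kernel source): an architecture would
 * call cpu_smt_set_num_threads() once its topology enumeration is complete.
 * The function and variable names below are hypothetical pseudo-arch code:
 *
 *        void __init arch_init_smt(void)
 *        {
 *                unsigned int threads = arch_max_smt_threads();
 *
 *                cpu_smt_set_num_threads(threads, threads);
 *        }
 */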

static int __init smt_cmdline_disable(char *str)
{
        cpu_smt_disable(str && !strcmp(str, "force"));
        return 0;
}
early_param("nosmt", smt_cmdline_disable);

/*
 * For architectures supporting partial SMT states, check if the thread is
 * allowed. Otherwise this has already been checked through cpu_smt_max_threads
 * when setting the SMT level.
 */
static inline bool cpu_smt_thread_allowed(unsigned int cpu)
{
#ifdef CONFIG_SMT_NUM_THREADS_DYNAMIC
        return topology_smt_thread_allowed(cpu);
#else
        return true;
#endif
}

static inline bool cpu_bootable(unsigned int cpu)
{
        if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
                return true;

        /* All CPUs are bootable if controls are not configured */
        if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED)
                return true;

        /* All CPUs are bootable if CPU is not SMT capable */
        if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
                return true;

        if (topology_is_primary_thread(cpu))
                return true;

        /*
         * On x86 it's required to boot all logical CPUs at least once so
         * that the init code can get a chance to set CR4.MCE on each
         * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
         * core will shut down the machine.
         */
        return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
}

/* Returns true if SMT is supported and not forcefully (irreversibly) disabled */
bool cpu_smt_possible(void)
{
        return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
                cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
}
EXPORT_SYMBOL_GPL(cpu_smt_possible);

#else
static inline bool cpu_bootable(unsigned int cpu) { return true; }
#endif

static inline enum cpuhp_state
cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
        enum cpuhp_state prev_state = st->state;
        bool bringup = st->state < target;

        st->rollback = false;
        st->last = NULL;

        st->target = target;
        st->single = false;
        st->bringup = bringup;
        if (cpu_dying(cpu) != !bringup)
                set_cpu_dying(cpu, !bringup);

        return prev_state;
}

static inline void
cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
                  enum cpuhp_state prev_state)
{
        bool bringup = !st->bringup;

        st->target = prev_state;

        /*
         * Already rolling back. No need to invert the bringup value or to
         * change the current state.
         */
        if (st->rollback)
                return;

        st->rollback = true;

        /*
         * If we have st->last we need to undo partial multi_instance of this
         * state first. Otherwise start undo at the previous state.
         */
        if (!st->last) {
                if (st->bringup)
                        st->state--;
                else
                        st->state++;
        }

        st->bringup = bringup;
        if (cpu_dying(cpu) != !bringup)
                set_cpu_dying(cpu, !bringup);
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
        if (!st->single && st->state == st->target)
                return;

        st->result = 0;
        /*
         * Make sure the above stores are visible before should_run becomes
         * true. Paired with the smp_mb() in cpuhp_thread_fun().
         */
        smp_mb();
        st->should_run = true;
        wake_up_process(st->thread);
        wait_for_ap_thread(st, st->bringup);
}

static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
                         enum cpuhp_state target)
{
        enum cpuhp_state prev_state;
        int ret;

        prev_state = cpuhp_set_state(cpu, st, target);
        __cpuhp_kick_ap(st);
        if ((ret = st->result)) {
                cpuhp_reset_state(cpu, st, prev_state);
                __cpuhp_kick_ap(st);
        }

        return ret;
}

static int bringup_wait_for_ap_online(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

        /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
        wait_for_ap_thread(st, true);
        if (WARN_ON_ONCE(!cpu_online(cpu)))
                return -ECANCELED;

        /* Unpark the hotplug thread of the target cpu */
        kthread_unpark(st->thread);

        /*
         * SMT soft disabling on X86 requires bringing the CPU out of the
         * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
         * CPU marked itself as booted_once in notify_cpu_starting() so the
         * cpu_bootable() check will now return false if this is not the
         * primary sibling.
         */
        if (!cpu_bootable(cpu))
                return -ECANCELED;
        return 0;
}

#ifdef CONFIG_HOTPLUG_SPLIT_STARTUP
static int cpuhp_kick_ap_alive(unsigned int cpu)
{
        if (!cpuhp_can_boot_ap(cpu))
                return -EAGAIN;

        return arch_cpuhp_kick_ap_alive(cpu, idle_thread_get(cpu));
}

static int cpuhp_bringup_ap(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int ret;

        /*
         * Some architectures have to walk the irq descriptors to
         * set up the vector space for the cpu which comes online.
         * Prevent irq alloc/free across the bringup.
         */
        irq_lock_sparse();

        ret = cpuhp_bp_sync_alive(cpu);
        if (ret)
                goto out_unlock;

        ret = bringup_wait_for_ap_online(cpu);
        if (ret)
                goto out_unlock;

        irq_unlock_sparse();

        if (st->target <= CPUHP_AP_ONLINE_IDLE)
                return 0;

        return cpuhp_kick_ap(cpu, st, st->target);

out_unlock:
        irq_unlock_sparse();
        return ret;
}
#else
static int bringup_cpu(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        struct task_struct *idle = idle_thread_get(cpu);
        int ret;

        if (!cpuhp_can_boot_ap(cpu))
                return -EAGAIN;

        /*
         * Some architectures have to walk the irq descriptors to
         * set up the vector space for the cpu which comes online.
         *
         * Prevent irq alloc/free across the bringup by acquiring the
         * sparse irq lock. Hold it until the upcoming CPU completes the
         * startup in cpuhp_online_idle(), which avoids intermediate
         * synchronization points in the architecture code.
         */
        irq_lock_sparse();

        ret = __cpu_up(cpu, idle);
        if (ret)
                goto out_unlock;

        ret = cpuhp_bp_sync_alive(cpu);
        if (ret)
                goto out_unlock;

        ret = bringup_wait_for_ap_online(cpu);
        if (ret)
                goto out_unlock;

        irq_unlock_sparse();

        if (st->target <= CPUHP_AP_ONLINE_IDLE)
                return 0;

        return cpuhp_kick_ap(cpu, st, st->target);

out_unlock:
        irq_unlock_sparse();
        return ret;
}
#endif

static int finish_cpu(unsigned int cpu)
{
        struct task_struct *idle = idle_thread_get(cpu);
        struct mm_struct *mm = idle->active_mm;

        /*
         * idle_task_exit() will have switched to &init_mm, now
         * clean up any remaining active_mm state.
         */
        if (mm != &init_mm)
                idle->active_mm = &init_mm;
        mmdrop_lazy_tlb(mm);
        return 0;
}

/*
 * Hotplug state machine related functions
 */

/*
 * Get the next state to run. Empty ones will be skipped. Returns true if a
 * state must be run.
 *
 * st->state will be modified ahead of time, to match state_to_run, as if it
 * had already run.
 */
static bool cpuhp_next_state(bool bringup,
                             enum cpuhp_state *state_to_run,
                             struct cpuhp_cpu_state *st,
                             enum cpuhp_state target)
{
        do {
                if (bringup) {
                        if (st->state >= target)
                                return false;

                        *state_to_run = ++st->state;
                } else {
                        if (st->state <= target)
                                return false;

                        *state_to_run = st->state--;
                }

                if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
                        break;
        } while (true);

        return true;
}

static int __cpuhp_invoke_callback_range(bool bringup,
                                         unsigned int cpu,
                                         struct cpuhp_cpu_state *st,
                                         enum cpuhp_state target,
                                         bool nofail)
{
        enum cpuhp_state state;
        int ret = 0;

        while (cpuhp_next_state(bringup, &state, st, target)) {
                int err;

                err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
                if (!err)
                        continue;

                if (nofail) {
                        pr_warn("CPU %u %s state %s (%d) failed (%d)\n",
                                cpu, bringup ? "UP" : "DOWN",
                                cpuhp_get_step(st->state)->name,
                                st->state, err);
                        ret = -1;
                } else {
                        ret = err;
                        break;
                }
        }

        return ret;
}

static inline int cpuhp_invoke_callback_range(bool bringup,
                                              unsigned int cpu,
                                              struct cpuhp_cpu_state *st,
                                              enum cpuhp_state target)
{
        return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false);
}

static inline void cpuhp_invoke_callback_range_nofail(bool bringup,
                                                      unsigned int cpu,
                                                      struct cpuhp_cpu_state *st,
                                                      enum cpuhp_state target)
{
        __cpuhp_invoke_callback_range(bringup, cpu, st, target, true);
}

static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
{
        if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
                return true;
        /*
         * When CPU hotplug is disabled, taking the CPU down is not
         * possible because takedown_cpu() and the architecture and
         * subsystem specific mechanisms are not available. So the CPU
         * which would be completely unplugged again needs to stay around
         * in the current state.
         */
        return st->state <= CPUHP_BRINGUP_CPU;
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                              enum cpuhp_state target)
{
        enum cpuhp_state prev_state = st->state;
        int ret = 0;

        ret = cpuhp_invoke_callback_range(true, cpu, st, target);
        if (ret) {
                pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n",
                         ret, cpu, cpuhp_get_step(st->state)->name,
                         st->state);

                cpuhp_reset_state(cpu, st, prev_state);
                if (can_rollback_cpu(st))
                        WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
                                                            prev_state));
        }
        return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static int cpuhp_should_run(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

        return st->should_run;
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        bool bringup = st->bringup;
        enum cpuhp_state state;

        if (WARN_ON_ONCE(!st->should_run))
                return;

        /*
         * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
         * that if we see ->should_run we also see the rest of the state.
         */
        smp_mb();

        /*
         * The BP holds the hotplug lock, but we're now running on the AP;
         * ensure that anybody asserting the lock is held will actually
         * find it so.
         */
        lockdep_acquire_cpus_lock();
        cpuhp_lock_acquire(bringup);

        if (st->single) {
                state = st->cb_state;
                st->should_run = false;
        } else {
                st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
                if (!st->should_run)
                        goto end;
        }

        WARN_ON_ONCE(!cpuhp_is_ap_state(state));

        if (cpuhp_is_atomic_state(state)) {
                local_irq_disable();
                st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
                local_irq_enable();

                /*
                 * STARTING/DYING must not fail!
                 */
                WARN_ON_ONCE(st->result);
        } else {
                st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
        }

        if (st->result) {
                /*
                 * If we fail on a rollback, we're up a creek without a
                 * paddle, no way forward, no way back. We lose, thanks for
                 * playing.
                 */
                WARN_ON_ONCE(st->rollback);
                st->should_run = false;
        }

end:
        cpuhp_lock_release(bringup);
        lockdep_release_cpus_lock();

        if (!st->should_run)
                complete_ap_thread(st, bringup);
}

/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
                         struct hlist_node *node)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int ret;

        if (!cpu_online(cpu))
                return 0;

        cpuhp_lock_acquire(false);
        cpuhp_lock_release(false);

        cpuhp_lock_acquire(true);
        cpuhp_lock_release(true);

        /*
         * If we are up and running, use the hotplug thread. For early calls
         * we invoke the thread function directly.
         */
        if (!st->thread)
                return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

        st->rollback = false;
        st->last = NULL;

        st->node = node;
        st->bringup = bringup;
        st->cb_state = state;
        st->single = true;

        __cpuhp_kick_ap(st);

        /*
         * If we failed and did a partial, do a rollback.
         */
        if ((ret = st->result) && st->last) {
                st->rollback = true;
                st->bringup = !bringup;

                __cpuhp_kick_ap(st);
        }

        /*
         * Clean up the leftovers so the next hotplug operation won't use
         * stale data.
         */
        st->node = st->last = NULL;
        return ret;
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        enum cpuhp_state prev_state = st->state;
        int ret;

        cpuhp_lock_acquire(false);
        cpuhp_lock_release(false);

        cpuhp_lock_acquire(true);
        cpuhp_lock_release(true);

        trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
        ret = cpuhp_kick_ap(cpu, st, st->target);
        trace_cpuhp_exit(cpu, st->state, prev_state, ret);

        return ret;
}

static struct smp_hotplug_thread cpuhp_threads = {
        .store                  = &cpuhp_state.thread,
        .thread_should_run      = cpuhp_should_run,
        .thread_fn              = cpuhp_thread_fun,
        .thread_comm            = "cpuhp/%u",
        .selfparking            = true,
};

static __init void cpuhp_init_state(void)
{
        struct cpuhp_cpu_state *st;
        int cpu;

        for_each_possible_cpu(cpu) {
                st = per_cpu_ptr(&cpuhp_state, cpu);
                init_completion(&st->done_up);
                init_completion(&st->done_down);
        }
}

void __init cpuhp_threads_init(void)
{
        cpuhp_init_state();
        BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
        kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

#ifdef CONFIG_HOTPLUG_CPU
#ifndef arch_clear_mm_cpumask_cpu
#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
#endif

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        /*
         * This function is called after the cpu is taken down and marked
         * offline, so it's not like new tasks will ever get this cpu set in
         * their mm mask. -- Peter Zijlstra
         * Thus, we may use rcu_read_lock() here, instead of grabbing
         * full-fledged tasklist_lock.
         */
        WARN_ON(cpu_online(cpu));
        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /*
                 * Main thread might exit, but other threads may still have
                 * a valid mm. Find one.
                 */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                arch_clear_mm_cpumask_cpu(cpu, t->mm);
                task_unlock(t);
        }
        rcu_read_unlock();
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
        int err, cpu = smp_processor_id();

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        /*
         * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going
         * down, that the current state is CPUHP_TEARDOWN_CPU - 1.
         */
        WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));

        /*
         * Invoke the former CPU_DYING callbacks. DYING must not fail!
         */
        cpuhp_invoke_callback_range_nofail(false, cpu, st, target);

        /* Park the stopper thread */
        stop_machine_park(cpu);
        return 0;
}

static int takedown_cpu(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int err;

        /* Park the smpboot threads */
        kthread_park(st->thread);

        /*
         * Prevent irq alloc/free while the dying cpu reorganizes the
         * interrupt affinities.
         */
        irq_lock_sparse();

        /*
         * So now all preempt/rcu users must observe !cpu_active().
         */
        err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
        if (err) {
                /* CPU refused to die */
                irq_unlock_sparse();
                /* Unpark the hotplug thread so we can rollback there */
                kthread_unpark(st->thread);
                return err;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
         * all runnable tasks from the CPU, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        wait_for_ap_thread(st, false);
        BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

        /* Interrupts are moved away from the dying cpu, reenable alloc/free */
        irq_unlock_sparse();

        hotplug_cpu__broadcast_tick_pull(cpu);
        /* This actually kills the CPU. */
        __cpu_die(cpu);

        cpuhp_bp_sync_dead(cpu);

        tick_cleanup_dead_cpu(cpu);

        /*
         * Callbacks must be re-integrated right away to the RCU state machine.
         * Otherwise an RCU callback could block a further teardown function
         * waiting for its completion.
         */
        rcutree_migrate_callbacks(cpu);

        return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
        struct cpuhp_cpu_state *st = arg;

        complete_ap_thread(st, false);
}

void cpuhp_report_idle_dead(void)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

        BUG_ON(st->state != CPUHP_AP_OFFLINE);
        tick_assert_timekeeping_handover();
        rcutree_report_cpu_dead();
        st->state = CPUHP_AP_IDLE_DEAD;
        /*
         * We cannot call complete after rcutree_report_cpu_dead() so we delegate it
         * to an online cpu.
         */
        smp_call_function_single(cpumask_first(cpu_online_mask),
                                 cpuhp_complete_idle_dead, st, 0);
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                                enum cpuhp_state target)
{
        enum cpuhp_state prev_state = st->state;
        int ret = 0;

        ret = cpuhp_invoke_callback_range(false, cpu, st, target);
        if (ret) {
                pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n",
                         ret, cpu, cpuhp_get_step(st->state)->name,
                         st->state);

                cpuhp_reset_state(cpu, st, prev_state);

                if (st->state < prev_state)
                        WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
                                                            prev_state));
        }

        return ret;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
                           enum cpuhp_state target)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int prev_state, ret = 0;

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_present(cpu))
                return -EINVAL;

        cpus_write_lock();

        cpuhp_tasks_frozen = tasks_frozen;

        prev_state = cpuhp_set_state(cpu, st, target);
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread.
         */
        if (st->state > CPUHP_TEARDOWN_CPU) {
                st->target = max((int)target, CPUHP_TEARDOWN_CPU);
                ret = cpuhp_kick_ap_work(cpu);
                /*
                 * The AP side has done the error rollback already. Just
                 * return the error code.
                 */
                if (ret)
                        goto out;

                /*
                 * We might have stopped still in the range of the AP hotplug
                 * thread. Nothing to do anymore.
                 */
                if (st->state > CPUHP_TEARDOWN_CPU)
                        goto out;

                st->target = target;
        }
        /*
         * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
         * to do the further cleanups.
         */
        ret = cpuhp_down_callbacks(cpu, st, target);
        if (ret && st->state < prev_state) {
                if (st->state == CPUHP_TEARDOWN_CPU) {
                        cpuhp_reset_state(cpu, st, prev_state);
                        __cpuhp_kick_ap(st);
                } else {
                        WARN(1, "DEAD callback error for CPU%d", cpu);
                }
        }

out:
        cpus_write_unlock();
        /*
         * Do post unplug cleanup. This is still protected against
         * concurrent CPU hotplug via cpu_add_remove_lock.
         */
        lockup_detector_cleanup();
        arch_smt_update();
        return ret;
}

struct cpu_down_work {
        unsigned int            cpu;
        enum cpuhp_state        target;
};

static long __cpu_down_maps_locked(void *arg)
{
        struct cpu_down_work *work = arg;

        return _cpu_down(work->cpu, 0, work->target);
}

static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
        struct cpu_down_work work = { .cpu = cpu, .target = target, };

        /*
         * If the platform does not support hotplug, report it explicitly to
         * differentiate it from a transient offlining failure.
         */
        if (cc_platform_has(CC_ATTR_HOTPLUG_DISABLED))
                return -EOPNOTSUPP;
        if (cpu_hotplug_disabled)
                return -EBUSY;

        /*
         * Ensure that the control task does not run on the CPU to be
         * offlined, to prevent a deadlock against cfs_b->period_timer.
         * Also keep at least one housekeeping cpu online to avoid generating
         * an empty sched_domain span.
         */
        for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {
                if (cpu != work.cpu)
                        return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
        }
        return -EBUSY;
}

static int cpu_down(unsigned int cpu, enum cpuhp_state target)
{
        int err;

        cpu_maps_update_begin();
        err = cpu_down_maps_locked(cpu, target);
        cpu_maps_update_done();
        return err;
}

/**
 * cpu_device_down - Bring down a cpu device
 * @dev: Pointer to the cpu device to offline
 *
 * This function is meant to be used by device core cpu subsystem only.
 *
 * Other subsystems should use remove_cpu() instead.
 *
 * Return: %0 on success or a negative errno code
 */
int cpu_device_down(struct device *dev)
{
        return cpu_down(dev->id, CPUHP_OFFLINE);
}

int remove_cpu(unsigned int cpu)
{
        int ret;

        lock_device_hotplug();
        ret = device_offline(get_cpu_device(cpu));
        unlock_device_hotplug();

        return ret;
}
EXPORT_SYMBOL_GPL(remove_cpu);

void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
{
        unsigned int cpu;
        int error;

        cpu_maps_update_begin();

        /*
         * Make certain the cpu I'm about to reboot on is online.
         *
         * This is in line with what migrate_to_reboot_cpu() already does.
         */
        if (!cpu_online(primary_cpu))
                primary_cpu = cpumask_first(cpu_online_mask);

        for_each_online_cpu(cpu) {
                if (cpu == primary_cpu)
                        continue;

                error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
                if (error) {
                        pr_err("Failed to offline CPU%d - error=%d\n",
                                cpu, error);
                        break;
                }
        }

        /*
         * Ensure all but the reboot CPU are offline.
         */
        BUG_ON(num_online_cpus() > 1);

        /*
         * Make sure the CPUs won't be enabled by someone else after this
         * point. Kexec will reboot to a new kernel shortly resetting
         * everything along the way.
         */
        cpu_hotplug_disabled++;

        cpu_maps_update_done();
}

#else
#define takedown_cpu            NULL
#endif /* CONFIG_HOTPLUG_CPU */

/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

        rcutree_report_cpu_starting(cpu);       /* Enables RCU usage on this CPU. */
        cpumask_set_cpu(cpu, &cpus_booted_once_mask);

        /*
         * STARTING must not fail!
         */
        cpuhp_invoke_callback_range_nofail(true, cpu, st, target);
}
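
/*
 * Illustrative sketch (not part of the kernel source): architectures invoke
 * notify_cpu_starting() from their secondary startup path, conceptually:
 *
 *        void start_secondary(void)
 *        {
 *                ... early per-CPU init, interrupts still disabled ...
 *                notify_cpu_starting(smp_processor_id());
 *                set_cpu_online(smp_processor_id(), true);
 *                ... enable interrupts, enter the idle loop ...
 *        }
 *
 * The exact sequence is architecture specific; see for example
 * start_secondary() in arch/x86/kernel/smpboot.c.
 */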

/*
 * Called from the idle task. Wake up the controlling task which brings the
 * hotplug thread of the upcoming CPU up and then delegates the rest of the
 * online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

        /* Happens for the boot cpu */
        if (state != CPUHP_AP_ONLINE_IDLE)
                return;

        cpuhp_ap_update_sync_state(SYNC_STATE_ONLINE);

        /*
         * Unpark the stopper thread before we start the idle loop (and start
         * scheduling); this ensures the stopper task is always available.
         */
        stop_machine_unpark(smp_processor_id());

        st->state = CPUHP_AP_ONLINE_IDLE;
        complete_ap_thread(st, true);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        struct task_struct *idle;
        int ret = 0;

        cpus_write_lock();

        if (!cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * The caller of cpu_up() might have raced with another
         * caller. Nothing to do.
         */
        if (st->state >= target)
                goto out;

        if (st->state == CPUHP_OFFLINE) {
                /* Let it fail before we try to bring the cpu up */
                idle = idle_thread_get(cpu);
                if (IS_ERR(idle)) {
                        ret = PTR_ERR(idle);
                        goto out;
                }

                /*
                 * Reset stale stack state from the last time this CPU was online.
                 */
                scs_task_reset(idle);
                kasan_unpoison_task_stack(idle);
        }

        cpuhp_tasks_frozen = tasks_frozen;

        cpuhp_set_state(cpu, st, target);
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread once more.
         */
        if (st->state > CPUHP_BRINGUP_CPU) {
                ret = cpuhp_kick_ap_work(cpu);
1666                 /*
1667                  * The AP side has done the error rollback already. Just
1668                  * return the error code.
1669                  */
1670                 if (ret)
1671                         goto out;
1672         }
1673
1674         /*
1675          * Try to reach the target state. We max out on the BP at
1676          * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1677          * responsible for bringing it up to the target state.
1678          */
1679         target = min((int)target, CPUHP_BRINGUP_CPU);
1680         ret = cpuhp_up_callbacks(cpu, st, target);
1681 out:
1682         cpus_write_unlock();
1683         arch_smt_update();
1684         return ret;
1685 }
1686
1687 static int cpu_up(unsigned int cpu, enum cpuhp_state target)
1688 {
1689         int err = 0;
1690
1691         if (!cpu_possible(cpu)) {
1692                 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1693                        cpu);
1694                 return -EINVAL;
1695         }
1696
1697         err = try_online_node(cpu_to_node(cpu));
1698         if (err)
1699                 return err;
1700
1701         cpu_maps_update_begin();
1702
1703         if (cpu_hotplug_disabled) {
1704                 err = -EBUSY;
1705                 goto out;
1706         }
1707         if (!cpu_bootable(cpu)) {
1708                 err = -EPERM;
1709                 goto out;
1710         }
1711
1712         err = _cpu_up(cpu, 0, target);
1713 out:
1714         cpu_maps_update_done();
1715         return err;
1716 }
1717
1718 /**
1719  * cpu_device_up - Bring up a cpu device
1720  * @dev: Pointer to the cpu device to online
1721  *
1722  * This function is meant to be used by device core cpu subsystem only.
1723  *
1724  * Other subsystems should use add_cpu() instead.
1725  *
1726  * Return: %0 on success or a negative errno code
1727  */
1728 int cpu_device_up(struct device *dev)
1729 {
1730         return cpu_up(dev->id, CPUHP_ONLINE);
1731 }
1732
1733 int add_cpu(unsigned int cpu)
1734 {
1735         int ret;
1736
1737         lock_device_hotplug();
1738         ret = device_online(get_cpu_device(cpu));
1739         unlock_device_hotplug();
1740
1741         return ret;
1742 }
1743 EXPORT_SYMBOL_GPL(add_cpu);
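A hedged sketch of a caller outside the CPU device core, e.g. an ACPI-style hot-add path; the wrapper function is illustrative, add_cpu() is the real entry point:

static int example_hot_add(unsigned int cpu)
{
	int ret = add_cpu(cpu);		/* takes the device hotplug lock itself */

	if (ret)
		pr_err("example: onlining CPU%u failed: %d\n", cpu, ret);
	return ret;
}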
1744
1745 /**
1746  * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
1747  * @sleep_cpu: The cpu we hibernated on and should be brought up.
1748  *
1749  * On some architectures like arm64, we can hibernate on any CPU, but on
1750  * wake up the CPU we hibernated on might be offline as a side effect of
1751  * using maxcpus= for example.
1752  *
1753  * Return: %0 on success or a negative errno code
1754  */
1755 int bringup_hibernate_cpu(unsigned int sleep_cpu)
1756 {
1757         int ret;
1758
1759         if (!cpu_online(sleep_cpu)) {
1760                 pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
1761                 ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
1762                 if (ret) {
1763                         pr_err("Failed to bring hibernate-CPU up!\n");
1764                         return ret;
1765                 }
1766         }
1767         return 0;
1768 }
1769
1770 static void __init cpuhp_bringup_mask(const struct cpumask *mask, unsigned int ncpus,
1771                                       enum cpuhp_state target)
1772 {
1773         unsigned int cpu;
1774
1775         for_each_cpu(cpu, mask) {
1776                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1777
1778                 if (cpu_up(cpu, target) && can_rollback_cpu(st)) {
1779                         /*
1780                          * If this failed then cpu_up() might have only
1781                          * rolled back to CPUHP_BP_KICK_AP for the final
1782                          * online. Clean it up. NOOP if already rolled back.
1783                          */
1784                         WARN_ON(cpuhp_invoke_callback_range(false, cpu, st, CPUHP_OFFLINE));
1785                 }
1786
1787                 if (!--ncpus)
1788                         break;
1789         }
1790 }
1791
1792 #ifdef CONFIG_HOTPLUG_PARALLEL
1793 static bool __cpuhp_parallel_bringup __ro_after_init = true;
1794
1795 static int __init parallel_bringup_parse_param(char *arg)
1796 {
1797         return kstrtobool(arg, &__cpuhp_parallel_bringup);
1798 }
1799 early_param("cpuhp.parallel", parallel_bringup_parse_param);
1800
1801 static inline bool cpuhp_smt_aware(void)
1802 {
1803         return cpu_smt_max_threads > 1;
1804 }
1805
1806 static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
1807 {
1808         return cpu_primary_thread_mask;
1809 }
1810
1811 /*
1812  * On architectures which have enabled parallel bringup this invokes all BP
1813  * prepare states for each of the to-be-onlined APs first. The last state
1814  * sends the startup IPI to the APs. The APs proceed through the low level
1815  * bringup code in parallel and then wait for the control CPU to release
1816  * them one by one for the final onlining procedure.
1817  *
1818  * This avoids waiting for each AP to respond to the startup IPI in
1819  * CPUHP_BRINGUP_CPU.
1820  */
1821 static bool __init cpuhp_bringup_cpus_parallel(unsigned int ncpus)
1822 {
1823         const struct cpumask *mask = cpu_present_mask;
1824
1825         if (__cpuhp_parallel_bringup)
1826                 __cpuhp_parallel_bringup = arch_cpuhp_init_parallel_bringup();
1827         if (!__cpuhp_parallel_bringup)
1828                 return false;
1829
1830         if (cpuhp_smt_aware()) {
1831                 const struct cpumask *pmask = cpuhp_get_primary_thread_mask();
1832                 static struct cpumask tmp_mask __initdata;
1833
1834                 /*
1835                  * X86 requires that SMT siblings are not brought up while
1836                  * the primary thread does a microcode update, for various
1837                  * reasons. Bring the primary threads up first.
1838                  */
1839                 cpumask_and(&tmp_mask, mask, pmask);
1840                 cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_BP_KICK_AP);
1841                 cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_ONLINE);
1842                 /* Account for the online CPUs */
1843                 ncpus -= num_online_cpus();
1844                 if (!ncpus)
1845                         return true;
1846                 /* Create the mask for secondary CPUs */
1847                 cpumask_andnot(&tmp_mask, mask, pmask);
1848                 mask = &tmp_mask;
1849         }
1850
1851         /* Bring the not-yet-started CPUs up */
1852         cpuhp_bringup_mask(mask, ncpus, CPUHP_BP_KICK_AP);
1853         cpuhp_bringup_mask(mask, ncpus, CPUHP_ONLINE);
1854         return true;
1855 }
1856 #else
1857 static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus) { return false; }
1858 #endif /* CONFIG_HOTPLUG_PARALLEL */
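For reference, a sketch of the per-architecture opt-in consulted by cpuhp_bringup_cpus_parallel() above; the feature check is a hypothetical placeholder, and returning false falls back to the fully serialized bringup:

bool __init arch_cpuhp_init_parallel_bringup(void)
{
	if (!example_platform_supports_parallel())	/* hypothetical check */
		return false;
	pr_debug("Parallel CPU startup enabled\n");
	return true;
}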
1859
1860 void __init bringup_nonboot_cpus(unsigned int max_cpus)
1861 {
1862         if (!max_cpus)
1863                 return;
1864
1865         /* Try parallel bringup optimization if enabled */
1866         if (cpuhp_bringup_cpus_parallel(max_cpus))
1867                 return;
1868
1869         /* Full per CPU serialized bringup */
1870         cpuhp_bringup_mask(cpu_present_mask, max_cpus, CPUHP_ONLINE);
1871 }
1872
1873 #ifdef CONFIG_PM_SLEEP_SMP
1874 static cpumask_var_t frozen_cpus;
1875
1876 int freeze_secondary_cpus(int primary)
1877 {
1878         int cpu, error = 0;
1879
1880         cpu_maps_update_begin();
1881         if (primary == -1) {
1882                 primary = cpumask_first(cpu_online_mask);
1883                 if (!housekeeping_cpu(primary, HK_TYPE_TIMER))
1884                         primary = housekeeping_any_cpu(HK_TYPE_TIMER);
1885         } else {
1886                 if (!cpu_online(primary))
1887                         primary = cpumask_first(cpu_online_mask);
1888         }
1889
1890         /*
1891          * We take down all of the non-boot CPUs in one shot to avoid races
1892          * with userspace trying to use CPU hotplug at the same time.
1893          */
1894         cpumask_clear(frozen_cpus);
1895
1896         pr_info("Disabling non-boot CPUs ...\n");
1897         for_each_online_cpu(cpu) {
1898                 if (cpu == primary)
1899                         continue;
1900
1901                 if (pm_wakeup_pending()) {
1902                         pr_info("Wakeup pending. Abort CPU freeze\n");
1903                         error = -EBUSY;
1904                         break;
1905                 }
1906
1907                 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1908                 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1909                 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1910                 if (!error)
1911                         cpumask_set_cpu(cpu, frozen_cpus);
1912                 else {
1913                         pr_err("Error taking CPU%d down: %d\n", cpu, error);
1914                         break;
1915                 }
1916         }
1917
1918         if (!error)
1919                 BUG_ON(num_online_cpus() > 1);
1920         else
1921                 pr_err("Non-boot CPUs are not disabled\n");
1922
1923         /*
1924          * Make sure the CPUs won't be enabled by someone else. We need to do
1925          * this even in case of failure as all freeze_secondary_cpus() users are
1926          * supposed to do thaw_secondary_cpus() on the failure path.
1927          */
1928         cpu_hotplug_disabled++;
1929
1930         cpu_maps_update_done();
1931         return error;
1932 }
1933
1934 void __weak arch_thaw_secondary_cpus_begin(void)
1935 {
1936 }
1937
1938 void __weak arch_thaw_secondary_cpus_end(void)
1939 {
1940 }
1941
1942 void thaw_secondary_cpus(void)
1943 {
1944         int cpu, error;
1945
1946         /* Allow everyone to use the CPU hotplug again */
1947         cpu_maps_update_begin();
1948         __cpu_hotplug_enable();
1949         if (cpumask_empty(frozen_cpus))
1950                 goto out;
1951
1952         pr_info("Enabling non-boot CPUs ...\n");
1953
1954         arch_thaw_secondary_cpus_begin();
1955
1956         for_each_cpu(cpu, frozen_cpus) {
1957                 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1958                 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1959                 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1960                 if (!error) {
1961                         pr_info("CPU%d is up\n", cpu);
1962                         continue;
1963                 }
1964                 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1965         }
1966
1967         arch_thaw_secondary_cpus_end();
1968
1969         cpumask_clear(frozen_cpus);
1970 out:
1971         cpu_maps_update_done();
1972 }
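A sketch of the pairing contract, assuming a suspend-style caller: thaw_secondary_cpus() must run even when the freeze failed, because freeze_secondary_cpus() bumps cpu_hotplug_disabled regardless. The low-power step is a hypothetical placeholder.

static int example_suspend_cpus(void)
{
	int error = freeze_secondary_cpus(-1);	/* -1: pick a housekeeping CPU */

	if (!error)
		example_enter_low_power();	/* hypothetical platform step */
	thaw_secondary_cpus();			/* re-enable hotplug, online frozen CPUs */
	return error;
}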
1973
1974 static int __init alloc_frozen_cpus(void)
1975 {
1976         if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1977                 return -ENOMEM;
1978         return 0;
1979 }
1980 core_initcall(alloc_frozen_cpus);
1981
1982 /*
1983  * When callbacks for CPU hotplug notifications are being executed, we must
1984  * ensure that the state of the system with respect to the tasks being frozen
1985  * or not, as reported by the notification, remains unchanged *throughout the
1986  * duration* of the execution of the callbacks.
1987  * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1988  *
1989  * This synchronization is implemented by mutually excluding regular CPU
1990  * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1991  * Hibernate notifications.
1992  */
1993 static int
1994 cpu_hotplug_pm_callback(struct notifier_block *nb,
1995                         unsigned long action, void *ptr)
1996 {
1997         switch (action) {
1998
1999         case PM_SUSPEND_PREPARE:
2000         case PM_HIBERNATION_PREPARE:
2001                 cpu_hotplug_disable();
2002                 break;
2003
2004         case PM_POST_SUSPEND:
2005         case PM_POST_HIBERNATION:
2006                 cpu_hotplug_enable();
2007                 break;
2008
2009         default:
2010                 return NOTIFY_DONE;
2011         }
2012
2013         return NOTIFY_OK;
2014 }
2015
2016
2017 static int __init cpu_hotplug_pm_sync_init(void)
2018 {
2019         /*
2020          * cpu_hotplug_pm_callback has higher priority than the x86
2021          * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
2022          * having disabled cpu hotplug to avoid races.
2023          */
2024         pm_notifier(cpu_hotplug_pm_callback, 0);
2025         return 0;
2026 }
2027 core_initcall(cpu_hotplug_pm_sync_init);
2028
2029 #endif /* CONFIG_PM_SLEEP_SMP */
2030
2031 int __boot_cpu_id;
2032
2033 #endif /* CONFIG_SMP */
2034
2035 /* Boot processor state steps */
2036 static struct cpuhp_step cpuhp_hp_states[] = {
2037         [CPUHP_OFFLINE] = {
2038                 .name                   = "offline",
2039                 .startup.single         = NULL,
2040                 .teardown.single        = NULL,
2041         },
2042 #ifdef CONFIG_SMP
2043         [CPUHP_CREATE_THREADS] = {
2044                 .name                   = "threads:prepare",
2045                 .startup.single         = smpboot_create_threads,
2046                 .teardown.single        = NULL,
2047                 .cant_stop              = true,
2048         },
2049         [CPUHP_PERF_PREPARE] = {
2050                 .name                   = "perf:prepare",
2051                 .startup.single         = perf_event_init_cpu,
2052                 .teardown.single        = perf_event_exit_cpu,
2053         },
2054         [CPUHP_RANDOM_PREPARE] = {
2055                 .name                   = "random:prepare",
2056                 .startup.single         = random_prepare_cpu,
2057                 .teardown.single        = NULL,
2058         },
2059         [CPUHP_WORKQUEUE_PREP] = {
2060                 .name                   = "workqueue:prepare",
2061                 .startup.single         = workqueue_prepare_cpu,
2062                 .teardown.single        = NULL,
2063         },
2064         [CPUHP_HRTIMERS_PREPARE] = {
2065                 .name                   = "hrtimers:prepare",
2066                 .startup.single         = hrtimers_prepare_cpu,
2067                 .teardown.single        = NULL,
2068         },
2069         [CPUHP_SMPCFD_PREPARE] = {
2070                 .name                   = "smpcfd:prepare",
2071                 .startup.single         = smpcfd_prepare_cpu,
2072                 .teardown.single        = smpcfd_dead_cpu,
2073         },
2074         [CPUHP_RELAY_PREPARE] = {
2075                 .name                   = "relay:prepare",
2076                 .startup.single         = relay_prepare_cpu,
2077                 .teardown.single        = NULL,
2078         },
2079         [CPUHP_RCUTREE_PREP] = {
2080                 .name                   = "RCU/tree:prepare",
2081                 .startup.single         = rcutree_prepare_cpu,
2082                 .teardown.single        = rcutree_dead_cpu,
2083         },
2084         /*
2085          * On the tear-down path, timers_dead_cpu() must be invoked
2086          * before blk_mq_queue_reinit_notify() from notify_dead(),
2087          * otherwise an RCU stall occurs.
2088          */
2089         [CPUHP_TIMERS_PREPARE] = {
2090                 .name                   = "timers:prepare",
2091                 .startup.single         = timers_prepare_cpu,
2092                 .teardown.single        = timers_dead_cpu,
2093         },
2094
2095 #ifdef CONFIG_HOTPLUG_SPLIT_STARTUP
2096         /*
2097          * Kicks the AP alive. AP will wait in cpuhp_ap_sync_alive() until
2098          * the next step will release it.
2099          */
2100         [CPUHP_BP_KICK_AP] = {
2101                 .name                   = "cpu:kick_ap",
2102                 .startup.single         = cpuhp_kick_ap_alive,
2103         },
2104
2105         /*
2106          * Waits for the AP to reach cpuhp_ap_sync_alive() and then
2107          * releases it for the complete bringup.
2108          */
2109         [CPUHP_BRINGUP_CPU] = {
2110                 .name                   = "cpu:bringup",
2111                 .startup.single         = cpuhp_bringup_ap,
2112                 .teardown.single        = finish_cpu,
2113                 .cant_stop              = true,
2114         },
2115 #else
2116         /*
2117          * All-in-one CPU bringup state which includes the kick alive.
2118          */
2119         [CPUHP_BRINGUP_CPU] = {
2120                 .name                   = "cpu:bringup",
2121                 .startup.single         = bringup_cpu,
2122                 .teardown.single        = finish_cpu,
2123                 .cant_stop              = true,
2124         },
2125 #endif
2126         /* Final state before CPU kills itself */
2127         [CPUHP_AP_IDLE_DEAD] = {
2128                 .name                   = "idle:dead",
2129         },
2130         /*
2131          * Last state before CPU enters the idle loop to die. Transient state
2132          * for synchronization.
2133          */
2134         [CPUHP_AP_OFFLINE] = {
2135                 .name                   = "ap:offline",
2136                 .cant_stop              = true,
2137         },
2138         /* First state is scheduler control. Interrupts are disabled */
2139         [CPUHP_AP_SCHED_STARTING] = {
2140                 .name                   = "sched:starting",
2141                 .startup.single         = sched_cpu_starting,
2142                 .teardown.single        = sched_cpu_dying,
2143         },
2144         [CPUHP_AP_RCUTREE_DYING] = {
2145                 .name                   = "RCU/tree:dying",
2146                 .startup.single         = NULL,
2147                 .teardown.single        = rcutree_dying_cpu,
2148         },
2149         [CPUHP_AP_SMPCFD_DYING] = {
2150                 .name                   = "smpcfd:dying",
2151                 .startup.single         = NULL,
2152                 .teardown.single        = smpcfd_dying_cpu,
2153         },
2154         [CPUHP_AP_HRTIMERS_DYING] = {
2155                 .name                   = "hrtimers:dying",
2156                 .startup.single         = NULL,
2157                 .teardown.single        = hrtimers_cpu_dying,
2158         },
2159         [CPUHP_AP_TICK_DYING] = {
2160                 .name                   = "tick:dying",
2161                 .startup.single         = NULL,
2162                 .teardown.single        = tick_cpu_dying,
2163         },
2164         /* Entry state on starting. Interrupts enabled from here on.
2165          * Transient state for synchronization. */
2166         [CPUHP_AP_ONLINE] = {
2167                 .name                   = "ap:online",
2168         },
2169         /*
2170          * Handled on control processor until the plugged processor manages
2171          * this itself.
2172          */
2173         [CPUHP_TEARDOWN_CPU] = {
2174                 .name                   = "cpu:teardown",
2175                 .startup.single         = NULL,
2176                 .teardown.single        = takedown_cpu,
2177                 .cant_stop              = true,
2178         },
2179
2180         [CPUHP_AP_SCHED_WAIT_EMPTY] = {
2181                 .name                   = "sched:waitempty",
2182                 .startup.single         = NULL,
2183                 .teardown.single        = sched_cpu_wait_empty,
2184         },
2185
2186         /* Handle smpboot threads park/unpark */
2187         [CPUHP_AP_SMPBOOT_THREADS] = {
2188                 .name                   = "smpboot/threads:online",
2189                 .startup.single         = smpboot_unpark_threads,
2190                 .teardown.single        = smpboot_park_threads,
2191         },
2192         [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
2193                 .name                   = "irq/affinity:online",
2194                 .startup.single         = irq_affinity_online_cpu,
2195                 .teardown.single        = NULL,
2196         },
2197         [CPUHP_AP_PERF_ONLINE] = {
2198                 .name                   = "perf:online",
2199                 .startup.single         = perf_event_init_cpu,
2200                 .teardown.single        = perf_event_exit_cpu,
2201         },
2202         [CPUHP_AP_WATCHDOG_ONLINE] = {
2203                 .name                   = "lockup_detector:online",
2204                 .startup.single         = lockup_detector_online_cpu,
2205                 .teardown.single        = lockup_detector_offline_cpu,
2206         },
2207         [CPUHP_AP_WORKQUEUE_ONLINE] = {
2208                 .name                   = "workqueue:online",
2209                 .startup.single         = workqueue_online_cpu,
2210                 .teardown.single        = workqueue_offline_cpu,
2211         },
2212         [CPUHP_AP_RANDOM_ONLINE] = {
2213                 .name                   = "random:online",
2214                 .startup.single         = random_online_cpu,
2215                 .teardown.single        = NULL,
2216         },
2217         [CPUHP_AP_RCUTREE_ONLINE] = {
2218                 .name                   = "RCU/tree:online",
2219                 .startup.single         = rcutree_online_cpu,
2220                 .teardown.single        = rcutree_offline_cpu,
2221         },
2222 #endif
2223         /*
2224          * The dynamically registered state space is here
2225          */
2226
2227 #ifdef CONFIG_SMP
2228         /* Last state is scheduler control setting the cpu active */
2229         [CPUHP_AP_ACTIVE] = {
2230                 .name                   = "sched:active",
2231                 .startup.single         = sched_cpu_activate,
2232                 .teardown.single        = sched_cpu_deactivate,
2233         },
2234 #endif
2235
2236         /* CPU is fully up and running. */
2237         [CPUHP_ONLINE] = {
2238                 .name                   = "online",
2239                 .startup.single         = NULL,
2240                 .teardown.single        = NULL,
2241         },
2242 };
2243
2244 /* Sanity check for callbacks */
2245 static int cpuhp_cb_check(enum cpuhp_state state)
2246 {
2247         if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
2248                 return -EINVAL;
2249         return 0;
2250 }
2251
2252 /*
2253  * Returns a free slot in the requested dynamic state range. The states
2254  * are protected by the cpuhp_state_mutex and an empty slot is identified
2255  * by having no name assigned.
2256  */
2257 static int cpuhp_reserve_state(enum cpuhp_state state)
2258 {
2259         enum cpuhp_state i, end;
2260         struct cpuhp_step *step;
2261
2262         switch (state) {
2263         case CPUHP_AP_ONLINE_DYN:
2264                 step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
2265                 end = CPUHP_AP_ONLINE_DYN_END;
2266                 break;
2267         case CPUHP_BP_PREPARE_DYN:
2268                 step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
2269                 end = CPUHP_BP_PREPARE_DYN_END;
2270                 break;
2271         default:
2272                 return -EINVAL;
2273         }
2274
2275         for (i = state; i <= end; i++, step++) {
2276                 if (!step->name)
2277                         return i;
2278         }
2279         WARN(1, "No more dynamic states available for CPU hotplug\n");
2280         return -ENOSPC;
2281 }
2282
2283 static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
2284                                  int (*startup)(unsigned int cpu),
2285                                  int (*teardown)(unsigned int cpu),
2286                                  bool multi_instance)
2287 {
2288         /* (Un)Install the callbacks for further cpu hotplug operations */
2289         struct cpuhp_step *sp;
2290         int ret = 0;
2291
2292         /*
2293          * If name is NULL, then the state gets removed.
2294          *
2295          * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
2296          * the first allocation from these dynamic ranges, so the removal
2297          * would trigger a new allocation and clear the wrong (already
2298          * empty) state, leaving the callbacks of the to-be-cleared state
2299          * dangling, which causes wreckage on the next hotplug operation.
2300          */
2301         if (name && (state == CPUHP_AP_ONLINE_DYN ||
2302                      state == CPUHP_BP_PREPARE_DYN)) {
2303                 ret = cpuhp_reserve_state(state);
2304                 if (ret < 0)
2305                         return ret;
2306                 state = ret;
2307         }
2308         sp = cpuhp_get_step(state);
2309         if (name && sp->name)
2310                 return -EBUSY;
2311
2312         sp->startup.single = startup;
2313         sp->teardown.single = teardown;
2314         sp->name = name;
2315         sp->multi_instance = multi_instance;
2316         INIT_HLIST_HEAD(&sp->list);
2317         return ret;
2318 }
2319
2320 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
2321 {
2322         return cpuhp_get_step(state)->teardown.single;
2323 }
2324
2325 /*
2326  * Call the startup/teardown function for a step either on the AP or
2327  * on the current CPU.
2328  */
2329 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
2330                             struct hlist_node *node)
2331 {
2332         struct cpuhp_step *sp = cpuhp_get_step(state);
2333         int ret;
2334
2335         /*
2336          * If there's nothing to do, we're done.
2337          * Relies on the union for multi_instance.
2338          */
2339         if (cpuhp_step_empty(bringup, sp))
2340                 return 0;
2341         /*
2342          * The non-AP-bound callbacks can fail on bringup. On teardown,
2343          * e.g. module removal, we crash for now.
2344          */
2345 #ifdef CONFIG_SMP
2346         if (cpuhp_is_ap_state(state))
2347                 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
2348         else
2349                 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
2350 #else
2351         ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
2352 #endif
2353         BUG_ON(ret && !bringup);
2354         return ret;
2355 }
2356
2357 /*
2358  * Called from __cpuhp_setup_state on a recoverable failure.
2359  *
2360  * Note: The teardown callbacks for rollback are not allowed to fail!
2361  */
2362 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
2363                                    struct hlist_node *node)
2364 {
2365         int cpu;
2366
2367         /* Roll back the already executed steps on the other cpus */
2368         for_each_present_cpu(cpu) {
2369                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2370                 int cpustate = st->state;
2371
2372                 if (cpu >= failedcpu)
2373                         break;
2374
2375                 /* Did we invoke the startup call on that cpu ? */
2376                 if (cpustate >= state)
2377                         cpuhp_issue_call(cpu, state, false, node);
2378         }
2379 }
2380
2381 int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
2382                                           struct hlist_node *node,
2383                                           bool invoke)
2384 {
2385         struct cpuhp_step *sp;
2386         int cpu;
2387         int ret;
2388
2389         lockdep_assert_cpus_held();
2390
2391         sp = cpuhp_get_step(state);
2392         if (sp->multi_instance == false)
2393                 return -EINVAL;
2394
2395         mutex_lock(&cpuhp_state_mutex);
2396
2397         if (!invoke || !sp->startup.multi)
2398                 goto add_node;
2399
2400         /*
2401          * Try to call the startup callback for each present cpu
2402          * depending on the hotplug state of the cpu.
2403          */
2404         for_each_present_cpu(cpu) {
2405                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2406                 int cpustate = st->state;
2407
2408                 if (cpustate < state)
2409                         continue;
2410
2411                 ret = cpuhp_issue_call(cpu, state, true, node);
2412                 if (ret) {
2413                         if (sp->teardown.multi)
2414                                 cpuhp_rollback_install(cpu, state, node);
2415                         goto unlock;
2416                 }
2417         }
2418 add_node:
2419         ret = 0;
2420         hlist_add_head(node, &sp->list);
2421 unlock:
2422         mutex_unlock(&cpuhp_state_mutex);
2423         return ret;
2424 }
2425
2426 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
2427                                bool invoke)
2428 {
2429         int ret;
2430
2431         cpus_read_lock();
2432         ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
2433         cpus_read_unlock();
2434         return ret;
2435 }
2436 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
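A sketch of the multi-instance pattern through the cpuhp_setup_state_multi() and cpuhp_state_add_instance() wrappers from <linux/cpuhotplug.h>; the driver structure and all "example" names are illustrative:

struct example_dev {
	struct hlist_node node;		/* handed to the cpuhp core */
	/* per-device state */
};

static enum cpuhp_state example_state;

static int example_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct example_dev *dev = hlist_entry(node, struct example_dev, node);

	/* set up the per-cpu resources of @dev for @cpu */
	pr_debug("example: dev %p online on CPU%u\n", dev, cpu);
	return 0;
}

static int __init example_subsys_init(void)
{
	int ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					  "example/dev:online",
					  example_cpu_online, NULL);
	if (ret < 0)
		return ret;
	example_state = ret;		/* dynamically allocated slot */
	return 0;
}

static int example_add_device(struct example_dev *dev)
{
	/* invokes example_cpu_online() on every CPU at or above the state */
	return cpuhp_state_add_instance(example_state, &dev->node);
}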
2437
2438 /**
2439  * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
2440  * @state:              The state to setup
2441  * @name:               Name of the step
2442  * @invoke:             If true, the startup function is invoked for cpus where
2443  *                      cpu state >= @state
2444  * @startup:            startup callback function
2445  * @teardown:           teardown callback function
2446  * @multi_instance:     State is set up for multiple instances which get
2447  *                      added afterwards.
2448  *
2449  * The caller needs to hold cpus read locked while calling this function.
2450  * Return:
2451  *   On success:
2452  *      Positive state number if @state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN;
2453  *      0 for all other states
2454  *   On failure: proper (negative) error code
2455  */
2456 int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
2457                                    const char *name, bool invoke,
2458                                    int (*startup)(unsigned int cpu),
2459                                    int (*teardown)(unsigned int cpu),
2460                                    bool multi_instance)
2461 {
2462         int cpu, ret = 0;
2463         bool dynstate;
2464
2465         lockdep_assert_cpus_held();
2466
2467         if (cpuhp_cb_check(state) || !name)
2468                 return -EINVAL;
2469
2470         mutex_lock(&cpuhp_state_mutex);
2471
2472         ret = cpuhp_store_callbacks(state, name, startup, teardown,
2473                                     multi_instance);
2474
2475         dynstate = state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN;
2476         if (ret > 0 && dynstate) {
2477                 state = ret;
2478                 ret = 0;
2479         }
2480
2481         if (ret || !invoke || !startup)
2482                 goto out;
2483
2484         /*
2485          * Try to call the startup callback for each present cpu
2486          * depending on the hotplug state of the cpu.
2487          */
2488         for_each_present_cpu(cpu) {
2489                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2490                 int cpustate = st->state;
2491
2492                 if (cpustate < state)
2493                         continue;
2494
2495                 ret = cpuhp_issue_call(cpu, state, true, NULL);
2496                 if (ret) {
2497                         if (teardown)
2498                                 cpuhp_rollback_install(cpu, state, NULL);
2499                         cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2500                         goto out;
2501                 }
2502         }
2503 out:
2504         mutex_unlock(&cpuhp_state_mutex);
2505         /*
2506          * If the requested state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN,
2507          * return the dynamically allocated state in case of success.
2508          */
2509         if (!ret && dynstate)
2510                 return state;
2511         return ret;
2512 }
2513 EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
2514
2515 int __cpuhp_setup_state(enum cpuhp_state state,
2516                         const char *name, bool invoke,
2517                         int (*startup)(unsigned int cpu),
2518                         int (*teardown)(unsigned int cpu),
2519                         bool multi_instance)
2520 {
2521         int ret;
2522
2523         cpus_read_lock();
2524         ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
2525                                              teardown, multi_instance);
2526         cpus_read_unlock();
2527         return ret;
2528 }
2529 EXPORT_SYMBOL(__cpuhp_setup_state);
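The common single-instance usage goes through the cpuhp_setup_state() wrapper; a minimal sketch with illustrative names, including the matching removal. CPUHP_AP_ONLINE_DYN makes the core pick a free slot and return it as a positive state number:

static enum cpuhp_state example_online_state;

static int example_online(unsigned int cpu)
{
	/* allocate/enable per-cpu resources for @cpu */
	return 0;
}

static int example_offline(unsigned int cpu)
{
	/* undo example_online() */
	return 0;
}

static int __init example_module_init(void)
{
	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				    example_online, example_offline);
	if (ret < 0)
		return ret;
	example_online_state = ret;	/* remember the dynamic slot */
	return 0;
}

static void __exit example_module_exit(void)
{
	/* runs example_offline() on all online CPUs, then frees the slot */
	cpuhp_remove_state(example_online_state);
}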
2530
2531 int __cpuhp_state_remove_instance(enum cpuhp_state state,
2532                                   struct hlist_node *node, bool invoke)
2533 {
2534         struct cpuhp_step *sp = cpuhp_get_step(state);
2535         int cpu;
2536
2537         BUG_ON(cpuhp_cb_check(state));
2538
2539         if (!sp->multi_instance)
2540                 return -EINVAL;
2541
2542         cpus_read_lock();
2543         mutex_lock(&cpuhp_state_mutex);
2544
2545         if (!invoke || !cpuhp_get_teardown_cb(state))
2546                 goto remove;
2547         /*
2548          * Call the teardown callback for each present cpu depending
2549          * on the hotplug state of the cpu. This function is not
2550          * allowed to fail currently!
2551          */
2552         for_each_present_cpu(cpu) {
2553                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2554                 int cpustate = st->state;
2555
2556                 if (cpustate >= state)
2557                         cpuhp_issue_call(cpu, state, false, node);
2558         }
2559
2560 remove:
2561         hlist_del(node);
2562         mutex_unlock(&cpuhp_state_mutex);
2563         cpus_read_unlock();
2564
2565         return 0;
2566 }
2567 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
2568
2569 /**
2570  * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
2571  * @state:      The state to remove
2572  * @invoke:     If true, the teardown function is invoked for cpus where
2573  *              cpu state >= @state
2574  *
2575  * The caller needs to hold cpus read locked while calling this function.
2576  * The teardown callback is currently not allowed to fail. Think
2577  * about module removal!
2578  */
2579 void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
2580 {
2581         struct cpuhp_step *sp = cpuhp_get_step(state);
2582         int cpu;
2583
2584         BUG_ON(cpuhp_cb_check(state));
2585
2586         lockdep_assert_cpus_held();
2587
2588         mutex_lock(&cpuhp_state_mutex);
2589         if (sp->multi_instance) {
2590                 WARN(!hlist_empty(&sp->list),
2591                      "Error: Removing state %d which has instances left.\n",
2592                      state);
2593                 goto remove;
2594         }
2595
2596         if (!invoke || !cpuhp_get_teardown_cb(state))
2597                 goto remove;
2598
2599         /*
2600          * Call the teardown callback for each present cpu depending
2601          * on the hotplug state of the cpu. This function is not
2602          * allowed to fail currently!
2603          */
2604         for_each_present_cpu(cpu) {
2605                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2606                 int cpustate = st->state;
2607
2608                 if (cpustate >= state)
2609                         cpuhp_issue_call(cpu, state, false, NULL);
2610         }
2611 remove:
2612         cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2613         mutex_unlock(&cpuhp_state_mutex);
2614 }
2615 EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
2616
2617 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
2618 {
2619         cpus_read_lock();
2620         __cpuhp_remove_state_cpuslocked(state, invoke);
2621         cpus_read_unlock();
2622 }
2623 EXPORT_SYMBOL(__cpuhp_remove_state);
2624
2625 #ifdef CONFIG_HOTPLUG_SMT
2626 static void cpuhp_offline_cpu_device(unsigned int cpu)
2627 {
2628         struct device *dev = get_cpu_device(cpu);
2629
2630         dev->offline = true;
2631         /* Tell user space about the state change */
2632         kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2633 }
2634
2635 static void cpuhp_online_cpu_device(unsigned int cpu)
2636 {
2637         struct device *dev = get_cpu_device(cpu);
2638
2639         dev->offline = false;
2640         /* Tell user space about the state change */
2641         kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2642 }
2643
2644 int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2645 {
2646         int cpu, ret = 0;
2647
2648         cpu_maps_update_begin();
2649         for_each_online_cpu(cpu) {
2650                 if (topology_is_primary_thread(cpu))
2651                         continue;
2652                 /*
2653                  * Disable can be called with CPU_SMT_ENABLED when changing
2654                  * from a higher to lower number of SMT threads per core.
2655                  */
2656                 if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
2657                         continue;
2658                 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2659                 if (ret)
2660                         break;
2661                 /*
2662                  * As this needs to hold the cpu maps lock it's impossible
2663                  * to call device_offline() because that ends up calling
2664                  * cpu_down() which takes the cpu maps lock. The cpu maps
2665                  * lock needs to be held as this might race against in-kernel
2666                  * abusers of the hotplug machinery (thermal management).
2667                  *
2668                  * So nothing would update device:offline state. That would
2669                  * leave the sysfs entry stale and prevent onlining after
2670                  * smt control has been changed to 'off' again. This is
2671                  * called under the sysfs hotplug lock, so it is properly
2672                  * serialized against the regular offline usage.
2673                  */
2674                 cpuhp_offline_cpu_device(cpu);
2675         }
2676         if (!ret)
2677                 cpu_smt_control = ctrlval;
2678         cpu_maps_update_done();
2679         return ret;
2680 }
2681
2682 int cpuhp_smt_enable(void)
2683 {
2684         int cpu, ret = 0;
2685
2686         cpu_maps_update_begin();
2687         cpu_smt_control = CPU_SMT_ENABLED;
2688         for_each_present_cpu(cpu) {
2689                 /* Skip online CPUs and CPUs on offline nodes */
2690                 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2691                         continue;
2692                 if (!cpu_smt_thread_allowed(cpu))
2693                         continue;
2694                 ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2695                 if (ret)
2696                         break;
2697                 /* See comment in cpuhp_smt_disable() */
2698                 cpuhp_online_cpu_device(cpu);
2699         }
2700         cpu_maps_update_done();
2701         return ret;
2702 }
2703 #endif
2704
2705 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
2706 static ssize_t state_show(struct device *dev,
2707                           struct device_attribute *attr, char *buf)
2708 {
2709         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2710
2711         return sprintf(buf, "%d\n", st->state);
2712 }
2713 static DEVICE_ATTR_RO(state);
2714
2715 static ssize_t target_store(struct device *dev, struct device_attribute *attr,
2716                             const char *buf, size_t count)
2717 {
2718         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2719         struct cpuhp_step *sp;
2720         int target, ret;
2721
2722         ret = kstrtoint(buf, 10, &target);
2723         if (ret)
2724                 return ret;
2725
2726 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
2727         if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
2728                 return -EINVAL;
2729 #else
2730         if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
2731                 return -EINVAL;
2732 #endif
2733
2734         ret = lock_device_hotplug_sysfs();
2735         if (ret)
2736                 return ret;
2737
2738         mutex_lock(&cpuhp_state_mutex);
2739         sp = cpuhp_get_step(target);
2740         ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
2741         mutex_unlock(&cpuhp_state_mutex);
2742         if (ret)
2743                 goto out;
2744
2745         if (st->state < target)
2746                 ret = cpu_up(dev->id, target);
2747         else if (st->state > target)
2748                 ret = cpu_down(dev->id, target);
2749         else if (WARN_ON(st->target != target))
2750                 st->target = target;
2751 out:
2752         unlock_device_hotplug();
2753         return ret ? ret : count;
2754 }
2755
2756 static ssize_t target_show(struct device *dev,
2757                            struct device_attribute *attr, char *buf)
2758 {
2759         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2760
2761         return sprintf(buf, "%d\n", st->target);
2762 }
2763 static DEVICE_ATTR_RW(target);
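From user space these attributes appear under /sys/devices/system/cpu/cpuN/hotplug/ (per the attribute group name below in this file). A small illustrative user-space helper, assuming that path:

#include <stdio.h>

/* user-space sketch: write a numeric target state for a given CPU */
static int set_cpu_target(unsigned int cpu, int target)
{
	char path[80];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%u/hotplug/target", cpu);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", target);
	return fclose(f);
}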
2764
2765 static ssize_t fail_store(struct device *dev, struct device_attribute *attr,
2766                           const char *buf, size_t count)
2767 {
2768         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2769         struct cpuhp_step *sp;
2770         int fail, ret;
2771
2772         ret = kstrtoint(buf, 10, &fail);
2773         if (ret)
2774                 return ret;
2775
2776         if (fail == CPUHP_INVALID) {
2777                 st->fail = fail;
2778                 return count;
2779         }
2780
2781         if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
2782                 return -EINVAL;
2783
2784         /*
2785          * Cannot fail STARTING/DYING callbacks.
2786          */
2787         if (cpuhp_is_atomic_state(fail))
2788                 return -EINVAL;
2789
2790         /*
2791          * DEAD callbacks cannot fail...
2792          * ... neither can CPUHP_BRINGUP_CPU during hotunplug. Since the
2793          * latter triggers the STARTING callbacks, a failure in this state
2794          * would hinder rollback.
2795          */
2796         if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU)
2797                 return -EINVAL;
2798
2799         /*
2800          * Cannot fail anything that doesn't have callbacks.
2801          */
2802         mutex_lock(&cpuhp_state_mutex);
2803         sp = cpuhp_get_step(fail);
2804         if (!sp->startup.single && !sp->teardown.single)
2805                 ret = -EINVAL;
2806         mutex_unlock(&cpuhp_state_mutex);
2807         if (ret)
2808                 return ret;
2809
2810         st->fail = fail;
2811
2812         return count;
2813 }
2814
2815 static ssize_t fail_show(struct device *dev,
2816                          struct device_attribute *attr, char *buf)
2817 {
2818         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2819
2820         return sprintf(buf, "%d\n", st->fail);
2821 }
2822
2823 static DEVICE_ATTR_RW(fail);
2824
2825 static struct attribute *cpuhp_cpu_attrs[] = {
2826         &dev_attr_state.attr,
2827         &dev_attr_target.attr,
2828         &dev_attr_fail.attr,
2829         NULL
2830 };
2831
2832 static const struct attribute_group cpuhp_cpu_attr_group = {
2833         .attrs = cpuhp_cpu_attrs,
2834         .name = "hotplug",
2836 };
2837
2838 static ssize_t states_show(struct device *dev,
2839                                  struct device_attribute *attr, char *buf)
2840 {
2841         ssize_t cur, res = 0;
2842         int i;
2843
2844         mutex_lock(&cpuhp_state_mutex);
2845         for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
2846                 struct cpuhp_step *sp = cpuhp_get_step(i);
2847
2848                 if (sp->name) {
2849                         cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2850                         buf += cur;
2851                         res += cur;
2852                 }
2853         }
2854         mutex_unlock(&cpuhp_state_mutex);
2855         return res;
2856 }
2857 static DEVICE_ATTR_RO(states);
2858
2859 static struct attribute *cpuhp_cpu_root_attrs[] = {
2860         &dev_attr_states.attr,
2861         NULL
2862 };
2863
2864 static const struct attribute_group cpuhp_cpu_root_attr_group = {
2865         .attrs = cpuhp_cpu_root_attrs,
2866         .name = "hotplug",
2868 };
2869
2870 #ifdef CONFIG_HOTPLUG_SMT
2871
2872 static bool cpu_smt_num_threads_valid(unsigned int threads)
2873 {
2874         if (IS_ENABLED(CONFIG_SMT_NUM_THREADS_DYNAMIC))
2875                 return threads >= 1 && threads <= cpu_smt_max_threads;
2876         return threads == 1 || threads == cpu_smt_max_threads;
2877 }
2878
2879 static ssize_t
2880 __store_smt_control(struct device *dev, struct device_attribute *attr,
2881                     const char *buf, size_t count)
2882 {
2883         int ctrlval, ret, num_threads, orig_threads;
2884         bool force_off;
2885
2886         if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2887                 return -EPERM;
2888
2889         if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2890                 return -ENODEV;
2891
2892         if (sysfs_streq(buf, "on")) {
2893                 ctrlval = CPU_SMT_ENABLED;
2894                 num_threads = cpu_smt_max_threads;
2895         } else if (sysfs_streq(buf, "off")) {
2896                 ctrlval = CPU_SMT_DISABLED;
2897                 num_threads = 1;
2898         } else if (sysfs_streq(buf, "forceoff")) {
2899                 ctrlval = CPU_SMT_FORCE_DISABLED;
2900                 num_threads = 1;
2901         } else if (kstrtoint(buf, 10, &num_threads) == 0) {
2902                 if (num_threads == 1)
2903                         ctrlval = CPU_SMT_DISABLED;
2904                 else if (cpu_smt_num_threads_valid(num_threads))
2905                         ctrlval = CPU_SMT_ENABLED;
2906                 else
2907                         return -EINVAL;
2908         } else {
2909                 return -EINVAL;
2910         }
2911
2912         ret = lock_device_hotplug_sysfs();
2913         if (ret)
2914                 return ret;
2915
2916         orig_threads = cpu_smt_num_threads;
2917         cpu_smt_num_threads = num_threads;
2918
2919         force_off = ctrlval != cpu_smt_control && ctrlval == CPU_SMT_FORCE_DISABLED;
2920
2921         if (num_threads > orig_threads)
2922                 ret = cpuhp_smt_enable();
2923         else if (num_threads < orig_threads || force_off)
2924                 ret = cpuhp_smt_disable(ctrlval);
2925
2926         unlock_device_hotplug();
2927         return ret ? ret : count;
2928 }
2929
2930 #else /* !CONFIG_HOTPLUG_SMT */
2931 static ssize_t
2932 __store_smt_control(struct device *dev, struct device_attribute *attr,
2933                     const char *buf, size_t count)
2934 {
2935         return -ENODEV;
2936 }
2937 #endif /* CONFIG_HOTPLUG_SMT */
2938
2939 static const char *smt_states[] = {
2940         [CPU_SMT_ENABLED]               = "on",
2941         [CPU_SMT_DISABLED]              = "off",
2942         [CPU_SMT_FORCE_DISABLED]        = "forceoff",
2943         [CPU_SMT_NOT_SUPPORTED]         = "notsupported",
2944         [CPU_SMT_NOT_IMPLEMENTED]       = "notimplemented",
2945 };
2946
2947 static ssize_t control_show(struct device *dev,
2948                             struct device_attribute *attr, char *buf)
2949 {
2950         const char *state = smt_states[cpu_smt_control];
2951
2952 #ifdef CONFIG_HOTPLUG_SMT
2953         /*
2954          * If SMT is enabled but not all threads are enabled then show the
2955          * number of threads. If all threads are enabled show "on". Otherwise
2956          * show the state name.
2957          */
2958         if (cpu_smt_control == CPU_SMT_ENABLED &&
2959             cpu_smt_num_threads != cpu_smt_max_threads)
2960                 return sysfs_emit(buf, "%d\n", cpu_smt_num_threads);
2961 #endif
2962
2963         return sysfs_emit(buf, "%s\n", state);
2964 }
2965
2966 static ssize_t control_store(struct device *dev, struct device_attribute *attr,
2967                              const char *buf, size_t count)
2968 {
2969         return __store_smt_control(dev, attr, buf, count);
2970 }
2971 static DEVICE_ATTR_RW(control);
2972
2973 static ssize_t active_show(struct device *dev,
2974                            struct device_attribute *attr, char *buf)
2975 {
2976         return sysfs_emit(buf, "%d\n", sched_smt_active());
2977 }
2978 static DEVICE_ATTR_RO(active);
2979
2980 static struct attribute *cpuhp_smt_attrs[] = {
2981         &dev_attr_control.attr,
2982         &dev_attr_active.attr,
2983         NULL
2984 };
2985
2986 static const struct attribute_group cpuhp_smt_attr_group = {
2987         .attrs = cpuhp_smt_attrs,
2988         .name = "smt",
2990 };
2991
2992 static int __init cpu_smt_sysfs_init(void)
2993 {
2994         struct device *dev_root;
2995         int ret = -ENODEV;
2996
2997         dev_root = bus_get_dev_root(&cpu_subsys);
2998         if (dev_root) {
2999                 ret = sysfs_create_group(&dev_root->kobj, &cpuhp_smt_attr_group);
3000                 put_device(dev_root);
3001         }
3002         return ret;
3003 }
3004
3005 static int __init cpuhp_sysfs_init(void)
3006 {
3007         struct device *dev_root;
3008         int cpu, ret;
3009
3010         ret = cpu_smt_sysfs_init();
3011         if (ret)
3012                 return ret;
3013
3014         dev_root = bus_get_dev_root(&cpu_subsys);
3015         if (dev_root) {
3016                 ret = sysfs_create_group(&dev_root->kobj, &cpuhp_cpu_root_attr_group);
3017                 put_device(dev_root);
3018                 if (ret)
3019                         return ret;
3020         }
3021
3022         for_each_possible_cpu(cpu) {
3023                 struct device *dev = get_cpu_device(cpu);
3024
3025                 if (!dev)
3026                         continue;
3027                 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
3028                 if (ret)
3029                         return ret;
3030         }
3031         return 0;
3032 }
3033 device_initcall(cpuhp_sysfs_init);
3034 #endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
3035
3036 /*
3037  * cpu_bit_bitmap[] is a special, "compressed" data structure that
3038  * represents the NR_CPUS-bit binary value of 1<<nr for every nr.
3039  *
3040  * It is used by cpumask_of() to get a constant address to a CPU
3041  * mask value that has a single bit set only.
3042  */
3043
3044 /* cpu_bit_bitmap[0] is empty - so we can back into it */
3045 #define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
3046 #define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
3047 #define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
3048 #define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
3049
3050 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
3051
3052         MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
3053         MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
3054 #if BITS_PER_LONG > 32
3055         MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
3056         MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
3057 #endif
3058 };
3059 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
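What this buys callers is a constant, allocation-free single-CPU mask via cpumask_of(); a minimal sketch with an illustrative caller:

static int example_pin_irq(unsigned int irq, unsigned int cpu)
{
	/* cpumask_of() resolves to a pointer into cpu_bit_bitmap[] */
	return irq_set_affinity(irq, cpumask_of(cpu));
}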
3060
3061 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
3062 EXPORT_SYMBOL(cpu_all_bits);
3063
3064 #ifdef CONFIG_INIT_ALL_POSSIBLE
3065 struct cpumask __cpu_possible_mask __ro_after_init
3066         = {CPU_BITS_ALL};
3067 #else
3068 struct cpumask __cpu_possible_mask __ro_after_init;
3069 #endif
3070 EXPORT_SYMBOL(__cpu_possible_mask);
3071
3072 struct cpumask __cpu_online_mask __read_mostly;
3073 EXPORT_SYMBOL(__cpu_online_mask);
3074
3075 struct cpumask __cpu_present_mask __read_mostly;
3076 EXPORT_SYMBOL(__cpu_present_mask);
3077
3078 struct cpumask __cpu_active_mask __read_mostly;
3079 EXPORT_SYMBOL(__cpu_active_mask);
3080
3081 struct cpumask __cpu_dying_mask __read_mostly;
3082 EXPORT_SYMBOL(__cpu_dying_mask);
3083
3084 atomic_t __num_online_cpus __read_mostly;
3085 EXPORT_SYMBOL(__num_online_cpus);
3086
3087 void init_cpu_present(const struct cpumask *src)
3088 {
3089         cpumask_copy(&__cpu_present_mask, src);
3090 }
3091
3092 void init_cpu_possible(const struct cpumask *src)
3093 {
3094         cpumask_copy(&__cpu_possible_mask, src);
3095 }
3096
3097 void init_cpu_online(const struct cpumask *src)
3098 {
3099         cpumask_copy(&__cpu_online_mask, src);
3100 }
3101
3102 void set_cpu_online(unsigned int cpu, bool online)
3103 {
3104         /*
3105          * atomic_inc/dec() is required to handle the horrid abuse of this
3106          * function by the reboot and kexec code which invoke it from
3107          * IPI/NMI broadcasts when shutting down CPUs. Invocation from
3108          * regular CPU hotplug is properly serialized.
3109          *
3110          * Note that the fact that __num_online_cpus is of type atomic_t
3111          * does not protect readers which are not serialized against
3112          * concurrent hotplug operations.
3113          */
3114         if (online) {
3115                 if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
3116                         atomic_inc(&__num_online_cpus);
3117         } else {
3118                 if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
3119                         atomic_dec(&__num_online_cpus);
3120         }
3121 }
3122
3123 /*
3124  * Activate the first processor.
3125  */
3126 void __init boot_cpu_init(void)
3127 {
3128         int cpu = smp_processor_id();
3129
3130         /* Mark the boot cpu "present", "online" etc for SMP and UP case */
3131         set_cpu_online(cpu, true);
3132         set_cpu_active(cpu, true);
3133         set_cpu_present(cpu, true);
3134         set_cpu_possible(cpu, true);
3135
3136 #ifdef CONFIG_SMP
3137         __boot_cpu_id = cpu;
3138 #endif
3139 }
3140
3141 /*
3142  * Must be called _AFTER_ setting up the per_cpu areas
3143  */
3144 void __init boot_cpu_hotplug_init(void)
3145 {
3146 #ifdef CONFIG_SMP
3147         cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
3148         atomic_set(this_cpu_ptr(&cpuhp_state.ap_sync_state), SYNC_STATE_ONLINE);
3149 #endif
3150         this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
3151         this_cpu_write(cpuhp_state.target, CPUHP_ONLINE);
3152 }
3153
3154 #ifdef CONFIG_CPU_MITIGATIONS
3155 /*
3156  * These are used for a global "mitigations=" cmdline option for toggling
3157  * optional CPU mitigations.
3158  */
3159 enum cpu_mitigations {
3160         CPU_MITIGATIONS_OFF,
3161         CPU_MITIGATIONS_AUTO,
3162         CPU_MITIGATIONS_AUTO_NOSMT,
3163 };
3164
3165 static enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
3166
3167 static int __init mitigations_parse_cmdline(char *arg)
3168 {
3169         if (!strcmp(arg, "off"))
3170                 cpu_mitigations = CPU_MITIGATIONS_OFF;
3171         else if (!strcmp(arg, "auto"))
3172                 cpu_mitigations = CPU_MITIGATIONS_AUTO;
3173         else if (!strcmp(arg, "auto,nosmt"))
3174                 cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
3175         else
3176                 pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
3177                         arg);
3178
3179         return 0;
3180 }
3181
3182 /* mitigations=off */
3183 bool cpu_mitigations_off(void)
3184 {
3185         return cpu_mitigations == CPU_MITIGATIONS_OFF;
3186 }
3187 EXPORT_SYMBOL_GPL(cpu_mitigations_off);
3188
3189 /* mitigations=auto,nosmt */
3190 bool cpu_mitigations_auto_nosmt(void)
3191 {
3192         return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
3193 }
3194 EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
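A sketch of how architecture mitigation selection typically consults these helpers; the enum and function names are hypothetical placeholders:

enum example_mitigation {
	EXAMPLE_MITIGATION_NONE,
	EXAMPLE_MITIGATION_FULL,
	EXAMPLE_MITIGATION_FULL_NOSMT,
};

static enum example_mitigation __init example_select_mitigation(void)
{
	if (cpu_mitigations_off())
		return EXAMPLE_MITIGATION_NONE;
	if (cpu_mitigations_auto_nosmt())
		return EXAMPLE_MITIGATION_FULL_NOSMT;
	return EXAMPLE_MITIGATION_FULL;
}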
3195 #else
3196 static int __init mitigations_parse_cmdline(char *arg)
3197 {
3198         pr_crit("Kernel compiled without mitigations, ignoring 'mitigations'; system may still be vulnerable\n");
3199         return 0;
3200 }
3201 #endif
3202 early_param("mitigations", mitigations_parse_cmdline);