/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/sched/mm.h>
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:      The current cpu state
 * @target:     The target state
 * @fail:       Current CPU hotplug callback state
 * @thread:     Pointer to the hotplug thread
 * @should_run: Thread should execute
 * @rollback:   Perform a rollback
 * @single:     Single callback invocation
 * @bringup:    Single callback bringup or teardown selector
 * @node:       Remote CPU node; for multi-instance, do a single entry
 *              callback for install/remove
 * @last:       For multi-instance rollback, remember how far we got
 * @cb_state:   The state for a single callback (install/uninstall)
 * @result:     Result of the operation
 * @done_up:    Signal completion to the issuer of the task for cpu-up
 * @done_down:  Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
        enum cpuhp_state        state;
        enum cpuhp_state        target;
        enum cpuhp_state        fail;
#ifdef CONFIG_SMP
        struct task_struct      *thread;
        bool                    should_run;
        bool                    rollback;
        bool                    single;
        bool                    bringup;
        struct hlist_node       *node;
        struct hlist_node       *last;
        enum cpuhp_state        cb_state;
        int                     result;
        struct completion       done_up;
        struct completion       done_down;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
        .fail = CPUHP_INVALID,
};

#ifdef CONFIG_SMP
cpumask_t cpus_booted_once_mask;
#endif

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
        STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
        STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);

static inline void cpuhp_lock_acquire(bool bringup)
{
        lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
        lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

#endif

/**
 * cpuhp_step - Hotplug state machine step
 * @name:       Name of the step
 * @startup:    Startup function of the step
 * @teardown:   Teardown function of the step
 * @list:       List head for the nodes of a multi-instance step
 * @cant_stop:  Bringup/teardown can't be stopped at this step
 * @multi_instance: State has multiple instances which get added afterwards
 */
struct cpuhp_step {
        const char              *name;
        union {
                int             (*single)(unsigned int cpu);
                int             (*multi)(unsigned int cpu,
                                         struct hlist_node *node);
        } startup;
        union {
                int             (*single)(unsigned int cpu);
                int             (*multi)(unsigned int cpu,
                                         struct hlist_node *node);
        } teardown;
        struct hlist_head       list;
        bool                    cant_stop;
        bool                    multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
        return cpuhp_hp_states + state;
}

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:        The cpu for which the callback should be invoked
 * @state:      The state to do callbacks for
 * @bringup:    True if the bringup callback should be invoked
 * @node:       For multi-instance, do a single entry callback for install/remove
 * @lastp:      For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
                                 bool bringup, struct hlist_node *node,
                                 struct hlist_node **lastp)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        struct cpuhp_step *step = cpuhp_get_step(state);
        int (*cbm)(unsigned int cpu, struct hlist_node *node);
        int (*cb)(unsigned int cpu);
        int ret, cnt;

        if (st->fail == state) {
                st->fail = CPUHP_INVALID;

                if (!(bringup ? step->startup.single : step->teardown.single))
                        return 0;

                return -EAGAIN;
        }

        if (!step->multi_instance) {
                WARN_ON_ONCE(lastp && *lastp);
                cb = bringup ? step->startup.single : step->teardown.single;
                if (!cb)
                        return 0;
                trace_cpuhp_enter(cpu, st->target, state, cb);
                ret = cb(cpu);
                trace_cpuhp_exit(cpu, st->state, state, ret);
                return ret;
        }
        cbm = bringup ? step->startup.multi : step->teardown.multi;
        if (!cbm)
                return 0;

        /* Single invocation for instance add/remove */
        if (node) {
                WARN_ON_ONCE(lastp && *lastp);
                trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
                ret = cbm(cpu, node);
                trace_cpuhp_exit(cpu, st->state, state, ret);
                return ret;
        }

        /* State transition. Invoke on all instances */
        cnt = 0;
        hlist_for_each(node, &step->list) {
                if (lastp && node == *lastp)
                        break;

                trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
                ret = cbm(cpu, node);
                trace_cpuhp_exit(cpu, st->state, state, ret);
                if (ret) {
                        if (!lastp)
                                goto err;

                        *lastp = node;
                        return ret;
                }
                cnt++;
        }
        if (lastp)
                *lastp = NULL;
        return 0;
err:
        /* Rollback the instances if one failed */
        cbm = !bringup ? step->startup.multi : step->teardown.multi;
        if (!cbm)
                return ret;

        hlist_for_each(node, &step->list) {
                if (!cnt--)
                        break;

                trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
                ret = cbm(cpu, node);
                trace_cpuhp_exit(cpu, st->state, state, ret);
                /*
                 * Rollback must not fail!
                 */
                WARN_ON_ONCE(ret);
        }
        return ret;
}
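
/*
 * Illustrative sketch (not part of this file): the multi-instance paths
 * above back the public cpuhp_setup_state_multi()/cpuhp_state_add_instance()
 * API from <linux/cpuhotplug.h>. A hypothetical driver that embeds a
 * struct hlist_node in its per-device data could use them roughly like so:
 *
 *      static enum cpuhp_state mydrv_hp_state;         // hypothetical
 *
 *      static int mydrv_cpu_online(unsigned int cpu, struct hlist_node *node)
 *      {
 *              struct mydrv_dev *dev = hlist_entry(node, struct mydrv_dev, node);
 *
 *              // per (cpu, instance) setup would go here
 *              return 0;
 *      }
 *
 *      ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "mydrv:online",
 *                                    mydrv_cpu_online, NULL);
 *      if (ret < 0)
 *              return ret;
 *      mydrv_hp_state = ret;
 *      ret = cpuhp_state_add_instance(mydrv_hp_state, &dev->node);
 */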

#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
        /*
         * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
         * purposes as that state is handled explicitly in cpu_down.
         */
        return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
        struct completion *done = bringup ? &st->done_up : &st->done_down;
        wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
        struct completion *done = bringup ? &st->done_up : &st->done_down;
        complete(done);
}

/*
 * The former STARTING/DYING states run with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
        return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}

/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
        percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

int cpus_read_trylock(void)
{
        return percpu_down_read_trylock(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_trylock);

void cpus_read_unlock(void)
{
        percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);
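
/*
 * Usage sketch (illustrative only, not part of this file): code that needs
 * a stable view of the online CPUs while it walks them holds the read side
 * of the hotplug lock. The helper below is hypothetical:
 *
 *      static void walk_online_cpus(void)
 *      {
 *              unsigned int cpu;
 *
 *              cpus_read_lock();
 *              for_each_online_cpu(cpu)
 *                      pr_info("CPU%u is online\n", cpu);
 *              cpus_read_unlock();
 *      }
 */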

void cpus_write_lock(void)
{
        percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
        percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
        /*
         * We can't have hotplug operations before userspace starts running,
         * and some init codepaths will knowingly not take the hotplug lock.
         * This is all valid, so mute lockdep until it makes sense to report
         * unheld locks.
         */
        if (system_state < SYSTEM_RUNNING)
                return;

        percpu_rwsem_assert_held(&cpu_hotplug_lock);
}

static void lockdep_acquire_cpus_lock(void)
{
        rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
}

static void lockdep_release_cpus_lock(void)
{
        rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled++;
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
        if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
                return;
        cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
        cpu_maps_update_begin();
        __cpu_hotplug_enable();
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
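
/*
 * Usage sketch (illustrative only): cpu_hotplug_disable() nests, so each
 * call must be paired with exactly one cpu_hotplug_enable(). A hypothetical
 * caller that must keep the CPU topology fixed across an operation would do:
 *
 *      cpu_hotplug_disable();
 *      do_something_that_must_not_race_with_hotplug();  // hypothetical
 *      cpu_hotplug_enable();
 *
 * Unlike cpus_read_lock(), this does not block concurrent hotplug attempts;
 * it makes them fail with -EBUSY until the matching enable.
 */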

#else

static void lockdep_acquire_cpus_lock(void)
{
}

static void lockdep_release_cpus_lock(void)
{
}

#endif  /* CONFIG_HOTPLUG_CPU */

/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;

void __init cpu_smt_disable(bool force)
{
        if (!cpu_smt_possible())
                return;

        if (force) {
                pr_info("SMT: Force disabled\n");
                cpu_smt_control = CPU_SMT_FORCE_DISABLED;
        } else {
                pr_info("SMT: disabled\n");
                cpu_smt_control = CPU_SMT_DISABLED;
        }
}

/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code.
 */
void __init cpu_smt_check_topology(void)
{
        if (!topology_smt_supported())
                cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

static int __init smt_cmdline_disable(char *str)
{
        cpu_smt_disable(str && !strcmp(str, "force"));
        return 0;
}
early_param("nosmt", smt_cmdline_disable);

static inline bool cpu_smt_allowed(unsigned int cpu)
{
        if (cpu_smt_control == CPU_SMT_ENABLED)
                return true;

        if (topology_is_primary_thread(cpu))
                return true;

        /*
         * On x86 it's required to boot all logical CPUs at least once so
         * that the init code can get a chance to set CR4.MCE on each
         * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
         * core will shutdown the machine.
         */
        return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
}

/* Returns true if SMT is not supported or forcefully (irreversibly) disabled */
bool cpu_smt_possible(void)
{
        return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
                cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
}
EXPORT_SYMBOL_GPL(cpu_smt_possible);
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif

static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
        enum cpuhp_state prev_state = st->state;

        st->rollback = false;
        st->last = NULL;

        st->target = target;
        st->single = false;
        st->bringup = st->state < target;

        return prev_state;
}

static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
        st->rollback = true;

        /*
         * If we have st->last we need to undo partial multi_instance of this
         * state first. Otherwise start undo at the previous state.
         */
        if (!st->last) {
                if (st->bringup)
                        st->state--;
                else
                        st->state++;
        }

        st->target = prev_state;
        st->bringup = !st->bringup;
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
        if (!st->single && st->state == st->target)
                return;

        st->result = 0;
        /*
         * Make sure the above stores are visible before should_run becomes
         * true. Paired with the smp_mb() in cpuhp_thread_fun().
         */
        smp_mb();
        st->should_run = true;
        wake_up_process(st->thread);
        wait_for_ap_thread(st, st->bringup);
}

static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
        enum cpuhp_state prev_state;
        int ret;

        prev_state = cpuhp_set_state(st, target);
        __cpuhp_kick_ap(st);
        if ((ret = st->result)) {
                cpuhp_reset_state(st, prev_state);
                __cpuhp_kick_ap(st);
        }

        return ret;
}

static int bringup_wait_for_ap(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

        /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
        wait_for_ap_thread(st, true);
        if (WARN_ON_ONCE((!cpu_online(cpu))))
                return -ECANCELED;

        /* Unpark the hotplug thread of the target cpu */
        kthread_unpark(st->thread);

        /*
         * SMT soft disabling on X86 requires to bring the CPU out of the
         * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
         * CPU marked itself as booted_once in notify_cpu_starting() so the
         * cpu_smt_allowed() check will now return false if this is not the
         * primary sibling.
         */
        if (!cpu_smt_allowed(cpu))
                return -ECANCELED;

        if (st->target <= CPUHP_AP_ONLINE_IDLE)
                return 0;

        return cpuhp_kick_ap(st, st->target);
}

static int bringup_cpu(unsigned int cpu)
{
        struct task_struct *idle = idle_thread_get(cpu);
        int ret;

        /*
         * Some architectures have to walk the irq descriptors to
         * setup the vector space for the cpu which comes online.
         * Prevent irq alloc/free across the bringup.
         */
        irq_lock_sparse();

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
        irq_unlock_sparse();
        if (ret)
                return ret;
        return bringup_wait_for_ap(cpu);
}

static int finish_cpu(unsigned int cpu)
{
        struct task_struct *idle = idle_thread_get(cpu);
        struct mm_struct *mm = idle->active_mm;

        /*
         * idle_task_exit() will have switched to &init_mm, now
         * clean up any remaining active_mm state.
         */
        if (mm != &init_mm)
                idle->active_mm = &init_mm;
        mmdrop(mm);
        return 0;
}

/*
 * Hotplug state machine related functions
 */

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
        for (st->state--; st->state > st->target; st->state--)
                cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
}

static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
{
        if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
                return true;
        /*
         * When CPU hotplug is disabled, then taking the CPU down is not
         * possible because takedown_cpu() and the architecture and
         * subsystem specific mechanisms are not available. So the CPU
         * which would be completely unplugged again needs to stay around
         * in the current state.
         */
        return st->state <= CPUHP_BRINGUP_CPU;
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                              enum cpuhp_state target)
{
        enum cpuhp_state prev_state = st->state;
        int ret = 0;

        while (st->state < target) {
                st->state++;
                ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
                if (ret) {
                        if (can_rollback_cpu(st)) {
                                st->target = prev_state;
                                undo_cpu_up(cpu, st);
                        }
                        break;
                }
        }
        return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

        init_completion(&st->done_up);
        init_completion(&st->done_down);
}

static int cpuhp_should_run(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

        return st->should_run;
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        bool bringup = st->bringup;
        enum cpuhp_state state;

        if (WARN_ON_ONCE(!st->should_run))
                return;

        /*
         * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
         * that if we see ->should_run we also see the rest of the state.
         */
        smp_mb();

        /*
         * The BP holds the hotplug lock, but we're now running on the AP,
         * ensure that anybody asserting the lock is held, will actually find
         * it so.
         */
        lockdep_acquire_cpus_lock();
        cpuhp_lock_acquire(bringup);

        if (st->single) {
                state = st->cb_state;
                st->should_run = false;
        } else {
                if (bringup) {
                        st->state++;
                        state = st->state;
                        st->should_run = (st->state < st->target);
                        WARN_ON_ONCE(st->state > st->target);
                } else {
                        state = st->state;
                        st->state--;
                        st->should_run = (st->state > st->target);
                        WARN_ON_ONCE(st->state < st->target);
                }
        }

        WARN_ON_ONCE(!cpuhp_is_ap_state(state));

        if (cpuhp_is_atomic_state(state)) {
                local_irq_disable();
                st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
                local_irq_enable();

                /*
                 * STARTING/DYING must not fail!
                 */
                WARN_ON_ONCE(st->result);
        } else {
                st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
        }

        if (st->result) {
                /*
                 * If we fail on a rollback, we're up a creek without a
                 * paddle, no way forward, no way back. We lose, thanks for
                 * playing.
                 */
                WARN_ON_ONCE(st->rollback);
                st->should_run = false;
        }

        cpuhp_lock_release(bringup);
        lockdep_release_cpus_lock();

        if (!st->should_run)
                complete_ap_thread(st, bringup);
}

/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
                         struct hlist_node *node)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int ret;

        if (!cpu_online(cpu))
                return 0;

        cpuhp_lock_acquire(false);
        cpuhp_lock_release(false);

        cpuhp_lock_acquire(true);
        cpuhp_lock_release(true);

        /*
         * If we are up and running, use the hotplug thread. For early calls
         * we invoke the thread function directly.
         */
        if (!st->thread)
                return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

        st->rollback = false;
        st->last = NULL;

        st->node = node;
        st->bringup = bringup;
        st->cb_state = state;
        st->single = true;

        __cpuhp_kick_ap(st);

        /*
         * If we failed and did a partial, do a rollback.
         */
        if ((ret = st->result) && st->last) {
                st->rollback = true;
                st->bringup = !bringup;

                __cpuhp_kick_ap(st);
        }

        /*
         * Clean up the leftovers so the next hotplug operation won't use stale
         * data.
         */
        st->node = st->last = NULL;
        return ret;
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        enum cpuhp_state prev_state = st->state;
        int ret;

        cpuhp_lock_acquire(false);
        cpuhp_lock_release(false);

        cpuhp_lock_acquire(true);
        cpuhp_lock_release(true);

        trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
        ret = cpuhp_kick_ap(st, st->target);
        trace_cpuhp_exit(cpu, st->state, prev_state, ret);

        return ret;
}

static struct smp_hotplug_thread cpuhp_threads = {
        .store                  = &cpuhp_state.thread,
        .create                 = &cpuhp_create,
        .thread_should_run      = cpuhp_should_run,
        .thread_fn              = cpuhp_thread_fun,
        .thread_comm            = "cpuhp/%u",
        .selfparking            = true,
};

void __init cpuhp_threads_init(void)
{
        BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
        kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        /*
         * This function is called after the cpu is taken down and marked
         * offline, so it's not like new tasks will ever get this cpu set in
         * their mm mask. -- Peter Zijlstra
         * Thus, we may use rcu_read_lock() here, instead of grabbing
         * full-fledged tasklist_lock.
         */
        WARN_ON(cpu_online(cpu));
        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /*
                 * Main thread might exit, but other threads may still have
                 * a valid mm. Find one.
                 */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
                task_unlock(t);
        }
        rcu_read_unlock();
}
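
/*
 * Illustrative sketch (hypothetical, simplified): architectures invoke
 * clear_tasks_mm_cpumask() from their teardown path once the CPU has been
 * marked offline, e.g. from a __cpu_disable() implementation:
 *
 *      int __cpu_disable(void)
 *      {
 *              unsigned int cpu = smp_processor_id();
 *
 *              set_cpu_online(cpu, false);
 *              // migrate IRQs away, flush per-cpu caches, ...
 *              clear_tasks_mm_cpumask(cpu);
 *              return 0;
 *      }
 */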

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
        int err, cpu = smp_processor_id();
        int ret;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        /*
         * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
         * do this step again.
         */
        WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
        st->state--;
        /* Invoke the former CPU_DYING callbacks */
        for (; st->state > target; st->state--) {
                ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
                /*
                 * DYING must not fail!
                 */
                WARN_ON_ONCE(ret);
        }

        /* Give up timekeeping duties */
        tick_handover_do_timer();
        /* Remove CPU from timer broadcasting */
        tick_offline_cpu(cpu);
        /* Park the stopper thread */
        stop_machine_park(cpu);
        return 0;
}

static int takedown_cpu(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int err;

        /* Park the smpboot threads */
        kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);

        /*
         * Prevent irq alloc/free while the dying cpu reorganizes the
         * interrupt affinities.
         */
        irq_lock_sparse();

        /*
         * So now all preempt/rcu users must observe !cpu_active().
         */
        err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
        if (err) {
                /* CPU refused to die */
                irq_unlock_sparse();
                /* Unpark the hotplug thread so we can rollback there */
                kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
                return err;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
         * all runnable tasks from the CPU, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        wait_for_ap_thread(st, false);
        BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

        /* Interrupts are moved away from the dying cpu, reenable alloc/free */
        irq_unlock_sparse();

        hotplug_cpu__broadcast_tick_pull(cpu);
        /* This actually kills the CPU. */
        __cpu_die(cpu);

        tick_cleanup_dead_cpu(cpu);
        rcutree_migrate_callbacks(cpu);
        return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
        struct cpuhp_cpu_state *st = arg;

        complete_ap_thread(st, false);
}

void cpuhp_report_idle_dead(void)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

        BUG_ON(st->state != CPUHP_AP_OFFLINE);
        rcu_report_dead(smp_processor_id());
        st->state = CPUHP_AP_IDLE_DEAD;
        /*
         * We cannot call complete after rcu_report_dead() so we delegate it
         * to an online cpu.
         */
        smp_call_function_single(cpumask_first(cpu_online_mask),
                                 cpuhp_complete_idle_dead, st, 0);
}

static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
        for (st->state++; st->state < st->target; st->state++)
                cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                                enum cpuhp_state target)
{
        enum cpuhp_state prev_state = st->state;
        int ret = 0;

        for (; st->state > target; st->state--) {
                ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
                if (ret) {
                        st->target = prev_state;
                        if (st->state < prev_state)
                                undo_cpu_down(cpu, st);
                        break;
                }
        }
        return ret;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
                           enum cpuhp_state target)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int prev_state, ret = 0;

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_present(cpu))
                return -EINVAL;

        cpus_write_lock();

        cpuhp_tasks_frozen = tasks_frozen;

        prev_state = cpuhp_set_state(st, target);
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread.
         */
        if (st->state > CPUHP_TEARDOWN_CPU) {
                st->target = max((int)target, CPUHP_TEARDOWN_CPU);
                ret = cpuhp_kick_ap_work(cpu);
                /*
                 * The AP side has done the error rollback already. Just
                 * return the error code.
                 */
                if (ret)
                        goto out;

                /*
                 * We might have stopped still in the range of the AP hotplug
                 * thread. Nothing to do anymore.
                 */
                if (st->state > CPUHP_TEARDOWN_CPU)
                        goto out;

                st->target = target;
        }
        /*
         * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
         * to do the further cleanups.
         */
        ret = cpuhp_down_callbacks(cpu, st, target);
        if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
                cpuhp_reset_state(st, prev_state);
                __cpuhp_kick_ap(st);
        }

out:
        cpus_write_unlock();
        /*
         * Do post unplug cleanup. This is still protected against
         * concurrent CPU hotplug via cpu_add_remove_lock.
         */
        lockup_detector_cleanup();
        arch_smt_update();
        return ret;
}

static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
        if (cpu_hotplug_disabled)
                return -EBUSY;
        return _cpu_down(cpu, 0, target);
}

static int cpu_down(unsigned int cpu, enum cpuhp_state target)
{
        int err;

        cpu_maps_update_begin();
        err = cpu_down_maps_locked(cpu, target);
        cpu_maps_update_done();
        return err;
}

/**
 * cpu_device_down - Bring down a cpu device
 * @dev: Pointer to the cpu device to offline
 *
 * This function is meant to be used by device core cpu subsystem only.
 *
 * Other subsystems should use remove_cpu() instead.
 */
int cpu_device_down(struct device *dev)
{
        return cpu_down(dev->id, CPUHP_OFFLINE);
}

int remove_cpu(unsigned int cpu)
{
        int ret;

        lock_device_hotplug();
        ret = device_offline(get_cpu_device(cpu));
        unlock_device_hotplug();

        return ret;
}
EXPORT_SYMBOL_GPL(remove_cpu);

void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
{
        unsigned int cpu;
        int error;

        cpu_maps_update_begin();

        /*
         * Make certain the cpu I'm about to reboot on is online.
         *
         * This is in line with what migrate_to_reboot_cpu() already does.
         */
        if (!cpu_online(primary_cpu))
                primary_cpu = cpumask_first(cpu_online_mask);

        for_each_online_cpu(cpu) {
                if (cpu == primary_cpu)
                        continue;

                error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
                if (error) {
                        pr_err("Failed to offline CPU%d - error=%d\n",
                                cpu, error);
                        break;
                }
        }

        /*
         * Ensure all but the reboot CPU are offline.
         */
        BUG_ON(num_online_cpus() > 1);

        /*
         * Make sure the CPUs won't be enabled by someone else after this
         * point. Kexec will reboot to a new kernel shortly resetting
         * everything along the way.
         */
        cpu_hotplug_disabled++;

        cpu_maps_update_done();
}

#else
#define takedown_cpu            NULL
#endif /* CONFIG_HOTPLUG_CPU */

/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
        int ret;

        rcu_cpu_starting(cpu);  /* Enables RCU usage on this CPU. */
        cpumask_set_cpu(cpu, &cpus_booted_once_mask);
        while (st->state < target) {
                st->state++;
                ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
                /*
                 * STARTING must not fail!
                 */
                WARN_ON_ONCE(ret);
        }
}
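
/*
 * Illustrative sketch (hypothetical, simplified): an architecture's
 * secondary CPU entry code calls notify_cpu_starting() before enabling
 * interrupts and before marking itself online, then enters the idle loop:
 *
 *      void secondary_start_kernel(void)
 *      {
 *              unsigned int cpu = smp_processor_id();
 *
 *              // low level and per-cpu setup ...
 *              notify_cpu_starting(cpu);
 *              set_cpu_online(cpu, true);
 *              local_irq_enable();
 *              cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 *      }
 */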

/*
 * Called from the idle task. Wake up the controlling task which brings the
 * hotplug thread of the upcoming CPU up and then delegates the rest of the
 * online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

        /* Happens for the boot cpu */
        if (state != CPUHP_AP_ONLINE_IDLE)
                return;

        /*
         * Unpark the stopper thread before we start the idle loop (and start
         * scheduling); this ensures the stopper task is always available.
         */
        stop_machine_unpark(smp_processor_id());

        st->state = CPUHP_AP_ONLINE_IDLE;
        complete_ap_thread(st, true);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        struct task_struct *idle;
        int ret = 0;

        cpus_write_lock();

        if (!cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * The caller of cpu_up() might have raced with another
         * caller. Nothing to do.
         */
        if (st->state >= target)
                goto out;

        if (st->state == CPUHP_OFFLINE) {
                /* Let it fail before we try to bring the cpu up */
                idle = idle_thread_get(cpu);
                if (IS_ERR(idle)) {
                        ret = PTR_ERR(idle);
                        goto out;
                }
        }

        cpuhp_tasks_frozen = tasks_frozen;

        cpuhp_set_state(st, target);
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread once more.
         */
        if (st->state > CPUHP_BRINGUP_CPU) {
                ret = cpuhp_kick_ap_work(cpu);
                /*
                 * The AP side has done the error rollback already. Just
                 * return the error code.
                 */
                if (ret)
                        goto out;
        }

        /*
         * Try to reach the target state. We max out on the BP at
         * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
         * responsible for bringing it up to the target state.
         */
        target = min((int)target, CPUHP_BRINGUP_CPU);
        ret = cpuhp_up_callbacks(cpu, st, target);
out:
        cpus_write_unlock();
        arch_smt_update();
        return ret;
}

static int cpu_up(unsigned int cpu, enum cpuhp_state target)
{
        int err = 0;

        if (!cpu_possible(cpu)) {
                pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
                       cpu);
#if defined(CONFIG_IA64)
                pr_err("please check additional_cpus= boot parameter\n");
#endif
                return -EINVAL;
        }

        err = try_online_node(cpu_to_node(cpu));
        if (err)
                return err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }
        if (!cpu_smt_allowed(cpu)) {
                err = -EPERM;
                goto out;
        }

        err = _cpu_up(cpu, 0, target);
out:
        cpu_maps_update_done();
        return err;
}

/**
 * cpu_device_up - Bring up a cpu device
 * @dev: Pointer to the cpu device to online
 *
 * This function is meant to be used by device core cpu subsystem only.
 *
 * Other subsystems should use add_cpu() instead.
 */
int cpu_device_up(struct device *dev)
{
        return cpu_up(dev->id, CPUHP_ONLINE);
}

int add_cpu(unsigned int cpu)
{
        int ret;

        lock_device_hotplug();
        ret = device_online(get_cpu_device(cpu));
        unlock_device_hotplug();

        return ret;
}
EXPORT_SYMBOL_GPL(add_cpu);
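
/*
 * Usage sketch (illustrative only): subsystems outside the cpu device core
 * cycle a CPU through offline and back online via remove_cpu()/add_cpu(),
 * which route through device_offline()/device_online() so the sysfs state
 * stays consistent:
 *
 *      ret = remove_cpu(cpu);          // take it offline
 *      if (!ret)
 *              ret = add_cpu(cpu);     // and bring it back
 */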

/**
 * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
 * @sleep_cpu: The cpu we hibernated on and should be brought up.
 *
 * On some architectures like arm64, we can hibernate on any CPU, but on
 * wake up the CPU we hibernated on might be offline as a side effect of
 * using maxcpus= for example.
 */
int bringup_hibernate_cpu(unsigned int sleep_cpu)
{
        int ret;

        if (!cpu_online(sleep_cpu)) {
                pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
                ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
                if (ret) {
                        pr_err("Failed to bring hibernate-CPU up!\n");
                        return ret;
                }
        }
        return 0;
}

void bringup_nonboot_cpus(unsigned int setup_max_cpus)
{
        unsigned int cpu;

        for_each_present_cpu(cpu) {
                if (num_online_cpus() >= setup_max_cpus)
                        break;
                if (!cpu_online(cpu))
                        cpu_up(cpu, CPUHP_ONLINE);
        }
}

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
        int cpu, error = 0;

        cpu_maps_update_begin();
        if (primary == -1) {
                primary = cpumask_first(cpu_online_mask);
                if (!housekeeping_cpu(primary, HK_FLAG_TIMER))
                        primary = housekeeping_any_cpu(HK_FLAG_TIMER);
        } else {
                if (!cpu_online(primary))
                        primary = cpumask_first(cpu_online_mask);
        }

        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
         */
        cpumask_clear(frozen_cpus);

        pr_info("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == primary)
                        continue;

                if (pm_wakeup_pending()) {
                        pr_info("Wakeup pending. Abort CPU freeze\n");
                        error = -EBUSY;
                        break;
                }

                trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
                error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
                trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        pr_err("Error taking CPU%d down: %d\n", cpu, error);
                        break;
                }
        }

        if (!error)
                BUG_ON(num_online_cpus() > 1);
        else
                pr_err("Non-boot CPUs are not disabled\n");

        /*
         * Make sure the CPUs won't be enabled by someone else. We need to do
         * this even in case of failure as all freeze_secondary_cpus() users are
         * supposed to do thaw_secondary_cpus() on the failure path.
         */
        cpu_hotplug_disabled++;

        cpu_maps_update_done();
        return error;
}

void __weak arch_thaw_secondary_cpus_begin(void)
{
}

void __weak arch_thaw_secondary_cpus_end(void)
{
}

void thaw_secondary_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        __cpu_hotplug_enable();
        if (cpumask_empty(frozen_cpus))
                goto out;

        pr_info("Enabling non-boot CPUs ...\n");

        arch_thaw_secondary_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                trace_suspend_resume(TPS("CPU_ON"), cpu, true);
                error = _cpu_up(cpu, 1, CPUHP_ONLINE);
                trace_suspend_resume(TPS("CPU_ON"), cpu, false);
                if (!error) {
                        pr_info("CPU%d is up\n", cpu);
                        continue;
                }
                pr_warn("Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_thaw_secondary_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
        /*
         * cpu_hotplug_pm_callback has higher priority than x86
         * bsp_pm_callback which depends on cpu_hotplug_pm_callback
         * to disable cpu hotplug to avoid cpu hotplug race.
         */
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

int __boot_cpu_id;

#endif /* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_hp_states[] = {
        [CPUHP_OFFLINE] = {
                .name                   = "offline",
                .startup.single         = NULL,
                .teardown.single        = NULL,
        },
#ifdef CONFIG_SMP
        [CPUHP_CREATE_THREADS] = {
                .name                   = "threads:prepare",
                .startup.single         = smpboot_create_threads,
                .teardown.single        = NULL,
                .cant_stop              = true,
        },
        [CPUHP_PERF_PREPARE] = {
                .name                   = "perf:prepare",
                .startup.single         = perf_event_init_cpu,
                .teardown.single        = perf_event_exit_cpu,
        },
        [CPUHP_WORKQUEUE_PREP] = {
                .name                   = "workqueue:prepare",
                .startup.single         = workqueue_prepare_cpu,
                .teardown.single        = NULL,
        },
        [CPUHP_HRTIMERS_PREPARE] = {
                .name                   = "hrtimers:prepare",
                .startup.single         = hrtimers_prepare_cpu,
                .teardown.single        = hrtimers_dead_cpu,
        },
        [CPUHP_SMPCFD_PREPARE] = {
                .name                   = "smpcfd:prepare",
                .startup.single         = smpcfd_prepare_cpu,
                .teardown.single        = smpcfd_dead_cpu,
        },
        [CPUHP_RELAY_PREPARE] = {
                .name                   = "relay:prepare",
                .startup.single         = relay_prepare_cpu,
                .teardown.single        = NULL,
        },
        [CPUHP_SLAB_PREPARE] = {
                .name                   = "slab:prepare",
                .startup.single         = slab_prepare_cpu,
                .teardown.single        = slab_dead_cpu,
        },
        [CPUHP_RCUTREE_PREP] = {
                .name                   = "RCU/tree:prepare",
                .startup.single         = rcutree_prepare_cpu,
                .teardown.single        = rcutree_dead_cpu,
        },
        /*
         * On the tear-down path, timers_dead_cpu() must be invoked
         * before blk_mq_queue_reinit_notify() from notify_dead(),
         * otherwise an RCU stall occurs.
         */
        [CPUHP_TIMERS_PREPARE] = {
                .name                   = "timers:prepare",
                .startup.single         = timers_prepare_cpu,
                .teardown.single        = timers_dead_cpu,
        },
        /* Kicks the plugged cpu into life */
        [CPUHP_BRINGUP_CPU] = {
                .name                   = "cpu:bringup",
                .startup.single         = bringup_cpu,
                .teardown.single        = finish_cpu,
                .cant_stop              = true,
        },
        /* Final state before CPU kills itself */
        [CPUHP_AP_IDLE_DEAD] = {
                .name                   = "idle:dead",
        },
        /*
         * Last state before CPU enters the idle loop to die. Transient state
         * for synchronization.
         */
        [CPUHP_AP_OFFLINE] = {
                .name                   = "ap:offline",
                .cant_stop              = true,
        },
        /* First state is scheduler control. Interrupts are disabled */
        [CPUHP_AP_SCHED_STARTING] = {
                .name                   = "sched:starting",
                .startup.single         = sched_cpu_starting,
                .teardown.single        = sched_cpu_dying,
        },
        [CPUHP_AP_RCUTREE_DYING] = {
                .name                   = "RCU/tree:dying",
                .startup.single         = NULL,
                .teardown.single        = rcutree_dying_cpu,
        },
        [CPUHP_AP_SMPCFD_DYING] = {
                .name                   = "smpcfd:dying",
                .startup.single         = NULL,
                .teardown.single        = smpcfd_dying_cpu,
        },
        /*
         * Entry state on starting. Interrupts enabled from here on. Transient
         * state for synchronization.
         */
        [CPUHP_AP_ONLINE] = {
                .name                   = "ap:online",
        },
        /*
         * Handled on control processor until the plugged processor manages
         * this itself.
         */
        [CPUHP_TEARDOWN_CPU] = {
                .name                   = "cpu:teardown",
                .startup.single         = NULL,
                .teardown.single        = takedown_cpu,
                .cant_stop              = true,
        },

        [CPUHP_AP_SCHED_WAIT_EMPTY] = {
                .name                   = "sched:waitempty",
                .startup.single         = NULL,
                .teardown.single        = sched_cpu_wait_empty,
        },

        /* Handle smpboot threads park/unpark */
        [CPUHP_AP_SMPBOOT_THREADS] = {
                .name                   = "smpboot/threads:online",
                .startup.single         = smpboot_unpark_threads,
                .teardown.single        = smpboot_park_threads,
        },
        [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
                .name                   = "irq/affinity:online",
                .startup.single         = irq_affinity_online_cpu,
                .teardown.single        = NULL,
        },
        [CPUHP_AP_PERF_ONLINE] = {
                .name                   = "perf:online",
                .startup.single         = perf_event_init_cpu,
                .teardown.single        = perf_event_exit_cpu,
        },
        [CPUHP_AP_WATCHDOG_ONLINE] = {
                .name                   = "lockup_detector:online",
                .startup.single         = lockup_detector_online_cpu,
                .teardown.single        = lockup_detector_offline_cpu,
        },
        [CPUHP_AP_WORKQUEUE_ONLINE] = {
                .name                   = "workqueue:online",
                .startup.single         = workqueue_online_cpu,
                .teardown.single        = workqueue_offline_cpu,
        },
        [CPUHP_AP_RCUTREE_ONLINE] = {
                .name                   = "RCU/tree:online",
                .startup.single         = rcutree_online_cpu,
                .teardown.single        = rcutree_offline_cpu,
        },
#endif
        /*
         * The dynamically registered state space is here
         */

#ifdef CONFIG_SMP
        /* Last state is scheduler control setting the cpu active */
        [CPUHP_AP_ACTIVE] = {
                .name                   = "sched:active",
                .startup.single         = sched_cpu_activate,
                .teardown.single        = sched_cpu_deactivate,
        },
#endif

        /* CPU is fully up and running. */
        [CPUHP_ONLINE] = {
                .name                   = "online",
                .startup.single         = NULL,
                .teardown.single        = NULL,
        },
};
1673
1674 /* Sanity check for callbacks */
1675 static int cpuhp_cb_check(enum cpuhp_state state)
1676 {
1677         if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1678                 return -EINVAL;
1679         return 0;
1680 }
1681
1682 /*
1683  * Returns a free slot for dynamic state assignment. The states are
1684  * protected by the cpuhp_state_mutex and an empty slot is identified
1685  * by having no name assigned.
1686  */
1687 static int cpuhp_reserve_state(enum cpuhp_state state)
1688 {
1689         enum cpuhp_state i, end;
1690         struct cpuhp_step *step;
1691
1692         switch (state) {
1693         case CPUHP_AP_ONLINE_DYN:
1694                 step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
1695                 end = CPUHP_AP_ONLINE_DYN_END;
1696                 break;
1697         case CPUHP_BP_PREPARE_DYN:
1698                 step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
1699                 end = CPUHP_BP_PREPARE_DYN_END;
1700                 break;
1701         default:
1702                 return -EINVAL;
1703         }
1704
1705         for (i = state; i <= end; i++, step++) {
1706                 if (!step->name)
1707                         return i;
1708         }
1709         WARN(1, "No more dynamic states available for CPU hotplug\n");
1710         return -ENOSPC;
1711 }
1712
1713 static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1714                                  int (*startup)(unsigned int cpu),
1715                                  int (*teardown)(unsigned int cpu),
1716                                  bool multi_instance)
1717 {
1718         /* (Un)Install the callbacks for further cpu hotplug operations */
1719         struct cpuhp_step *sp;
1720         int ret = 0;
1721
1722         /*
1723          * If name is NULL, then the state gets removed.
1724          *
1725          * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
1726          * the first allocation from these dynamic ranges, so the removal
1727          * would trigger a new allocation and clear the wrong (already
1728          * empty) state, leaving the callbacks of the to be cleared state
1729          * dangling, which causes wreckage on the next hotplug operation.
1730          */
1731         if (name && (state == CPUHP_AP_ONLINE_DYN ||
1732                      state == CPUHP_BP_PREPARE_DYN)) {
1733                 ret = cpuhp_reserve_state(state);
1734                 if (ret < 0)
1735                         return ret;
1736                 state = ret;
1737         }
1738         sp = cpuhp_get_step(state);
1739         if (name && sp->name)
1740                 return -EBUSY;
1741
1742         sp->startup.single = startup;
1743         sp->teardown.single = teardown;
1744         sp->name = name;
1745         sp->multi_instance = multi_instance;
1746         INIT_HLIST_HEAD(&sp->list);
1747         return ret;
1748 }
1749
1750 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1751 {
1752         return cpuhp_get_step(state)->teardown.single;
1753 }
1754
1755 /*
1756  * Call the startup/teardown function for a step either on the AP or
1757  * on the current CPU.
1758  */
1759 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1760                             struct hlist_node *node)
1761 {
1762         struct cpuhp_step *sp = cpuhp_get_step(state);
1763         int ret;
1764
1765         /*
1766          * If there's nothing to do, we're done.
1767          * Relies on the union for multi_instance.
1768          */
1769         if ((bringup && !sp->startup.single) ||
1770             (!bringup && !sp->teardown.single))
1771                 return 0;
1772         /*
1773          * The non-AP-bound callbacks can fail on bringup. On teardown,
1774          * e.g. during module removal, we crash for now.
1775          */
1776 #ifdef CONFIG_SMP
1777         if (cpuhp_is_ap_state(state))
1778                 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1779         else
1780                 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1781 #else
1782         ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1783 #endif
1784         BUG_ON(ret && !bringup);
1785         return ret;
1786 }
1787
1788 /*
1789  * Called from __cpuhp_setup_state on a recoverable failure.
1790  *
1791  * Note: The teardown callbacks for rollback are not allowed to fail!
1792  */
1793 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1794                                    struct hlist_node *node)
1795 {
1796         int cpu;
1797
1798         /* Roll back the already executed steps on the other cpus */
1799         for_each_present_cpu(cpu) {
1800                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1801                 int cpustate = st->state;
1802
1803                 if (cpu >= failedcpu)
1804                         break;
1805
1806                 /* Did we invoke the startup call on that cpu ? */
1807                 if (cpustate >= state)
1808                         cpuhp_issue_call(cpu, state, false, node);
1809         }
1810 }
1811
1812 int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
1813                                           struct hlist_node *node,
1814                                           bool invoke)
1815 {
1816         struct cpuhp_step *sp;
1817         int cpu;
1818         int ret;
1819
1820         lockdep_assert_cpus_held();
1821
1822         sp = cpuhp_get_step(state);
1823         if (!sp->multi_instance)
1824                 return -EINVAL;
1825
1826         mutex_lock(&cpuhp_state_mutex);
1827
1828         if (!invoke || !sp->startup.multi)
1829                 goto add_node;
1830
1831         /*
1832          * Try to call the startup callback for each present cpu
1833          * depending on the hotplug state of the cpu.
1834          */
1835         for_each_present_cpu(cpu) {
1836                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1837                 int cpustate = st->state;
1838
1839                 if (cpustate < state)
1840                         continue;
1841
1842                 ret = cpuhp_issue_call(cpu, state, true, node);
1843                 if (ret) {
1844                         if (sp->teardown.multi)
1845                                 cpuhp_rollback_install(cpu, state, node);
1846                         goto unlock;
1847                 }
1848         }
1849 add_node:
1850         ret = 0;
1851         hlist_add_head(node, &sp->list);
1852 unlock:
1853         mutex_unlock(&cpuhp_state_mutex);
1854         return ret;
1855 }
1856
1857 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1858                                bool invoke)
1859 {
1860         int ret;
1861
1862         cpus_read_lock();
1863         ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
1864         cpus_read_unlock();
1865         return ret;
1866 }
1867 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
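/*
 * A minimal usage sketch, not part of this file: how a driver might use
 * the multi-instance interface above through the real wrappers
 * cpuhp_setup_state_multi() and cpuhp_state_add_instance() from
 * <linux/cpuhotplug.h>. The "foo" driver, its struct and its callbacks
 * are hypothetical names for illustration.
 */
#if 0	/* illustration only */
struct foo_queue {			/* hypothetical per-device data */
	struct hlist_node node;		/* links this instance into sp->list */
};

static enum cpuhp_state foo_state;

static int foo_online(unsigned int cpu, struct hlist_node *node)
{
	struct foo_queue *q = hlist_entry(node, struct foo_queue, node);

	/* Per-instance bringup work for @cpu */
	pr_debug("foo: cpu %u instance %p online\n", cpu, q);
	return 0;
}

static int foo_offline(unsigned int cpu, struct hlist_node *node)
{
	/* Per-instance teardown; rollback teardowns must not fail */
	return 0;
}

static int foo_register(struct foo_queue *q)
{
	int ret;

	/* One dynamic state for the driver; instances hang off it */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "foo:online",
				      foo_online, foo_offline);
	if (ret < 0)
		return ret;
	foo_state = ret;
	/* Runs foo_online() on every CPU already at or past the state */
	return cpuhp_state_add_instance(foo_state, &q->node);
}
#endif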
1868
1869 /**
1870  * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
1871  * @state:              The state to set up
1872  * @invoke:             If true, the startup function is invoked for cpus where
1873  *                      cpu state >= @state
1874  * @startup:            startup callback function
1875  * @teardown:           teardown callback function
1876  * @multi_instance:     State is set up for multiple instances which get
1877  *                      added afterwards.
1878  *
1879  * The caller needs to hold cpus read locked while calling this function.
1880  * Returns:
1881  *   On success:
1882  *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
1883  *      0 for all other states
1884  *   On failure: proper (negative) error code
1885  */
1886 int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
1887                                    const char *name, bool invoke,
1888                                    int (*startup)(unsigned int cpu),
1889                                    int (*teardown)(unsigned int cpu),
1890                                    bool multi_instance)
1891 {
1892         int cpu, ret = 0;
1893         bool dynstate;
1894
1895         lockdep_assert_cpus_held();
1896
1897         if (cpuhp_cb_check(state) || !name)
1898                 return -EINVAL;
1899
1900         mutex_lock(&cpuhp_state_mutex);
1901
1902         ret = cpuhp_store_callbacks(state, name, startup, teardown,
1903                                     multi_instance);
1904
1905         dynstate = state == CPUHP_AP_ONLINE_DYN;
1906         if (ret > 0 && dynstate) {
1907                 state = ret;
1908                 ret = 0;
1909         }
1910
1911         if (ret || !invoke || !startup)
1912                 goto out;
1913
1914         /*
1915          * Try to call the startup callback for each present cpu
1916          * depending on the hotplug state of the cpu.
1917          */
1918         for_each_present_cpu(cpu) {
1919                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1920                 int cpustate = st->state;
1921
1922                 if (cpustate < state)
1923                         continue;
1924
1925                 ret = cpuhp_issue_call(cpu, state, true, NULL);
1926                 if (ret) {
1927                         if (teardown)
1928                                 cpuhp_rollback_install(cpu, state, NULL);
1929                         cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1930                         goto out;
1931                 }
1932         }
1933 out:
1934         mutex_unlock(&cpuhp_state_mutex);
1935         /*
1936          * If the requested state is CPUHP_AP_ONLINE_DYN, return the
1937          * dynamically allocated state in case of success.
1938          */
1939         if (!ret && dynstate)
1940                 return state;
1941         return ret;
1942 }
1943 EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
1944
1945 int __cpuhp_setup_state(enum cpuhp_state state,
1946                         const char *name, bool invoke,
1947                         int (*startup)(unsigned int cpu),
1948                         int (*teardown)(unsigned int cpu),
1949                         bool multi_instance)
1950 {
1951         int ret;
1952
1953         cpus_read_lock();
1954         ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
1955                                              teardown, multi_instance);
1956         cpus_read_unlock();
1957         return ret;
1958 }
1959 EXPORT_SYMBOL(__cpuhp_setup_state);
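/*
 * A minimal usage sketch, not part of this file: registering an ordinary
 * dynamic "online" state through the cpuhp_setup_state() wrapper from
 * <linux/cpuhotplug.h>. The "foo" names are hypothetical. With
 * CPUHP_AP_ONLINE_DYN the core reserves a free slot and returns its
 * number, which has to be kept for removal later.
 */
#if 0	/* illustration only */
static enum cpuhp_state foo_hp_state;

static int foo_cpu_online(unsigned int cpu)
{
	/* Invoked on bringup (on the hotplug thread of @cpu for AP states) */
	return 0;
}

static int foo_cpu_offline(unsigned int cpu)
{
	/* Inverse operation, invoked on teardown */
	return 0;
}

static int __init foo_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
				foo_cpu_online, foo_cpu_offline);
	if (ret < 0)
		return ret;
	foo_hp_state = ret;	/* dynamically allocated state number */
	return 0;
}
#endif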
1960
1961 int __cpuhp_state_remove_instance(enum cpuhp_state state,
1962                                   struct hlist_node *node, bool invoke)
1963 {
1964         struct cpuhp_step *sp = cpuhp_get_step(state);
1965         int cpu;
1966
1967         BUG_ON(cpuhp_cb_check(state));
1968
1969         if (!sp->multi_instance)
1970                 return -EINVAL;
1971
1972         cpus_read_lock();
1973         mutex_lock(&cpuhp_state_mutex);
1974
1975         if (!invoke || !cpuhp_get_teardown_cb(state))
1976                 goto remove;
1977         /*
1978          * Call the teardown callback for each present cpu depending
1979          * on the hotplug state of the cpu. This function is not
1980          * allowed to fail currently!
1981          */
1982         for_each_present_cpu(cpu) {
1983                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1984                 int cpustate = st->state;
1985
1986                 if (cpustate >= state)
1987                         cpuhp_issue_call(cpu, state, false, node);
1988         }
1989
1990 remove:
1991         hlist_del(node);
1992         mutex_unlock(&cpuhp_state_mutex);
1993         cpus_read_unlock();
1994
1995         return 0;
1996 }
1997 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
1998
1999 /**
2000  * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
2001  * @state:      The state to remove
2002  * @invoke:     If true, the teardown function is invoked for cpus where
2003  *              cpu state >= @state
2004  *
2005  * The caller needs to hold cpus read locked while calling this function.
2006  * The teardown callback is currently not allowed to fail. Think
2007  * about module removal!
2008  */
2009 void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
2010 {
2011         struct cpuhp_step *sp = cpuhp_get_step(state);
2012         int cpu;
2013
2014         BUG_ON(cpuhp_cb_check(state));
2015
2016         lockdep_assert_cpus_held();
2017
2018         mutex_lock(&cpuhp_state_mutex);
2019         if (sp->multi_instance) {
2020                 WARN(!hlist_empty(&sp->list),
2021                      "Error: Removing state %d which has instances left.\n",
2022                      state);
2023                 goto remove;
2024         }
2025
2026         if (!invoke || !cpuhp_get_teardown_cb(state))
2027                 goto remove;
2028
2029         /*
2030          * Call the teardown callback for each present cpu depending
2031          * on the hotplug state of the cpu. This function is not
2032          * allowed to fail currently!
2033          */
2034         for_each_present_cpu(cpu) {
2035                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2036                 int cpustate = st->state;
2037
2038                 if (cpustate >= state)
2039                         cpuhp_issue_call(cpu, state, false, NULL);
2040         }
2041 remove:
2042         cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2043         mutex_unlock(&cpuhp_state_mutex);
2044 }
2045 EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
2046
2047 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
2048 {
2049         cpus_read_lock();
2050         __cpuhp_remove_state_cpuslocked(state, invoke);
2051         cpus_read_unlock();
2052 }
2053 EXPORT_SYMBOL(__cpuhp_remove_state);
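/*
 * A minimal usage sketch, not part of this file: the counterpart to the
 * registration sketch above. cpuhp_remove_state() from
 * <linux/cpuhotplug.h> wraps the function above with invoke=true, so the
 * teardown callback runs on all CPUs at or past the state before the
 * slot is released. foo_hp_state is the hypothetical state number saved
 * at setup time.
 */
#if 0	/* illustration only */
static void __exit foo_exit(void)
{
	cpuhp_remove_state(foo_hp_state);
}
#endif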
2054
2055 #ifdef CONFIG_HOTPLUG_SMT
2056 static void cpuhp_offline_cpu_device(unsigned int cpu)
2057 {
2058         struct device *dev = get_cpu_device(cpu);
2059
2060         dev->offline = true;
2061         /* Tell user space about the state change */
2062         kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2063 }
2064
2065 static void cpuhp_online_cpu_device(unsigned int cpu)
2066 {
2067         struct device *dev = get_cpu_device(cpu);
2068
2069         dev->offline = false;
2070         /* Tell user space about the state change */
2071         kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2072 }
2073
2074 int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2075 {
2076         int cpu, ret = 0;
2077
2078         cpu_maps_update_begin();
2079         for_each_online_cpu(cpu) {
2080                 if (topology_is_primary_thread(cpu))
2081                         continue;
2082                 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2083                 if (ret)
2084                         break;
2085                 /*
2086                  * As this needs to hold the cpu maps lock, it's impossible
2087                  * to call device_offline() because that ends up calling
2088                  * cpu_down() which takes the cpu maps lock. The cpu maps lock
2089                  * needs to be held as this might race against in-kernel
2090                  * abusers of the hotplug machinery (thermal management).
2091                  *
2092                  * So nothing would update the device's offline state. That would
2093                  * leave the sysfs entry stale and prevent onlining after
2094                  * smt control has been changed to 'off' again. This is
2095                  * called under the sysfs hotplug lock, so it is properly
2096                  * serialized against the regular offline usage.
2097                  */
2098                 cpuhp_offline_cpu_device(cpu);
2099         }
2100         if (!ret)
2101                 cpu_smt_control = ctrlval;
2102         cpu_maps_update_done();
2103         return ret;
2104 }
2105
2106 int cpuhp_smt_enable(void)
2107 {
2108         int cpu, ret = 0;
2109
2110         cpu_maps_update_begin();
2111         cpu_smt_control = CPU_SMT_ENABLED;
2112         for_each_present_cpu(cpu) {
2113                 /* Skip online CPUs and CPUs on offline nodes */
2114                 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2115                         continue;
2116                 ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2117                 if (ret)
2118                         break;
2119                 /* See comment in cpuhp_smt_disable() */
2120                 cpuhp_online_cpu_device(cpu);
2121         }
2122         cpu_maps_update_done();
2123         return ret;
2124 }
2125 #endif
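/*
 * Editor's note, not part of this file: with CONFIG_HOTPLUG_SMT the two
 * functions above implement the transitions behind the
 * /sys/devices/system/cpu/smt/control file created further down, e.g.:
 *
 *	# echo off > /sys/devices/system/cpu/smt/control
 *	# cat /sys/devices/system/cpu/smt/active
 *	0
 *
 * "forceoff" additionally locks the setting; later writes fail with
 * -EPERM until the next boot.
 */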
2126
2127 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
2128 static ssize_t show_cpuhp_state(struct device *dev,
2129                                 struct device_attribute *attr, char *buf)
2130 {
2131         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2132
2133         return sprintf(buf, "%d\n", st->state);
2134 }
2135 static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
2136
2137 static ssize_t write_cpuhp_target(struct device *dev,
2138                                   struct device_attribute *attr,
2139                                   const char *buf, size_t count)
2140 {
2141         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2142         struct cpuhp_step *sp;
2143         int target, ret;
2144
2145         ret = kstrtoint(buf, 10, &target);
2146         if (ret)
2147                 return ret;
2148
2149 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
2150         if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
2151                 return -EINVAL;
2152 #else
2153         if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
2154                 return -EINVAL;
2155 #endif
2156
2157         ret = lock_device_hotplug_sysfs();
2158         if (ret)
2159                 return ret;
2160
2161         mutex_lock(&cpuhp_state_mutex);
2162         sp = cpuhp_get_step(target);
2163         ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
2164         mutex_unlock(&cpuhp_state_mutex);
2165         if (ret)
2166                 goto out;
2167
2168         if (st->state < target)
2169                 ret = cpu_up(dev->id, target);
2170         else
2171                 ret = cpu_down(dev->id, target);
2172 out:
2173         unlock_device_hotplug();
2174         return ret ? ret : count;
2175 }
2176
2177 static ssize_t show_cpuhp_target(struct device *dev,
2178                                  struct device_attribute *attr, char *buf)
2179 {
2180         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2181
2182         return sprintf(buf, "%d\n", st->target);
2183 }
2184 static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
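/*
 * Editor's note, not part of this file: the target attribute lets user
 * space drive a CPU towards an arbitrary non-atomic state when
 * CONFIG_CPU_HOTPLUG_STATE_CONTROL is enabled; otherwise only the full
 * offline (CPUHP_OFFLINE == 0) and online transitions are accepted. The
 * valid state numbers and names can be read from the root states file,
 * e.g.:
 *
 *	# cat /sys/devices/system/cpu/hotplug/states
 *	# echo 0 > /sys/devices/system/cpu/cpu4/hotplug/target
 */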
2185
2186
2187 static ssize_t write_cpuhp_fail(struct device *dev,
2188                                 struct device_attribute *attr,
2189                                 const char *buf, size_t count)
2190 {
2191         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2192         struct cpuhp_step *sp;
2193         int fail, ret;
2194
2195         ret = kstrtoint(buf, 10, &fail);
2196         if (ret)
2197                 return ret;
2198
2199         if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
2200                 return -EINVAL;
2201
2202         /*
2203          * Cannot fail STARTING/DYING callbacks.
2204          */
2205         if (cpuhp_is_atomic_state(fail))
2206                 return -EINVAL;
2207
2208         /*
2209          * Cannot fail anything that doesn't have callbacks.
2210          */
2211         mutex_lock(&cpuhp_state_mutex);
2212         sp = cpuhp_get_step(fail);
2213         if (!sp->startup.single && !sp->teardown.single)
2214                 ret = -EINVAL;
2215         mutex_unlock(&cpuhp_state_mutex);
2216         if (ret)
2217                 return ret;
2218
2219         st->fail = fail;
2220
2221         return count;
2222 }
2223
2224 static ssize_t show_cpuhp_fail(struct device *dev,
2225                                struct device_attribute *attr, char *buf)
2226 {
2227         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2228
2229         return sprintf(buf, "%d\n", st->fail);
2230 }
2231
2232 static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
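/*
 * Editor's note, not part of this file: the fail attribute is an
 * error-injection hook for testing the rollback machinery. Writing a
 * state number arms st->fail, and the next hotplug operation that
 * reaches that step on this CPU reports an error instead of running the
 * callback, exercising the rollback paths, e.g.:
 *
 *	# echo <state> > /sys/devices/system/cpu/cpu1/hotplug/fail
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
 */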
2233
2234 static struct attribute *cpuhp_cpu_attrs[] = {
2235         &dev_attr_state.attr,
2236         &dev_attr_target.attr,
2237         &dev_attr_fail.attr,
2238         NULL
2239 };
2240
2241 static const struct attribute_group cpuhp_cpu_attr_group = {
2242         .attrs = cpuhp_cpu_attrs,
2243         .name = "hotplug",
2245 };
2246
2247 static ssize_t show_cpuhp_states(struct device *dev,
2248                                  struct device_attribute *attr, char *buf)
2249 {
2250         ssize_t cur, res = 0;
2251         int i;
2252
2253         mutex_lock(&cpuhp_state_mutex);
2254         for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
2255                 struct cpuhp_step *sp = cpuhp_get_step(i);
2256
2257                 if (sp->name) {
2258                         cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2259                         buf += cur;
2260                         res += cur;
2261                 }
2262         }
2263         mutex_unlock(&cpuhp_state_mutex);
2264         return res;
2265 }
2266 static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
2267
2268 static struct attribute *cpuhp_cpu_root_attrs[] = {
2269         &dev_attr_states.attr,
2270         NULL
2271 };
2272
2273 static const struct attribute_group cpuhp_cpu_root_attr_group = {
2274         .attrs = cpuhp_cpu_root_attrs,
2275         .name = "hotplug",
2277 };
2278
2279 #ifdef CONFIG_HOTPLUG_SMT
2280
2281 static ssize_t
2282 __store_smt_control(struct device *dev, struct device_attribute *attr,
2283                     const char *buf, size_t count)
2284 {
2285         int ctrlval, ret;
2286
2287         if (sysfs_streq(buf, "on"))
2288                 ctrlval = CPU_SMT_ENABLED;
2289         else if (sysfs_streq(buf, "off"))
2290                 ctrlval = CPU_SMT_DISABLED;
2291         else if (sysfs_streq(buf, "forceoff"))
2292                 ctrlval = CPU_SMT_FORCE_DISABLED;
2293         else
2294                 return -EINVAL;
2295
2296         if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2297                 return -EPERM;
2298
2299         if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2300                 return -ENODEV;
2301
2302         ret = lock_device_hotplug_sysfs();
2303         if (ret)
2304                 return ret;
2305
2306         if (ctrlval != cpu_smt_control) {
2307                 switch (ctrlval) {
2308                 case CPU_SMT_ENABLED:
2309                         ret = cpuhp_smt_enable();
2310                         break;
2311                 case CPU_SMT_DISABLED:
2312                 case CPU_SMT_FORCE_DISABLED:
2313                         ret = cpuhp_smt_disable(ctrlval);
2314                         break;
2315                 }
2316         }
2317
2318         unlock_device_hotplug();
2319         return ret ? ret : count;
2320 }
2321
2322 #else /* !CONFIG_HOTPLUG_SMT */
2323 static ssize_t
2324 __store_smt_control(struct device *dev, struct device_attribute *attr,
2325                     const char *buf, size_t count)
2326 {
2327         return -ENODEV;
2328 }
2329 #endif /* CONFIG_HOTPLUG_SMT */
2330
2331 static const char *smt_states[] = {
2332         [CPU_SMT_ENABLED]               = "on",
2333         [CPU_SMT_DISABLED]              = "off",
2334         [CPU_SMT_FORCE_DISABLED]        = "forceoff",
2335         [CPU_SMT_NOT_SUPPORTED]         = "notsupported",
2336         [CPU_SMT_NOT_IMPLEMENTED]       = "notimplemented",
2337 };
2338
2339 static ssize_t
2340 show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
2341 {
2342         const char *state = smt_states[cpu_smt_control];
2343
2344         return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
2345 }
2346
2347 static ssize_t
2348 store_smt_control(struct device *dev, struct device_attribute *attr,
2349                   const char *buf, size_t count)
2350 {
2351         return __store_smt_control(dev, attr, buf, count);
2352 }
2353 static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
2354
2355 static ssize_t
2356 show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
2357 {
2358         return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
2359 }
2360 static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
2361
2362 static struct attribute *cpuhp_smt_attrs[] = {
2363         &dev_attr_control.attr,
2364         &dev_attr_active.attr,
2365         NULL
2366 };
2367
2368 static const struct attribute_group cpuhp_smt_attr_group = {
2369         .attrs = cpuhp_smt_attrs,
2370         .name = "smt",
2372 };
2373
2374 static int __init cpu_smt_sysfs_init(void)
2375 {
2376         return sysfs_create_group(&cpu_subsys.dev_root->kobj,
2377                                   &cpuhp_smt_attr_group);
2378 }
2379
2380 static int __init cpuhp_sysfs_init(void)
2381 {
2382         int cpu, ret;
2383
2384         ret = cpu_smt_sysfs_init();
2385         if (ret)
2386                 return ret;
2387
2388         ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2389                                  &cpuhp_cpu_root_attr_group);
2390         if (ret)
2391                 return ret;
2392
2393         for_each_possible_cpu(cpu) {
2394                 struct device *dev = get_cpu_device(cpu);
2395
2396                 if (!dev)
2397                         continue;
2398                 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2399                 if (ret)
2400                         return ret;
2401         }
2402         return 0;
2403 }
2404 device_initcall(cpuhp_sysfs_init);
2405 #endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
2406
2407 /*
2408  * cpu_bit_bitmap[] is a special, "compressed" data structure that
2409  * represents the NR_CPUS-bit binary values of 1 << nr.
2410  *
2411  * It is used by cpumask_of() to get a constant address to a CPU
2412  * mask value that has a single bit set only.
2413  */
2414
2415 /* cpu_bit_bitmap[0] is empty - so we can back into it */
2416 #define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
2417 #define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2418 #define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2419 #define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
2420
2421 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
2422
2423         MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
2424         MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
2425 #if BITS_PER_LONG > 32
2426         MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
2427         MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
2428 #endif
2429 };
2430 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
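/*
 * A minimal usage sketch, not part of this file: how the table above is
 * consumed. cpumask_of(cpu) from <linux/cpumask.h> picks the row whose
 * first word has bit (cpu % BITS_PER_LONG) set and steps the pointer
 * back by cpu / BITS_PER_LONG words; since every other word (including
 * all of row 0) is zero, the window seen through the returned pointer
 * has exactly bit @cpu set, with no allocation needed.
 */
#if 0	/* illustration only */
static void foo_single_cpu_mask(unsigned int cpu)
{
	const struct cpumask *mask = cpumask_of(cpu);

	/* A constant, read-only mask with exactly one bit set */
	WARN_ON(!cpumask_test_cpu(cpu, mask));
	WARN_ON(cpumask_weight(mask) != 1);
}
#endif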
2431
2432 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2433 EXPORT_SYMBOL(cpu_all_bits);
2434
2435 #ifdef CONFIG_INIT_ALL_POSSIBLE
2436 struct cpumask __cpu_possible_mask __read_mostly
2437         = {CPU_BITS_ALL};
2438 #else
2439 struct cpumask __cpu_possible_mask __read_mostly;
2440 #endif
2441 EXPORT_SYMBOL(__cpu_possible_mask);
2442
2443 struct cpumask __cpu_online_mask __read_mostly;
2444 EXPORT_SYMBOL(__cpu_online_mask);
2445
2446 struct cpumask __cpu_present_mask __read_mostly;
2447 EXPORT_SYMBOL(__cpu_present_mask);
2448
2449 struct cpumask __cpu_active_mask __read_mostly;
2450 EXPORT_SYMBOL(__cpu_active_mask);
2451
2452 atomic_t __num_online_cpus __read_mostly;
2453 EXPORT_SYMBOL(__num_online_cpus);
2454
2455 void init_cpu_present(const struct cpumask *src)
2456 {
2457         cpumask_copy(&__cpu_present_mask, src);
2458 }
2459
2460 void init_cpu_possible(const struct cpumask *src)
2461 {
2462         cpumask_copy(&__cpu_possible_mask, src);
2463 }
2464
2465 void init_cpu_online(const struct cpumask *src)
2466 {
2467         cpumask_copy(&__cpu_online_mask, src);
2468 }
2469
2470 void set_cpu_online(unsigned int cpu, bool online)
2471 {
2472         /*
2473          * atomic_inc/dec() is required to handle the horrid abuse of this
2474          * function by the reboot and kexec code which invoke it from
2475          * IPI/NMI broadcasts when shutting down CPUs. Invocation from
2476          * regular CPU hotplug is properly serialized.
2477          *
2478          * Note that the fact that __num_online_cpus is of type atomic_t
2479          * does not protect readers which are not serialized against
2480          * concurrent hotplug operations.
2481          */
2482         if (online) {
2483                 if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
2484                         atomic_inc(&__num_online_cpus);
2485         } else {
2486                 if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
2487                         atomic_dec(&__num_online_cpus);
2488         }
2489 }
2490
2491 /*
2492  * Activate the first processor.
2493  */
2494 void __init boot_cpu_init(void)
2495 {
2496         int cpu = smp_processor_id();
2497
2498         /* Mark the boot cpu "present", "online" etc. for the SMP and UP cases */
2499         set_cpu_online(cpu, true);
2500         set_cpu_active(cpu, true);
2501         set_cpu_present(cpu, true);
2502         set_cpu_possible(cpu, true);
2503
2504 #ifdef CONFIG_SMP
2505         __boot_cpu_id = cpu;
2506 #endif
2507 }
2508
2509 /*
2510  * Must be called _AFTER_ setting up the per_cpu areas
2511  */
2512 void __init boot_cpu_hotplug_init(void)
2513 {
2514 #ifdef CONFIG_SMP
2515         cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
2516 #endif
2517         this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
2518 }
2519
2520 /*
2521  * These are used for a global "mitigations=" cmdline option for toggling
2522  * optional CPU mitigations.
2523  */
2524 enum cpu_mitigations {
2525         CPU_MITIGATIONS_OFF,
2526         CPU_MITIGATIONS_AUTO,
2527         CPU_MITIGATIONS_AUTO_NOSMT,
2528 };
2529
2530 static enum cpu_mitigations cpu_mitigations __ro_after_init =
2531         CPU_MITIGATIONS_AUTO;
2532
2533 static int __init mitigations_parse_cmdline(char *arg)
2534 {
2535         if (!strcmp(arg, "off"))
2536                 cpu_mitigations = CPU_MITIGATIONS_OFF;
2537         else if (!strcmp(arg, "auto"))
2538                 cpu_mitigations = CPU_MITIGATIONS_AUTO;
2539         else if (!strcmp(arg, "auto,nosmt"))
2540                 cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
2541         else
2542                 pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
2543                         arg);
2544
2545         return 0;
2546 }
2547 early_param("mitigations", mitigations_parse_cmdline);
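/*
 * Editor's note, not part of this file: the documented command line
 * forms accepted by the parser above are:
 *
 *	mitigations=off		disable all optional CPU mitigations
 *	mitigations=auto	mitigate, but keep SMT enabled (default)
 *	mitigations=auto,nosmt	mitigate and disable SMT when needed
 */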
2548
2549 /* mitigations=off */
2550 bool cpu_mitigations_off(void)
2551 {
2552         return cpu_mitigations == CPU_MITIGATIONS_OFF;
2553 }
2554 EXPORT_SYMBOL_GPL(cpu_mitigations_off);
2555
2556 /* mitigations=auto,nosmt */
2557 bool cpu_mitigations_auto_nosmt(void)
2558 {
2559         return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
2560 }
2561 EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);