// SPDX-License-Identifier: GPL-2.0
/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *
 *  based on other smp stuff by
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/irq_work.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/access-regs.h>
#include <asm/asm-offsets.h>
#include <asm/ctlreg.h>
#include <asm/pfault.h>
#include <asm/diag.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/abs_lowcore.h>
#include <asm/sclp.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
#include <asm/topology.h>
#include <asm/vdso.h>
#include <asm/maccess.h>
enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
	ec_mcck_pending,
	ec_irq_work,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};
static u8 boot_core_type;
DEFINE_PER_CPU(struct pcpu, pcpu_devices);
/*
 * Pointer to the pcpu area of the boot CPU. This is required when a restart
 * interrupt is triggered on an offline CPU. For that case accessing percpu
 * data with the common primitives does not work, since the percpu offset is
 * stored in a non-existent lowcore.
 */
static struct pcpu *ipl_pcpu;
unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;
cpumask_t cpu_setup_mask;
static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);
/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);
/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}
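
/*
 * Issue a sigp order to @pcpu and retry as long as the order is rejected
 * as busy, backing off with a small delay after a few attempts.
 */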
static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}
static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 status;

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}
static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}
/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (per_cpu(pcpu_devices, cpu).address == address)
			return &per_cpu(pcpu_devices, cpu);
	return NULL;
}
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, order, 0);
}
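
/*
 * Allocate and initialize the lowcore, the nodat/async/machine check
 * stacks and the machine check extended save area for a new CPU, then
 * make the lowcore visible to that CPU via the set-prefix sigp order.
 */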
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, nodat_stack, mcck_stack;
	struct lowcore *lc;

	lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	async_stack = stack_alloc();
	mcck_stack = stack_alloc();
	if (!lc || !nodat_stack || !async_stack || !mcck_stack)
		goto out;
	memcpy(lc, get_lowcore(), 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + STACK_INIT_OFFSET;
	lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
	lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
	lc->preempt_count = PREEMPT_DISABLED;
	if (nmi_alloc_mcesa(&lc->mcesad))
		goto out;
	if (abs_lowcore_map(cpu, lc, true))
		goto out_mcesa;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, __pa(lc));
	return 0;

out_mcesa:
	nmi_free_mcesa(&lc->mcesad);
out:
	stack_free(mcck_stack);
	stack_free(async_stack);
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages((unsigned long) lc, LC_ORDER);
	return -ENOMEM;
}
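
/*
 * Undo pcpu_alloc_lowcore(): detach the CPU from its lowcore by resetting
 * the prefix register to absolute zero, then free all associated memory.
 */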
static void pcpu_free_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, nodat_stack, mcck_stack;
	struct lowcore *lc;

	lc = lowcore_ptr[cpu];
	nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET;
	async_stack = lc->async_stack - STACK_INIT_OFFSET;
	mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[cpu] = NULL;
	abs_lowcore_unmap(cpu);
	nmi_free_mcesa(&lc->mcesad);
	stack_free(async_stack);
	stack_free(mcck_stack);
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages((unsigned long) lc, LC_ORDER);
}
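
/*
 * Initialize the lowcore of a secondary CPU with the state it needs to
 * enter the kernel: attach it to init_mm, copy the control register save
 * area from the boot CPU, and set up per-cpu and spinlock data.
 */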
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc, *abs_lc;

	lc = lowcore_ptr[cpu];
	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
	lc->pcpu = (unsigned long)pcpu;
	lc->restart_flags = RESTART_FLAG_CTLREGS;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = get_lowcore()->kernel_asce;
	lc->user_asce = s390_invalid_asce;
	lc->machine_flags = get_lowcore()->machine_flags;
	lc->user_timer = lc->system_timer =
		lc->steal_timer = lc->avg_steal_timer = 0;
	abs_lc = get_abs_lowcore();
	memcpy(lc->cregs_save_area, abs_lc->cregs_save_area, sizeof(lc->cregs_save_area));
	put_abs_lowcore(abs_lc);
	lc->cregs_save_area[1] = lc->kernel_asce;
	lc->cregs_save_area[7] = lc->user_asce;
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	arch_spin_lock_setup(cpu);
}
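
/*
 * Let the lowcore of a CPU point at the kernel stack and the CPU timer
 * state of the task that will run on it first (the idle task).
 */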
static void pcpu_attach_task(int cpu, struct task_struct *tsk)
{
	struct lowcore *lc;

	lc = lowcore_ptr[cpu];
	lc->kernel_stack = (unsigned long)task_stack_page(tsk) + STACK_INIT_OFFSET;
	lc->current_task = (unsigned long)tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = tsk->thread.user_timer;
	lc->guest_timer = tsk->thread.guest_timer;
	lc->system_timer = tsk->thread.system_timer;
	lc->hardirq_timer = tsk->thread.hardirq_timer;
	lc->softirq_timer = tsk->thread.softirq_timer;
	lc->steal_timer = 0;
}
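
/*
 * Kick a CPU into a given function: program the restart PSW data in its
 * lowcore and deliver a restart interrupt via sigp.
 */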
static void pcpu_start_fn(int cpu, void (*func)(void *), void *data)
{
	struct lowcore *lc;

	lc = lowcore_ptr[cpu];
	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1U;
	pcpu_sigp_retry(per_cpu_ptr(&pcpu_devices, cpu), SIGP_RESTART, 0);
}
typedef void (pcpu_delegate_fn)(void *);

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
{
	func(data);	/* should not return */
}
static void pcpu_delegate(struct pcpu *pcpu, int cpu,
			  pcpu_delegate_fn *func,
			  void *data, unsigned long stack)
{
	struct lowcore *lc, *abs_lc;
	unsigned int source_cpu;

	lc = lowcore_ptr[cpu];
	source_cpu = stap();

	if (pcpu->address == source_cpu) {
		call_on_stack(2, stack, void, __pcpu_delegate,
			      pcpu_delegate_fn *, func, void *, data);
	}
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	pcpu_sigp_retry(pcpu, SIGP_CPU_RESET, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	if (lc) {
		lc->restart_stack = stack;
		lc->restart_fn = (unsigned long)func;
		lc->restart_data = (unsigned long)data;
		lc->restart_source = source_cpu;
	} else {
		abs_lc = get_abs_lowcore();
		abs_lc->restart_stack = stack;
		abs_lc->restart_fn = (unsigned long)func;
		abs_lc->restart_data = (unsigned long)data;
		abs_lc->restart_source = source_cpu;
		put_abs_lowcore(abs_lc);
	}
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}
/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
	if (cc == 0) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		per_cpu(pcpu_devices, 0).address = stap();
	}
	return cc;
}
/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	struct lowcore *lc = lowcore_ptr[0];

	if (ipl_pcpu->address == stap())
		lc = get_lowcore();

	pcpu_delegate(ipl_pcpu, 0, func, data, lc->nodat_stack);
}
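
/*
 * Translate a physical cpu address into the logical cpu number.
 * Returns -1 if no present cpu matches.
 */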
int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (per_cpu(pcpu_devices, cpu).address == address)
			return cpu;
	return -1;
}
void schedule_mcck_handler(void)
{
	pcpu_ec_call(this_cpu_ptr(&pcpu_devices), ec_mcck_pending);
}
bool notrace arch_vcpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;
	if (pcpu_running(per_cpu_ptr(&pcpu_devices, cpu)))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);
void notrace smp_yield_cpu(int cpu)
{
	if (!MACHINE_HAS_DIAG9C)
		return;
	diag_stat_inc_norecursion(DIAG_STAT_X09C);
	asm volatile("diag %0,0,0x9c"
		     : : "d" (per_cpu(pcpu_devices, cpu).address));
}
EXPORT_SYMBOL_GPL(smp_yield_cpu);
/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void notrace smp_emergency_stop(void)
{
	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
	static cpumask_t cpumask;
	u64 end;
	int cpu;

	arch_spin_lock(&lock);
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu);

		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, &cpumask)
			if (pcpu_stopped(per_cpu_ptr(&pcpu_devices, cpu)))
				cpumask_clear_cpu(cpu, &cpumask);
		if (cpumask_empty(&cpumask))
			break;
		cpu_relax();
	}
	arch_spin_unlock(&lock);
}
NOKPROBE_SYMBOL(smp_emergency_stop);
/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	struct pcpu *pcpu;
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS);
	trace_hardirqs_off();

	debug_set_critical();

	if (oops_in_progress)
		smp_emergency_stop();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		pcpu = per_cpu_ptr(&pcpu_devices, cpu);
		pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu))
			cpu_relax();
	}
}
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = this_cpu_xchg(pcpu_devices.ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
	if (test_bit(ec_mcck_pending, &bits))
		s390_handle_mcck();
	if (test_bit(ec_irq_work, &bits))
		irq_work_run();
}
static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}
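
/*
 * Entry points used by the generic smp_call_function infrastructure to
 * signal one or more CPUs.
 */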
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single);
}
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void arch_smp_send_reschedule(int cpu)
{
	pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_schedule);
}
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	pcpu_ec_call(this_cpu_ptr(&pcpu_devices), ec_irq_work);
}
#endif
#ifdef CONFIG_CRASH_DUMP
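
/*
 * Store the register state and, if available, the additional
 * (vector/guarded storage) state of a CPU into its lowcore.
 */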
int smp_store_status(int cpu)
{
	struct lowcore *lc;
	struct pcpu *pcpu;
	unsigned long pa;

	pcpu = per_cpu_ptr(&pcpu_devices, cpu);
	lc = lowcore_ptr[cpu];
	pa = __pa(&lc->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!cpu_has_vx() && !MACHINE_HAS_GS)
		return 0;
	pa = lc->mcesad & MCESA_ORIGIN_MASK;
	if (MACHINE_HAS_GS)
		pa |= lc->mcesad & MCESA_LC_MASK;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}
/*
 * Collect CPU state of the previous, crashed system.
 * There are three cases:
 * 1) standard zfcp/nvme dump
 *    condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == false
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 *
 * Note that the legacy kdump mode where the old kernel stored the CPU states
 * no longer exists: setup_arch() explicitly deactivates the elfcorehdr=
 * kernel parameter. The is_kdump_kernel() implementation on s390 is independent
 * of the elfcorehdr= parameter.
 */
static bool dump_available(void)
{
	return oldmem_data.start || is_ipl_type_dump();
}
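
/*
 * Save the register and vector state of the boot CPU of the previous
 * system from the old memory.
 */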
void __init smp_save_dump_ipl_cpu(void)
{
	struct save_area *sa;
	void *regs;

	if (!dump_available())
		return;
	sa = save_area_alloc(true);
	regs = memblock_alloc(512, 8);
	if (!sa || !regs)
		panic("could not allocate memory for boot CPU save area\n");
	copy_oldmem_kernel(regs, __LC_FPREGS_SAVE_AREA, 512);
	save_area_add_regs(sa, regs);
	memblock_free(regs, 512);
	if (cpu_has_vx())
		save_area_add_vxrs(sa, boot_cpu_vector_save_area);
}
void __init smp_save_dump_secondary_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	void *page;

	if (!dump_available())
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!page)
		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
		      PAGE_SIZE, 1UL << 31);

	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (addr == boot_cpu_addr)
			continue;
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		sa = save_area_alloc(false);
		if (!sa)
			panic("could not allocate memory for save area\n");
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, __pa(page));
		save_area_add_regs(sa, page);
		if (cpu_has_vx()) {
			__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, __pa(page));
			save_area_add_vxrs(sa, page);
		}
	}
	memblock_free(page, PAGE_SIZE);
	diag_amode31_ops.diag308_reset();
	pcpu_set_smt(0);
}

#endif /* CONFIG_CRASH_DUMP */
void smp_cpu_set_polarization(int cpu, int val)
{
	per_cpu(pcpu_devices, cpu).polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return per_cpu(pcpu_devices, cpu).polarization;
}

void smp_cpu_set_capacity(int cpu, unsigned long val)
{
	per_cpu(pcpu_devices, cpu).capacity = val;
}

unsigned long smp_cpu_get_capacity(int cpu)
{
	return per_cpu(pcpu_devices, cpu).capacity;
}

void smp_set_core_capacity(int cpu, unsigned long val)
{
	int i;

	cpu = smp_get_base_cpu(cpu);
	for (i = cpu; (i <= cpu + smp_cpu_mtid) && (i < nr_cpu_ids); i++)
		smp_cpu_set_capacity(i, val);
}

int smp_cpu_get_cpu_address(int cpu)
{
	return per_cpu(pcpu_devices, cpu).address;
}
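
/*
 * Get the core layout from the SCLP. If that fails (or failed before),
 * fall back to probing all possible CPU addresses with sigp sense, and
 * stick to sigp detection for all subsequent scans.
 */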
static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
	static int use_sigp_detection;
	int address;

	if (use_sigp_detection || sclp_get_core_info(info, early)) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
}
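
/*
 * Add all SMT threads of one core as present logical CPUs, taking free
 * logical CPU numbers from @avail. Returns the number of CPUs added.
 */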
static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
			bool configured, bool early)
{
	struct pcpu *pcpu;
	int cpu, nr, i;
	u16 address;

	nr = 0;
	if (sclp.has_core_type && core->type != boot_core_type)
		return nr;
	cpu = cpumask_first(avail);
	address = core->core_id << smp_cpu_mt_shift;
	for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
		if (pcpu_find_address(cpu_present_mask, address + i))
			continue;
		pcpu = per_cpu_ptr(&pcpu_devices, cpu);
		pcpu->address = address + i;
		if (configured)
			pcpu->state = CPU_STATE_CONFIGURED;
		else
			pcpu->state = CPU_STATE_STANDBY;
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH);
		set_cpu_present(cpu, true);
		if (!early && arch_register_cpu(cpu))
			set_cpu_present(cpu, false);
		else
			nr++;
		cpumask_clear_cpu(cpu, avail);
		cpu = cpumask_next(cpu, avail);
	}
	return nr;
}
static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
{
	struct sclp_core_entry *core;
	static cpumask_t avail;
	bool configured;
	u16 core_id;
	int nr, i;

	cpus_read_lock();
	mutex_lock(&smp_cpu_state_mutex);
	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	/*
	 * Add IPL core first (which got logical CPU number 0) to make sure
	 * that all SMT threads get subsequent logical CPU numbers.
	 */
	if (early) {
		core_id = per_cpu(pcpu_devices, 0).address >> smp_cpu_mt_shift;
		for (i = 0; i < info->configured; i++) {
			core = &info->core[i];
			if (core->core_id == core_id) {
				nr += smp_add_core(core, &avail, true, early);
				break;
			}
		}
	}
	for (i = 0; i < info->combined; i++) {
		configured = i < info->configured;
		nr += smp_add_core(&info->core[i], &avail, configured, early);
	}
	mutex_unlock(&smp_cpu_state_mutex);
	cpus_read_unlock();
	return nr;
}
void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_alloc(sizeof(*info), 8);
	if (!info)
		panic("%s: Failed to allocate %zu bytes align=0x%x\n",
		      __func__, sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	memblock_free(info, sizeof(*info));
}
/*
 *	Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
	struct lowcore *lc = get_lowcore();
	int cpu = raw_smp_processor_id();

	lc->last_update_clock = get_tod_clock();
	lc->restart_stack = (unsigned long)restart_stack;
	lc->restart_fn = (unsigned long)do_restart;
	lc->restart_data = 0;
	lc->restart_source = -1U;
	lc->restart_flags = 0;
	restore_access_regs(lc->access_regs_save_area);
	cpu_init();
	rcutree_report_cpu_starting(cpu);
	init_cpu_timer();
	vtime_init();
	vdso_getcpu_init();
	pfault_init();
	cpumask_set_cpu(cpu, &cpu_setup_mask);
	update_cpu_masks();
	notify_cpu_starting(cpu);
	if (topology_cpu_dedicated(cpu))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
	set_cpu_online(cpu, true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu);
	int rc;

	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	/*
	 * Make sure global control register contents do not change
	 * until new CPU has initialized control registers.
	 */
	system_ctlreg_lock();
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(cpu, tidle);
	pcpu_start_fn(cpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	system_ctlreg_unlock();
	return 0;
}
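
/* Upper bound on the number of possible CPUs, set via possible_cpus= */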
static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);
int __cpu_disable(void)
{
	struct ctlreg cregs[16];
	int cpu;

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	cpu = smp_processor_id();
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, &cpu_setup_mask);
	update_cpu_masks();
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__local_ctl_store(0, 15, cregs);
	cregs[0].val &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6].val &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14].val &= ~0x1f000000UL;	/* disable most machine checks */
	__local_ctl_load(0, 15, cregs);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}
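
/*
 * Called on the CPU that requested the hot-unplug: wait until the dying
 * CPU has entered stopped state, then release its resources.
 */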
void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = per_cpu_ptr(&pcpu_devices, cpu);
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu, cpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}
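
/* The last code executed on the CPU that goes offline. */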
void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(this_cpu_ptr(&pcpu_devices), SIGP_STOP, 0);
	for (;;) ;
}
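
/*
 * Derive the number of possible CPUs from the SCLP limits, the selected
 * SMT level and the possible_cpus= kernel parameter.
 */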
void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	system_ctl_set_bit(0, 14);
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
	system_ctl_set_bit(0, 13);
	smp_rescan_cpus(true);
}
void __init smp_prepare_boot_cpu(void)
{
	struct lowcore *lc = get_lowcore();

	WARN_ON(!cpu_present(0) || !cpu_online(0));
	lc->percpu_offset = __per_cpu_offset[0];
	ipl_pcpu = per_cpu_ptr(&pcpu_devices, 0);
	ipl_pcpu->state = CPU_STATE_CONFIGURED;
	lc->pcpu = (unsigned long)ipl_pcpu;
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
	smp_cpu_set_capacity(0, CPU_CAPACITY_HIGH);
}
void __init smp_setup_processor_id(void)
{
	struct lowcore *lc = get_lowcore();

	lc->cpu_nr = 0;
	per_cpu(pcpu_devices, 0).address = stap();
	lc->spinlock_lockval = arch_spin_lockval(0);
	lc->spinlock_index = 0;
}
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
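
/* sysfs "configure" attribute: 1 = configured, 0 = standby */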
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sysfs_emit(buf, "%d\n", per_cpu(pcpu_devices, dev->id).state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	cpus_read_lock();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus */
	cpu = dev->id;
	cpu = smp_get_base_cpu(cpu);
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = per_cpu_ptr(&pcpu_devices, cpu);
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	cpus_read_unlock();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", per_cpu(pcpu_devices, dev->id).address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
static struct attribute *cpu_common_attrs[] = {
	&dev_attr_configure.attr,
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};
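
/* cpuhp callbacks that add/remove the online-only sysfs attributes */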
static int smp_cpu_online(unsigned int cpu)
{
	struct cpu *c = per_cpu_ptr(&cpu_devices, cpu);

	return sysfs_create_group(&c->dev.kobj, &cpu_online_attr_group);
}

static int smp_cpu_pre_down(unsigned int cpu)
{
	struct cpu *c = per_cpu_ptr(&cpu_devices, cpu);

	sysfs_remove_group(&c->dev.kobj, &cpu_online_attr_group);
	return 0;
}
bool arch_cpu_is_hotpluggable(int cpu)
{
	return !!cpu;
}
int arch_register_cpu(int cpu)
{
	struct cpu *c = per_cpu_ptr(&cpu_devices, cpu);
	int rc;

	c->hotpluggable = arch_cpu_is_hotpluggable(cpu);
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&c->dev.kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	sysfs_remove_group(&c->dev.kobj, &cpu_common_attr_group);
out_cpu:
	unregister_cpu(c);
out:
	return rc;
}
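
/*
 * Rescan the CPU configuration via the SCLP and register any cores that
 * showed up since the last scan.
 */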
int __ref smp_rescan_cpus(bool early)
{
	struct sclp_core_info *info;
	int nr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
	nr = __smp_rescan_cpus(info, early);
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}
static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = lock_device_hotplug_sysfs();
	if (rc)
		return rc;
	rc = smp_rescan_cpus(false);
	unlock_device_hotplug();
	return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);
static int __init s390_smp_init(void)
{
	struct device *dev_root;
	int rc;

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		rc = device_create_file(dev_root, &dev_attr_rescan);
		put_device(dev_root);
		if (rc)
			return rc;
	}
	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
			       smp_cpu_online, smp_cpu_pre_down);
	rc = rc <= 0 ? rc : 0;
	return rc;
}
subsys_initcall(s390_smp_init);