arch/s390/kernel/smp.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *               Martin Schwidefsky <[email protected]>,
 *               Heiko Carstens <[email protected]>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <[email protected]>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/irq_work.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
#include <asm/topology.h>
#include <asm/vdso.h>
#include "entry.h"

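/*
 * Bits in pcpu->ec_mask. Each bit requests one action from the target
 * cpu: pcpu_ec_call() sets a bit and delivers it with an external-call
 * or emergency-signal sigp order, smp_handle_ext_call() processes it.
 */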
enum {
        ec_schedule = 0,
        ec_call_function_single,
        ec_stop_cpu,
        ec_mcck_pending,
        ec_irq_work,
};

enum {
        CPU_STATE_STANDBY,
        CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
        unsigned long ec_mask;          /* bit mask for ec_xxx functions */
        unsigned long ec_clk;           /* sigp timestamp for ec_xxx */
        signed char state;              /* physical cpu state */
        signed char polarization;      /* physical polarization */
        u16 address;                    /* physical cpu address */
};

static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;
cpumask_t cpu_setup_mask;

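/*
 * "nosmt" limits the machine to one thread per core, "smt=<n>" to at
 * most <n> threads; smp_detect_cpus() uses smp_max_threads to cap the
 * multi-threading id passed to pcpu_set_smt().
 */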
static int __init early_nosmt(char *s)
{
        smp_max_threads = 1;
        return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
        get_option(&s, &smp_max_threads);
        return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
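/*
 * A sigp order completes with one of four condition codes: order code
 * accepted, status stored, busy, or not operational. The helpers below
 * retry orders that come back busy.
 */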
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
        int cc;

        while (1) {
                cc = __pcpu_sigp(addr, order, parm, NULL);
                if (cc != SIGP_CC_BUSY)
                        return cc;
                cpu_relax();
        }
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
        int cc, retry;

        for (retry = 0; ; retry++) {
                cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
                if (cc != SIGP_CC_BUSY)
                        break;
                if (retry >= 3)
                        udelay(10);
        }
        return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
        u32 status;

        if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
                        0, &status) != SIGP_CC_STATUS_STORED)
                return 0;
        return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
        if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
                        0, NULL) != SIGP_CC_STATUS_STORED)
                return 1;
        /* Status stored condition code is equivalent to cpu not running. */
        return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
        int cpu;

        for_each_cpu(cpu, mask)
                if (pcpu_devices[cpu].address == address)
                        return pcpu_devices + cpu;
        return NULL;
}

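/*
 * Request an ec_xxx action from a remote cpu: set the request bit in
 * its ec_mask and, unless that request was already pending, kick the
 * target with an external call (cpu running) or an emergency signal
 * (cpu waiting).
 */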
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
        int order;

        if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
                return;
        order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
        pcpu->ec_clk = get_tod_clock_fast();
        pcpu_sigp_retry(pcpu, order, 0);
}

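/*
 * Allocate lowcore, stacks and the machine check extended save area
 * for a new cpu, and make the lowcore known to the hardware by setting
 * the cpu's prefix register to it.
 */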
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
        unsigned long async_stack, nodat_stack, mcck_stack;
        struct lowcore *lc;

        lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
        nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
        async_stack = stack_alloc();
        mcck_stack = stack_alloc();
        if (!lc || !nodat_stack || !async_stack || !mcck_stack)
                goto out;
        memcpy(lc, &S390_lowcore, 512);
        memset((char *) lc + 512, 0, sizeof(*lc) - 512);
        lc->async_stack = async_stack + STACK_INIT_OFFSET;
        lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
        lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
        lc->cpu_nr = cpu;
        lc->spinlock_lockval = arch_spin_lockval(cpu);
        lc->spinlock_index = 0;
        lc->br_r1_trampoline = 0x07f1;  /* br %r1 */
        lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
        lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
        lc->preempt_count = PREEMPT_DISABLED;
        if (nmi_alloc_mcesa(&lc->mcesad))
                goto out;
        lowcore_ptr[cpu] = lc;
        pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
        return 0;

out:
        stack_free(mcck_stack);
        stack_free(async_stack);
        free_pages(nodat_stack, THREAD_SIZE_ORDER);
        free_pages((unsigned long) lc, LC_ORDER);
        return -ENOMEM;
}

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
        unsigned long async_stack, nodat_stack, mcck_stack;
        struct lowcore *lc;
        int cpu;

        cpu = pcpu - pcpu_devices;
        lc = lowcore_ptr[cpu];
        nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET;
        async_stack = lc->async_stack - STACK_INIT_OFFSET;
        mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
        pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
        lowcore_ptr[cpu] = NULL;
        nmi_free_mcesa(&lc->mcesad);
        stack_free(async_stack);
        stack_free(mcck_stack);
        free_pages(nodat_stack, THREAD_SIZE_ORDER);
        free_pages((unsigned long) lc, LC_ORDER);
}

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
        struct lowcore *lc = lowcore_ptr[cpu];

        cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
        cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
        lc->cpu_nr = cpu;
        lc->restart_flags = RESTART_FLAG_CTLREGS;
        lc->spinlock_lockval = arch_spin_lockval(cpu);
        lc->spinlock_index = 0;
        lc->percpu_offset = __per_cpu_offset[cpu];
        lc->kernel_asce = S390_lowcore.kernel_asce;
        lc->user_asce = s390_invalid_asce;
        lc->machine_flags = S390_lowcore.machine_flags;
        lc->user_timer = lc->system_timer =
                lc->steal_timer = lc->avg_steal_timer = 0;
        __ctl_store(lc->cregs_save_area, 0, 15);
        lc->cregs_save_area[1] = lc->kernel_asce;
        lc->cregs_save_area[7] = lc->user_asce;
        save_access_regs((unsigned int *) lc->access_regs_save_area);
        arch_spin_lock_setup(cpu);
}

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
        struct lowcore *lc;
        int cpu;

        cpu = pcpu - pcpu_devices;
        lc = lowcore_ptr[cpu];
        lc->kernel_stack = (unsigned long) task_stack_page(tsk)
                + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
        lc->current_task = (unsigned long) tsk;
        lc->lpp = LPP_MAGIC;
        lc->current_pid = tsk->pid;
        lc->user_timer = tsk->thread.user_timer;
        lc->guest_timer = tsk->thread.guest_timer;
        lc->system_timer = tsk->thread.system_timer;
        lc->hardirq_timer = tsk->thread.hardirq_timer;
        lc->softirq_timer = tsk->thread.softirq_timer;
        lc->steal_timer = 0;
}

static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
        struct lowcore *lc;
        int cpu;

        cpu = pcpu - pcpu_devices;
        lc = lowcore_ptr[cpu];
        lc->restart_stack = lc->kernel_stack;
        lc->restart_fn = (unsigned long) func;
        lc->restart_data = (unsigned long) data;
        lc->restart_source = -1U;
        pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

typedef void (pcpu_delegate_fn)(void *);

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
{
        func(data);     /* should not return */
}

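/*
 * If pcpu is the current cpu, func is called directly on the given
 * stack. Otherwise the target cpu is stopped, its restart parameters
 * are written into the absolute lowcore and a sigp restart / sigp stop
 * pair starts func there while the sending cpu stops itself.
 */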
static void pcpu_delegate(struct pcpu *pcpu,
                          pcpu_delegate_fn *func,
                          void *data, unsigned long stack)
{
        struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
        unsigned int source_cpu = stap();

        __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
        if (pcpu->address == source_cpu) {
                call_on_stack(2, stack, void, __pcpu_delegate,
                              pcpu_delegate_fn *, func, void *, data);
        }
        /* Stop target cpu (if func returns this stops the current cpu). */
        pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
        /* Restart func on the target cpu and stop the current cpu. */
        mem_assign_absolute(lc->restart_stack, stack);
        mem_assign_absolute(lc->restart_fn, (unsigned long) func);
        mem_assign_absolute(lc->restart_data, (unsigned long) data);
        mem_assign_absolute(lc->restart_source, source_cpu);
        __bpon();
        asm volatile(
                "0:     sigp    0,%0,%2 # sigp restart to target cpu\n"
                "       brc     2,0b    # busy, try again\n"
                "1:     sigp    0,%1,%3 # sigp stop to current cpu\n"
                "       brc     2,1b    # busy, try again\n"
                : : "d" (pcpu->address), "d" (source_cpu),
                    "K" (SIGP_RESTART), "K" (SIGP_STOP)
                : "0", "1", "cc");
        for (;;) ;
}

/*
 * Enable additional logical cpus for multi-threading.
 */
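/*
 * With the multi-threading facility a cpu address selects a thread
 * within a core: core id = address >> smp_cpu_mt_shift, thread id =
 * address & ((1 << smp_cpu_mt_shift) - 1). E.g. mtid 1 (two threads
 * per core) yields smp_cpu_mt_shift = 1.
 */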
static int pcpu_set_smt(unsigned int mtid)
{
        int cc;

        if (smp_cpu_mtid == mtid)
                return 0;
        cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
        if (cc == 0) {
                smp_cpu_mtid = mtid;
                smp_cpu_mt_shift = 0;
                while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
                        smp_cpu_mt_shift++;
                pcpu_devices[0].address = stap();
        }
        return cc;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
        struct pcpu *pcpu;

        /* Use the current cpu if it is online. */
        pcpu = pcpu_find_address(cpu_online_mask, stap());
        if (!pcpu)
                /* Use the first online cpu. */
                pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
        pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
        struct lowcore *lc = lowcore_ptr[0];

        if (pcpu_devices[0].address == stap())
                lc = &S390_lowcore;

        pcpu_delegate(&pcpu_devices[0], func, data,
                      lc->nodat_stack);
}

int smp_find_processor_id(u16 address)
{
        int cpu;

        for_each_present_cpu(cpu)
                if (pcpu_devices[cpu].address == address)
                        return cpu;
        return -1;
}

void schedule_mcck_handler(void)
{
        pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_mcck_pending);
}

bool notrace arch_vcpu_is_preempted(int cpu)
{
        if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
                return false;
        if (pcpu_running(pcpu_devices + cpu))
                return false;
        return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);

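/*
 * Tell the hypervisor to give up the current cpu's time slice in favor
 * of the target cpu (directed yield, diagnose 0x9c).
 */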
void notrace smp_yield_cpu(int cpu)
{
        if (!MACHINE_HAS_DIAG9C)
                return;
        diag_stat_inc_norecursion(DIAG_STAT_X09C);
        asm volatile("diag %0,0,0x9c"
                     : : "d" (pcpu_devices[cpu].address));
}
EXPORT_SYMBOL_GPL(smp_yield_cpu);

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
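/*
 * The timeout below is roughly one second: 1000000 microseconds,
 * shifted by 12 to convert to TOD clock units (1 us = 4096 units).
 */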
void notrace smp_emergency_stop(void)
{
        static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
        static cpumask_t cpumask;
        u64 end;
        int cpu;

        arch_spin_lock(&lock);
        cpumask_copy(&cpumask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &cpumask);

        end = get_tod_clock() + (1000000UL << 12);
        for_each_cpu(cpu, &cpumask) {
                struct pcpu *pcpu = pcpu_devices + cpu;
                set_bit(ec_stop_cpu, &pcpu->ec_mask);
                while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
                                   0, NULL) == SIGP_CC_BUSY &&
                       get_tod_clock() < end)
                        cpu_relax();
        }
        while (get_tod_clock() < end) {
                for_each_cpu(cpu, &cpumask)
                        if (pcpu_stopped(pcpu_devices + cpu))
                                cpumask_clear_cpu(cpu, &cpumask);
                if (cpumask_empty(&cpumask))
                        break;
                cpu_relax();
        }
        arch_spin_unlock(&lock);
}
NOKPROBE_SYMBOL(smp_emergency_stop);

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
        int cpu;

        /* Disable all interrupts/machine checks */
        __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
        trace_hardirqs_off();

        debug_set_critical();

        if (oops_in_progress)
                smp_emergency_stop();

        /* stop all processors */
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;
                pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
                while (!pcpu_stopped(pcpu_devices + cpu))
                        cpu_relax();
        }
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
        unsigned long bits;

        /* handle bit signal external calls */
        bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
        if (test_bit(ec_stop_cpu, &bits))
                smp_stop_cpu();
        if (test_bit(ec_schedule, &bits))
                scheduler_ipi();
        if (test_bit(ec_call_function_single, &bits))
                generic_smp_call_function_single_interrupt();
        if (test_bit(ec_mcck_pending, &bits))
                __s390_handle_mcck();
        if (test_bit(ec_irq_work, &bits))
                irq_work_run();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
                                  unsigned int param32, unsigned long param64)
{
        inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
        smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
        pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
        pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
        pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_irq_work);
}
#endif

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
        unsigned long orval;
        unsigned long andval;
        int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
        struct ec_creg_mask_parms *pp = info;
        unsigned long cregs[16];

        __ctl_store(cregs, 0, 15);
        cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
        __ctl_load(cregs, 0, 15);
}

static DEFINE_SPINLOCK(ctl_lock);
static unsigned long ctlreg;
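/*
 * ctl_lock serializes the read-modify-write updates of the control
 * register save area in the absolute lowcore, which keeps the
 * reference copy of the control register contents.
 */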

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
        struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

        spin_lock(&ctl_lock);
        memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
        __set_bit(bit, &ctlreg);
        memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
        spin_unlock(&ctl_lock);
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
        struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

        spin_lock(&ctl_lock);
        memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
        __clear_bit(bit, &ctlreg);
        memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
        spin_unlock(&ctl_lock);
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

#ifdef CONFIG_CRASH_DUMP

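/*
 * Store the register state of a cpu into its lowcore save areas for a
 * crash dump; with the vector or guarded storage facilities the
 * additional state goes into the machine check extended save area.
 */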
int smp_store_status(int cpu)
{
        struct lowcore *lc;
        struct pcpu *pcpu;
        unsigned long pa;

        pcpu = pcpu_devices + cpu;
        lc = lowcore_ptr[cpu];
        pa = __pa(&lc->floating_pt_save_area);
        if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
                              pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
                return -EIO;
        if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
                return 0;
        pa = lc->mcesad & MCESA_ORIGIN_MASK;
        if (MACHINE_HAS_GS)
                pa |= lc->mcesad & MCESA_LC_MASK;
        if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
                              pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
                return -EIO;
        return 0;
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp/nvme dump
 *    condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore, setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter
 */
static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
                                     bool is_boot_cpu, __vector128 *vxrs)
{
        if (is_boot_cpu)
                vxrs = boot_cpu_vector_save_area;
        else
                __pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, __pa(vxrs));
        save_area_add_vxrs(sa, vxrs);
}

static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
                                     bool is_boot_cpu, void *regs)
{
        if (is_boot_cpu)
                copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
        else
                __pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, __pa(regs));
        save_area_add_regs(sa, regs);
}

void __init smp_save_dump_cpus(void)
{
        int addr, boot_cpu_addr, max_cpu_addr;
        struct save_area *sa;
        bool is_boot_cpu;
        void *page;

        if (!(oldmem_data.start || is_ipl_type_dump()))
                /* No previous system present, normal boot. */
                return;
        /* Allocate a page as dumping area for the store status sigps */
        page = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
        if (!page)
                panic("ERROR: Failed to allocate %lx bytes below %lx\n",
                      PAGE_SIZE, 1UL << 31);

        /* Set multi-threading state to the previous system. */
        pcpu_set_smt(sclp.mtid_prev);
        boot_cpu_addr = stap();
        max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
        for (addr = 0; addr <= max_cpu_addr; addr++) {
                if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
                    SIGP_CC_NOT_OPERATIONAL)
                        continue;
                is_boot_cpu = (addr == boot_cpu_addr);
                /* Allocate save area */
                sa = save_area_alloc(is_boot_cpu);
                if (!sa)
                        panic("could not allocate memory for save area\n");
                if (MACHINE_HAS_VX)
                        /* Get the vector registers */
                        smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
                /*
                 * For a zfcp/nvme dump OLDMEM_BASE == NULL and the registers
                 * of the boot CPU are stored in the HSA. To retrieve
                 * these registers an SCLP request is required which is
                 * done by drivers/s390/char/zcore.c:init_cpu_info()
                 */
                if (!is_boot_cpu || oldmem_data.start)
                        /* Get the CPU registers */
                        smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
        }
        memblock_free(page, PAGE_SIZE);
        diag_amode31_ops.diag308_reset();
        pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
        pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
        return pcpu_devices[cpu].polarization;
}

int smp_cpu_get_cpu_address(int cpu)
{
        return pcpu_devices[cpu].address;
}

static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
        static int use_sigp_detection;
        int address;

        if (use_sigp_detection || sclp_get_core_info(info, early)) {
                use_sigp_detection = 1;
                for (address = 0;
                     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
                     address += (1U << smp_cpu_mt_shift)) {
                        if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
                            SIGP_CC_NOT_OPERATIONAL)
                                continue;
                        info->core[info->configured].core_id =
                                address >> smp_cpu_mt_shift;
                        info->configured++;
                }
                info->combined = info->configured;
        }
}

static int smp_add_present_cpu(int cpu);

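/*
 * Add one core, i.e. up to smp_cpu_mtid + 1 threads, to the present
 * mask, assigning consecutive logical cpu numbers. Returns the number
 * of logical cpus that were actually added.
 */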
static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
                        bool configured, bool early)
{
        struct pcpu *pcpu;
        int cpu, nr, i;
        u16 address;

        nr = 0;
        if (sclp.has_core_type && core->type != boot_core_type)
                return nr;
        cpu = cpumask_first(avail);
        address = core->core_id << smp_cpu_mt_shift;
        for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
                if (pcpu_find_address(cpu_present_mask, address + i))
                        continue;
                pcpu = pcpu_devices + cpu;
                pcpu->address = address + i;
                if (configured)
                        pcpu->state = CPU_STATE_CONFIGURED;
                else
                        pcpu->state = CPU_STATE_STANDBY;
                smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
                set_cpu_present(cpu, true);
                if (!early && smp_add_present_cpu(cpu) != 0)
                        set_cpu_present(cpu, false);
                else
                        nr++;
                cpumask_clear_cpu(cpu, avail);
                cpu = cpumask_next(cpu, avail);
        }
        return nr;
}

static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
{
        struct sclp_core_entry *core;
        static cpumask_t avail;
        bool configured;
        u16 core_id;
        int nr, i;

        cpus_read_lock();
        mutex_lock(&smp_cpu_state_mutex);
        nr = 0;
        cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
        /*
         * Add IPL core first (which got logical CPU number 0) to make sure
         * that all SMT threads get subsequent logical CPU numbers.
         */
        if (early) {
                core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
                for (i = 0; i < info->configured; i++) {
                        core = &info->core[i];
                        if (core->core_id == core_id) {
                                nr += smp_add_core(core, &avail, true, early);
                                break;
                        }
                }
        }
        for (i = 0; i < info->combined; i++) {
                configured = i < info->configured;
                nr += smp_add_core(&info->core[i], &avail, configured, early);
        }
        mutex_unlock(&smp_cpu_state_mutex);
        cpus_read_unlock();
        return nr;
}

void __init smp_detect_cpus(void)
{
        unsigned int cpu, mtid, c_cpus, s_cpus;
        struct sclp_core_info *info;
        u16 address;

        /* Get CPU information */
        info = memblock_alloc(sizeof(*info), 8);
        if (!info)
                panic("%s: Failed to allocate %zu bytes align=0x%x\n",
                      __func__, sizeof(*info), 8);
        smp_get_core_info(info, 1);
        /* Find boot CPU type */
        if (sclp.has_core_type) {
                address = stap();
                for (cpu = 0; cpu < info->combined; cpu++)
                        if (info->core[cpu].core_id == address) {
                                /* The boot cpu dictates the cpu type. */
                                boot_core_type = info->core[cpu].type;
                                break;
                        }
                if (cpu >= info->combined)
                        panic("Could not find boot CPU type");
        }

        /* Set multi-threading state for the current system */
        mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
        mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
        pcpu_set_smt(mtid);

        /* Print number of CPUs */
        c_cpus = s_cpus = 0;
        for (cpu = 0; cpu < info->combined; cpu++) {
                if (sclp.has_core_type &&
                    info->core[cpu].type != boot_core_type)
                        continue;
                if (cpu < info->configured)
                        c_cpus += smp_cpu_mtid + 1;
                else
                        s_cpus += smp_cpu_mtid + 1;
        }
        pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

        /* Add CPUs present at boot */
        __smp_rescan_cpus(info, true);
        memblock_free(info, sizeof(*info));
}

/*
 *      Activate a secondary processor.
 */
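/* Entry point of a new cpu, reached via sigp restart from pcpu_start_fn(). */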
static void smp_start_secondary(void *cpuvoid)
{
        int cpu = raw_smp_processor_id();

        S390_lowcore.last_update_clock = get_tod_clock();
        S390_lowcore.restart_stack = (unsigned long)restart_stack;
        S390_lowcore.restart_fn = (unsigned long)do_restart;
        S390_lowcore.restart_data = 0;
        S390_lowcore.restart_source = -1U;
        S390_lowcore.restart_flags = 0;
        restore_access_regs(S390_lowcore.access_regs_save_area);
        cpu_init();
        rcu_cpu_starting(cpu);
        init_cpu_timer();
        vtime_init();
        vdso_getcpu_init();
        pfault_init();
        cpumask_set_cpu(cpu, &cpu_setup_mask);
        update_cpu_masks();
        notify_cpu_starting(cpu);
        if (topology_cpu_dedicated(cpu))
                set_cpu_flag(CIF_DEDICATED_CPU);
        else
                clear_cpu_flag(CIF_DEDICATED_CPU);
        set_cpu_online(cpu, true);
        inc_irq_stat(CPU_RST);
        local_irq_enable();
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        struct pcpu *pcpu = pcpu_devices + cpu;
        int rc;

        if (pcpu->state != CPU_STATE_CONFIGURED)
                return -EIO;
        if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
            SIGP_CC_ORDER_CODE_ACCEPTED)
                return -EIO;

        rc = pcpu_alloc_lowcore(pcpu, cpu);
        if (rc)
                return rc;
        pcpu_prepare_secondary(pcpu, cpu);
        pcpu_attach_task(pcpu, tidle);
        pcpu_start_fn(pcpu, smp_start_secondary, NULL);
        /* Wait until cpu puts itself in the online & active maps */
        while (!cpu_online(cpu))
                cpu_relax();
        return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
        get_option(&s, &setup_possible_cpus);
        return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

int __cpu_disable(void)
{
        unsigned long cregs[16];
        int cpu;

        /* Handle possible pending IPIs */
        smp_handle_ext_call();
        cpu = smp_processor_id();
        set_cpu_online(cpu, false);
        cpumask_clear_cpu(cpu, &cpu_setup_mask);
        update_cpu_masks();
        /* Disable pseudo page faults on this cpu. */
        pfault_fini();
        /* Disable interrupt sources via control register. */
        __ctl_store(cregs, 0, 15);
        cregs[0]  &= ~0x0000ee70UL;     /* disable all external interrupts */
        cregs[6]  &= ~0xff000000UL;     /* disable all I/O interrupts */
        cregs[14] &= ~0x1f000000UL;     /* disable most machine checks */
        __ctl_load(cregs, 0, 15);
        clear_cpu_flag(CIF_NOHZ_DELAY);
        return 0;
}

void __cpu_die(unsigned int cpu)
{
        struct pcpu *pcpu;

        /* Wait until target cpu is down */
        pcpu = pcpu_devices + cpu;
        while (!pcpu_stopped(pcpu))
                cpu_relax();
        pcpu_free_lowcore(pcpu);
        cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
        cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
        idle_task_exit();
        __bpon();
        pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
        for (;;) ;
}

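/*
 * The number of possible cpus is the smallest of the "possible_cpus="
 * parameter, nr_cpu_ids and the sclp reported maximum number of cores
 * scaled by the configured thread count.
 */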
void __init smp_fill_possible_mask(void)
{
        unsigned int possible, sclp_max, cpu;

        sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
        sclp_max = min(smp_max_threads, sclp_max);
        sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
        possible = setup_possible_cpus ?: nr_cpu_ids;
        possible = min(possible, sclp_max);
        for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
                set_cpu_possible(cpu, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        /* request the 0x1201 emergency signal external interrupt */
        if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
                panic("Couldn't request external interrupt 0x1201");
        /* request the 0x1202 external call external interrupt */
        if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
                panic("Couldn't request external interrupt 0x1202");
}

void __init smp_prepare_boot_cpu(void)
{
        struct pcpu *pcpu = pcpu_devices;

        WARN_ON(!cpu_present(0) || !cpu_online(0));
        pcpu->state = CPU_STATE_CONFIGURED;
        S390_lowcore.percpu_offset = __per_cpu_offset[0];
        smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}

void __init smp_setup_processor_id(void)
{
        pcpu_devices[0].address = stap();
        S390_lowcore.cpu_nr = 0;
        S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
        S390_lowcore.spinlock_index = 0;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static ssize_t cpu_configure_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        ssize_t count;

        mutex_lock(&smp_cpu_state_mutex);
        count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
        mutex_unlock(&smp_cpu_state_mutex);
        return count;
}

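/*
 * Writing 0 deconfigures a standby-capable core, writing 1 configures
 * it. All threads of a core change state together; requests for cpu 0
 * or for cores with online threads are rejected with -EBUSY.
 */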
static ssize_t cpu_configure_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct pcpu *pcpu;
        int cpu, val, rc, i;
        char delim;

        if (sscanf(buf, "%d %c", &val, &delim) != 1)
                return -EINVAL;
        if (val != 0 && val != 1)
                return -EINVAL;
        cpus_read_lock();
        mutex_lock(&smp_cpu_state_mutex);
        rc = -EBUSY;
        /* disallow configuration changes of online cpus and cpu 0 */
        cpu = dev->id;
        cpu = smp_get_base_cpu(cpu);
        if (cpu == 0)
                goto out;
        for (i = 0; i <= smp_cpu_mtid; i++)
                if (cpu_online(cpu + i))
                        goto out;
        pcpu = pcpu_devices + cpu;
        rc = 0;
        switch (val) {
        case 0:
                if (pcpu->state != CPU_STATE_CONFIGURED)
                        break;
                rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
                if (rc)
                        break;
                for (i = 0; i <= smp_cpu_mtid; i++) {
                        if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
                                continue;
                        pcpu[i].state = CPU_STATE_STANDBY;
                        smp_cpu_set_polarization(cpu + i,
                                                 POLARIZATION_UNKNOWN);
                }
                topology_expect_change();
                break;
        case 1:
                if (pcpu->state != CPU_STATE_STANDBY)
                        break;
                rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
                if (rc)
                        break;
                for (i = 0; i <= smp_cpu_mtid; i++) {
                        if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
                                continue;
                        pcpu[i].state = CPU_STATE_CONFIGURED;
                        smp_cpu_set_polarization(cpu + i,
                                                 POLARIZATION_UNKNOWN);
                }
                topology_expect_change();
                break;
        default:
                break;
        }
out:
        mutex_unlock(&smp_cpu_state_mutex);
        cpus_read_unlock();
        return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);

static ssize_t show_cpu_address(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
        &dev_attr_configure.attr,
        &dev_attr_address.attr,
        NULL,
};

static struct attribute_group cpu_common_attr_group = {
        .attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
        &dev_attr_idle_count.attr,
        &dev_attr_idle_time_us.attr,
        NULL,
};

static struct attribute_group cpu_online_attr_group = {
        .attrs = cpu_online_attrs,
};

static int smp_cpu_online(unsigned int cpu)
{
        struct device *s = &per_cpu(cpu_device, cpu)->dev;

        return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}

static int smp_cpu_pre_down(unsigned int cpu)
{
        struct device *s = &per_cpu(cpu_device, cpu)->dev;

        sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
        return 0;
}

static int smp_add_present_cpu(int cpu)
{
        struct device *s;
        struct cpu *c;
        int rc;

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;
        per_cpu(cpu_device, cpu) = c;
        s = &c->dev;
        c->hotpluggable = 1;
        rc = register_cpu(c, cpu);
        if (rc)
                goto out;
        rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
        if (rc)
                goto out_cpu;
        rc = topology_cpu_init(c);
        if (rc)
                goto out_topology;
        return 0;

out_topology:
        sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
        unregister_cpu(c);
out:
        return rc;
}

int __ref smp_rescan_cpus(void)
{
        struct sclp_core_info *info;
        int nr;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
        smp_get_core_info(info, 0);
        nr = __smp_rescan_cpus(info, false);
        kfree(info);
        if (nr)
                topology_schedule_update();
        return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf,
                                  size_t count)
{
        int rc;

        rc = lock_device_hotplug_sysfs();
        if (rc)
                return rc;
        rc = smp_rescan_cpus();
        unlock_device_hotplug();
        return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);

static int __init s390_smp_init(void)
{
        int cpu, rc = 0;

        rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
        if (rc)
                return rc;
        for_each_present_cpu(cpu) {
                rc = smp_add_present_cpu(cpu);
                if (rc)
                        goto out;
        }

        rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
                               smp_cpu_online, smp_cpu_pre_down);
        rc = rc <= 0 ? rc : 0;
out:
        return rc;
}
subsys_initcall(s390_smp_init);

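/*
 * The IPL cpu initially runs on a memblock allocated lowcore and
 * stacks; smp_reinit_ipl_cpu() below replaces them with regularly
 * allocated ones. set_new_lowcore() copies the running lowcore and
 * switches the prefix register to the copy in one mvcl/spx sequence.
 */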
static __always_inline void set_new_lowcore(struct lowcore *lc)
{
        union register_pair dst, src;
        u32 pfx;

        src.even = (unsigned long) &S390_lowcore;
        src.odd  = sizeof(S390_lowcore);
        dst.even = (unsigned long) lc;
        dst.odd  = sizeof(*lc);
        pfx = (unsigned long) lc;

        asm volatile(
                "       mvcl    %[dst],%[src]\n"
                "       spx     %[pfx]\n"
                : [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
                : [pfx] "Q" (pfx)
                : "memory", "cc");
}

static int __init smp_reinit_ipl_cpu(void)
{
        unsigned long async_stack, nodat_stack, mcck_stack;
        struct lowcore *lc, *lc_ipl;
        unsigned long flags, cr0;
        u64 mcesad;

        lc_ipl = lowcore_ptr[0];
        lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
        nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
        async_stack = stack_alloc();
        mcck_stack = stack_alloc();
        if (!lc || !nodat_stack || !async_stack || !mcck_stack || nmi_alloc_mcesa(&mcesad))
                panic("Couldn't allocate memory");

        local_irq_save(flags);
        local_mcck_disable();
        set_new_lowcore(lc);
        S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET;
        S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET;
        S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET;
        __ctl_store(cr0, 0, 0);
        __ctl_clear_bit(0, 28); /* disable lowcore protection */
        S390_lowcore.mcesad = mcesad;
        __ctl_load(cr0, 0, 0);
        lowcore_ptr[0] = lc;
        local_mcck_enable();
        local_irq_restore(flags);

        free_pages(lc_ipl->async_stack - STACK_INIT_OFFSET, THREAD_SIZE_ORDER);
        memblock_free_late(lc_ipl->mcck_stack - STACK_INIT_OFFSET, THREAD_SIZE);
        memblock_free_late((unsigned long) lc_ipl, sizeof(*lc_ipl));

        return 0;
}
early_initcall(smp_reinit_ipl_cpu);