arch/powerpc/kernel/process.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Derived from "arch/i386/kernel/process.c"
4  *    Copyright (C) 1995  Linus Torvalds
5  *
6  *  Updated and modified by Cort Dougan ([email protected]) and
7  *  Paul Mackerras ([email protected])
8  *
9  *  PowerPC version
10  *    Copyright (C) 1995-1996 Gary Thomas ([email protected])
11  */
12
13 #include <linux/errno.h>
14 #include <linux/sched.h>
15 #include <linux/sched/debug.h>
16 #include <linux/sched/task.h>
17 #include <linux/sched/task_stack.h>
18 #include <linux/kernel.h>
19 #include <linux/mm.h>
20 #include <linux/smp.h>
21 #include <linux/stddef.h>
22 #include <linux/unistd.h>
23 #include <linux/ptrace.h>
24 #include <linux/slab.h>
25 #include <linux/user.h>
26 #include <linux/elf.h>
27 #include <linux/prctl.h>
28 #include <linux/init_task.h>
29 #include <linux/export.h>
30 #include <linux/kallsyms.h>
31 #include <linux/mqueue.h>
32 #include <linux/hardirq.h>
33 #include <linux/utsname.h>
34 #include <linux/ftrace.h>
35 #include <linux/kernel_stat.h>
36 #include <linux/personality.h>
37 #include <linux/random.h>
38 #include <linux/hw_breakpoint.h>
39 #include <linux/uaccess.h>
40 #include <linux/elf-randomize.h>
41 #include <linux/pkeys.h>
42 #include <linux/seq_buf.h>
43
44 #include <asm/interrupt.h>
45 #include <asm/io.h>
46 #include <asm/processor.h>
47 #include <asm/mmu.h>
48 #include <asm/prom.h>
49 #include <asm/machdep.h>
50 #include <asm/time.h>
51 #include <asm/runlatch.h>
52 #include <asm/syscalls.h>
53 #include <asm/switch_to.h>
54 #include <asm/tm.h>
55 #include <asm/debug.h>
56 #ifdef CONFIG_PPC64
57 #include <asm/firmware.h>
58 #include <asm/hw_irq.h>
59 #endif
60 #include <asm/code-patching.h>
61 #include <asm/exec.h>
62 #include <asm/livepatch.h>
63 #include <asm/cpu_has_feature.h>
64 #include <asm/asm-prototypes.h>
65 #include <asm/stacktrace.h>
66 #include <asm/hw_breakpoint.h>
67
68 #include <linux/kprobes.h>
69 #include <linux/kdebug.h>
70
71 /* Transactional Memory debug */
72 #ifdef TM_DEBUG_SW
73 #define TM_DEBUG(x...) printk(KERN_INFO x)
74 #else
75 #define TM_DEBUG(x...) do { } while (0)
76 #endif
77
78 extern unsigned long _get_SP(void);
79
80 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
81 /*
82  * Are we running in "Suspend disabled" mode? If so we have to block any
83  * sigreturn that would get us into suspended state, and we also warn in some
84  * other paths that we should never reach with suspend disabled.
85  */
86 bool tm_suspend_disabled __ro_after_init = false;
87
88 static void check_if_tm_restore_required(struct task_struct *tsk)
89 {
90         /*
91          * If we are saving the current thread's registers, and the
92          * thread is in a transactional state, set the TIF_RESTORE_TM
93          * bit so that we know to restore the registers before
94          * returning to userspace.
95          */
96         if (tsk == current && tsk->thread.regs &&
97             MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
98             !test_thread_flag(TIF_RESTORE_TM)) {
99                 regs_set_return_msr(&tsk->thread.ckpt_regs,
100                                                 tsk->thread.regs->msr);
101                 set_thread_flag(TIF_RESTORE_TM);
102         }
103 }
104
105 #else
106 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
107 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
108
109 bool strict_msr_control;
110 EXPORT_SYMBOL(strict_msr_control);
111
112 static int __init enable_strict_msr_control(char *str)
113 {
114         strict_msr_control = true;
115         pr_info("Enabling strict facility control\n");
116
117         return 0;
118 }
119 early_param("ppc_strict_facility_enable", enable_strict_msr_control);
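/*
 * Enabled at boot, e.g. by adding "ppc_strict_facility_enable" to the kernel
 * command line (the string registered with early_param() above).
 */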
120
121 /* notrace because it's called by restore_math */
122 unsigned long notrace msr_check_and_set(unsigned long bits)
123 {
124         unsigned long oldmsr = mfmsr();
125         unsigned long newmsr;
126
127         newmsr = oldmsr | bits;
128
129         if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
130                 newmsr |= MSR_VSX;
131
132         if (oldmsr != newmsr)
133                 mtmsr_isync(newmsr);
134
135         return newmsr;
136 }
137 EXPORT_SYMBOL_GPL(msr_check_and_set);
138
139 /* notrace because it's called by restore_math */
140 void notrace __msr_check_and_clear(unsigned long bits)
141 {
142         unsigned long oldmsr = mfmsr();
143         unsigned long newmsr;
144
145         newmsr = oldmsr & ~bits;
146
147         if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
148                 newmsr &= ~MSR_VSX;
149
150         if (oldmsr != newmsr)
151                 mtmsr_isync(newmsr);
152 }
153 EXPORT_SYMBOL(__msr_check_and_clear);
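/*
 * Illustrative usage of the two helpers above (this is the pattern
 * giveup_fpu() and friends follow below): briefly enable a facility in the
 * kernel's MSR around a register save sequence, then drop it again:
 *
 *	msr_check_and_set(MSR_FP);
 *	... save or touch the FP registers ...
 *	msr_check_and_clear(MSR_FP);
 */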
154
155 #ifdef CONFIG_PPC_FPU
156 static void __giveup_fpu(struct task_struct *tsk)
157 {
158         unsigned long msr;
159
160         save_fpu(tsk);
161         msr = tsk->thread.regs->msr;
162         msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
163         if (cpu_has_feature(CPU_FTR_VSX))
164                 msr &= ~MSR_VSX;
165         regs_set_return_msr(tsk->thread.regs, msr);
166 }
167
168 void giveup_fpu(struct task_struct *tsk)
169 {
170         check_if_tm_restore_required(tsk);
171
172         msr_check_and_set(MSR_FP);
173         __giveup_fpu(tsk);
174         msr_check_and_clear(MSR_FP);
175 }
176 EXPORT_SYMBOL(giveup_fpu);
177
178 /*
179  * Make sure the floating-point register state in the
180  * thread_struct is up to date for task tsk.
181  */
182 void flush_fp_to_thread(struct task_struct *tsk)
183 {
184         if (tsk->thread.regs) {
185                 /*
186                  * We need to disable preemption here because if we didn't,
187                  * another process could get scheduled after the regs->msr
188                  * test but before we have finished saving the FP registers
189                  * to the thread_struct.  That process could take over the
190                  * FPU, and then when we get scheduled again we would store
191                  * bogus values for the remaining FP registers.
192                  */
193                 preempt_disable();
194                 if (tsk->thread.regs->msr & MSR_FP) {
195                         /*
196                          * This should only ever be called for current or
197                          * for a stopped child process.  Since we save away
198                          * the FP register state on context switch,
199                          * there is something wrong if a stopped child appears
200                          * to still have its FP state in the CPU registers.
201                          */
202                         BUG_ON(tsk != current);
203                         giveup_fpu(tsk);
204                 }
205                 preempt_enable();
206         }
207 }
208 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
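/*
 * Callers such as the ptrace, signal and coredump paths are expected to use
 * flush_fp_to_thread() before reading or writing tsk->thread.fp_state, so
 * that the in-memory copy is current.
 */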
209
210 void enable_kernel_fp(void)
211 {
212         unsigned long cpumsr;
213
214         WARN_ON(preemptible());
215
216         cpumsr = msr_check_and_set(MSR_FP);
217
218         if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
219                 check_if_tm_restore_required(current);
220                 /*
221                  * If a thread has already been reclaimed then the
222                  * checkpointed registers are on the CPU but have definitely
223                  * been saved by the reclaim code. Don't need to and *cannot*
224                  * giveup as this would save to the 'live' structure, not the
225                  * checkpointed structure.
226                  */
227                 if (!MSR_TM_ACTIVE(cpumsr) &&
228                      MSR_TM_ACTIVE(current->thread.regs->msr))
229                         return;
230                 __giveup_fpu(current);
231         }
232 }
233 EXPORT_SYMBOL(enable_kernel_fp);
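/*
 * Illustrative caller sketch (not part of this file): kernel code that needs
 * the FPU temporarily is expected to wrap its use roughly as
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... use FP/VSX registers ...
 *	disable_kernel_fp();
 *	preempt_enable();
 *
 * with disable_kernel_fp() being the counterpart helper from
 * <asm/switch_to.h>; hence the WARN_ON(preemptible()) above.
 */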
234 #else
235 static inline void __giveup_fpu(struct task_struct *tsk) { }
236 #endif /* CONFIG_PPC_FPU */
237
238 #ifdef CONFIG_ALTIVEC
239 static void __giveup_altivec(struct task_struct *tsk)
240 {
241         unsigned long msr;
242
243         save_altivec(tsk);
244         msr = tsk->thread.regs->msr;
245         msr &= ~MSR_VEC;
246         if (cpu_has_feature(CPU_FTR_VSX))
247                 msr &= ~MSR_VSX;
248         regs_set_return_msr(tsk->thread.regs, msr);
249 }
250
251 void giveup_altivec(struct task_struct *tsk)
252 {
253         check_if_tm_restore_required(tsk);
254
255         msr_check_and_set(MSR_VEC);
256         __giveup_altivec(tsk);
257         msr_check_and_clear(MSR_VEC);
258 }
259 EXPORT_SYMBOL(giveup_altivec);
260
261 void enable_kernel_altivec(void)
262 {
263         unsigned long cpumsr;
264
265         WARN_ON(preemptible());
266
267         cpumsr = msr_check_and_set(MSR_VEC);
268
269         if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
270                 check_if_tm_restore_required(current);
271                 /*
272                  * If a thread has already been reclaimed then the
273                  * checkpointed registers are on the CPU but have definitely
274                  * been saved by the reclaim code. Don't need to and *cannot*
275                  * giveup as this would save to the 'live' structure, not the
276                  * checkpointed structure.
277                  */
278                 if (!MSR_TM_ACTIVE(cpumsr) &&
279                      MSR_TM_ACTIVE(current->thread.regs->msr))
280                         return;
281                 __giveup_altivec(current);
282         }
283 }
284 EXPORT_SYMBOL(enable_kernel_altivec);
285
286 /*
287  * Make sure the VMX/Altivec register state in the
288  * thread_struct is up to date for task tsk.
289  */
290 void flush_altivec_to_thread(struct task_struct *tsk)
291 {
292         if (tsk->thread.regs) {
293                 preempt_disable();
294                 if (tsk->thread.regs->msr & MSR_VEC) {
295                         BUG_ON(tsk != current);
296                         giveup_altivec(tsk);
297                 }
298                 preempt_enable();
299         }
300 }
301 EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
302 #endif /* CONFIG_ALTIVEC */
303
304 #ifdef CONFIG_VSX
305 static void __giveup_vsx(struct task_struct *tsk)
306 {
307         unsigned long msr = tsk->thread.regs->msr;
308
309         /*
310          * We should never be setting MSR_VSX without also setting
311          * MSR_FP and MSR_VEC
312          */
313         WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
314
315         /* __giveup_fpu will clear MSR_VSX */
316         if (msr & MSR_FP)
317                 __giveup_fpu(tsk);
318         if (msr & MSR_VEC)
319                 __giveup_altivec(tsk);
320 }
321
322 static void giveup_vsx(struct task_struct *tsk)
323 {
324         check_if_tm_restore_required(tsk);
325
326         msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
327         __giveup_vsx(tsk);
328         msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
329 }
330
331 void enable_kernel_vsx(void)
332 {
333         unsigned long cpumsr;
334
335         WARN_ON(preemptible());
336
337         cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
338
339         if (current->thread.regs &&
340             (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
341                 check_if_tm_restore_required(current);
342                 /*
343                  * If a thread has already been reclaimed then the
344                  * checkpointed registers are on the CPU but have definitely
345                  * been saved by the reclaim code. Don't need to and *cannot*
346                  * giveup as this would save to the 'live' structure, not the
347                  * checkpointed structure.
348                  */
349                 if (!MSR_TM_ACTIVE(cpumsr) &&
350                      MSR_TM_ACTIVE(current->thread.regs->msr))
351                         return;
352                 __giveup_vsx(current);
353         }
354 }
355 EXPORT_SYMBOL(enable_kernel_vsx);
356
357 void flush_vsx_to_thread(struct task_struct *tsk)
358 {
359         if (tsk->thread.regs) {
360                 preempt_disable();
361                 if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
362                         BUG_ON(tsk != current);
363                         giveup_vsx(tsk);
364                 }
365                 preempt_enable();
366         }
367 }
368 EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
369 #endif /* CONFIG_VSX */
370
371 #ifdef CONFIG_SPE
372 void giveup_spe(struct task_struct *tsk)
373 {
374         check_if_tm_restore_required(tsk);
375
376         msr_check_and_set(MSR_SPE);
377         __giveup_spe(tsk);
378         msr_check_and_clear(MSR_SPE);
379 }
380 EXPORT_SYMBOL(giveup_spe);
381
382 void enable_kernel_spe(void)
383 {
384         WARN_ON(preemptible());
385
386         msr_check_and_set(MSR_SPE);
387
388         if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
389                 check_if_tm_restore_required(current);
390                 __giveup_spe(current);
391         }
392 }
393 EXPORT_SYMBOL(enable_kernel_spe);
394
395 void flush_spe_to_thread(struct task_struct *tsk)
396 {
397         if (tsk->thread.regs) {
398                 preempt_disable();
399                 if (tsk->thread.regs->msr & MSR_SPE) {
400                         BUG_ON(tsk != current);
401                         tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
402                         giveup_spe(tsk);
403                 }
404                 preempt_enable();
405         }
406 }
407 #endif /* CONFIG_SPE */
408
409 static unsigned long msr_all_available;
410
411 static int __init init_msr_all_available(void)
412 {
413         if (IS_ENABLED(CONFIG_PPC_FPU))
414                 msr_all_available |= MSR_FP;
415         if (cpu_has_feature(CPU_FTR_ALTIVEC))
416                 msr_all_available |= MSR_VEC;
417         if (cpu_has_feature(CPU_FTR_VSX))
418                 msr_all_available |= MSR_VSX;
419         if (cpu_has_feature(CPU_FTR_SPE))
420                 msr_all_available |= MSR_SPE;
421
422         return 0;
423 }
424 early_initcall(init_msr_all_available);
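/*
 * For example, on a typical POWER9 system (FP, VMX and VSX present, no SPE)
 * msr_all_available ends up as MSR_FP | MSR_VEC | MSR_VSX.
 */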
425
426 void giveup_all(struct task_struct *tsk)
427 {
428         unsigned long usermsr;
429
430         if (!tsk->thread.regs)
431                 return;
432
433         check_if_tm_restore_required(tsk);
434
435         usermsr = tsk->thread.regs->msr;
436
437         if ((usermsr & msr_all_available) == 0)
438                 return;
439
440         msr_check_and_set(msr_all_available);
441
442         WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
443
444         if (usermsr & MSR_FP)
445                 __giveup_fpu(tsk);
446         if (usermsr & MSR_VEC)
447                 __giveup_altivec(tsk);
448         if (usermsr & MSR_SPE)
449                 __giveup_spe(tsk);
450
451         msr_check_and_clear(msr_all_available);
452 }
453 EXPORT_SYMBOL(giveup_all);
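/*
 * giveup_all() is the switch-out helper (see __switch_to() below): it saves
 * the state and also clears the facility bits from the user MSR.  Contrast
 * with save_all() further down, which saves FP/VMX state for
 * flush_all_to_thread() without clearing the user MSR bits.
 */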
454
455 #ifdef CONFIG_PPC_BOOK3S_64
456 #ifdef CONFIG_PPC_FPU
457 static bool should_restore_fp(void)
458 {
459         if (current->thread.load_fp) {
460                 current->thread.load_fp++;
461                 return true;
462         }
463         return false;
464 }
465
466 static void do_restore_fp(void)
467 {
468         load_fp_state(&current->thread.fp_state);
469 }
470 #else
471 static bool should_restore_fp(void) { return false; }
472 static void do_restore_fp(void) { }
473 #endif /* CONFIG_PPC_FPU */
474
475 #ifdef CONFIG_ALTIVEC
476 static bool should_restore_altivec(void)
477 {
478         if (cpu_has_feature(CPU_FTR_ALTIVEC) && (current->thread.load_vec)) {
479                 current->thread.load_vec++;
480                 return true;
481         }
482         return false;
483 }
484
485 static void do_restore_altivec(void)
486 {
487         load_vr_state(&current->thread.vr_state);
488         current->thread.used_vr = 1;
489 }
490 #else
491 static bool should_restore_altivec(void) { return false; }
492 static void do_restore_altivec(void) { }
493 #endif /* CONFIG_ALTIVEC */
494
495 static bool should_restore_vsx(void)
496 {
497         if (cpu_has_feature(CPU_FTR_VSX))
498                 return true;
499         return false;
500 }
501 #ifdef CONFIG_VSX
502 static void do_restore_vsx(void)
503 {
504         current->thread.used_vsr = 1;
505 }
506 #else
507 static void do_restore_vsx(void) { }
508 #endif /* CONFIG_VSX */
509
510 /*
511  * The exception exit path calls restore_math() with interrupts hard disabled
512  * but the soft irq state not "reconciled". ftrace code that calls
513  * local_irq_save/restore causes warnings.
514  *
515  * Rather than complicate the exit path, just don't trace restore_math. This
516  * could be done by having ftrace entry code check for this un-reconciled
517  * condition where MSR[EE]=0 and PACA_IRQ_HARD_DIS is not set, and
518  * temporarily fix it up for the duration of the ftrace call.
519  */
520 void notrace restore_math(struct pt_regs *regs)
521 {
522         unsigned long msr;
523         unsigned long new_msr = 0;
524
525         msr = regs->msr;
526
527         /*
528          * new_msr tracks the facilities that are to be restored. Only reload
529          * if the bit is not set in the user MSR (if it is set, the registers
530          * are live for the user thread).
531          */
532         if ((!(msr & MSR_FP)) && should_restore_fp())
533                 new_msr |= MSR_FP;
534
535         if ((!(msr & MSR_VEC)) && should_restore_altivec())
536                 new_msr |= MSR_VEC;
537
538         if ((!(msr & MSR_VSX)) && should_restore_vsx()) {
539                 if (((msr | new_msr) & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC))
540                         new_msr |= MSR_VSX;
541         }
542
543         if (new_msr) {
544                 unsigned long fpexc_mode = 0;
545
546                 msr_check_and_set(new_msr);
547
548                 if (new_msr & MSR_FP) {
549                         do_restore_fp();
550
551                         // This also covers VSX, because VSX implies FP
552                         fpexc_mode = current->thread.fpexc_mode;
553                 }
554
555                 if (new_msr & MSR_VEC)
556                         do_restore_altivec();
557
558                 if (new_msr & MSR_VSX)
559                         do_restore_vsx();
560
561                 msr_check_and_clear(new_msr);
562
563                 regs_set_return_msr(regs, regs->msr | new_msr | fpexc_mode);
564         }
565 }
566 #endif /* CONFIG_PPC_BOOK3S_64 */
567
568 static void save_all(struct task_struct *tsk)
569 {
570         unsigned long usermsr;
571
572         if (!tsk->thread.regs)
573                 return;
574
575         usermsr = tsk->thread.regs->msr;
576
577         if ((usermsr & msr_all_available) == 0)
578                 return;
579
580         msr_check_and_set(msr_all_available);
581
582         WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
583
584         if (usermsr & MSR_FP)
585                 save_fpu(tsk);
586
587         if (usermsr & MSR_VEC)
588                 save_altivec(tsk);
589
590         if (usermsr & MSR_SPE)
591                 __giveup_spe(tsk);
592
593         msr_check_and_clear(msr_all_available);
594 }
595
596 void flush_all_to_thread(struct task_struct *tsk)
597 {
598         if (tsk->thread.regs) {
599                 preempt_disable();
600                 BUG_ON(tsk != current);
601 #ifdef CONFIG_SPE
602                 if (tsk->thread.regs->msr & MSR_SPE)
603                         tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
604 #endif
605                 save_all(tsk);
606
607                 preempt_enable();
608         }
609 }
610 EXPORT_SYMBOL(flush_all_to_thread);
611
612 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
613 void do_send_trap(struct pt_regs *regs, unsigned long address,
614                   unsigned long error_code, int breakpt)
615 {
616         current->thread.trap_nr = TRAP_HWBKPT;
617         if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
618                         11, SIGSEGV) == NOTIFY_STOP)
619                 return;
620
621         /* Deliver the signal to userspace */
622         force_sig_ptrace_errno_trap(breakpt, /* breakpoint or watchpoint id */
623                                     (void __user *)address);
624 }
625 #else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
626
627 static void do_break_handler(struct pt_regs *regs)
628 {
629         struct arch_hw_breakpoint null_brk = {0};
630         struct arch_hw_breakpoint *info;
631         ppc_inst_t instr = ppc_inst(0);
632         int type = 0;
633         int size = 0;
634         unsigned long ea;
635         int i;
636
637         /*
638          * If the underlying hardware supports only one watchpoint, we know it
 639          * caused the exception. 8xx also falls into this category.
640          */
641         if (nr_wp_slots() == 1) {
642                 __set_breakpoint(0, &null_brk);
643                 current->thread.hw_brk[0] = null_brk;
644                 current->thread.hw_brk[0].flags |= HW_BRK_FLAG_DISABLED;
645                 return;
646         }
647
648         /* Otherwise find out which DAWR caused the exception and disable it. */
649         wp_get_instr_detail(regs, &instr, &type, &size, &ea);
650
651         for (i = 0; i < nr_wp_slots(); i++) {
652                 info = &current->thread.hw_brk[i];
653                 if (!info->address)
654                         continue;
655
656                 if (wp_check_constraints(regs, instr, ea, type, size, info)) {
657                         __set_breakpoint(i, &null_brk);
658                         current->thread.hw_brk[i] = null_brk;
659                         current->thread.hw_brk[i].flags |= HW_BRK_FLAG_DISABLED;
660                 }
661         }
662 }
663
664 DEFINE_INTERRUPT_HANDLER(do_break)
665 {
666         current->thread.trap_nr = TRAP_HWBKPT;
667         if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, regs->dsisr,
668                         11, SIGSEGV) == NOTIFY_STOP)
669                 return;
670
671         if (debugger_break_match(regs))
672                 return;
673
674         /*
675          * We reach here only when a watchpoint exception is generated by a ptrace
676          * event (or hw is buggy!). Now if CONFIG_HAVE_HW_BREAKPOINT is set,
677          * watchpoint is already handled by hw_breakpoint_handler() so we don't
678          * have to do anything. But when CONFIG_HAVE_HW_BREAKPOINT is not set,
679          * we need to manually handle the watchpoint here.
680          */
681         if (!IS_ENABLED(CONFIG_HAVE_HW_BREAKPOINT))
682                 do_break_handler(regs);
683
684         /* Deliver the signal to userspace */
685         force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)regs->dar);
686 }
687 #endif  /* CONFIG_PPC_ADV_DEBUG_REGS */
688
689 static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk[HBP_NUM_MAX]);
690
691 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
692 /*
693  * Set the debug registers back to their default "safe" values.
694  */
695 static void set_debug_reg_defaults(struct thread_struct *thread)
696 {
697         thread->debug.iac1 = thread->debug.iac2 = 0;
698 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
699         thread->debug.iac3 = thread->debug.iac4 = 0;
700 #endif
701         thread->debug.dac1 = thread->debug.dac2 = 0;
702 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
703         thread->debug.dvc1 = thread->debug.dvc2 = 0;
704 #endif
705         thread->debug.dbcr0 = 0;
706 #ifdef CONFIG_BOOKE
707         /*
708          * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
709          */
710         thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
711                         DBCR1_IAC3US | DBCR1_IAC4US;
712         /*
713          * Force Data Address Compare User/Supervisor bits to be User-only
714          * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
715          */
716         thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
717 #else
718         thread->debug.dbcr1 = 0;
719 #endif
720 }
721
722 static void prime_debug_regs(struct debug_reg *debug)
723 {
724         /*
725          * We could have inherited MSR_DE from userspace, since
726          * it doesn't get cleared on exception entry.  Make sure
727          * MSR_DE is clear before we enable any debug events.
728          */
729         mtmsr(mfmsr() & ~MSR_DE);
730
731         mtspr(SPRN_IAC1, debug->iac1);
732         mtspr(SPRN_IAC2, debug->iac2);
733 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
734         mtspr(SPRN_IAC3, debug->iac3);
735         mtspr(SPRN_IAC4, debug->iac4);
736 #endif
737         mtspr(SPRN_DAC1, debug->dac1);
738         mtspr(SPRN_DAC2, debug->dac2);
739 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
740         mtspr(SPRN_DVC1, debug->dvc1);
741         mtspr(SPRN_DVC2, debug->dvc2);
742 #endif
743         mtspr(SPRN_DBCR0, debug->dbcr0);
744         mtspr(SPRN_DBCR1, debug->dbcr1);
745 #ifdef CONFIG_BOOKE
746         mtspr(SPRN_DBCR2, debug->dbcr2);
747 #endif
748 }
749 /*
750  * If either the old or the new thread is making use of the
751  * debug registers, set the debug registers from the values
752  * stored in the new thread.
753  */
754 void switch_booke_debug_regs(struct debug_reg *new_debug)
755 {
756         if ((current->thread.debug.dbcr0 & DBCR0_IDM)
757                 || (new_debug->dbcr0 & DBCR0_IDM))
758                         prime_debug_regs(new_debug);
759 }
760 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
761 #else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
762 #ifndef CONFIG_HAVE_HW_BREAKPOINT
763 static void set_breakpoint(int i, struct arch_hw_breakpoint *brk)
764 {
765         preempt_disable();
766         __set_breakpoint(i, brk);
767         preempt_enable();
768 }
769
770 static void set_debug_reg_defaults(struct thread_struct *thread)
771 {
772         int i;
773         struct arch_hw_breakpoint null_brk = {0};
774
775         for (i = 0; i < nr_wp_slots(); i++) {
776                 thread->hw_brk[i] = null_brk;
777                 if (ppc_breakpoint_available())
778                         set_breakpoint(i, &thread->hw_brk[i]);
779         }
780 }
781
782 static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
783                                 struct arch_hw_breakpoint *b)
784 {
785         if (a->address != b->address)
786                 return false;
787         if (a->type != b->type)
788                 return false;
789         if (a->len != b->len)
790                 return false;
791         /* no need to check hw_len. it's calculated from address and len */
792         return true;
793 }
794
795 static void switch_hw_breakpoint(struct task_struct *new)
796 {
797         int i;
798
799         for (i = 0; i < nr_wp_slots(); i++) {
800                 if (likely(hw_brk_match(this_cpu_ptr(&current_brk[i]),
801                                         &new->thread.hw_brk[i])))
802                         continue;
803
804                 __set_breakpoint(i, &new->thread.hw_brk[i]);
805         }
806 }
807 #endif /* !CONFIG_HAVE_HW_BREAKPOINT */
808 #endif  /* CONFIG_PPC_ADV_DEBUG_REGS */
809
810 static inline int set_dabr(struct arch_hw_breakpoint *brk)
811 {
812         unsigned long dabr, dabrx;
813
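        /*
         * The low bits of brk->type (read/write/translate, i.e.
         * HW_BRK_TYPE_DABR) map directly onto the DABR control bits, and the
         * privilege bits of brk->type shifted down by 3 line up with the
         * DABRX_USER/KERNEL/HYP bits.
         */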
814         dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
815         dabrx = ((brk->type >> 3) & 0x7);
816
817         if (ppc_md.set_dabr)
818                 return ppc_md.set_dabr(dabr, dabrx);
819
820         if (IS_ENABLED(CONFIG_PPC_ADV_DEBUG_REGS)) {
821                 mtspr(SPRN_DAC1, dabr);
822                 if (IS_ENABLED(CONFIG_PPC_47x))
823                         isync();
824                 return 0;
825         } else if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
826                 mtspr(SPRN_DABR, dabr);
827                 if (cpu_has_feature(CPU_FTR_DABRX))
828                         mtspr(SPRN_DABRX, dabrx);
829                 return 0;
830         } else {
831                 return -EINVAL;
832         }
833 }
834
835 static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
836 {
837         unsigned long lctrl1 = LCTRL1_CTE_GT | LCTRL1_CTF_LT | LCTRL1_CRWE_RW |
838                                LCTRL1_CRWF_RW;
839         unsigned long lctrl2 = LCTRL2_LW0EN | LCTRL2_LW0LADC | LCTRL2_SLW0EN;
840         unsigned long start_addr = ALIGN_DOWN(brk->address, HW_BREAKPOINT_SIZE);
841         unsigned long end_addr = ALIGN(brk->address + brk->len, HW_BREAKPOINT_SIZE);
842
843         if (start_addr == 0)
844                 lctrl2 |= LCTRL2_LW0LA_F;
845         else if (end_addr == 0)
846                 lctrl2 |= LCTRL2_LW0LA_E;
847         else
848                 lctrl2 |= LCTRL2_LW0LA_EandF;
849
850         mtspr(SPRN_LCTRL2, 0);
851
852         if ((brk->type & HW_BRK_TYPE_RDWR) == 0)
853                 return 0;
854
855         if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
856                 lctrl1 |= LCTRL1_CRWE_RO | LCTRL1_CRWF_RO;
857         if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
858                 lctrl1 |= LCTRL1_CRWE_WO | LCTRL1_CRWF_WO;
859
860         mtspr(SPRN_CMPE, start_addr - 1);
861         mtspr(SPRN_CMPF, end_addr);
862         mtspr(SPRN_LCTRL1, lctrl1);
863         mtspr(SPRN_LCTRL2, lctrl2);
864
865         return 0;
866 }
867
868 void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
869 {
870         memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk));
871
872         if (dawr_enabled())
873                 // Power8 or later
874                 set_dawr(nr, brk);
875         else if (IS_ENABLED(CONFIG_PPC_8xx))
876                 set_breakpoint_8xx(brk);
877         else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
878                 // Power7 or earlier
879                 set_dabr(brk);
880         else
881                 // Shouldn't happen due to higher level checks
882                 WARN_ON_ONCE(1);
883 }
884
885 /* Check if we have DAWR or DABR hardware */
886 bool ppc_breakpoint_available(void)
887 {
888         if (dawr_enabled())
889                 return true; /* POWER8 DAWR or POWER9 forced DAWR */
890         if (cpu_has_feature(CPU_FTR_ARCH_207S))
891                 return false; /* POWER9 with DAWR disabled */
892         /* DABR: Everything but POWER8 and POWER9 */
893         return true;
894 }
895 EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
896
897 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
898
899 static inline bool tm_enabled(struct task_struct *tsk)
900 {
901         return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
902 }
903
904 static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
905 {
906         /*
907          * Use the current MSR TM suspended bit to track if we have
908          * checkpointed state outstanding.
909          * On signal delivery, we'd normally reclaim the checkpointed
910          * state to obtain the stack pointer (see get_tm_stackpointer()).
911          * This will then directly return to userspace without going
912          * through __switch_to(). However, if the stack frame is bad,
913          * we need to exit this thread which calls __switch_to() which
914          * will again attempt to reclaim the already saved tm state.
915          * Hence we need to check that we've not already reclaimed
916          * this state.
917          * We do this using the current MSR, rather than tracking it in
918          * some specific thread_struct bit, as it has the additional
919          * benefit of checking for a potential TM bad thing exception.
920          */
921         if (!MSR_TM_SUSPENDED(mfmsr()))
922                 return;
923
924         giveup_all(container_of(thr, struct task_struct, thread));
925
926         tm_reclaim(thr, cause);
927
928         /*
929          * If we are in a transaction and FP is off then we can't have
930          * used FP inside that transaction. Hence the checkpointed
931          * state is the same as the live state. We need to copy the
932          * live state to the checkpointed state so that when the
933          * transaction is restored, the checkpointed state is correct
934          * and the aborted transaction sees the correct state. We use
935          * ckpt_regs.msr here as that's what tm_reclaim will use to
936          * determine if it's going to write the checkpointed state or
937          * not. So either this will write the checkpointed registers,
938          * or reclaim will. Similarly for VMX.
939          */
940         if ((thr->ckpt_regs.msr & MSR_FP) == 0)
941                 memcpy(&thr->ckfp_state, &thr->fp_state,
942                        sizeof(struct thread_fp_state));
943         if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
944                 memcpy(&thr->ckvr_state, &thr->vr_state,
945                        sizeof(struct thread_vr_state));
946 }
947
948 void tm_reclaim_current(uint8_t cause)
949 {
950         tm_enable();
951         tm_reclaim_thread(&current->thread, cause);
952 }
953
954 static inline void tm_reclaim_task(struct task_struct *tsk)
955 {
956         /* We have to work out if we're switching from/to a task that's in the
957          * middle of a transaction.
958          *
959          * In switching we need to maintain a 2nd register state as
960          * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
961          * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
962          * ckvr_state
963          *
964          * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
965          */
966         struct thread_struct *thr = &tsk->thread;
967
968         if (!thr->regs)
969                 return;
970
971         if (!MSR_TM_ACTIVE(thr->regs->msr))
972                 goto out_and_saveregs;
973
974         WARN_ON(tm_suspend_disabled);
975
976         TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
977                  "ccr=%lx, msr=%lx, trap=%lx)\n",
978                  tsk->pid, thr->regs->nip,
979                  thr->regs->ccr, thr->regs->msr,
980                  thr->regs->trap);
981
982         tm_reclaim_thread(thr, TM_CAUSE_RESCHED);
983
984         TM_DEBUG("--- tm_reclaim on pid %d complete\n",
985                  tsk->pid);
986
987 out_and_saveregs:
988         /* Always save the regs here, even if a transaction's not active.
989          * This context-switches a thread's TM info SPRs.  We do it here to
990          * be consistent with the restore path (in recheckpoint) which
991          * cannot happen later in _switch().
992          */
993         tm_save_sprs(thr);
994 }
995
996 extern void __tm_recheckpoint(struct thread_struct *thread);
997
998 void tm_recheckpoint(struct thread_struct *thread)
999 {
1000         unsigned long flags;
1001
1002         if (!(thread->regs->msr & MSR_TM))
1003                 return;
1004
1005         /* We really can't be interrupted here as the TEXASR registers can't
1006          * change and later in the trecheckpoint code, we have a userspace R1.
1007          * So let's hard disable over this region.
1008          */
1009         local_irq_save(flags);
1010         hard_irq_disable();
1011
1012         /* The TM SPRs are restored here, so that TEXASR.FS can be set
1013          * before the trecheckpoint and no explosion occurs.
1014          */
1015         tm_restore_sprs(thread);
1016
1017         __tm_recheckpoint(thread);
1018
1019         local_irq_restore(flags);
1020 }
1021
1022 static inline void tm_recheckpoint_new_task(struct task_struct *new)
1023 {
1024         if (!cpu_has_feature(CPU_FTR_TM))
1025                 return;
1026
1027         /* Recheckpoint the registers of the thread we're about to switch to.
1028          *
1029          * If the task was using FP, we non-lazily reload both the original and
1030          * the speculative FP register states.  This is because the kernel
1031          * doesn't see if/when a TM rollback occurs, so if we take an FP
1032          * unavailable later, we are unable to determine which set of FP regs
1033          * need to be restored.
1034          */
1035         if (!tm_enabled(new))
1036                 return;
1037
1038         if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
1039                 tm_restore_sprs(&new->thread);
1040                 return;
1041         }
1042         /* Recheckpoint to restore original checkpointed register state. */
1043         TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
1044                  new->pid, new->thread.regs->msr);
1045
1046         tm_recheckpoint(&new->thread);
1047
1048         /*
1049          * The checkpointed state has been restored but the live state has
1050          * not; ensure all the math functionality is turned off to trigger
1051          * restore_math() to reload.
1052          */
1053         new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);
1054
1055         TM_DEBUG("*** tm_recheckpoint of pid %d complete "
1056                  "(kernel msr 0x%lx)\n",
1057                  new->pid, mfmsr());
1058 }
1059
1060 static inline void __switch_to_tm(struct task_struct *prev,
1061                 struct task_struct *new)
1062 {
1063         if (cpu_has_feature(CPU_FTR_TM)) {
1064                 if (tm_enabled(prev) || tm_enabled(new))
1065                         tm_enable();
1066
1067                 if (tm_enabled(prev)) {
1068                         prev->thread.load_tm++;
1069                         tm_reclaim_task(prev);
1070                         if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
1071                                 prev->thread.regs->msr &= ~MSR_TM;
1072                 }
1073
1074                 tm_recheckpoint_new_task(new);
1075         }
1076 }
1077
1078 /*
1079  * This is called if we are on the way out to userspace and the
1080  * TIF_RESTORE_TM flag is set.  It checks if we need to reload
1081  * FP and/or vector state and does so if necessary.
1082  * If userspace is inside a transaction (whether active or
1083  * suspended) and FP/VMX/VSX instructions have ever been enabled
1084  * inside that transaction, then we have to keep them enabled
1085  * and keep the FP/VMX/VSX state loaded for as long as the transaction
1086  * continues.  The reason is that if we didn't, and subsequently
1087  * got an FP/VMX/VSX unavailable interrupt inside a transaction,
1088  * we don't know whether it's the same transaction, and thus we
1089  * don't know which of the checkpointed state and the transactional
1090  * state to use.
1091  */
1092 void restore_tm_state(struct pt_regs *regs)
1093 {
1094         unsigned long msr_diff;
1095
1096         /*
1097          * This is the only moment we should clear TIF_RESTORE_TM as
1098          * it is here that ckpt_regs.msr and pt_regs.msr become the same
1099          * again; anything else could lead to an incorrect ckpt_msr being
1100          * saved and therefore incorrect signal contexts.
1101          */
1102         clear_thread_flag(TIF_RESTORE_TM);
1103         if (!MSR_TM_ACTIVE(regs->msr))
1104                 return;
1105
1106         msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
1107         msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
1108
1109         /* Ensure that restore_math() will restore */
1110         if (msr_diff & MSR_FP)
1111                 current->thread.load_fp = 1;
1112 #ifdef CONFIG_ALTIVEC
1113         if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
1114                 current->thread.load_vec = 1;
1115 #endif
1116         restore_math(regs);
1117
1118         regs_set_return_msr(regs, regs->msr | msr_diff);
1119 }
1120
1121 #else /* !CONFIG_PPC_TRANSACTIONAL_MEM */
1122 #define tm_recheckpoint_new_task(new)
1123 #define __switch_to_tm(prev, new)
1124 void tm_reclaim_current(uint8_t cause) {}
1125 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1126
1127 static inline void save_sprs(struct thread_struct *t)
1128 {
1129 #ifdef CONFIG_ALTIVEC
1130         if (cpu_has_feature(CPU_FTR_ALTIVEC))
1131                 t->vrsave = mfspr(SPRN_VRSAVE);
1132 #endif
1133 #ifdef CONFIG_SPE
1134         if (cpu_has_feature(CPU_FTR_SPE))
1135                 t->spefscr = mfspr(SPRN_SPEFSCR);
1136 #endif
1137 #ifdef CONFIG_PPC_BOOK3S_64
1138         if (cpu_has_feature(CPU_FTR_DSCR))
1139                 t->dscr = mfspr(SPRN_DSCR);
1140
1141         if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1142                 t->bescr = mfspr(SPRN_BESCR);
1143                 t->ebbhr = mfspr(SPRN_EBBHR);
1144                 t->ebbrr = mfspr(SPRN_EBBRR);
1145
1146                 t->fscr = mfspr(SPRN_FSCR);
1147
1148                 /*
1149                  * Note that the TAR is not available for use in the kernel.
1150                  * (To provide this, the TAR should be backed up/restored on
1151                  * exception entry/exit instead, and be in pt_regs.  FIXME,
1152                  * this should be in pt_regs anyway (for debug).)
1153                  */
1154                 t->tar = mfspr(SPRN_TAR);
1155         }
1156 #endif
1157 }
1158
1159 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1160 void kvmppc_save_user_regs(void)
1161 {
1162         unsigned long usermsr;
1163
1164         if (!current->thread.regs)
1165                 return;
1166
1167         usermsr = current->thread.regs->msr;
1168
1169         if (usermsr & MSR_FP)
1170                 save_fpu(current);
1171
1172         if (usermsr & MSR_VEC)
1173                 save_altivec(current);
1174
1175 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1176         if (usermsr & MSR_TM) {
1177                 current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
1178                 current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
1179                 current->thread.tm_texasr = mfspr(SPRN_TEXASR);
1180                 current->thread.regs->msr &= ~MSR_TM;
1181         }
1182 #endif
1183 }
1184 EXPORT_SYMBOL_GPL(kvmppc_save_user_regs);
1185
1186 void kvmppc_save_current_sprs(void)
1187 {
1188         save_sprs(&current->thread);
1189 }
1190 EXPORT_SYMBOL_GPL(kvmppc_save_current_sprs);
1191 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
1192
1193 static inline void restore_sprs(struct thread_struct *old_thread,
1194                                 struct thread_struct *new_thread)
1195 {
1196 #ifdef CONFIG_ALTIVEC
1197         if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
1198             old_thread->vrsave != new_thread->vrsave)
1199                 mtspr(SPRN_VRSAVE, new_thread->vrsave);
1200 #endif
1201 #ifdef CONFIG_SPE
1202         if (cpu_has_feature(CPU_FTR_SPE) &&
1203             old_thread->spefscr != new_thread->spefscr)
1204                 mtspr(SPRN_SPEFSCR, new_thread->spefscr);
1205 #endif
1206 #ifdef CONFIG_PPC_BOOK3S_64
1207         if (cpu_has_feature(CPU_FTR_DSCR)) {
1208                 u64 dscr = get_paca()->dscr_default;
1209                 if (new_thread->dscr_inherit)
1210                         dscr = new_thread->dscr;
1211
1212                 if (old_thread->dscr != dscr)
1213                         mtspr(SPRN_DSCR, dscr);
1214         }
1215
1216         if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1217                 if (old_thread->bescr != new_thread->bescr)
1218                         mtspr(SPRN_BESCR, new_thread->bescr);
1219                 if (old_thread->ebbhr != new_thread->ebbhr)
1220                         mtspr(SPRN_EBBHR, new_thread->ebbhr);
1221                 if (old_thread->ebbrr != new_thread->ebbrr)
1222                         mtspr(SPRN_EBBRR, new_thread->ebbrr);
1223
1224                 if (old_thread->fscr != new_thread->fscr)
1225                         mtspr(SPRN_FSCR, new_thread->fscr);
1226
1227                 if (old_thread->tar != new_thread->tar)
1228                         mtspr(SPRN_TAR, new_thread->tar);
1229         }
1230
1231         if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
1232             old_thread->tidr != new_thread->tidr)
1233                 mtspr(SPRN_TIDR, new_thread->tidr);
1234 #endif
1235
1236 }
1237
1238 struct task_struct *__switch_to(struct task_struct *prev,
1239         struct task_struct *new)
1240 {
1241         struct thread_struct *new_thread, *old_thread;
1242         struct task_struct *last;
1243 #ifdef CONFIG_PPC_64S_HASH_MMU
1244         struct ppc64_tlb_batch *batch;
1245 #endif
1246
1247         new_thread = &new->thread;
1248         old_thread = &current->thread;
1249
1250         WARN_ON(!irqs_disabled());
1251
1252 #ifdef CONFIG_PPC_64S_HASH_MMU
1253         batch = this_cpu_ptr(&ppc64_tlb_batch);
1254         if (batch->active) {
1255                 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
1256                 if (batch->index)
1257                         __flush_tlb_pending(batch);
1258                 batch->active = 0;
1259         }
1260
1261         /*
1262          * On POWER9 the copy-paste buffer can only paste into
1263          * foreign real addresses, so unprivileged processes can not
1264          * see the data or use it in any way unless they have
1265          * foreign real mappings. If the new process has the foreign
1266          * real address mappings, we must issue a cp_abort to clear
1267          * any state and prevent snooping, corruption or a covert
1268          * channel. ISA v3.1 supports paste into local memory.
1269          */
1270         if (new->mm && (cpu_has_feature(CPU_FTR_ARCH_31) ||
1271                         atomic_read(&new->mm->context.vas_windows)))
1272                 asm volatile(PPC_CP_ABORT);
1273 #endif /* CONFIG_PPC_64S_HASH_MMU */
1274
1275 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1276         switch_booke_debug_regs(&new->thread.debug);
1277 #else
1278 /*
1279  * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
1280  * schedule DABR
1281  */
1282 #ifndef CONFIG_HAVE_HW_BREAKPOINT
1283         switch_hw_breakpoint(new);
1284 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1285 #endif
1286
1287         /*
1288          * We need to save SPRs before treclaim/trecheckpoint as these will
1289          * change a number of them.
1290          */
1291         save_sprs(&prev->thread);
1292
1293         /* Save FPU, Altivec, VSX and SPE state */
1294         giveup_all(prev);
1295
1296         __switch_to_tm(prev, new);
1297
1298         if (!radix_enabled()) {
1299                 /*
1300                  * We can't take a PMU exception inside _switch() since there
1301                  * is a window where the kernel stack SLB and the kernel stack
1302                  * are out of sync. Hard disable here.
1303                  */
1304                 hard_irq_disable();
1305         }
1306
1307         /*
1308          * Call restore_sprs() and set_return_regs_changed() before calling
1309          * _switch(). If we move it after _switch() then we miss out on calling
1310          * it for new tasks. The reason for this is we manually create a stack
1311          * frame for new tasks that directly returns through ret_from_fork() or
1312          * ret_from_kernel_thread(). See copy_thread() for details.
1313          */
1314         restore_sprs(old_thread, new_thread);
1315
1316         set_return_regs_changed(); /* _switch changes stack (and regs) */
1317
1318         if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
1319                 kuap_assert_locked();
1320
1321         last = _switch(old_thread, new_thread);
1322
1323         /*
1324          * Nothing after _switch will be run for newly created tasks,
1325          * because they switch directly to ret_from_fork/ret_from_kernel_thread
1326          * etc. Code added here should have a comment explaining why that is
1327          * okay.
1328          */
1329
1330 #ifdef CONFIG_PPC_BOOK3S_64
1331 #ifdef CONFIG_PPC_64S_HASH_MMU
1332         /*
1333          * This applies to a process that was context switched while inside
1334          * arch_enter_lazy_mmu_mode(), to re-activate the batch that was
1335          * deactivated above, before _switch(). This will never be the case
1336          * for new tasks.
1337          */
1338         if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
1339                 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
1340                 batch = this_cpu_ptr(&ppc64_tlb_batch);
1341                 batch->active = 1;
1342         }
1343 #endif
1344
1345         /*
1346          * Math facilities are masked out of the child MSR in copy_thread.
1347          * A new task does not need to restore_math because it will
1348          * demand fault them.
1349          */
1350         if (current->thread.regs)
1351                 restore_math(current->thread.regs);
1352 #endif /* CONFIG_PPC_BOOK3S_64 */
1353
1354         return last;
1355 }
1356
1357 #define NR_INSN_TO_PRINT        16
1358
1359 static void show_instructions(struct pt_regs *regs)
1360 {
1361         int i;
1362         unsigned long nip = regs->nip;
1363         unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
1364
1365         printk("Instruction dump:");
1366
1367         /*
1368          * If we were executing with the MMU off for instructions, adjust pc
1369          * rather than printing XXXXXXXX.
1370          */
1371         if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) {
1372                 pc = (unsigned long)phys_to_virt(pc);
1373                 nip = (unsigned long)phys_to_virt(regs->nip);
1374         }
1375
1376         for (i = 0; i < NR_INSN_TO_PRINT; i++) {
1377                 int instr;
1378
1379                 if (!(i % 8))
1380                         pr_cont("\n");
1381
1382                 if (!__kernel_text_address(pc) ||
1383                     get_kernel_nofault(instr, (const void *)pc)) {
1384                         pr_cont("XXXXXXXX ");
1385                 } else {
1386                         if (nip == pc)
1387                                 pr_cont("<%08x> ", instr);
1388                         else
1389                                 pr_cont("%08x ", instr);
1390                 }
1391
1392                 pc += sizeof(int);
1393         }
1394
1395         pr_cont("\n");
1396 }
1397
1398 void show_user_instructions(struct pt_regs *regs)
1399 {
1400         unsigned long pc;
1401         int n = NR_INSN_TO_PRINT;
1402         struct seq_buf s;
1403         char buf[96]; /* enough for 8 times 9 + 2 chars */
1404
1405         pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
1406
1407         seq_buf_init(&s, buf, sizeof(buf));
1408
1409         while (n) {
1410                 int i;
1411
1412                 seq_buf_clear(&s);
1413
1414                 for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
1415                         int instr;
1416
1417                         if (copy_from_user_nofault(&instr, (void __user *)pc,
1418                                         sizeof(instr))) {
1419                                 seq_buf_printf(&s, "XXXXXXXX ");
1420                                 continue;
1421                         }
1422                         seq_buf_printf(&s, regs->nip == pc ? "<%08x> " : "%08x ", instr);
1423                 }
1424
1425                 if (!seq_buf_has_overflowed(&s))
1426                         pr_info("%s[%d]: code: %s\n", current->comm,
1427                                 current->pid, s.buffer);
1428         }
1429 }
1430
1431 struct regbit {
1432         unsigned long bit;
1433         const char *name;
1434 };
1435
1436 static struct regbit msr_bits[] = {
1437 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
1438         {MSR_SF,        "SF"},
1439         {MSR_HV,        "HV"},
1440 #endif
1441         {MSR_VEC,       "VEC"},
1442         {MSR_VSX,       "VSX"},
1443 #ifdef CONFIG_BOOKE
1444         {MSR_CE,        "CE"},
1445 #endif
1446         {MSR_EE,        "EE"},
1447         {MSR_PR,        "PR"},
1448         {MSR_FP,        "FP"},
1449         {MSR_ME,        "ME"},
1450 #ifdef CONFIG_BOOKE
1451         {MSR_DE,        "DE"},
1452 #else
1453         {MSR_SE,        "SE"},
1454         {MSR_BE,        "BE"},
1455 #endif
1456         {MSR_IR,        "IR"},
1457         {MSR_DR,        "DR"},
1458         {MSR_PMM,       "PMM"},
1459 #ifndef CONFIG_BOOKE
1460         {MSR_RI,        "RI"},
1461         {MSR_LE,        "LE"},
1462 #endif
1463         {0,             NULL}
1464 };
1465
1466 static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
1467 {
1468         const char *s = "";
1469
1470         for (; bits->bit; ++bits)
1471                 if (val & bits->bit) {
1472                         pr_cont("%s%s", s, bits->name);
1473                         s = sep;
1474                 }
1475 }
1476
1477 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1478 static struct regbit msr_tm_bits[] = {
1479         {MSR_TS_T,      "T"},
1480         {MSR_TS_S,      "S"},
1481         {MSR_TM,        "E"},
1482         {0,             NULL}
1483 };
1484
1485 static void print_tm_bits(unsigned long val)
1486 {
1487 /*
1488  * This only prints something if at least one of the TM bits is set.
1489  * Inside the TM[], the output means:
1490  *   E: Enabled         (bit 32)
1491  *   S: Suspended       (bit 33)
1492  *   T: Transactional   (bit 34)
1493  */
1494         if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
1495                 pr_cont(",TM[");
1496                 print_bits(val, msr_tm_bits, "");
1497                 pr_cont("]");
1498         }
1499 }
1500 #else
1501 static void print_tm_bits(unsigned long val) {}
1502 #endif
1503
1504 static void print_msr_bits(unsigned long val)
1505 {
1506         pr_cont("<");
1507         print_bits(val, msr_bits, ",");
1508         print_tm_bits(val);
1509         pr_cont(">");
1510 }
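/*
 * Illustrative output for a common ppc64le kernel MSR value, as printed by
 * __show_regs() below:
 *
 *	MSR:  8000000000009033 <SF,EE,ME,IR,DR,RI,LE>
 */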
1511
1512 #ifdef CONFIG_PPC64
1513 #define REG             "%016lx"
1514 #define REGS_PER_LINE   4
1515 #else
1516 #define REG             "%08lx"
1517 #define REGS_PER_LINE   8
1518 #endif
1519
1520 static void __show_regs(struct pt_regs *regs)
1521 {
1522         int i, trap;
1523
1524         printk("NIP:  "REG" LR: "REG" CTR: "REG"\n",
1525                regs->nip, regs->link, regs->ctr);
1526         printk("REGS: %px TRAP: %04lx   %s  (%s)\n",
1527                regs, regs->trap, print_tainted(), init_utsname()->release);
1528         printk("MSR:  "REG" ", regs->msr);
1529         print_msr_bits(regs->msr);
1530         pr_cont("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
1531         trap = TRAP(regs);
1532         if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR))
1533                 pr_cont("CFAR: "REG" ", regs->orig_gpr3);
1534         if (trap == INTERRUPT_MACHINE_CHECK ||
1535             trap == INTERRUPT_DATA_STORAGE ||
1536             trap == INTERRUPT_ALIGNMENT) {
1537                 if (IS_ENABLED(CONFIG_4xx) || IS_ENABLED(CONFIG_BOOKE))
1538                         pr_cont("DEAR: "REG" ESR: "REG" ", regs->dear, regs->esr);
1539                 else
1540                         pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
1541         }
1542
1543 #ifdef CONFIG_PPC64
1544         pr_cont("IRQMASK: %lx ", regs->softe);
1545 #endif
1546 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1547         if (MSR_TM_ACTIVE(regs->msr))
1548                 pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
1549 #endif
1550
1551         for (i = 0;  i < 32;  i++) {
1552                 if ((i % REGS_PER_LINE) == 0)
1553                         pr_cont("\nGPR%02d: ", i);
1554                 pr_cont(REG " ", regs->gpr[i]);
1555         }
1556         pr_cont("\n");
1557         /*
1558          * Look up NIP late so we have the best chance of getting the
1559          * above info out without failing.
1560          */
1561         if (IS_ENABLED(CONFIG_KALLSYMS)) {
1562                 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
1563                 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
1564         }
1565 }
1566
1567 void show_regs(struct pt_regs *regs)
1568 {
1569         show_regs_print_info(KERN_DEFAULT);
1570         __show_regs(regs);
1571         show_stack(current, (unsigned long *) regs->gpr[1], KERN_DEFAULT);
1572         if (!user_mode(regs))
1573                 show_instructions(regs);
1574 }
1575
1576 void flush_thread(void)
1577 {
1578 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1579         flush_ptrace_hw_breakpoint(current);
1580 #else /* CONFIG_HAVE_HW_BREAKPOINT */
1581         set_debug_reg_defaults(&current->thread);
1582 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1583 }
1584
1585 void arch_setup_new_exec(void)
1586 {
1587
1588 #ifdef CONFIG_PPC_BOOK3S_64
1589         if (!radix_enabled())
1590                 hash__setup_new_exec();
1591 #endif
1592         /*
1593          * If we exec out of a kernel thread then thread.regs will not be
1594          * set.  Do it now.
1595          */
1596         if (!current->thread.regs) {
1597                 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
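                /* Point thread.regs at the pt_regs area at the very top of
                 * the kernel stack, where user register state normally lives. */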
1598                 current->thread.regs = regs - 1;
1599         }
1600
1601 #ifdef CONFIG_PPC_MEM_KEYS
1602         current->thread.regs->amr  = default_amr;
1603         current->thread.regs->iamr  = default_iamr;
1604 #endif
1605 }
1606
1607 #ifdef CONFIG_PPC64
1608 /**
1609  * Assign a TIDR (thread ID) for task @t and set it in the thread
1610  * structure. For now, we only support setting TIDR for 'current' task.
1611  *
1612  * Since the TID value is a truncated form of its PID, it is possible
1613  * (but unlikely) for 2 threads to have the same TID. In the unlikely event
1614  * that 2 threads share the same TID and are waiting, one of the following
1615  * cases will happen:
1616  *
1617  * 1. The correct thread is running, the wrong thread is not
1618  * In this situation, the correct thread is woken and proceeds to pass its
1619  * condition check.
1620  *
1621  * 2. Neither thread is running
1622  * In this situation, neither thread will be woken. When scheduled, the waiting
1623  * threads will execute either a wait, which will return immediately, followed
1624  * by a condition check, which will pass for the correct thread and fail
1625  * for the wrong thread, or they will execute the condition check immediately.
1626  *
1627  * 3. The wrong thread is running, the correct thread is not
1628  * The wrong thread will be woken, but will fail its condition check and
1629  * re-execute wait. The correct thread, when scheduled, will execute either
1630  * its condition check (which will pass), or wait, which returns immediately
1631  * when called the first time after the thread is scheduled, followed by its
1632  * condition check (which will pass).
1633  *
1634  * 4. Both threads are running
1635  * Both threads will be woken. The wrong thread will fail its condition check
1636  * and execute another wait, while the correct thread will pass its condition
1637  * check.
1638  *
1639  * @t: the task to set the thread ID for
1640  */
1641 int set_thread_tidr(struct task_struct *t)
1642 {
1643         if (!cpu_has_feature(CPU_FTR_P9_TIDR))
1644                 return -EINVAL;
1645
1646         if (t != current)
1647                 return -EINVAL;
1648
1649         if (t->thread.tidr)
1650                 return 0;
1651
1652         t->thread.tidr = (u16)task_pid_nr(t);
1653         mtspr(SPRN_TIDR, t->thread.tidr);
1654
1655         return 0;
1656 }
1657 EXPORT_SYMBOL_GPL(set_thread_tidr);
1658
1659 #endif /* CONFIG_PPC64 */
1660
1661 void
1662 release_thread(struct task_struct *t)
1663 {
1664 }
1665
1666 /*
1667  * This gets called so that we can store coprocessor state into memory and
1668  * copy the current task into the new thread.
1669  */
1670 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
1671 {
1672         flush_all_to_thread(src);
1673         /*
1674          * Flush TM state out so we can copy it.  __switch_to_tm() does this
1675          * flush but it removes the checkpointed state from the current CPU and
1676          * transitions the CPU out of TM mode.  Hence we need to call
1677          * tm_recheckpoint_new_task() (on the same task) to restore the
1678          * checkpointed state back and the TM mode.
1679          *
1680          * Can't pass dst because it isn't ready. Doesn't matter, passing
1681          * dst is only important for __switch_to()
1682          */
1683         __switch_to_tm(src, src);
1684
1685         *dst = *src;
1686
1687         clear_task_ebb(dst);
1688
1689         return 0;
1690 }
1691
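/*
 * Pre-compute the SLB VSID covering the new task's kernel stack so the
 * context-switch code can install the stack's SLB entry cheaply.  Only
 * needed with the hash MMU; radix does not use SLBs.
 */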
1692 static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
1693 {
1694 #ifdef CONFIG_PPC_64S_HASH_MMU
1695         unsigned long sp_vsid;
1696         unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
1697
1698         if (radix_enabled())
1699                 return;
1700
1701         if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1702                 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
1703                         << SLB_VSID_SHIFT_1T;
1704         else
1705                 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
1706                         << SLB_VSID_SHIFT;
1707         sp_vsid |= SLB_VSID_KERNEL | llp;
1708         p->thread.ksp_vsid = sp_vsid;
1709 #endif
1710 }
1711
1712 /*
1713  * Copy a thread.
1714  */
1715
1716 /*
1717  * Copy architecture-specific thread state
1718  */
1719 int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
1720 {
1721         unsigned long clone_flags = args->flags;
1722         unsigned long usp = args->stack;
1723         unsigned long tls = args->tls;
1724         struct pt_regs *childregs, *kregs;
1725         extern void ret_from_fork(void);
1726         extern void ret_from_fork_scv(void);
1727         extern void ret_from_kernel_thread(void);
1728         void (*f)(void);
1729         unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
1730         struct thread_info *ti = task_thread_info(p);
1731 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1732         int i;
1733 #endif
1734
1735         klp_init_thread_info(p);
1736
1737         /* Copy registers */
1738         sp -= sizeof(struct pt_regs);
1739         childregs = (struct pt_regs *) sp;
1740         if (unlikely(args->fn)) {
1741                 /* kernel thread */
1742                 memset(childregs, 0, sizeof(struct pt_regs));
1743                 childregs->gpr[1] = sp + sizeof(struct pt_regs);
1744                 /* ret_from_kernel_thread invokes fn (r14) with fn_arg (r15) */
1745                 if (args->fn)
1746                         childregs->gpr[14] = ppc_function_entry((void *)args->fn);
1747 #ifdef CONFIG_PPC64
1748                 clear_tsk_thread_flag(p, TIF_32BIT);
1749                 childregs->softe = IRQS_ENABLED;
1750 #endif
1751                 childregs->gpr[15] = (unsigned long)args->fn_arg;
1752                 p->thread.regs = NULL;  /* no user register state */
1753                 ti->flags |= _TIF_RESTOREALL;
1754                 f = ret_from_kernel_thread;
1755         } else {
1756                 /* user thread */
1757                 struct pt_regs *regs = current_pt_regs();
1758                 *childregs = *regs;
1759                 if (usp)
1760                         childregs->gpr[1] = usp;
1761                 p->thread.regs = childregs;
1762                 /* 64s sets this in ret_from_fork */
1763                 if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
1764                         childregs->gpr[3] = 0;  /* Result from fork() */
1765                 if (clone_flags & CLONE_SETTLS) {
1766                         if (!is_32bit_task())
1767                                 childregs->gpr[13] = tls;
1768                         else
1769                                 childregs->gpr[2] = tls;
1770                 }
1771
1772                 if (trap_is_scv(regs))
1773                         f = ret_from_fork_scv;
1774                 else
1775                         f = ret_from_fork;
1776         }
1777         childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
1778         sp -= STACK_FRAME_OVERHEAD;
1779
1780         /*
1781          * The way this works is that at some point in the future
1782          * some task will call _switch to switch to the new task.
1783          * That will pop off the stack frame created below and start
1784          * the new task running at ret_from_fork.  The new task will
1785          * do some housekeeping and then return from the fork or clone
1786          * system call, using the stack frame created above.
1787          */
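        /*
         * Resulting layout, from the top of the kernel stack down: childregs
         * (user pt_regs), a STACK_FRAME_OVERHEAD frame whose back-chain word
         * is zeroed to terminate unwinding, kregs (the switch frame's
         * register area), and a final STACK_FRAME_OVERHEAD frame that
         * thread.ksp ends up pointing to.
         */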
1788         ((unsigned long *)sp)[0] = 0;
1789         sp -= sizeof(struct pt_regs);
1790         kregs = (struct pt_regs *) sp;
1791         sp -= STACK_FRAME_OVERHEAD;
1792         p->thread.ksp = sp;
1793 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1794         for (i = 0; i < nr_wp_slots(); i++)
1795                 p->thread.ptrace_bps[i] = NULL;
1796 #endif
1797
1798 #ifdef CONFIG_PPC_FPU_REGS
1799         p->thread.fp_save_area = NULL;
1800 #endif
1801 #ifdef CONFIG_ALTIVEC
1802         p->thread.vr_save_area = NULL;
1803 #endif
1804 #if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
1805         p->thread.kuap = KUAP_NONE;
1806 #endif
1807 #if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
1808         p->thread.pid = MMU_NO_CONTEXT;
1809 #endif
1810
1811         setup_ksp_vsid(p, sp);
1812
1813 #ifdef CONFIG_PPC64
1814         if (cpu_has_feature(CPU_FTR_DSCR)) {
1815                 p->thread.dscr_inherit = current->thread.dscr_inherit;
1816                 p->thread.dscr = mfspr(SPRN_DSCR);
1817         }
1818         if (cpu_has_feature(CPU_FTR_HAS_PPR))
1819                 childregs->ppr = DEFAULT_PPR;
1820
1821         p->thread.tidr = 0;
1822 #endif
1823         /*
1824          * Run with the current AMR value of the kernel
1825          */
1826 #ifdef CONFIG_PPC_PKEY
1827         if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
1828                 kregs->amr = AMR_KUAP_BLOCKED;
1829
1830         if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP))
1831                 kregs->iamr = AMR_KUEP_BLOCKED;
1832 #endif
1833         kregs->nip = ppc_function_entry(f);
1834         return 0;
1835 }
1836
1837 void preload_new_slb_context(unsigned long start, unsigned long sp);
1838
1839 /*
1840  * Set up a thread for executing a new program
1841  */
1842 void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1843 {
1844 #ifdef CONFIG_PPC64
1845         unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
1846
1847         if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
1848                 preload_new_slb_context(start, sp);
1849 #endif
1850
1851 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1852         /*
1853          * Clear any transactional state, we're exec()ing. The cause is
1854          * not important as there will never be a recheckpoint so it's not
1855          * user visible.
1856          */
1857         if (MSR_TM_SUSPENDED(mfmsr()))
1858                 tm_reclaim_current(0);
1859 #endif
1860
1861         memset(regs->gpr, 0, sizeof(regs->gpr));
1862         regs->ctr = 0;
1863         regs->link = 0;
1864         regs->xer = 0;
1865         regs->ccr = 0;
1866         regs->gpr[1] = sp;
1867
1868 #ifdef CONFIG_PPC32
1869         regs->mq = 0;
1870         regs->nip = start;
1871         regs->msr = MSR_USER;
1872 #else
1873         if (!is_32bit_task()) {
1874                 unsigned long entry;
1875
1876                 if (is_elf2_task()) {
1877                         /* Look ma, no function descriptors! */
1878                         entry = start;
1879
1880                         /*
1881                          * Ulrich says:
1882                          *   The latest iteration of the ABI requires that when
1883                          *   calling a function (at its global entry point),
1884                          *   the caller must ensure r12 holds the entry point
1885                          *   address (so that the function can quickly
1886                          *   establish addressability).
1887                          */
1888                         regs->gpr[12] = start;
1889                         /* Make sure that's restored on entry to userspace. */
1890                         set_thread_flag(TIF_RESTOREALL);
1891                 } else {
1892                         unsigned long toc;
1893
1894                         /* start is a relocated pointer to the function
1895                          * descriptor for the ELF _start routine.  The first
1896                          * entry in the function descriptor is the entry
1897                          * address of _start and the second entry is the TOC
1898                          * value we need to use.
1899                          */
1900                         __get_user(entry, (unsigned long __user *)start);
1901                         __get_user(toc, (unsigned long __user *)start+1);
1902
1903                         /* Check whether the e_entry function descriptor entries
1904                          * need to be relocated before we can use them.
1905                          */
1906                         if (load_addr != 0) {
1907                                 entry += load_addr;
1908                                 toc   += load_addr;
1909                         }
1910                         regs->gpr[2] = toc;
1911                 }
1912                 regs_set_return_ip(regs, entry);
1913                 regs_set_return_msr(regs, MSR_USER64);
1914         } else {
1915                 regs->gpr[2] = 0;
1916                 regs_set_return_ip(regs, start);
1917                 regs_set_return_msr(regs, MSR_USER32);
1918         }
1919
1920 #endif
1921 #ifdef CONFIG_VSX
1922         current->thread.used_vsr = 0;
1923 #endif
1924         current->thread.load_slb = 0;
1925         current->thread.load_fp = 0;
1926 #ifdef CONFIG_PPC_FPU_REGS
1927         memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
1928         current->thread.fp_save_area = NULL;
1929 #endif
1930 #ifdef CONFIG_ALTIVEC
1931         memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
1932         current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
1933         current->thread.vr_save_area = NULL;
1934         current->thread.vrsave = 0;
1935         current->thread.used_vr = 0;
1936         current->thread.load_vec = 0;
1937 #endif /* CONFIG_ALTIVEC */
1938 #ifdef CONFIG_SPE
1939         memset(current->thread.evr, 0, sizeof(current->thread.evr));
1940         current->thread.acc = 0;
1941         current->thread.spefscr = 0;
1942         current->thread.used_spe = 0;
1943 #endif /* CONFIG_SPE */
1944 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1945         current->thread.tm_tfhar = 0;
1946         current->thread.tm_texasr = 0;
1947         current->thread.tm_tfiar = 0;
1948         current->thread.load_tm = 0;
1949 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1950 }
1951 EXPORT_SYMBOL(start_thread);
1952
1953 #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1954                 | PR_FP_EXC_RES | PR_FP_EXC_INV)
1955
1956 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1957 {
1958         struct pt_regs *regs = tsk->thread.regs;
1959
1960         /* This is a bit hairy.  If we are an SPE enabled processor
1961          * (have embedded fp) we store the IEEE exception enable flags in
1962          * fpexc_mode.  fpexc_mode is also used for setting FP exception
1963          * mode (async, precise, disabled) for 'Classic' FP. */
1964         if (val & PR_FP_EXC_SW_ENABLE) {
1965                 if (cpu_has_feature(CPU_FTR_SPE)) {
1966                         /*
1967                          * When the sticky exception bits are set
1968                          * directly by userspace, it must call prctl
1969                          * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1970                          * in the existing prctl settings) or
1971                          * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1972                          * the bits being set).  <fenv.h> functions
1973                          * saving and restoring the whole
1974                          * floating-point environment need to do so
1975                          * anyway to restore the prctl settings from
1976                          * the saved environment.
1977                          */
1978 #ifdef CONFIG_SPE
1979                         tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1980                         tsk->thread.fpexc_mode = val &
1981                                 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1982 #endif
1983                         return 0;
1984                 } else {
1985                         return -EINVAL;
1986                 }
1987         }
1988
1989         /* On a CONFIG_SPE implementation this does not hurt us.  The bits
1990          * that __pack_fe01 uses do not overlap with the bits used for
1991          * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits on
1992          * CONFIG_SPE implementations are reserved, so writing to them
1993          * does not change anything. */
1994         if (val > PR_FP_EXC_PRECISE)
1995                 return -EINVAL;
1996         tsk->thread.fpexc_mode = __pack_fe01(val);
1997         if (regs != NULL && (regs->msr & MSR_FP) != 0) {
1998                 regs_set_return_msr(regs, (regs->msr & ~(MSR_FE0|MSR_FE1))
1999                                                 | tsk->thread.fpexc_mode);
2000         }
2001         return 0;
2002 }
2003
2004 int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
2005 {
2006         unsigned int val = 0;
2007
2008         if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
2009                 if (cpu_has_feature(CPU_FTR_SPE)) {
2010                         /*
2011                          * When the sticky exception bits are set
2012                          * directly by userspace, it must call prctl
2013                          * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
2014                          * in the existing prctl settings) or
2015                          * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
2016                          * the bits being set).  <fenv.h> functions
2017                          * saving and restoring the whole
2018                          * floating-point environment need to do so
2019                          * anyway to restore the prctl settings from
2020                          * the saved environment.
2021                          */
2022 #ifdef CONFIG_SPE
2023                         tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
2024                         val = tsk->thread.fpexc_mode;
2025 #endif
2026                 } else
2027                         return -EINVAL;
2028         } else {
2029                 val = __unpack_fe01(tsk->thread.fpexc_mode);
2030         }
2031         return put_user(val, (unsigned int __user *) adr);
2032 }
2033
2034 int set_endian(struct task_struct *tsk, unsigned int val)
2035 {
2036         struct pt_regs *regs = tsk->thread.regs;
2037
2038         if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
2039             (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
2040                 return -EINVAL;
2041
2042         if (regs == NULL)
2043                 return -EINVAL;
2044
2045         if (val == PR_ENDIAN_BIG)
2046                 regs_set_return_msr(regs, regs->msr & ~MSR_LE);
2047         else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
2048                 regs_set_return_msr(regs, regs->msr | MSR_LE);
2049         else
2050                 return -EINVAL;
2051
2052         return 0;
2053 }
2054
2055 int get_endian(struct task_struct *tsk, unsigned long adr)
2056 {
2057         struct pt_regs *regs = tsk->thread.regs;
2058         unsigned int val;
2059
2060         if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
2061             !cpu_has_feature(CPU_FTR_REAL_LE))
2062                 return -EINVAL;
2063
2064         if (regs == NULL)
2065                 return -EINVAL;
2066
2067         if (regs->msr & MSR_LE) {
2068                 if (cpu_has_feature(CPU_FTR_REAL_LE))
2069                         val = PR_ENDIAN_LITTLE;
2070                 else
2071                         val = PR_ENDIAN_PPC_LITTLE;
2072         } else
2073                 val = PR_ENDIAN_BIG;
2074
2075         return put_user(val, (unsigned int __user *)adr);
2076 }
2077
2078 int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
2079 {
2080         tsk->thread.align_ctl = val;
2081         return 0;
2082 }
2083
2084 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
2085 {
2086         return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
2087 }
2088
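/*
 * A stack pointer is also acceptable if it lies within this CPU's hard-IRQ
 * or soft-IRQ stack.
 */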
2089 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
2090                                   unsigned long nbytes)
2091 {
2092         unsigned long stack_page;
2093         unsigned long cpu = task_cpu(p);
2094
2095         stack_page = (unsigned long)hardirq_ctx[cpu];
2096         if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2097                 return 1;
2098
2099         stack_page = (unsigned long)softirq_ctx[cpu];
2100         if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2101                 return 1;
2102
2103         return 0;
2104 }
2105
2106 static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
2107                                         unsigned long nbytes)
2108 {
2109 #ifdef CONFIG_PPC64
2110         unsigned long stack_page;
2111         unsigned long cpu = task_cpu(p);
2112
2113         if (!paca_ptrs)
2114                 return 0;
2115
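        /* emergency_sp points at the top of the emergency stack, so the
         * usable range starts THREAD_SIZE below it. */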
2116         stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE;
2117         if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2118                 return 1;
2119
2120 # ifdef CONFIG_PPC_BOOK3S_64
2121         stack_page = (unsigned long)paca_ptrs[cpu]->nmi_emergency_sp - THREAD_SIZE;
2122         if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2123                 return 1;
2124
2125         stack_page = (unsigned long)paca_ptrs[cpu]->mc_emergency_sp - THREAD_SIZE;
2126         if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2127                 return 1;
2128 # endif
2129 #endif
2130
2131         return 0;
2132 }
2133
2134
2135 int validate_sp(unsigned long sp, struct task_struct *p,
2136                        unsigned long nbytes)
2137 {
2138         unsigned long stack_page = (unsigned long)task_stack_page(p);
2139
2140         if (sp < THREAD_SIZE)
2141                 return 0;
2142
2143         if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2144                 return 1;
2145
2146         if (valid_irq_stack(sp, p, nbytes))
2147                 return 1;
2148
2149         return valid_emergency_stack(sp, p, nbytes);
2150 }
2151
2152 EXPORT_SYMBOL(validate_sp);
2153
2154 static unsigned long ___get_wchan(struct task_struct *p)
2155 {
2156         unsigned long ip, sp;
2157         int count = 0;
2158
2159         sp = p->thread.ksp;
2160         if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
2161                 return 0;
2162
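        /*
         * Walk the saved back-chain: the first word of each frame points to
         * the caller's frame.  Skip the newest frame (count == 0) and any
         * return addresses that fall inside scheduler functions.
         */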
2163         do {
2164                 sp = *(unsigned long *)sp;
2165                 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
2166                     task_is_running(p))
2167                         return 0;
2168                 if (count > 0) {
2169                         ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
2170                         if (!in_sched_functions(ip))
2171                                 return ip;
2172                 }
2173         } while (count++ < 16);
2174         return 0;
2175 }
2176
2177 unsigned long __get_wchan(struct task_struct *p)
2178 {
2179         unsigned long ret;
2180
2181         if (!try_get_task_stack(p))
2182                 return 0;
2183
2184         ret = ___get_wchan(p);
2185
2186         put_task_stack(p);
2187
2188         return ret;
2189 }
2190
2191 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
2192
2193 void __no_sanitize_address show_stack(struct task_struct *tsk,
2194                                       unsigned long *stack,
2195                                       const char *loglvl)
2196 {
2197         unsigned long sp, ip, lr, newsp;
2198         int count = 0;
2199         int firstframe = 1;
2200         unsigned long ret_addr;
2201         int ftrace_idx = 0;
2202
2203         if (tsk == NULL)
2204                 tsk = current;
2205
2206         if (!try_get_task_stack(tsk))
2207                 return;
2208
2209         sp = (unsigned long) stack;
2210         if (sp == 0) {
2211                 if (tsk == current)
2212                         sp = current_stack_frame();
2213                 else
2214                         sp = tsk->thread.ksp;
2215         }
2216
2217         lr = 0;
2218         printk("%sCall Trace:\n", loglvl);
2219         do {
2220                 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
2221                         break;
2222
2223                 stack = (unsigned long *) sp;
2224                 newsp = stack[0];
2225                 ip = stack[STACK_FRAME_LR_SAVE];
2226                 if (!firstframe || ip != lr) {
2227                         printk("%s["REG"] ["REG"] %pS",
2228                                 loglvl, sp, ip, (void *)ip);
2229                         ret_addr = ftrace_graph_ret_addr(current,
2230                                                 &ftrace_idx, ip, stack);
2231                         if (ret_addr != ip)
2232                                 pr_cont(" (%pS)", (void *)ret_addr);
2233                         if (firstframe)
2234                                 pr_cont(" (unreliable)");
2235                         pr_cont("\n");
2236                 }
2237                 firstframe = 0;
2238
2239                 /*
2240                  * See if this is an exception frame.
2241                  * We look for the "regshere" marker in the current frame.
2242                  */
2243                 if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS)
2244                     && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
2245                         struct pt_regs *regs = (struct pt_regs *)
2246                                 (sp + STACK_FRAME_OVERHEAD);
2247
2248                         lr = regs->link;
2249                         printk("%s--- interrupt: %lx at %pS\n",
2250                                loglvl, regs->trap, (void *)regs->nip);
2251                         __show_regs(regs);
2252                         printk("%s--- interrupt: %lx\n",
2253                                loglvl, regs->trap);
2254
2255                         firstframe = 1;
2256                 }
2257
2258                 sp = newsp;
2259         } while (count++ < kstack_depth_to_print);
2260
2261         put_task_stack(tsk);
2262 }
2263
2264 #ifdef CONFIG_PPC64
2265 /* Called with hard IRQs off */
2266 void notrace __ppc64_runlatch_on(void)
2267 {
2268         struct thread_info *ti = current_thread_info();
2269
2270         if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2271                 /*
2272                  * Least significant bit (RUN) is the only writable bit of
2273                  * the CTRL register, so we can avoid mfspr. 2.06 is not the
2274                  * earliest ISA where this is the case, but it's convenient.
2275                  */
2276                 mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
2277         } else {
2278                 unsigned long ctrl;
2279
2280                 /*
2281                  * Some architectures (e.g., Cell) have writable fields other
2282                  * than RUN, so do the read-modify-write.
2283                  */
2284                 ctrl = mfspr(SPRN_CTRLF);
2285                 ctrl |= CTRL_RUNLATCH;
2286                 mtspr(SPRN_CTRLT, ctrl);
2287         }
2288
2289         ti->local_flags |= _TLF_RUNLATCH;
2290 }
2291
2292 /* Called with hard IRQs off */
2293 void notrace __ppc64_runlatch_off(void)
2294 {
2295         struct thread_info *ti = current_thread_info();
2296
2297         ti->local_flags &= ~_TLF_RUNLATCH;
2298
2299         if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2300                 mtspr(SPRN_CTRLT, 0);
2301         } else {
2302                 unsigned long ctrl;
2303
2304                 ctrl = mfspr(SPRN_CTRLF);
2305                 ctrl &= ~CTRL_RUNLATCH;
2306                 mtspr(SPRN_CTRLT, ctrl);
2307         }
2308 }
2309 #endif /* CONFIG_PPC64 */
2310
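/*
 * Randomise the initial user stack pointer within a page (unless address
 * space randomisation is disabled), then align it to 16 bytes.
 */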
2311 unsigned long arch_align_stack(unsigned long sp)
2312 {
2313         if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2314                 sp -= get_random_int() & ~PAGE_MASK;
2315         return sp & ~0xf;
2316 }
2317
2318 static inline unsigned long brk_rnd(void)
2319 {
2320         unsigned long rnd = 0;
2321
2322         /* 8MB for 32bit, 1GB for 64bit */
2323         if (is_32bit_task())
2324                 rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
2325         else
2326                 rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
2327
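        /* rnd is a number of pages; shift it back up to a byte offset. */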
2328         return rnd << PAGE_SHIFT;
2329 }
2330
2331 unsigned long arch_randomize_brk(struct mm_struct *mm)
2332 {
2333         unsigned long base = mm->brk;
2334         unsigned long ret;
2335
2336 #ifdef CONFIG_PPC_BOOK3S_64
2337         /*
2338          * If we are using 1TB segments and we are allowed to randomise
2339          * the heap, we can put it above 1TB so it is backed by a 1TB
2340          * segment. Otherwise the heap will be in the bottom 1TB
2341          * which always uses 256MB segments and this may result in a
2342          * performance penalty.
2343          */
2344         if (!radix_enabled() && !is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2345                 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2346 #endif
2347
2348         ret = PAGE_ALIGN(base + brk_rnd());
2349
2350         if (ret < mm->brk)
2351                 return mm->brk;
2352
2353         return ret;
2354 }
2355