// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/entry-common.h>

#include <asm/asm-prototypes.h>
#include <asm/bug.h>
#include <asm/csr.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>

int show_unhandled_signals = 1;

static DEFINE_SPINLOCK(die_lock);

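/*
 * Dump the 16-bit parcels of kernel text around the faulting PC (ten before
 * epc, two from epc onwards), marking the faulting one with parentheses.
 * Bails out early if the instruction memory cannot be read.
 */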
static void dump_kernel_instr(const char *loglvl, struct pt_regs *regs)
{
        char str[sizeof("0000 ") * 12 + 2 + 1], *p = str;
        const u16 *insns = (u16 *)instruction_pointer(regs);
        long bad;
        u16 val;
        int i;

        for (i = -10; i < 2; i++) {
                bad = get_kernel_nofault(val, &insns[i]);
                if (!bad) {
                        p += sprintf(p, i == 0 ? "(%04hx) " : "%04hx ", val);
                } else {
                        printk("%sCode: Unable to access instruction at 0x%px.\n",
                               loglvl, &insns[i]);
                        return;
                }
        }
        printk("%sCode: %s\n", loglvl, str);
}

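/*
 * Standard oops path: serialize on die_lock, dump registers, modules and
 * code, give the die notifier chain and kdump a chance to run, then either
 * panic or kill the offending task.
 */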
void die(struct pt_regs *regs, const char *str)
{
        static int die_counter;
        int ret;
        long cause;
        unsigned long flags;

        oops_enter();

        spin_lock_irqsave(&die_lock, flags);
        console_verbose();
        bust_spinlocks(1);

        pr_emerg("%s [#%d]\n", str, ++die_counter);
        print_modules();
        if (regs) {
                show_regs(regs);
                dump_kernel_instr(KERN_EMERG, regs);
        }

        cause = regs ? regs->cause : -1;
        ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV);

        if (kexec_should_crash(current))
                crash_kexec(regs);

        bust_spinlocks(0);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        spin_unlock_irqrestore(&die_lock, flags);
        oops_exit();

        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
        if (ret != NOTIFY_STOP)
                make_task_dead(SIGSEGV);
}

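/*
 * Deliver a fault signal for a user-mode trap, optionally logging
 * (rate-limited) details about signals no handler will catch.
 */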
void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
{
        struct task_struct *tsk = current;

        if (show_unhandled_signals && unhandled_signal(tsk, signo)
            && printk_ratelimit()) {
                pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
                        tsk->comm, task_pid_nr(tsk), signo, code, addr);
                print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
                pr_cont("\n");
                __show_regs(regs);
        }

        force_sig_fault(signo, code, (void __user *)addr);
}

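/*
 * Report a trap: user-mode traps turn into a signal via do_trap(); for
 * kernel-mode traps we first look for an exception table fixup and die()
 * only if there is none.
 */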
static void do_trap_error(struct pt_regs *regs, int signo, int code,
        unsigned long addr, const char *str)
{
        current->thread.bad_cause = regs->cause;

        if (user_mode(regs)) {
                do_trap(regs, signo, code, addr);
        } else {
                if (!fixup_exception(regs))
                        die(regs, str);
        }
}

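/*
 * DO_ERROR_INFO() stamps out a trap handler that reports the fault through
 * do_trap_error() with the appropriate irqentry accounting: normal user
 * entry/exit for user-mode traps, NMI-style entry/exit for kernel-mode ones.
 * On XIP kernels with alternatives the handlers live in the .xip.traps
 * section rather than ordinary noinstr text.
 */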
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE)
#define __trap_section __noinstr_section(".xip.traps")
#else
#define __trap_section noinstr
#endif
#define DO_ERROR_INFO(name, signo, code, str)                                   \
asmlinkage __visible __trap_section void name(struct pt_regs *regs)             \
{                                                                               \
        if (user_mode(regs)) {                                                  \
                irqentry_enter_from_user_mode(regs);                            \
                do_trap_error(regs, signo, code, regs->epc, "Oops - " str);     \
                irqentry_exit_to_user_mode(regs);                               \
        } else {                                                                \
                irqentry_state_t state = irqentry_nmi_enter(regs);              \
                do_trap_error(regs, signo, code, regs->epc, "Oops - " str);     \
                irqentry_nmi_exit(regs, state);                                 \
        }                                                                       \
}

DO_ERROR_INFO(do_trap_unknown,
        SIGILL, ILL_ILLTRP, "unknown exception");
DO_ERROR_INFO(do_trap_insn_misaligned,
        SIGBUS, BUS_ADRALN, "instruction address misaligned");
DO_ERROR_INFO(do_trap_insn_fault,
        SIGSEGV, SEGV_ACCERR, "instruction access fault");
DO_ERROR_INFO(do_trap_insn_illegal,
        SIGILL, ILL_ILLOPC, "illegal instruction");
DO_ERROR_INFO(do_trap_load_fault,
        SIGSEGV, SEGV_ACCERR, "load access fault");
#ifndef CONFIG_RISCV_M_MODE
DO_ERROR_INFO(do_trap_load_misaligned,
        SIGBUS, BUS_ADRALN, "load address misaligned");
DO_ERROR_INFO(do_trap_store_misaligned,
        SIGBUS, BUS_ADRALN, "store (or AMO) address misaligned");
#else
int handle_misaligned_load(struct pt_regs *regs);
int handle_misaligned_store(struct pt_regs *regs);

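/*
 * In M-mode configurations misaligned loads and stores trap into the kernel
 * itself; handle_misaligned_load()/handle_misaligned_store() try to fix up
 * the access, and only a non-zero return (fixup failed) is reported as an
 * error.
 */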
asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
{
        if (user_mode(regs)) {
                irqentry_enter_from_user_mode(regs);

                if (handle_misaligned_load(regs))
                        do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
                                      "Oops - load address misaligned");

                irqentry_exit_to_user_mode(regs);
        } else {
                irqentry_state_t state = irqentry_nmi_enter(regs);

                if (handle_misaligned_load(regs))
                        do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
                                      "Oops - load address misaligned");

                irqentry_nmi_exit(regs, state);
        }
}

asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
{
        if (user_mode(regs)) {
                irqentry_enter_from_user_mode(regs);

                if (handle_misaligned_store(regs))
                        do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
                                      "Oops - store (or AMO) address misaligned");

                irqentry_exit_to_user_mode(regs);
        } else {
                irqentry_state_t state = irqentry_nmi_enter(regs);

                if (handle_misaligned_store(regs))
                        do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
                                      "Oops - store (or AMO) address misaligned");

                irqentry_nmi_exit(regs, state);
        }
}
#endif
DO_ERROR_INFO(do_trap_store_fault,
        SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_s,
        SIGILL, ILL_ILLTRP, "environment call from S-mode");
DO_ERROR_INFO(do_trap_ecall_m,
        SIGILL, ILL_ILLTRP, "environment call from M-mode");

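/*
 * Length in bytes of the trapping instruction at @pc (2 for a compressed
 * encoding, 4 otherwise), or 0 if the instruction cannot be read.
 */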
static inline unsigned long get_break_insn_length(unsigned long pc)
{
        bug_insn_t insn;

        if (get_kernel_nofault(insn, (bug_insn_t *)pc))
                return 0;

        return GET_INSN_LENGTH(insn);
}

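/*
 * Common ebreak handling: let kprobes/uprobes (and, for kernel mode, kgdb)
 * claim the breakpoint first; otherwise deliver SIGTRAP to user space, step
 * over WARN()-style bugs, or die() on a genuine kernel BUG().
 */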
void handle_break(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
        if (kprobe_single_step_handler(regs))
                return;

        if (kprobe_breakpoint_handler(regs))
                return;
#endif
#ifdef CONFIG_UPROBES
        if (uprobe_single_step_handler(regs))
                return;

        if (uprobe_breakpoint_handler(regs))
                return;
#endif
        current->thread.bad_cause = regs->cause;

        if (user_mode(regs))
                force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
#ifdef CONFIG_KGDB
        else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
                                                                == NOTIFY_STOP)
                return;
#endif
        else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN)
                regs->epc += get_break_insn_length(regs->epc);
        else
                die(regs, "Kernel BUG");
}

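/*
 * Breakpoint exception entry point: wraps handle_break() with the generic
 * entry/exit bookkeeping for user- vs. kernel-mode traps.
 */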
asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{
        if (user_mode(regs)) {
                irqentry_enter_from_user_mode(regs);

                handle_break(regs);

                irqentry_exit_to_user_mode(regs);
        } else {
                irqentry_state_t state = irqentry_nmi_enter(regs);

                handle_break(regs);

                irqentry_nmi_exit(regs, state);
        }
}

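/*
 * System call entry (ecall from U-mode): step epc past the 4-byte ecall,
 * save the original a0 (it doubles as the return-value register), run the
 * generic syscall entry/exit work and dispatch via syscall_handler(), or
 * return -ENOSYS for out-of-range syscall numbers.
 */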
asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
{
        if (user_mode(regs)) {
                ulong syscall = regs->a7;

                regs->epc += 4;
                regs->orig_a0 = regs->a0;

                syscall = syscall_enter_from_user_mode(regs, syscall);

                if (syscall < NR_syscalls)
                        syscall_handler(regs, syscall);
                else
                        regs->a0 = -ENOSYS;

                syscall_exit_to_user_mode(regs);
        } else {
                irqentry_state_t state = irqentry_nmi_enter(regs);

                do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->epc,
                        "Oops - environment call from U-mode");

                irqentry_nmi_exit(regs, state);
        }
}

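/*
 * Page fault entry point; the real work happens in handle_page_fault().
 * Interrupts are disabled again before irqentry_exit(), which expects to be
 * called with them off.
 */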
#ifdef CONFIG_MMU
asmlinkage __visible noinstr void do_page_fault(struct pt_regs *regs)
{
        irqentry_state_t state = irqentry_enter(regs);

        handle_page_fault(regs);

        local_irq_disable();

        irqentry_exit(regs, state);
}
#endif

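/*
 * Interrupt entry point: hand the trap to the root interrupt controller
 * handler via handle_arch_irq(), bracketed by the generic irqentry and
 * RCU/irq accounting, with the per-CPU irq_regs pointer saved and restored
 * around the call.
 */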
asmlinkage __visible noinstr void do_irq(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        irqentry_state_t state = irqentry_enter(regs);

        irq_enter_rcu();
        old_regs = set_irq_regs(regs);
        handle_arch_irq(regs);
        set_irq_regs(old_regs);
        irq_exit_rcu();

        irqentry_exit(regs, state);
}

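/*
 * Used by report_bug() to check that a trapping kernel address really holds
 * a BUG()/WARN() trap instruction: either the full 32-bit ebreak or the
 * compressed c.ebreak encoding.
 */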
#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long pc)
{
        bug_insn_t insn;

        if (pc < VMALLOC_START)
                return 0;
        if (get_kernel_nofault(insn, (bug_insn_t *)pc))
                return 0;
        if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
                return (insn == __BUG_INSN_32);
        else
                return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
}
#endif /* CONFIG_GENERIC_BUG */

#ifdef CONFIG_VMAP_STACK
/*
 * Extra stack space that allows us to provide panic messages when the kernel
 * has overflowed its stack.
 */
static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
                overflow_stack)__aligned(16);
/*
 * A temporary stack for use by handle_kernel_stack_overflow.  This is used so
 * we can call into C code to get the per-hart overflow stack.  Usage of this
 * stack must be protected by spin_shadow_stack.
 */
long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);

/*
 * A pseudo spinlock to protect the shadow stack from being used by multiple
 * harts concurrently.  This isn't a real spinlock because the lock side must
 * be taken without a valid stack and with only a single register available;
 * it's only taken while in the process of panicking anyway, so the
 * performance and error checking a proper spinlock gives us don't matter.
 */
unsigned long spin_shadow_stack;

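/*
 * Called from the assembly stack-overflow path while running on the shadow
 * stack: returns the top of this hart's overflow stack (stacks grow down).
 */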
asmlinkage unsigned long get_overflow_stack(void)
{
        return (unsigned long)this_cpu_ptr(overflow_stack) +
                OVERFLOW_STACK_SIZE;
}

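/*
 * Entered once we are safely on the overflow stack: release the shadow-stack
 * pseudo-spinlock for other harts, then report the overflow and panic.
 */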
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
        unsigned long tsk_stk = (unsigned long)current->stack;
        unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

        /*
         * We're done with the shadow stack by this point, as we're on the
         * overflow stack.  Tell any other concurrent overflowing harts that
         * they can proceed with panicking by releasing the pseudo-spinlock.
         *
         * This pairs with an amoswap.aq in handle_kernel_stack_overflow.
         */
        smp_store_release(&spin_shadow_stack, 0);

        console_verbose();

        pr_emerg("Insufficient stack space to handle exception!\n");
        pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
                        tsk_stk, tsk_stk + THREAD_SIZE);
        pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
                        ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

        __show_regs(regs);
        panic("Kernel stack overflow");

        for (;;)
                wait_for_interrupt();
}
#endif