// SPDX-License-Identifier: GPL-2.0
/*
 * fault.c: Page fault handlers for the Sparc.
 */

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/traps.h>
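
/*
 * When set, unhandled user faults are reported (rate-limited) via
 * show_signal_msg() below before the signal is delivered.
 */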
int show_unhandled_signals = 1;

static void __noreturn unhandled_fault(unsigned long address,
				       struct task_struct *tsk,
				       struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		    "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
		       address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
		(tsk->mm ? (unsigned long) tsk->mm->pgd :
		    (unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
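		/*
		 * In a SPARC format-3 memory instruction the op3 field
		 * sits in bits 24:19; bit 21 is set for the store
		 * opcodes and clear for the corresponding loads.
		 */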
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		/* op3 == 0x0f (SWAP) both reads and writes memory */
		if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}

static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, code,
				addr, current);

	force_sig_fault(sig, code, (void __user *) addr, 0);
}

static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS)
		insn = *(unsigned int *) regs->pc;
	else
		__get_user(insn, (unsigned int *) regs->pc);
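
	/*
	 * A kernel-mode pc can be dereferenced directly; a user pc has
	 * to go through __get_user() since the text page may itself be
	 * unmapped.  safe_compute_effective_address() then decodes the
	 * load/store to recover the data address it touched.
	 */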
	return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}

asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int code;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
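
	/*
	 * FAULT_FLAG_ALLOW_RETRY lets handle_mm_fault() drop mmap_sem
	 * while it waits for page I/O and report VM_FAULT_RETRY back
	 * to us; FAULT_FLAG_KILLABLE lets a fatal signal interrupt
	 * that wait.
	 */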

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (pagefault_disabled() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	down_read(&mm->mmap_sem);

	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	if (from_user)
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		/* Values below 10 are reserved for other things */
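		/*
		 * Larger values are the address of the fixup code
		 * itself: restore %g2 and resume execution there (see
		 * the pc/npc rewrite below).
		 */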
		if (fixup > 10) {
			extern const unsigned int __memset_start[];
			extern const unsigned int __memset_end[];
			extern const unsigned int __csum_partial_copy_start[];
			extern const unsigned int __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
				regs->pc, fixup, g2);
#endif
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
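		/*
		 * vmalloc/ioremap mappings are created only in
		 * init_mm's page table; each process's pgd picks up
		 * new kernel entries lazily, the first time it faults
		 * on them, by copying from init_mm here.
		 */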
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		p4d = p4d_offset(pgd, address);
		pud = pud_offset(p4d, address);
		pmd = pmd_offset(pud, address);

		p4d_k = p4d_offset(pgd_k, address);
		pud_k = pud_offset(p4d_k, address);
		pmd_k = pmd_offset(pud_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		*pmd = *pmd_k;
		return;
	}
}

/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int flags = FAULT_FLAG_USER;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(vma, address, flags)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL);
}

void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
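	/*
	 * A saved register window occupies 16 words (0x40 bytes) at
	 * %sp, so sp + 0x38 is the start of its last doubleword.  If
	 * the save area straddles a page boundary, fault in the page
	 * holding that last doubleword as well as the page holding
	 * %sp; the underflow and return cases below do the same.
	 */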
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}