// SPDX-License-Identifier: GPL-2.0
/*
 * fault.c:  Page fault handlers for the Sparc.
 */
#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/traps.h>

int show_unhandled_signals = 1;
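
/*
 * Print an oops-style report for a fault the kernel cannot recover
 * from, then die via die_if_kernel().
 */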
static void __noreturn unhandled_fault(unsigned long address,
				       struct task_struct *tsk,
				       struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
		       address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
		(tsk->mm ? (unsigned long) tsk->mm->pgd :
		 (unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}
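
/*
 * Resolve a fault taken in a kernel-mode user access via the range
 * exception table.  The entry found for ret_pc says whether its fixup
 * covers loads, stores, or both; for the one-sided cases the faulting
 * instruction is decoded (bit 21 is the store bit of the op3 field,
 * and op3 0x0f is swap, which both loads and stores) to decide whether
 * the fixup still applies.  If nothing matches, fabricate a pt_regs
 * and oops.
 */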
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}
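
/*
 * Rate-limited "segfault at ..." console message for unhandled user
 * signals, matching what other architectures report.
 */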
static void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, code,
				addr, current);

	force_sig_fault(sig, code, (void __user *) addr, 0);
}
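
/*
 * The address reported to userspace in si_addr: the pc itself for a
 * text fault, otherwise the effective address computed from the
 * faulting load/store instruction.
 */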
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS)
		insn = *(unsigned int *) regs->pc;
	else
		__get_user(insn, (unsigned int *) regs->pc);

	return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}
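
/*
 * Main page-fault entry point, invoked from the trap-handling
 * assembly.  text_fault means the fault was on an instruction fetch
 * (so the faulting address is the pc itself); write means the access
 * was a store.
 */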
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int code;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (pagefault_disabled() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	mmap_read_lock(mm);
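
	/*
	 * Kernel addresses carry no vma, so a kernel-mode access above
	 * PAGE_OFFSET can never be satisfied from this mm and is
	 * rejected outright.
	 */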
	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	if (from_user)
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);
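
	/*
	 * handle_mm_fault() may drop mmap_lock and sleep; if a fatal
	 * signal arrived in the meantime, bail out and let it be
	 * delivered on the way back to userspace.
	 */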
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		/* Values below 10 are reserved for other things */
		if (fixup > 10) {
			extern const unsigned int __memset_start[];
			extern const unsigned int __memset_end[];
			extern const unsigned int __csum_partial_copy_start[];
			extern const unsigned int __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
				regs->pc, fixup, g2);
#endif
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	mmap_read_unlock(mm);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	mmap_read_unlock(mm);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;
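
/*
 * vmalloc/ioremap mappings are created only in init_mm's page tables,
 * so the first touch through another context's pgd lands here and the
 * missing top-level entry is copied over lazily.
 */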
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		p4d = p4d_offset(pgd, address);
		pud = pud_offset(p4d, address);
		pmd = pmd_offset(pud, address);

		p4d_k = p4d_offset(pgd_k, address);
		pud_k = pud_offset(p4d_k, address);
		pmd_k = pmd_offset(pud_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		*pmd = *pmd_k;
		return;
	}
}
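
/*
 * Helper for the register-window fault handlers below: fault a user
 * stack page in by hand, outside the normal MMU fault path.
 */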
/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int flags = FAULT_FLAG_USER;
	int code;

	code = SEGV_MAPERR;

	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(vma, address, flags)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	mmap_read_unlock(mm);
	return;
bad_area:
	mmap_read_unlock(mm);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	mmap_read_unlock(mm);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL);
}
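
/*
 * A register window save area is 16 words (sp + 0x00 .. sp + 0x3c).
 * Each handler below checks whether that area straddles a page
 * boundary (sp + 0x38 landing on a different page than sp) and, if
 * so, faults in both pages.  Window overflow spills registers to the
 * stack and therefore needs write access; underflow and return fills
 * only read.
 */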
void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}