// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/pgalloc.h>
#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	cause = regs->cause;
	addr = regs->badaddr;
	tsk = current;
	mm = tsk->mm;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END)))
		goto vmalloc_fault;

	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->status & SR_PIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm))
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
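
	/*
	 * Look up the VMA under the mmap read lock; a VM_FAULT_RETRY result
	 * drops the lock and sends us back here with FAULT_FLAG_TRIED set.
	 */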
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma))
		goto bad_area;
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;
	if (unlikely(expand_stack(vma, addr)))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;
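
	/*
	 * Check that the VMA permits the access type implied by the
	 * RISC-V exception cause (instruction fetch, load or store).
	 */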
	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case EXC_LOAD_PAGE_FAULT:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);
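	/* The regs argument lets handle_mm_fault() do the fault accounting. */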

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);
	die(regs, "Oops");
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	do_trap(regs, SIGBUS, BUS_ADRERR, addr);
	return;
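
	/*
	 * Lazily fill in vmalloc-range mappings for the kernel: copy the
	 * relevant entries from init_mm's page table into the page table
	 * currently installed in the satp CSR, without touching any VMA.
	 */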
vmalloc_fault:
	{
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		p4d_t *p4d, *p4d_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;
		int index;

		/* User mode accesses just cause a SIGSEGV */
		if (user_mode(regs))
			return do_trap(regs, SIGSEGV, code, addr);

		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk->active_mm->pgd" here.
		 * We might be inside an interrupt in the middle
		 * of a task switch.
		 */
		index = pgd_index(addr);
		pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		p4d = p4d_offset(pgd, addr);
		p4d_k = p4d_offset(pgd_k, addr);
		if (!p4d_present(*p4d_k))
			goto no_context;

		pud = pud_offset(p4d, addr);
		pud_k = pud_offset(p4d_k, addr);
		if (!pud_present(*pud_k))
			goto no_context;

		/*
		 * Since the vmalloc area is global, it is unnecessary
		 * to copy individual PTEs
		 */
		pmd = pmd_offset(pud, addr);
		pmd_k = pmd_offset(pud_k, addr);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		/*
		 * Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, addr);
		if (!pte_present(*pte_k))
			goto no_context;

		/*
		 * The kernel assumes that TLBs don't cache invalid
		 * entries, but in RISC-V, SFENCE.VMA specifies an
		 * ordering constraint, not a cache flush; it is
		 * necessary even after writing invalid entries.
		 */
		local_flush_tlb_page(addr);
	}
}