// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>

/*
 * Handle a fault in the kernel virtual address range, which is needed to
 * implement vmalloc/pkmap/fixmap.
 * Refer to asm/processor.h for the System Memory Map.
 *
 * This simply copies the PMD entry (pointer to the 2nd level page table or
 * hugepage) from the swapper pgdir to the task pgdir. The 2nd level
 * table/page is thus shared.
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
         */
        pgd_t *pgd, *pgd_k;
        p4d_t *p4d, *p4d_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        pgd = pgd_offset_fast(current->active_mm, address);
        pgd_k = pgd_offset_k(address);

        if (!pgd_present(*pgd_k))
                goto bad_area;

        p4d = p4d_offset(pgd, address);
        p4d_k = p4d_offset(pgd_k, address);
        if (!p4d_present(*p4d_k))
                goto bad_area;

        pud = pud_offset(p4d, address);
        pud_k = pud_offset(p4d_k, address);
        if (!pud_present(*pud_k))
                goto bad_area;

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                goto bad_area;

        set_pmd(pmd, *pmd_k);
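
        /*
         * Note: returning without creating a TLB entry still works; the
         * restarted access takes another TLB miss, and the refill handler
         * now finds the synced PMD when it re-walks the page tables (the
         * XXX below is about short-circuiting that second miss).
         */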
        /* XXX: create the TLB entry here */
        return 0;

bad_area:
        return 1;
}

void do_page_fault(unsigned long address, struct pt_regs *regs)
{
        struct vm_area_struct *vma = NULL;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        int sig, si_code = SEGV_MAPERR;
        unsigned int write = 0, exec = 0, mask;
        vm_fault_t fault = VM_FAULT_SIGSEGV;    /* handle_mm_fault() output */
        unsigned int flags;                     /* handle_mm_fault() input */

        /*
         * NOTE! We MUST NOT take any locks for this case. We may be in
         * an interrupt or a critical region, and should only copy the
         * information from the master page table, nothing more.
         */
        if (address >= VMALLOC_START && !user_mode(regs)) {
                if (unlikely(handle_kernel_vaddr_fault(address)))
                        goto no_context;
                else
                        return;
        }

        /*
         * If we're in an interrupt or have no user context,
         * we must not take the fault.
         */
        if (faulthandler_disabled() || !mm)
                goto no_context;
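
        /*
         * Classify the access by decoding the Exception Cause Register
         * (ECR): a ProtV store cause (also raised for ST/EX) means a
         * write, a ProtV instruction-fetch cause means an exec.
         */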
        if (regs->ecr_cause & ECR_C_PROTV_STORE)        /* ST/EX */
                write = 1;
        else if ((regs->ecr_vec == ECR_V_PROTV) &&
                 (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
                exec = 1;

        flags = FAULT_FLAG_DEFAULT;
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (write)
                flags |= FAULT_FLAG_WRITE;
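
        /*
         * FAULT_FLAG_DEFAULT already carries the retry-related bits
         * (at least FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_KILLABLE), so
         * only the per-fault user/write qualifiers are added above.
         */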

retry:
        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
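
        /*
         * find_vma() returns the first vma ending above the address, so
         * an address below vma->vm_start lies in a hole; that is only
         * legal for a stack vma which may grow down to cover it.
         */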
        if (unlikely(address < vma->vm_start)) {
                if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
                        goto bad_area;
        }

        /*
         * vm_area is good, now check permissions for this memory access
         */
        mask = VM_READ;
        if (write)
                mask = VM_WRITE;
        if (exec)
                mask = VM_EXEC;

        if (!(vma->vm_flags & mask)) {
                si_code = SEGV_ACCERR;
                goto bad_area;
        }

        fault = handle_mm_fault(vma, address, flags);
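
        /*
         * handle_mm_fault() may return VM_FAULT_RETRY with mmap_sem
         * already dropped, either because a signal is pending (handled
         * just below) or to allow a blocking retry (handled further down).
         */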

        /* Quick path to respond to signals */
        if (fault_signal_pending(fault, regs)) {
                if (!user_mode(regs))
                        goto no_context;
                return;
        }

        /*
         * Fault retry nuances, mmap_sem already relinquished by core mm
         */
        if (unlikely((fault & VM_FAULT_RETRY) &&
                     (flags & FAULT_FLAG_ALLOW_RETRY))) {
                flags |= FAULT_FLAG_TRIED;
                goto retry;
        }
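
        /*
         * Both the success and the failure paths fall through to the
         * bad_area label below; only what happens after mmap_sem is
         * released differs.
         */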

bad_area:
        up_read(&mm->mmap_sem);

        /*
         * Major/minor page fault accounting
         * (in case of retry we only land here once)
         */
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

        if (likely(!(fault & VM_FAULT_ERROR))) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                      regs, address);
                } else {
                        tsk->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                      regs, address);
                }

                /* Normal return path: fault handled gracefully */
                return;
        }

        if (!user_mode(regs))
                goto no_context;

        if (fault & VM_FAULT_OOM) {
                pagefault_out_of_memory();
                return;
        }

        if (fault & VM_FAULT_SIGBUS) {
                sig = SIGBUS;
                si_code = BUS_ADRERR;
        } else {
                sig = SIGSEGV;
        }
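
        /*
         * If a goto bad_area fired before handle_mm_fault() ever ran,
         * fault still holds its VM_FAULT_SIGSEGV initializer, so that
         * path also ends up delivering SIGSEGV here.
         */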

        tsk->thread.fault_address = address;
        force_sig_fault(sig, si_code, (void __user *)address);
        return;

no_context:
        if (fixup_exception(regs))
                return;

        die("Oops", regs, address);
}