// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory fault handling for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

/*
 * Page fault handling for the Hexagon Virtual Machine.
 * Can also be called by a native port emulating the HVM
 * exceptions.
 */

#include <asm/traps.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>

/*
 * Decode of hardware exception sends us to one of several
 * entry points. At each, we generate canonical arguments
 * for handling by the abstract memory management code.
 */

/* Cause codes passed to do_page_fault() by the entry points below. */
#define FLT_IFETCH     -1
#define FLT_LOAD        0
#define FLT_STORE       1

/*
 * Canonical page fault handler: 'address' is the faulting virtual
 * address and 'cause' is one of the FLT_* codes above.
 */
void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	int si_signo;
	int si_code = SEGV_MAPERR;	/* assume no mapping until a VMA is found */
	vm_fault_t fault;
	const struct exception_table_entry *fixup;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	/*
	 * If we're in an interrupt or have no user context,
	 * then we must not take the fault.
	 */
	if (unlikely(in_interrupt() || !mm))
		goto no_context;

	local_irq_enable();

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
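
	/* Account this fault to the perf software page-fault counter. */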
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
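
	/*
	 * find_vma() returns the first VMA that ends above 'address'; the
	 * address may still lie below vma->vm_start, in which case it is
	 * valid only if the VMA is a stack that may grow down to cover it.
	 */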
	if (vma->vm_start <= address)
		goto good_area;

	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	/* Address space is OK. Now check access rights. */
	si_code = SEGV_ACCERR;
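
	/* Check that the access type implied by 'cause' is permitted. */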
	switch (cause) {
	case FLT_IFETCH:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case FLT_LOAD:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case FLT_STORE:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	}

	fault = handle_mm_fault(vma, address, flags, regs);
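
	/*
	 * handle_mm_fault() returns a bitmask: VM_FAULT_* error bits on
	 * failure, VM_FAULT_RETRY if the mmap lock was dropped mid-fault,
	 * or VM_FAULT_COMPLETED if the core MM finished the fault and has
	 * already released the lock.
	 */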
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	/* The most common case -- we are done. */
	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}

		mmap_read_unlock(mm);
		return;
	}

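	/*
	 * handle_mm_fault() reported VM_FAULT_ERROR: drop the lock and
	 * work out which signal, if any, to deliver.
	 */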
	mmap_read_unlock(mm);

	/* Handle copyin/out exception cases */
	if (!user_mode(regs))
		goto no_context;

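	/*
	 * Out of memory: defer to the OOM machinery rather than delivering
	 * a signal directly.
	 */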
	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	/* User-mode address is in the memory map, but we are
	 * unable to fix up the page fault.
	 */
	if (fault & VM_FAULT_SIGBUS) {
		si_signo = SIGBUS;
		si_code = BUS_ADRERR;
	}
	/* Address is not in the memory map */
	else {
		si_signo = SIGSEGV;
		si_code = SEGV_ACCERR;
	}
	force_sig_fault(si_signo, si_code, (void __user *)address);
	return;

bad_area:
	mmap_read_unlock(mm);

	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}
	/* Kernel-mode fault falls through */

no_context:
	/* A fault taken in kernel mode (e.g. from a user copy routine) may
	 * have an exception-table fixup: redirect the PC to the landing
	 * pad and resume. */
	fixup = search_exception_tables(pt_elr(regs));
	if (fixup) {
		pt_set_elr(regs, fixup->fixup);
		return;
	}

	/* Things are looking very, very bad now */
	bust_spinlocks(1);
	printk(KERN_EMERG "Unable to handle kernel paging request at "
		"virtual address 0x%08lx, regs %p\n", address, regs);
	die("Bad Kernel VA", regs, SIGKILL);
}
void read_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_LOAD, regs);
}

void write_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_STORE, regs);
}

void execute_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_IFETCH, regs);
}