// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 */

#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
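
/* Per-CPU ASID allocator state: the most recently handed out
 * address-space ID on this CPU, seeded with the first user ASID.
 */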
DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;

void bad_page_fault(struct pt_regs *, unsigned long, int);

/*
 * This routine handles page faults. It determines the faulting address
 * and the problem, and then passes the fault off to one of the
 * appropriate routines.
 *
 * Note: does not handle Miss and MultiHit.
 */

void do_page_fault(struct pt_regs *regs)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned int exccause = regs->exccause;
        unsigned int address = regs->excvaddr;
        int code = SEGV_MAPERR;
        int is_write, is_exec;
        vm_fault_t fault;
        unsigned int flags = FAULT_FLAG_DEFAULT;

        /* We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         */
        if (address >= TASK_SIZE && !user_mode(regs))
                goto vmalloc_fault;

        /* If we're in an interrupt or have no user
         * context, we must not take the fault.
         */
        if (faulthandler_disabled() || !mm) {
                bad_page_fault(regs, address, SIGSEGV);
                return;
        }
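
        /* Classify the access from the Xtensa exception cause: a store
         * cache-attribute fault is a write, ITLB faults are instruction
         * fetches, and anything else is treated as a data read.
         */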
        is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
        is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
                   exccause == EXCCAUSE_ITLB_MISS ||
                   exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

        pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
                 current->comm, current->pid,
                 address, exccause, regs->pc,
                 is_write ? "w" : "", is_exec ? "x" : "");

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
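
        /* Take the mmap read lock and look up the VMA covering the fault;
         * a stack VMA below vm_start may still need to be grown downward.
         */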
retry:
        mmap_read_lock(mm);
        vma = find_vma(mm, address);

        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;

        /* Ok, we have a good vm_area for this memory access, so
         * we can handle it.
         */

good_area:
        code = SEGV_ACCERR;

        if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                flags |= FAULT_FLAG_WRITE;
        } else if (is_exec) {
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
        } else {
                /* Allow read even from write-only pages. */
                if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
                        goto bad_area;
        }

        /* If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(vma, address, flags, regs);

        if (fault_signal_pending(fault, regs)) {
                if (!user_mode(regs))
                        bad_page_fault(regs, address, SIGKILL);
                return;
        }

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
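
        /* The core mm asked for a retry: mark that we already tried once
         * and take the fault again.
         */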
        if (fault & VM_FAULT_RETRY) {
                flags |= FAULT_FLAG_TRIED;

                /* No need to mmap_read_unlock(mm) as we would
                 * have already released it in __lock_page_or_retry
                 * in mm/filemap.c.
                 */
                goto retry;
        }

        mmap_read_unlock(mm);
        return;

        /* Something tried to access memory that isn't in our memory map.
         * Fix it, but check if it's kernel or user first.
         */
bad_area:
        mmap_read_unlock(mm);
        if (user_mode(regs)) {
                current->thread.bad_vaddr = address;
                current->thread.error_code = is_write;
                force_sig_fault(SIGSEGV, code, (void *) address);
                return;
        }
        bad_page_fault(regs, address, SIGSEGV);
        return;

        /* We ran out of memory, or some other thing happened to us that made
         * us unable to handle the page fault gracefully.
         */
out_of_memory:
        mmap_read_unlock(mm);
        if (!user_mode(regs))
                bad_page_fault(regs, address, SIGKILL);
        else
                pagefault_out_of_memory();
        return;

do_sigbus:
        mmap_read_unlock(mm);

        /* Send a SIGBUS, regardless of whether we were in kernel
         * or user mode.
         */
        current->thread.bad_vaddr = address;
        force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);

        /* Kernel mode? Handle exceptions or die. */
        if (!user_mode(regs))
                bad_page_fault(regs, address, SIGBUS);
        return;

vmalloc_fault:
        {
                /* Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 */
                struct mm_struct *act_mm = current->active_mm;
                int index = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                p4d_t *p4d, *p4d_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;

                if (act_mm == NULL)
                        goto bad_page_fault;

                pgd = act_mm->pgd + index;
                pgd_k = init_mm.pgd + index;
                if (!pgd_present(*pgd_k))
                        goto bad_page_fault;
                pgd_val(*pgd) = pgd_val(*pgd_k);

                p4d = p4d_offset(pgd, address);
                p4d_k = p4d_offset(pgd_k, address);
                if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
                        goto bad_page_fault;

                pud = pud_offset(p4d, address);
                pud_k = pud_offset(p4d_k, address);
                if (!pud_present(*pud) || !pud_present(*pud_k))
                        goto bad_page_fault;

                pmd = pmd_offset(pud, address);
                pmd_k = pmd_offset(pud_k, address);
                if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
                        goto bad_page_fault;

                pmd_val(*pmd) = pmd_val(*pmd_k);
                pte_k = pte_offset_kernel(pmd_k, address);
                if (!pte_present(*pte_k))
                        goto bad_page_fault;
                return;
        }
bad_page_fault:
        bad_page_fault(regs, address, SIGKILL);
}
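
/* Handle a fault the paths above could not fix up: consult the kernel
 * exception table for a fixup handler first, and oops only if the faulting
 * instruction has none registered.
 */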
void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
        extern void __noreturn die(const char *, struct pt_regs *, long);
        const struct exception_table_entry *entry;

        /* Are we prepared to handle this kernel fault? */
        if ((entry = search_exception_tables(regs->pc)) != NULL) {
                pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
                         current->comm, regs->pc, entry->fixup);
                current->thread.bad_uaddr = address;
                regs->pc = entry->fixup;
                return;
        }

        /* Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        pr_alert("Unable to handle kernel paging request at virtual "
                 "address %08lx\n pc = %08lx, ra = %08lx\n",
                 address, regs->pc, regs->areg[0]);
        die("Oops", regs, sig);
}