/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2003 - 2012  Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

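/* Deliver si_signo/si_code for the faulting address to the current task. */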
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address)
{
	force_sig_fault(si_signo, si_code, (void __user *)address);
}

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
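/*
 * Example of the output this produces (values are hypothetical; the
 * folded p4d/pud/pmd levels are skipped on a two-level sh build):
 *
 *   pgd = 8c3f0000
 *   [1000b000] *pgd=8c3f2001, *pte=00000000
 */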
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (mm) {
		pgd = mm->pgd;
	} else {
		pgd = get_TTB();

		if (unlikely(!pgd))
			pgd = swapper_pg_dir;
	}

	pr_alert("pgd = %p\n", pgd);
	pgd += pgd_index(addr);
	pr_alert("[%08lx] *pgd=%0*llx", addr, (u32)(sizeof(*pgd) * 2),
		 (u64)pgd_val(*pgd));

	do {
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			pr_cont("(bad)");
			break;
		}

		p4d = p4d_offset(pgd, addr);
		if (PTRS_PER_P4D != 1)
			pr_cont(", *p4d=%0*llx", (u32)(sizeof(*p4d) * 2),
				(u64)p4d_val(*p4d));

		if (p4d_none(*p4d))
			break;

		if (p4d_bad(*p4d)) {
			pr_cont("(bad)");
			break;
		}

		pud = pud_offset(p4d, addr);
		if (PTRS_PER_PUD != 1)
			pr_cont(", *pud=%0*llx", (u32)(sizeof(*pud) * 2),
				(u64)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			pr_cont("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			pr_cont(", *pmd=%0*llx", (u32)(sizeof(*pmd) * 2),
				(u64)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			pr_cont("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_kernel(pmd, addr);
		pr_cont(", *pte=%0*llx", (u32)(sizeof(*pte) * 2),
			(u64)pte_val(*pte));
	} while (0);

	pr_cont("\n");
}

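/*
 * Sync this task's pgd entry for 'address' with the reference copy in
 * init_mm, returning the kernel pmd on success, or NULL when the
 * reference entry is missing or the tables were already in sync.
 */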
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d_k))
		return NULL;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}

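/*
 * With SH-4 store queues enabled, store queue mappings above the
 * regular vmalloc area still need to be faulted in, so the fault
 * window extends to P3_ADDR_MAX rather than ending at VMALLOC_END.
 */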
#ifdef CONFIG_SH_STORE_QUEUES
#define __FAULT_ADDR_LIMIT	P3_ADDR_MAX
#else
#define __FAULT_ADDR_LIMIT	VMALLOC_END
#endif

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

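/* Print the oops banner plus a page table walk for the bad address. */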
static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
	if (!oops_may_print())
		return;

	pr_alert("BUG: unable to handle kernel %s at %08lx\n",
		 address < PAGE_SIZE ? "NULL pointer dereference"
				     : "paging request",
		 address);
	pr_alert("PC:");
	printk_address(regs->pc, 1);

	show_pte(NULL, address);
}

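/*
 * Fault with no user context to return to: try the exception fixup
 * tables and trapped I/O before giving up and oopsing.
 */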
static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	show_fault_oops(regs, address);

	die("Oops", regs, error_code);
}

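/*
 * Signal the bad access to user space, or fall through to the kernel
 * no_context() path. Runs without the mmap lock held.
 */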
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address);

		return;
	}

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	mmap_read_unlock(mm);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

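/* Drop the mmap lock and raise SIGBUS (or die if in kernel mode). */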
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	mmap_read_unlock(mm);

	/* Kernel mode? Handle exceptions or die: */
	if (!user_mode(regs))
		no_context(regs, error_code, address);

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address);
}

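/*
 * Pick apart an error return from handle_mm_fault(): returns 1 if the
 * fault has been fully handled here, 0 if the caller should continue.
 */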
static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, vm_fault_t fault)
{
	/*
	 * Pagefault was interrupted by SIGKILL. We have no reason to
	 * continue pagefault.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, error_code, address);
		return 1;
	}

	/* Release mmap_lock first if necessary */
	if (!(fault & VM_FAULT_RETRY))
		mmap_read_unlock(current->mm);

	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			no_context(regs, error_code, address);
			return 1;
		}

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area(regs, error_code, address);
		else
			BUG();
	}

	return 1;
}

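/*
 * Check whether the kind of access encoded in error_code is permitted
 * by the vma's protection flags. Returns 1 on a permission mismatch.
 */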
static inline int access_error(int error_code, struct vm_area_struct *vma)
{
	if (error_code & FAULT_CODE_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* ITLB miss on NX page */
	if (unlikely((error_code & FAULT_CODE_ITLB) &&
		     !(vma->vm_flags & VM_EXEC)))
		return 1;

	/* read, not present: */
	if (unlikely(!vma_is_accessible(vma)))
		return 1;

	return 0;
}

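/* Anything at or above TASK_SIZE lives in the kernel half of the map. */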
static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	tsk = current;
	mm = tsk->mm;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (kprobe_page_fault(regs, vec))
			return;

		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	if (unlikely(kprobe_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * with pagefaults disabled then we must not take the fault:
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (unlikely(!vma)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	set_thread_fault_code(error_code);

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (error_code & FAULT_CODE_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
		if (mm_fault_error(regs, error_code, address, fault))
			return;

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);
}