// SPDX-License-Identifier: GPL-2.0
/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller ([email protected])
 * Copyright (C) 1996 Eddie C. Dost ([email protected])
 * Copyright (C) 1997 Jakub Jelinek ([email protected])
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/extable.h>

#include <asm/page.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/traps.h>

#include "mm_32.h"

int show_unhandled_signals = 1;

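/*
 * Report a fault the kernel could not recover from, then die via
 * die_if_kernel(), which on sparc never returns.
 */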
static void __noreturn unhandled_fault(unsigned long address,
				       struct task_struct *tsk,
				       struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		    "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
		       address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
		(tsk->mm ? (unsigned long) tsk->mm->pgd :
			(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

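/*
 * Print a rate-limited "segfault at ..." message for a user task that
 * is about to receive an unhandled fatal signal, in the same format
 * used by other architectures.
 */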
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, code,
				addr, current);

	force_sig_fault(sig, code, (void __user *) addr);
}

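/*
 * Work out the faulting address to report in siginfo.  For a text
 * fault it is simply the PC.  For a data fault we fetch the trapping
 * instruction (directly in kernel mode, via __get_user() in user
 * mode) and decode its effective address.
 */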
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS)
		insn = *(unsigned int *) regs->pc;
	else
		__get_user(insn, (unsigned int *) regs->pc);

	return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}

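/*
 * Main page fault entry point, called from the low-level trap code.
 * 'text_fault' distinguishes instruction from data faults, 'write'
 * write from read accesses, and 'address' is the faulting virtual
 * address (ignored for text faults, where the PC is used instead).
 */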
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int from_user = !(regs->psr & PSR_PS);
	int code;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (pagefault_disabled() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

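	/*
	 * Look up the VMA under the mmap read lock; we come back to
	 * "retry" if handle_mm_fault() dropped the lock and asked us
	 * to repeat the attempt.
	 */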
retry:
	mmap_read_lock(mm);

	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	if (from_user)
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	if (!from_user) {
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->pc);
		if (entry) {
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x>\n",
				regs->pc, entry->fixup);
#endif
			regs->pc = entry->fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	mmap_read_unlock(mm);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	mmap_read_unlock(mm);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;
	return;

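	/*
	 * Kernel faults in the vmalloc area are handled without the
	 * mmap lock by copying the missing entry from the reference
	 * page table (init_mm.pgd) into this task's page table.
	 */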
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		p4d = p4d_offset(pgd, address);
		pud = pud_offset(p4d, address);
		pmd = pmd_offset(pud, address);

		p4d_k = p4d_offset(pgd_k, address);
		pud_k = pud_offset(p4d_k, address);
		pmd_k = pmd_offset(pud_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		*pmd = *pmd_k;
		return;
	}
}

/*
 * This always deals with user addresses: fault a single user page in
 * by hand, on behalf of the register window handlers below.
 */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int flags = FAULT_FLAG_USER;
	int code;

	code = SEGV_MAPERR;

	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(vma, address, flags, NULL)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	mmap_read_unlock(mm);
	return;
bad_area:
	mmap_read_unlock(mm);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	mmap_read_unlock(mm);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

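/*
 * The sparc32 ABI requires %sp to be doubleword (8-byte) aligned;
 * a misaligned stack pointer in a window trap is a fatal error.
 */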
static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL);
}

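/*
 * Register window traps: a window is 16 four-byte registers (64 bytes)
 * saved on the user stack at %sp.  If the save area straddles a page
 * boundary (sp and sp + 0x38, the offset of its last doubleword, land
 * on different pages), both pages must be faulted in by hand.
 */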
void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}