arch/powerpc/mm/copro_fault.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CoProcessor (SPU/AFU) mm fault handler
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
 *
 * Author: Arnd Bergmann <[email protected]>
 * Author: Jeremy Kerr <[email protected]>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <asm/reg.h>
#include <asm/copro.h>
#include <asm/spu.h>
#include <misc/cxl-base.h>

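/*
 * Illustrative caller sketch (not part of the original file): a
 * coprocessor driver that has decoded a translation fault might call
 * copro_handle_mm_fault() roughly like this, where ctx, ea and dsisr
 * are hypothetical names standing in for the driver's own state:
 *
 *	vm_fault_t flt;
 *
 *	if (copro_handle_mm_fault(ctx->mm, ea, dsisr, &flt))
 *		... report the failed access back to the coprocessor ...
 *	else
 *		... restart the faulting coprocessor access ...
 */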
/*
 * This ought to be kept in sync with the powerpc specific do_page_fault
 * function. Fortunately, there are a few corner cases that we have not
 * had to handle here so far.
 */
int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
		unsigned long dsisr, vm_fault_t *flt)
{
	struct vm_area_struct *vma;
	unsigned long is_write;
	int ret;

	if (mm == NULL)
		return -EFAULT;

	if (mm->pgd == NULL)
		return -EFAULT;

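	/* Look up and validate the VMA under the mmap read lock. */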
	mmap_read_lock(mm);
	ret = -EFAULT;
	vma = find_vma(mm, ea);
	if (!vma)
		goto out_unlock;

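	/* The address may lie just below a stack VMA; try growing the stack down to cover it. */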
	if (ea < vma->vm_start) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_unlock;
		if (expand_stack(vma, ea))
			goto out_unlock;
	}

	is_write = dsisr & DSISR_ISSTORE;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto out_unlock;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto out_unlock;
		/*
		 * PROT_NONE is covered by the VMA check above, and on hash
		 * we should get a NOHPTE fault instead of a PROTFAULT in
		 * case fixup is needed for things like autonuma.
		 */
		if (!radix_enabled())
			WARN_ON_ONCE(dsisr & DSISR_PROTFAULT);
	}

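	/* Hand the fault to the generic mm fault handler, as do_page_fault() does. */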
	ret = 0;
	*flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0, NULL);

	/* The fault is fully completed (including releasing mmap lock) */
	if (*flt & VM_FAULT_COMPLETED)
		return 0;

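	/* Translate generic VM_FAULT_* errors into the errno values callers expect. */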
	if (unlikely(*flt & VM_FAULT_ERROR)) {
		if (*flt & VM_FAULT_OOM) {
			ret = -ENOMEM;
			goto out_unlock;
		} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
			ret = -EFAULT;
			goto out_unlock;
		}
		BUG();
	}

out_unlock:
	mmap_read_unlock(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(copro_handle_mm_fault);

#ifdef CONFIG_PPC_64S_HASH_MMU
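/*
 * Build the SLB entry (ESID/VSID pair) that maps the effective address
 * 'ea' for a coprocessor. Returns 0 on success, or 1 if the address
 * falls outside any translatable region or has no valid VSID.
 */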
int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
{
	u64 vsid, vsidkey;
	int psize, ssize;

	switch (get_region_id(ea)) {
	case USER_REGION_ID:
		pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
		if (mm == NULL)
			return 1;
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_user_vsid(&mm->context, ea, ssize);
		vsidkey = SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
		psize = mmu_vmalloc_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case IO_REGION_ID:
		pr_devel("%s: 0x%llx -- IO_REGION_ID\n", __func__, ea);
		psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case LINEAR_MAP_REGION_ID:
		pr_devel("%s: 0x%llx -- LINEAR_MAP_REGION_ID\n", __func__, ea);
		psize = mmu_linear_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	default:
		pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
		return 1;
	}
	/* Bad address */
	if (!vsid)
		return 1;

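	/* Assemble the VSID word: shifted VSID, user/kernel key bits, page-size encoding, and the 1T segment flag where applicable. */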
	vsid = (vsid << slb_vsid_shift(ssize)) | vsidkey;

	vsid |= mmu_psize_defs[psize].sllp |
		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);

	slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;
	slb->vsid = vsid;

	return 0;
}
EXPORT_SYMBOL_GPL(copro_calculate_slb);

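/* Invalidate cached SLB entries on every coprocessor that may share this mm. */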
void copro_flush_all_slbs(struct mm_struct *mm)
{
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
	cxl_slbia(mm);
}
EXPORT_SYMBOL_GPL(copro_flush_all_slbs);
#endif