/*
 * PowerPC Radix MMU emulation helpers for QEMU.
 *
 * Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "exec/log.h"
#include "mmu-radix64.h"
#include "mmu-book3s-v3.h"

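/*
 * Quadrant decode: the quadrant (top) bits of the effective address select
 * which (LPID, PID) pair the access is translated under.  With MSR[HV]
 * clear this is a guest access, so quadrant 0 uses the guest PID,
 * quadrant 3 uses PID 0 (guest operating system), and quadrants 1 and 2
 * are invalid.
 */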
static bool ppc_radix64_get_fully_qualified_addr(CPUPPCState *env, vaddr eaddr,
                                                 uint64_t *lpid, uint64_t *pid)
{
    /* We don't have HV support yet and shouldn't get here with it set anyway */
    assert(!msr_hv);

    if (!msr_hv) { /* !MSR[HV] -> Guest */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0: /* Guest application */
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1: /* Illegal */
        case R_EADDR_QUADRANT2:
            return false;
        case R_EADDR_QUADRANT3: /* Guest OS */
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0; /* pid set to 0 -> addresses guest operating system */
            break;
        }
    }

    return true;
}

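/*
 * Raise a Segment Interrupt: ISEG for instruction fetches, DSEG (with DAR
 * set to the faulting address) for data accesses.
 */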
static void ppc_radix64_raise_segi(PowerPCCPU *cpu, int rwx, vaddr eaddr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    if (rwx == 2) { /* Instruction Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_ISEG;
    } else { /* Data Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_DSEG;
        env->spr[SPR_DAR] = eaddr;
    }
    env->error_code = 0;
}

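/*
 * Raise a Storage Interrupt: for instruction fetches the cause is recorded
 * in the error code, for data accesses it goes to DSISR (flagged as a store
 * when appropriate) with DAR holding the faulting address.
 */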
static void ppc_radix64_raise_si(PowerPCCPU *cpu, int rwx, vaddr eaddr,
                                 uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    if (rwx == 2) { /* Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = cause;
    } else { /* Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_DSI;
        if (rwx == 1) { /* Write -> Store */
            cause |= DSISR_ISSTORE;
        }
        env->spr[SPR_DSISR] = cause;
        env->spr[SPR_DAR] = eaddr;
        env->error_code = 0;
    }
}

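/*
 * Check a leaf PTE against the requested access (rwx).  Execute is refused
 * on pages with the non-idempotent I/O attribute, the Encoded Access
 * Authority bits give the base permissions, and the AMR further restricts
 * privileged access to problem-state pages.  Returns true and updates
 * *fault_cause when the access is denied.
 */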
static bool ppc_radix64_check_prot(PowerPCCPU *cpu, int rwx, uint64_t pte,
                                   int *fault_cause, int *prot)
{
    CPUPPCState *env = &cpu->env;
    const int need_prot[] = { PAGE_READ, PAGE_WRITE, PAGE_EXEC };

    /* Check Page Attributes (pte58:59) */
    if (((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO) && (rwx == 2)) {
        /*
         * Radix PTE entries with the non-idempotent I/O attribute are treated
         * as guarded storage
         */
        *fault_cause |= SRR1_NOEXEC_GUARD;
        return true;
    }

    /* Determine permissions allowed by Encoded Access Authority */
    if ((pte & R_PTE_EAA_PRIV) && msr_pr) { /* Insufficient Privilege */
        *prot = 0;
    } else if (msr_pr || (pte & R_PTE_EAA_PRIV)) {
        *prot = ppc_radix64_get_prot_eaa(pte);
    } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) */
        *prot = ppc_radix64_get_prot_eaa(pte);
        *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
    }

    /* Check if requested access type is allowed */
    if (need_prot[rwx] & ~(*prot)) { /* Page Protected for that Access */
        *fault_cause |= DSISR_PROTFAULT;
        return true;
    }

    return false;
}

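/*
 * Update the Reference and Change bits of the PTE in guest memory.  R is
 * always set; C is only set on a store, otherwise PAGE_WRITE is dropped
 * from *prot so that a later store re-faults through here to set C.
 */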
static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte,
                               hwaddr pte_addr, int *prot)
{
    CPUState *cs = CPU(cpu);
    uint64_t npte;

    npte = pte | R_PTE_R; /* Always set reference bit */

    if (rwx == 1) { /* Store/Write */
        npte |= R_PTE_C; /* Set change bit */
    } else {
        /*
         * Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit.
         */
        *prot &= ~PAGE_WRITE;
    }

    if (pte ^ npte) { /* If pte has changed then write it back */
        stq_phys(cs->as, pte_addr, npte);
    }
}

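/*
 * Walk one level of the radix tree and recurse until a leaf PTE is found.
 * nls is the log2 size of the directory at this level and *psize tracks
 * the number of effective address bits still to be translated; at a leaf
 * it is left as the log2 page size.  Returns the PTE (setting *raddr and
 * *pte_addr) on success, or 0 with *fault_cause updated on failure.
 */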
static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, vaddr eaddr,
                                      uint64_t base_addr, uint64_t nls,
                                      hwaddr *raddr, int *psize,
                                      int *fault_cause, hwaddr *pte_addr)
{
    CPUState *cs = CPU(cpu);
    uint64_t index, pde;

    if (nls < 5) { /* Directory maps less than 2**5 entries */
        *fault_cause |= DSISR_R_BADCONFIG;
        return 0;
    }

    /* Read page <directory/table> entry from guest address space */
    index = eaddr >> (*psize - nls); /* Shift */
    index &= ((1UL << nls) - 1); /* Mask */
    pde = ldq_phys(cs->as, base_addr + (index * sizeof(pde)));
    if (!(pde & R_PTE_VALID)) { /* Invalid Entry */
        *fault_cause |= DSISR_NOPTE;
        return 0;
    }

    *psize -= nls;

    /* Check if Leaf Entry -> Page Table Entry -> Stop the Search */
    if (pde & R_PTE_LEAF) {
        uint64_t rpn = pde & R_PTE_RPN;
        uint64_t mask = (1UL << *psize) - 1;

        /* Or high bits of rpn with low bits of eaddr to form whole real addr */
        *raddr = (rpn & ~mask) | (eaddr & mask);
        *pte_addr = base_addr + (index * sizeof(pde));
        return pde;
    }

    /* Next Level of Radix Tree */
    return ppc_radix64_walk_tree(cpu, eaddr, pde & R_PDE_NLB, pde & R_PDE_NLS,
                                 raddr, psize, fault_cause, pte_addr);
}

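/*
 * Top-level radix fault handler: bypass translation in real mode, decode
 * the quadrant, look up the process table entry for the PID, walk the
 * radix tree, check protection, update R/C, and install the translation
 * in the QEMU TLB.  Returns 0 on success or 1 after raising an interrupt.
 */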
int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
                                 int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    PPCVirtualHypervisorClass *vhc =
        PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
    hwaddr raddr, pte_addr;
    uint64_t lpid = 0, pid = 0, offset, size, patbe, prtbe0, pte;
    int page_size, prot, fault_cause = 0;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));
    assert(!msr_hv); /* For now there is no Radix PowerNV Support */
    assert(cpu->vhyp);
    assert(ppc64_use_proc_tbl(cpu));

    /* Real Mode Access */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
        ppc_radix64_raise_segi(cpu, rwx, eaddr);
        return 1;
    }

    /* Get Process Table */
    patbe = vhc->get_patbe(cpu->vhyp);

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    size = 1ULL << ((patbe & PATBE1_R_PRTS) + 12);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
        return 1;
    }
    prtbe0 = ldq_phys(cs->as, (patbe & PATBE1_R_PRTB) + offset);

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    page_size = PRTBE_R_GET_RTS(prtbe0);
    pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
                                prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
                                &raddr, &page_size, &fault_cause, &pte_addr);
    if (!pte || ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, &prot)) {
        /* Couldn't get pte or access denied due to protection */
        ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
        return 1;
    }

    /* Update Reference and Change Bits */
    ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, &prot);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1UL << page_size);
    return 0;
}

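/*
 * Debug translation (e.g. for the gdbstub): performs the same walk as the
 * fault handler but never raises interrupts or updates R/C bits, and
 * returns -1 if the address cannot be translated.
 */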
hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    PPCVirtualHypervisorClass *vhc =
        PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
    hwaddr raddr, pte_addr;
    uint64_t lpid = 0, pid = 0, offset, size, patbe, prtbe0, pte;
    int page_size, fault_cause = 0;

    /* Handle Real Mode */
    if (msr_dr == 0) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        return eaddr & 0x0FFFFFFFFFFFFFFFULL;
    }

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
        return -1;
    }

    /* Get Process Table */
    patbe = vhc->get_patbe(cpu->vhyp);

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    size = 1ULL << ((patbe & PATBE1_R_PRTS) + 12);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        return -1;
    }
    prtbe0 = ldq_phys(cs->as, (patbe & PATBE1_R_PRTB) + offset);

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    page_size = PRTBE_R_GET_RTS(prtbe0);
    pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
                                prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
                                &raddr, &page_size, &fault_cause, &pte_addr);
    if (!pte) {
        return -1;
    }

    return raddr & TARGET_PAGE_MASK;
}