/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/helper-proto.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"

//#define DEBUG_MMU
//#define DEBUG_SLB

#ifdef DEBUG_MMU
# define LOG_MMU(...) qemu_log(__VA_ARGS__)
# define LOG_MMU_STATE(cpu) log_cpu_state((cpu), 0)
#else
# define LOG_MMU(...) do { } while (0)
# define LOG_MMU_STATE(cpu) do { } while (0)
#endif

#ifdef DEBUG_SLB
# define LOG_SLB(...) qemu_log(__VA_ARGS__)
#else
# define LOG_SLB(...) do { } while (0)
#endif

/*
 * Used to indicate whether we have allocated htab in the
 * host kernel
 */
bool kvmppc_kern_htab;
/*
 * SLB handling
 */

static ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr)
{
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /* We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code. */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}

void dump_slb(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
{
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(CPU(ppc_env_get_cpu(env)));

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < env->slb_nr; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}

void helper_slbia(CPUPPCState *env)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    int n, do_invalidate;

    do_invalidate = 0;
    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (slb->esid & SLB_ESID_V) {
            slb->esid &= ~SLB_ESID_V;
            /* XXX: given the fact that segment size is 256 MB or 1TB,
             * and we still don't have a tlb_flush_mask(env, n, mask)
             * in QEMU, we just invalidate all TLBs
             */
            do_invalidate = 1;
        }
    }
    if (do_invalidate) {
        tlb_flush(CPU(cpu), 1);
    }
}

void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(env, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /* XXX: given the fact that segment size is 256 MB or 1TB,
         * and we still don't have a tlb_flush_mask(env, n, mask)
         * in QEMU, we just invalidate all TLBs
         */
        tlb_flush(CPU(cpu), 1);
    }
}

int ppc_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

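    /* Assuming slb_nr is a power of two, 0x1000 - slb_nr sets exactly
     * those bits of the 12-bit index field that must be zero for a
     * valid slot, so the single test below covers both reserved bits
     * and slot >= slb_nr. */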
    if (rb & (0x1000 - env->slb_nr)) {
        return -1; /* Reserved bits set or slot too high */
    }
    if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    /* Mask out the slot number as we store the entry */
    slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
    slb->vsid = rs;

    LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
            " %016" PRIx64 "\n", __func__, slot, rb, rs,
            slb->esid, slb->vsid);

    return 0;
}

static int ppc_load_slb_esid(CPUPPCState *env, target_ulong rb,
                             target_ulong *rt)
{
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}

static int ppc_load_slb_vsid(CPUPPCState *env, target_ulong rb,
                             target_ulong *rt)
{
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}

void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    if (ppc_store_slb(env, rb, rs) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
}

target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    target_ulong rt = 0;

    if (ppc_load_slb_esid(env, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(env, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

/*
 * 64-bit hash table MMU handling
 */

static int ppc_hash64_pte_prot(CPUPPCState *env,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    unsigned pp, key;
    /* Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases */
    int prot = 0;

    key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);
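    /* PP0 is folded in above the two low PP bits, so the switches
     * below can tell the PP0-qualified encoding (0x6) apart from the
     * plain PP values 0-3. */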

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            prot = 0;
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;
        }
    }

    /* No execute if either noexec or guarded bits set */
    if (!(pte.pte1 & HPTE64_R_N) && !(pte.pte1 & HPTE64_R_G)
        && !(slb->vsid & SLB_VSID_N)) {
        prot |= PAGE_EXEC;
    }

    return prot;
}

static int ppc_hash64_amr_prot(CPUPPCState *env, ppc_hash_pte64_t pte)
{
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!(env->mmu_model & POWERPC_MMU_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2*(31 - key)) & 0x3;
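    /* The AMR is read as 32 two-bit fields indexed by the page's class
     * key; as used below, the high bit of the pair blocks stores and
     * the low bit blocks loads. */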

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    return prot;
}

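/*
 * Map the PTEG containing pte_index and return an opaque token that
 * ppc_hash64_load_hpte0/1 can use to read individual HPTEs.  A zero
 * token means the PTEG could not be accessed; ppc_hash64_stop_access()
 * releases whatever the token refers to.
 */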
uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
{
    uint64_t token = 0;
    hwaddr pte_offset;

    pte_offset = pte_index * HASH_PTE_SIZE_64;
    if (kvmppc_kern_htab) {
        /*
         * HTAB is controlled by KVM. Fetch the PTEG into a new buffer.
         */
        token = kvmppc_hash64_read_pteg(cpu, pte_index);
        if (token) {
            return token;
        }
        /*
         * pteg read failed, even though we have allocated htab via
         * kvmppc_reset_htab.
         */
        return 0;
    }
    /*
     * HTAB is controlled by QEMU. Just point to the internally
     * accessible PTEG.
     */
    if (cpu->env.external_htab) {
        token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
    } else if (cpu->env.htab_base) {
        token = cpu->env.htab_base + pte_offset;
    }
    return token;
}

void ppc_hash64_stop_access(uint64_t token)
{
    if (kvmppc_kern_htab) {
        kvmppc_hash64_free_pteg(token);
    }
}

static hwaddr ppc_hash64_pteg_search(CPUPPCState *env, hwaddr hash,
                                     bool secondary, target_ulong ptem,
                                     ppc_hash_pte64_t *pte)
{
    int i;
    uint64_t token;
    target_ulong pte0, pte1;
    target_ulong pte_index;

    pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
    token = ppc_hash64_start_access(ppc_env_get_cpu(env), pte_index);
    if (!token) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_load_hpte0(env, token, i);
        pte1 = ppc_hash64_load_hpte1(env, token, i);

        if ((pte0 & HPTE64_V_VALID)
            && (secondary == !!(pte0 & HPTE64_V_SECONDARY))
            && HPTE64_V_COMPARE(pte0, ptem)) {
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_stop_access(token);
            return (pte_index + i) * HASH_PTE_SIZE_64;
        }
    }
    ppc_hash64_stop_access(token);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}

static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte)
{
    hwaddr pte_offset;
    hwaddr hash;
    uint64_t vsid, epnshift, epnmask, epn, ptem;

    /* Page size according to the SLB, which we use to generate the
     * EPN for hash table lookup. When we implement more recent MMU
     * extensions this might be different from the actual page size
     * encoded in the PTE */
    epnshift = (slb->vsid & SLB_VSID_L)
        ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS;
    epnmask = ~((1ULL << epnshift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> epnshift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> epnshift);
    }
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
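    /* The PTEG index is this hash masked with htab_mask; if the
     * primary group searched below has no match, the secondary group
     * is searched with the one's complement of the same hash. */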

    /* Page address translation */
    LOG_MMU("htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
            " hash " TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, hash);

    /* Primary PTEG lookup */
    LOG_MMU("0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
            " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
            " hash=" TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, vsid, ptem, hash);
    pte_offset = ppc_hash64_pteg_search(env, hash, 0, ptem, pte);

    if (pte_offset == -1) {
        /* Secondary PTEG lookup */
        LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                " hash=" TARGET_FMT_plx "\n", env->htab_base,
                env->htab_mask, vsid, ptem, ~hash);

        pte_offset = ppc_hash64_pteg_search(env, ~hash, 1, ptem, pte);
    }

    return pte_offset;
}

static hwaddr ppc_hash64_pte_raddr(ppc_slb_t *slb, ppc_hash_pte64_t pte,
                                   target_ulong eaddr)
{
    hwaddr rpn = pte.pte1 & HPTE64_R_RPN;
    /* FIXME: Add support for SLLP extended page sizes */
    int target_page_bits = (slb->vsid & SLB_VSID_L)
        ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS;
    hwaddr mask = (1ULL << target_page_bits) - 1;

    return (rpn & ~mask) | (eaddr & mask);
}

int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
                                int rwx, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;
    int pp_prot, amr_prot, prot;
    uint64_t new_pte1;
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));
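    /* rwx selects the access type: 0 = data load, 1 = data store,
     * 2 = instruction fetch, matching the need_prot[] table above. */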

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* Translation is off */
        /* In real mode the top 4 effective address bits are ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(env, eaddr);

    if (!slb) {
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    pte_offset = ppc_hash64_htab_lookup(env, slb, eaddr, &pte);
    if (pte_offset == -1) {
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x40000000;
        } else {
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
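            /* DSISR 0x40000000 reports that no matching HPTE was
             * found; stores also set the 0x02000000 bit, matching the
             * store flag used in the protection-fault path below. */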
            if (rwx == 1) {
                env->spr[SPR_DSISR] = 0x42000000;
            } else {
                env->spr[SPR_DSISR] = 0x40000000;
            }
        }
        return 1;
    }
    LOG_MMU("found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);

    /* 5. Check access permissions */

    pp_prot = ppc_hash64_pte_prot(env, slb, pte);
    amr_prot = ppc_hash64_amr_prot(env, pte);
    prot = pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        LOG_MMU("PTE access rejected\n");
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
        } else {
            target_ulong dsisr = 0;

            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
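            /* Accumulate the DSISR reason bits: 0x08000000 for a basic
             * protection violation, 0x02000000 when the faulting access
             * was a store, 0x00200000 when the AMR forbids the access. */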
            if (need_prot[rwx] & ~pp_prot) {
                dsisr |= 0x08000000;
            }
            if (rwx == 1) {
                dsisr |= 0x02000000;
            }
            if (need_prot[rwx] & ~amr_prot) {
                dsisr |= 0x00200000;
            }
            env->spr[SPR_DSISR] = dsisr;
        }
        return 1;
    }

    LOG_MMU("PTE access granted !\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
    if (rwx == 1) {
        new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
    } else {
        /* Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit */
        prot &= ~PAGE_WRITE;
    }

    if (new_pte1 != pte.pte1) {
        ppc_hash64_store_hpte(env, pte_offset / HASH_PTE_SIZE_64,
                              pte.pte0, new_pte1);
    }

    /* 7. Determine the real address from the PTE */

    raddr = ppc_hash64_pte_raddr(slb, pte, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

hwaddr ppc_hash64_get_phys_page_debug(CPUPPCState *env, target_ulong addr)
{
    ppc_slb_t *slb;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;

    if (msr_dr == 0) {
        /* In real mode the top 4 effective address bits are ignored */
        return addr & 0x0FFFFFFFFFFFFFFFULL;
    }

    slb = slb_lookup(env, addr);
    if (!slb) {
        return -1;
    }

    pte_offset = ppc_hash64_htab_lookup(env, slb, addr, &pte);
    if (pte_offset == -1) {
        return -1;
    }

    return ppc_hash64_pte_raddr(slb, pte, addr) & TARGET_PAGE_MASK;
}

void ppc_hash64_store_hpte(CPUPPCState *env,
                           target_ulong pte_index,
                           target_ulong pte0, target_ulong pte1)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    if (kvmppc_kern_htab) {
        kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
        return;
    }

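    /* pte0 goes in the first half of the HPTE and pte1 at offset
     * HASH_PTE_SIZE_64 / 2, whether the table lives in QEMU memory
     * (external_htab) or in guest physical memory at htab_base. */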
    pte_index *= HASH_PTE_SIZE_64;
    if (env->external_htab) {
        stq_p(env->external_htab + pte_index, pte0);
        stq_p(env->external_htab + pte_index + HASH_PTE_SIZE_64/2, pte1);
    } else {
        stq_phys(cs->as, env->htab_base + pte_index, pte0);
        stq_phys(cs->as, env->htab_base + pte_index + HASH_PTE_SIZE_64/2, pte1);
    }
}