/*
 * x86 exception helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
#include "sysemu/sysemu.h"
#include "exec/helper-proto.h"

void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
{
    raise_interrupt(env, intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(CPUX86State *env, int exception_index)
{
    raise_exception(env, exception_index);
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
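/*
 * Vectors 0 and 10-13 are the "contributory" exceptions of the SDM's
 * double-fault classification (#DE, #TS, #NP, #SS, #GP); page faults
 * (vector 14) form a class of their own.  For example, a #GP raised
 * while a #NP is still being delivered is promoted: check_exception()
 * returns EXCP08_DBLE with *error_code cleared, and a further fault on
 * top of a double fault shuts the vCPU down (triple fault).
 */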
static int check_exception(CPUX86State *env, int intno, int *error_code,
                           uintptr_t retaddr)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_GUEST_MASK) {
            cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0, retaddr); /* does not return */
        }

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE)) {
        env->old_exception = intno;
    }

    return intno;
}

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * env->eip value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
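/*
 * retaddr is the host return address inside the translated block, as
 * obtained with GETPC(); cpu_loop_exit_restore() uses it to resynchronize
 * the guest state before unwinding.  A retaddr of 0 means the state is
 * already up to date.
 */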
static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
                                           int is_int, int error_code,
                                           int next_eip_addend,
                                           uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (!is_int) {
        cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
                                      error_code, retaddr);
        intno = check_exception(env, intno, &error_code, retaddr);
    } else {
        cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0, retaddr);
    }

    cs->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit_restore(cs, retaddr);
}

/* shortcuts to generate exceptions */

void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
                                   int error_code, int next_eip_addend)
{
    raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
}

void raise_exception_err(CPUX86State *env, int exception_index,
                         int error_code)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
}

void raise_exception_err_ra(CPUX86State *env, int exception_index,
                            int error_code, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
}

void raise_exception(CPUX86State *env, int exception_index)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, 0);
}

void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}
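/*
 * The _ra variants are meant for helpers running inside a translated
 * block, which pass GETPC(), e.g.:
 *
 *     raise_exception_err_ra(env, EXCP0D_GPF, selector, GETPC());
 *
 * The plain variants pass retaddr 0 and are for callers whose CPU state
 * is already synchronized.
 */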

#if !defined(CONFIG_USER_ONLY)
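/*
 * Translate a guest-physical address to a host-physical one by walking
 * the SVM nested page tables (NPT); without NPT this is the identity
 * mapping.  A NULL "prot" marks the access as a guest page-table walk
 * itself, which is reported as SVM_NPTEXIT_GPT (rather than
 * SVM_NPTEXIT_GPA) on a nested fault.
 */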
static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
                        int *prot)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint64_t ptep, pte;
    uint64_t exit_info_1 = 0;
    target_ulong pde_addr, pte_addr;
    uint32_t page_offset;
    int page_size;

    if (likely(!(env->hflags2 & HF2_NPT_MASK))) {
        return gphys;
    }

    if (!(env->nested_pg_mode & SVM_NPT_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

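    /*
     * In the 64-bit formats below each paging level consumes 9 bits of
     * the guest-physical address: (gphys >> 39) & 0x1ff picks one of 512
     * PML4 entries, and << 3 scales the index to an 8-byte entry offset.
     */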
    if (env->nested_pg_mode & SVM_NPT_PAE) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->nested_pg_mode & SVM_NPT_LMA) {
            uint64_t pml5e;
            uint64_t pml4e_addr, pml4e;

            pml5e = env->nested_cr3;
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;

            pml4e_addr = (pml5e & PG_ADDRESS_MASK) +
                    (((gphys >> 39) & 0x1ff) << 3);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = (pml4e & PG_ADDRESS_MASK) +
                    (((gphys >> 30) & 0x1ff) << 3);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            pdpe_addr = (env->nested_cr3 & ~0x1f) + ((gphys >> 27) & 0x18);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = (pdpe & PG_ADDRESS_MASK) + (((gphys >> 21) & 0x1ff) << 3);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = (pde & PG_ADDRESS_MASK) + (((gphys >> 12) & 0x1ff) << 3);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = (env->nested_cr3 & ~0xfff) + ((gphys >> 20) & 0xffc);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = (pde & ~0xfff) + ((gphys >> 10) & 0xffc);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

 do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

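    /*
     * ptep was ANDed across the levels with NX kept inverted, so after
     * the flip above a set PG_NX_MASK in ptep really means no-execute,
     * while the user/rw bits are the AND of every level.
     */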
    if (!(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }
    if (ptep & PG_NX_MASK) {
        if (access_type == MMU_INST_FETCH) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_EXEC;
    }
    if (!(ptep & PG_RW_MASK)) {
        if (access_type == MMU_DATA_STORE) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_WRITE;
    }

    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = gphys & (page_size - 1);
    return pte + page_offset;

 do_fault_rsvd:
    exit_info_1 |= SVM_NPTEXIT_RSVD;
 do_fault_protect:
    exit_info_1 |= SVM_NPTEXIT_P;
 do_fault:
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 gphys);
    exit_info_1 |= SVM_NPTEXIT_US;
    if (access_type == MMU_DATA_STORE) {
        exit_info_1 |= SVM_NPTEXIT_RW;
    } else if (access_type == MMU_INST_FETCH) {
        exit_info_1 |= SVM_NPTEXIT_ID;
    }
    if (prot) {
        exit_info_1 |= SVM_NPTEXIT_GPA;
    } else { /* page table access */
        exit_info_1 |= SVM_NPTEXIT_GPT;
    }
    cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr);
}

/* return value:
 * -1 = cannot handle fault
 *  0 = nothing more to do
 *  1 = generate PF fault
 */
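/*
 * is_write1 is really the MMUAccessType: 0 for a data load, 1 for a data
 * store, 2 for an instruction fetch, which is why it can be used directly
 * as a shift count against PAGE_READ/PAGE_WRITE/PAGE_EXEC below.
 */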
static int handle_mmu_fault(CPUState *cs, vaddr addr, int size,
                            int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    int32_t a20_mask;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32 bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }
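            /*
             * The sign-extension test above rejects non-canonical
             * addresses: the arithmetic shift leaves bits 63:47 of the
             * address (63:56 with LA57), which must be all-zeros or
             * all-ones.  Such addresses raise #GP rather than #PF.
             */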

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e_addr = get_hphys(cs, pml5e_addr, MMU_DATA_STORE, NULL);
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!(pml5e & PG_ACCESSED_MASK)) {
                    pml5e |= PG_ACCESSED_MASK;
                    x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
                }
                ptep = pml5e ^ PG_NX_MASK;
            } else {
                pml5e = env->cr[3];
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e_addr = get_hphys(cs, pml4e_addr, MMU_DATA_STORE, NULL);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

 do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

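    /*
     * mmu_idx encodes the access context: MMU_USER_IDX for user-mode
     * accesses, MMU_KSMAP_IDX for kernel accesses with SMAP enforced
     * (user pages are then unreadable), and MMU_KNOSMAP_IDX for kernel
     * accesses that may legitimately touch user pages.
     */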
    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
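    /*
     * Protection keys: bits 62:59 of a user leaf PTE select one of 16
     * keys, and PKRU holds an access-disable and a write-disable bit per
     * key.  They apply only to user pages in long mode and never block
     * instruction fetches.
     */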
    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
        (ptep & PG_USER_MASK) && env->pkru) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkru_ad) {
            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkru_prot &= ~PAGE_WRITE;
        }

        prot &= pkru_prot;
        if ((pkru_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

 do_mapping:
    pte = pte & a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = addr & (page_size - 1);
    paddr = get_hphys(cs, pte + page_offset, is_write1, &prot);

    /* Even with 4MB pages, we map only one 4KB page in the TLB to
       avoid filling it too quickly */
    vaddr = addr & TARGET_PAGE_MASK;
    paddr &= TARGET_PAGE_MASK;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user) {
        error_code |= PG_ERROR_U_MASK;
    }
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK))) {
        error_code |= PG_ERROR_I_D_MASK;
    }
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
#endif
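/*
 * x86_cpu_tlb_fill is the CPUClass tlb_fill hook; TCG invokes it on a
 * TLB miss.  "probe" requests that no exception be raised on failure
 * (see probe_access()), which the softmmu path below cannot yet honor
 * once get_hphys has taken a nested fault.
 */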
bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#ifdef CONFIG_USER_ONLY
    /* user mode only emulation */
    env->cr[2] = addr;
    env->error_code = (access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT;
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    env->exception_is_int = 0;
    env->exception_next_eip = -1;
    cpu_loop_exit_restore(cs, retaddr);
#else
    env->retaddr = retaddr;
    if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) {
        /* FIXME: On error in get_hphys we have already jumped out.  */
        g_assert(!probe);
        raise_exception_err_ra(env, cs->exception_index,
                               env->error_code, retaddr);
    }
    return true;
#endif
}