2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
5 * Copyright (c) 2003 Fabrice Bellard
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include "exec/helper-proto.h"
24 #include "exec/cpu_ldst.h"
29 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30 # define LOG_PCALL_STATE(cpu) \
31 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
33 # define LOG_PCALL(...) do { } while (0)
34 # define LOG_PCALL_STATE(cpu) do { } while (0)
37 #ifdef CONFIG_USER_ONLY
38 #define MEMSUFFIX _kernel
40 #include "exec/cpu_ldst_useronly_template.h"
43 #include "exec/cpu_ldst_useronly_template.h"
46 #include "exec/cpu_ldst_useronly_template.h"
49 #include "exec/cpu_ldst_useronly_template.h"
52 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
53 #define MEMSUFFIX _kernel
55 #include "exec/cpu_ldst_template.h"
58 #include "exec/cpu_ldst_template.h"
61 #include "exec/cpu_ldst_template.h"
64 #include "exec/cpu_ldst_template.h"
69 /* return non-zero on error */
70 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
71 uint32_t *e2_ptr, int selector,
83 index = selector & ~7;
84 if ((index + 7) > dt->limit) {
87 ptr = dt->base + index;
88 *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
89 *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
93 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
94 uint32_t *e2_ptr, int selector)
96 return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
99 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
103 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
104 if (e2 & DESC_G_MASK) {
105 limit = (limit << 12) | 0xfff;
110 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
112 return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
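/*
 * Note: e1/e2 are the low and high 32-bit words of an 8-byte segment
 * descriptor.  The base is scattered as base[15:0] = e1[31:16],
 * base[23:16] = e2[7:0], base[31:24] = e2[31:24]; the 20-bit limit is
 * limit[15:0] = e1[15:0], limit[19:16] = e2[19:16].  For example, a flat
 * 32-bit code segment with e1 = 0x0000ffff, e2 = 0x00cf9a00 decodes to
 * base = 0 and, with the G bit set, limit = (0xfffff << 12) | 0xfff
 * = 0xffffffff.
 */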
115 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
118 sc->base = get_seg_base(e1, e2);
119 sc->limit = get_seg_limit(e1, e2);
123 /* init the segment cache in vm86 mode. */
124 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
128 cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
129 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
130 DESC_A_MASK | (3 << DESC_DPL_SHIFT));
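/*
 * Note: in vm86 mode a selector is simply a paragraph number, so the cache
 * is filled directly: base = selector << 4 (e.g. selector 0x1234 gives base
 * 0x12340), limit = 0xffff, and the segment is marked as a present,
 * writable, accessed data segment with DPL 3.
 */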
133 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
134 uint32_t *esp_ptr, int dpl,
137 X86CPU *cpu = x86_env_get_cpu(env);
138 int type, index, shift;
143 printf("TR: base=" TARGET_FMT_lx " limit=%x\n", env->tr.base, env->tr.limit);
144 for (i = 0; i < env->tr.limit; i++) {
145 printf("%02x ", cpu_ldub_kernel(env, env->tr.base + i));
154 if (!(env->tr.flags & DESC_P_MASK)) {
155 cpu_abort(CPU(cpu), "invalid tss");
157 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
158 if ((type & 7) != 1) {
159 cpu_abort(CPU(cpu), "invalid tss type");
162 index = (dpl * 4 + 2) << shift;
163 if (index + (4 << shift) - 1 > env->tr.limit) {
164 raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
167 *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
168 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
170 *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
171 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
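/*
 * Note: "shift" is 1 for a 32-bit TSS and 0 for a 16-bit one, so
 * index = (dpl * 4 + 2) << shift points at the stack pointer slot for the
 * requested privilege level: e.g. dpl = 0 in a 32-bit TSS gives offset 4
 * (ESP0) with SS0 at offset 8, dpl = 1 gives offset 12 (ESP1), and so on.
 */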
175 static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
181 if ((selector & 0xfffc) != 0) {
182 if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
183 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
185 if (!(e2 & DESC_S_MASK)) {
186 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
189 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
190 if (seg_reg == R_CS) {
191 if (!(e2 & DESC_CS_MASK)) {
192 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
195 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
197 } else if (seg_reg == R_SS) {
198 /* SS must be writable data */
199 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
200 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
202 if (dpl != cpl || dpl != rpl) {
203 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
206 /* not readable code */
207 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
208 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
210 /* if data or non conforming code, check the rights */
211 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
212 if (dpl < cpl || dpl < rpl) {
213 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
217 if (!(e2 & DESC_P_MASK)) {
218 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
220 cpu_x86_load_seg_cache(env, seg_reg, selector,
221 get_seg_base(e1, e2),
222 get_seg_limit(e1, e2),
225 if (seg_reg == R_SS || seg_reg == R_CS) {
226 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
231 #define SWITCH_TSS_JMP 0
232 #define SWITCH_TSS_IRET 1
233 #define SWITCH_TSS_CALL 2
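/*
 * Note on the three task-switch sources handled below: JMP clears the old
 * task's busy bit and sets the new one; CALL leaves the old task busy, sets
 * the new one busy, stores the old TR selector in the new TSS (the back
 * link at offset 0) and sets NT so that a later IRET returns to the caller
 * task; IRET only clears the old busy bit, since the target TSS is already
 * marked busy.
 */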
235 /* XXX: restore CPU state in registers (PowerPC case) */
236 static void switch_tss_ra(CPUX86State *env, int tss_selector,
237 uint32_t e1, uint32_t e2, int source,
238 uint32_t next_eip, uintptr_t retaddr)
240 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
241 target_ulong tss_base;
242 uint32_t new_regs[8], new_segs[6];
243 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
244 uint32_t old_eflags, eflags_mask;
249 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
250 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
253 /* if it is a task gate, we read and load the TSS segment */
255 if (!(e2 & DESC_P_MASK)) {
256 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
258 tss_selector = e1 >> 16;
259 if (tss_selector & 4) {
260 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
262 if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
263 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
265 if (e2 & DESC_S_MASK) {
266 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
268 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
269 if ((type & 7) != 1) {
270 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
274 if (!(e2 & DESC_P_MASK)) {
275 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
283 tss_limit = get_seg_limit(e1, e2);
284 tss_base = get_seg_base(e1, e2);
285 if ((tss_selector & 4) != 0 ||
286 tss_limit < tss_limit_max) {
287 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
289 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
291 old_tss_limit_max = 103;
293 old_tss_limit_max = 43;
296 /* read all the registers from the new TSS */
299 new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
300 new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
301 new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
302 for (i = 0; i < 8; i++) {
303 new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
306 for (i = 0; i < 6; i++) {
307 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
310 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
311 new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
315 new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
316 new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
317 for (i = 0; i < 8; i++) {
318 new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
319 retaddr) | 0xffff0000;
321 for (i = 0; i < 4; i++) {
322 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
325 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
330 /* XXX: avoid a compiler warning, see
331 http://support.amd.com/us/Processor_TechDocs/24593.pdf
332 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
335 /* NOTE: we must avoid memory exceptions during the task switch,
336 so we make dummy accesses beforehand */
337 /* XXX: it can still fail in some cases, so a bigger hack is
338 necessary to validate the TLB after having done the accesses */
340 v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
341 v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
342 cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
343 cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
345 /* clear the old task's busy bit (the task becomes restartable) */
346 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
350 ptr = env->gdt.base + (env->tr.selector & ~7);
351 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
352 e2 &= ~DESC_TSS_BUSY_MASK;
353 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
355 old_eflags = cpu_compute_eflags(env);
356 if (source == SWITCH_TSS_IRET) {
357 old_eflags &= ~NT_MASK;
360 /* save the current state in the old TSS */
363 cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
364 cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
365 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
366 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
367 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
368 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
369 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
370 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
371 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
372 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
373 for (i = 0; i < 6; i++) {
374 cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
375 env->segs[i].selector, retaddr);
379 cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
380 cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
381 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
382 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
383 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
384 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
385 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
386 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
387 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
388 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
389 for (i = 0; i < 4; i++) {
390 cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
391 env->segs[i].selector, retaddr);
395 /* now if an exception occurs, it will occur in the next task
398 if (source == SWITCH_TSS_CALL) {
399 cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
400 new_eflags |= NT_MASK;
404 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
408 ptr = env->gdt.base + (tss_selector & ~7);
409 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
410 e2 |= DESC_TSS_BUSY_MASK;
411 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
414 /* set the new CPU state */
415 /* from this point, any exception which occurs can give problems */
416 env->cr[0] |= CR0_TS_MASK;
417 env->hflags |= HF_TS_MASK;
418 env->tr.selector = tss_selector;
419 env->tr.base = tss_base;
420 env->tr.limit = tss_limit;
421 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
423 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
424 cpu_x86_update_cr3(env, new_cr3);
427 /* first load the registers that cannot raise an exception, then reload
428 the ones that may fault */
430 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
431 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
433 eflags_mask &= 0xffff;
435 cpu_load_eflags(env, new_eflags, eflags_mask);
436 /* XXX: what to do in 16 bit case? */
437 env->regs[R_EAX] = new_regs[0];
438 env->regs[R_ECX] = new_regs[1];
439 env->regs[R_EDX] = new_regs[2];
440 env->regs[R_EBX] = new_regs[3];
441 env->regs[R_ESP] = new_regs[4];
442 env->regs[R_EBP] = new_regs[5];
443 env->regs[R_ESI] = new_regs[6];
444 env->regs[R_EDI] = new_regs[7];
445 if (new_eflags & VM_MASK) {
446 for (i = 0; i < 6; i++) {
447 load_seg_vm(env, i, new_segs[i]);
450 /* first load just the selectors, as the rest may trigger exceptions */
451 for (i = 0; i < 6; i++) {
452 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
456 env->ldt.selector = new_ldt & ~4;
463 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
466 if ((new_ldt & 0xfffc) != 0) {
468 index = new_ldt & ~7;
469 if ((index + 7) > dt->limit) {
470 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
472 ptr = dt->base + index;
473 e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
474 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
475 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
476 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
478 if (!(e2 & DESC_P_MASK)) {
479 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
481 load_seg_cache_raw_dt(&env->ldt, e1, e2);
484 /* load the segments */
485 if (!(new_eflags & VM_MASK)) {
486 int cpl = new_segs[R_CS] & 3;
487 tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
488 tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
489 tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
490 tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
491 tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
492 tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
495 /* check that env->eip is in the CS segment limits */
496 if (new_eip > env->segs[R_CS].limit) {
497 /* XXX: different exception if CALL? */
498 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
501 #ifndef CONFIG_USER_ONLY
502 /* reset local breakpoints */
503 if (env->dr[7] & DR7_LOCAL_BP_MASK) {
504 for (i = 0; i < DR7_MAX_BP; i++) {
505 if (hw_local_breakpoint_enabled(env->dr[7], i) &&
506 !hw_global_breakpoint_enabled(env->dr[7], i)) {
507 hw_breakpoint_remove(env, i);
510 env->dr[7] &= ~DR7_LOCAL_BP_MASK;
515 static void switch_tss(CPUX86State *env, int tss_selector,
516 uint32_t e1, uint32_t e2, int source,
519 switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
522 static inline unsigned int get_sp_mask(unsigned int e2)
524 if (e2 & DESC_B_MASK) {
531 static int exception_has_error_code(int intno)
547 #define SET_ESP(val, sp_mask) \
549 if ((sp_mask) == 0xffff) { \
550 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
552 } else if ((sp_mask) == 0xffffffffLL) { \
553 env->regs[R_ESP] = (uint32_t)(val); \
555 env->regs[R_ESP] = (val); \
559 #define SET_ESP(val, sp_mask) \
561 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
562 ((val) & (sp_mask)); \
566 /* on 64-bit targets this addition can overflow 32 bits, so this segment
567 * addition macro is used to trim the value to 32 bits whenever needed */
568 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
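/*
 * Note: sp_mask selects the stack width used by SET_ESP: 0xffff updates
 * only SP and preserves the upper ESP bits, 0xffffffff zero-extends the
 * 32-bit ESP (into RSP on 64-bit builds), and an all-ones mask writes the
 * full 64-bit RSP.  SEG_ADDL exists because target_ulong may be 64 bits
 * wide: truncating ssp + sp to 32 bits keeps protected-mode stack addresses
 * within the 32-bit linear address space instead of carrying into the
 * upper half.
 */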
570 /* XXX: add an is_user flag to have proper security support */
571 #define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
574 cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
577 #define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
580 cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
583 #define POPW_RA(ssp, sp, sp_mask, val, ra) \
585 val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
589 #define POPL_RA(ssp, sp, sp_mask, val, ra) \
591 val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
595 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
596 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
597 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
598 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
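/*
 * Note: these macros update the caller's local "sp" variable in place and
 * address the stack as ssp + (sp & sp_mask).  The *_RA variants thread a
 * host return address (ra) into the memory access so that a fault there can
 * be unwound to the guest instruction that caused it; the plain forms pass
 * 0 and are used where no such return address is available.
 */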
600 /* protected mode interrupt */
601 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
602 int error_code, unsigned int next_eip,
606 target_ulong ptr, ssp;
607 int type, dpl, selector, ss_dpl, cpl;
608 int has_error_code, new_stack, shift;
609 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
610 uint32_t old_eip, sp_mask;
611 int vm86 = env->eflags & VM_MASK;
614 if (!is_int && !is_hw) {
615 has_error_code = exception_has_error_code(intno);
624 if (intno * 8 + 7 > dt->limit) {
625 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
627 ptr = dt->base + intno * 8;
628 e1 = cpu_ldl_kernel(env, ptr);
629 e2 = cpu_ldl_kernel(env, ptr + 4);
630 /* check gate type */
631 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
633 case 5: /* task gate */
634 /* must do that check here to return the correct error code */
635 if (!(e2 & DESC_P_MASK)) {
636 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
638 switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
639 if (has_error_code) {
643 /* push the error code */
644 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
646 if (env->segs[R_SS].flags & DESC_B_MASK) {
651 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
652 ssp = env->segs[R_SS].base + esp;
654 cpu_stl_kernel(env, ssp, error_code);
656 cpu_stw_kernel(env, ssp, error_code);
661 case 6: /* 286 interrupt gate */
662 case 7: /* 286 trap gate */
663 case 14: /* 386 interrupt gate */
664 case 15: /* 386 trap gate */
667 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
670 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
671 cpl = env->hflags & HF_CPL_MASK;
672 /* check privilege if software int */
673 if (is_int && dpl < cpl) {
674 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
676 /* check valid bit */
677 if (!(e2 & DESC_P_MASK)) {
678 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
681 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
682 if ((selector & 0xfffc) == 0) {
683 raise_exception_err(env, EXCP0D_GPF, 0);
685 if (load_segment(env, &e1, &e2, selector) != 0) {
686 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
688 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
689 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
691 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
693 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
695 if (!(e2 & DESC_P_MASK)) {
696 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
698 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
699 /* to inner privilege */
700 get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
701 if ((ss & 0xfffc) == 0) {
702 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
704 if ((ss & 3) != dpl) {
705 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
707 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
708 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
710 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
712 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
714 if (!(ss_e2 & DESC_S_MASK) ||
715 (ss_e2 & DESC_CS_MASK) ||
716 !(ss_e2 & DESC_W_MASK)) {
717 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
719 if (!(ss_e2 & DESC_P_MASK)) {
720 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
723 sp_mask = get_sp_mask(ss_e2);
724 ssp = get_seg_base(ss_e1, ss_e2);
725 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
726 /* to same privilege */
728 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
731 sp_mask = get_sp_mask(env->segs[R_SS].flags);
732 ssp = env->segs[R_SS].base;
733 esp = env->regs[R_ESP];
736 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
737 new_stack = 0; /* avoid warning */
738 sp_mask = 0; /* avoid warning */
739 ssp = 0; /* avoid warning */
740 esp = 0; /* avoid warning */
746 /* XXX: check that enough room is available */
747 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
756 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
757 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
758 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
759 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
761 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
762 PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
764 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
765 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
766 PUSHL(ssp, esp, sp_mask, old_eip);
767 if (has_error_code) {
768 PUSHL(ssp, esp, sp_mask, error_code);
773 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
774 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
775 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
776 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
778 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
779 PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
781 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
782 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
783 PUSHW(ssp, esp, sp_mask, old_eip);
784 if (has_error_code) {
785 PUSHW(ssp, esp, sp_mask, error_code);
789 /* interrupt gates clear the IF flag (trap gates do not) */
790 if ((type & 1) == 0) {
791 env->eflags &= ~IF_MASK;
793 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
797 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
798 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
799 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
800 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
802 ss = (ss & ~3) | dpl;
803 cpu_x86_load_seg_cache(env, R_SS, ss,
804 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
806 SET_ESP(esp, sp_mask);
808 selector = (selector & ~3) | dpl;
809 cpu_x86_load_seg_cache(env, R_CS, selector,
810 get_seg_base(e1, e2),
811 get_seg_limit(e1, e2),
818 #define PUSHQ_RA(sp, val, ra) \
821 cpu_stq_kernel_ra(env, sp, (val), ra); \
824 #define POPQ_RA(sp, val, ra) \
826 val = cpu_ldq_kernel_ra(env, sp, ra); \
830 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
831 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
833 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
835 X86CPU *cpu = x86_env_get_cpu(env);
839 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
840 env->tr.base, env->tr.limit);
843 if (!(env->tr.flags & DESC_P_MASK)) {
844 cpu_abort(CPU(cpu), "invalid tss");
846 index = 8 * level + 4;
847 if ((index + 7) > env->tr.limit) {
848 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
850 return cpu_ldq_kernel(env, env->tr.base + index);
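/*
 * Note: in the 64-bit TSS, RSP0/1/2 live at offsets 4/12/20 and IST1-7 at
 * 0x24-0x54, all 8 bytes apart, so index = 8 * level + 4 covers both: the
 * caller passes level = dpl for an RSPn entry or level = ist + 3 for an
 * ISTn entry (e.g. ist = 1 gives offset 0x24, i.e. IST1).
 */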
853 /* 64 bit interrupt */
854 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
855 int error_code, target_ulong next_eip, int is_hw)
859 int type, dpl, selector, cpl, ist;
860 int has_error_code, new_stack;
861 uint32_t e1, e2, e3, ss;
862 target_ulong old_eip, esp, offset;
865 if (!is_int && !is_hw) {
866 has_error_code = exception_has_error_code(intno);
875 if (intno * 16 + 15 > dt->limit) {
876 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
878 ptr = dt->base + intno * 16;
879 e1 = cpu_ldl_kernel(env, ptr);
880 e2 = cpu_ldl_kernel(env, ptr + 4);
881 e3 = cpu_ldl_kernel(env, ptr + 8);
882 /* check gate type */
883 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
885 case 14: /* 386 interrupt gate */
886 case 15: /* 386 trap gate */
889 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
892 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
893 cpl = env->hflags & HF_CPL_MASK;
894 /* check privilege if software int */
895 if (is_int && dpl < cpl) {
896 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
898 /* check valid bit */
899 if (!(e2 & DESC_P_MASK)) {
900 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
903 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
905 if ((selector & 0xfffc) == 0) {
906 raise_exception_err(env, EXCP0D_GPF, 0);
909 if (load_segment(env, &e1, &e2, selector) != 0) {
910 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
912 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
913 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
915 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
917 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
919 if (!(e2 & DESC_P_MASK)) {
920 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
922 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
923 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
925 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
926 /* to inner privilege */
928 esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
930 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
931 /* to same privilege */
932 if (env->eflags & VM_MASK) {
933 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
936 esp = env->regs[R_ESP];
939 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
940 new_stack = 0; /* avoid warning */
941 esp = 0; /* avoid warning */
943 esp &= ~0xfLL; /* align stack */
945 PUSHQ(esp, env->segs[R_SS].selector);
946 PUSHQ(esp, env->regs[R_ESP]);
947 PUSHQ(esp, cpu_compute_eflags(env));
948 PUSHQ(esp, env->segs[R_CS].selector);
950 if (has_error_code) {
951 PUSHQ(esp, error_code);
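/*
 * Note: the resulting 64-bit interrupt stack frame is, from higher to lower
 * addresses, SS:RSP of the interrupted context, RFLAGS, CS:RIP and an
 * optional error code; every slot is pushed as a full 8-byte quantity, and
 * RSP was aligned down to 16 bytes above before the pushes.
 */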
954 /* interrupt gates clear the IF flag (trap gates do not) */
955 if ((type & 1) == 0) {
956 env->eflags &= ~IF_MASK;
958 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
962 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
964 env->regs[R_ESP] = esp;
966 selector = (selector & ~3) | dpl;
967 cpu_x86_load_seg_cache(env, R_CS, selector,
968 get_seg_base(e1, e2),
969 get_seg_limit(e1, e2),
976 #if defined(CONFIG_USER_ONLY)
977 void helper_syscall(CPUX86State *env, int next_eip_addend)
979 CPUState *cs = CPU(x86_env_get_cpu(env));
981 cs->exception_index = EXCP_SYSCALL;
982 env->exception_next_eip = env->eip + next_eip_addend;
986 void helper_syscall(CPUX86State *env, int next_eip_addend)
990 if (!(env->efer & MSR_EFER_SCE)) {
991 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
993 selector = (env->star >> 32) & 0xffff;
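/*
 * Note: for SYSCALL the new CS selector comes from STAR[47:32], with SS
 * implicitly CS + 8.  In long mode the target RIP comes from LSTAR (64-bit
 * code) or CSTAR (compatibility mode), while legacy mode jumps to the EIP
 * held in STAR[31:0], as seen in the two branches below.
 */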
994 if (env->hflags & HF_LMA_MASK) {
997 env->regs[R_ECX] = env->eip + next_eip_addend;
998 env->regs[11] = cpu_compute_eflags(env);
1000 code64 = env->hflags & HF_CS64_MASK;
1002 env->eflags &= ~env->fmask;
1003 cpu_load_eflags(env, env->eflags, 0);
1004 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1006 DESC_G_MASK | DESC_P_MASK |
1008 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1010 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1012 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1014 DESC_W_MASK | DESC_A_MASK);
1016 env->eip = env->lstar;
1018 env->eip = env->cstar;
1021 env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
1023 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1024 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1026 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1028 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1029 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1031 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1033 DESC_W_MASK | DESC_A_MASK);
1034 env->eip = (uint32_t)env->star;
1040 #ifdef TARGET_X86_64
1041 void helper_sysret(CPUX86State *env, int dflag)
1045 if (!(env->efer & MSR_EFER_SCE)) {
1046 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1048 cpl = env->hflags & HF_CPL_MASK;
1049 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1050 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1052 selector = (env->star >> 48) & 0xffff;
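/*
 * Note: SYSRET takes its selectors from STAR[63:48]: a 64-bit return uses
 * CS = selector + 16, a 32-bit (compatibility or legacy) return uses CS =
 * selector, and SS is always selector + 8; the RPL is forced to 3 in every
 * case, matching the descriptor caches loaded below.
 */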
1053 if (env->hflags & HF_LMA_MASK) {
1054 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1055 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1058 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1060 DESC_G_MASK | DESC_P_MASK |
1061 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1062 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1064 env->eip = env->regs[R_ECX];
1066 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1068 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1069 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1070 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1071 env->eip = (uint32_t)env->regs[R_ECX];
1073 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1075 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1076 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1077 DESC_W_MASK | DESC_A_MASK);
1079 env->eflags |= IF_MASK;
1080 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1082 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1083 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1084 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1085 env->eip = (uint32_t)env->regs[R_ECX];
1086 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1088 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1089 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1090 DESC_W_MASK | DESC_A_MASK);
1095 /* real mode interrupt */
1096 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1097 int error_code, unsigned int next_eip)
1100 target_ulong ptr, ssp;
1102 uint32_t offset, esp;
1103 uint32_t old_cs, old_eip;
1105 /* real mode (simpler!) */
1107 if (intno * 4 + 3 > dt->limit) {
1108 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1110 ptr = dt->base + intno * 4;
1111 offset = cpu_lduw_kernel(env, ptr);
1112 selector = cpu_lduw_kernel(env, ptr + 2);
1113 esp = env->regs[R_ESP];
1114 ssp = env->segs[R_SS].base;
1120 old_cs = env->segs[R_CS].selector;
1121 /* XXX: use SS segment size? */
1122 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1123 PUSHW(ssp, esp, 0xffff, old_cs);
1124 PUSHW(ssp, esp, 0xffff, old_eip);
1126 /* update processor state */
1127 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1129 env->segs[R_CS].selector = selector;
1130 env->segs[R_CS].base = (selector << 4);
1131 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1134 #if defined(CONFIG_USER_ONLY)
1135 /* fake user mode interrupt */
1136 static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1137 int error_code, target_ulong next_eip)
1141 int dpl, cpl, shift;
1145 if (env->hflags & HF_LMA_MASK) {
1150 ptr = dt->base + (intno << shift);
1151 e2 = cpu_ldl_kernel(env, ptr + 4);
1153 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1154 cpl = env->hflags & HF_CPL_MASK;
1155 /* check privilege if software int */
1156 if (is_int && dpl < cpl) {
1157 raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1160 /* Since we emulate only user space, we cannot do more than
1161 exit the emulation with the suitable exception and error
1162 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1163 if (is_int || intno == EXCP_SYSCALL) {
1164 env->eip = next_eip;
1170 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1171 int error_code, int is_hw, int rm)
1173 CPUState *cs = CPU(x86_env_get_cpu(env));
1174 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1175 control.event_inj));
1177 if (!(event_inj & SVM_EVTINJ_VALID)) {
1181 type = SVM_EVTINJ_TYPE_SOFT;
1183 type = SVM_EVTINJ_TYPE_EXEPT;
1185 event_inj = intno | type | SVM_EVTINJ_VALID;
1186 if (!rm && exception_has_error_code(intno)) {
1187 event_inj |= SVM_EVTINJ_VALID_ERR;
1188 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1189 control.event_inj_err),
1193 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1200 * Begin execution of an interrupt. is_int is TRUE if coming from
1201 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1202 * instruction. It is only relevant if is_int is TRUE.
1204 static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1205 int error_code, target_ulong next_eip, int is_hw)
1207 CPUX86State *env = &cpu->env;
1209 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1210 if ((env->cr[0] & CR0_PE_MASK)) {
1213 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1214 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1215 count, intno, error_code, is_int,
1216 env->hflags & HF_CPL_MASK,
1217 env->segs[R_CS].selector, env->eip,
1218 (int)env->segs[R_CS].base + env->eip,
1219 env->segs[R_SS].selector, env->regs[R_ESP]);
1220 if (intno == 0x0e) {
1221 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1223 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1226 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1233 ptr = env->segs[R_CS].base + env->eip;
1234 for (i = 0; i < 16; i++) {
1235 qemu_log(" %02x", ldub(ptr + i));
1243 if (env->cr[0] & CR0_PE_MASK) {
1244 #if !defined(CONFIG_USER_ONLY)
1245 if (env->hflags & HF_SVMI_MASK) {
1246 handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1249 #ifdef TARGET_X86_64
1250 if (env->hflags & HF_LMA_MASK) {
1251 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1255 do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1259 #if !defined(CONFIG_USER_ONLY)
1260 if (env->hflags & HF_SVMI_MASK) {
1261 handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1264 do_interrupt_real(env, intno, is_int, error_code, next_eip);
1267 #if !defined(CONFIG_USER_ONLY)
1268 if (env->hflags & HF_SVMI_MASK) {
1269 CPUState *cs = CPU(cpu);
1270 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1271 offsetof(struct vmcb,
1272 control.event_inj));
1275 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1276 event_inj & ~SVM_EVTINJ_VALID);
1281 void x86_cpu_do_interrupt(CPUState *cs)
1283 X86CPU *cpu = X86_CPU(cs);
1284 CPUX86State *env = &cpu->env;
1286 #if defined(CONFIG_USER_ONLY)
1287 /* in user-mode-only emulation, we simulate a fake exception
1288 which will be handled outside of CPU execution
1290 do_interrupt_user(env, cs->exception_index,
1291 env->exception_is_int,
1293 env->exception_next_eip);
1294 /* successfully delivered */
1295 env->old_exception = -1;
1297 /* simulate a real cpu exception. On i386, it can
1298 trigger new exceptions, but we do not handle
1299 double or triple faults yet. */
1300 do_interrupt_all(cpu, cs->exception_index,
1301 env->exception_is_int,
1303 env->exception_next_eip, 0);
1304 /* successfully delivered */
1305 env->old_exception = -1;
1309 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1311 do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
1314 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1316 X86CPU *cpu = X86_CPU(cs);
1317 CPUX86State *env = &cpu->env;
1320 #if !defined(CONFIG_USER_ONLY)
1321 if (interrupt_request & CPU_INTERRUPT_POLL) {
1322 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1323 apic_poll_irq(cpu->apic_state);
1326 if (interrupt_request & CPU_INTERRUPT_SIPI) {
1328 } else if (env->hflags2 & HF2_GIF_MASK) {
1329 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
1330 !(env->hflags & HF_SMM_MASK)) {
1331 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
1332 cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1335 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
1336 !(env->hflags2 & HF2_NMI_MASK)) {
1337 cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1338 env->hflags2 |= HF2_NMI_MASK;
1339 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1341 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
1342 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1343 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1345 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
1346 (((env->hflags2 & HF2_VINTR_MASK) &&
1347 (env->hflags2 & HF2_HIF_MASK)) ||
1348 (!(env->hflags2 & HF2_VINTR_MASK) &&
1349 (env->eflags & IF_MASK &&
1350 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
1352 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
1353 cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1354 CPU_INTERRUPT_VIRQ);
1355 intno = cpu_get_pic_interrupt(env);
1356 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1357 "Servicing hardware INT=0x%02x\n", intno);
1358 do_interrupt_x86_hardirq(env, intno, 1);
1359 /* ensure that no TB jump will be modified, since
1360 the program flow has changed */
1362 #if !defined(CONFIG_USER_ONLY)
1363 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
1364 (env->eflags & IF_MASK) &&
1365 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
1367 /* FIXME: this should respect TPR */
1368 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
1369 intno = x86_ldl_phys(cs, env->vm_vmcb
1370 + offsetof(struct vmcb, control.int_vector));
1371 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1372 "Servicing virtual hardware INT=0x%02x\n", intno);
1373 do_interrupt_x86_hardirq(env, intno, 1);
1374 cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1383 void helper_enter_level(CPUX86State *env, int level, int data32,
1387 uint32_t esp_mask, esp, ebp;
1389 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1390 ssp = env->segs[R_SS].base;
1391 ebp = env->regs[R_EBP];
1392 esp = env->regs[R_ESP];
1399 cpu_stl_data_ra(env, ssp + (esp & esp_mask),
1400 cpu_ldl_data_ra(env, ssp + (ebp & esp_mask),
1405 cpu_stl_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
1412 cpu_stw_data_ra(env, ssp + (esp & esp_mask),
1413 cpu_lduw_data_ra(env, ssp + (ebp & esp_mask),
1418 cpu_stw_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
1422 #ifdef TARGET_X86_64
1423 void helper_enter64_level(CPUX86State *env, int level, int data64,
1426 target_ulong esp, ebp;
1428 ebp = env->regs[R_EBP];
1429 esp = env->regs[R_ESP];
1437 cpu_stq_data_ra(env, esp, cpu_ldq_data_ra(env, ebp, GETPC()),
1441 cpu_stq_data_ra(env, esp, t1, GETPC());
1448 cpu_stw_data_ra(env, esp, cpu_lduw_data_ra(env, ebp, GETPC()),
1452 cpu_stw_data_ra(env, esp, t1, GETPC());
1457 void helper_lldt(CPUX86State *env, int selector)
1461 int index, entry_limit;
1465 if ((selector & 0xfffc) == 0) {
1466 /* XXX: NULL selector case: invalid LDT */
1470 if (selector & 0x4) {
1471 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1474 index = selector & ~7;
1475 #ifdef TARGET_X86_64
1476 if (env->hflags & HF_LMA_MASK) {
1483 if ((index + entry_limit) > dt->limit) {
1484 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1486 ptr = dt->base + index;
1487 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1488 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1489 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1490 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1492 if (!(e2 & DESC_P_MASK)) {
1493 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1495 #ifdef TARGET_X86_64
1496 if (env->hflags & HF_LMA_MASK) {
1499 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1500 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1501 env->ldt.base |= (target_ulong)e3 << 32;
1505 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1508 env->ldt.selector = selector;
1511 void helper_ltr(CPUX86State *env, int selector)
1515 int index, type, entry_limit;
1519 if ((selector & 0xfffc) == 0) {
1520 /* NULL selector case: invalid TR */
1525 if (selector & 0x4) {
1526 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1529 index = selector & ~7;
1530 #ifdef TARGET_X86_64
1531 if (env->hflags & HF_LMA_MASK) {
1538 if ((index + entry_limit) > dt->limit) {
1539 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1541 ptr = dt->base + index;
1542 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1543 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1544 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1545 if ((e2 & DESC_S_MASK) ||
1546 (type != 1 && type != 9)) {
1547 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1549 if (!(e2 & DESC_P_MASK)) {
1550 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1552 #ifdef TARGET_X86_64
1553 if (env->hflags & HF_LMA_MASK) {
1556 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1557 e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1558 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1559 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1561 load_seg_cache_raw_dt(&env->tr, e1, e2);
1562 env->tr.base |= (target_ulong)e3 << 32;
1566 load_seg_cache_raw_dt(&env->tr, e1, e2);
1568 e2 |= DESC_TSS_BUSY_MASK;
1569 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1571 env->tr.selector = selector;
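/*
 * Note: LTR marks the loaded TSS descriptor busy in the descriptor table
 * (e2 |= DESC_TSS_BUSY_MASK above); in long mode the descriptor is 16 bytes
 * wide, with bits 63:32 of the base in the third word and a fourth word
 * whose type field must be zero.
 */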
1574 /* only valid in protected mode, outside VM86 mode; seg_reg must be != R_CS */
1575 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1584 cpl = env->hflags & HF_CPL_MASK;
1585 if ((selector & 0xfffc) == 0) {
1586 /* null selector case */
1588 #ifdef TARGET_X86_64
1589 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1592 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1594 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1597 if (selector & 0x4) {
1602 index = selector & ~7;
1603 if ((index + 7) > dt->limit) {
1604 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1606 ptr = dt->base + index;
1607 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1608 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1610 if (!(e2 & DESC_S_MASK)) {
1611 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1614 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1615 if (seg_reg == R_SS) {
1616 /* must be writable segment */
1617 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1618 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1620 if (rpl != cpl || dpl != cpl) {
1621 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1624 /* must be readable segment */
1625 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1626 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1629 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1630 /* if not conforming code, test rights */
1631 if (dpl < cpl || dpl < rpl) {
1632 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1637 if (!(e2 & DESC_P_MASK)) {
1638 if (seg_reg == R_SS) {
1639 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1641 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1645 /* set the access bit if not already set */
1646 if (!(e2 & DESC_A_MASK)) {
1648 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1651 cpu_x86_load_seg_cache(env, seg_reg, selector,
1652 get_seg_base(e1, e2),
1653 get_seg_limit(e1, e2),
1656 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08x flags=%08x\n",
1657 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1662 /* protected mode jump */
1663 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1664 target_ulong next_eip)
1667 uint32_t e1, e2, cpl, dpl, rpl, limit;
1669 if ((new_cs & 0xfffc) == 0) {
1670 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1672 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1673 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1675 cpl = env->hflags & HF_CPL_MASK;
1676 if (e2 & DESC_S_MASK) {
1677 if (!(e2 & DESC_CS_MASK)) {
1678 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1680 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1681 if (e2 & DESC_C_MASK) {
1682 /* conforming code segment */
1684 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1687 /* non conforming code segment */
1690 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1693 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1696 if (!(e2 & DESC_P_MASK)) {
1697 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1699 limit = get_seg_limit(e1, e2);
1700 if (new_eip > limit &&
1701 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
1702 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1704 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1705 get_seg_base(e1, e2), limit, e2);
1708 /* jump to call or task gate */
1709 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1711 cpl = env->hflags & HF_CPL_MASK;
1712 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1714 case 1: /* 286 TSS */
1715 case 9: /* 386 TSS */
1716 case 5: /* task gate */
1717 if (dpl < cpl || dpl < rpl) {
1718 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1720 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1722 case 4: /* 286 call gate */
1723 case 12: /* 386 call gate */
1724 if ((dpl < cpl) || (dpl < rpl)) {
1725 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1727 if (!(e2 & DESC_P_MASK)) {
1728 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1731 new_eip = (e1 & 0xffff);
1733 new_eip |= (e2 & 0xffff0000);
1735 if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1736 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1738 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1739 /* must be code segment */
1740 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1741 (DESC_S_MASK | DESC_CS_MASK))) {
1742 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1744 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1745 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1746 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1748 if (!(e2 & DESC_P_MASK)) {
1749 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1751 limit = get_seg_limit(e1, e2);
1752 if (new_eip > limit) {
1753 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1755 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1756 get_seg_base(e1, e2), limit, e2);
1760 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1766 /* real mode call */
1767 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1768 int shift, int next_eip)
1771 uint32_t esp, esp_mask;
1775 esp = env->regs[R_ESP];
1776 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1777 ssp = env->segs[R_SS].base;
1779 PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1780 PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1782 PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1783 PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1786 SET_ESP(esp, esp_mask);
1788 env->segs[R_CS].selector = new_cs;
1789 env->segs[R_CS].base = (new_cs << 4);
1792 /* protected mode call */
1793 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1794 int shift, target_ulong next_eip)
1797 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1798 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
1799 uint32_t val, limit, old_sp_mask;
1800 target_ulong ssp, old_ssp;
1802 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
1803 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
1804 if ((new_cs & 0xfffc) == 0) {
1805 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1807 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1808 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1810 cpl = env->hflags & HF_CPL_MASK;
1811 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1812 if (e2 & DESC_S_MASK) {
1813 if (!(e2 & DESC_CS_MASK)) {
1814 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1816 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1817 if (e2 & DESC_C_MASK) {
1818 /* conforming code segment */
1820 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1823 /* non conforming code segment */
1826 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1829 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1832 if (!(e2 & DESC_P_MASK)) {
1833 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1836 #ifdef TARGET_X86_64
1837 /* XXX: check 16/32 bit cases in long mode */
1842 rsp = env->regs[R_ESP];
1843 PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1844 PUSHQ_RA(rsp, next_eip, GETPC());
1845 /* from this point, not restartable */
1846 env->regs[R_ESP] = rsp;
1847 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1848 get_seg_base(e1, e2),
1849 get_seg_limit(e1, e2), e2);
1854 sp = env->regs[R_ESP];
1855 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1856 ssp = env->segs[R_SS].base;
1858 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1859 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1861 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1862 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1865 limit = get_seg_limit(e1, e2);
1866 if (new_eip > limit) {
1867 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1869 /* from this point, not restartable */
1870 SET_ESP(sp, sp_mask);
1871 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1872 get_seg_base(e1, e2), limit, e2);
1876 /* check gate type */
1877 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1878 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1881 case 1: /* available 286 TSS */
1882 case 9: /* available 386 TSS */
1883 case 5: /* task gate */
1884 if (dpl < cpl || dpl < rpl) {
1885 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1887 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1889 case 4: /* 286 call gate */
1890 case 12: /* 386 call gate */
1893 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1898 if (dpl < cpl || dpl < rpl) {
1899 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1901 /* check valid bit */
1902 if (!(e2 & DESC_P_MASK)) {
1903 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1905 selector = e1 >> 16;
1906 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1907 param_count = e2 & 0x1f;
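/*
 * Note: the call-gate parameter count (bits 4:0 of e2) gives the number of
 * stack words (16-bit gate) or dwords (32-bit gate) copied from the
 * caller's stack to the new inner stack in the loops below, e.g.
 * param_count = 2 on a 386 call gate copies 8 bytes of arguments.
 */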
1908 if ((selector & 0xfffc) == 0) {
1909 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1912 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1913 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1915 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1916 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1918 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1920 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1922 if (!(e2 & DESC_P_MASK)) {
1923 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1926 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1927 /* to inner privilege */
1928 get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
1929 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1930 TARGET_FMT_lx "\n", ss, sp, param_count,
1932 if ((ss & 0xfffc) == 0) {
1933 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1935 if ((ss & 3) != dpl) {
1936 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1938 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1939 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1941 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1942 if (ss_dpl != dpl) {
1943 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1945 if (!(ss_e2 & DESC_S_MASK) ||
1946 (ss_e2 & DESC_CS_MASK) ||
1947 !(ss_e2 & DESC_W_MASK)) {
1948 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1950 if (!(ss_e2 & DESC_P_MASK)) {
1951 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1954 /* push_size = ((param_count * 2) + 8) << shift; */
1956 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1957 old_ssp = env->segs[R_SS].base;
1959 sp_mask = get_sp_mask(ss_e2);
1960 ssp = get_seg_base(ss_e1, ss_e2);
1962 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1963 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1964 for (i = param_count - 1; i >= 0; i--) {
1965 val = cpu_ldl_kernel_ra(env, old_ssp +
1966 ((env->regs[R_ESP] + i * 4) &
1967 old_sp_mask), GETPC());
1968 PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
1971 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1972 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1973 for (i = param_count - 1; i >= 0; i--) {
1974 val = cpu_lduw_kernel_ra(env, old_ssp +
1975 ((env->regs[R_ESP] + i * 2) &
1976 old_sp_mask), GETPC());
1977 PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
1982 /* to same privilege */
1983 sp = env->regs[R_ESP];
1984 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1985 ssp = env->segs[R_SS].base;
1986 /* push_size = (4 << shift); */
1991 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1992 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1994 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1995 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1998 /* from this point, not restartable */
2001 ss = (ss & ~3) | dpl;
2002 cpu_x86_load_seg_cache(env, R_SS, ss,
2004 get_seg_limit(ss_e1, ss_e2),
2008 selector = (selector & ~3) | dpl;
2009 cpu_x86_load_seg_cache(env, R_CS, selector,
2010 get_seg_base(e1, e2),
2011 get_seg_limit(e1, e2),
2013 SET_ESP(sp, sp_mask);
2018 /* real and vm86 mode iret */
2019 void helper_iret_real(CPUX86State *env, int shift)
2021 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2025 sp_mask = 0xffff; /* XXXX: use SS segment size? */
2026 sp = env->regs[R_ESP];
2027 ssp = env->segs[R_SS].base;
2030 POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
2031 POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
2033 POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2036 POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
2037 POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
2038 POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2040 env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
2041 env->segs[R_CS].selector = new_cs;
2042 env->segs[R_CS].base = (new_cs << 4);
2044 if (env->eflags & VM_MASK) {
2045 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2048 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2052 eflags_mask &= 0xffff;
2054 cpu_load_eflags(env, new_eflags, eflags_mask);
2055 env->hflags2 &= ~HF2_NMI_MASK;
2058 static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
2063 /* XXX: on x86_64, we do not want to nullify FS and GS because
2064 they may still contain a valid base. I would be interested to
2065 know how a real x86_64 CPU behaves */
2066 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2067 (env->segs[seg_reg].selector & 0xfffc) == 0) {
2071 e2 = env->segs[seg_reg].flags;
2072 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2073 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2074 /* data or non conforming code segment */
2076 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}

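/*
 * Sketch of the stack image consumed by helper_ret_protected(), assuming a
 * 32-bit IRET frame (shift == 1, is_iret != 0); offsets are relative to the
 * stack pointer on entry:
 *
 *     +0   return EIP
 *     +4   return CS       (only the low 16 bits are used)
 *     +8   EFLAGS          (popped only when is_iret != 0)
 *     +12  new ESP         (only for a return to an outer privilege level)
 *     +16  new SS          (only for a return to an outer privilege level)
 *
 * An LRET frame has no EFLAGS slot, and any imm16 addend is skipped before
 * ESP/SS are popped.  With shift == 0 each slot shrinks to 2 bytes; with
 * shift == 2 (64-bit IRETQ/LRETQ) each slot grows to 8 bytes.
 */
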
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}

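/*
 * Example of the addend argument, assuming the usual mapping of shift to
 * operand size (0 = 16-bit, 1 = 32-bit, 2 = 64-bit): "lret $8" executed in
 * 32-bit protected mode reaches this helper roughly as
 * helper_lret_protected(env, 1, 8), so EIP and CS are popped and 8 further
 * bytes of call arguments are then discarded from the old stack.
 */
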
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}

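/*
 * Selector layout example (architectural SYSENTER convention, every value
 * derived from IA32_SYSENTER_CS): if the OS programs IA32_SYSENTER_CS with
 * 0x0008, the helper above enters the kernel with CS = 0x0008 and
 * SS = 0x0010 (CS + 8), both flat 4 GiB segments, with ESP taken from
 * IA32_SYSENTER_ESP and EIP from IA32_SYSENTER_EIP.
 */
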
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}

target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: case 2: case 3: case 9: case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

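/*
 * Usage note: architecturally LSL sets ZF and writes its destination only
 * when the descriptor passes the checks above.  For a flat 32-bit data
 * segment (limit field 0xfffff with the granularity bit set) the helper
 * returns 0xffffffff and sets CC_Z in CC_SRC; for a selector that fails
 * the type or privilege checks it returns 0 with CC_Z cleared, so the
 * translated instruction can report ZF = 0 and leave the destination
 * register unmodified.
 */
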
target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: case 2: case 3: case 4: case 5:
        case 9: case 11: case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    }
    if (dpl < cpl || dpl < rpl) {
        goto fail;
    }
    if (!(e2 & DESC_W_MASK)) {
    fail:
        CC_SRC = eflags & ~CC_Z;
        return;
    }
    CC_SRC = eflags | CC_Z;
}

#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif

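/*
 * Real mode / vm86 example for the first branch above: loading SS with
 * selector 0x07c0 caches base = 0x07c0 << 4 = 0x7c00 and a 64 KiB limit,
 * so SS:0x0000 addresses linear 0x7c00, matching 8086-style segmentation.
 */
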
/* check if Port I/O is allowed in TSS */
static inline void check_io(CPUX86State *env, int addr, int size,
                            uintptr_t retaddr)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }
}

void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1, GETPC());
}

void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2, GETPC());
}

void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4, GETPC());
}
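
/*
 * Worked example of the TSS I/O bitmap lookup above: an OUT to port 0x3f9
 * with size 1 reads the 16-bit bitmap base from TSS offset 0x66, adds
 * 0x3f9 >> 3 = 0x7f to it, fetches two bytes from that offset, shifts
 * them right by 0x3f9 & 7 = 1 and tests mask (1 << 1) - 1 = 0x1.  The
 * access is allowed only if the tested bit is clear; otherwise #GP(0) is
 * raised.
 */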