4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 #define CPU_NO_GLOBAL_REGS
23 #include "host-utils.h"
29 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30 # define LOG_PCALL_STATE(env) \
31 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
33 # define LOG_PCALL(...) do { } while (0)
34 # define LOG_PCALL_STATE(env) do { } while (0)
39 #define raise_exception_err(a, b)\
41 qemu_log("raise_exception line=%d\n", __LINE__);\
42 (raise_exception_err)(a, b);\
46 static const uint8_t parity_table[256] = {
47 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
48 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
49 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
51 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
58 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
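/* Note: parity_table[b] yields CC_P exactly when byte b contains an even
   number of set bits, matching the x86 PF definition; the flag code indexes
   it with the low 8 bits of a result (see helper_daa/helper_das below). */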
82 static const uint8_t rclw_table[32] = {
83 0, 1, 2, 3, 4, 5, 6, 7,
84 8, 9,10,11,12,13,14,15,
85 16, 0, 1, 2, 3, 4, 5, 6,
86 7, 8, 9,10,11,12,13,14,
90 static const uint8_t rclb_table[32] = {
91 0, 1, 2, 3, 4, 5, 6, 7,
92 8, 0, 1, 2, 3, 4, 5, 6,
93 7, 8, 0, 1, 2, 3, 4, 5,
94 6, 7, 8, 0, 1, 2, 3, 4,
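/* Note: the two tables above fold the rotate count for rotate-through-carry:
   a 16-bit RCL/RCR has period 17 and an 8-bit one period 9, so
   rclw_table[count & 0x1f] and rclb_table[count & 0x1f] give the effective
   rotation amount. */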
97 static const CPU86_LDouble f15rk[7] =
99 0.00000000000000000000L,
100 1.00000000000000000000L,
101 3.14159265358979323851L, /*pi*/
102 0.30102999566398119523L, /*lg2*/
103 0.69314718055994530943L, /*ln2*/
104 1.44269504088896340739L, /*l2e*/
105 3.32192809488736234781L, /*l2t*/
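/* Constants pushed by the x87 load-constant instructions, in this order:
   FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T. */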
108 /* broken thread support */
110 static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
112 void helper_lock(void)
114 spin_lock(&global_cpu_lock);
117 void helper_unlock(void)
119 spin_unlock(&global_cpu_lock);
122 void helper_write_eflags(target_ulong t0, uint32_t update_mask)
124 load_eflags(t0, update_mask);
127 target_ulong helper_read_eflags(void)
130 eflags = helper_cc_compute_all(CC_OP);
131 eflags |= (DF & DF_MASK);
132 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
136 /* return non-zero on error */
137 static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
148 index = selector & ~7;
149 if ((index + 7) > dt->limit)
151 ptr = dt->base + index;
152 *e1_ptr = ldl_kernel(ptr);
153 *e2_ptr = ldl_kernel(ptr + 4);
157 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
160 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
161 if (e2 & DESC_G_MASK)
162 limit = (limit << 12) | 0xfff;
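    /* with the granularity bit set, the 20-bit limit is in 4 KiB units, so
       scale it to bytes and make it inclusive (hence the | 0xfff) */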
166 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
168 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
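/* A code/data descriptor is 8 bytes: e1 is its low word, e2 its high word.
   The 32-bit base is scattered over e1[31:16], e2[7:0] and e2[31:24]; the
   limit, decoded above, comes from e1[15:0] and e2[19:16]. */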
171 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
173 sc->base = get_seg_base(e1, e2);
174 sc->limit = get_seg_limit(e1, e2);
178 /* init the segment cache in vm86 mode. */
179 static inline void load_seg_vm(int seg, int selector)
182 cpu_x86_load_seg_cache(env, seg, selector,
183 (selector << 4), 0xffff, 0);
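/* Note: in vm86 mode a segment register is just a paragraph number: the base
   is selector * 16, the limit is 64 KiB and the descriptor flags are left
   clear, so no protection checks apply. */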
186 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
187 uint32_t *esp_ptr, int dpl)
189 int type, index, shift;
194 printf("TR: base=" TARGET_FMT_lx " limit=%x\n", env->tr.base, env->tr.limit);
195 for (i = 0; i < env->tr.limit; i++) {
196 printf("%02x ", ldub_kernel(env->tr.base + i));
197 if ((i & 7) == 7) printf("\n");
203 if (!(env->tr.flags & DESC_P_MASK))
204 cpu_abort(env, "invalid tss");
205 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
207 cpu_abort(env, "invalid tss type");
209 index = (dpl * 4 + 2) << shift;
210 if (index + (4 << shift) - 1 > env->tr.limit)
211 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
213 *esp_ptr = lduw_kernel(env->tr.base + index);
214 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
216 *esp_ptr = ldl_kernel(env->tr.base + index);
217 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
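    /* In a 32-bit TSS the ring-N stack pointer pair lives at offset
       4 + 8 * N (ESP, then SS); in a 16-bit TSS it is at 2 + 4 * N (SP,
       then SS). The (dpl * 4 + 2) << shift index above covers both layouts. */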
221 /* XXX: merge with load_seg() */
222 static void tss_load_seg(int seg_reg, int selector)
227 if ((selector & 0xfffc) != 0) {
228 if (load_segment(&e1, &e2, selector) != 0)
229 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
230 if (!(e2 & DESC_S_MASK))
231 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
233 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
234 cpl = env->hflags & HF_CPL_MASK;
235 if (seg_reg == R_CS) {
236 if (!(e2 & DESC_CS_MASK))
237 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
238 /* XXX: is it correct ? */
240 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
241 if ((e2 & DESC_C_MASK) && dpl > rpl)
242 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
243 } else if (seg_reg == R_SS) {
244 /* SS must be writable data */
245 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
246 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
247 if (dpl != cpl || dpl != rpl)
248 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
250 /* not readable code */
251 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
252 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
253 /* if data or non-conforming code, check the rights */
254 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
255 if (dpl < cpl || dpl < rpl)
256 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
259 if (!(e2 & DESC_P_MASK))
260 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
261 cpu_x86_load_seg_cache(env, seg_reg, selector,
262 get_seg_base(e1, e2),
263 get_seg_limit(e1, e2),
266 if (seg_reg == R_SS || seg_reg == R_CS)
267 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
271 #define SWITCH_TSS_JMP 0
272 #define SWITCH_TSS_IRET 1
273 #define SWITCH_TSS_CALL 2
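/* The task switch source drives the book-keeping below: JMP and IRET clear
   the busy bit of the outgoing TSS, CALL stores a back link and sets NT in
   the new EFLAGS, and JMP/CALL mark the incoming TSS busy. */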
275 /* XXX: restore CPU state in registers (PowerPC case) */
276 static void switch_tss(int tss_selector,
277 uint32_t e1, uint32_t e2, int source,
280 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
281 target_ulong tss_base;
282 uint32_t new_regs[8], new_segs[6];
283 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
284 uint32_t old_eflags, eflags_mask;
289 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
290 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
292 /* if task gate, we read the TSS segment and we load it */
294 if (!(e2 & DESC_P_MASK))
295 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
296 tss_selector = e1 >> 16;
297 if (tss_selector & 4)
298 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
299 if (load_segment(&e1, &e2, tss_selector) != 0)
300 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
301 if (e2 & DESC_S_MASK)
302 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
303 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
305 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
308 if (!(e2 & DESC_P_MASK))
309 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
315 tss_limit = get_seg_limit(e1, e2);
316 tss_base = get_seg_base(e1, e2);
317 if ((tss_selector & 4) != 0 ||
318 tss_limit < tss_limit_max)
319 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
320 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
322 old_tss_limit_max = 103;
324 old_tss_limit_max = 43;
326 /* read all the registers from the new TSS */
329 new_cr3 = ldl_kernel(tss_base + 0x1c);
330 new_eip = ldl_kernel(tss_base + 0x20);
331 new_eflags = ldl_kernel(tss_base + 0x24);
332 for(i = 0; i < 8; i++)
333 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
334 for(i = 0; i < 6; i++)
335 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
336 new_ldt = lduw_kernel(tss_base + 0x60);
337 new_trap = ldl_kernel(tss_base + 0x64);
341 new_eip = lduw_kernel(tss_base + 0x0e);
342 new_eflags = lduw_kernel(tss_base + 0x10);
343 for(i = 0; i < 8; i++)
344 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
345 for(i = 0; i < 4; i++)
346 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
347 new_ldt = lduw_kernel(tss_base + 0x2a);
353 /* NOTE: we must avoid memory exceptions during the task switch,
354 so we make dummy accesses before */
355 /* XXX: it can still fail in some cases, so a bigger hack is
356 necessary to validate the TLB after having done the accesses */
358 v1 = ldub_kernel(env->tr.base);
359 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
360 stb_kernel(env->tr.base, v1);
361 stb_kernel(env->tr.base + old_tss_limit_max, v2);
363 /* clear busy bit (it is restartable) */
364 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
367 ptr = env->gdt.base + (env->tr.selector & ~7);
368 e2 = ldl_kernel(ptr + 4);
369 e2 &= ~DESC_TSS_BUSY_MASK;
370 stl_kernel(ptr + 4, e2);
372 old_eflags = compute_eflags();
373 if (source == SWITCH_TSS_IRET)
374 old_eflags &= ~NT_MASK;
376 /* save the current state in the old TSS */
379 stl_kernel(env->tr.base + 0x20, next_eip);
380 stl_kernel(env->tr.base + 0x24, old_eflags);
381 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
382 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
383 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
384 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
385 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
386 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
387 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
388 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
389 for(i = 0; i < 6; i++)
390 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
393 stw_kernel(env->tr.base + 0x0e, next_eip);
394 stw_kernel(env->tr.base + 0x10, old_eflags);
395 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
396 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
397 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
398 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
399 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
400 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
401 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
402 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
403 for(i = 0; i < 4; i++)
404 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
407 /* now if an exception occurs, it will occur in the next task
410 if (source == SWITCH_TSS_CALL) {
411 stw_kernel(tss_base, env->tr.selector);
412 new_eflags |= NT_MASK;
416 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
419 ptr = env->gdt.base + (tss_selector & ~7);
420 e2 = ldl_kernel(ptr + 4);
421 e2 |= DESC_TSS_BUSY_MASK;
422 stl_kernel(ptr + 4, e2);
425 /* set the new CPU state */
426 /* from this point, any exception which occurs can give problems */
427 env->cr[0] |= CR0_TS_MASK;
428 env->hflags |= HF_TS_MASK;
429 env->tr.selector = tss_selector;
430 env->tr.base = tss_base;
431 env->tr.limit = tss_limit;
432 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
434 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
435 cpu_x86_update_cr3(env, new_cr3);
438 /* load all registers without an exception, then reload them with
439 possible exception */
441 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
442 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
444 eflags_mask &= 0xffff;
445 load_eflags(new_eflags, eflags_mask);
446 /* XXX: what to do in 16 bit case ? */
455 if (new_eflags & VM_MASK) {
456 for(i = 0; i < 6; i++)
457 load_seg_vm(i, new_segs[i]);
458 /* in vm86, CPL is always 3 */
459 cpu_x86_set_cpl(env, 3);
461 /* CPL is set to the RPL of CS */
462 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
463 /* load just the selectors first, as the rest may trigger exceptions */
464 for(i = 0; i < 6; i++)
465 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
468 env->ldt.selector = new_ldt & ~4;
475 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
477 if ((new_ldt & 0xfffc) != 0) {
479 index = new_ldt & ~7;
480 if ((index + 7) > dt->limit)
481 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
482 ptr = dt->base + index;
483 e1 = ldl_kernel(ptr);
484 e2 = ldl_kernel(ptr + 4);
485 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
486 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
487 if (!(e2 & DESC_P_MASK))
488 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
489 load_seg_cache_raw_dt(&env->ldt, e1, e2);
492 /* load the segments */
493 if (!(new_eflags & VM_MASK)) {
494 tss_load_seg(R_CS, new_segs[R_CS]);
495 tss_load_seg(R_SS, new_segs[R_SS]);
496 tss_load_seg(R_ES, new_segs[R_ES]);
497 tss_load_seg(R_DS, new_segs[R_DS]);
498 tss_load_seg(R_FS, new_segs[R_FS]);
499 tss_load_seg(R_GS, new_segs[R_GS]);
502 /* check that EIP is in the CS segment limits */
503 if (new_eip > env->segs[R_CS].limit) {
504 /* XXX: different exception if CALL ? */
505 raise_exception_err(EXCP0D_GPF, 0);
508 #ifndef CONFIG_USER_ONLY
509 /* reset local breakpoints */
510 if (env->dr[7] & 0x55) {
511 for (i = 0; i < 4; i++) {
512 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
513 hw_breakpoint_remove(env, i);
520 /* check if port I/O is permitted by the TSS I/O bitmap */
521 static inline void check_io(int addr, int size)
523 int io_offset, val, mask;
525 /* TSS must be a valid 32-bit one */
526 if (!(env->tr.flags & DESC_P_MASK) ||
527 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
530 io_offset = lduw_kernel(env->tr.base + 0x66);
531 io_offset += (addr >> 3);
532 /* Note: the check needs two bytes */
533 if ((io_offset + 1) > env->tr.limit)
535 val = lduw_kernel(env->tr.base + io_offset);
537 mask = (1 << size) - 1;
538 /* all bits must be zero to allow the I/O */
539 if ((val & mask) != 0) {
541 raise_exception_err(EXCP0D_GPF, 0);
545 void helper_check_iob(uint32_t t0)
550 void helper_check_iow(uint32_t t0)
555 void helper_check_iol(uint32_t t0)
560 void helper_outb(uint32_t port, uint32_t data)
562 cpu_outb(env, port, data & 0xff);
565 target_ulong helper_inb(uint32_t port)
567 return cpu_inb(env, port);
570 void helper_outw(uint32_t port, uint32_t data)
572 cpu_outw(env, port, data & 0xffff);
575 target_ulong helper_inw(uint32_t port)
577 return cpu_inw(env, port);
580 void helper_outl(uint32_t port, uint32_t data)
582 cpu_outl(env, port, data);
585 target_ulong helper_inl(uint32_t port)
587 return cpu_inl(env, port);
590 static inline unsigned int get_sp_mask(unsigned int e2)
592 if (e2 & DESC_B_MASK)
599 #define SET_ESP(val, sp_mask)\
601 if ((sp_mask) == 0xffff)\
602 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
603 else if ((sp_mask) == 0xffffffffLL)\
604 ESP = (uint32_t)(val);\
609 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
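/* SET_ESP commits a local stack pointer copy back to ESP while preserving
   the bits that the current stack size (sp_mask) protects; the 64-bit build
   special-cases the 16- and 32-bit masks above so that a 32-bit stack write
   zero-extends the value as the architecture requires. */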
612 /* in 64-bit mode this addition can overflow, so this segment addition macro
613  * can be used to trim the value to 32 bits whenever needed */
614 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
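/* The PUSHW/PUSHL/POPW/POPL macros below work on a caller-provided stack
   pointer copy and address memory through the stack segment base (ssp), so a
   fault in the middle of a multi-push sequence leaves the architectural ESP
   untouched until the caller commits it with SET_ESP. */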
616 /* XXX: add an is_user flag to have proper security support */
617 #define PUSHW(ssp, sp, sp_mask, val)\
620 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
623 #define PUSHL(ssp, sp, sp_mask, val)\
626 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
629 #define POPW(ssp, sp, sp_mask, val)\
631 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
635 #define POPL(ssp, sp, sp_mask, val)\
637 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
641 /* protected mode interrupt */
642 static void do_interrupt_protected(int intno, int is_int, int error_code,
643 unsigned int next_eip, int is_hw)
646 target_ulong ptr, ssp;
647 int type, dpl, selector, ss_dpl, cpl;
648 int has_error_code, new_stack, shift;
649 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
650 uint32_t old_eip, sp_mask;
653 if (!is_int && !is_hw) {
672 if (intno * 8 + 7 > dt->limit)
673 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
674 ptr = dt->base + intno * 8;
675 e1 = ldl_kernel(ptr);
676 e2 = ldl_kernel(ptr + 4);
677 /* check gate type */
678 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
680 case 5: /* task gate */
681 /* must do that check here to return the correct error code */
682 if (!(e2 & DESC_P_MASK))
683 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
684 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
685 if (has_error_code) {
688 /* push the error code */
689 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
691 if (env->segs[R_SS].flags & DESC_B_MASK)
695 esp = (ESP - (2 << shift)) & mask;
696 ssp = env->segs[R_SS].base + esp;
698 stl_kernel(ssp, error_code);
700 stw_kernel(ssp, error_code);
704 case 6: /* 286 interrupt gate */
705 case 7: /* 286 trap gate */
706 case 14: /* 386 interrupt gate */
707 case 15: /* 386 trap gate */
710 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
713 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
714 cpl = env->hflags & HF_CPL_MASK;
715 /* check privilege if software int */
716 if (is_int && dpl < cpl)
717 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
718 /* check valid bit */
719 if (!(e2 & DESC_P_MASK))
720 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
722 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
723 if ((selector & 0xfffc) == 0)
724 raise_exception_err(EXCP0D_GPF, 0);
726 if (load_segment(&e1, &e2, selector) != 0)
727 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
728 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
729 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
730 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
732 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
733 if (!(e2 & DESC_P_MASK))
734 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
735 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
736 /* to inner privilege */
737 get_ss_esp_from_tss(&ss, &esp, dpl);
738 if ((ss & 0xfffc) == 0)
739 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
741 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
742 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
743 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
744 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
746 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
747 if (!(ss_e2 & DESC_S_MASK) ||
748 (ss_e2 & DESC_CS_MASK) ||
749 !(ss_e2 & DESC_W_MASK))
750 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
751 if (!(ss_e2 & DESC_P_MASK))
752 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
754 sp_mask = get_sp_mask(ss_e2);
755 ssp = get_seg_base(ss_e1, ss_e2);
756 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
757 /* to same privilege */
758 if (env->eflags & VM_MASK)
759 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
761 sp_mask = get_sp_mask(env->segs[R_SS].flags);
762 ssp = env->segs[R_SS].base;
766 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
767 new_stack = 0; /* avoid warning */
768 sp_mask = 0; /* avoid warning */
769 ssp = 0; /* avoid warning */
770 esp = 0; /* avoid warning */
776 /* XXX: check that enough room is available */
777 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
778 if (env->eflags & VM_MASK)
784 if (env->eflags & VM_MASK) {
785 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
786 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
787 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
788 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
790 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
791 PUSHL(ssp, esp, sp_mask, ESP);
793 PUSHL(ssp, esp, sp_mask, compute_eflags());
794 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
795 PUSHL(ssp, esp, sp_mask, old_eip);
796 if (has_error_code) {
797 PUSHL(ssp, esp, sp_mask, error_code);
801 if (env->eflags & VM_MASK) {
802 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
803 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
804 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
805 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
807 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
808 PUSHW(ssp, esp, sp_mask, ESP);
810 PUSHW(ssp, esp, sp_mask, compute_eflags());
811 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
812 PUSHW(ssp, esp, sp_mask, old_eip);
813 if (has_error_code) {
814 PUSHW(ssp, esp, sp_mask, error_code);
819 if (env->eflags & VM_MASK) {
820 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
821 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
822 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
823 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
825 ss = (ss & ~3) | dpl;
826 cpu_x86_load_seg_cache(env, R_SS, ss,
827 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
829 SET_ESP(esp, sp_mask);
831 selector = (selector & ~3) | dpl;
832 cpu_x86_load_seg_cache(env, R_CS, selector,
833 get_seg_base(e1, e2),
834 get_seg_limit(e1, e2),
836 cpu_x86_set_cpl(env, dpl);
839 /* interrupt gates clear the IF flag (trap gates leave it set) */
840 if ((type & 1) == 0) {
841 env->eflags &= ~IF_MASK;
843 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
848 #define PUSHQ(sp, val)\
851 stq_kernel(sp, (val));\
854 #define POPQ(sp, val)\
856 val = ldq_kernel(sp);\
860 static inline target_ulong get_rsp_from_tss(int level)
865 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
866 env->tr.base, env->tr.limit);
869 if (!(env->tr.flags & DESC_P_MASK))
870 cpu_abort(env, "invalid tss");
871 index = 8 * level + 4;
872 if ((index + 7) > env->tr.limit)
873 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
874 return ldq_kernel(env->tr.base + index);
877 /* 64 bit interrupt */
878 static void do_interrupt64(int intno, int is_int, int error_code,
879 target_ulong next_eip, int is_hw)
883 int type, dpl, selector, cpl, ist;
884 int has_error_code, new_stack;
885 uint32_t e1, e2, e3, ss;
886 target_ulong old_eip, esp, offset;
889 if (!is_int && !is_hw) {
908 if (intno * 16 + 15 > dt->limit)
909 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
910 ptr = dt->base + intno * 16;
911 e1 = ldl_kernel(ptr);
912 e2 = ldl_kernel(ptr + 4);
913 e3 = ldl_kernel(ptr + 8);
914 /* check gate type */
915 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
917 case 14: /* 386 interrupt gate */
918 case 15: /* 386 trap gate */
921 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
924 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
925 cpl = env->hflags & HF_CPL_MASK;
926 /* check privilege if software int */
927 if (is_int && dpl < cpl)
928 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
929 /* check valid bit */
930 if (!(e2 & DESC_P_MASK))
931 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
933 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
935 if ((selector & 0xfffc) == 0)
936 raise_exception_err(EXCP0D_GPF, 0);
938 if (load_segment(&e1, &e2, selector) != 0)
939 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
940 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
941 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
942 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
944 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
945 if (!(e2 & DESC_P_MASK))
946 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
947 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
948 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
949 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
950 /* to inner privilege */
952 esp = get_rsp_from_tss(ist + 3);
954 esp = get_rsp_from_tss(dpl);
955 esp &= ~0xfLL; /* align stack */
958 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
959 /* to same privilege */
960 if (env->eflags & VM_MASK)
961 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
964 esp = get_rsp_from_tss(ist + 3);
967 esp &= ~0xfLL; /* align stack */
970 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
971 new_stack = 0; /* avoid warning */
972 esp = 0; /* avoid warning */
975 PUSHQ(esp, env->segs[R_SS].selector);
977 PUSHQ(esp, compute_eflags());
978 PUSHQ(esp, env->segs[R_CS].selector);
980 if (has_error_code) {
981 PUSHQ(esp, error_code);
986 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
990 selector = (selector & ~3) | dpl;
991 cpu_x86_load_seg_cache(env, R_CS, selector,
992 get_seg_base(e1, e2),
993 get_seg_limit(e1, e2),
995 cpu_x86_set_cpl(env, dpl);
998 /* interrupt gates clear the IF flag (trap gates leave it set) */
999 if ((type & 1) == 0) {
1000 env->eflags &= ~IF_MASK;
1002 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1006 #ifdef TARGET_X86_64
1007 #if defined(CONFIG_USER_ONLY)
1008 void helper_syscall(int next_eip_addend)
1010 env->exception_index = EXCP_SYSCALL;
1011 env->exception_next_eip = env->eip + next_eip_addend;
1015 void helper_syscall(int next_eip_addend)
1019 if (!(env->efer & MSR_EFER_SCE)) {
1020 raise_exception_err(EXCP06_ILLOP, 0);
1022 selector = (env->star >> 32) & 0xffff;
1023 if (env->hflags & HF_LMA_MASK) {
1026 ECX = env->eip + next_eip_addend;
1027 env->regs[11] = compute_eflags();
1029 code64 = env->hflags & HF_CS64_MASK;
1031 cpu_x86_set_cpl(env, 0);
1032 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1034 DESC_G_MASK | DESC_P_MASK |
1036 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1037 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1039 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1041 DESC_W_MASK | DESC_A_MASK);
1042 env->eflags &= ~env->fmask;
1043 load_eflags(env->eflags, 0);
1045 env->eip = env->lstar;
1047 env->eip = env->cstar;
1049 ECX = (uint32_t)(env->eip + next_eip_addend);
1051 cpu_x86_set_cpl(env, 0);
1052 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1054 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1056 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1057 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1059 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1061 DESC_W_MASK | DESC_A_MASK);
1062 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1063 env->eip = (uint32_t)env->star;
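    /* Note: the SYSCALL targets come from MSRs: STAR[47:32] is the kernel CS
       selector (SS is that value + 8), LSTAR/CSTAR hold the 64-bit and
       compatibility-mode entry points, FMASK lists the RFLAGS bits to clear,
       and in legacy mode the 32-bit entry point is STAR[31:0]. */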
1069 #ifdef TARGET_X86_64
1070 void helper_sysret(int dflag)
1074 if (!(env->efer & MSR_EFER_SCE)) {
1075 raise_exception_err(EXCP06_ILLOP, 0);
1077 cpl = env->hflags & HF_CPL_MASK;
1078 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1079 raise_exception_err(EXCP0D_GPF, 0);
1081 selector = (env->star >> 48) & 0xffff;
1082 if (env->hflags & HF_LMA_MASK) {
1084 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1086 DESC_G_MASK | DESC_P_MASK |
1087 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1088 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1092 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1094 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1095 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1096 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1097 env->eip = (uint32_t)ECX;
1099 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1101 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1102 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1103 DESC_W_MASK | DESC_A_MASK);
1104 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1105 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1106 cpu_x86_set_cpl(env, 3);
1108 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1110 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1111 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1112 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1113 env->eip = (uint32_t)ECX;
1114 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1116 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1117 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1118 DESC_W_MASK | DESC_A_MASK);
1119 env->eflags |= IF_MASK;
1120 cpu_x86_set_cpl(env, 3);
1123 if (kqemu_is_ok(env)) {
1124 if (env->hflags & HF_LMA_MASK)
1125 CC_OP = CC_OP_EFLAGS;
1126 env->exception_index = -1;
1133 /* real mode interrupt */
1134 static void do_interrupt_real(int intno, int is_int, int error_code,
1135 unsigned int next_eip)
1138 target_ulong ptr, ssp;
1140 uint32_t offset, esp;
1141 uint32_t old_cs, old_eip;
1143 /* real mode (simpler !) */
1145 if (intno * 4 + 3 > dt->limit)
1146 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1147 ptr = dt->base + intno * 4;
1148 offset = lduw_kernel(ptr);
1149 selector = lduw_kernel(ptr + 2);
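    /* a real-mode IVT entry is 4 bytes at vector * 4: a 16-bit offset
       followed by a 16-bit segment */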
1151 ssp = env->segs[R_SS].base;
1156 old_cs = env->segs[R_CS].selector;
1157 /* XXX: use SS segment size ? */
1158 PUSHW(ssp, esp, 0xffff, compute_eflags());
1159 PUSHW(ssp, esp, 0xffff, old_cs);
1160 PUSHW(ssp, esp, 0xffff, old_eip);
1162 /* update processor state */
1163 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1165 env->segs[R_CS].selector = selector;
1166 env->segs[R_CS].base = (selector << 4);
1167 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1170 /* fake user mode interrupt */
1171 void do_interrupt_user(int intno, int is_int, int error_code,
1172 target_ulong next_eip)
1176 int dpl, cpl, shift;
1180 if (env->hflags & HF_LMA_MASK) {
1185 ptr = dt->base + (intno << shift);
1186 e2 = ldl_kernel(ptr + 4);
1188 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1189 cpl = env->hflags & HF_CPL_MASK;
1190 /* check privilege if software int */
1191 if (is_int && dpl < cpl)
1192 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1194 /* Since we emulate only user space, we cannot do more than
1195 exit the emulation with the suitable exception and error
1202 * Begin execution of an interrupt. is_int is TRUE if coming from
1203 * the int instruction. next_eip is the EIP value AFTER the interrupt
1204 * instruction. It is only relevant if is_int is TRUE.
1206 void do_interrupt(int intno, int is_int, int error_code,
1207 target_ulong next_eip, int is_hw)
1209 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1210 if ((env->cr[0] & CR0_PE_MASK)) {
1212 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1213 count, intno, error_code, is_int,
1214 env->hflags & HF_CPL_MASK,
1215 env->segs[R_CS].selector, EIP,
1216 (int)env->segs[R_CS].base + EIP,
1217 env->segs[R_SS].selector, ESP);
1218 if (intno == 0x0e) {
1219 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1221 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1224 log_cpu_state(env, X86_DUMP_CCOP);
1230 ptr = env->segs[R_CS].base + env->eip;
1231 for(i = 0; i < 16; i++) {
1232 qemu_log(" %02x", ldub(ptr + i));
1240 if (env->cr[0] & CR0_PE_MASK) {
1241 #ifdef TARGET_X86_64
1242 if (env->hflags & HF_LMA_MASK) {
1243 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1247 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1250 do_interrupt_real(intno, is_int, error_code, next_eip);
1255 * Check nested exceptions and change to double or triple fault if
1256 * needed. It should only be called if this is not an interrupt.
1257 * Returns the new exception number.
1259 static int check_exception(int intno, int *error_code)
1261 int first_contributory = env->old_exception == 0 ||
1262 (env->old_exception >= 10 &&
1263 env->old_exception <= 13);
1264 int second_contributory = intno == 0 ||
1265 (intno >= 10 && intno <= 13);
1267 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1268 env->old_exception, intno);
1270 if (env->old_exception == EXCP08_DBLE)
1271 cpu_abort(env, "triple fault");
1273 if ((first_contributory && second_contributory)
1274 || (env->old_exception == EXCP0E_PAGE &&
1275 (second_contributory || (intno == EXCP0E_PAGE)))) {
1276 intno = EXCP08_DBLE;
1280 if (second_contributory || (intno == EXCP0E_PAGE) ||
1281 (intno == EXCP08_DBLE))
1282 env->old_exception = intno;
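    /* Vectors 0 and 10-13 form the "contributory" class: two contributory
       faults, or a page fault followed by a contributory fault or another
       page fault, escalate to #DF, and a fault while delivering #DF is a
       triple fault (handled by the cpu_abort above). */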
1288 * Signal an interrupt. It is executed in the main CPU loop.
1289 * is_int is TRUE if coming from the int instruction. next_eip is the
1290 * EIP value AFTER the interrupt instruction. It is only relevant if
1293 static void noreturn raise_interrupt(int intno, int is_int, int error_code,
1294 int next_eip_addend)
1297 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1298 intno = check_exception(intno, &error_code);
1300 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1303 env->exception_index = intno;
1304 env->error_code = error_code;
1305 env->exception_is_int = is_int;
1306 env->exception_next_eip = env->eip + next_eip_addend;
1310 /* shortcuts to generate exceptions */
1312 void raise_exception_err(int exception_index, int error_code)
1314 raise_interrupt(exception_index, 0, error_code, 0);
1317 void raise_exception(int exception_index)
1319 raise_interrupt(exception_index, 0, 0, 0);
1324 #if defined(CONFIG_USER_ONLY)
1326 void do_smm_enter(void)
1330 void helper_rsm(void)
1336 #ifdef TARGET_X86_64
1337 #define SMM_REVISION_ID 0x00020064
1339 #define SMM_REVISION_ID 0x00020000
1342 void do_smm_enter(void)
1344 target_ulong sm_state;
1348 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1349 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1351 env->hflags |= HF_SMM_MASK;
1352 cpu_smm_update(env);
1354 sm_state = env->smbase + 0x8000;
1356 #ifdef TARGET_X86_64
1357 for(i = 0; i < 6; i++) {
1359 offset = 0x7e00 + i * 16;
1360 stw_phys(sm_state + offset, dt->selector);
1361 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1362 stl_phys(sm_state + offset + 4, dt->limit);
1363 stq_phys(sm_state + offset + 8, dt->base);
1366 stq_phys(sm_state + 0x7e68, env->gdt.base);
1367 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1369 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1370 stq_phys(sm_state + 0x7e78, env->ldt.base);
1371 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1372 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1374 stq_phys(sm_state + 0x7e88, env->idt.base);
1375 stl_phys(sm_state + 0x7e84, env->idt.limit);
1377 stw_phys(sm_state + 0x7e90, env->tr.selector);
1378 stq_phys(sm_state + 0x7e98, env->tr.base);
1379 stl_phys(sm_state + 0x7e94, env->tr.limit);
1380 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1382 stq_phys(sm_state + 0x7ed0, env->efer);
1384 stq_phys(sm_state + 0x7ff8, EAX);
1385 stq_phys(sm_state + 0x7ff0, ECX);
1386 stq_phys(sm_state + 0x7fe8, EDX);
1387 stq_phys(sm_state + 0x7fe0, EBX);
1388 stq_phys(sm_state + 0x7fd8, ESP);
1389 stq_phys(sm_state + 0x7fd0, EBP);
1390 stq_phys(sm_state + 0x7fc8, ESI);
1391 stq_phys(sm_state + 0x7fc0, EDI);
1392 for(i = 8; i < 16; i++)
1393 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1394 stq_phys(sm_state + 0x7f78, env->eip);
1395 stl_phys(sm_state + 0x7f70, compute_eflags());
1396 stl_phys(sm_state + 0x7f68, env->dr[6]);
1397 stl_phys(sm_state + 0x7f60, env->dr[7]);
1399 stl_phys(sm_state + 0x7f48, env->cr[4]);
1400 stl_phys(sm_state + 0x7f50, env->cr[3]);
1401 stl_phys(sm_state + 0x7f58, env->cr[0]);
1403 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1404 stl_phys(sm_state + 0x7f00, env->smbase);
1406 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1407 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1408 stl_phys(sm_state + 0x7ff4, compute_eflags());
1409 stl_phys(sm_state + 0x7ff0, env->eip);
1410 stl_phys(sm_state + 0x7fec, EDI);
1411 stl_phys(sm_state + 0x7fe8, ESI);
1412 stl_phys(sm_state + 0x7fe4, EBP);
1413 stl_phys(sm_state + 0x7fe0, ESP);
1414 stl_phys(sm_state + 0x7fdc, EBX);
1415 stl_phys(sm_state + 0x7fd8, EDX);
1416 stl_phys(sm_state + 0x7fd4, ECX);
1417 stl_phys(sm_state + 0x7fd0, EAX);
1418 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1419 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1421 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1422 stl_phys(sm_state + 0x7f64, env->tr.base);
1423 stl_phys(sm_state + 0x7f60, env->tr.limit);
1424 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1426 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1427 stl_phys(sm_state + 0x7f80, env->ldt.base);
1428 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1429 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1431 stl_phys(sm_state + 0x7f74, env->gdt.base);
1432 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1434 stl_phys(sm_state + 0x7f58, env->idt.base);
1435 stl_phys(sm_state + 0x7f54, env->idt.limit);
1437 for(i = 0; i < 6; i++) {
1440 offset = 0x7f84 + i * 12;
1442 offset = 0x7f2c + (i - 3) * 12;
1443 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1444 stl_phys(sm_state + offset + 8, dt->base);
1445 stl_phys(sm_state + offset + 4, dt->limit);
1446 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1448 stl_phys(sm_state + 0x7f14, env->cr[4]);
1450 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1451 stl_phys(sm_state + 0x7ef8, env->smbase);
1453 /* init SMM cpu state */
1455 #ifdef TARGET_X86_64
1456 cpu_load_efer(env, 0);
1458 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1459 env->eip = 0x00008000;
1460 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1462 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1463 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1464 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1465 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1466 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1468 cpu_x86_update_cr0(env,
1469 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1470 cpu_x86_update_cr4(env, 0);
1471 env->dr[7] = 0x00000400;
1472 CC_OP = CC_OP_EFLAGS;
1475 void helper_rsm(void)
1477 target_ulong sm_state;
1481 sm_state = env->smbase + 0x8000;
1482 #ifdef TARGET_X86_64
1483 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1485 for(i = 0; i < 6; i++) {
1486 offset = 0x7e00 + i * 16;
1487 cpu_x86_load_seg_cache(env, i,
1488 lduw_phys(sm_state + offset),
1489 ldq_phys(sm_state + offset + 8),
1490 ldl_phys(sm_state + offset + 4),
1491 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1494 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1495 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1497 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1498 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1499 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1500 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1502 env->idt.base = ldq_phys(sm_state + 0x7e88);
1503 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1505 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1506 env->tr.base = ldq_phys(sm_state + 0x7e98);
1507 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1508 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1510 EAX = ldq_phys(sm_state + 0x7ff8);
1511 ECX = ldq_phys(sm_state + 0x7ff0);
1512 EDX = ldq_phys(sm_state + 0x7fe8);
1513 EBX = ldq_phys(sm_state + 0x7fe0);
1514 ESP = ldq_phys(sm_state + 0x7fd8);
1515 EBP = ldq_phys(sm_state + 0x7fd0);
1516 ESI = ldq_phys(sm_state + 0x7fc8);
1517 EDI = ldq_phys(sm_state + 0x7fc0);
1518 for(i = 8; i < 16; i++)
1519 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1520 env->eip = ldq_phys(sm_state + 0x7f78);
1521 load_eflags(ldl_phys(sm_state + 0x7f70),
1522 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1523 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1524 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1526 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1527 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1528 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1530 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1531 if (val & 0x20000) {
1532 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1535 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1536 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1537 load_eflags(ldl_phys(sm_state + 0x7ff4),
1538 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1539 env->eip = ldl_phys(sm_state + 0x7ff0);
1540 EDI = ldl_phys(sm_state + 0x7fec);
1541 ESI = ldl_phys(sm_state + 0x7fe8);
1542 EBP = ldl_phys(sm_state + 0x7fe4);
1543 ESP = ldl_phys(sm_state + 0x7fe0);
1544 EBX = ldl_phys(sm_state + 0x7fdc);
1545 EDX = ldl_phys(sm_state + 0x7fd8);
1546 ECX = ldl_phys(sm_state + 0x7fd4);
1547 EAX = ldl_phys(sm_state + 0x7fd0);
1548 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1549 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1551 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1552 env->tr.base = ldl_phys(sm_state + 0x7f64);
1553 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1554 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1556 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1557 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1558 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1559 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1561 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1562 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1564 env->idt.base = ldl_phys(sm_state + 0x7f58);
1565 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1567 for(i = 0; i < 6; i++) {
1569 offset = 0x7f84 + i * 12;
1571 offset = 0x7f2c + (i - 3) * 12;
1572 cpu_x86_load_seg_cache(env, i,
1573 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1574 ldl_phys(sm_state + offset + 8),
1575 ldl_phys(sm_state + offset + 4),
1576 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1578 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1580 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1581 if (val & 0x20000) {
1582 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1585 CC_OP = CC_OP_EFLAGS;
1586 env->hflags &= ~HF_SMM_MASK;
1587 cpu_smm_update(env);
1589 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
1590 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1593 #endif /* !CONFIG_USER_ONLY */
1596 /* division, flags are undefined */
1598 void helper_divb_AL(target_ulong t0)
1600 unsigned int num, den, q, r;
1602 num = (EAX & 0xffff);
1605 raise_exception(EXCP00_DIVZ);
1609 raise_exception(EXCP00_DIVZ);
1611 r = (num % den) & 0xff;
1612 EAX = (EAX & ~0xffff) | (r << 8) | q;
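/* All the DIV/IDIV helpers follow this pattern: divide the double-width
   dividend by the operand, raise #DE (EXCP00_DIVZ) on a zero divisor or when
   the quotient does not fit in the destination, then write back quotient and
   remainder (AL/AH here, AX/DX and EAX/EDX for the wider forms). */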
1615 void helper_idivb_AL(target_ulong t0)
1622 raise_exception(EXCP00_DIVZ);
1626 raise_exception(EXCP00_DIVZ);
1628 r = (num % den) & 0xff;
1629 EAX = (EAX & ~0xffff) | (r << 8) | q;
1632 void helper_divw_AX(target_ulong t0)
1634 unsigned int num, den, q, r;
1636 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1637 den = (t0 & 0xffff);
1639 raise_exception(EXCP00_DIVZ);
1643 raise_exception(EXCP00_DIVZ);
1645 r = (num % den) & 0xffff;
1646 EAX = (EAX & ~0xffff) | q;
1647 EDX = (EDX & ~0xffff) | r;
1650 void helper_idivw_AX(target_ulong t0)
1654 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1657 raise_exception(EXCP00_DIVZ);
1660 if (q != (int16_t)q)
1661 raise_exception(EXCP00_DIVZ);
1663 r = (num % den) & 0xffff;
1664 EAX = (EAX & ~0xffff) | q;
1665 EDX = (EDX & ~0xffff) | r;
1668 void helper_divl_EAX(target_ulong t0)
1670 unsigned int den, r;
1673 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1676 raise_exception(EXCP00_DIVZ);
1681 raise_exception(EXCP00_DIVZ);
1686 void helper_idivl_EAX(target_ulong t0)
1691 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1694 raise_exception(EXCP00_DIVZ);
1698 if (q != (int32_t)q)
1699 raise_exception(EXCP00_DIVZ);
1706 /* XXX: exception */
1707 void helper_aam(int base)
1713 EAX = (EAX & ~0xffff) | al | (ah << 8);
1717 void helper_aad(int base)
1721 ah = (EAX >> 8) & 0xff;
1722 al = ((ah * base) + al) & 0xff;
1723 EAX = (EAX & ~0xffff) | al;
1727 void helper_aaa(void)
1733 eflags = helper_cc_compute_all(CC_OP);
1736 ah = (EAX >> 8) & 0xff;
1738 icarry = (al > 0xf9);
1739 if (((al & 0x0f) > 9 ) || af) {
1740 al = (al + 6) & 0x0f;
1741 ah = (ah + 1 + icarry) & 0xff;
1742 eflags |= CC_C | CC_A;
1744 eflags &= ~(CC_C | CC_A);
1747 EAX = (EAX & ~0xffff) | al | (ah << 8);
1751 void helper_aas(void)
1757 eflags = helper_cc_compute_all(CC_OP);
1760 ah = (EAX >> 8) & 0xff;
1763 if (((al & 0x0f) > 9 ) || af) {
1764 al = (al - 6) & 0x0f;
1765 ah = (ah - 1 - icarry) & 0xff;
1766 eflags |= CC_C | CC_A;
1768 eflags &= ~(CC_C | CC_A);
1771 EAX = (EAX & ~0xffff) | al | (ah << 8);
1775 void helper_daa(void)
1780 eflags = helper_cc_compute_all(CC_OP);
1786 if (((al & 0x0f) > 9 ) || af) {
1787 al = (al + 6) & 0xff;
1790 if ((al > 0x9f) || cf) {
1791 al = (al + 0x60) & 0xff;
1794 EAX = (EAX & ~0xff) | al;
1795 /* well, speed is not an issue here, so we compute the flags by hand */
1796 eflags |= (al == 0) << 6; /* zf */
1797 eflags |= parity_table[al]; /* pf */
1798 eflags |= (al & 0x80); /* sf */
1802 void helper_das(void)
1804 int al, al1, af, cf;
1807 eflags = helper_cc_compute_all(CC_OP);
1814 if (((al & 0x0f) > 9 ) || af) {
1818 al = (al - 6) & 0xff;
1820 if ((al1 > 0x99) || cf) {
1821 al = (al - 0x60) & 0xff;
1824 EAX = (EAX & ~0xff) | al;
1825 /* well, speed is not an issue here, so we compute the flags by hand */
1826 eflags |= (al == 0) << 6; /* zf */
1827 eflags |= parity_table[al]; /* pf */
1828 eflags |= (al & 0x80); /* sf */
1832 void helper_into(int next_eip_addend)
1835 eflags = helper_cc_compute_all(CC_OP);
1836 if (eflags & CC_O) {
1837 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
1841 void helper_cmpxchg8b(target_ulong a0)
1846 eflags = helper_cc_compute_all(CC_OP);
1848 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1849 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1852 /* always do the store */
1854 EDX = (uint32_t)(d >> 32);
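    /* CMPXCHG8B compares EDX:EAX with the 64-bit memory operand; on a match
       it stores ECX:EBX and sets ZF, otherwise it loads the operand into
       EDX:EAX and clears ZF. The memory operand is written back even when
       the comparison fails, matching the documented always-write behaviour
       of the instruction. */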
1861 #ifdef TARGET_X86_64
1862 void helper_cmpxchg16b(target_ulong a0)
1867 if ((a0 & 0xf) != 0)
1868 raise_exception(EXCP0D_GPF);
1869 eflags = helper_cc_compute_all(CC_OP);
1872 if (d0 == EAX && d1 == EDX) {
1877 /* always do the store */
1888 void helper_single_step(void)
1890 #ifndef CONFIG_USER_ONLY
1891 check_hw_breakpoints(env, 1);
1892 env->dr[6] |= DR6_BS;
1894 raise_exception(EXCP01_DB);
1897 void helper_cpuid(void)
1899 uint32_t eax, ebx, ecx, edx;
1901 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1903 cpu_x86_cpuid(env, (uint32_t)EAX, &eax, &ebx, &ecx, &edx);
1910 void helper_enter_level(int level, int data32, target_ulong t1)
1913 uint32_t esp_mask, esp, ebp;
1915 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1916 ssp = env->segs[R_SS].base;
1925 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1928 stl(ssp + (esp & esp_mask), t1);
1935 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1938 stw(ssp + (esp & esp_mask), t1);
1942 #ifdef TARGET_X86_64
1943 void helper_enter64_level(int level, int data64, target_ulong t1)
1945 target_ulong esp, ebp;
1965 stw(esp, lduw(ebp));
1973 void helper_lldt(int selector)
1977 int index, entry_limit;
1981 if ((selector & 0xfffc) == 0) {
1982 /* XXX: NULL selector case: invalid LDT */
1987 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1989 index = selector & ~7;
1990 #ifdef TARGET_X86_64
1991 if (env->hflags & HF_LMA_MASK)
1996 if ((index + entry_limit) > dt->limit)
1997 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1998 ptr = dt->base + index;
1999 e1 = ldl_kernel(ptr);
2000 e2 = ldl_kernel(ptr + 4);
2001 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2002 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2003 if (!(e2 & DESC_P_MASK))
2004 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2005 #ifdef TARGET_X86_64
2006 if (env->hflags & HF_LMA_MASK) {
2008 e3 = ldl_kernel(ptr + 8);
2009 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2010 env->ldt.base |= (target_ulong)e3 << 32;
2014 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2017 env->ldt.selector = selector;
2020 void helper_ltr(int selector)
2024 int index, type, entry_limit;
2028 if ((selector & 0xfffc) == 0) {
2029 /* NULL selector case: invalid TR */
2035 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2037 index = selector & ~7;
2038 #ifdef TARGET_X86_64
2039 if (env->hflags & HF_LMA_MASK)
2044 if ((index + entry_limit) > dt->limit)
2045 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2046 ptr = dt->base + index;
2047 e1 = ldl_kernel(ptr);
2048 e2 = ldl_kernel(ptr + 4);
2049 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2050 if ((e2 & DESC_S_MASK) ||
2051 (type != 1 && type != 9))
2052 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2053 if (!(e2 & DESC_P_MASK))
2054 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2055 #ifdef TARGET_X86_64
2056 if (env->hflags & HF_LMA_MASK) {
2058 e3 = ldl_kernel(ptr + 8);
2059 e4 = ldl_kernel(ptr + 12);
2060 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2061 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2062 load_seg_cache_raw_dt(&env->tr, e1, e2);
2063 env->tr.base |= (target_ulong)e3 << 32;
2067 load_seg_cache_raw_dt(&env->tr, e1, e2);
2069 e2 |= DESC_TSS_BUSY_MASK;
2070 stl_kernel(ptr + 4, e2);
2072 env->tr.selector = selector;
2075 /* only works in protected mode, not VM86. seg_reg must be != R_CS */
2076 void helper_load_seg(int seg_reg, int selector)
2085 cpl = env->hflags & HF_CPL_MASK;
2086 if ((selector & 0xfffc) == 0) {
2087 /* null selector case */
2089 #ifdef TARGET_X86_64
2090 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2093 raise_exception_err(EXCP0D_GPF, 0);
2094 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2101 index = selector & ~7;
2102 if ((index + 7) > dt->limit)
2103 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2104 ptr = dt->base + index;
2105 e1 = ldl_kernel(ptr);
2106 e2 = ldl_kernel(ptr + 4);
2108 if (!(e2 & DESC_S_MASK))
2109 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2111 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2112 if (seg_reg == R_SS) {
2113 /* must be writable segment */
2114 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2115 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2116 if (rpl != cpl || dpl != cpl)
2117 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2119 /* must be readable segment */
2120 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2121 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2123 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2124 /* if data or non-conforming code, check the rights */
2125 if (dpl < cpl || dpl < rpl)
2126 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2130 if (!(e2 & DESC_P_MASK)) {
2131 if (seg_reg == R_SS)
2132 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2134 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2137 /* set the access bit if not already set */
2138 if (!(e2 & DESC_A_MASK)) {
2140 stl_kernel(ptr + 4, e2);
2143 cpu_x86_load_seg_cache(env, seg_reg, selector,
2144 get_seg_base(e1, e2),
2145 get_seg_limit(e1, e2),
2148 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2149 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2154 /* protected mode jump */
2155 void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2156 int next_eip_addend)
2159 uint32_t e1, e2, cpl, dpl, rpl, limit;
2160 target_ulong next_eip;
2162 if ((new_cs & 0xfffc) == 0)
2163 raise_exception_err(EXCP0D_GPF, 0);
2164 if (load_segment(&e1, &e2, new_cs) != 0)
2165 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2166 cpl = env->hflags & HF_CPL_MASK;
2167 if (e2 & DESC_S_MASK) {
2168 if (!(e2 & DESC_CS_MASK))
2169 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2170 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2171 if (e2 & DESC_C_MASK) {
2172 /* conforming code segment */
2174 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2176 /* non conforming code segment */
2179 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2181 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2183 if (!(e2 & DESC_P_MASK))
2184 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2185 limit = get_seg_limit(e1, e2);
2186 if (new_eip > limit &&
2187 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2188 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2189 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2190 get_seg_base(e1, e2), limit, e2);
2193 /* jump to call or task gate */
2194 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2196 cpl = env->hflags & HF_CPL_MASK;
2197 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2199 case 1: /* 286 TSS */
2200 case 9: /* 386 TSS */
2201 case 5: /* task gate */
2202 if (dpl < cpl || dpl < rpl)
2203 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2204 next_eip = env->eip + next_eip_addend;
2205 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2206 CC_OP = CC_OP_EFLAGS;
2208 case 4: /* 286 call gate */
2209 case 12: /* 386 call gate */
2210 if ((dpl < cpl) || (dpl < rpl))
2211 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2212 if (!(e2 & DESC_P_MASK))
2213 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2215 new_eip = (e1 & 0xffff);
2217 new_eip |= (e2 & 0xffff0000);
2218 if (load_segment(&e1, &e2, gate_cs) != 0)
2219 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2220 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2221 /* must be code segment */
2222 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2223 (DESC_S_MASK | DESC_CS_MASK)))
2224 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2225 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2226 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2227 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2228 if (!(e2 & DESC_P_MASK))
2229 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2230 limit = get_seg_limit(e1, e2);
2231 if (new_eip > limit)
2232 raise_exception_err(EXCP0D_GPF, 0);
2233 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2234 get_seg_base(e1, e2), limit, e2);
2238 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2244 /* real mode call */
2245 void helper_lcall_real(int new_cs, target_ulong new_eip1,
2246 int shift, int next_eip)
2249 uint32_t esp, esp_mask;
2254 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2255 ssp = env->segs[R_SS].base;
2257 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2258 PUSHL(ssp, esp, esp_mask, next_eip);
2260 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2261 PUSHW(ssp, esp, esp_mask, next_eip);
2264 SET_ESP(esp, esp_mask);
2266 env->segs[R_CS].selector = new_cs;
2267 env->segs[R_CS].base = (new_cs << 4);
2270 /* protected mode call */
2271 void helper_lcall_protected(int new_cs, target_ulong new_eip,
2272 int shift, int next_eip_addend)
2275 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2276 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2277 uint32_t val, limit, old_sp_mask;
2278 target_ulong ssp, old_ssp, next_eip;
2280 next_eip = env->eip + next_eip_addend;
2281 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2282 LOG_PCALL_STATE(env);
2283 if ((new_cs & 0xfffc) == 0)
2284 raise_exception_err(EXCP0D_GPF, 0);
2285 if (load_segment(&e1, &e2, new_cs) != 0)
2286 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2287 cpl = env->hflags & HF_CPL_MASK;
2288 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2289 if (e2 & DESC_S_MASK) {
2290 if (!(e2 & DESC_CS_MASK))
2291 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2292 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2293 if (e2 & DESC_C_MASK) {
2294 /* conforming code segment */
2296 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2298 /* non conforming code segment */
2301 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2303 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2305 if (!(e2 & DESC_P_MASK))
2306 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2308 #ifdef TARGET_X86_64
2309 /* XXX: check 16/32 bit cases in long mode */
2314 PUSHQ(rsp, env->segs[R_CS].selector);
2315 PUSHQ(rsp, next_eip);
2316 /* from this point, not restartable */
2318 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2319 get_seg_base(e1, e2),
2320 get_seg_limit(e1, e2), e2);
2326 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2327 ssp = env->segs[R_SS].base;
2329 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2330 PUSHL(ssp, sp, sp_mask, next_eip);
2332 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2333 PUSHW(ssp, sp, sp_mask, next_eip);
2336 limit = get_seg_limit(e1, e2);
2337 if (new_eip > limit)
2338 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2339 /* from this point, not restartable */
2340 SET_ESP(sp, sp_mask);
2341 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2342 get_seg_base(e1, e2), limit, e2);
2346 /* check gate type */
2347 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2348 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2351 case 1: /* available 286 TSS */
2352 case 9: /* available 386 TSS */
2353 case 5: /* task gate */
2354 if (dpl < cpl || dpl < rpl)
2355 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2356 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2357 CC_OP = CC_OP_EFLAGS;
2359 case 4: /* 286 call gate */
2360 case 12: /* 386 call gate */
2363 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2368 if (dpl < cpl || dpl < rpl)
2369 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2370 /* check valid bit */
2371 if (!(e2 & DESC_P_MASK))
2372 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2373 selector = e1 >> 16;
2374 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2375 param_count = e2 & 0x1f;
2376 if ((selector & 0xfffc) == 0)
2377 raise_exception_err(EXCP0D_GPF, 0);
2379 if (load_segment(&e1, &e2, selector) != 0)
2380 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2381 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2382 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2383 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2385 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2386 if (!(e2 & DESC_P_MASK))
2387 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2389 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2390 /* to inner privilege */
2391 get_ss_esp_from_tss(&ss, &sp, dpl);
2392 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2393 ss, sp, param_count, ESP);
2394 if ((ss & 0xfffc) == 0)
2395 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2396 if ((ss & 3) != dpl)
2397 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2398 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2399 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2400 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2402 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2403 if (!(ss_e2 & DESC_S_MASK) ||
2404 (ss_e2 & DESC_CS_MASK) ||
2405 !(ss_e2 & DESC_W_MASK))
2406 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2407 if (!(ss_e2 & DESC_P_MASK))
2408 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2410 // push_size = ((param_count * 2) + 8) << shift;
2412 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2413 old_ssp = env->segs[R_SS].base;
2415 sp_mask = get_sp_mask(ss_e2);
2416 ssp = get_seg_base(ss_e1, ss_e2);
2418 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2419 PUSHL(ssp, sp, sp_mask, ESP);
2420 for(i = param_count - 1; i >= 0; i--) {
2421 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2422 PUSHL(ssp, sp, sp_mask, val);
2425 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2426 PUSHW(ssp, sp, sp_mask, ESP);
2427 for(i = param_count - 1; i >= 0; i--) {
2428 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2429 PUSHW(ssp, sp, sp_mask, val);
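/* Note: for a call gate to a more privileged level, the old SS:ESP
   is pushed on the new stack and the low 5 bits of the gate
   (param_count) give the number of dwords (32 bit gate) or words
   (16 bit gate) copied from the caller's stack. */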
2434 /* to same privilege */
2436 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2437 ssp = env->segs[R_SS].base;
2438 // push_size = (4 << shift);
2443 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2444 PUSHL(ssp, sp, sp_mask, next_eip);
2446 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2447 PUSHW(ssp, sp, sp_mask, next_eip);
2450 /* from this point, not restartable */
2453 ss = (ss & ~3) | dpl;
2454 cpu_x86_load_seg_cache(env, R_SS, ss,
2456 get_seg_limit(ss_e1, ss_e2),
2460 selector = (selector & ~3) | dpl;
2461 cpu_x86_load_seg_cache(env, R_CS, selector,
2462 get_seg_base(e1, e2),
2463 get_seg_limit(e1, e2),
2465 cpu_x86_set_cpl(env, dpl);
2466 SET_ESP(sp, sp_mask);
2470 if (kqemu_is_ok(env)) {
2471 env->exception_index = -1;
2477 /* real and vm86 mode iret */
2478 void helper_iret_real(int shift)
2480 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2484 sp_mask = 0xffff; /* XXX: use SS segment size ? */
2486 ssp = env->segs[R_SS].base;
2489 POPL(ssp, sp, sp_mask, new_eip);
2490 POPL(ssp, sp, sp_mask, new_cs);
2492 POPL(ssp, sp, sp_mask, new_eflags);
2495 POPW(ssp, sp, sp_mask, new_eip);
2496 POPW(ssp, sp, sp_mask, new_cs);
2497 POPW(ssp, sp, sp_mask, new_eflags);
2499 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2500 env->segs[R_CS].selector = new_cs;
2501 env->segs[R_CS].base = (new_cs << 4);
2503 if (env->eflags & VM_MASK)
2504 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2506 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2508 eflags_mask &= 0xffff;
2509 load_eflags(new_eflags, eflags_mask);
2510 env->hflags2 &= ~HF2_NMI_MASK;
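/* Note: any IRET, including the real/vm86 one above, clears the
   NMI-blocked flag and therefore re-enables NMI delivery. */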
2513 static inline void validate_seg(int seg_reg, int cpl)
2518 /* XXX: on x86_64, we do not want to nullify FS and GS because
2519 they may still contain a valid base. I would be interested to
2520 know how a real x86_64 CPU behaves */
2521 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2522 (env->segs[seg_reg].selector & 0xfffc) == 0)
2525 e2 = env->segs[seg_reg].flags;
2526 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2527 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2528 /* data or non conforming code segment */
2530 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
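/* Note: validate_seg() implements the check done by RET/IRET to an
   outer privilege level: data and non-conforming code segments whose
   DPL is below the privilege level being returned to are nulled so
   that the less privileged code cannot keep using them. */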
2535 /* protected mode iret */
2536 static inline void helper_ret_protected(int shift, int is_iret, int addend)
2538 uint32_t new_cs, new_eflags, new_ss;
2539 uint32_t new_es, new_ds, new_fs, new_gs;
2540 uint32_t e1, e2, ss_e1, ss_e2;
2541 int cpl, dpl, rpl, eflags_mask, iopl;
2542 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2544 #ifdef TARGET_X86_64
2549 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2551 ssp = env->segs[R_SS].base;
2552 new_eflags = 0; /* avoid warning */
2553 #ifdef TARGET_X86_64
2559 POPQ(sp, new_eflags);
2565 POPL(ssp, sp, sp_mask, new_eip);
2566 POPL(ssp, sp, sp_mask, new_cs);
2569 POPL(ssp, sp, sp_mask, new_eflags);
2570 if (new_eflags & VM_MASK)
2571 goto return_to_vm86;
2575 POPW(ssp, sp, sp_mask, new_eip);
2576 POPW(ssp, sp, sp_mask, new_cs);
2578 POPW(ssp, sp, sp_mask, new_eflags);
2580 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2581 new_cs, new_eip, shift, addend);
2582 LOG_PCALL_STATE(env);
2583 if ((new_cs & 0xfffc) == 0)
2584 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2585 if (load_segment(&e1, &e2, new_cs) != 0)
2586 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2587 if (!(e2 & DESC_S_MASK) ||
2588 !(e2 & DESC_CS_MASK))
2589 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2590 cpl = env->hflags & HF_CPL_MASK;
2593 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2594 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2595 if (e2 & DESC_C_MASK) {
2597 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2600 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2602 if (!(e2 & DESC_P_MASK))
2603 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2606 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2607 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2608 /* return to same privilege level */
2609 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2610 get_seg_base(e1, e2),
2611 get_seg_limit(e1, e2),
2614 /* return to different privilege level */
2615 #ifdef TARGET_X86_64
2624 POPL(ssp, sp, sp_mask, new_esp);
2625 POPL(ssp, sp, sp_mask, new_ss);
2629 POPW(ssp, sp, sp_mask, new_esp);
2630 POPW(ssp, sp, sp_mask, new_ss);
2632 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2634 if ((new_ss & 0xfffc) == 0) {
2635 #ifdef TARGET_X86_64
2636 /* NULL ss is allowed in long mode if cpl != 3 */
2637 /* XXX: test CS64 ? */
2638 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2639 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2641 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2642 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2643 DESC_W_MASK | DESC_A_MASK);
2644 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2648 raise_exception_err(EXCP0D_GPF, 0);
2651 if ((new_ss & 3) != rpl)
2652 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2653 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2654 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2655 if (!(ss_e2 & DESC_S_MASK) ||
2656 (ss_e2 & DESC_CS_MASK) ||
2657 !(ss_e2 & DESC_W_MASK))
2658 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2659 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2661 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2662 if (!(ss_e2 & DESC_P_MASK))
2663 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2664 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2665 get_seg_base(ss_e1, ss_e2),
2666 get_seg_limit(ss_e1, ss_e2),
2670 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2671 get_seg_base(e1, e2),
2672 get_seg_limit(e1, e2),
2674 cpu_x86_set_cpl(env, rpl);
2676 #ifdef TARGET_X86_64
2677 if (env->hflags & HF_CS64_MASK)
2681 sp_mask = get_sp_mask(ss_e2);
2683 /* validate data segments */
2684 validate_seg(R_ES, rpl);
2685 validate_seg(R_DS, rpl);
2686 validate_seg(R_FS, rpl);
2687 validate_seg(R_GS, rpl);
2691 SET_ESP(sp, sp_mask);
2694 /* NOTE: 'cpl' is the _old_ CPL */
2695 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2697 eflags_mask |= IOPL_MASK;
2698 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2700 eflags_mask |= IF_MASK;
2702 eflags_mask &= 0xffff;
2703 load_eflags(new_eflags, eflags_mask);
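/* Note: which EFLAGS bits an IRET may modify depends on the old CPL:
   TF/AC/ID/RF/NT are always writable, IOPL only at CPL 0 and IF only
   when CPL <= IOPL; a 16 bit operand size restricts the update to
   the low 16 bits. */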
2708 POPL(ssp, sp, sp_mask, new_esp);
2709 POPL(ssp, sp, sp_mask, new_ss);
2710 POPL(ssp, sp, sp_mask, new_es);
2711 POPL(ssp, sp, sp_mask, new_ds);
2712 POPL(ssp, sp, sp_mask, new_fs);
2713 POPL(ssp, sp, sp_mask, new_gs);
2715 /* modify processor state */
2716 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2717 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2718 load_seg_vm(R_CS, new_cs & 0xffff);
2719 cpu_x86_set_cpl(env, 3);
2720 load_seg_vm(R_SS, new_ss & 0xffff);
2721 load_seg_vm(R_ES, new_es & 0xffff);
2722 load_seg_vm(R_DS, new_ds & 0xffff);
2723 load_seg_vm(R_FS, new_fs & 0xffff);
2724 load_seg_vm(R_GS, new_gs & 0xffff);
2726 env->eip = new_eip & 0xffff;
2730 void helper_iret_protected(int shift, int next_eip)
2732 int tss_selector, type;
2735 /* specific case for TSS */
2736 if (env->eflags & NT_MASK) {
2737 #ifdef TARGET_X86_64
2738 if (env->hflags & HF_LMA_MASK)
2739 raise_exception_err(EXCP0D_GPF, 0);
2741 tss_selector = lduw_kernel(env->tr.base + 0);
2742 if (tss_selector & 4)
2743 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2744 if (load_segment(&e1, &e2, tss_selector) != 0)
2745 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2746 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2747 /* NOTE: we check both segment and busy TSS */
2749 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2750 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2752 helper_ret_protected(shift, 1, 0);
2754 env->hflags2 &= ~HF2_NMI_MASK;
2756 if (kqemu_is_ok(env)) {
2757 CC_OP = CC_OP_EFLAGS;
2758 env->exception_index = -1;
2764 void helper_lret_protected(int shift, int addend)
2766 helper_ret_protected(shift, 0, addend);
2768 if (kqemu_is_ok(env)) {
2769 env->exception_index = -1;
2775 void helper_sysenter(void)
2777 if (env->sysenter_cs == 0) {
2778 raise_exception_err(EXCP0D_GPF, 0);
2780 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2781 cpu_x86_set_cpl(env, 0);
2783 #ifdef TARGET_X86_64
2784 if (env->hflags & HF_LMA_MASK) {
2785 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2787 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2789 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2793 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2795 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2797 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2799 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2801 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2803 DESC_W_MASK | DESC_A_MASK);
2804 ESP = env->sysenter_esp;
2805 EIP = env->sysenter_eip;
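/* Note: SYSENTER builds flat ring 0 segments from the SYSENTER_CS
   MSR: CS = SYSENTER_CS, SS = SYSENTER_CS + 8; SYSEXIT below returns
   to ring 3 using SYSENTER_CS + 16/+24 (legacy) or +32/+40 (64 bit). */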
2808 void helper_sysexit(int dflag)
2812 cpl = env->hflags & HF_CPL_MASK;
2813 if (env->sysenter_cs == 0 || cpl != 0) {
2814 raise_exception_err(EXCP0D_GPF, 0);
2816 cpu_x86_set_cpl(env, 3);
2817 #ifdef TARGET_X86_64
2819 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2821 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2822 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2823 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2824 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2826 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2827 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2828 DESC_W_MASK | DESC_A_MASK);
2832 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2834 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2835 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2836 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2837 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2839 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2840 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2841 DESC_W_MASK | DESC_A_MASK);
2846 if (kqemu_is_ok(env)) {
2847 env->exception_index = -1;
2853 #if defined(CONFIG_USER_ONLY)
2854 target_ulong helper_read_crN(int reg)
2859 void helper_write_crN(int reg, target_ulong t0)
2863 void helper_movl_drN_T0(int reg, target_ulong t0)
2867 target_ulong helper_read_crN(int reg)
2871 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2877 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2878 val = cpu_get_apic_tpr(env);
2887 void helper_write_crN(int reg, target_ulong t0)
2889 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2892 cpu_x86_update_cr0(env, t0);
2895 cpu_x86_update_cr3(env, t0);
2898 cpu_x86_update_cr4(env, t0);
2901 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2902 cpu_set_apic_tpr(env, t0);
2904 env->v_tpr = t0 & 0x0f;
2912 void helper_movl_drN_T0(int reg, target_ulong t0)
2917 hw_breakpoint_remove(env, reg);
2919 hw_breakpoint_insert(env, reg);
2920 } else if (reg == 7) {
2921 for (i = 0; i < 4; i++)
2922 hw_breakpoint_remove(env, i);
2924 for (i = 0; i < 4; i++)
2925 hw_breakpoint_insert(env, i);
2931 void helper_lmsw(target_ulong t0)
2933 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2934 if already set to one. */
2935 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2936 helper_write_crN(0, t0);
2939 void helper_clts(void)
2941 env->cr[0] &= ~CR0_TS_MASK;
2942 env->hflags &= ~HF_TS_MASK;
2945 void helper_invlpg(target_ulong addr)
2947 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
2948 tlb_flush_page(env, addr);
2951 void helper_rdtsc(void)
2955 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2956 raise_exception(EXCP0D_GPF);
2958 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
2960 val = cpu_get_tsc(env) + env->tsc_offset;
2961 EAX = (uint32_t)(val);
2962 EDX = (uint32_t)(val >> 32);
2965 void helper_rdpmc(void)
2967 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2968 raise_exception(EXCP0D_GPF);
2970 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
2972 /* currently unimplemented */
2973 raise_exception_err(EXCP06_ILLOP, 0);
2976 #if defined(CONFIG_USER_ONLY)
2977 void helper_wrmsr(void)
2981 void helper_rdmsr(void)
2985 void helper_wrmsr(void)
2989 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
2991 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2993 switch((uint32_t)ECX) {
2994 case MSR_IA32_SYSENTER_CS:
2995 env->sysenter_cs = val & 0xffff;
2997 case MSR_IA32_SYSENTER_ESP:
2998 env->sysenter_esp = val;
3000 case MSR_IA32_SYSENTER_EIP:
3001 env->sysenter_eip = val;
3003 case MSR_IA32_APICBASE:
3004 cpu_set_apic_base(env, val);
3008 uint64_t update_mask;
3010 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3011 update_mask |= MSR_EFER_SCE;
3012 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3013 update_mask |= MSR_EFER_LME;
3014 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3015 update_mask |= MSR_EFER_FFXSR;
3016 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3017 update_mask |= MSR_EFER_NXE;
3018 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3019 update_mask |= MSR_EFER_SVME;
3020 cpu_load_efer(env, (env->efer & ~update_mask) |
3021 (val & update_mask));
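/* Note: only the EFER bits backed by a CPUID feature (SCE, LME,
   FFXSR, NXE, SVME) are writable; all other bits keep their
   previous value. */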
3030 case MSR_VM_HSAVE_PA:
3031 env->vm_hsave = val;
3033 #ifdef TARGET_X86_64
3044 env->segs[R_FS].base = val;
3047 env->segs[R_GS].base = val;
3049 case MSR_KERNELGSBASE:
3050 env->kernelgsbase = val;
3053 case MSR_MTRRphysBase(0):
3054 case MSR_MTRRphysBase(1):
3055 case MSR_MTRRphysBase(2):
3056 case MSR_MTRRphysBase(3):
3057 case MSR_MTRRphysBase(4):
3058 case MSR_MTRRphysBase(5):
3059 case MSR_MTRRphysBase(6):
3060 case MSR_MTRRphysBase(7):
3061 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3063 case MSR_MTRRphysMask(0):
3064 case MSR_MTRRphysMask(1):
3065 case MSR_MTRRphysMask(2):
3066 case MSR_MTRRphysMask(3):
3067 case MSR_MTRRphysMask(4):
3068 case MSR_MTRRphysMask(5):
3069 case MSR_MTRRphysMask(6):
3070 case MSR_MTRRphysMask(7):
3071 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3073 case MSR_MTRRfix64K_00000:
3074 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3076 case MSR_MTRRfix16K_80000:
3077 case MSR_MTRRfix16K_A0000:
3078 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3080 case MSR_MTRRfix4K_C0000:
3081 case MSR_MTRRfix4K_C8000:
3082 case MSR_MTRRfix4K_D0000:
3083 case MSR_MTRRfix4K_D8000:
3084 case MSR_MTRRfix4K_E0000:
3085 case MSR_MTRRfix4K_E8000:
3086 case MSR_MTRRfix4K_F0000:
3087 case MSR_MTRRfix4K_F8000:
3088 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
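/* Note: mtrr_fixed[] is laid out as one 64K range (index 0), two 16K
   ranges (1-2) and eight 4K ranges (3-10); the variable MTRRs are
   base/mask MSR pairs, hence the division by 2 of the MSR offset. */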
3090 case MSR_MTRRdefType:
3091 env->mtrr_deftype = val;
3094 /* XXX: exception ? */
3099 void helper_rdmsr(void)
3103 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3105 switch((uint32_t)ECX) {
3106 case MSR_IA32_SYSENTER_CS:
3107 val = env->sysenter_cs;
3109 case MSR_IA32_SYSENTER_ESP:
3110 val = env->sysenter_esp;
3112 case MSR_IA32_SYSENTER_EIP:
3113 val = env->sysenter_eip;
3115 case MSR_IA32_APICBASE:
3116 val = cpu_get_apic_base(env);
3127 case MSR_VM_HSAVE_PA:
3128 val = env->vm_hsave;
3130 case MSR_IA32_PERF_STATUS:
3131 /* tsc_increment_by_tick */
3133 /* CPU multiplier */
3134 val |= (((uint64_t)4ULL) << 40);
3136 #ifdef TARGET_X86_64
3147 val = env->segs[R_FS].base;
3150 val = env->segs[R_GS].base;
3152 case MSR_KERNELGSBASE:
3153 val = env->kernelgsbase;
3157 case MSR_QPI_COMMBASE:
3158 if (env->kqemu_enabled) {
3159 val = kqemu_comm_base;
3165 case MSR_MTRRphysBase(0):
3166 case MSR_MTRRphysBase(1):
3167 case MSR_MTRRphysBase(2):
3168 case MSR_MTRRphysBase(3):
3169 case MSR_MTRRphysBase(4):
3170 case MSR_MTRRphysBase(5):
3171 case MSR_MTRRphysBase(6):
3172 case MSR_MTRRphysBase(7):
3173 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3175 case MSR_MTRRphysMask(0):
3176 case MSR_MTRRphysMask(1):
3177 case MSR_MTRRphysMask(2):
3178 case MSR_MTRRphysMask(3):
3179 case MSR_MTRRphysMask(4):
3180 case MSR_MTRRphysMask(5):
3181 case MSR_MTRRphysMask(6):
3182 case MSR_MTRRphysMask(7):
3183 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3185 case MSR_MTRRfix64K_00000:
3186 val = env->mtrr_fixed[0];
3188 case MSR_MTRRfix16K_80000:
3189 case MSR_MTRRfix16K_A0000:
3190 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3192 case MSR_MTRRfix4K_C0000:
3193 case MSR_MTRRfix4K_C8000:
3194 case MSR_MTRRfix4K_D0000:
3195 case MSR_MTRRfix4K_D8000:
3196 case MSR_MTRRfix4K_E0000:
3197 case MSR_MTRRfix4K_E8000:
3198 case MSR_MTRRfix4K_F0000:
3199 case MSR_MTRRfix4K_F8000:
3200 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3202 case MSR_MTRRdefType:
3203 val = env->mtrr_deftype;
3206 /* XXX: exception ? */
3210 EAX = (uint32_t)(val);
3211 EDX = (uint32_t)(val >> 32);
3215 target_ulong helper_lsl(target_ulong selector1)
3218 uint32_t e1, e2, eflags, selector;
3219 int rpl, dpl, cpl, type;
3221 selector = selector1 & 0xffff;
3222 eflags = helper_cc_compute_all(CC_OP);
3223 if (load_segment(&e1, &e2, selector) != 0)
3226 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3227 cpl = env->hflags & HF_CPL_MASK;
3228 if (e2 & DESC_S_MASK) {
3229 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3232 if (dpl < cpl || dpl < rpl)
3236 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3247 if (dpl < cpl || dpl < rpl) {
3249 CC_SRC = eflags & ~CC_Z;
3253 limit = get_seg_limit(e1, e2);
3254 CC_SRC = eflags | CC_Z;
3258 target_ulong helper_lar(target_ulong selector1)
3260 uint32_t e1, e2, eflags, selector;
3261 int rpl, dpl, cpl, type;
3263 selector = selector1 & 0xffff;
3264 eflags = helper_cc_compute_all(CC_OP);
3265 if ((selector & 0xfffc) == 0)
3267 if (load_segment(&e1, &e2, selector) != 0)
3270 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3271 cpl = env->hflags & HF_CPL_MASK;
3272 if (e2 & DESC_S_MASK) {
3273 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3276 if (dpl < cpl || dpl < rpl)
3280 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3294 if (dpl < cpl || dpl < rpl) {
3296 CC_SRC = eflags & ~CC_Z;
3300 CC_SRC = eflags | CC_Z;
3301 return e2 & 0x00f0ff00;
3304 void helper_verr(target_ulong selector1)
3306 uint32_t e1, e2, eflags, selector;
3309 selector = selector1 & 0xffff;
3310 eflags = helper_cc_compute_all(CC_OP);
3311 if ((selector & 0xfffc) == 0)
3313 if (load_segment(&e1, &e2, selector) != 0)
3315 if (!(e2 & DESC_S_MASK))
3318 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3319 cpl = env->hflags & HF_CPL_MASK;
3320 if (e2 & DESC_CS_MASK) {
3321 if (!(e2 & DESC_R_MASK))
3323 if (!(e2 & DESC_C_MASK)) {
3324 if (dpl < cpl || dpl < rpl)
3328 if (dpl < cpl || dpl < rpl) {
3330 CC_SRC = eflags & ~CC_Z;
3334 CC_SRC = eflags | CC_Z;
3337 void helper_verw(target_ulong selector1)
3339 uint32_t e1, e2, eflags, selector;
3342 selector = selector1 & 0xffff;
3343 eflags = helper_cc_compute_all(CC_OP);
3344 if ((selector & 0xfffc) == 0)
3346 if (load_segment(&e1, &e2, selector) != 0)
3348 if (!(e2 & DESC_S_MASK))
3351 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3352 cpl = env->hflags & HF_CPL_MASK;
3353 if (e2 & DESC_CS_MASK) {
3356 if (dpl < cpl || dpl < rpl)
3358 if (!(e2 & DESC_W_MASK)) {
3360 CC_SRC = eflags & ~CC_Z;
3364 CC_SRC = eflags | CC_Z;
3367 /* x87 FPU helpers */
3369 static void fpu_set_exception(int mask)
3372 if (env->fpus & (~env->fpuc & FPUC_EM))
3373 env->fpus |= FPUS_SE | FPUS_B;
3376 static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3379 fpu_set_exception(FPUS_ZE);
3383 static void fpu_raise_exception(void)
3385 if (env->cr[0] & CR0_NE_MASK) {
3386 raise_exception(EXCP10_COPR);
3388 #if !defined(CONFIG_USER_ONLY)
3395 void helper_flds_FT0(uint32_t val)
3402 FT0 = float32_to_floatx(u.f, &env->fp_status);
3405 void helper_fldl_FT0(uint64_t val)
3412 FT0 = float64_to_floatx(u.f, &env->fp_status);
3415 void helper_fildl_FT0(int32_t val)
3417 FT0 = int32_to_floatx(val, &env->fp_status);
3420 void helper_flds_ST0(uint32_t val)
3427 new_fpstt = (env->fpstt - 1) & 7;
3429 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3430 env->fpstt = new_fpstt;
3431 env->fptags[new_fpstt] = 0; /* validate stack entry */
3434 void helper_fldl_ST0(uint64_t val)
3441 new_fpstt = (env->fpstt - 1) & 7;
3443 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3444 env->fpstt = new_fpstt;
3445 env->fptags[new_fpstt] = 0; /* validate stack entry */
3448 void helper_fildl_ST0(int32_t val)
3451 new_fpstt = (env->fpstt - 1) & 7;
3452 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3453 env->fpstt = new_fpstt;
3454 env->fptags[new_fpstt] = 0; /* validate stack entry */
3457 void helper_fildll_ST0(int64_t val)
3460 new_fpstt = (env->fpstt - 1) & 7;
3461 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3462 env->fpstt = new_fpstt;
3463 env->fptags[new_fpstt] = 0; /* validate stack entry */
3466 uint32_t helper_fsts_ST0(void)
3472 u.f = floatx_to_float32(ST0, &env->fp_status);
3476 uint64_t helper_fstl_ST0(void)
3482 u.f = floatx_to_float64(ST0, &env->fp_status);
3486 int32_t helper_fist_ST0(void)
3489 val = floatx_to_int32(ST0, &env->fp_status);
3490 if (val != (int16_t)val)
3495 int32_t helper_fistl_ST0(void)
3498 val = floatx_to_int32(ST0, &env->fp_status);
3502 int64_t helper_fistll_ST0(void)
3505 val = floatx_to_int64(ST0, &env->fp_status);
3509 int32_t helper_fistt_ST0(void)
3512 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3513 if (val != (int16_t)val)
3518 int32_t helper_fisttl_ST0(void)
3521 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3525 int64_t helper_fisttll_ST0(void)
3528 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3532 void helper_fldt_ST0(target_ulong ptr)
3535 new_fpstt = (env->fpstt - 1) & 7;
3536 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3537 env->fpstt = new_fpstt;
3538 env->fptags[new_fpstt] = 0; /* validate stack entry */
3541 void helper_fstt_ST0(target_ulong ptr)
3543 helper_fstt(ST0, ptr);
3546 void helper_fpush(void)
3551 void helper_fpop(void)
3556 void helper_fdecstp(void)
3558 env->fpstt = (env->fpstt - 1) & 7;
3559 env->fpus &= (~0x4700);
3562 void helper_fincstp(void)
3564 env->fpstt = (env->fpstt + 1) & 7;
3565 env->fpus &= (~0x4700);
3570 void helper_ffree_STN(int st_index)
3572 env->fptags[(env->fpstt + st_index) & 7] = 1;
3575 void helper_fmov_ST0_FT0(void)
3580 void helper_fmov_FT0_STN(int st_index)
3585 void helper_fmov_ST0_STN(int st_index)
3590 void helper_fmov_STN_ST0(int st_index)
3595 void helper_fxchg_ST0_STN(int st_index)
3603 /* FPU operations */
3605 static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3607 void helper_fcom_ST0_FT0(void)
3611 ret = floatx_compare(ST0, FT0, &env->fp_status);
3612 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3615 void helper_fucom_ST0_FT0(void)
3619 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3620 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3623 static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3625 void helper_fcomi_ST0_FT0(void)
3630 ret = floatx_compare(ST0, FT0, &env->fp_status);
3631 eflags = helper_cc_compute_all(CC_OP);
3632 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3636 void helper_fucomi_ST0_FT0(void)
3641 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3642 eflags = helper_cc_compute_all(CC_OP);
3643 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3647 void helper_fadd_ST0_FT0(void)
3652 void helper_fmul_ST0_FT0(void)
3657 void helper_fsub_ST0_FT0(void)
3662 void helper_fsubr_ST0_FT0(void)
3667 void helper_fdiv_ST0_FT0(void)
3669 ST0 = helper_fdiv(ST0, FT0);
3672 void helper_fdivr_ST0_FT0(void)
3674 ST0 = helper_fdiv(FT0, ST0);
3677 /* fp operations between STN and ST0 */
3679 void helper_fadd_STN_ST0(int st_index)
3681 ST(st_index) += ST0;
3684 void helper_fmul_STN_ST0(int st_index)
3686 ST(st_index) *= ST0;
3689 void helper_fsub_STN_ST0(int st_index)
3691 ST(st_index) -= ST0;
3694 void helper_fsubr_STN_ST0(int st_index)
3701 void helper_fdiv_STN_ST0(int st_index)
3705 *p = helper_fdiv(*p, ST0);
3708 void helper_fdivr_STN_ST0(int st_index)
3712 *p = helper_fdiv(ST0, *p);
3715 /* misc FPU operations */
3716 void helper_fchs_ST0(void)
3718 ST0 = floatx_chs(ST0);
3721 void helper_fabs_ST0(void)
3723 ST0 = floatx_abs(ST0);
3726 void helper_fld1_ST0(void)
3731 void helper_fldl2t_ST0(void)
3736 void helper_fldl2e_ST0(void)
3741 void helper_fldpi_ST0(void)
3746 void helper_fldlg2_ST0(void)
3751 void helper_fldln2_ST0(void)
3756 void helper_fldz_ST0(void)
3761 void helper_fldz_FT0(void)
3766 uint32_t helper_fnstsw(void)
3768 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
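/* Note: bits 13..11 of the FPU status word hold the top-of-stack
   pointer, so FNSTSW merges env->fpstt into the cleared TOP field. */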
3771 uint32_t helper_fnstcw(void)
3776 static void update_fp_status(void)
3780 /* set rounding mode */
3781 switch(env->fpuc & RC_MASK) {
3784 rnd_type = float_round_nearest_even;
3787 rnd_type = float_round_down;
3790 rnd_type = float_round_up;
3793 rnd_type = float_round_to_zero;
3796 set_float_rounding_mode(rnd_type, &env->fp_status);
3798 switch((env->fpuc >> 8) & 3) {
3810 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
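/* Note: the control word RC field (bits 11:10) selects nearest/down/
   up/chop rounding and the PC field (bits 9:8) selects single, double
   or extended precision for the softfloat status. */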
3814 void helper_fldcw(uint32_t val)
3820 void helper_fclex(void)
3822 env->fpus &= 0x7f00;
3825 void helper_fwait(void)
3827 if (env->fpus & FPUS_SE)
3828 fpu_raise_exception();
3831 void helper_fninit(void)
3848 void helper_fbld_ST0(target_ulong ptr)
3856 for(i = 8; i >= 0; i--) {
3858 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3861 if (ldub(ptr + 9) & 0x80)
3867 void helper_fbst_ST0(target_ulong ptr)
3870 target_ulong mem_ref, mem_end;
3873 val = floatx_to_int64(ST0, &env->fp_status);
3875 mem_end = mem_ref + 9;
3882 while (mem_ref < mem_end) {
3887 v = ((v / 10) << 4) | (v % 10);
3890 while (mem_ref < mem_end) {
3895 void helper_f2xm1(void)
3897 ST0 = pow(2.0, ST0) - 1.0;
3900 void helper_fyl2x(void)
3902 CPU86_LDouble fptemp;
3906 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3910 env->fpus &= (~0x4700);
3915 void helper_fptan(void)
3917 CPU86_LDouble fptemp;
3920 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3926 env->fpus &= (~0x400); /* C2 <-- 0 */
3927 /* the above code is for |arg| < 2**52 only */
3931 void helper_fpatan(void)
3933 CPU86_LDouble fptemp, fpsrcop;
3937 ST1 = atan2(fpsrcop,fptemp);
3941 void helper_fxtract(void)
3943 CPU86_LDoubleU temp;
3944 unsigned int expdif;
3947 expdif = EXPD(temp) - EXPBIAS;
3948 /* DP exponent bias */
3955 void helper_fprem1(void)
3957 CPU86_LDouble dblq, fpsrcop, fptemp;
3958 CPU86_LDoubleU fpsrcop1, fptemp1;
3960 signed long long int q;
3962 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3963 ST0 = 0.0 / 0.0; /* NaN */
3964 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3970 fpsrcop1.d = fpsrcop;
3972 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3975 /* optimisation? taken from the AMD docs */
3976 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3977 /* ST0 is unchanged */
3982 dblq = fpsrcop / fptemp;
3983 /* round dblq towards nearest integer */
3985 ST0 = fpsrcop - fptemp * dblq;
3987 /* convert dblq to q by truncating towards zero */
3989 q = (signed long long int)(-dblq);
3991 q = (signed long long int)dblq;
3993 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3994 /* (C0,C3,C1) <-- (q2,q1,q0) */
3995 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3996 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3997 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
3999 env->fpus |= 0x400; /* C2 <-- 1 */
4000 fptemp = pow(2.0, expdif - 50);
4001 fpsrcop = (ST0 / ST1) / fptemp;
4002 /* fpsrcop = integer obtained by chopping */
4003 fpsrcop = (fpsrcop < 0.0) ?
4004 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4005 ST0 -= (ST1 * fpsrcop * fptemp);
4009 void helper_fprem(void)
4011 CPU86_LDouble dblq, fpsrcop, fptemp;
4012 CPU86_LDoubleU fpsrcop1, fptemp1;
4014 signed long long int q;
4016 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4017 ST0 = 0.0 / 0.0; /* NaN */
4018 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4022 fpsrcop = (CPU86_LDouble)ST0;
4023 fptemp = (CPU86_LDouble)ST1;
4024 fpsrcop1.d = fpsrcop;
4026 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4029 /* optimisation? taken from the AMD docs */
4030 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4031 /* ST0 is unchanged */
4035 if ( expdif < 53 ) {
4036 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4037 /* round dblq towards zero */
4038 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4039 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4041 /* convert dblq to q by truncating towards zero */
4043 q = (signed long long int)(-dblq);
4045 q = (signed long long int)dblq;
4047 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4048 /* (C0,C3,C1) <-- (q2,q1,q0) */
4049 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4050 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4051 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4053 int N = 32 + (expdif % 32); /* as per AMD docs */
4054 env->fpus |= 0x400; /* C2 <-- 1 */
4055 fptemp = pow(2.0, (double)(expdif - N));
4056 fpsrcop = (ST0 / ST1) / fptemp;
4057 /* fpsrcop = integer obtained by chopping */
4058 fpsrcop = (fpsrcop < 0.0) ?
4059 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4060 ST0 -= (ST1 * fpsrcop * fptemp);
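/* Note: when the exponent difference is 53 or more, FPREM only
   performs a partial reduction and sets C2, so software is expected
   to repeat the instruction until C2 is clear; otherwise C0/C3/C1
   receive the low three quotient bits. */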
4064 void helper_fyl2xp1(void)
4066 CPU86_LDouble fptemp;
4069 if ((fptemp+1.0)>0.0) {
4070 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4074 env->fpus &= (~0x4700);
4079 void helper_fsqrt(void)
4081 CPU86_LDouble fptemp;
4085 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4091 void helper_fsincos(void)
4093 CPU86_LDouble fptemp;
4096 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4102 env->fpus &= (~0x400); /* C2 <-- 0 */
4103 /* the above code is for |arg| < 2**63 only */
4107 void helper_frndint(void)
4109 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4112 void helper_fscale(void)
4114 ST0 = ldexp (ST0, (int)(ST1));
4117 void helper_fsin(void)
4119 CPU86_LDouble fptemp;
4122 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4126 env->fpus &= (~0x400); /* C2 <-- 0 */
4127 /* the above code is for |arg| < 2**53 only */
4131 void helper_fcos(void)
4133 CPU86_LDouble fptemp;
4136 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4140 env->fpus &= (~0x400); /* C2 <-- 0 */
4141 /* the above code is for |arg| < 2**63 only */
4145 void helper_fxam_ST0(void)
4147 CPU86_LDoubleU temp;
4152 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4154 env->fpus |= 0x200; /* C1 <-- 1 */
4156 /* XXX: test fptags too */
4157 expdif = EXPD(temp);
4158 if (expdif == MAXEXPD) {
4159 #ifdef USE_X86LDOUBLE
4160 if (MANTD(temp) == 0x8000000000000000ULL)
4162 if (MANTD(temp) == 0)
4164 env->fpus |= 0x500 /*Infinity*/;
4166 env->fpus |= 0x100 /*NaN*/;
4167 } else if (expdif == 0) {
4168 if (MANTD(temp) == 0)
4169 env->fpus |= 0x4000 /*Zero*/;
4171 env->fpus |= 0x4400 /*Denormal*/;
4177 void helper_fstenv(target_ulong ptr, int data32)
4179 int fpus, fptag, exp, i;
4183 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4185 for (i=7; i>=0; i--) {
4187 if (env->fptags[i]) {
4190 tmp.d = env->fpregs[i].d;
4193 if (exp == 0 && mant == 0) {
4196 } else if (exp == 0 || exp == MAXEXPD
4197 #ifdef USE_X86LDOUBLE
4198 || (mant & (1LL << 63)) == 0
4201 /* NaNs, infinity, denormal */
4208 stl(ptr, env->fpuc);
4210 stl(ptr + 8, fptag);
4211 stl(ptr + 12, 0); /* fpip */
4212 stl(ptr + 16, 0); /* fpcs */
4213 stl(ptr + 20, 0); /* fpoo */
4214 stl(ptr + 24, 0); /* fpos */
4217 stw(ptr, env->fpuc);
4219 stw(ptr + 4, fptag);
4227 void helper_fldenv(target_ulong ptr, int data32)
4232 env->fpuc = lduw(ptr);
4233 fpus = lduw(ptr + 4);
4234 fptag = lduw(ptr + 8);
4237 env->fpuc = lduw(ptr);
4238 fpus = lduw(ptr + 2);
4239 fptag = lduw(ptr + 4);
4241 env->fpstt = (fpus >> 11) & 7;
4242 env->fpus = fpus & ~0x3800;
4243 for(i = 0;i < 8; i++) {
4244 env->fptags[i] = ((fptag & 3) == 3);
4249 void helper_fsave(target_ulong ptr, int data32)
4254 helper_fstenv(ptr, data32);
4256 ptr += (14 << data32);
4257 for(i = 0;i < 8; i++) {
4259 helper_fstt(tmp, ptr);
4277 void helper_frstor(target_ulong ptr, int data32)
4282 helper_fldenv(ptr, data32);
4283 ptr += (14 << data32);
4285 for(i = 0;i < 8; i++) {
4286 tmp = helper_fldt(ptr);
4292 void helper_fxsave(target_ulong ptr, int data64)
4294 int fpus, fptag, i, nb_xmm_regs;
4298 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4300 for(i = 0; i < 8; i++) {
4301 fptag |= (env->fptags[i] << i);
4303 stw(ptr, env->fpuc);
4305 stw(ptr + 4, fptag ^ 0xff);
4306 #ifdef TARGET_X86_64
4308 stq(ptr + 0x08, 0); /* rip */
4309 stq(ptr + 0x10, 0); /* rdp */
4313 stl(ptr + 0x08, 0); /* eip */
4314 stl(ptr + 0x0c, 0); /* sel */
4315 stl(ptr + 0x10, 0); /* dp */
4316 stl(ptr + 0x14, 0); /* sel */
4320 for(i = 0;i < 8; i++) {
4322 helper_fstt(tmp, addr);
4326 if (env->cr[4] & CR4_OSFXSR_MASK) {
4327 /* XXX: finish it */
4328 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4329 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4330 if (env->hflags & HF_CS64_MASK)
4335 for(i = 0; i < nb_xmm_regs; i++) {
4336 stq(addr, env->xmm_regs[i].XMM_Q(0));
4337 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4343 void helper_fxrstor(target_ulong ptr, int data64)
4345 int i, fpus, fptag, nb_xmm_regs;
4349 env->fpuc = lduw(ptr);
4350 fpus = lduw(ptr + 2);
4351 fptag = lduw(ptr + 4);
4352 env->fpstt = (fpus >> 11) & 7;
4353 env->fpus = fpus & ~0x3800;
4355 for(i = 0;i < 8; i++) {
4356 env->fptags[i] = ((fptag >> i) & 1);
4360 for(i = 0;i < 8; i++) {
4361 tmp = helper_fldt(addr);
4366 if (env->cr[4] & CR4_OSFXSR_MASK) {
4367 /* XXX: finish it */
4368 env->mxcsr = ldl(ptr + 0x18);
4370 if (env->hflags & HF_CS64_MASK)
4375 for(i = 0; i < nb_xmm_regs; i++) {
4376 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4377 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4383 #ifndef USE_X86LDOUBLE
4385 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4387 CPU86_LDoubleU temp;
4392 *pmant = (MANTD(temp) << 11) | (1LL << 63);
4393 /* exponent + sign */
4394 e = EXPD(temp) - EXPBIAS + 16383;
4395 e |= SIGND(temp) >> 16;
4399 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4401 CPU86_LDoubleU temp;
4405 /* XXX: handle overflow ? */
4406 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4407 e |= (upper >> 4) & 0x800; /* sign */
4408 ll = (mant >> 11) & ((1LL << 52) - 1);
4410 temp.l.upper = (e << 20) | (ll >> 32);
4413 temp.ll = ll | ((uint64_t)e << 52);
4420 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4422 CPU86_LDoubleU temp;
4425 *pmant = temp.l.lower;
4426 *pexp = temp.l.upper;
4429 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4431 CPU86_LDoubleU temp;
4433 temp.l.upper = upper;
4434 temp.l.lower = mant;
4439 #ifdef TARGET_X86_64
4441 //#define DEBUG_MULDIV
4443 static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4452 static void neg128(uint64_t *plow, uint64_t *phigh)
4456 add128(plow, phigh, 1, 0);
4459 /* return TRUE if overflow */
4460 static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4462 uint64_t q, r, a1, a0;
4475 /* XXX: use a better algorithm */
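/* restoring shift-subtract division: a1:a0 holds the 128 bit
   dividend; each of the 64 iterations shifts it left one bit,
   subtracts b from a1 when it fits and shifts the resulting
   quotient bit into the bottom of a0, so that a0 ends up holding
   the quotient and a1 the remainder */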
4476 for(i = 0; i < 64; i++) {
4478 a1 = (a1 << 1) | (a0 >> 63);
4479 if (ab || a1 >= b) {
4485 a0 = (a0 << 1) | qb;
4487 #if defined(DEBUG_MULDIV)
4488 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4489 *phigh, *plow, b, a0, a1);
4497 /* return TRUE if overflow */
4498 static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4501 sa = ((int64_t)*phigh < 0);
4503 neg128(plow, phigh);
4507 if (div64(plow, phigh, b) != 0)
4510 if (*plow > (1ULL << 63))
4514 if (*plow >= (1ULL << 63))
4522 void helper_mulq_EAX_T0(target_ulong t0)
4526 mulu64(&r0, &r1, EAX, t0);
4533 void helper_imulq_EAX_T0(target_ulong t0)
4537 muls64(&r0, &r1, EAX, t0);
4541 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4544 target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4548 muls64(&r0, &r1, t0, t1);
4550 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
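/* Note: CC_SRC is non-zero (flagging CF/OF) exactly when the high
   half is not the sign extension of the low half, i.e. when the
   signed product does not fit in 64 bits. */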
4554 void helper_divq_EAX(target_ulong t0)
4558 raise_exception(EXCP00_DIVZ);
4562 if (div64(&r0, &r1, t0))
4563 raise_exception(EXCP00_DIVZ);
4568 void helper_idivq_EAX(target_ulong t0)
4572 raise_exception(EXCP00_DIVZ);
4576 if (idiv64(&r0, &r1, t0))
4577 raise_exception(EXCP00_DIVZ);
4583 static void do_hlt(void)
4585 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4587 env->exception_index = EXCP_HLT;
4591 void helper_hlt(int next_eip_addend)
4593 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4594 EIP += next_eip_addend;
4599 void helper_monitor(target_ulong ptr)
4601 if ((uint32_t)ECX != 0)
4602 raise_exception(EXCP0D_GPF);
4603 /* XXX: store address ? */
4604 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4607 void helper_mwait(int next_eip_addend)
4609 if ((uint32_t)ECX != 0)
4610 raise_exception(EXCP0D_GPF);
4611 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4612 EIP += next_eip_addend;
4614 /* XXX: not complete but not completely erroneous */
4615 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4616 /* more than one CPU: do not sleep because another CPU may wake this one */
4623 void helper_debug(void)
4625 env->exception_index = EXCP_DEBUG;
4629 void helper_raise_interrupt(int intno, int next_eip_addend)
4631 raise_interrupt(intno, 1, 0, next_eip_addend);
4634 void helper_raise_exception(int exception_index)
4636 raise_exception(exception_index);
4639 void helper_cli(void)
4641 env->eflags &= ~IF_MASK;
4644 void helper_sti(void)
4646 env->eflags |= IF_MASK;
4650 /* vm86plus instructions */
4651 void helper_cli_vm(void)
4653 env->eflags &= ~VIF_MASK;
4656 void helper_sti_vm(void)
4658 env->eflags |= VIF_MASK;
4659 if (env->eflags & VIP_MASK) {
4660 raise_exception(EXCP0D_GPF);
4665 void helper_set_inhibit_irq(void)
4667 env->hflags |= HF_INHIBIT_IRQ_MASK;
4670 void helper_reset_inhibit_irq(void)
4672 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4675 void helper_boundw(target_ulong a0, int v)
4679 high = ldsw(a0 + 2);
4681 if (v < low || v > high) {
4682 raise_exception(EXCP05_BOUND);
4686 void helper_boundl(target_ulong a0, int v)
4691 if (v < low || v > high) {
4692 raise_exception(EXCP05_BOUND);
4696 static float approx_rsqrt(float a)
4698 return 1.0 / sqrt(a);
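/* Note: these helpers back the RSQRT/RCP SSE instructions; real
   hardware only guarantees roughly 12 bits of precision whereas the
   emulation returns a full precision result. */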
4701 static float approx_rcp(float a)
4706 #if !defined(CONFIG_USER_ONLY)
4708 #define MMUSUFFIX _mmu
4711 #include "softmmu_template.h"
4714 #include "softmmu_template.h"
4717 #include "softmmu_template.h"
4720 #include "softmmu_template.h"
4724 #if !defined(CONFIG_USER_ONLY)
4725 /* try to fill the TLB and return an exception if error. If retaddr is
4726 NULL, it means that the function was called in C code (i.e. not
4727 from generated code or from helper.c) */
4728 /* XXX: fix it to restore all registers */
4729 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4731 TranslationBlock *tb;
4734 CPUX86State *saved_env;
4736 /* XXX: hack to restore env in all cases, even if not called from generated code */
4739 env = cpu_single_env;
4741 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4744 /* now we have a real cpu fault */
4745 pc = (unsigned long)retaddr;
4746 tb = tb_find_pc(pc);
4748 /* the PC is inside the translated code. It means that we have
4749 a virtual CPU fault */
4750 cpu_restore_state(tb, env, pc, NULL);
4753 raise_exception_err(env->exception_index, env->error_code);
4759 /* Secure Virtual Machine helpers */
4761 #if defined(CONFIG_USER_ONLY)
4763 void helper_vmrun(int aflag, int next_eip_addend)
4766 void helper_vmmcall(void)
4769 void helper_vmload(int aflag)
4772 void helper_vmsave(int aflag)
4775 void helper_stgi(void)
4778 void helper_clgi(void)
4781 void helper_skinit(void)
4784 void helper_invlpga(int aflag)
4787 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4790 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4794 void helper_svm_check_io(uint32_t port, uint32_t param,
4795 uint32_t next_eip_addend)
4800 static inline void svm_save_seg(target_phys_addr_t addr,
4801 const SegmentCache *sc)
4803 stw_phys(addr + offsetof(struct vmcb_seg, selector),
4805 stq_phys(addr + offsetof(struct vmcb_seg, base),
4807 stl_phys(addr + offsetof(struct vmcb_seg, limit),
4809 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
4810 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
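/* the VMCB 'attrib' word keeps the access rights in a compressed
   form: type/S/DPL/P in bits 0-7 and AVL/L/D-B/G in bits 8-11,
   which is what the shift/mask above produces (svm_load_seg below
   performs the inverse expansion) */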
4813 static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4817 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4818 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4819 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4820 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4821 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4824 static inline void svm_load_seg_cache(target_phys_addr_t addr,
4825 CPUState *env, int seg_reg)
4827 SegmentCache sc1, *sc = &sc1;
4828 svm_load_seg(addr, sc);
4829 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4830 sc->base, sc->limit, sc->flags);
4833 void helper_vmrun(int aflag, int next_eip_addend)
4839 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4844 addr = (uint32_t)EAX;
4846 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4848 env->vm_vmcb = addr;
4850 /* save the current CPU state in the hsave page */
4851 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4852 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4854 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4855 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4857 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4858 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4859 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4860 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4861 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4862 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4864 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4865 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4867 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4869 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
4871 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
4873 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
4876 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4877 EIP + next_eip_addend);
4878 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4879 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4881 /* load the interception bitmaps so we do not need to access the vmcb in svm mode */
4883 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4884 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4885 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4886 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4887 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4888 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4890 /* enable intercepts */
4891 env->hflags |= HF_SVMI_MASK;
4893 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4895 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4896 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4898 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4899 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4901 /* clear exit_info_2 so we behave like the real hardware */
4902 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4904 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4905 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4906 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4907 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4908 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4909 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4910 if (int_ctl & V_INTR_MASKING_MASK) {
4911 env->v_tpr = int_ctl & V_TPR_MASK;
4912 env->hflags2 |= HF2_VINTR_MASK;
4913 if (env->eflags & IF_MASK)
4914 env->hflags2 |= HF2_HIF_MASK;
4918 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
4920 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4921 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4922 CC_OP = CC_OP_EFLAGS;
4924 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
4926 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4928 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
4930 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
4933 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4935 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4936 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4937 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4938 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4939 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
4941 /* FIXME: guest state consistency checks */
4943 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4944 case TLB_CONTROL_DO_NOTHING:
4946 case TLB_CONTROL_FLUSH_ALL_ASID:
4947 /* FIXME: this is not 100% correct but should work for now */
4952 env->hflags2 |= HF2_GIF_MASK;
4954 if (int_ctl & V_IRQ_MASK) {
4955 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
4958 /* maybe we need to inject an event */
4959 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
4960 if (event_inj & SVM_EVTINJ_VALID) {
4961 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
4962 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
4963 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
4964 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
4966 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
4967 /* FIXME: need to implement valid_err */
4968 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
4969 case SVM_EVTINJ_TYPE_INTR:
4970 env->exception_index = vector;
4971 env->error_code = event_inj_err;
4972 env->exception_is_int = 0;
4973 env->exception_next_eip = -1;
4974 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
4975 /* XXX: is it always correct ? */
4976 do_interrupt(vector, 0, 0, 0, 1);
4978 case SVM_EVTINJ_TYPE_NMI:
4979 env->exception_index = EXCP02_NMI;
4980 env->error_code = event_inj_err;
4981 env->exception_is_int = 0;
4982 env->exception_next_eip = EIP;
4983 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
4986 case SVM_EVTINJ_TYPE_EXEPT:
4987 env->exception_index = vector;
4988 env->error_code = event_inj_err;
4989 env->exception_is_int = 0;
4990 env->exception_next_eip = -1;
4991 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
4994 case SVM_EVTINJ_TYPE_SOFT:
4995 env->exception_index = vector;
4996 env->error_code = event_inj_err;
4997 env->exception_is_int = 1;
4998 env->exception_next_eip = EIP;
4999 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
5003 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
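/* Note: EVENTINJ encodes the vector in bits 7:0 and the event type
   (external interrupt, NMI, exception or software interrupt) in bits
   10:8; the optional error code comes from the event_inj_err field
   and the event is re-injected into the guest before it resumes. */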
5007 void helper_vmmcall(void)
5009 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5010 raise_exception(EXCP06_ILLOP);
5013 void helper_vmload(int aflag)
5016 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5021 addr = (uint32_t)EAX;
5023 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5024 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5025 env->segs[R_FS].base);
5027 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5029 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5031 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5033 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5036 #ifdef TARGET_X86_64
5037 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5038 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5039 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5040 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5042 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5043 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5044 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5045 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5048 void helper_vmsave(int aflag)
5051 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5056 addr = (uint32_t)EAX;
5058 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5059 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5060 env->segs[R_FS].base);
5062 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5064 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5066 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5068 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5071 #ifdef TARGET_X86_64
5072 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5073 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5074 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5075 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5077 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5078 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5079 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5080 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5083 void helper_stgi(void)
5085 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5086 env->hflags2 |= HF2_GIF_MASK;
5089 void helper_clgi(void)
5091 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5092 env->hflags2 &= ~HF2_GIF_MASK;
5095 void helper_skinit(void)
5097 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5098 /* XXX: not implemented */
5099 raise_exception(EXCP06_ILLOP);
5102 void helper_invlpga(int aflag)
5105 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5110 addr = (uint32_t)EAX;
5112 /* XXX: could use the ASID to see if it is needed to do the flush */
5114 tlb_flush_page(env, addr);
5117 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5119 if (likely(!(env->hflags & HF_SVMI_MASK)))
5122 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5123 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5124 helper_vmexit(type, param);
5127 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5128 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5129 helper_vmexit(type, param);
5132 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5133 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5134 helper_vmexit(type, param);
5137 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5138 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5139 helper_vmexit(type, param);
5142 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5143 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5144 helper_vmexit(type, param);
5148 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5149 /* FIXME: this should be read in at vmrun (faster this way?) */
5150 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5152 switch((uint32_t)ECX) {
5157 case 0xc0000000 ... 0xc0001fff:
5158 t0 = (8192 + ECX - 0xc0000000) * 2;
5162 case 0xc0010000 ... 0xc0011fff:
5163 t0 = (16384 + ECX - 0xc0010000) * 2;
5168 helper_vmexit(type, param);
5173 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5174 helper_vmexit(type, param);
5178 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5179 helper_vmexit(type, param);
5185 void helper_svm_check_io(uint32_t port, uint32_t param,
5186 uint32_t next_eip_addend)
5188 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5189 /* FIXME: this should be read in at vmrun (faster this way?) */
5190 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5191 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5192 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5194 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5195 env->eip + next_eip_addend);
5196 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
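/* Note: the I/O permission map holds one bit per port; a multi byte
   access tests all bits covered by the access size encoded in param,
   and exit_info_2 receives the next RIP so the VMM knows where to
   resume the guest. */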
5201 /* Note: currently only 32 bits of exit_code are used */
5202 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5206 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5207 exit_code, exit_info_1,
5208 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5211 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5212 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5213 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5215 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5218 /* Save the VM state in the vmcb */
5219 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
5221 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5223 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5225 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5228 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5229 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5231 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5232 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5234 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5235 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5236 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5237 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5238 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5240 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5241 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5242 int_ctl |= env->v_tpr & V_TPR_MASK;
5243 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5244 int_ctl |= V_IRQ_MASK;
5245 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5247 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5248 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5249 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5250 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5251 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5252 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5253 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
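
    /* The host always resumes at CPL 0; record why the guest exited in the
       guest's VMCB so the hypervisor can inspect it. */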
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}
#endif
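
/* MMX/SSE */
/* In env->fptags[], a value of 1 marks an FPU stack slot as empty and 0 as
   valid: entering MMX mode below makes all eight slots valid and resets the
   stack top, while EMMS marks them all empty again. */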
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}
/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}
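
/* Template expansions: ops_sse.h is included twice (SHIFT 0 generates the
   MMX variants, SHIFT 1 the SSE/XMM variants), and helper_template.h once
   per operand size (SHIFT 0..2, plus SHIFT 3 on 64-bit targets). */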
5358 #include "ops_sse.h"
5361 #include "ops_sse.h"
5364 #include "helper_template.h"
5368 #include "helper_template.h"
5372 #include "helper_template.h"
5375 #ifdef TARGET_X86_64
5378 #include "helper_template.h"
/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    /* t0 is known to be non-zero here (otherwise the loop would not
       terminate): scan up for the lowest set bit */
    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    /* likewise, scan down from the top for the highest set bit */
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}
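
/* Same dispatch, but only the carry flag is computed.  Several CC_OP
   variants can therefore share one helper: the MUL carry only depends on
   whether CC_SRC (the high half of the result) is non-zero, INC/DEC return
   the carry preserved in CC_SRC, and the SAR carry is bit 0 of CC_SRC,
   which is independent of the operand width. */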
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();