]> Git Repo - qemu.git/blame - target-i386/op_helper.c
x86: split condition code and shift templates
[qemu.git] / target-i386 / op_helper.c
CommitLineData
eaa728ee
FB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
eaa728ee 18 */
83dae095 19
3e457172
BS
20#include "cpu.h"
21#include "dyngen-exec.h"
eaa728ee 22#include "host-utils.h"
35bed8ee 23#include "ioport.h"
3e457172
BS
24#include "qemu-log.h"
25#include "cpu-defs.h"
26#include "helper.h"
eaa728ee 27
3e457172
BS
28#if !defined(CONFIG_USER_ONLY)
29#include "softmmu_exec.h"
30#endif /* !defined(CONFIG_USER_ONLY) */
eaa728ee 31
//#define DEBUG_PCALL
//#define DEBUG_MULDIV

/* Protected-mode call tracing: compiled to no-ops unless DEBUG_PCALL is
   defined, in which case messages go through the CPU_LOG_PCALL log mask. */
#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
/* do { } while (0) keeps the macros statement-safe in if/else bodies */
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif

44/* n must be a constant to be efficient */
45static inline target_long lshift(target_long x, int n)
46{
47 if (n >= 0) {
48 return x << n;
49 } else {
50 return x >> (-n);
51 }
52}
53
3e457172
BS
54static inline uint32_t compute_eflags(void)
55{
56 return env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
57}
58
59/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */
60static inline void load_eflags(int eflags, int update_mask)
61{
62 CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
63 DF = 1 - (2 * ((eflags >> 10) & 1));
64 env->eflags = (env->eflags & ~update_mask) |
65 (eflags & update_mask) | 0x2;
66}
67
68/* load efer and update the corresponding hflags. XXX: do consistency
20054ef0 69 checks with cpuid bits? */
317ac620 70static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
3e457172
BS
71{
72 env->efer = val;
73 env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
74 if (env->efer & MSR_EFER_LMA) {
75 env->hflags |= HF_LMA_MASK;
76 }
77 if (env->efer & MSR_EFER_SVME) {
78 env->hflags |= HF_SVME_MASK;
79 }
80}
d12d51d5 81
d9957a8b 82static const uint8_t parity_table[256] = {
eaa728ee
FB
83 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
94 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
95 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
96 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
97 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
98 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
99 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
100 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
101 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
102 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
103 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
104 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
105 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
106 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
107 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
108 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
109 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
110 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
111 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
112 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
113 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
114 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
115};
116
/* rclw_table[i] == i % 17: reduces a 5-bit rotate count for 16-bit RCL/RCR
   (17 positions: 16 data bits plus the carry flag). */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

/* rclb_table[i] == i % 9: reduces a 5-bit rotate count for 8-bit RCL/RCR
   (9 positions: 8 data bits plus the carry flag). */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

eaa728ee
FB
133/* broken thread support */
134
c227f099 135static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
eaa728ee
FB
136
137void helper_lock(void)
138{
139 spin_lock(&global_cpu_lock);
140}
141
142void helper_unlock(void)
143{
144 spin_unlock(&global_cpu_lock);
145}
146
147void helper_write_eflags(target_ulong t0, uint32_t update_mask)
148{
149 load_eflags(t0, update_mask);
150}
151
152target_ulong helper_read_eflags(void)
153{
154 uint32_t eflags;
20054ef0 155
a7812ae4 156 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
157 eflags |= (DF & DF_MASK);
158 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
159 return eflags;
160}
161
162/* return non zero if error */
163static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
164 int selector)
165{
166 SegmentCache *dt;
167 int index;
168 target_ulong ptr;
169
20054ef0 170 if (selector & 0x4) {
eaa728ee 171 dt = &env->ldt;
20054ef0 172 } else {
eaa728ee 173 dt = &env->gdt;
20054ef0 174 }
eaa728ee 175 index = selector & ~7;
20054ef0 176 if ((index + 7) > dt->limit) {
eaa728ee 177 return -1;
20054ef0 178 }
eaa728ee
FB
179 ptr = dt->base + index;
180 *e1_ptr = ldl_kernel(ptr);
181 *e2_ptr = ldl_kernel(ptr + 4);
182 return 0;
183}
184
185static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
186{
187 unsigned int limit;
20054ef0 188
eaa728ee 189 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
20054ef0 190 if (e2 & DESC_G_MASK) {
eaa728ee 191 limit = (limit << 12) | 0xfff;
20054ef0 192 }
eaa728ee
FB
193 return limit;
194}
195
/* Reassemble the 32-bit segment base scattered across descriptor
   words e1 (bits 31..16 -> base 15..0) and e2 (base 23..16 and 31..24). */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base_low = e1 >> 16;
    uint32_t base_mid = (e2 & 0xff) << 16;
    uint32_t base_high = e2 & 0xff000000;

    return base_low | base_mid | base_high;
}

20054ef0
BS
201static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
202 uint32_t e2)
eaa728ee
FB
203{
204 sc->base = get_seg_base(e1, e2);
205 sc->limit = get_seg_limit(e1, e2);
206 sc->flags = e2;
207}
208
209/* init the segment cache in vm86 mode. */
210static inline void load_seg_vm(int seg, int selector)
211{
212 selector &= 0xffff;
213 cpu_x86_load_seg_cache(env, seg, selector,
214 (selector << 4), 0xffff, 0);
215}
216
217static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
218 uint32_t *esp_ptr, int dpl)
219{
220 int type, index, shift;
221
222#if 0
223 {
224 int i;
225 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
20054ef0 226 for (i = 0; i < env->tr.limit; i++) {
eaa728ee 227 printf("%02x ", env->tr.base[i]);
20054ef0
BS
228 if ((i & 7) == 7) {
229 printf("\n");
230 }
eaa728ee
FB
231 }
232 printf("\n");
233 }
234#endif
235
20054ef0 236 if (!(env->tr.flags & DESC_P_MASK)) {
eaa728ee 237 cpu_abort(env, "invalid tss");
20054ef0 238 }
eaa728ee 239 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 240 if ((type & 7) != 1) {
eaa728ee 241 cpu_abort(env, "invalid tss type");
20054ef0 242 }
eaa728ee
FB
243 shift = type >> 3;
244 index = (dpl * 4 + 2) << shift;
20054ef0 245 if (index + (4 << shift) - 1 > env->tr.limit) {
77b2bc2c 246 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
20054ef0 247 }
eaa728ee
FB
248 if (shift == 0) {
249 *esp_ptr = lduw_kernel(env->tr.base + index);
250 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
251 } else {
252 *esp_ptr = ldl_kernel(env->tr.base + index);
253 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
254 }
255}
256
257/* XXX: merge with load_seg() */
258static void tss_load_seg(int seg_reg, int selector)
259{
260 uint32_t e1, e2;
261 int rpl, dpl, cpl;
262
263 if ((selector & 0xfffc) != 0) {
20054ef0 264 if (load_segment(&e1, &e2, selector) != 0) {
77b2bc2c 265 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
20054ef0
BS
266 }
267 if (!(e2 & DESC_S_MASK)) {
77b2bc2c 268 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
20054ef0 269 }
eaa728ee
FB
270 rpl = selector & 3;
271 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
272 cpl = env->hflags & HF_CPL_MASK;
273 if (seg_reg == R_CS) {
20054ef0 274 if (!(e2 & DESC_CS_MASK)) {
77b2bc2c 275 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
20054ef0
BS
276 }
277 /* XXX: is it correct? */
278 if (dpl != rpl) {
77b2bc2c 279 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
20054ef0
BS
280 }
281 if ((e2 & DESC_C_MASK) && dpl > rpl) {
77b2bc2c 282 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
20054ef0 283 }
eaa728ee
FB
284 } else if (seg_reg == R_SS) {
285 /* SS must be writable data */
20054ef0 286 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
77b2bc2c 287 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
20054ef0
BS
288 }
289 if (dpl != cpl || dpl != rpl) {
77b2bc2c 290 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
20054ef0 291 }
eaa728ee
FB
292 } else {
293 /* not readable code */
20054ef0 294 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
77b2bc2c 295 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
20054ef0 296 }
eaa728ee
FB
297 /* if data or non conforming code, checks the rights */
298 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
20054ef0 299 if (dpl < cpl || dpl < rpl) {
77b2bc2c 300 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
20054ef0 301 }
eaa728ee
FB
302 }
303 }
20054ef0 304 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 305 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
20054ef0 306 }
eaa728ee 307 cpu_x86_load_seg_cache(env, seg_reg, selector,
20054ef0
BS
308 get_seg_base(e1, e2),
309 get_seg_limit(e1, e2),
310 e2);
eaa728ee 311 } else {
20054ef0 312 if (seg_reg == R_SS || seg_reg == R_CS) {
77b2bc2c 313 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
20054ef0 314 }
eaa728ee
FB
315 }
316}
317
318#define SWITCH_TSS_JMP 0
319#define SWITCH_TSS_IRET 1
320#define SWITCH_TSS_CALL 2
321
322/* XXX: restore CPU state in registers (PowerPC case) */
323static void switch_tss(int tss_selector,
324 uint32_t e1, uint32_t e2, int source,
325 uint32_t next_eip)
326{
327 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
328 target_ulong tss_base;
329 uint32_t new_regs[8], new_segs[6];
330 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
331 uint32_t old_eflags, eflags_mask;
332 SegmentCache *dt;
333 int index;
334 target_ulong ptr;
335
336 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0
BS
337 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
338 source);
eaa728ee
FB
339
340 /* if task gate, we read the TSS segment and we load it */
341 if (type == 5) {
20054ef0 342 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 343 raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
20054ef0 344 }
eaa728ee 345 tss_selector = e1 >> 16;
20054ef0 346 if (tss_selector & 4) {
77b2bc2c 347 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
20054ef0
BS
348 }
349 if (load_segment(&e1, &e2, tss_selector) != 0) {
77b2bc2c 350 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
20054ef0
BS
351 }
352 if (e2 & DESC_S_MASK) {
77b2bc2c 353 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
20054ef0 354 }
eaa728ee 355 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 356 if ((type & 7) != 1) {
77b2bc2c 357 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
20054ef0 358 }
eaa728ee
FB
359 }
360
20054ef0 361 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 362 raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
20054ef0 363 }
eaa728ee 364
20054ef0 365 if (type & 8) {
eaa728ee 366 tss_limit_max = 103;
20054ef0 367 } else {
eaa728ee 368 tss_limit_max = 43;
20054ef0 369 }
eaa728ee
FB
370 tss_limit = get_seg_limit(e1, e2);
371 tss_base = get_seg_base(e1, e2);
372 if ((tss_selector & 4) != 0 ||
20054ef0 373 tss_limit < tss_limit_max) {
77b2bc2c 374 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
20054ef0 375 }
eaa728ee 376 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 377 if (old_type & 8) {
eaa728ee 378 old_tss_limit_max = 103;
20054ef0 379 } else {
eaa728ee 380 old_tss_limit_max = 43;
20054ef0 381 }
eaa728ee
FB
382
383 /* read all the registers from the new TSS */
384 if (type & 8) {
385 /* 32 bit */
386 new_cr3 = ldl_kernel(tss_base + 0x1c);
387 new_eip = ldl_kernel(tss_base + 0x20);
388 new_eflags = ldl_kernel(tss_base + 0x24);
20054ef0 389 for (i = 0; i < 8; i++) {
eaa728ee 390 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
20054ef0
BS
391 }
392 for (i = 0; i < 6; i++) {
eaa728ee 393 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
20054ef0 394 }
eaa728ee
FB
395 new_ldt = lduw_kernel(tss_base + 0x60);
396 new_trap = ldl_kernel(tss_base + 0x64);
397 } else {
398 /* 16 bit */
399 new_cr3 = 0;
400 new_eip = lduw_kernel(tss_base + 0x0e);
401 new_eflags = lduw_kernel(tss_base + 0x10);
20054ef0 402 for (i = 0; i < 8; i++) {
eaa728ee 403 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
20054ef0
BS
404 }
405 for (i = 0; i < 4; i++) {
eaa728ee 406 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
20054ef0 407 }
eaa728ee
FB
408 new_ldt = lduw_kernel(tss_base + 0x2a);
409 new_segs[R_FS] = 0;
410 new_segs[R_GS] = 0;
411 new_trap = 0;
412 }
4581cbcd
BS
413 /* XXX: avoid a compiler warning, see
414 http://support.amd.com/us/Processor_TechDocs/24593.pdf
415 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
416 (void)new_trap;
eaa728ee
FB
417
418 /* NOTE: we must avoid memory exceptions during the task switch,
419 so we make dummy accesses before */
420 /* XXX: it can still fail in some cases, so a bigger hack is
421 necessary to valid the TLB after having done the accesses */
422
423 v1 = ldub_kernel(env->tr.base);
424 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
425 stb_kernel(env->tr.base, v1);
426 stb_kernel(env->tr.base + old_tss_limit_max, v2);
427
428 /* clear busy bit (it is restartable) */
429 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
430 target_ulong ptr;
431 uint32_t e2;
20054ef0 432
eaa728ee
FB
433 ptr = env->gdt.base + (env->tr.selector & ~7);
434 e2 = ldl_kernel(ptr + 4);
435 e2 &= ~DESC_TSS_BUSY_MASK;
436 stl_kernel(ptr + 4, e2);
437 }
438 old_eflags = compute_eflags();
20054ef0 439 if (source == SWITCH_TSS_IRET) {
eaa728ee 440 old_eflags &= ~NT_MASK;
20054ef0 441 }
eaa728ee
FB
442
443 /* save the current state in the old TSS */
444 if (type & 8) {
445 /* 32 bit */
446 stl_kernel(env->tr.base + 0x20, next_eip);
447 stl_kernel(env->tr.base + 0x24, old_eflags);
448 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
449 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
450 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
451 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
452 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
453 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
454 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
455 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
20054ef0 456 for (i = 0; i < 6; i++) {
eaa728ee 457 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
20054ef0 458 }
eaa728ee
FB
459 } else {
460 /* 16 bit */
461 stw_kernel(env->tr.base + 0x0e, next_eip);
462 stw_kernel(env->tr.base + 0x10, old_eflags);
463 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
464 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
465 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
466 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
467 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
468 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
469 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
470 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
20054ef0 471 for (i = 0; i < 4; i++) {
eaa728ee 472 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
20054ef0 473 }
eaa728ee
FB
474 }
475
476 /* now if an exception occurs, it will occurs in the next task
477 context */
478
479 if (source == SWITCH_TSS_CALL) {
480 stw_kernel(tss_base, env->tr.selector);
481 new_eflags |= NT_MASK;
482 }
483
484 /* set busy bit */
485 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
486 target_ulong ptr;
487 uint32_t e2;
20054ef0 488
eaa728ee
FB
489 ptr = env->gdt.base + (tss_selector & ~7);
490 e2 = ldl_kernel(ptr + 4);
491 e2 |= DESC_TSS_BUSY_MASK;
492 stl_kernel(ptr + 4, e2);
493 }
494
495 /* set the new CPU state */
496 /* from this point, any exception which occurs can give problems */
497 env->cr[0] |= CR0_TS_MASK;
498 env->hflags |= HF_TS_MASK;
499 env->tr.selector = tss_selector;
500 env->tr.base = tss_base;
501 env->tr.limit = tss_limit;
502 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
503
504 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
505 cpu_x86_update_cr3(env, new_cr3);
506 }
507
508 /* load all registers without an exception, then reload them with
509 possible exception */
510 env->eip = new_eip;
511 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
512 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
20054ef0 513 if (!(type & 8)) {
eaa728ee 514 eflags_mask &= 0xffff;
20054ef0 515 }
eaa728ee 516 load_eflags(new_eflags, eflags_mask);
20054ef0 517 /* XXX: what to do in 16 bit case? */
eaa728ee
FB
518 EAX = new_regs[0];
519 ECX = new_regs[1];
520 EDX = new_regs[2];
521 EBX = new_regs[3];
522 ESP = new_regs[4];
523 EBP = new_regs[5];
524 ESI = new_regs[6];
525 EDI = new_regs[7];
526 if (new_eflags & VM_MASK) {
20054ef0 527 for (i = 0; i < 6; i++) {
eaa728ee 528 load_seg_vm(i, new_segs[i]);
20054ef0 529 }
eaa728ee
FB
530 /* in vm86, CPL is always 3 */
531 cpu_x86_set_cpl(env, 3);
532 } else {
533 /* CPL is set the RPL of CS */
534 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
535 /* first just selectors as the rest may trigger exceptions */
20054ef0 536 for (i = 0; i < 6; i++) {
eaa728ee 537 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
20054ef0 538 }
eaa728ee
FB
539 }
540
541 env->ldt.selector = new_ldt & ~4;
542 env->ldt.base = 0;
543 env->ldt.limit = 0;
544 env->ldt.flags = 0;
545
546 /* load the LDT */
20054ef0 547 if (new_ldt & 4) {
77b2bc2c 548 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
20054ef0 549 }
eaa728ee
FB
550
551 if ((new_ldt & 0xfffc) != 0) {
552 dt = &env->gdt;
553 index = new_ldt & ~7;
20054ef0 554 if ((index + 7) > dt->limit) {
77b2bc2c 555 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
20054ef0 556 }
eaa728ee
FB
557 ptr = dt->base + index;
558 e1 = ldl_kernel(ptr);
559 e2 = ldl_kernel(ptr + 4);
20054ef0 560 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
77b2bc2c 561 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
20054ef0
BS
562 }
563 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 564 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
20054ef0 565 }
eaa728ee
FB
566 load_seg_cache_raw_dt(&env->ldt, e1, e2);
567 }
568
569 /* load the segments */
570 if (!(new_eflags & VM_MASK)) {
571 tss_load_seg(R_CS, new_segs[R_CS]);
572 tss_load_seg(R_SS, new_segs[R_SS]);
573 tss_load_seg(R_ES, new_segs[R_ES]);
574 tss_load_seg(R_DS, new_segs[R_DS]);
575 tss_load_seg(R_FS, new_segs[R_FS]);
576 tss_load_seg(R_GS, new_segs[R_GS]);
577 }
578
579 /* check that EIP is in the CS segment limits */
580 if (new_eip > env->segs[R_CS].limit) {
20054ef0 581 /* XXX: different exception if CALL? */
77b2bc2c 582 raise_exception_err(env, EXCP0D_GPF, 0);
eaa728ee 583 }
01df040b
AL
584
585#ifndef CONFIG_USER_ONLY
586 /* reset local breakpoints */
587 if (env->dr[7] & 0x55) {
588 for (i = 0; i < 4; i++) {
20054ef0 589 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1) {
01df040b 590 hw_breakpoint_remove(env, i);
20054ef0 591 }
01df040b
AL
592 }
593 env->dr[7] &= ~0x55;
594 }
595#endif
eaa728ee
FB
596}
597
598/* check if Port I/O is allowed in TSS */
599static inline void check_io(int addr, int size)
600{
601 int io_offset, val, mask;
602
603 /* TSS must be a valid 32 bit one */
604 if (!(env->tr.flags & DESC_P_MASK) ||
605 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
20054ef0 606 env->tr.limit < 103) {
eaa728ee 607 goto fail;
20054ef0 608 }
eaa728ee
FB
609 io_offset = lduw_kernel(env->tr.base + 0x66);
610 io_offset += (addr >> 3);
611 /* Note: the check needs two bytes */
20054ef0 612 if ((io_offset + 1) > env->tr.limit) {
eaa728ee 613 goto fail;
20054ef0 614 }
eaa728ee
FB
615 val = lduw_kernel(env->tr.base + io_offset);
616 val >>= (addr & 7);
617 mask = (1 << size) - 1;
618 /* all bits must be zero to allow the I/O */
619 if ((val & mask) != 0) {
620 fail:
77b2bc2c 621 raise_exception_err(env, EXCP0D_GPF, 0);
eaa728ee
FB
622 }
623}
624
/* TCG helpers: I/O permission checks for byte/word/dword accesses. */
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

640void helper_outb(uint32_t port, uint32_t data)
641{
afcea8cb 642 cpu_outb(port, data & 0xff);
eaa728ee
FB
643}
644
645target_ulong helper_inb(uint32_t port)
646{
afcea8cb 647 return cpu_inb(port);
eaa728ee
FB
648}
649
650void helper_outw(uint32_t port, uint32_t data)
651{
afcea8cb 652 cpu_outw(port, data & 0xffff);
eaa728ee
FB
653}
654
655target_ulong helper_inw(uint32_t port)
656{
afcea8cb 657 return cpu_inw(port);
eaa728ee
FB
658}
659
660void helper_outl(uint32_t port, uint32_t data)
661{
afcea8cb 662 cpu_outl(port, data);
eaa728ee
FB
663}
664
665target_ulong helper_inl(uint32_t port)
666{
afcea8cb 667 return cpu_inl(port);
eaa728ee
FB
668}
669
670static inline unsigned int get_sp_mask(unsigned int e2)
671{
20054ef0 672 if (e2 & DESC_B_MASK) {
eaa728ee 673 return 0xffffffff;
20054ef0 674 } else {
eaa728ee 675 return 0xffff;
20054ef0 676 }
eaa728ee
FB
677}
678
/* Return 1 if exception vector 'intno' pushes an error code:
   #DF(8), #TS(10), #NP(11), #SS(12), #GP(13), #PF(14), #AC(17). */
static int exception_has_error_code(int intno)
{
    if (intno == 8 || (intno >= 10 && intno <= 14) || intno == 17) {
        return 1;
    }
    return 0;
}

/* Write back a stack pointer honoring the current stack-size mask.
   On 64-bit targets a zero mask means "full RSP". */
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            ESP = (ESP & ~0xffff) | ((val) & 0xffff);           \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            ESP = (uint32_t)(val);                              \
        } else {                                                \
            ESP = (val);                                        \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask));         \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
/* Fix: 'sp' is now parenthesized — the old expansion (sp & (sp_mask))
 * mis-bound for compound arguments, e.g. SEG_ADDL(s, a + b, m) became
 * s + (a + (b & m)) because & binds tighter than nothing but looser
 * than +. Callers passing plain variables are unaffected. */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + ((sp) & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
/* NOTE: these macros update 'sp' (and POPW/POPL assign 'val') in place,
   so those arguments must be plain lvalues. */
#define PUSHW(ssp, sp, sp_mask, val)                            \
    {                                                           \
        sp -= 2;                                                \
        stw_kernel((ssp) + ((sp) & (sp_mask)), (val));          \
    }

#define PUSHL(ssp, sp, sp_mask, val)                            \
    {                                                           \
        sp -= 4;                                                \
        stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
    }

#define POPW(ssp, sp, sp_mask, val)                             \
    {                                                           \
        val = lduw_kernel((ssp) + ((sp) & (sp_mask)));          \
        sp += 2;                                                \
    }

#define POPL(ssp, sp, sp_mask, val)                             \
    {                                                           \
        val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask)); \
        sp += 4;                                                \
    }

741/* protected mode interrupt */
742static void do_interrupt_protected(int intno, int is_int, int error_code,
743 unsigned int next_eip, int is_hw)
744{
745 SegmentCache *dt;
746 target_ulong ptr, ssp;
747 int type, dpl, selector, ss_dpl, cpl;
748 int has_error_code, new_stack, shift;
1c918eba 749 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
eaa728ee 750 uint32_t old_eip, sp_mask;
eaa728ee 751
eaa728ee 752 has_error_code = 0;
20054ef0
BS
753 if (!is_int && !is_hw) {
754 has_error_code = exception_has_error_code(intno);
755 }
756 if (is_int) {
eaa728ee 757 old_eip = next_eip;
20054ef0 758 } else {
eaa728ee 759 old_eip = env->eip;
20054ef0 760 }
eaa728ee
FB
761
762 dt = &env->idt;
20054ef0 763 if (intno * 8 + 7 > dt->limit) {
77b2bc2c 764 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
20054ef0 765 }
eaa728ee
FB
766 ptr = dt->base + intno * 8;
767 e1 = ldl_kernel(ptr);
768 e2 = ldl_kernel(ptr + 4);
769 /* check gate type */
770 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
20054ef0 771 switch (type) {
eaa728ee
FB
772 case 5: /* task gate */
773 /* must do that check here to return the correct error code */
20054ef0 774 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 775 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
20054ef0 776 }
eaa728ee
FB
777 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
778 if (has_error_code) {
779 int type;
780 uint32_t mask;
20054ef0 781
eaa728ee
FB
782 /* push the error code */
783 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
784 shift = type >> 3;
20054ef0 785 if (env->segs[R_SS].flags & DESC_B_MASK) {
eaa728ee 786 mask = 0xffffffff;
20054ef0 787 } else {
eaa728ee 788 mask = 0xffff;
20054ef0 789 }
eaa728ee
FB
790 esp = (ESP - (2 << shift)) & mask;
791 ssp = env->segs[R_SS].base + esp;
20054ef0 792 if (shift) {
eaa728ee 793 stl_kernel(ssp, error_code);
20054ef0 794 } else {
eaa728ee 795 stw_kernel(ssp, error_code);
20054ef0 796 }
eaa728ee
FB
797 SET_ESP(esp, mask);
798 }
799 return;
800 case 6: /* 286 interrupt gate */
801 case 7: /* 286 trap gate */
802 case 14: /* 386 interrupt gate */
803 case 15: /* 386 trap gate */
804 break;
805 default:
77b2bc2c 806 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
eaa728ee
FB
807 break;
808 }
809 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
810 cpl = env->hflags & HF_CPL_MASK;
1235fc06 811 /* check privilege if software int */
20054ef0 812 if (is_int && dpl < cpl) {
77b2bc2c 813 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
20054ef0 814 }
eaa728ee 815 /* check valid bit */
20054ef0 816 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 817 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
20054ef0 818 }
eaa728ee
FB
819 selector = e1 >> 16;
820 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
20054ef0 821 if ((selector & 0xfffc) == 0) {
77b2bc2c 822 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0
BS
823 }
824 if (load_segment(&e1, &e2, selector) != 0) {
77b2bc2c 825 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
826 }
827 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
77b2bc2c 828 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 829 }
eaa728ee 830 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 831 if (dpl > cpl) {
77b2bc2c 832 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
833 }
834 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 835 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
20054ef0 836 }
eaa728ee
FB
837 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
838 /* to inner privilege */
839 get_ss_esp_from_tss(&ss, &esp, dpl);
20054ef0 840 if ((ss & 0xfffc) == 0) {
77b2bc2c 841 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0
BS
842 }
843 if ((ss & 3) != dpl) {
77b2bc2c 844 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0
BS
845 }
846 if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
77b2bc2c 847 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 848 }
eaa728ee 849 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 850 if (ss_dpl != dpl) {
77b2bc2c 851 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 852 }
eaa728ee
FB
853 if (!(ss_e2 & DESC_S_MASK) ||
854 (ss_e2 & DESC_CS_MASK) ||
20054ef0 855 !(ss_e2 & DESC_W_MASK)) {
77b2bc2c 856 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0
BS
857 }
858 if (!(ss_e2 & DESC_P_MASK)) {
77b2bc2c 859 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 860 }
eaa728ee
FB
861 new_stack = 1;
862 sp_mask = get_sp_mask(ss_e2);
863 ssp = get_seg_base(ss_e1, ss_e2);
864 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
865 /* to same privilege */
20054ef0 866 if (env->eflags & VM_MASK) {
77b2bc2c 867 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 868 }
eaa728ee
FB
869 new_stack = 0;
870 sp_mask = get_sp_mask(env->segs[R_SS].flags);
871 ssp = env->segs[R_SS].base;
872 esp = ESP;
873 dpl = cpl;
874 } else {
77b2bc2c 875 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
eaa728ee
FB
876 new_stack = 0; /* avoid warning */
877 sp_mask = 0; /* avoid warning */
878 ssp = 0; /* avoid warning */
879 esp = 0; /* avoid warning */
880 }
881
882 shift = type >> 3;
883
884#if 0
885 /* XXX: check that enough room is available */
886 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
20054ef0 887 if (env->eflags & VM_MASK) {
eaa728ee 888 push_size += 8;
20054ef0 889 }
eaa728ee
FB
890 push_size <<= shift;
891#endif
892 if (shift == 1) {
893 if (new_stack) {
894 if (env->eflags & VM_MASK) {
895 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
896 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
897 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
898 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
899 }
900 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
901 PUSHL(ssp, esp, sp_mask, ESP);
902 }
903 PUSHL(ssp, esp, sp_mask, compute_eflags());
904 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
905 PUSHL(ssp, esp, sp_mask, old_eip);
906 if (has_error_code) {
907 PUSHL(ssp, esp, sp_mask, error_code);
908 }
909 } else {
910 if (new_stack) {
911 if (env->eflags & VM_MASK) {
912 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
913 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
914 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
915 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
916 }
917 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
918 PUSHW(ssp, esp, sp_mask, ESP);
919 }
920 PUSHW(ssp, esp, sp_mask, compute_eflags());
921 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
922 PUSHW(ssp, esp, sp_mask, old_eip);
923 if (has_error_code) {
924 PUSHW(ssp, esp, sp_mask, error_code);
925 }
926 }
927
928 if (new_stack) {
929 if (env->eflags & VM_MASK) {
930 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
931 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
932 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
933 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
934 }
935 ss = (ss & ~3) | dpl;
936 cpu_x86_load_seg_cache(env, R_SS, ss,
937 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
938 }
939 SET_ESP(esp, sp_mask);
940
941 selector = (selector & ~3) | dpl;
942 cpu_x86_load_seg_cache(env, R_CS, selector,
943 get_seg_base(e1, e2),
944 get_seg_limit(e1, e2),
945 e2);
946 cpu_x86_set_cpl(env, dpl);
947 env->eip = offset;
948
949 /* interrupt gate clear IF mask */
950 if ((type & 1) == 0) {
951 env->eflags &= ~IF_MASK;
952 }
953 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
954}
955
956#ifdef TARGET_X86_64
957
20054ef0
BS
958#define PUSHQ(sp, val) \
959 { \
960 sp -= 8; \
961 stq_kernel(sp, (val)); \
962 }
eaa728ee 963
20054ef0
BS
964#define POPQ(sp, val) \
965 { \
966 val = ldq_kernel(sp); \
967 sp += 8; \
968 }
eaa728ee
FB
969
970static inline target_ulong get_rsp_from_tss(int level)
971{
972 int index;
973
974#if 0
975 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
976 env->tr.base, env->tr.limit);
977#endif
978
20054ef0 979 if (!(env->tr.flags & DESC_P_MASK)) {
eaa728ee 980 cpu_abort(env, "invalid tss");
20054ef0 981 }
eaa728ee 982 index = 8 * level + 4;
20054ef0 983 if ((index + 7) > env->tr.limit) {
77b2bc2c 984 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
20054ef0 985 }
eaa728ee
FB
986 return ldq_kernel(env->tr.base + index);
987}
988
/* 64 bit interrupt */
/* Deliver exception/interrupt 'intno' through the long-mode (64-bit)
   IDT.  is_int is true for software INT; next_eip is the address of
   the following instruction (used only when is_int).  is_hw marks a
   hardware interrupt, which never pushes an error code. */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    /* 64-bit IDT entries are 16 bytes */
    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    /* 64-bit offset is split across all three descriptor words */
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    /* validate the target code segment */
    if (load_segment(&e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    /* target must be a 64-bit code segment (L set, D clear) */
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0) {
            esp = get_rsp_from_tss(ist + 3);
        } else {
            esp = get_rsp_from_tss(dpl);
        }
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        if (ist != 0) {
            esp = get_rsp_from_tss(ist + 3);
        } else {
            esp = ESP;
        }
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    /* long mode always pushes SS:RSP, unlike 32-bit protected mode */
    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        /* a null SS with the new RPL is legal in 64-bit mode */
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
1119#endif
1120
d9957a8b 1121#ifdef TARGET_X86_64
eaa728ee
FB
1122#if defined(CONFIG_USER_ONLY)
/* User-mode-only SYSCALL: hand the syscall to the host emulation loop
   as a pseudo-exception instead of emulating the CPU transition. */
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    /* return address for SYSRET: the instruction after SYSCALL */
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}
1129#else
/* Full SYSCALL emulation: load CS/SS from MSR_STAR, save the return
   RIP in RCX (and RFLAGS in R11 in long mode), then jump to the
   LSTAR/CSTAR/STAR entry point at CPL 0.  #UD if EFER.SCE is clear. */
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    /* STAR[47:32] holds the kernel CS selector; SS is implicitly CS+8 */
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        /* SFMASK selects which RFLAGS bits are cleared on entry */
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        /* 64-bit callers enter at LSTAR, compatibility mode at CSTAR */
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        /* legacy mode: 32-bit return address, entry point in STAR[31:0] */
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
1183#endif
d9957a8b 1184#endif
eaa728ee 1185
d9957a8b 1186#ifdef TARGET_X86_64
eaa728ee
FB
/* SYSRET: return from a SYSCALL handler to CPL 3.  dflag == 2 means a
   64-bit (REX.W) SYSRET returning to 64-bit mode; otherwise return to
   compatibility/legacy mode.  CS/SS come from MSR_STAR[63:48].
   #UD if EFER.SCE is clear; #GP if not protected mode or CPL != 0. */
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            /* 64-bit target: CS = STAR[63:48] + 16, RPL forced to 3 */
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            /* compatibility-mode target */
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        /* RFLAGS restored from R11, as saved by SYSCALL */
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
d9957a8b 1240#endif
eaa728ee
FB
1241
/* real mode interrupt */
/* Deliver interrupt 'intno' through the real-mode IVT (4-byte
   entries).  error_code is accepted for signature symmetry with the
   protected-mode paths but is never pushed in real mode. */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4); /* real-mode base = sel * 16 */
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
1280
e694d4e2 1281#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
/* User-mode-only delivery: perform just the privilege check a real
   CPU would do on a software INT, then leave the pending exception
   for the outer emulation loop.  error_code is unused here. */
static void do_interrupt_user(int intno, int is_int, int error_code,
                              target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    /* IDT entries are 16 bytes in long mode, 8 bytes otherwise */
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int) {
        EIP = next_eip;
    }
}
1314
e694d4e2
BS
1315#else
1316
/* Record the interrupt/exception being delivered into the guest's
   VMCB EVENTINJ field so a nested SVM hypervisor can observe it.
   rm is true when delivering in real mode (no error code is then
   recorded).  is_hw is currently unused. */
static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    /* do not overwrite an injection the hypervisor already queued */
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                             control.event_inj_err),
                     error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj);
    }
}
00ea18d1 1342#endif
2ed51f5b 1343
eaa728ee
FB
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(int intno, int is_int, int error_code,
                             target_ulong next_eip, int is_hw)
{
    /* optional interrupt trace for CPU_LOG_INT (protected mode only) */
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                /* page fault: the faulting address is the useful datum */
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    /* dispatch to the delivery routine matching the current CPU mode */
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    /* delivery completed: clear the VALID bit of any recorded injection */
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.event_inj));

        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
1420
/* Deliver the exception currently pending in env1 (exception_index,
   error_code, exception_next_eip).  Temporarily installs env1 as the
   global env for the duration of the call. */
void do_interrupt(CPUX86State *env1)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;
#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(env->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
    env = saved_env;
}
1450
317ac620 1451void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw)
e694d4e2 1452{
317ac620 1453 CPUX86State *saved_env;
e694d4e2
BS
1454
1455 saved_env = env;
1456 env = env1;
1457 do_interrupt_all(intno, 0, 0, 0, is_hw);
1458 env = saved_env;
1459}
1460
eaa728ee
FB
1461/* SMM support */
1462
1463#if defined(CONFIG_USER_ONLY)
1464
/* SMM is not emulated in user-mode-only builds: no-op stub. */
void do_smm_enter(CPUX86State *env1)
{
}
1468
/* RSM is not emulated in user-mode-only builds: no-op stub. */
void helper_rsm(void)
{
}
1472
1473#else
1474
1475#ifdef TARGET_X86_64
1476#define SMM_REVISION_ID 0x00020064
1477#else
1478#define SMM_REVISION_ID 0x00020000
1479#endif
1480
/* Enter System Management Mode: dump the current CPU state into the
   SMRAM state-save area at smbase + 0x8000 (layout differs between
   the 64-bit and 32-bit save maps), then reset the CPU into the SMM
   entry environment (CS = smbase >> 4, EIP = 0x8000, flat segments,
   paging/protection off). */
void do_smm_enter(CPUX86State *env1)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    /* 64-bit (AMD) state-save map */
    for (i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for (i = 8; i < 16; i++) {
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    }
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    /* 32-bit (Intel) state-save map */
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for (i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
                                      CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
    env = saved_env;
}
1621
/* RSM: leave System Management Mode by restoring the CPU state saved
   in the SMRAM state-save area at smbase + 0x8000 (mirror image of
   do_smm_enter).  SMBASE itself is only relocatable if the save-map
   revision advertises it (bit 17 of the revision ID). */
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    /* 64-bit (AMD) state-save map */
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for (i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) &
                                0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for (i = 8; i < 16; i++) {
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    }
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    /* 32-bit (Intel) state-save map */
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for (i = 0; i < 6; i++) {
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}
1742
1743#endif /* !CONFIG_USER_ONLY */
1744
1745
1746/* division, flags are undefined */
1747
1748void helper_divb_AL(target_ulong t0)
1749{
1750 unsigned int num, den, q, r;
1751
1752 num = (EAX & 0xffff);
1753 den = (t0 & 0xff);
1754 if (den == 0) {
77b2bc2c 1755 raise_exception(env, EXCP00_DIVZ);
eaa728ee
FB
1756 }
1757 q = (num / den);
20054ef0 1758 if (q > 0xff) {
77b2bc2c 1759 raise_exception(env, EXCP00_DIVZ);
20054ef0 1760 }
eaa728ee
FB
1761 q &= 0xff;
1762 r = (num % den) & 0xff;
1763 EAX = (EAX & ~0xffff) | (r << 8) | q;
1764}
1765
1766void helper_idivb_AL(target_ulong t0)
1767{
1768 int num, den, q, r;
1769
1770 num = (int16_t)EAX;
1771 den = (int8_t)t0;
1772 if (den == 0) {
77b2bc2c 1773 raise_exception(env, EXCP00_DIVZ);
eaa728ee
FB
1774 }
1775 q = (num / den);
20054ef0 1776 if (q != (int8_t)q) {
77b2bc2c 1777 raise_exception(env, EXCP00_DIVZ);
20054ef0 1778 }
eaa728ee
FB
1779 q &= 0xff;
1780 r = (num % den) & 0xff;
1781 EAX = (EAX & ~0xffff) | (r << 8) | q;
1782}
1783
1784void helper_divw_AX(target_ulong t0)
1785{
1786 unsigned int num, den, q, r;
1787
1788 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1789 den = (t0 & 0xffff);
1790 if (den == 0) {
77b2bc2c 1791 raise_exception(env, EXCP00_DIVZ);
eaa728ee
FB
1792 }
1793 q = (num / den);
20054ef0 1794 if (q > 0xffff) {
77b2bc2c 1795 raise_exception(env, EXCP00_DIVZ);
20054ef0 1796 }
eaa728ee
FB
1797 q &= 0xffff;
1798 r = (num % den) & 0xffff;
1799 EAX = (EAX & ~0xffff) | q;
1800 EDX = (EDX & ~0xffff) | r;
1801}
1802
1803void helper_idivw_AX(target_ulong t0)
1804{
1805 int num, den, q, r;
1806
1807 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1808 den = (int16_t)t0;
1809 if (den == 0) {
77b2bc2c 1810 raise_exception(env, EXCP00_DIVZ);
eaa728ee
FB
1811 }
1812 q = (num / den);
20054ef0 1813 if (q != (int16_t)q) {
77b2bc2c 1814 raise_exception(env, EXCP00_DIVZ);
20054ef0 1815 }
eaa728ee
FB
1816 q &= 0xffff;
1817 r = (num % den) & 0xffff;
1818 EAX = (EAX & ~0xffff) | q;
1819 EDX = (EDX & ~0xffff) | r;
1820}
1821
1822void helper_divl_EAX(target_ulong t0)
1823{
1824 unsigned int den, r;
1825 uint64_t num, q;
1826
1827 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1828 den = t0;
1829 if (den == 0) {
77b2bc2c 1830 raise_exception(env, EXCP00_DIVZ);
eaa728ee
FB
1831 }
1832 q = (num / den);
1833 r = (num % den);
20054ef0 1834 if (q > 0xffffffff) {
77b2bc2c 1835 raise_exception(env, EXCP00_DIVZ);
20054ef0 1836 }
eaa728ee
FB
1837 EAX = (uint32_t)q;
1838 EDX = (uint32_t)r;
1839}
1840
1841void helper_idivl_EAX(target_ulong t0)
1842{
1843 int den, r;
1844 int64_t num, q;
1845
1846 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1847 den = t0;
1848 if (den == 0) {
77b2bc2c 1849 raise_exception(env, EXCP00_DIVZ);
eaa728ee
FB
1850 }
1851 q = (num / den);
1852 r = (num % den);
20054ef0 1853 if (q != (int32_t)q) {
77b2bc2c 1854 raise_exception(env, EXCP00_DIVZ);
20054ef0 1855 }
eaa728ee
FB
1856 EAX = (uint32_t)q;
1857 EDX = (uint32_t)r;
1858}
1859
1860/* bcd */
1861
1862/* XXX: exception */
1863void helper_aam(int base)
1864{
1865 int al, ah;
20054ef0 1866
eaa728ee
FB
1867 al = EAX & 0xff;
1868 ah = al / base;
1869 al = al % base;
1870 EAX = (EAX & ~0xffff) | al | (ah << 8);
1871 CC_DST = al;
1872}
1873
1874void helper_aad(int base)
1875{
1876 int al, ah;
20054ef0 1877
eaa728ee
FB
1878 al = EAX & 0xff;
1879 ah = (EAX >> 8) & 0xff;
1880 al = ((ah * base) + al) & 0xff;
1881 EAX = (EAX & ~0xffff) | al;
1882 CC_DST = al;
1883}
1884
1885void helper_aaa(void)
1886{
1887 int icarry;
1888 int al, ah, af;
1889 int eflags;
1890
a7812ae4 1891 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1892 af = eflags & CC_A;
1893 al = EAX & 0xff;
1894 ah = (EAX >> 8) & 0xff;
1895
1896 icarry = (al > 0xf9);
20054ef0 1897 if (((al & 0x0f) > 9) || af) {
eaa728ee
FB
1898 al = (al + 6) & 0x0f;
1899 ah = (ah + 1 + icarry) & 0xff;
1900 eflags |= CC_C | CC_A;
1901 } else {
1902 eflags &= ~(CC_C | CC_A);
1903 al &= 0x0f;
1904 }
1905 EAX = (EAX & ~0xffff) | al | (ah << 8);
1906 CC_SRC = eflags;
eaa728ee
FB
1907}
1908
1909void helper_aas(void)
1910{
1911 int icarry;
1912 int al, ah, af;
1913 int eflags;
1914
a7812ae4 1915 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1916 af = eflags & CC_A;
1917 al = EAX & 0xff;
1918 ah = (EAX >> 8) & 0xff;
1919
1920 icarry = (al < 6);
20054ef0 1921 if (((al & 0x0f) > 9) || af) {
eaa728ee
FB
1922 al = (al - 6) & 0x0f;
1923 ah = (ah - 1 - icarry) & 0xff;
1924 eflags |= CC_C | CC_A;
1925 } else {
1926 eflags &= ~(CC_C | CC_A);
1927 al &= 0x0f;
1928 }
1929 EAX = (EAX & ~0xffff) | al | (ah << 8);
1930 CC_SRC = eflags;
eaa728ee
FB
1931}
1932
/* DAA: decimal adjust AL after addition.  Adjusts AL to packed BCD
   using the incoming AF/CF, then recomputes ZF/PF/SF from the result.
   The CF decision uses the pre-adjust AL value (old_al). */
void helper_daa(void)
{
    int old_al, al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    old_al = al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((old_al > 0x99) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
1959
/* DAS: decimal adjust AL after subtraction.  Mirror of DAA with
   subtraction; the CF decision uses the pre-adjust AL (al1), and an
   extra CF is set when the first subtraction borrows. */
void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9) || af) {
        eflags |= CC_A;
        if (al < 6 || cf) {
            eflags |= CC_C;
        }
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
1990
1991void helper_into(int next_eip_addend)
1992{
1993 int eflags;
20054ef0 1994
a7812ae4 1995 eflags = helper_cc_compute_all(CC_OP);
eaa728ee 1996 if (eflags & CC_O) {
77b2bc2c 1997 raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend);
eaa728ee
FB
1998 }
1999}
2000
/* CMPXCHG8B: compare the 64-bit value at [a0] with EDX:EAX; if equal
   store ECX:EBX and set ZF, otherwise load the value into EDX:EAX and
   clear ZF.  The store is performed on both paths to match hardware
   bus behavior. */
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
2020
#ifdef TARGET_X86_64
/* CMPXCHG16B: 128-bit compare-exchange of [a0] against RDX:RAX,
   storing RCX:RBX on a match. The operand must be 16-byte aligned,
   otherwise #GP is raised. Memory is always written back. */
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t lo, hi;
    int flags;

    if ((a0 & 0xf) != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    flags = helper_cc_compute_all(CC_OP);
    lo = ldq(a0);
    hi = ldq(a0 + 8);
    if (lo == EAX && hi == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        flags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, lo);
        stq(a0 + 8, hi);
        EDX = hi;
        EAX = lo;
        flags &= ~CC_Z;
    }
    CC_SRC = flags;
}
#endif
2048
/* Deliver the #DB trap for a single-stepped instruction. */
void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    /* system emulation: record the single-step as a hardware debug
       event (DR6.BS) before raising the exception */
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(env, EXCP01_DB);
}
2057
2058void helper_cpuid(void)
2059{
6fd805e1 2060 uint32_t eax, ebx, ecx, edx;
eaa728ee 2061
872929aa 2062 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
e737b32a 2063
e00b6f80 2064 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
6fd805e1
AL
2065 EAX = eax;
2066 EBX = ebx;
2067 ECX = ecx;
2068 EDX = edx;
eaa728ee
FB
2069}
2070
/* Build the ENTER nesting frame: copy level-1 previous frame pointers
   from the old frame at EBP to the new stack area, then push t1 (the
   new frame pointer value). ESP itself is not written back here;
   presumably the translator adjusts it -- TODO confirm. Note the
   'while (--level)' loops assume level > 0 on entry -- presumably the
   translator only calls this helper for non-zero levels; verify
   against the caller. */
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}
2102
#ifdef TARGET_X86_64
/* 64-bit variant of helper_enter_level: same frame-pointer copying
   for ENTER, but with flat addressing (no segment base or stack
   mask). RSP is not written back here; presumably the translator
   adjusts it -- TODO confirm. As above, assumes level > 0. */
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
2134
/* LLDT: load the LDT register from 'selector'. A null selector
   installs an empty LDT; otherwise the descriptor is fetched from
   the GDT and validated (system descriptor of type 2, present),
   raising #GP or #NP with the faulting selector on failure. */
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        /* the LDT descriptor must come from the GDT (TI bit clear) */
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        /* in long mode system descriptors are 16 bytes */
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        /* must be a system descriptor with type 2 (LDT) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            /* third descriptor word holds the upper 32 base bits */
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
2188
/* LTR: load the task register from 'selector' after validating the
   available TSS descriptor in the GDT, then mark the TSS busy in the
   descriptor table. Raises #GP/#NP with the faulting selector. */
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        /* the TSS descriptor must come from the GDT (TI bit clear) */
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        /* in long mode a TSS descriptor is 16 bytes */
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        /* must be a system descriptor of type 1 (286 TSS) or 9 (386 TSS) */
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            /* upper base bits in the third word; the fourth word's
               type field must be zero */
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        /* mark the TSS busy in the in-memory descriptor */
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
2251
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
/* Load a data/stack segment register (ES/SS/DS/FS/GS) with
   'selector', performing the protected-mode descriptor checks.
   Raises #GP, #SS or #NP with the faulting selector as error code. */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            /* a null SS is only tolerated in 64-bit code at CPL != 3 */
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        /* bit 2 of the selector chooses LDT vs GDT */
        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        /* must be a code or data segment, not a system descriptor */
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            /* a not-present stack segment raises #SS, others #NP */
            if (seg_reg == R_SS) {
                raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
            } else {
                raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
2339
/* protected mode jump */
/* Far JMP in protected mode: the target may be a code segment, a
   call gate, a task gate or a TSS. Raises #GP/#NP with the faulting
   selector on any descriptor-check failure. CPL never changes on a
   far jump; the loaded CS selector's RPL field is forced to CPL. */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(&e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        /* direct jump to a code segment */
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }
        limit = get_seg_limit(e1, e2);
        /* the limit check is skipped in long mode / 64-bit segments */
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            /* switch_tss reloaded EFLAGS, so CC state now lives there */
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                /* a 386 gate carries the offset's high half in e2 */
                new_eip |= (e2 & 0xffff0000);
            }
            if (load_segment(&e1, &e2, gate_cs) != 0) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
2446
2447/* real mode call */
2448void helper_lcall_real(int new_cs, target_ulong new_eip1,
2449 int shift, int next_eip)
2450{
2451 int new_eip;
2452 uint32_t esp, esp_mask;
2453 target_ulong ssp;
2454
2455 new_eip = new_eip1;
2456 esp = ESP;
2457 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2458 ssp = env->segs[R_SS].base;
2459 if (shift) {
2460 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2461 PUSHL(ssp, esp, esp_mask, next_eip);
2462 } else {
2463 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2464 PUSHW(ssp, esp, esp_mask, next_eip);
2465 }
2466
2467 SET_ESP(esp, esp_mask);
2468 env->eip = new_eip;
2469 env->segs[R_CS].selector = new_cs;
2470 env->segs[R_CS].base = (new_cs << 4);
2471}
2472
/* protected mode call */
/* Far CALL in protected mode: direct call to a code segment, call via
   a call gate (possibly switching to an inner-privilege stack and
   copying param_count parameters), or a task switch through a
   TSS/task gate. Raises #GP/#NP/#TS/#SS with the faulting selector.
   Statement order matters: all checks precede the first architectural
   state change (see the "not restartable" comments). */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(&e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        /* direct call to a code segment (no privilege change) */
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            /* switch_tss reloaded EFLAGS, so CC state now lives there */
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        /* 286 gates push 16-bit values, 386 gates 32-bit ones */
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }

        if (load_segment(&e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            /* fetch the new SS:ESP for privilege level dpl from the TSS */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
                      "\n",
                      ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if ((ss & 3) != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            /* push old SS:ESP, then copy the parameters from the old
               stack onto the new one */
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for (i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for (i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}
2697
/* real and vm86 mode iret */
/* Pop IP, CS and FLAGS from the stack and resume there. 'shift'
   selects 32-bit (1) vs 16-bit (0) operand size. Also clears the
   NMI-blocked state. */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    /* in vm86 mode IOPL is not part of the restored flags */
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
2737
2738static inline void validate_seg(int seg_reg, int cpl)
2739{
2740 int dpl;
2741 uint32_t e2;
2742
2743 /* XXX: on x86_64, we do not want to nullify FS and GS because
2744 they may still contain a valid base. I would be interested to
2745 know how a real x86_64 CPU behaves */
2746 if ((seg_reg == R_FS || seg_reg == R_GS) &&
20054ef0 2747 (env->segs[seg_reg].selector & 0xfffc) == 0) {
eaa728ee 2748 return;
20054ef0 2749 }
eaa728ee
FB
2750
2751 e2 = env->segs[seg_reg].flags;
2752 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2753 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2754 /* data or non conforming code segment */
2755 if (dpl < cpl) {
2756 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2757 }
2758 }
2759}
2760
/* protected mode iret */
/* Common implementation of IRET (is_iret=1) and far RET (is_iret=0)
   in protected mode. 'shift' is the operand size (0=16, 1=32, 2=64)
   and 'addend' the extra bytes to release (RET imm16). Handles
   same-privilege returns, outer-privilege returns (with SS:ESP
   reload and data-segment validation) and, for 32-bit IRET, the
   return to vm86 mode. Raises #GP/#NP with the faulting selector. */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
    /* pop the return frame: EIP, CS and (for iret) EFLAGS */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_eip);
            POPL(ssp, sp, sp_mask, new_cs);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL(ssp, sp, sp_mask, new_eflags);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_eip);
            POPW(ssp, sp, sp_mask, new_cs);
            if (is_iret) {
                POPW(ssp, sp, sp_mask, new_eflags);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (load_segment(&e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    /* a return may never raise privilege */
    if (rpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
        /* also pop the outer SS:ESP from the stack */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL(ssp, sp, sp_mask, new_esp);
                POPL(ssp, sp, sp_mask, new_ss);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW(ssp, sp, sp_mask, new_esp);
                POPW(ssp, sp, sp_mask, new_ss);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    /* 32-bit IRET with VM set in the popped EFLAGS: pop the full
       vm86 frame (ESP, SS, ES, DS, FS, GS) and enter CPL 3 vm86 */
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
2980
/* IRET in protected mode: if EFLAGS.NT is set, return via a task
   switch to the back-linked TSS; otherwise perform a normal
   protected-mode return that also restores EFLAGS. Always clears
   the NMI-blocked state (HF2_NMI_MASK). */
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        /* the nested-task form of IRET is illegal in long mode */
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
#endif
        /* back-link selector is the first word of the current TSS */
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(&e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}
3011
/* Far RET in protected mode: pops CS:EIP plus 'addend' extra bytes
   of stack parameters; EFLAGS is not restored (is_iret = 0). */
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
3016
/* SYSENTER: fast transition to CPL 0 using the MSR-configured
   SYSENTER_CS/ESP/EIP values. Raises #GP(0) if SYSENTER_CS is zero. */
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* long mode: the new CS is a flat 64-bit code segment */
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    /* SS is implicitly SYSENTER_CS + 8: a flat writable data segment */
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
3050
/* SYSEXIT: fast return to CPL 3.  dflag == 2 selects the 64-bit form,
   which uses different selector offsets from SYSENTER_CS.  The new
   stack pointer comes from ECX and the return address from EDX. */
void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    /* only usable at CPL 0 and only when SYSENTER_CS was set up */
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        /* 64-bit: CS = SYSENTER_CS + 32, SS = SYSENTER_CS + 48 - 8,
           both with RPL 3 */
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        /* legacy: CS = SYSENTER_CS + 16, SS = SYSENTER_CS + 24, RPL 3 */
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
}
3090
872929aa
FB
3091#if defined(CONFIG_USER_ONLY)
/* User-mode stub: control registers are not accessible, read as 0. */
target_ulong helper_read_crN(int reg)
{
    return 0;
}
3096
/* User-mode stub: control register writes are ignored. */
void helper_write_crN(int reg, target_ulong t0)
{
}
01df040b
AL
3100
/* User-mode stub: debug register writes are ignored. */
void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
872929aa
FB
3104#else
3105target_ulong helper_read_crN(int reg)
3106{
3107 target_ulong val;
3108
3109 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
20054ef0 3110 switch (reg) {
872929aa
FB
3111 default:
3112 val = env->cr[reg];
3113 break;
3114 case 8:
db620f46 3115 if (!(env->hflags2 & HF2_VINTR_MASK)) {
4a942cea 3116 val = cpu_get_apic_tpr(env->apic_state);
db620f46
FB
3117 } else {
3118 val = env->v_tpr;
3119 }
872929aa
FB
3120 break;
3121 }
3122 return val;
3123}
3124
/* MOV to control register.  Dispatches to the CR-specific update
   helpers so dependent cached state (paging mode, hflags, TLB) is
   refreshed. */
void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch (reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        /* CR8 is the TPR: forward to the APIC unless SVM virtualises
           it; the shadow v_tpr is kept up to date either way */
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env->apic_state, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}
01df040b
AL
3149
3150void helper_movl_drN_T0(int reg, target_ulong t0)
3151{
3152 int i;
3153
3154 if (reg < 4) {
3155 hw_breakpoint_remove(env, reg);
3156 env->dr[reg] = t0;
3157 hw_breakpoint_insert(env, reg);
3158 } else if (reg == 7) {
20054ef0 3159 for (i = 0; i < 4; i++) {
01df040b 3160 hw_breakpoint_remove(env, i);
20054ef0 3161 }
01df040b 3162 env->dr[7] = t0;
20054ef0 3163 for (i = 0; i < 4; i++) {
01df040b 3164 hw_breakpoint_insert(env, i);
20054ef0
BS
3165 }
3166 } else {
01df040b 3167 env->dr[reg] = t0;
20054ef0 3168 }
01df040b 3169}
872929aa 3170#endif
eaa728ee
FB
3171
/* LMSW: load the machine status word (low 4 bits of CR0). */
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}
3179
/* CLTS: clear CR0.TS and the cached TS bit in hflags. */
void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}
3185
eaa728ee
FB
/* INVLPG: invalidate the TLB entry covering addr (after the SVM
   intercept check). */
void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}
3191
/* RDTSC: read the time-stamp counter into EDX:EAX.  Faults with #GP
   when CR4.TSD restricts the instruction to CPL 0 and we are not at
   CPL 0. */
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(env, EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    /* apply the guest TSC offset programmed by a hypervisor (SVM) */
    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
3205
1b050077
AP
/* RDTSCP: RDTSC plus the TSC_AUX MSR value in ECX. */
void helper_rdtscp(void)
{
    helper_rdtsc();
    ECX = (uint32_t)(env->tsc_aux);
}
3211
eaa728ee
FB
/* RDPMC: performance counters are not implemented; after the privilege
   (CR4.PCE) and SVM intercept checks this always raises #UD. */
void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(env, EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
    raise_exception_err(env, EXCP06_ILLOP, 0);
}
3223
3224#if defined(CONFIG_USER_ONLY)
/* User-mode stub: MSR writes are ignored. */
void helper_wrmsr(void)
{
}
3228
/* User-mode stub: MSR reads do nothing (EAX/EDX left unchanged). */
void helper_rdmsr(void)
{
}
3232#else
3233void helper_wrmsr(void)
3234{
3235 uint64_t val;
3236
872929aa
FB
3237 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3238
eaa728ee
FB
3239 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3240
20054ef0 3241 switch ((uint32_t)ECX) {
eaa728ee
FB
3242 case MSR_IA32_SYSENTER_CS:
3243 env->sysenter_cs = val & 0xffff;
3244 break;
3245 case MSR_IA32_SYSENTER_ESP:
3246 env->sysenter_esp = val;
3247 break;
3248 case MSR_IA32_SYSENTER_EIP:
3249 env->sysenter_eip = val;
3250 break;
3251 case MSR_IA32_APICBASE:
4a942cea 3252 cpu_set_apic_base(env->apic_state, val);
eaa728ee
FB
3253 break;
3254 case MSR_EFER:
3255 {
3256 uint64_t update_mask;
20054ef0 3257
eaa728ee 3258 update_mask = 0;
20054ef0 3259 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL) {
eaa728ee 3260 update_mask |= MSR_EFER_SCE;
20054ef0
BS
3261 }
3262 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
eaa728ee 3263 update_mask |= MSR_EFER_LME;
20054ef0
BS
3264 }
3265 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) {
eaa728ee 3266 update_mask |= MSR_EFER_FFXSR;
20054ef0
BS
3267 }
3268 if (env->cpuid_ext2_features & CPUID_EXT2_NX) {
eaa728ee 3269 update_mask |= MSR_EFER_NXE;
20054ef0
BS
3270 }
3271 if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
5efc27bb 3272 update_mask |= MSR_EFER_SVME;
20054ef0
BS
3273 }
3274 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) {
eef26553 3275 update_mask |= MSR_EFER_FFXSR;
20054ef0 3276 }
5efc27bb
FB
3277 cpu_load_efer(env, (env->efer & ~update_mask) |
3278 (val & update_mask));
eaa728ee
FB
3279 }
3280 break;
3281 case MSR_STAR:
3282 env->star = val;
3283 break;
3284 case MSR_PAT:
3285 env->pat = val;
3286 break;
3287 case MSR_VM_HSAVE_PA:
3288 env->vm_hsave = val;
3289 break;
3290#ifdef TARGET_X86_64
3291 case MSR_LSTAR:
3292 env->lstar = val;
3293 break;
3294 case MSR_CSTAR:
3295 env->cstar = val;
3296 break;
3297 case MSR_FMASK:
3298 env->fmask = val;
3299 break;
3300 case MSR_FSBASE:
3301 env->segs[R_FS].base = val;
3302 break;
3303 case MSR_GSBASE:
3304 env->segs[R_GS].base = val;
3305 break;
3306 case MSR_KERNELGSBASE:
3307 env->kernelgsbase = val;
3308 break;
3309#endif
165d9b82
AL
3310 case MSR_MTRRphysBase(0):
3311 case MSR_MTRRphysBase(1):
3312 case MSR_MTRRphysBase(2):
3313 case MSR_MTRRphysBase(3):
3314 case MSR_MTRRphysBase(4):
3315 case MSR_MTRRphysBase(5):
3316 case MSR_MTRRphysBase(6):
3317 case MSR_MTRRphysBase(7):
3318 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3319 break;
3320 case MSR_MTRRphysMask(0):
3321 case MSR_MTRRphysMask(1):
3322 case MSR_MTRRphysMask(2):
3323 case MSR_MTRRphysMask(3):
3324 case MSR_MTRRphysMask(4):
3325 case MSR_MTRRphysMask(5):
3326 case MSR_MTRRphysMask(6):
3327 case MSR_MTRRphysMask(7):
3328 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3329 break;
3330 case MSR_MTRRfix64K_00000:
3331 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3332 break;
3333 case MSR_MTRRfix16K_80000:
3334 case MSR_MTRRfix16K_A0000:
3335 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3336 break;
3337 case MSR_MTRRfix4K_C0000:
3338 case MSR_MTRRfix4K_C8000:
3339 case MSR_MTRRfix4K_D0000:
3340 case MSR_MTRRfix4K_D8000:
3341 case MSR_MTRRfix4K_E0000:
3342 case MSR_MTRRfix4K_E8000:
3343 case MSR_MTRRfix4K_F0000:
3344 case MSR_MTRRfix4K_F8000:
3345 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3346 break;
3347 case MSR_MTRRdefType:
3348 env->mtrr_deftype = val;
3349 break;
79c4f6b0
HY
3350 case MSR_MCG_STATUS:
3351 env->mcg_status = val;
3352 break;
3353 case MSR_MCG_CTL:
3354 if ((env->mcg_cap & MCG_CTL_P)
20054ef0 3355 && (val == 0 || val == ~(uint64_t)0)) {
79c4f6b0 3356 env->mcg_ctl = val;
20054ef0 3357 }
79c4f6b0 3358 break;
1b050077
AP
3359 case MSR_TSC_AUX:
3360 env->tsc_aux = val;
3361 break;
21e87c46
AK
3362 case MSR_IA32_MISC_ENABLE:
3363 env->msr_ia32_misc_enable = val;
3364 break;
eaa728ee 3365 default:
79c4f6b0
HY
3366 if ((uint32_t)ECX >= MSR_MC0_CTL
3367 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3368 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3369 if ((offset & 0x3) != 0
20054ef0 3370 || (val == 0 || val == ~(uint64_t)0)) {
79c4f6b0 3371 env->mce_banks[offset] = val;
20054ef0 3372 }
79c4f6b0
HY
3373 break;
3374 }
20054ef0 3375 /* XXX: exception? */
eaa728ee
FB
3376 break;
3377 }
3378}
3379
3380void helper_rdmsr(void)
3381{
3382 uint64_t val;
872929aa
FB
3383
3384 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3385
20054ef0 3386 switch ((uint32_t)ECX) {
eaa728ee
FB
3387 case MSR_IA32_SYSENTER_CS:
3388 val = env->sysenter_cs;
3389 break;
3390 case MSR_IA32_SYSENTER_ESP:
3391 val = env->sysenter_esp;
3392 break;
3393 case MSR_IA32_SYSENTER_EIP:
3394 val = env->sysenter_eip;
3395 break;
3396 case MSR_IA32_APICBASE:
4a942cea 3397 val = cpu_get_apic_base(env->apic_state);
eaa728ee
FB
3398 break;
3399 case MSR_EFER:
3400 val = env->efer;
3401 break;
3402 case MSR_STAR:
3403 val = env->star;
3404 break;
3405 case MSR_PAT:
3406 val = env->pat;
3407 break;
3408 case MSR_VM_HSAVE_PA:
3409 val = env->vm_hsave;
3410 break;
d5e49a81
AZ
3411 case MSR_IA32_PERF_STATUS:
3412 /* tsc_increment_by_tick */
3413 val = 1000ULL;
3414 /* CPU multiplier */
3415 val |= (((uint64_t)4ULL) << 40);
3416 break;
eaa728ee
FB
3417#ifdef TARGET_X86_64
3418 case MSR_LSTAR:
3419 val = env->lstar;
3420 break;
3421 case MSR_CSTAR:
3422 val = env->cstar;
3423 break;
3424 case MSR_FMASK:
3425 val = env->fmask;
3426 break;
3427 case MSR_FSBASE:
3428 val = env->segs[R_FS].base;
3429 break;
3430 case MSR_GSBASE:
3431 val = env->segs[R_GS].base;
3432 break;
3433 case MSR_KERNELGSBASE:
3434 val = env->kernelgsbase;
3435 break;
1b050077
AP
3436 case MSR_TSC_AUX:
3437 val = env->tsc_aux;
3438 break;
eaa728ee 3439#endif
165d9b82
AL
3440 case MSR_MTRRphysBase(0):
3441 case MSR_MTRRphysBase(1):
3442 case MSR_MTRRphysBase(2):
3443 case MSR_MTRRphysBase(3):
3444 case MSR_MTRRphysBase(4):
3445 case MSR_MTRRphysBase(5):
3446 case MSR_MTRRphysBase(6):
3447 case MSR_MTRRphysBase(7):
3448 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3449 break;
3450 case MSR_MTRRphysMask(0):
3451 case MSR_MTRRphysMask(1):
3452 case MSR_MTRRphysMask(2):
3453 case MSR_MTRRphysMask(3):
3454 case MSR_MTRRphysMask(4):
3455 case MSR_MTRRphysMask(5):
3456 case MSR_MTRRphysMask(6):
3457 case MSR_MTRRphysMask(7):
3458 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3459 break;
3460 case MSR_MTRRfix64K_00000:
3461 val = env->mtrr_fixed[0];
3462 break;
3463 case MSR_MTRRfix16K_80000:
3464 case MSR_MTRRfix16K_A0000:
3465 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3466 break;
3467 case MSR_MTRRfix4K_C0000:
3468 case MSR_MTRRfix4K_C8000:
3469 case MSR_MTRRfix4K_D0000:
3470 case MSR_MTRRfix4K_D8000:
3471 case MSR_MTRRfix4K_E0000:
3472 case MSR_MTRRfix4K_E8000:
3473 case MSR_MTRRfix4K_F0000:
3474 case MSR_MTRRfix4K_F8000:
3475 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3476 break;
3477 case MSR_MTRRdefType:
3478 val = env->mtrr_deftype;
3479 break;
dd5e3b17 3480 case MSR_MTRRcap:
20054ef0
BS
3481 if (env->cpuid_features & CPUID_MTRR) {
3482 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
3483 MSR_MTRRcap_WC_SUPPORTED;
3484 } else {
3485 /* XXX: exception? */
dd5e3b17 3486 val = 0;
20054ef0 3487 }
dd5e3b17 3488 break;
79c4f6b0
HY
3489 case MSR_MCG_CAP:
3490 val = env->mcg_cap;
3491 break;
3492 case MSR_MCG_CTL:
20054ef0 3493 if (env->mcg_cap & MCG_CTL_P) {
79c4f6b0 3494 val = env->mcg_ctl;
20054ef0 3495 } else {
79c4f6b0 3496 val = 0;
20054ef0 3497 }
79c4f6b0
HY
3498 break;
3499 case MSR_MCG_STATUS:
3500 val = env->mcg_status;
3501 break;
21e87c46
AK
3502 case MSR_IA32_MISC_ENABLE:
3503 val = env->msr_ia32_misc_enable;
3504 break;
eaa728ee 3505 default:
79c4f6b0
HY
3506 if ((uint32_t)ECX >= MSR_MC0_CTL
3507 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3508 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3509 val = env->mce_banks[offset];
3510 break;
3511 }
20054ef0 3512 /* XXX: exception? */
eaa728ee
FB
3513 val = 0;
3514 break;
3515 }
3516 EAX = (uint32_t)(val);
3517 EDX = (uint32_t)(val >> 32);
3518}
3519#endif
3520
/* LSL: return the segment limit for selector1 and set ZF on success;
   on any check failure ZF is cleared and 0 is returned (no fault). */
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    /* materialise the lazy flags: CC_SRC below must carry all of them */
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(&e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        /* system descriptors: only TSS/LDT types have a usable limit */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
3568
/* LAR: return the access rights bytes of selector1's descriptor
   (masked to 0x00f0ff00) and set ZF on success; on failure ZF is
   cleared and 0 is returned (no fault). */
target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    /* materialise the lazy flags: CC_SRC below must carry all of them */
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(&e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        /* system descriptors: LAR accepts more types than LSL
           (call gates 4/12, task gate 5, in addition to TSS/LDT) */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
3617
/* VERR: set ZF if the segment described by selector1 is readable at
   the current privilege level, clear ZF otherwise (never faults). */
void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    /* materialise the lazy flags: CC_SRC below must carry all of them */
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(&e1, &e2, selector) != 0) {
        goto fail;
    }
    /* system descriptors are never readable via VERR */
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        /* code segments must have the readable bit set */
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        /* non-conforming code is additionally privilege-checked */
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
3655
/* VERW: set ZF if the segment described by selector1 is writable at
   the current privilege level, clear ZF otherwise (never faults). */
void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    /* materialise the lazy flags: CC_SRC below must carry all of them */
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(&e1, &e2, selector) != 0) {
        goto fail;
    }
    /* system descriptors are never writable via VERW */
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        /* code segments are never writable */
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
3689
f299f437
BS
3690#if defined(CONFIG_USER_ONLY)
/* Load a segment register on behalf of linux-user code.  env is
   temporarily switched to the given CPU state because the segment
   helpers operate on the global env. */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        /* real or vm86 mode: base = selector << 4, no descriptor checks */
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
eaa728ee 3706#endif
20054ef0 3707
f299f437
BS
3708#ifdef TARGET_X86_64
/* 128-bit add: (*phigh:*plow) += (b:a), propagating the carry out of
   the low 64 bits into the high word. */
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    uint64_t old_low = *plow;

    *plow = old_low + a;
    /* unsigned wrap-around in the low word means a carry into the high */
    if (*plow < old_low) {
        (*phigh)++;
    }
    *phigh += b;
}
3718
/* 128-bit two's-complement negation of *phigh:*plow (~x + 1, with the
   increment's carry folded in directly instead of calling add128). */
static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~*plow + 1;
    *phigh = ~*phigh;
    /* the +1 carries into the high word only when the low word wrapped */
    if (*plow == 0) {
        (*phigh)++;
    }
}
3725
f299f437
BS
/* Unsigned 128/64 division: divides *phigh:*plow by b, leaving the
   quotient in *plow and the remainder in *phigh.  Returns TRUE when
   the quotient does not fit in 64 bits.  (Callers guarantee b != 0.) */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t num_lo = *plow;
    uint64_t num_hi = *phigh;
    int i;

    if (num_hi == 0) {
        /* fits in a native 64-bit division */
        *plow = num_lo / b;
        *phigh = num_lo % b;
        return 0;
    }
    if (num_hi >= b) {
        /* the quotient would need more than 64 bits */
        return 1;
    }
    /* XXX: use a better algorithm -- plain restoring bit-by-bit division:
       shift the dividend left one bit at a time, subtracting b from the
       running remainder and shifting the quotient bit into num_lo */
    for (i = 0; i < 64; i++) {
        int top_bit = num_hi >> 63;
        num_hi = (num_hi << 1) | (num_lo >> 63);
        if (top_bit || num_hi >= b) {
            num_hi -= b;
            num_lo = (num_lo << 1) | 1;
        } else {
            num_lo = num_lo << 1;
        }
    }
#if defined(DEBUG_MULDIV)
    printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64
           ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
           *phigh, *plow, b, num_lo, num_hi);
#endif
    *plow = num_lo;
    *phigh = num_hi;
    return 0;
}
3765
f299f437
BS
/* Signed 128/64 division built on the unsigned div64: divide by
   magnitude, then fix up the signs.  Returns TRUE on overflow
   (quotient not representable as a signed 64-bit value). */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;

    sa = ((int64_t)*phigh < 0);
    if (sa) {
        neg128(plow, phigh);
    }
    sb = (b < 0);
    if (sb) {
        b = -b;
    }
    if (div64(plow, phigh, b) != 0) {
        return 1;
    }
    if (sa ^ sb) {
        /* negative quotient: its magnitude may reach 2^63 exactly */
        if (*plow > (1ULL << 63)) {
            return 1;
        }
        *plow = -*plow;
    } else {
        /* positive quotient: magnitude must stay below 2^63 */
        if (*plow >= (1ULL << 63)) {
            return 1;
        }
    }
    if (sa) {
        /* the remainder takes the sign of the dividend */
        *phigh = -*phigh;
    }
    return 0;
}
3797
/* MUL with 64-bit operand: EDX:EAX = EAX * t0 (unsigned).  CC_SRC
   receives the high half so CF/OF can later be derived from whether
   it is nonzero. */
void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}
3808
/* IMUL with 64-bit operand: EDX:EAX = EAX * t0 (signed).  CC_SRC is
   nonzero iff the high half is not the sign extension of the low half,
   i.e. the product overflowed 64 bits (sets CF/OF). */
void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}
3819
/* Two-operand 64-bit IMUL: return the low half of t0 * t1 (signed);
   CC_SRC is nonzero iff the product overflowed 64 bits. */
target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}
3829
/* DIV with 64-bit operand: divide EDX:EAX by t0; quotient to EAX,
   remainder to EDX.  Both divide-by-zero and quotient overflow raise
   #DE, as on real hardware. */
void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;

    if (t0 == 0) {
        raise_exception(env, EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0)) {
        raise_exception(env, EXCP00_DIVZ);
    }
    EAX = r0;
    EDX = r1;
}
3845
/* IDIV with 64-bit operand: signed divide of EDX:EAX by t0; quotient
   to EAX, remainder to EDX.  Divide-by-zero and overflow both raise
   #DE, as on real hardware. */
void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;

    if (t0 == 0) {
        raise_exception(env, EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0)) {
        raise_exception(env, EXCP00_DIVZ);
    }
    EAX = r0;
    EDX = r1;
}
f299f437 3861#endif
eaa728ee 3862
/* Put the CPU into the halted state and leave the execution loop.
   Does not return to the caller (cpu_loop_exit longjmps away). */
static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit(env);
}
3870
/* HLT: advance EIP past the instruction first, because do_hlt() exits
   the CPU loop and execution resumes at EIP on wakeup. */
void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}
3878
/* MONITOR: only the ECX == 0 form (no extensions) is accepted; the
   monitored address itself is not recorded. */
void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    /* XXX: store address? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}
3887
/* MWAIT: behaves like HLT on a single-CPU configuration; with more
   than one CPU it is a no-op so a store by another CPU can wake this
   one without wakeup plumbing. */
void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}
3904
/* Leave the CPU loop with EXCP_DEBUG (breakpoint/watchpoint hit);
   does not return. */
void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit(env);
}
3910
/* Clear the resume flag (RF), re-enabling debug faults for the next
   instruction. */
void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}
3915
/* CLI: clear the interrupt-enable flag. */
void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}
3920
/* STI: set the interrupt-enable flag (the one-instruction interrupt
   shadow is handled separately via helper_set_inhibit_irq). */
void helper_sti(void)
{
    env->eflags |= IF_MASK;
}
3925
f299f437
BS
#if 0
/* vm86plus instructions -- currently disabled, never compiled in */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    /* a pending virtual interrupt (VIP) together with VIF raises #GP */
    if (env->eflags & VIP_MASK) {
        raise_exception(env, EXCP0D_GPF);
    }
}
#endif
eaa728ee 3941
/* Block interrupt delivery for one instruction (interrupt shadow after
   STI / MOV SS / POP SS). */
void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}
3946
/* End the one-instruction interrupt shadow. */
void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}
3951
/* BOUND (16-bit): raise #BR if the sign-extended 16-bit value v lies
   outside the [low, high] bound pair stored at a0. */
void helper_boundw(target_ulong a0, int v)
{
    int low, high;

    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(env, EXCP05_BOUND);
    }
}
3963
/* BOUND (32-bit): raise #BR if v lies outside the [low, high] bound
   pair stored at a0. */
void helper_boundl(target_ulong a0, int v)
{
    int low, high;

    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(env, EXCP05_BOUND);
    }
}
3974
eaa728ee
FB
3975#if !defined(CONFIG_USER_ONLY)
3976
3977#define MMUSUFFIX _mmu
3978
3979#define SHIFT 0
3980#include "softmmu_template.h"
3981
3982#define SHIFT 1
3983#include "softmmu_template.h"
3984
3985#define SHIFT 2
3986#include "softmmu_template.h"
3987
3988#define SHIFT 3
3989#include "softmmu_template.h"
3990
3991#endif
3992
d9957a8b 3993#if !defined(CONFIG_USER_ONLY)
eaa728ee
FB
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUX86State *env1, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    TranslationBlock *tb;
    int ret;
    CPUX86State *saved_env;

    /* the MMU fault handler operates on the global env */
    saved_env = env;
    env = env1;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            tb = tb_find_pc(retaddr);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault: restore the guest CPU state to the
                   faulting instruction before raising the exception */
                cpu_restore_state(tb, env, retaddr);
            }
        }
        /* does not return: longjmps back into the CPU loop */
        raise_exception_err(env, env->exception_index, env->error_code);
    }
    env = saved_env;
}
d9957a8b 4023#endif
eaa728ee
FB
4024
4025/* Secure Virtual Machine helpers */
4026
eaa728ee
FB
4027#if defined(CONFIG_USER_ONLY)
4028
/* User-mode build: SVM (AMD virtualisation) is not available, so all
   SVM helpers below are empty stubs. */
void helper_vmrun(int aflag, int next_eip_addend)
{
}

void helper_vmmcall(void)
{
}

void helper_vmload(int aflag)
{
}

void helper_vmsave(int aflag)
{
}

void helper_stgi(void)
{
}

void helper_clgi(void)
{
}

void helper_skinit(void)
{
}

void helper_invlpga(int aflag)
{
}

void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
4082#else
4083
/* Store a segment cache into a vmcb_seg at physical address addr; the
   CPU's cached flags layout is re-packed into the VMCB attrib format. */
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
20054ef0 4096
/* Load a segment cache from a vmcb_seg at physical address addr,
   unpacking the VMCB attrib format back into the CPU's cached flags
   layout (inverse of svm_save_seg). */
static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}
4107
/* Load a segment register directly from a VMCB segment descriptor,
   refreshing the CPU's segment cache. */
static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUX86State *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
4117
db620f46 4118void helper_vmrun(int aflag, int next_eip_addend)
eaa728ee
FB
4119{
4120 target_ulong addr;
4121 uint32_t event_inj;
4122 uint32_t int_ctl;
4123
872929aa
FB
4124 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4125
20054ef0 4126 if (aflag == 2) {
914178d3 4127 addr = EAX;
20054ef0 4128 } else {
914178d3 4129 addr = (uint32_t)EAX;
20054ef0 4130 }
914178d3 4131
93fcfe39 4132 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
eaa728ee
FB
4133
4134 env->vm_vmcb = addr;
4135
4136 /* save the current CPU state in the hsave page */
20054ef0
BS
4137 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
4138 env->gdt.base);
4139 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
4140 env->gdt.limit);
eaa728ee 4141
20054ef0
BS
4142 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
4143 env->idt.base);
4144 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
4145 env->idt.limit);
eaa728ee
FB
4146
4147 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4148 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4149 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4150 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
eaa728ee
FB
4151 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4152 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4153
4154 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
20054ef0
BS
4155 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags),
4156 compute_eflags());
eaa728ee 4157
20054ef0
BS
4158 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4159 &env->segs[R_ES]);
4160 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
872929aa 4161 &env->segs[R_CS]);
20054ef0 4162 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
872929aa 4163 &env->segs[R_SS]);
20054ef0 4164 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
872929aa 4165 &env->segs[R_DS]);
eaa728ee 4166
db620f46
FB
4167 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4168 EIP + next_eip_addend);
eaa728ee
FB
4169 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4170 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4171
4172 /* load the interception bitmaps so we do not need to access the
4173 vmcb in svm mode */
20054ef0
BS
4174 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4175 control.intercept));
4176 env->intercept_cr_read = lduw_phys(env->vm_vmcb +
4177 offsetof(struct vmcb,
4178 control.intercept_cr_read));
4179 env->intercept_cr_write = lduw_phys(env->vm_vmcb +
4180 offsetof(struct vmcb,
4181 control.intercept_cr_write));
4182 env->intercept_dr_read = lduw_phys(env->vm_vmcb +
4183 offsetof(struct vmcb,
4184 control.intercept_dr_read));
4185 env->intercept_dr_write = lduw_phys(env->vm_vmcb +
4186 offsetof(struct vmcb,
4187 control.intercept_dr_write));
4188 env->intercept_exceptions = ldl_phys(env->vm_vmcb +
4189 offsetof(struct vmcb,
4190 control.intercept_exceptions
4191 ));
eaa728ee 4192
872929aa
FB
4193 /* enable intercepts */
4194 env->hflags |= HF_SVMI_MASK;
4195
20054ef0
BS
4196 env->tsc_offset = ldq_phys(env->vm_vmcb +
4197 offsetof(struct vmcb, control.tsc_offset));
33c263df 4198
20054ef0
BS
4199 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4200 save.gdtr.base));
4201 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
4202 save.gdtr.limit));
eaa728ee 4203
20054ef0
BS
4204 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4205 save.idtr.base));
4206 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
4207 save.idtr.limit));
eaa728ee
FB
4208
4209 /* clear exit_info_2 so we behave like the real hardware */
4210 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4211
20054ef0
BS
4212 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4213 save.cr0)));
4214 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4215 save.cr4)));
4216 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4217 save.cr3)));
eaa728ee
FB
4218 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4219 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
db620f46 4220 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
eaa728ee 4221 if (int_ctl & V_INTR_MASKING_MASK) {
db620f46
FB
4222 env->v_tpr = int_ctl & V_TPR_MASK;
4223 env->hflags2 |= HF2_VINTR_MASK;
20054ef0 4224 if (env->eflags & IF_MASK) {
db620f46 4225 env->hflags2 |= HF2_HIF_MASK;
20054ef0 4226 }
eaa728ee
FB
4227 }
4228
20054ef0 4229 cpu_load_efer(env,
5efc27bb 4230 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
eaa728ee
FB
4231 env->eflags = 0;
4232 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4233 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4234 CC_OP = CC_OP_EFLAGS;
eaa728ee 4235
872929aa
FB
4236 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
4237 env, R_ES);
4238 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4239 env, R_CS);
4240 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
4241 env, R_SS);
4242 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
4243 env, R_DS);
eaa728ee
FB
4244
4245 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4246 env->eip = EIP;
4247 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4248 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4249 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4250 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
20054ef0
BS
4251 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb,
4252 save.cpl)));
eaa728ee
FB
4253
4254 /* FIXME: guest state consistency checks */
4255
20054ef0
BS
4256 switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4257 case TLB_CONTROL_DO_NOTHING:
4258 break;
4259 case TLB_CONTROL_FLUSH_ALL_ASID:
4260 /* FIXME: this is not 100% correct but should work for now */
4261 tlb_flush(env, 1);
eaa728ee
FB
4262 break;
4263 }
4264
960540b4 4265 env->hflags2 |= HF2_GIF_MASK;
eaa728ee 4266
db620f46
FB
4267 if (int_ctl & V_IRQ_MASK) {
4268 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
4269 }
4270
eaa728ee 4271 /* maybe we need to inject an event */
20054ef0
BS
4272 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
4273 control.event_inj));
eaa728ee
FB
4274 if (event_inj & SVM_EVTINJ_VALID) {
4275 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
4276 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
20054ef0
BS
4277 uint32_t event_inj_err = ldl_phys(env->vm_vmcb +
4278 offsetof(struct vmcb,
4279 control.event_inj_err));
eaa728ee 4280
93fcfe39 4281 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
eaa728ee
FB
4282 /* FIXME: need to implement valid_err */
4283 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
4284 case SVM_EVTINJ_TYPE_INTR:
20054ef0
BS
4285 env->exception_index = vector;
4286 env->error_code = event_inj_err;
4287 env->exception_is_int = 0;
4288 env->exception_next_eip = -1;
4289 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
4290 /* XXX: is it always correct? */
77b2bc2c 4291 do_interrupt_x86_hardirq(env, vector, 1);
20054ef0 4292 break;
eaa728ee 4293 case SVM_EVTINJ_TYPE_NMI:
20054ef0
BS
4294 env->exception_index = EXCP02_NMI;
4295 env->error_code = event_inj_err;
4296 env->exception_is_int = 0;
4297 env->exception_next_eip = EIP;
4298 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
4299 cpu_loop_exit(env);
4300 break;
eaa728ee 4301 case SVM_EVTINJ_TYPE_EXEPT:
20054ef0
BS
4302 env->exception_index = vector;
4303 env->error_code = event_inj_err;
4304 env->exception_is_int = 0;
4305 env->exception_next_eip = -1;
4306 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
4307 cpu_loop_exit(env);
4308 break;
eaa728ee 4309 case SVM_EVTINJ_TYPE_SOFT:
20054ef0
BS
4310 env->exception_index = vector;
4311 env->error_code = event_inj_err;
4312 env->exception_is_int = 1;
4313 env->exception_next_eip = EIP;
4314 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
4315 cpu_loop_exit(env);
4316 break;
eaa728ee 4317 }
20054ef0
BS
4318 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
4319 env->error_code);
eaa728ee 4320 }
eaa728ee
FB
4321}
4322
/* VMMCALL: give the hypervisor a chance to intercept, then always
   raise #UD — QEMU itself implements no hypercall interface. */
void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}
4328
914178d3 4329void helper_vmload(int aflag)
eaa728ee
FB
4330{
4331 target_ulong addr;
20054ef0 4332
872929aa
FB
4333 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
4334
20054ef0 4335 if (aflag == 2) {
914178d3 4336 addr = EAX;
20054ef0 4337 } else {
914178d3 4338 addr = (uint32_t)EAX;
20054ef0 4339 }
914178d3 4340
20054ef0
BS
4341 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
4342 "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4343 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4344 env->segs[R_FS].base);
eaa728ee 4345
872929aa
FB
4346 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
4347 env, R_FS);
4348 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
4349 env, R_GS);
4350 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
4351 &env->tr);
4352 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
4353 &env->ldt);
eaa728ee
FB
4354
4355#ifdef TARGET_X86_64
20054ef0
BS
4356 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb,
4357 save.kernel_gs_base));
eaa728ee
FB
4358 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
4359 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
4360 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
4361#endif
4362 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
4363 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
20054ef0
BS
4364 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb,
4365 save.sysenter_esp));
4366 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb,
4367 save.sysenter_eip));
eaa728ee
FB
4368}
4369
914178d3 4370void helper_vmsave(int aflag)
eaa728ee
FB
4371{
4372 target_ulong addr;
20054ef0 4373
872929aa 4374 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
914178d3 4375
20054ef0 4376 if (aflag == 2) {
914178d3 4377 addr = EAX;
20054ef0 4378 } else {
914178d3 4379 addr = (uint32_t)EAX;
20054ef0 4380 }
914178d3 4381
20054ef0
BS
4382 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
4383 "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4384 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4385 env->segs[R_FS].base);
eaa728ee 4386
20054ef0 4387 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
872929aa 4388 &env->segs[R_FS]);
20054ef0 4389 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
872929aa 4390 &env->segs[R_GS]);
20054ef0 4391 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
872929aa 4392 &env->tr);
20054ef0 4393 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
872929aa 4394 &env->ldt);
eaa728ee
FB
4395
4396#ifdef TARGET_X86_64
20054ef0
BS
4397 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base),
4398 env->kernelgsbase);
eaa728ee
FB
4399 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
4400 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
4401 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
4402#endif
4403 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
4404 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
20054ef0
BS
4405 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp),
4406 env->sysenter_esp);
4407 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip),
4408 env->sysenter_eip);
eaa728ee
FB
4409}
4410
872929aa
FB
/* STGI: set the Global Interrupt Flag, re-enabling interrupt
   delivery (paired with CLGI). */
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}
4416
/* CLGI: clear the Global Interrupt Flag, blocking interrupt
   delivery until a matching STGI. */
void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}
4422
eaa728ee
FB
/* SKINIT (secure init/startup) is not emulated: after the intercept
   check it always raises #UD. */
void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}
4429
914178d3 4430void helper_invlpga(int aflag)
eaa728ee 4431{
914178d3 4432 target_ulong addr;
20054ef0 4433
872929aa 4434 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
20054ef0
BS
4435
4436 if (aflag == 2) {
914178d3 4437 addr = EAX;
20054ef0 4438 } else {
914178d3 4439 addr = (uint32_t)EAX;
20054ef0 4440 }
914178d3
FB
4441
4442 /* XXX: could use the ASID to see if it is needed to do the
4443 flush */
4444 tlb_flush_page(env, addr);
eaa728ee
FB
4445}
4446
/* Check whether the running guest has configured an intercept for the
   given SVM exit code; if so, deliver a #VMEXIT with exit_info_1 set
   to 'param'.  No-op when we are not inside a guest (SVMI clear). */
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

            /* Each MSR occupies two adjacent bits in the permission
               bitmap (read then write).  t1 is the byte offset within
               the map and t0 the bit offset of the read bit; 'param'
               (0 = read, 1 = write) selects the final bit below. */
            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                /* MSR outside any mapped range: always intercepted */
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(type, param);
            }
        }
        break;
    default:
        /* all remaining exit codes map directly onto bits of the main
           64-bit intercept vector */
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
4519
77b2bc2c
BS
/* Externally callable wrapper: temporarily switch the global 'env' to
   env1 for the intercept check, restoring it afterwards.  NOTE(review):
   if the check triggers a #VMEXIT, helper_vmexit() exits via
   cpu_loop_exit() and the restore below is skipped — presumably
   intentional; confirm against the callers. */
void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
                                   uint64_t param)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;
    helper_svm_check_intercept_param(type, param);
    env = saved_env;
}
4530
20054ef0 4531void helper_svm_check_io(uint32_t port, uint32_t param,
eaa728ee
FB
4532 uint32_t next_eip_addend)
4533{
872929aa 4534 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
eaa728ee 4535 /* FIXME: this should be read in at vmrun (faster this way?) */
20054ef0
BS
4536 uint64_t addr = ldq_phys(env->vm_vmcb +
4537 offsetof(struct vmcb, control.iopm_base_pa));
eaa728ee 4538 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
20054ef0
BS
4539
4540 if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
eaa728ee 4541 /* next EIP */
20054ef0 4542 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
eaa728ee
FB
4543 env->eip + next_eip_addend);
4544 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
4545 }
4546 }
4547}
4548
/* Note: currently only 32 bits of exit_code are used */
/* Deliver a #VMEXIT: record the exit reason, save the guest state into
   the VMCB, reload the host state from the hsave area, and return to
   the host via cpu_loop_exit() — this function never returns. */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  EIP);

    /* record (and clear) a pending interrupt shadow in the VMCB */
    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    /* fold the current virtual TPR / pending virtual IRQ back into the
       VMCB's int_ctl field */
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    /* propagate any event that was being injected at exit time, then
       clear the injection field */
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}
4708
77b2bc2c
BS
/* Externally callable #VMEXIT entry point.  The global 'env' is not
   restored afterwards because helper_vmexit() never returns — it ends
   in cpu_loop_exit(). */
void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
{
    env = nenv;
    helper_vmexit(exit_code, exit_info_1);
}
4714
eaa728ee
FB
4715#endif
4716
eaa728ee 4717#define SHIFT 0
38de4c46 4718#include "cc_helper_template.h"
eaa728ee
FB
4719#undef SHIFT
4720
4721#define SHIFT 1
38de4c46 4722#include "cc_helper_template.h"
eaa728ee
FB
4723#undef SHIFT
4724
4725#define SHIFT 2
38de4c46 4726#include "cc_helper_template.h"
eaa728ee
FB
4727#undef SHIFT
4728
4729#ifdef TARGET_X86_64
4730
4731#define SHIFT 3
38de4c46 4732#include "cc_helper_template.h"
eaa728ee
FB
4733#undef SHIFT
4734
4735#endif
4736
38de4c46
BS
4737#define SHIFT 0
4738#include "shift_helper_template.h"
4739#undef SHIFT
4740
4741#define SHIFT 1
4742#include "shift_helper_template.h"
4743#undef SHIFT
4744
4745#define SHIFT 2
4746#include "shift_helper_template.h"
4747#undef SHIFT
4748
4749#ifdef TARGET_X86_64
4750#define SHIFT 3
4751#include "shift_helper_template.h"
4752#undef SHIFT
4753#endif
4754
eaa728ee
FB
4755/* bit operations */
4756target_ulong helper_bsf(target_ulong t0)
4757{
4758 int count;
4759 target_ulong res;
4760
4761 res = t0;
4762 count = 0;
4763 while ((res & 1) == 0) {
4764 count++;
4765 res >>= 1;
4766 }
4767 return count;
4768}
4769
31501a71 4770target_ulong helper_lzcnt(target_ulong t0, int wordsize)
eaa728ee
FB
4771{
4772 int count;
4773 target_ulong res, mask;
31501a71
AP
4774
4775 if (wordsize > 0 && t0 == 0) {
4776 return wordsize;
4777 }
eaa728ee
FB
4778 res = t0;
4779 count = TARGET_LONG_BITS - 1;
4780 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
4781 while ((res & mask) == 0) {
4782 count--;
4783 res <<= 1;
4784 }
31501a71
AP
4785 if (wordsize > 0) {
4786 return wordsize - 1 - count;
4787 }
eaa728ee
FB
4788 return count;
4789}
4790
31501a71
AP
/* BSR: index of the most significant set bit — exactly what
   helper_lzcnt() returns when called with wordsize 0. */
target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}
eaa728ee
FB
4795
/* With CC_OP_EFLAGS the flags are already materialized in CC_SRC. */
static int compute_all_eflags(void)
{
    return CC_SRC;
}
4800
/* Carry flag only, extracted from the materialized flags in CC_SRC. */
static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
4805
a7812ae4
PB
/* Recompute all lazily-evaluated condition-code bits from the saved
   CC_SRC/CC_DST state, dispatching on the last-operation code 'op'.
   The per-width compute_all_* helpers are generated from
   cc_helper_template.h for B/W/L (and Q on 64-bit targets). */
uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */
        return 0;

    case CC_OP_EFLAGS:
        return compute_all_eflags();

    case CC_OP_MULB:
        return compute_all_mulb();
    case CC_OP_MULW:
        return compute_all_mulw();
    case CC_OP_MULL:
        return compute_all_mull();

    case CC_OP_ADDB:
        return compute_all_addb();
    case CC_OP_ADDW:
        return compute_all_addw();
    case CC_OP_ADDL:
        return compute_all_addl();

    case CC_OP_ADCB:
        return compute_all_adcb();
    case CC_OP_ADCW:
        return compute_all_adcw();
    case CC_OP_ADCL:
        return compute_all_adcl();

    case CC_OP_SUBB:
        return compute_all_subb();
    case CC_OP_SUBW:
        return compute_all_subw();
    case CC_OP_SUBL:
        return compute_all_subl();

    case CC_OP_SBBB:
        return compute_all_sbbb();
    case CC_OP_SBBW:
        return compute_all_sbbw();
    case CC_OP_SBBL:
        return compute_all_sbbl();

    case CC_OP_LOGICB:
        return compute_all_logicb();
    case CC_OP_LOGICW:
        return compute_all_logicw();
    case CC_OP_LOGICL:
        return compute_all_logicl();

    case CC_OP_INCB:
        return compute_all_incb();
    case CC_OP_INCW:
        return compute_all_incw();
    case CC_OP_INCL:
        return compute_all_incl();

    case CC_OP_DECB:
        return compute_all_decb();
    case CC_OP_DECW:
        return compute_all_decw();
    case CC_OP_DECL:
        return compute_all_decl();

    case CC_OP_SHLB:
        return compute_all_shlb();
    case CC_OP_SHLW:
        return compute_all_shlw();
    case CC_OP_SHLL:
        return compute_all_shll();

    case CC_OP_SARB:
        return compute_all_sarb();
    case CC_OP_SARW:
        return compute_all_sarw();
    case CC_OP_SARL:
        return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ:
        return compute_all_mulq();

    case CC_OP_ADDQ:
        return compute_all_addq();

    case CC_OP_ADCQ:
        return compute_all_adcq();

    case CC_OP_SUBQ:
        return compute_all_subq();

    case CC_OP_SBBQ:
        return compute_all_sbbq();

    case CC_OP_LOGICQ:
        return compute_all_logicq();

    case CC_OP_INCQ:
        return compute_all_incq();

    case CC_OP_DECQ:
        return compute_all_decq();

    case CC_OP_SHLQ:
        return compute_all_shlq();

    case CC_OP_SARQ:
        return compute_all_sarq();
#endif
    }
}
4918
317ac620 4919uint32_t cpu_cc_compute_all(CPUX86State *env1, int op)
e694d4e2 4920{
317ac620 4921 CPUX86State *saved_env;
e694d4e2
BS
4922 uint32_t ret;
4923
4924 saved_env = env;
4925 env = env1;
4926 ret = helper_cc_compute_all(op);
4927 env = saved_env;
4928 return ret;
4929}
4930
a7812ae4
PB
/* Recompute only the carry flag from the lazy CC state.  Several
   operation codes share a single helper: every MUL width dispatches to
   compute_c_mull(), and all INC/DEC widths use compute_c_incl() —
   presumably because the carry computation is width-independent for
   those operations (confirm in cc_helper_template.h). */
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */
        return 0;

    case CC_OP_EFLAGS:
        return compute_c_eflags();

    case CC_OP_MULB:
        return compute_c_mull();
    case CC_OP_MULW:
        return compute_c_mull();
    case CC_OP_MULL:
        return compute_c_mull();

    case CC_OP_ADDB:
        return compute_c_addb();
    case CC_OP_ADDW:
        return compute_c_addw();
    case CC_OP_ADDL:
        return compute_c_addl();

    case CC_OP_ADCB:
        return compute_c_adcb();
    case CC_OP_ADCW:
        return compute_c_adcw();
    case CC_OP_ADCL:
        return compute_c_adcl();

    case CC_OP_SUBB:
        return compute_c_subb();
    case CC_OP_SUBW:
        return compute_c_subw();
    case CC_OP_SUBL:
        return compute_c_subl();

    case CC_OP_SBBB:
        return compute_c_sbbb();
    case CC_OP_SBBW:
        return compute_c_sbbw();
    case CC_OP_SBBL:
        return compute_c_sbbl();

    case CC_OP_LOGICB:
        return compute_c_logicb();
    case CC_OP_LOGICW:
        return compute_c_logicw();
    case CC_OP_LOGICL:
        return compute_c_logicl();

    case CC_OP_INCB:
        return compute_c_incl();
    case CC_OP_INCW:
        return compute_c_incl();
    case CC_OP_INCL:
        return compute_c_incl();

    case CC_OP_DECB:
        return compute_c_incl();
    case CC_OP_DECW:
        return compute_c_incl();
    case CC_OP_DECL:
        return compute_c_incl();

    case CC_OP_SHLB:
        return compute_c_shlb();
    case CC_OP_SHLW:
        return compute_c_shlw();
    case CC_OP_SHLL:
        return compute_c_shll();

    case CC_OP_SARB:
        return compute_c_sarl();
    case CC_OP_SARW:
        return compute_c_sarl();
    case CC_OP_SARL:
        return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ:
        return compute_c_mull();

    case CC_OP_ADDQ:
        return compute_c_addq();

    case CC_OP_ADCQ:
        return compute_c_adcq();

    case CC_OP_SUBQ:
        return compute_c_subq();

    case CC_OP_SBBQ:
        return compute_c_sbbq();

    case CC_OP_LOGICQ:
        return compute_c_logicq();

    case CC_OP_INCQ:
        return compute_c_incl();

    case CC_OP_DECQ:
        return compute_c_incl();

    case CC_OP_SHLQ:
        return compute_c_shlq();

    case CC_OP_SARQ:
        return compute_c_sarl();
#endif
    }
}
This page took 1.249077 seconds and 4 git commands to generate.