]> Git Repo - qemu.git/blame - target-i386/helper.c
Fix >4G physical memory dump for Sparc32
[qemu.git] / target-i386 / helper.c
CommitLineData
2c0262af
FB
1/*
2 * i386 helpers
5fafdf24 3 *
2c0262af
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include "exec.h"
21
//#define DEBUG_PCALL

/* Disabled debug shim: when enabled, logs the __LINE__ of every
   raise_exception_err() call site to logfile before invoking the real
   function (the parenthesized name suppresses macro recursion). */
#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
32
/* x86 parity-flag lookup: parity_table[b] is CC_P when byte b contains
   an even number of set bits (PF semantics), 0 otherwise. */
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
/* modulo 17 table */
/* Rotation-count reduction for 16-bit RCL/RCR: maps a masked 5-bit
   count to count mod 17 (16 data bits plus CF take part in the rotate). */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
/* modulo 9 table */
/* Rotation-count reduction for 8-bit RCL/RCR: maps a masked 5-bit
   count to count mod 9 (8 data bits plus CF take part in the rotate). */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
/* Constants loaded by the x87 FLDZ/FLD1/FLDPI/FLDLG2/FLDLN2/FLDL2E/FLDL2T
   instructions, indexed by the instruction's constant number. */
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
/* thread support */

/* Global spinlock serializing CPU emulation across host threads. */
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

/* Acquire the global CPU lock (busy-waits in spin_lock). */
void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

/* Release the global CPU lock. */
void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
7e84c249
FB
109/* return non zero if error */
110static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
111 int selector)
112{
113 SegmentCache *dt;
114 int index;
14ce26e7 115 target_ulong ptr;
7e84c249
FB
116
117 if (selector & 0x4)
118 dt = &env->ldt;
119 else
120 dt = &env->gdt;
121 index = selector & ~7;
122 if ((index + 7) > dt->limit)
123 return -1;
124 ptr = dt->base + index;
125 *e1_ptr = ldl_kernel(ptr);
126 *e2_ptr = ldl_kernel(ptr + 4);
127 return 0;
128}
3b46e624 129
7e84c249
FB
130static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
131{
132 unsigned int limit;
133 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
134 if (e2 & DESC_G_MASK)
135 limit = (limit << 12) | 0xfff;
136 return limit;
137}
138
/* Reassemble a descriptor's 32-bit base address from its scattered
   fields: base[15:0] lives in e1[31:16], base[23:16] in e2[7:0] and
   base[31:24] in e2[31:24]. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base_15_0  = e1 >> 16;
    uint32_t base_23_16 = (e2 & 0xff) << 16;
    uint32_t base_31_24 = e2 & 0xff000000;

    return base_15_0 | base_23_16 | base_31_24;
}
/* Fill a segment cache directly from the two raw descriptor words,
   with no presence or permission checks (used for LDT/TR loads where
   checks were already done). */
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
/* init the segment cache in vm86 mode. */
/* In vm86 mode a segment has base = selector << 4, a 64K limit and no
   descriptor flags. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
/* Read the stack pointer (SS:ESP) for privilege level DPL from the
   current TSS.  Handles both the 16-bit and 32-bit TSS layouts;
   raises #TS if the entry lies beyond the TSS limit, and aborts
   emulation outright on an invalid TR (emulator bug, not guest fault). */
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;   /* 0: 16-bit TSS, 1: 32-bit TSS */
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
/* XXX: merge with load_seg() */
/* Load segment register SEG_REG with SELECTOR during a task switch,
   applying the TSS-specific permission rules: raises #TS on any
   descriptor inconsistency, #NP if the segment is not present.  A null
   selector is accepted for data segments but rejected for CS and SS. */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        /* must be a code or data segment, not a system descriptor */
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* null selector: only legal for data segment registers */
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
/* switch_tss() entry reasons; they select busy-bit and NT handling */
#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
/* Perform a hardware task switch to the TSS described by
   (tss_selector, e1, e2).  SOURCE is one of SWITCH_TSS_{JMP,IRET,CALL}
   and controls busy-bit clearing/setting and the NT flag; NEXT_EIP is
   the EIP saved into the outgoing TSS.  Raises #TS/#GP/#NP on malformed
   descriptors.  Side effects: rewrites registers, segment caches, TR,
   LDT, CR0.TS and (for a paged 32-bit TSS) CR3 in env. */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        /* the gate's selector field points at the real TSS descriptor */
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    /* minimal TSS size: 104 bytes for 32-bit, 44 for 16-bit */
    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        /* record back link and mark nested task */
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        /* must be an LDT system descriptor (type 2) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
/* check if Port I/O is allowed in TSS */
/* Consult the TSS I/O permission bitmap for SIZE ports starting at
   ADDR; raises #GP(0) when the TSS is not a present 32-bit TSS, the
   bitmap word is outside the TSS limit, or any covered bit is set. */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
/* Helpers invoked from translated code: validate I/O permission for
   the port held in T0 or DX before a 1-, 2- or 4-byte IN/OUT. */
void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}
891b38e4
FB
540static inline unsigned int get_sp_mask(unsigned int e2)
541{
542 if (e2 & DESC_B_MASK)
543 return 0xffffffff;
544 else
545 return 0xffff;
546}
547
/* Write VAL into ESP honoring the stack-size mask: only SP for a
   16-bit stack, zero-extended 32 bits for a 32-bit stack, and the full
   value otherwise (64-bit stacks on TARGET_X86_64). */
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
/* XXX: add a is_user flag to have proper security support */
/* Stack push/pop helpers: SSP is the stack-segment base, SP the offset
   (updated in place) and SP_MASK selects 16- vs 32-bit wrap-around.
   NOTE: arguments are evaluated more than once -- callers must pass
   side-effect-free expressions. */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
/* protected mode interrupt */
/* Deliver interrupt/exception INTNO through the protected-mode IDT.
   is_int: software INT (NEXT_EIP = address of the next instruction);
   is_hw: external hardware interrupt.  Handles task gates (via
   switch_tss), 286/386 interrupt and trap gates, inner-privilege stack
   switches and entry from vm86; raises #GP/#NP/#TS on malformed
   descriptors.  May first re-route the event to the SVM intercept
   handler via raise_interrupt(). */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;
    int svm_should_check = 1;

    /* next_eip == -1 marks an event injected by SVM itself: do not
       re-check the intercept, and resume at the current EIP */
    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }

    if (svm_should_check
        && (INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int)) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* exceptions 8, 10-14 and 17 push an error code */
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    /* shift selects 16-bit (0) vs 32-bit (1) pushes from the gate type */
    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            /* leaving vm86: clear the data segment registers */
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#ifdef TARGET_X86_64

/* 64-bit stack push/pop: RSP is never masked in long mode.
   NOTE: arguments are evaluated more than once -- callers must pass
   side-effect-free expressions. */
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

/* Fetch a stack pointer from the 64-bit TSS: levels 0-2 read RSP0-RSP2,
   callers pass ist+3 to read IST1-IST7.  Raises #TS when the entry is
   outside the TSS limit; aborts on an invalid TR. */
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
/* 64 bit interrupt */
/* Deliver interrupt/exception INTNO through the 64-bit IDT (16-byte
   gates).  SS:RSP are always pushed; the target stack comes from the
   IST entry when the gate requests one, otherwise from the TSS RSP for
   the destination privilege.  Raises #GP/#NP on malformed descriptors.
   May first re-route the event to the SVM intercept handler. */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;
    int svm_should_check = 1;

    /* next_eip == -1 marks an event injected by SVM itself: do not
       re-check the intercept, and resume at the current EIP */
    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* exceptions 8, 10-14 and 17 push an error code */
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    /* target must be a 64-bit code segment (L set, D clear) */
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        /* SS is loaded with a null selector carrying the new RPL */
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
f419b321 971#endif
14ce26e7 972
/* SYSCALL instruction: fast transition to CPL 0.  CS/SS selectors come
   from MSR_STAR[47:32].  In long mode, RCX/R11 save the return RIP and
   RFLAGS, RFLAGS is masked by MSR_SFMASK and RIP comes from LSTAR
   (64-bit caller) or CSTAR (compatibility mode).  In legacy mode, ECX
   saves EIP and EIP comes from STAR[31:0].  next_eip_addend is the
   length of the SYSCALL instruction, so the saved IP points after it.
   Raises #UD when EFER.SCE is clear. */
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
1026
/* Implement the SYSRET instruction: fast return from CPL 0 to CPL 3.
   dflag is the operand size (2 = 64-bit REX.W form).  The target CS
   selector base comes from bits 63:48 of MSR_STAR; the 64-bit form uses
   selector + 16 and the 32-bit form uses the selector itself, with SS
   always selector + 8 (AMD64 convention). */
void helper_sysret(int dflag)
{
    int cpl, selector;

    /* #UD unless syscall/sysret are enabled in EFER */
    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    /* #GP(0) in real mode or when not executed at CPL 0 */
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            /* return to 64-bit code: RIP from RCX, CS = STAR.sel + 16, RPL 3 */
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            /* return to 32-bit compatibility-mode code */
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        /* restore the flags SYSCALL saved in R11 */
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        /* legacy mode return path */
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        /* legacy SYSRET re-enables interrupts unconditionally */
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    /* hand control back to the kqemu accelerator when it can run this state */
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
14ce26e7 1091
2c0262af
FB
/* real mode interrupt */
/* Deliver interrupt/exception `intno` using the real-mode IVT at
   env->idt: push FLAGS/CS/IP (16-bit) and vector through the 4-byte
   IVT entry.  is_int is nonzero for software INT; next_eip is the
   address after the INT instruction (used as the return address). */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;
    int svm_should_check = 1;

    /* SVM: a next_eip of -1 marks an event injected by the hypervisor,
       which must not be re-intercepted */
    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* real mode (simpler !) */
    dt = &env->idt;
    /* each IVT entry is 4 bytes: offset then segment */
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state; only the low 16 bits of ESP change */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    /* real-mode segment base is simply selector << 4 */
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
1138
/* fake user mode interrupt */
/* User-mode-emulation interrupt path: only performs the IDT privilege
   check (so that e.g. INT with DPL < CPL faults correctly) and advances
   EIP; actual delivery is left to the host-side exception handling. */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    /* second descriptor word holds DPL and type */
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
1164
1165/*
e19e89a5 1166 * Begin execution of an interruption. is_int is TRUE if coming from
2c0262af 1167 * the int instruction. next_eip is the EIP value AFTER the interrupt
3b46e624 1168 * instruction. It is only relevant if is_int is TRUE.
2c0262af 1169 */
5fafdf24 1170void do_interrupt(int intno, int is_int, int error_code,
14ce26e7 1171 target_ulong next_eip, int is_hw)
2c0262af 1172{
1247c5f7 1173 if (loglevel & CPU_LOG_INT) {
e19e89a5
FB
1174 if ((env->cr[0] & CR0_PE_MASK)) {
1175 static int count;
14ce26e7 1176 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
dc6f57fd
FB
1177 count, intno, error_code, is_int,
1178 env->hflags & HF_CPL_MASK,
1179 env->segs[R_CS].selector, EIP,
2ee73ac3 1180 (int)env->segs[R_CS].base + EIP,
8145122b
FB
1181 env->segs[R_SS].selector, ESP);
1182 if (intno == 0x0e) {
14ce26e7 1183 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
8145122b 1184 } else {
14ce26e7 1185 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
8145122b 1186 }
e19e89a5 1187 fprintf(logfile, "\n");
06c2f506 1188 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1247c5f7 1189#if 0
e19e89a5
FB
1190 {
1191 int i;
1192 uint8_t *ptr;
1193 fprintf(logfile, " code=");
1194 ptr = env->segs[R_CS].base + env->eip;
1195 for(i = 0; i < 16; i++) {
1196 fprintf(logfile, " %02x", ldub(ptr + i));
dc6f57fd 1197 }
e19e89a5 1198 fprintf(logfile, "\n");
dc6f57fd 1199 }
8e682019 1200#endif
e19e89a5 1201 count++;
4136f33c 1202 }
4136f33c 1203 }
2c0262af 1204 if (env->cr[0] & CR0_PE_MASK) {
14ce26e7
FB
1205#if TARGET_X86_64
1206 if (env->hflags & HF_LMA_MASK) {
1207 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1208 } else
1209#endif
1210 {
1211 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1212 }
2c0262af
FB
1213 } else {
1214 do_interrupt_real(intno, is_int, error_code, next_eip);
1215 }
1216}
1217
678dde13
TS
1218/*
1219 * Check nested exceptions and change to double or triple fault if
1220 * needed. It should only be called, if this is not an interrupt.
1221 * Returns the new exception number.
1222 */
1223int check_exception(int intno, int *error_code)
1224{
1225 char first_contributory = env->old_exception == 0 ||
1226 (env->old_exception >= 10 &&
1227 env->old_exception <= 13);
1228 char second_contributory = intno == 0 ||
1229 (intno >= 10 && intno <= 13);
1230
1231 if (loglevel & CPU_LOG_INT)
1232 fprintf(logfile, "check_exception old: %x new %x\n",
1233 env->old_exception, intno);
1234
1235 if (env->old_exception == EXCP08_DBLE)
1236 cpu_abort(env, "triple fault");
1237
1238 if ((first_contributory && second_contributory)
1239 || (env->old_exception == EXCP0E_PAGE &&
1240 (second_contributory || (intno == EXCP0E_PAGE)))) {
1241 intno = EXCP08_DBLE;
1242 *error_code = 0;
1243 }
1244
1245 if (second_contributory || (intno == EXCP0E_PAGE) ||
1246 (intno == EXCP08_DBLE))
1247 env->old_exception = intno;
1248
1249 return intno;
1250}
1251
2c0262af
FB
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 *
 * Records the pending event in env and unwinds to the main loop via
 * cpu_loop_exit(); it does not return.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        /* hardware exceptions may be intercepted by SVM and may combine
           with a pending exception into a double fault */
        svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
1272
0d1a29f9
FB
/* same as raise_exception_err, but do not restore global registers */
/* Used from contexts where the CPU globals must not be reloaded;
   longjmps straight back to the main loop (does not return). */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    /* may escalate to double/triple fault */
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}
1284
/* shortcuts to generate exceptions */

/* Raise a hardware exception carrying an error code.  The name is
   parenthesized so the debug macro wrapper at the top of the file can
   redirect ordinary call sites to a logging version. */
void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
1291
/* Raise a hardware exception with no error code (does not return). */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
1296
3b21e03e
FB
1297/* SMM support */
1298
5fafdf24 1299#if defined(CONFIG_USER_ONLY)
74ce674f
FB
1300
/* SMM entry is meaningless in user-mode emulation: no-op stub. */
void do_smm_enter(void)
{
}

/* RSM is likewise a no-op when only user space is emulated. */
void helper_rsm(void)
{
}
1308
1309#else
1310
3b21e03e
FB
1311#ifdef TARGET_X86_64
1312#define SMM_REVISION_ID 0x00020064
1313#else
1314#define SMM_REVISION_ID 0x00020000
1315#endif
1316
/* Enter System Management Mode: save the full CPU state to the SMRAM
   state-save area at smbase + 0x8000 (layout differs between the 64-bit
   and 32-bit save maps), then reset the CPU into the canonical SMM
   real-mode-like entry state with CS = smbase >> 4 and IP = 0x8000. */
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    /* base of the state-save area inside SMRAM */
    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    /* 64-bit (AMD64) save map: fixed offsets relative to sm_state */
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        /* attribute word stores descriptor flags bits 8..23, compacted */
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    /* general purpose registers, RAX at 0x7ff8 going down */
    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    /* 32-bit (Intel) save map */
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    /* segment descriptors: ES/CS/SS in one group, DS/FS/GS in another */
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    /* SMM always starts outside long mode */
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    /* CS base = SMBASE, flat 4G data segments */
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    /* drop protection and paging; keep the remaining CR0 bits */
    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
1452
/* Implement RSM: restore the full CPU state previously saved by
   do_smm_enter() from the SMRAM state-save area and leave SMM.
   Mirrors the save maps in do_smm_enter() exactly. */
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    /* re-derive the long-mode-active hflag from the restored EFER */
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    /* bit 17 of the revision ID signals SMBASE relocation support */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}
1576
74ce674f
FB
1577#endif /* !CONFIG_USER_ONLY */
1578
1579
2c0262af
FB
1580#ifdef BUGGY_GCC_DIV64
1581/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
1582 call it from another function */
/* 64/32-bit unsigned division helper for compilers whose __udivdi3 is
   broken (see BUGGY_GCC_DIV64): stores the quotient through q_ptr and
   returns the remainder.  den must be nonzero (caller checks). */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    uint64_t quot = num / den;
    uint64_t rem = num - quot * den;

    *q_ptr = quot;
    return rem;
}
1588
/* Signed counterpart of div32: 64/32-bit division with truncation
   toward zero, quotient through q_ptr, remainder returned.  den must be
   nonzero and num/den must not overflow (caller checks). */
int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    int64_t quot = num / den;
    int64_t rem = num % den;

    *q_ptr = quot;
    return rem;
}
1594#endif
1595
/* 32-bit unsigned DIV: divides EDX:EAX by T0, quotient to EAX and
   remainder to EDX.  Raises #DE on divide-by-zero or when the quotient
   does not fit in 32 bits, matching hardware DIV semantics. */
void helper_divl_EAX_T0(void)
{
    unsigned int den, r;
    uint64_t num, q;

    /* 64-bit dividend assembled from EDX (high) and EAX (low) */
    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    /* quotient overflow is also reported as #DE, like real hardware */
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
1617
14ce26e7 1618void helper_idivl_EAX_T0(void)
2c0262af 1619{
45bbbb46
FB
1620 int den, r;
1621 int64_t num, q;
3b46e624 1622
31313213 1623 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2c0262af
FB
1624 den = T0;
1625 if (den == 0) {
2c0262af
FB
1626 raise_exception(EXCP00_DIVZ);
1627 }
1628#ifdef BUGGY_GCC_DIV64
14ce26e7 1629 r = idiv32(&q, num, den);
2c0262af
FB
1630#else
1631 q = (num / den);
1632 r = (num % den);
1633#endif
45bbbb46
FB
1634 if (q != (int32_t)q)
1635 raise_exception(EXCP00_DIVZ);
14ce26e7
FB
1636 EAX = (uint32_t)q;
1637 EDX = (uint32_t)r;
2c0262af
FB
1638}
1639
/* Implement CMPXCHG8B: compare the 64-bit value at [A0] with EDX:EAX;
   if equal, store ECX:EBX there and set ZF, otherwise load the memory
   value into EDX:EAX and clear ZF.  Other flags are preserved. */
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    /* materialize lazy flags so we can update ZF in place */
    eflags = cc_table[CC_OP].compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
1657
88fe8a41
TS
1658void helper_single_step()
1659{
1660 env->dr[6] |= 0x4000;
1661 raise_exception(EXCP01_SSTP);
1662}
1663
2c0262af
FB
/* Implement CPUID: fill EAX/EBX/ECX/EDX according to the leaf index in
   EAX, using the feature data stored in env by the CPU model setup. */
void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    /* extended leaves (bit 31 set) clamp against cpuid_xlevel; note both
       branches clamp to cpuid_level, i.e. an out-of-range extended leaf
       falls back to the highest *basic* leaf, like real hardware */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* vendor string in the EBX/EDX/ECX order mandated by CPUID */
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        /* version/feature info; EBX[31:24] = initial APIC id */
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        /* highest extended leaf + vendor string again */
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* extended feature flags (long mode, NX, ... live here) */
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* processor brand string, 16 bytes per leaf */
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        EAX = 0x00003028;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}
1748
61a8c4ec
FB
1749void helper_enter_level(int level, int data32)
1750{
14ce26e7 1751 target_ulong ssp;
61a8c4ec
FB
1752 uint32_t esp_mask, esp, ebp;
1753
1754 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1755 ssp = env->segs[R_SS].base;
1756 ebp = EBP;
1757 esp = ESP;
1758 if (data32) {
1759 /* 32 bit */
1760 esp -= 4;
1761 while (--level) {
1762 esp -= 4;
1763 ebp -= 4;
1764 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1765 }
1766 esp -= 4;
1767 stl(ssp + (esp & esp_mask), T1);
1768 } else {
1769 /* 16 bit */
1770 esp -= 2;
1771 while (--level) {
1772 esp -= 2;
1773 ebp -= 2;
1774 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1775 }
1776 esp -= 2;
1777 stw(ssp + (esp & esp_mask), T1);
1778 }
1779}
1780
8f091a59
FB
1781#ifdef TARGET_X86_64
1782void helper_enter64_level(int level, int data64)
1783{
1784 target_ulong esp, ebp;
1785 ebp = EBP;
1786 esp = ESP;
1787
1788 if (data64) {
1789 /* 64 bit */
1790 esp -= 8;
1791 while (--level) {
1792 esp -= 8;
1793 ebp -= 8;
1794 stq(esp, ldq(ebp));
1795 }
1796 esp -= 8;
1797 stq(esp, T1);
1798 } else {
1799 /* 16 bit */
1800 esp -= 2;
1801 while (--level) {
1802 esp -= 2;
1803 ebp -= 2;
1804 stw(esp, lduw(ebp));
1805 }
1806 esp -= 2;
1807 stw(esp, T1);
1808 }
1809}
1810#endif
1811
2c0262af
FB
/* Implement LLDT: load the LDT register from the selector in T0.
   Validates the GDT descriptor (must be a present LDT descriptor,
   type 2) and handles the 16-byte system descriptors of long mode. */
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        /* LDT selector must reference the GDT (TI bit clear) */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        /* long mode system descriptors are 16 bytes */
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        /* must be a system descriptor of type 2 (LDT) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* third descriptor word carries base bits 63:32 */
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
1859
/* Implement LTR: load the task register from the selector in T0.
   Validates the GDT descriptor (available 16/32-bit TSS, type 1 or 9),
   handles long mode's 16-byte descriptors, and marks the TSS busy. */
void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        /* TR selector must reference the GDT (TI bit clear) */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        /* long mode system descriptors are 16 bytes */
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        /* must be an available 16-bit (1) or 32/64-bit (9) TSS */
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            /* the upper half of a 16-byte descriptor must have type 0 */
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            /* base bits 63:32 come from the third word */
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        /* mark the TSS busy in the in-memory descriptor */
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
1915
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
/* Load a data/stack segment register (DS/ES/FS/GS/SS) with full
   protected-mode checks: descriptor table lookup, type and privilege
   validation, presence check, accessed-bit update, then commit to the
   segment cache.  Faults with #GP/#SS/#NP and the selector as the
   error code, per the Intel MOV-to-segment rules. */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        /* a null SS is only legal for 64-bit code below CPL 3 */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        /* TI bit selects LDT vs GDT */
        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        /* must be a code/data (non-system) descriptor */
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            /* SS additionally requires RPL == DPL == CPL */
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        /* presence check last: #SS for stack loads, #NP otherwise */
        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
1994
1995/* protected mode jump */
f419b321 1996void helper_ljmp_protected_T0_T1(int next_eip_addend)
2c0262af 1997{
14ce26e7 1998 int new_cs, gate_cs, type;
2c0262af 1999 uint32_t e1, e2, cpl, dpl, rpl, limit;
f419b321 2000 target_ulong new_eip, next_eip;
3b46e624 2001
2c0262af
FB
2002 new_cs = T0;
2003 new_eip = T1;
2004 if ((new_cs & 0xfffc) == 0)
2005 raise_exception_err(EXCP0D_GPF, 0);
2006 if (load_segment(&e1, &e2, new_cs) != 0)
2007 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2008 cpl = env->hflags & HF_CPL_MASK;
2009 if (e2 & DESC_S_MASK) {
2010 if (!(e2 & DESC_CS_MASK))
2011 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2012 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
7e84c249 2013 if (e2 & DESC_C_MASK) {
2c0262af
FB
2014 /* conforming code segment */
2015 if (dpl > cpl)
2016 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2017 } else {
2018 /* non conforming code segment */
2019 rpl = new_cs & 3;
2020 if (rpl > cpl)
2021 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2022 if (dpl != cpl)
2023 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2024 }
2025 if (!(e2 & DESC_P_MASK))
2026 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2027 limit = get_seg_limit(e1, e2);
5fafdf24 2028 if (new_eip > limit &&
ca954f6d 2029 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2c0262af
FB
2030 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2031 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2032 get_seg_base(e1, e2), limit, e2);
2033 EIP = new_eip;
2034 } else {
7e84c249
FB
2035 /* jump to call or task gate */
2036 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2037 rpl = new_cs & 3;
2038 cpl = env->hflags & HF_CPL_MASK;
2039 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2040 switch(type) {
2041 case 1: /* 286 TSS */
2042 case 9: /* 386 TSS */
2043 case 5: /* task gate */
2044 if (dpl < cpl || dpl < rpl)
2045 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
f419b321 2046 next_eip = env->eip + next_eip_addend;
08cea4ee 2047 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
447c2cef 2048 CC_OP = CC_OP_EFLAGS;
7e84c249
FB
2049 break;
2050 case 4: /* 286 call gate */
2051 case 12: /* 386 call gate */
2052 if ((dpl < cpl) || (dpl < rpl))
2053 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2054 if (!(e2 & DESC_P_MASK))
2055 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2056 gate_cs = e1 >> 16;
516633dc
FB
2057 new_eip = (e1 & 0xffff);
2058 if (type == 12)
2059 new_eip |= (e2 & 0xffff0000);
7e84c249
FB
2060 if (load_segment(&e1, &e2, gate_cs) != 0)
2061 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2062 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2063 /* must be code segment */
5fafdf24 2064 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
7e84c249
FB
2065 (DESC_S_MASK | DESC_CS_MASK)))
2066 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
5fafdf24 2067 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
7e84c249
FB
2068 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2069 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2070 if (!(e2 & DESC_P_MASK))
2071 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
7e84c249
FB
2072 limit = get_seg_limit(e1, e2);
2073 if (new_eip > limit)
2074 raise_exception_err(EXCP0D_GPF, 0);
2075 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2076 get_seg_base(e1, e2), limit, e2);
2077 EIP = new_eip;
2078 break;
2079 default:
2080 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2081 break;
2082 }
2c0262af
FB
2083 }
2084}
2085
2086/* real mode call */
2087void helper_lcall_real_T0_T1(int shift, int next_eip)
2088{
2089 int new_cs, new_eip;
2090 uint32_t esp, esp_mask;
14ce26e7 2091 target_ulong ssp;
2c0262af
FB
2092
2093 new_cs = T0;
2094 new_eip = T1;
2095 esp = ESP;
891b38e4 2096 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2c0262af
FB
2097 ssp = env->segs[R_SS].base;
2098 if (shift) {
891b38e4
FB
2099 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2100 PUSHL(ssp, esp, esp_mask, next_eip);
2c0262af 2101 } else {
891b38e4
FB
2102 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2103 PUSHW(ssp, esp, esp_mask, next_eip);
2c0262af
FB
2104 }
2105
8d7b0fbb 2106 SET_ESP(esp, esp_mask);
2c0262af
FB
2107 env->eip = new_eip;
2108 env->segs[R_CS].selector = new_cs;
14ce26e7 2109 env->segs[R_CS].base = (new_cs << 4);
2c0262af
FB
2110}
2111
2112/* protected mode call */
f419b321 2113void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
2c0262af 2114{
649ea05a 2115 int new_cs, new_stack, i;
2c0262af 2116 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
891b38e4
FB
2117 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2118 uint32_t val, limit, old_sp_mask;
649ea05a 2119 target_ulong ssp, old_ssp, next_eip, new_eip;
3b46e624 2120
2c0262af
FB
2121 new_cs = T0;
2122 new_eip = T1;
f419b321 2123 next_eip = env->eip + next_eip_addend;
f3f2d9be 2124#ifdef DEBUG_PCALL
e19e89a5
FB
2125 if (loglevel & CPU_LOG_PCALL) {
2126 fprintf(logfile, "lcall %04x:%08x s=%d\n",
649ea05a 2127 new_cs, (uint32_t)new_eip, shift);
7fe48483 2128 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
f3f2d9be
FB
2129 }
2130#endif
2c0262af
FB
2131 if ((new_cs & 0xfffc) == 0)
2132 raise_exception_err(EXCP0D_GPF, 0);
2133 if (load_segment(&e1, &e2, new_cs) != 0)
2134 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2135 cpl = env->hflags & HF_CPL_MASK;
f3f2d9be 2136#ifdef DEBUG_PCALL
e19e89a5 2137 if (loglevel & CPU_LOG_PCALL) {
f3f2d9be
FB
2138 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2139 }
2140#endif
2c0262af
FB
2141 if (e2 & DESC_S_MASK) {
2142 if (!(e2 & DESC_CS_MASK))
2143 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2144 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
7e84c249 2145 if (e2 & DESC_C_MASK) {
2c0262af
FB
2146 /* conforming code segment */
2147 if (dpl > cpl)
2148 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2149 } else {
2150 /* non conforming code segment */
2151 rpl = new_cs & 3;
2152 if (rpl > cpl)
2153 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2154 if (dpl != cpl)
2155 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2156 }
2157 if (!(e2 & DESC_P_MASK))
2158 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2159
f419b321
FB
2160#ifdef TARGET_X86_64
2161 /* XXX: check 16/32 bit cases in long mode */
2162 if (shift == 2) {
2163 target_ulong rsp;
2164 /* 64 bit case */
2165 rsp = ESP;
2166 PUSHQ(rsp, env->segs[R_CS].selector);
2167 PUSHQ(rsp, next_eip);
2168 /* from this point, not restartable */
2169 ESP = rsp;
2170 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
5fafdf24 2171 get_seg_base(e1, e2),
f419b321
FB
2172 get_seg_limit(e1, e2), e2);
2173 EIP = new_eip;
5fafdf24 2174 } else
f419b321
FB
2175#endif
2176 {
2177 sp = ESP;
2178 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2179 ssp = env->segs[R_SS].base;
2180 if (shift) {
2181 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2182 PUSHL(ssp, sp, sp_mask, next_eip);
2183 } else {
2184 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2185 PUSHW(ssp, sp, sp_mask, next_eip);
2186 }
3b46e624 2187
f419b321
FB
2188 limit = get_seg_limit(e1, e2);
2189 if (new_eip > limit)
2190 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2191 /* from this point, not restartable */
8d7b0fbb 2192 SET_ESP(sp, sp_mask);
f419b321
FB
2193 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2194 get_seg_base(e1, e2), limit, e2);
2195 EIP = new_eip;
2c0262af 2196 }
2c0262af
FB
2197 } else {
2198 /* check gate type */
2199 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
7e84c249
FB
2200 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2201 rpl = new_cs & 3;
2c0262af
FB
2202 switch(type) {
2203 case 1: /* available 286 TSS */
2204 case 9: /* available 386 TSS */
2205 case 5: /* task gate */
7e84c249
FB
2206 if (dpl < cpl || dpl < rpl)
2207 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
883da8e2 2208 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
447c2cef 2209 CC_OP = CC_OP_EFLAGS;
8145122b 2210 return;
2c0262af
FB
2211 case 4: /* 286 call gate */
2212 case 12: /* 386 call gate */
2213 break;
2214 default:
2215 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2216 break;
2217 }
2218 shift = type >> 3;
2219
2c0262af
FB
2220 if (dpl < cpl || dpl < rpl)
2221 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2222 /* check valid bit */
2223 if (!(e2 & DESC_P_MASK))
2224 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2225 selector = e1 >> 16;
2226 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
f3f2d9be 2227 param_count = e2 & 0x1f;
2c0262af
FB
2228 if ((selector & 0xfffc) == 0)
2229 raise_exception_err(EXCP0D_GPF, 0);
2230
2231 if (load_segment(&e1, &e2, selector) != 0)
2232 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2233 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2234 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2235 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2236 if (dpl > cpl)
2237 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2238 if (!(e2 & DESC_P_MASK))
2239 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2240
2241 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
7f75ffd3 2242 /* to inner privilege */
2c0262af 2243 get_ss_esp_from_tss(&ss, &sp, dpl);
f3f2d9be 2244#ifdef DEBUG_PCALL
e19e89a5 2245 if (loglevel & CPU_LOG_PCALL)
5fafdf24 2246 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
f3f2d9be
FB
2247 ss, sp, param_count, ESP);
2248#endif
2c0262af
FB
2249 if ((ss & 0xfffc) == 0)
2250 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2251 if ((ss & 3) != dpl)
2252 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2253 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2254 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2255 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2256 if (ss_dpl != dpl)
2257 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2258 if (!(ss_e2 & DESC_S_MASK) ||
2259 (ss_e2 & DESC_CS_MASK) ||
2260 !(ss_e2 & DESC_W_MASK))
2261 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2262 if (!(ss_e2 & DESC_P_MASK))
2263 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3b46e624 2264
891b38e4 2265 // push_size = ((param_count * 2) + 8) << shift;
2c0262af 2266
891b38e4
FB
2267 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2268 old_ssp = env->segs[R_SS].base;
3b46e624 2269
891b38e4
FB
2270 sp_mask = get_sp_mask(ss_e2);
2271 ssp = get_seg_base(ss_e1, ss_e2);
2c0262af 2272 if (shift) {
891b38e4
FB
2273 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2274 PUSHL(ssp, sp, sp_mask, ESP);
2275 for(i = param_count - 1; i >= 0; i--) {
2276 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2277 PUSHL(ssp, sp, sp_mask, val);
2c0262af
FB
2278 }
2279 } else {
891b38e4
FB
2280 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2281 PUSHW(ssp, sp, sp_mask, ESP);
2282 for(i = param_count - 1; i >= 0; i--) {
2283 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2284 PUSHW(ssp, sp, sp_mask, val);
2c0262af
FB
2285 }
2286 }
891b38e4 2287 new_stack = 1;
2c0262af 2288 } else {
7f75ffd3 2289 /* to same privilege */
891b38e4
FB
2290 sp = ESP;
2291 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2292 ssp = env->segs[R_SS].base;
2293 // push_size = (4 << shift);
2294 new_stack = 0;
2c0262af
FB
2295 }
2296
2297 if (shift) {
891b38e4
FB
2298 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2299 PUSHL(ssp, sp, sp_mask, next_eip);
2c0262af 2300 } else {
891b38e4
FB
2301 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2302 PUSHW(ssp, sp, sp_mask, next_eip);
2303 }
2304
2305 /* from this point, not restartable */
2306
2307 if (new_stack) {
2308 ss = (ss & ~3) | dpl;
5fafdf24 2309 cpu_x86_load_seg_cache(env, R_SS, ss,
891b38e4
FB
2310 ssp,
2311 get_seg_limit(ss_e1, ss_e2),
2312 ss_e2);
2c0262af
FB
2313 }
2314
2c0262af 2315 selector = (selector & ~3) | dpl;
5fafdf24 2316 cpu_x86_load_seg_cache(env, R_CS, selector,
2c0262af
FB
2317 get_seg_base(e1, e2),
2318 get_seg_limit(e1, e2),
2319 e2);
2320 cpu_x86_set_cpl(env, dpl);
8d7b0fbb 2321 SET_ESP(sp, sp_mask);
2c0262af
FB
2322 EIP = offset;
2323 }
9df217a3
FB
2324#ifdef USE_KQEMU
2325 if (kqemu_is_ok(env)) {
2326 env->exception_index = -1;
2327 cpu_loop_exit();
2328 }
2329#endif
2c0262af
FB
2330}
2331
7e84c249 2332/* real and vm86 mode iret */
2c0262af
FB
2333void helper_iret_real(int shift)
2334{
891b38e4 2335 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
14ce26e7 2336 target_ulong ssp;
2c0262af 2337 int eflags_mask;
7e84c249 2338
891b38e4
FB
2339 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2340 sp = ESP;
2341 ssp = env->segs[R_SS].base;
2c0262af
FB
2342 if (shift == 1) {
2343 /* 32 bits */
891b38e4
FB
2344 POPL(ssp, sp, sp_mask, new_eip);
2345 POPL(ssp, sp, sp_mask, new_cs);
2346 new_cs &= 0xffff;
2347 POPL(ssp, sp, sp_mask, new_eflags);
2c0262af
FB
2348 } else {
2349 /* 16 bits */
891b38e4
FB
2350 POPW(ssp, sp, sp_mask, new_eip);
2351 POPW(ssp, sp, sp_mask, new_cs);
2352 POPW(ssp, sp, sp_mask, new_eflags);
2c0262af 2353 }
4136f33c 2354 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2c0262af
FB
2355 load_seg_vm(R_CS, new_cs);
2356 env->eip = new_eip;
7e84c249 2357 if (env->eflags & VM_MASK)
8145122b 2358 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
7e84c249 2359 else
8145122b 2360 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2c0262af
FB
2361 if (shift == 0)
2362 eflags_mask &= 0xffff;
2363 load_eflags(new_eflags, eflags_mask);
2364}
2365
8e682019
FB
2366static inline void validate_seg(int seg_reg, int cpl)
2367{
2368 int dpl;
2369 uint32_t e2;
cd072e01
FB
2370
2371 /* XXX: on x86_64, we do not want to nullify FS and GS because
2372 they may still contain a valid base. I would be interested to
2373 know how a real x86_64 CPU behaves */
5fafdf24 2374 if ((seg_reg == R_FS || seg_reg == R_GS) &&
cd072e01
FB
2375 (env->segs[seg_reg].selector & 0xfffc) == 0)
2376 return;
2377
8e682019
FB
2378 e2 = env->segs[seg_reg].flags;
2379 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2380 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2381 /* data or non conforming code segment */
2382 if (dpl < cpl) {
14ce26e7 2383 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
8e682019
FB
2384 }
2385 }
2386}
2387
2c0262af
FB
2388/* protected mode iret */
2389static inline void helper_ret_protected(int shift, int is_iret, int addend)
2390{
14ce26e7 2391 uint32_t new_cs, new_eflags, new_ss;
2c0262af
FB
2392 uint32_t new_es, new_ds, new_fs, new_gs;
2393 uint32_t e1, e2, ss_e1, ss_e2;
4136f33c 2394 int cpl, dpl, rpl, eflags_mask, iopl;
14ce26e7 2395 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3b46e624 2396
14ce26e7
FB
2397#ifdef TARGET_X86_64
2398 if (shift == 2)
2399 sp_mask = -1;
2400 else
2401#endif
2402 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2c0262af 2403 sp = ESP;
891b38e4 2404 ssp = env->segs[R_SS].base;
354ff226 2405 new_eflags = 0; /* avoid warning */
14ce26e7
FB
2406#ifdef TARGET_X86_64
2407 if (shift == 2) {
2408 POPQ(sp, new_eip);
2409 POPQ(sp, new_cs);
2410 new_cs &= 0xffff;
2411 if (is_iret) {
2412 POPQ(sp, new_eflags);
2413 }
2414 } else
2415#endif
2c0262af
FB
2416 if (shift == 1) {
2417 /* 32 bits */
891b38e4
FB
2418 POPL(ssp, sp, sp_mask, new_eip);
2419 POPL(ssp, sp, sp_mask, new_cs);
2420 new_cs &= 0xffff;
2421 if (is_iret) {
2422 POPL(ssp, sp, sp_mask, new_eflags);
2423 if (new_eflags & VM_MASK)
2424 goto return_to_vm86;
2425 }
2c0262af
FB
2426 } else {
2427 /* 16 bits */
891b38e4
FB
2428 POPW(ssp, sp, sp_mask, new_eip);
2429 POPW(ssp, sp, sp_mask, new_cs);
2c0262af 2430 if (is_iret)
891b38e4 2431 POPW(ssp, sp, sp_mask, new_eflags);
2c0262af 2432 }
891b38e4 2433#ifdef DEBUG_PCALL
e19e89a5 2434 if (loglevel & CPU_LOG_PCALL) {
14ce26e7 2435 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
e19e89a5 2436 new_cs, new_eip, shift, addend);
7fe48483 2437 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
891b38e4
FB
2438 }
2439#endif
2c0262af
FB
2440 if ((new_cs & 0xfffc) == 0)
2441 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2442 if (load_segment(&e1, &e2, new_cs) != 0)
2443 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2444 if (!(e2 & DESC_S_MASK) ||
2445 !(e2 & DESC_CS_MASK))
2446 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2447 cpl = env->hflags & HF_CPL_MASK;
5fafdf24 2448 rpl = new_cs & 3;
2c0262af
FB
2449 if (rpl < cpl)
2450 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2451 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
7e84c249 2452 if (e2 & DESC_C_MASK) {
2c0262af
FB
2453 if (dpl > rpl)
2454 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2455 } else {
2456 if (dpl != rpl)
2457 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2458 }
2459 if (!(e2 & DESC_P_MASK))
2460 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3b46e624 2461
891b38e4 2462 sp += addend;
5fafdf24 2463 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
ca954f6d 2464 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2c0262af 2465 /* return to same priledge level */
5fafdf24 2466 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2c0262af
FB
2467 get_seg_base(e1, e2),
2468 get_seg_limit(e1, e2),
2469 e2);
2c0262af 2470 } else {
7f75ffd3 2471 /* return to different privilege level */
14ce26e7
FB
2472#ifdef TARGET_X86_64
2473 if (shift == 2) {
2474 POPQ(sp, new_esp);
2475 POPQ(sp, new_ss);
2476 new_ss &= 0xffff;
2477 } else
2478#endif
2c0262af
FB
2479 if (shift == 1) {
2480 /* 32 bits */
891b38e4
FB
2481 POPL(ssp, sp, sp_mask, new_esp);
2482 POPL(ssp, sp, sp_mask, new_ss);
2483 new_ss &= 0xffff;
2c0262af
FB
2484 } else {
2485 /* 16 bits */
891b38e4
FB
2486 POPW(ssp, sp, sp_mask, new_esp);
2487 POPW(ssp, sp, sp_mask, new_ss);
2c0262af 2488 }
e19e89a5
FB
2489#ifdef DEBUG_PCALL
2490 if (loglevel & CPU_LOG_PCALL) {
14ce26e7 2491 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
e19e89a5
FB
2492 new_ss, new_esp);
2493 }
2494#endif
b359d4e7
FB
2495 if ((new_ss & 0xfffc) == 0) {
2496#ifdef TARGET_X86_64
2497 /* NULL ss is allowed in long mode if cpl != 3*/
d80c7d1c 2498 /* XXX: test CS64 ? */
b359d4e7 2499 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
5fafdf24 2500 cpu_x86_load_seg_cache(env, R_SS, new_ss,
b359d4e7
FB
2501 0, 0xffffffff,
2502 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2503 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2504 DESC_W_MASK | DESC_A_MASK);
d80c7d1c 2505 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
5fafdf24 2506 } else
b359d4e7
FB
2507#endif
2508 {
2509 raise_exception_err(EXCP0D_GPF, 0);
2510 }
14ce26e7
FB
2511 } else {
2512 if ((new_ss & 3) != rpl)
2513 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2514 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2515 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2516 if (!(ss_e2 & DESC_S_MASK) ||
2517 (ss_e2 & DESC_CS_MASK) ||
2518 !(ss_e2 & DESC_W_MASK))
2519 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2520 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2521 if (dpl != rpl)
2522 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2523 if (!(ss_e2 & DESC_P_MASK))
2524 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
5fafdf24 2525 cpu_x86_load_seg_cache(env, R_SS, new_ss,
14ce26e7
FB
2526 get_seg_base(ss_e1, ss_e2),
2527 get_seg_limit(ss_e1, ss_e2),
2528 ss_e2);
2529 }
2c0262af 2530
5fafdf24 2531 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2c0262af
FB
2532 get_seg_base(e1, e2),
2533 get_seg_limit(e1, e2),
2534 e2);
2c0262af 2535 cpu_x86_set_cpl(env, rpl);
891b38e4 2536 sp = new_esp;
14ce26e7 2537#ifdef TARGET_X86_64
2c8e0301 2538 if (env->hflags & HF_CS64_MASK)
14ce26e7
FB
2539 sp_mask = -1;
2540 else
2541#endif
2542 sp_mask = get_sp_mask(ss_e2);
8e682019
FB
2543
2544 /* validate data segments */
89984cd2
FB
2545 validate_seg(R_ES, rpl);
2546 validate_seg(R_DS, rpl);
2547 validate_seg(R_FS, rpl);
2548 validate_seg(R_GS, rpl);
4afa6482
FB
2549
2550 sp += addend;
2c0262af 2551 }
8d7b0fbb 2552 SET_ESP(sp, sp_mask);
2c0262af
FB
2553 env->eip = new_eip;
2554 if (is_iret) {
4136f33c 2555 /* NOTE: 'cpl' is the _old_ CPL */
8145122b 2556 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2c0262af 2557 if (cpl == 0)
4136f33c
FB
2558 eflags_mask |= IOPL_MASK;
2559 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2560 if (cpl <= iopl)
2561 eflags_mask |= IF_MASK;
2c0262af
FB
2562 if (shift == 0)
2563 eflags_mask &= 0xffff;
2564 load_eflags(new_eflags, eflags_mask);
2565 }
2566 return;
2567
2568 return_to_vm86:
891b38e4
FB
2569 POPL(ssp, sp, sp_mask, new_esp);
2570 POPL(ssp, sp, sp_mask, new_ss);
2571 POPL(ssp, sp, sp_mask, new_es);
2572 POPL(ssp, sp, sp_mask, new_ds);
2573 POPL(ssp, sp, sp_mask, new_fs);
2574 POPL(ssp, sp, sp_mask, new_gs);
3b46e624 2575
2c0262af 2576 /* modify processor state */
5fafdf24 2577 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
8145122b 2578 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
891b38e4 2579 load_seg_vm(R_CS, new_cs & 0xffff);
2c0262af 2580 cpu_x86_set_cpl(env, 3);
891b38e4
FB
2581 load_seg_vm(R_SS, new_ss & 0xffff);
2582 load_seg_vm(R_ES, new_es & 0xffff);
2583 load_seg_vm(R_DS, new_ds & 0xffff);
2584 load_seg_vm(R_FS, new_fs & 0xffff);
2585 load_seg_vm(R_GS, new_gs & 0xffff);
2c0262af 2586
fd836909 2587 env->eip = new_eip & 0xffff;
2c0262af
FB
2588 ESP = new_esp;
2589}
2590
08cea4ee 2591void helper_iret_protected(int shift, int next_eip)
2c0262af 2592{
7e84c249
FB
2593 int tss_selector, type;
2594 uint32_t e1, e2;
3b46e624 2595
7e84c249
FB
2596 /* specific case for TSS */
2597 if (env->eflags & NT_MASK) {
14ce26e7
FB
2598#ifdef TARGET_X86_64
2599 if (env->hflags & HF_LMA_MASK)
2600 raise_exception_err(EXCP0D_GPF, 0);
2601#endif
7e84c249
FB
2602 tss_selector = lduw_kernel(env->tr.base + 0);
2603 if (tss_selector & 4)
2604 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2605 if (load_segment(&e1, &e2, tss_selector) != 0)
2606 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2607 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2608 /* NOTE: we check both segment and busy TSS */
2609 if (type != 3)
2610 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
08cea4ee 2611 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
7e84c249
FB
2612 } else {
2613 helper_ret_protected(shift, 1, 0);
2614 }
9df217a3
FB
2615#ifdef USE_KQEMU
2616 if (kqemu_is_ok(env)) {
2617 CC_OP = CC_OP_EFLAGS;
2618 env->exception_index = -1;
2619 cpu_loop_exit();
2620 }
2621#endif
2c0262af
FB
2622}
2623
2624void helper_lret_protected(int shift, int addend)
2625{
2626 helper_ret_protected(shift, 0, addend);
9df217a3
FB
2627#ifdef USE_KQEMU
2628 if (kqemu_is_ok(env)) {
9df217a3
FB
2629 env->exception_index = -1;
2630 cpu_loop_exit();
2631 }
2632#endif
2c0262af
FB
2633}
2634
023fe10d
FB
2635void helper_sysenter(void)
2636{
2637 if (env->sysenter_cs == 0) {
2638 raise_exception_err(EXCP0D_GPF, 0);
2639 }
2640 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2641 cpu_x86_set_cpl(env, 0);
5fafdf24
TS
2642 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2643 0, 0xffffffff,
023fe10d
FB
2644 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2645 DESC_S_MASK |
2646 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
5fafdf24 2647 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
14ce26e7 2648 0, 0xffffffff,
023fe10d
FB
2649 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2650 DESC_S_MASK |
2651 DESC_W_MASK | DESC_A_MASK);
2652 ESP = env->sysenter_esp;
2653 EIP = env->sysenter_eip;
2654}
2655
2656void helper_sysexit(void)
2657{
2658 int cpl;
2659
2660 cpl = env->hflags & HF_CPL_MASK;
2661 if (env->sysenter_cs == 0 || cpl != 0) {
2662 raise_exception_err(EXCP0D_GPF, 0);
2663 }
2664 cpu_x86_set_cpl(env, 3);
5fafdf24
TS
2665 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2666 0, 0xffffffff,
023fe10d
FB
2667 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2668 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2669 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
5fafdf24 2670 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
14ce26e7 2671 0, 0xffffffff,
023fe10d
FB
2672 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2673 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2674 DESC_W_MASK | DESC_A_MASK);
2675 ESP = ECX;
2676 EIP = EDX;
9df217a3
FB
2677#ifdef USE_KQEMU
2678 if (kqemu_is_ok(env)) {
2679 env->exception_index = -1;
2680 cpu_loop_exit();
2681 }
2682#endif
023fe10d
FB
2683}
2684
2c0262af
FB
2685void helper_movl_crN_T0(int reg)
2686{
5fafdf24 2687#if !defined(CONFIG_USER_ONLY)
2c0262af
FB
2688 switch(reg) {
2689 case 0:
1ac157da 2690 cpu_x86_update_cr0(env, T0);
2c0262af
FB
2691 break;
2692 case 3:
1ac157da
FB
2693 cpu_x86_update_cr3(env, T0);
2694 break;
2695 case 4:
2696 cpu_x86_update_cr4(env, T0);
2697 break;
4d6b6c0a
FB
2698 case 8:
2699 cpu_set_apic_tpr(env, T0);
2700 break;
1ac157da
FB
2701 default:
2702 env->cr[reg] = T0;
2c0262af
FB
2703 break;
2704 }
4d6b6c0a 2705#endif
2c0262af
FB
2706}
2707
2708/* XXX: do more */
2709void helper_movl_drN_T0(int reg)
2710{
2711 env->dr[reg] = T0;
2712}
2713
8f091a59 2714void helper_invlpg(target_ulong addr)
2c0262af
FB
2715{
2716 cpu_x86_flush_tlb(env, addr);
2717}
2718
2c0262af
FB
2719void helper_rdtsc(void)
2720{
2721 uint64_t val;
ecada8a2
FB
2722
2723 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2724 raise_exception(EXCP0D_GPF);
2725 }
28ab0e2e 2726 val = cpu_get_tsc(env);
14ce26e7
FB
2727 EAX = (uint32_t)(val);
2728 EDX = (uint32_t)(val >> 32);
2729}
2730
5fafdf24 2731#if defined(CONFIG_USER_ONLY)
14ce26e7
FB
2732void helper_wrmsr(void)
2733{
2c0262af
FB
2734}
2735
14ce26e7
FB
2736void helper_rdmsr(void)
2737{
2738}
2739#else
2c0262af
FB
2740void helper_wrmsr(void)
2741{
14ce26e7
FB
2742 uint64_t val;
2743
2744 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2745
2746 switch((uint32_t)ECX) {
2c0262af 2747 case MSR_IA32_SYSENTER_CS:
14ce26e7 2748 env->sysenter_cs = val & 0xffff;
2c0262af
FB
2749 break;
2750 case MSR_IA32_SYSENTER_ESP:
14ce26e7 2751 env->sysenter_esp = val;
2c0262af
FB
2752 break;
2753 case MSR_IA32_SYSENTER_EIP:
14ce26e7
FB
2754 env->sysenter_eip = val;
2755 break;
2756 case MSR_IA32_APICBASE:
2757 cpu_set_apic_base(env, val);
2758 break;
14ce26e7 2759 case MSR_EFER:
f419b321
FB
2760 {
2761 uint64_t update_mask;
2762 update_mask = 0;
2763 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
2764 update_mask |= MSR_EFER_SCE;
2765 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
2766 update_mask |= MSR_EFER_LME;
2767 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
2768 update_mask |= MSR_EFER_FFXSR;
2769 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
2770 update_mask |= MSR_EFER_NXE;
5fafdf24 2771 env->efer = (env->efer & ~update_mask) |
f419b321
FB
2772 (val & update_mask);
2773 }
2c0262af 2774 break;
14ce26e7
FB
2775 case MSR_STAR:
2776 env->star = val;
2777 break;
8f091a59
FB
2778 case MSR_PAT:
2779 env->pat = val;
2780 break;
0573fbfc
TS
2781 case MSR_VM_HSAVE_PA:
2782 env->vm_hsave = val;
2783 break;
f419b321 2784#ifdef TARGET_X86_64
14ce26e7
FB
2785 case MSR_LSTAR:
2786 env->lstar = val;
2787 break;
2788 case MSR_CSTAR:
2789 env->cstar = val;
2790 break;
2791 case MSR_FMASK:
2792 env->fmask = val;
2793 break;
2794 case MSR_FSBASE:
2795 env->segs[R_FS].base = val;
2796 break;
2797 case MSR_GSBASE:
2798 env->segs[R_GS].base = val;
2799 break;
2800 case MSR_KERNELGSBASE:
2801 env->kernelgsbase = val;
2802 break;
2803#endif
2c0262af
FB
2804 default:
2805 /* XXX: exception ? */
5fafdf24 2806 break;
2c0262af
FB
2807 }
2808}
2809
2810void helper_rdmsr(void)
2811{
14ce26e7
FB
2812 uint64_t val;
2813 switch((uint32_t)ECX) {
2c0262af 2814 case MSR_IA32_SYSENTER_CS:
14ce26e7 2815 val = env->sysenter_cs;
2c0262af
FB
2816 break;
2817 case MSR_IA32_SYSENTER_ESP:
14ce26e7 2818 val = env->sysenter_esp;
2c0262af
FB
2819 break;
2820 case MSR_IA32_SYSENTER_EIP:
14ce26e7
FB
2821 val = env->sysenter_eip;
2822 break;
2823 case MSR_IA32_APICBASE:
2824 val = cpu_get_apic_base(env);
2825 break;
14ce26e7
FB
2826 case MSR_EFER:
2827 val = env->efer;
2828 break;
2829 case MSR_STAR:
2830 val = env->star;
2831 break;
8f091a59
FB
2832 case MSR_PAT:
2833 val = env->pat;
2834 break;
0573fbfc
TS
2835 case MSR_VM_HSAVE_PA:
2836 val = env->vm_hsave;
2837 break;
f419b321 2838#ifdef TARGET_X86_64
14ce26e7
FB
2839 case MSR_LSTAR:
2840 val = env->lstar;
2841 break;
2842 case MSR_CSTAR:
2843 val = env->cstar;
2844 break;
2845 case MSR_FMASK:
2846 val = env->fmask;
2847 break;
2848 case MSR_FSBASE:
2849 val = env->segs[R_FS].base;
2850 break;
2851 case MSR_GSBASE:
2852 val = env->segs[R_GS].base;
2c0262af 2853 break;
14ce26e7
FB
2854 case MSR_KERNELGSBASE:
2855 val = env->kernelgsbase;
2856 break;
2857#endif
2c0262af
FB
2858 default:
2859 /* XXX: exception ? */
14ce26e7 2860 val = 0;
5fafdf24 2861 break;
2c0262af 2862 }
14ce26e7
FB
2863 EAX = (uint32_t)(val);
2864 EDX = (uint32_t)(val >> 32);
2c0262af 2865}
14ce26e7 2866#endif
2c0262af
FB
2867
2868void helper_lsl(void)
2869{
2870 unsigned int selector, limit;
5516d670 2871 uint32_t e1, e2, eflags;
3ab493de 2872 int rpl, dpl, cpl, type;
2c0262af 2873
5516d670 2874 eflags = cc_table[CC_OP].compute_all();
2c0262af
FB
2875 selector = T0 & 0xffff;
2876 if (load_segment(&e1, &e2, selector) != 0)
5516d670 2877 goto fail;
3ab493de
FB
2878 rpl = selector & 3;
2879 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2880 cpl = env->hflags & HF_CPL_MASK;
2881 if (e2 & DESC_S_MASK) {
2882 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2883 /* conforming */
2884 } else {
2885 if (dpl < cpl || dpl < rpl)
5516d670 2886 goto fail;
3ab493de
FB
2887 }
2888 } else {
2889 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2890 switch(type) {
2891 case 1:
2892 case 2:
2893 case 3:
2894 case 9:
2895 case 11:
2896 break;
2897 default:
5516d670 2898 goto fail;
3ab493de 2899 }
5516d670
FB
2900 if (dpl < cpl || dpl < rpl) {
2901 fail:
2902 CC_SRC = eflags & ~CC_Z;
3ab493de 2903 return;
5516d670 2904 }
3ab493de
FB
2905 }
2906 limit = get_seg_limit(e1, e2);
2c0262af 2907 T1 = limit;
5516d670 2908 CC_SRC = eflags | CC_Z;
2c0262af
FB
2909}
2910
2911void helper_lar(void)
2912{
2913 unsigned int selector;
5516d670 2914 uint32_t e1, e2, eflags;
3ab493de 2915 int rpl, dpl, cpl, type;
2c0262af 2916
5516d670 2917 eflags = cc_table[CC_OP].compute_all();
2c0262af 2918 selector = T0 & 0xffff;
3ab493de 2919 if ((selector & 0xfffc) == 0)
5516d670 2920 goto fail;
2c0262af 2921 if (load_segment(&e1, &e2, selector) != 0)
5516d670 2922 goto fail;
3ab493de
FB
2923 rpl = selector & 3;
2924 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2925 cpl = env->hflags & HF_CPL_MASK;
2926 if (e2 & DESC_S_MASK) {
2927 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2928 /* conforming */
2929 } else {
2930 if (dpl < cpl || dpl < rpl)
5516d670 2931 goto fail;
3ab493de
FB
2932 }
2933 } else {
2934 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2935 switch(type) {
2936 case 1:
2937 case 2:
2938 case 3:
2939 case 4:
2940 case 5:
2941 case 9:
2942 case 11:
2943 case 12:
2944 break;
2945 default:
5516d670 2946 goto fail;
3ab493de 2947 }
5516d670
FB
2948 if (dpl < cpl || dpl < rpl) {
2949 fail:
2950 CC_SRC = eflags & ~CC_Z;
3ab493de 2951 return;
5516d670 2952 }
3ab493de 2953 }
2c0262af 2954 T1 = e2 & 0x00f0ff00;
5516d670 2955 CC_SRC = eflags | CC_Z;
2c0262af
FB
2956}
2957
3ab493de
FB
/* VERR: set ZF if the segment selected by T0 is readable at the
   current privilege level, clear ZF otherwise.  Never faults. */
void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    /* system segments are never readable via VERR */
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        /* code segment must have the readable bit */
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            /* non-conforming: privilege check applies */
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        /* data segment: always readable, but check privilege */
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
2991
/* VERW: set ZF if the segment selected by T0 is writable at the
   current privilege level, clear ZF otherwise.  Never faults. */
void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        /* code segments are never writable */
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        /* data segment must have the writable bit */
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
3022
2c0262af
FB
3023/* FPU helpers */
3024
2c0262af
FB
/* FLD m80: push the 80-bit extended real at linear address A0
   onto the FPU register stack. */
void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    /* stack grows downward modulo 8 */
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* FSTP m80 (store part): write ST0 as an 80-bit extended real to A0. */
void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, A0);
}
2c0262af 3038
2ee73ac3
FB
/* Set the given exception flag(s) in the FPU status word; if any
   flagged exception is unmasked in the control word, also raise the
   error-summary and busy bits. */
void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}
3045
/* FPU division helper: flags a divide-by-zero exception in the status
   word when b == 0, then performs the (host) division anyway so the
   IEEE result (inf/NaN) is produced. */
CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}
3052
/* Deliver a pending FPU exception: as #MF when CR0.NE is set,
   otherwise (system emulation only) via the external FERR# line. */
void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}
3064
2c0262af
FB
3065/* BCD ops */
3066
2c0262af
FB
/* FBLD: load an 18-digit packed-BCD value (9 digit bytes at A0,
   sign in bit 7 of byte 9) and push it onto the FPU stack. */
void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    /* accumulate from the most significant digit byte downward */
    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(A0 + i);
        /* each byte holds two BCD digits: high nibble, low nibble */
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    /* byte 9 bit 7 is the sign */
    if (ldub(A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}
3085
/* FBSTP (store part): round ST0 to integer and write it as an
   18-digit packed-BCD value at A0 (sign byte at offset 9). */
void helper_fbst_ST0_A0(void)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = A0;
    mem_end = mem_ref + 9;
    /* write the sign byte at offset 9 and work on |val| */
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    /* emit digit pairs, least significant byte first */
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    /* zero-fill the remaining digit bytes */
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
3113
3114void helper_f2xm1(void)
3115{
3116 ST0 = pow(2.0,ST0) - 1.0;
3117}
3118
/* FYL2X: compute ST1 * log2(ST0), store in ST1 and pop.
   For ST0 <= 0 only the status word is updated (C1 set, others
   cleared); the stack is left unchanged. */
void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0); /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}
3133
/* FPTAN: replace ST0 with tan(ST0) and push 1.0.
   Out-of-range arguments (|arg| > 2^63) only set C2 to signal
   "operand out of range"; the stack is unchanged. */
void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}
3149
3150void helper_fpatan(void)
3151{
3152 CPU86_LDouble fptemp, fpsrcop;
3153
3154 fpsrcop = ST1;
3155 fptemp = ST0;
3156 ST1 = atan2(fpsrcop,fptemp);
3157 fpop();
3158}
3159
/* FXTRACT: split ST0 into exponent and significand.
   ST0 receives the unbiased exponent; the significand (with
   exponent rebased to bias) is pushed on top. */
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}
3173
/* FPREM1: IEEE-style partial remainder ST0 = ST0 REM ST1 (round to
   nearest), reporting the low three quotient bits in C0/C3/C1 and
   using C2 to signal an incomplete reduction. */
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    /* invalid operands produce a NaN and clear the condition codes */
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        /* exponent gap small enough to finish in one step */
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        /* partial reduction only: signal incomplete via C2 */
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
3227
/* FPREM: 8087-style partial remainder ST0 = ST0 % ST1 (quotient
   truncated toward zero), reporting the low three quotient bits in
   C0/C3/C1 and using C2 to signal an incomplete reduction. */
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    /* invalid operands produce a NaN and clear the condition codes */
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        /* partial reduction: peel off N quotient bits per pass */
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
3282
3283void helper_fyl2xp1(void)
3284{
3285 CPU86_LDouble fptemp;
3286
3287 fptemp = ST0;
3288 if ((fptemp+1.0)>0.0) {
3289 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
3290 ST1 *= fptemp;
3291 fpop();
5fafdf24 3292 } else {
2c0262af
FB
3293 env->fpus &= (~0x4700);
3294 env->fpus |= 0x400;
3295 }
3296}
3297
/* FSQRT: ST0 = sqrt(ST0); a negative argument sets C1 (invalid
   operation indicator) before the host sqrt produces a NaN. */
void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}
3309
/* FSINCOS: replace ST0 with sin(ST0) and push cos(ST0).
   Out-of-range arguments only set C2; the stack is unchanged. */
void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}
3325
/* FRNDINT: round ST0 to an integer using the current rounding mode
   from env->fp_status (set by update_fp_status from FPUC). */
void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

/* FSCALE: ST0 = ST0 * 2^trunc(ST1); the (int) cast chops ST1
   toward zero as the instruction requires. */
void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}
3335
/* FSIN: ST0 = sin(ST0); out-of-range arguments only set C2. */
void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

/* FCOS: ST0 = cos(ST0); out-of-range arguments only set C2. */
void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg5 < 2**63 only */
    }
}
3363
/* FXAM: classify ST0 into C3/C2/C0 (zero, denormal, normal, NaN,
   infinity) and mirror its sign in C1. */
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        /* all-ones exponent: infinity or NaN, distinguished by mantissa */
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /*Infinity*/;
        else
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        /* zero exponent: zero or denormal, distinguished by mantissa */
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400; /* normal finite number */
    }
}
3395
14ce26e7 3396void helper_fstenv(target_ulong ptr, int data32)
2c0262af
FB
3397{
3398 int fpus, fptag, exp, i;
3399 uint64_t mant;
3400 CPU86_LDoubleU tmp;
3401
3402 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3403 fptag = 0;
3404 for (i=7; i>=0; i--) {
3405 fptag <<= 2;
3406 if (env->fptags[i]) {
3407 fptag |= 3;
3408 } else {
664e0f19 3409 tmp.d = env->fpregs[i].d;
2c0262af
FB
3410 exp = EXPD(tmp);
3411 mant = MANTD(tmp);
3412 if (exp == 0 && mant == 0) {
3413 /* zero */
3414 fptag |= 1;
3415 } else if (exp == 0 || exp == MAXEXPD
3416#ifdef USE_X86LDOUBLE
3417 || (mant & (1LL << 63)) == 0
3418#endif
3419 ) {
3420 /* NaNs, infinity, denormal */
3421 fptag |= 2;
3422 }
3423 }
3424 }
3425 if (data32) {
3426 /* 32 bit */
3427 stl(ptr, env->fpuc);
3428 stl(ptr + 4, fpus);
3429 stl(ptr + 8, fptag);
2edcdce3
FB
3430 stl(ptr + 12, 0); /* fpip */
3431 stl(ptr + 16, 0); /* fpcs */
3432 stl(ptr + 20, 0); /* fpoo */
3433 stl(ptr + 24, 0); /* fpos */
2c0262af
FB
3434 } else {
3435 /* 16 bit */
3436 stw(ptr, env->fpuc);
3437 stw(ptr + 2, fpus);
3438 stw(ptr + 4, fptag);
3439 stw(ptr + 6, 0);
3440 stw(ptr + 8, 0);
3441 stw(ptr + 10, 0);
3442 stw(ptr + 12, 0);
3443 }
3444}
3445
14ce26e7 3446void helper_fldenv(target_ulong ptr, int data32)
2c0262af
FB
3447{
3448 int i, fpus, fptag;
3449
3450 if (data32) {
3451 env->fpuc = lduw(ptr);
3452 fpus = lduw(ptr + 4);
3453 fptag = lduw(ptr + 8);
3454 }
3455 else {
3456 env->fpuc = lduw(ptr);
3457 fpus = lduw(ptr + 2);
3458 fptag = lduw(ptr + 4);
3459 }
3460 env->fpstt = (fpus >> 11) & 7;
3461 env->fpus = fpus & ~0x3800;
2edcdce3 3462 for(i = 0;i < 8; i++) {
2c0262af
FB
3463 env->fptags[i] = ((fptag & 3) == 3);
3464 fptag >>= 2;
3465 }
3466}
3467
14ce26e7 3468void helper_fsave(target_ulong ptr, int data32)
2c0262af
FB
3469{
3470 CPU86_LDouble tmp;
3471 int i;
3472
3473 helper_fstenv(ptr, data32);
3474
3475 ptr += (14 << data32);
3476 for(i = 0;i < 8; i++) {
3477 tmp = ST(i);
2c0262af 3478 helper_fstt(tmp, ptr);
2c0262af
FB
3479 ptr += 10;
3480 }
3481
3482 /* fninit */
3483 env->fpus = 0;
3484 env->fpstt = 0;
3485 env->fpuc = 0x37f;
3486 env->fptags[0] = 1;
3487 env->fptags[1] = 1;
3488 env->fptags[2] = 1;
3489 env->fptags[3] = 1;
3490 env->fptags[4] = 1;
3491 env->fptags[5] = 1;
3492 env->fptags[6] = 1;
3493 env->fptags[7] = 1;
3494}
3495
14ce26e7 3496void helper_frstor(target_ulong ptr, int data32)
2c0262af
FB
3497{
3498 CPU86_LDouble tmp;
3499 int i;
3500
3501 helper_fldenv(ptr, data32);
3502 ptr += (14 << data32);
3503
3504 for(i = 0;i < 8; i++) {
2c0262af 3505 tmp = helper_fldt(ptr);
2c0262af
FB
3506 ST(i) = tmp;
3507 ptr += 10;
3508 }
3509}
3510
14ce26e7
FB
3511void helper_fxsave(target_ulong ptr, int data64)
3512{
3513 int fpus, fptag, i, nb_xmm_regs;
3514 CPU86_LDouble tmp;
3515 target_ulong addr;
3516
3517 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3518 fptag = 0;
3519 for(i = 0; i < 8; i++) {
d3c61721 3520 fptag |= (env->fptags[i] << i);
14ce26e7
FB
3521 }
3522 stw(ptr, env->fpuc);
3523 stw(ptr + 2, fpus);
d3c61721 3524 stw(ptr + 4, fptag ^ 0xff);
14ce26e7
FB
3525
3526 addr = ptr + 0x20;
3527 for(i = 0;i < 8; i++) {
3528 tmp = ST(i);
3529 helper_fstt(tmp, addr);
3530 addr += 16;
3531 }
3b46e624 3532
14ce26e7 3533 if (env->cr[4] & CR4_OSFXSR_MASK) {
a8ede8ba 3534 /* XXX: finish it */
664e0f19 3535 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
d3c61721 3536 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
14ce26e7
FB
3537 nb_xmm_regs = 8 << data64;
3538 addr = ptr + 0xa0;
3539 for(i = 0; i < nb_xmm_regs; i++) {
a8ede8ba
FB
3540 stq(addr, env->xmm_regs[i].XMM_Q(0));
3541 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
14ce26e7
FB
3542 addr += 16;
3543 }
3544 }
3545}
3546
/* FXRSTOR: reload the state stored by helper_fxsave; SSE state is
   restored only when CR4.OSFXSR is set. */
void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    /* abridged tag is 1 = valid; flip back to our 1 = empty */
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}
1f1af9fd
FB
3583
3584#ifndef USE_X86LDOUBLE
3585
/* Host long double is not x86 80-bit: convert between the guest's
   80-bit extended format and the host double. */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa: shift the 52-bit fraction up and set the explicit
       integer bit required by the 80-bit format */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign: rebase from double bias to extended bias */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    /* drop the explicit integer bit and the 11 low mantissa bits */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}
3618
3619#else
3620
/* Host long double already is x86 80-bit extended: the conversion is
   a plain split/join of the mantissa and sign+exponent words. */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
3638#endif
3639
14ce26e7
FB
3640#ifdef TARGET_X86_64
3641
3642//#define DEBUG_MULDIV
3643
/* 128-bit add: {*phigh:*plow} += {b:a}, propagating the carry from
   the low word into the high word. */
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    uint64_t old_low = *plow;

    *plow = old_low + a;
    /* unsigned wrap-around means a carry out of the low word */
    if (*plow < old_low)
        (*phigh)++;
    *phigh += b;
}
3652
/* 128-bit two's-complement negation: {*phigh:*plow} = -{*phigh:*plow}
   (bitwise complement plus one). */
static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}
3659
45bbbb46
FB
3660/* return TRUE if overflow */
3661static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
14ce26e7
FB
3662{
3663 uint64_t q, r, a1, a0;
c0b24a1d 3664 int i, qb, ab;
14ce26e7
FB
3665
3666 a0 = *plow;
3667 a1 = *phigh;
3668 if (a1 == 0) {
3669 q = a0 / b;
3670 r = a0 % b;
3671 *plow = q;
3672 *phigh = r;
3673 } else {
45bbbb46
FB
3674 if (a1 >= b)
3675 return 1;
14ce26e7
FB
3676 /* XXX: use a better algorithm */
3677 for(i = 0; i < 64; i++) {
c0b24a1d 3678 ab = a1 >> 63;
a8ede8ba 3679 a1 = (a1 << 1) | (a0 >> 63);
c0b24a1d 3680 if (ab || a1 >= b) {
14ce26e7
FB
3681 a1 -= b;
3682 qb = 1;
3683 } else {
3684 qb = 0;
3685 }
14ce26e7
FB
3686 a0 = (a0 << 1) | qb;
3687 }
a8ede8ba 3688#if defined(DEBUG_MULDIV)
26a76461 3689 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
14ce26e7
FB
3690 *phigh, *plow, b, a0, a1);
3691#endif
3692 *plow = a0;
3693 *phigh = a1;
3694 }
45bbbb46 3695 return 0;
14ce26e7
FB
3696}
3697
45bbbb46
FB
3698/* return TRUE if overflow */
3699static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
14ce26e7
FB
3700{
3701 int sa, sb;
3702 sa = ((int64_t)*phigh < 0);
3703 if (sa)
3704 neg128(plow, phigh);
3705 sb = (b < 0);
3706 if (sb)
3707 b = -b;
45bbbb46
FB
3708 if (div64(plow, phigh, b) != 0)
3709 return 1;
3710 if (sa ^ sb) {
3711 if (*plow > (1ULL << 63))
3712 return 1;
14ce26e7 3713 *plow = - *plow;
45bbbb46
FB
3714 } else {
3715 if (*plow >= (1ULL << 63))
3716 return 1;
3717 }
31313213 3718 if (sa)
14ce26e7 3719 *phigh = - *phigh;
45bbbb46 3720 return 0;
14ce26e7
FB
3721}
3722
/* MUL r/m64: unsigned EAX(RAX) * T0 -> RDX:RAX; CC_SRC keeps the
   high half so CF/OF can be derived (set when high half nonzero). */
void helper_mulq_EAX_T0(void)
{
    uint64_t r0, r1;

    mulu64(&r1, &r0, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

/* IMUL r/m64: signed RAX * T0 -> RDX:RAX; CC_SRC is nonzero when the
   high half is not the sign extension of the low half (overflow). */
void helper_imulq_EAX_T0(void)
{
    uint64_t r0, r1;

    muls64(&r1, &r0, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

/* IMUL r64, r/m64 (two-operand form): T0 = T0 * T1, same overflow
   detection as above. */
void helper_imulq_T0_T1(void)
{
    uint64_t r0, r1;

    muls64(&r1, &r0, T0, T1);
    T0 = r0;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}
3754
/* DIV r/m64: unsigned RDX:RAX / T0 -> quotient in RAX, remainder in
   RDX.  Both divide-by-zero and quotient overflow raise #DE, as on
   real hardware. */
void helper_divq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

/* IDIV r/m64: signed RDX:RAX / T0 -> quotient in RAX, remainder in
   RDX; #DE on divide-by-zero or quotient overflow. */
void helper_idivq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
3782
68cae3d8
FB
3783void helper_bswapq_T0(void)
3784{
3785 T0 = bswap64(T0);
3786}
14ce26e7
FB
3787#endif
3788
3d7374c5
FB
3789void helper_hlt(void)
3790{
3791 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
3792 env->hflags |= HF_HALTED_MASK;
3793 env->exception_index = EXCP_HLT;
3794 cpu_loop_exit();
3795}
3796
/* MONITOR: only ECX == 0 (no extensions) is accepted; the monitored
   address itself is not recorded. */
void helper_monitor(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}

/* MWAIT: approximated as HLT on a uniprocessor; with multiple CPUs
   we must not sleep, since another CPU would be the one to wake us. */
void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}
3816
664e0f19
FB
3817float approx_rsqrt(float a)
3818{
3819 return 1.0 / sqrt(a);
3820}
3821
3822float approx_rcp(float a)
3823{
3824 return 1.0 / a;
3825}
3826
7a0e1f41 3827void update_fp_status(void)
4d6b6c0a 3828{
7a0e1f41 3829 int rnd_type;
4d6b6c0a 3830
7a0e1f41
FB
3831 /* set rounding mode */
3832 switch(env->fpuc & RC_MASK) {
3833 default:
3834 case RC_NEAR:
3835 rnd_type = float_round_nearest_even;
3836 break;
3837 case RC_DOWN:
3838 rnd_type = float_round_down;
3839 break;
3840 case RC_UP:
3841 rnd_type = float_round_up;
3842 break;
3843 case RC_CHOP:
3844 rnd_type = float_round_to_zero;
3845 break;
3846 }
3847 set_float_rounding_mode(rnd_type, &env->fp_status);
3848#ifdef FLOATX80
3849 switch((env->fpuc >> 8) & 3) {
3850 case 0:
3851 rnd_type = 32;
3852 break;
3853 case 2:
3854 rnd_type = 64;
3855 break;
3856 case 3:
3857 default:
3858 rnd_type = 80;
3859 break;
3860 }
3861 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4d6b6c0a 3862#endif
7a0e1f41 3863}
664e0f19 3864
5fafdf24 3865#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3866
3867#define MMUSUFFIX _mmu
3868#define GETPC() (__builtin_return_address(0))
3869
2c0262af
FB
3870#define SHIFT 0
3871#include "softmmu_template.h"
3872
3873#define SHIFT 1
3874#include "softmmu_template.h"
3875
3876#define SHIFT 2
3877#include "softmmu_template.h"
3878
3879#define SHIFT 3
3880#include "softmmu_template.h"
3881
61382a50
FB
3882#endif
3883
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* from generated code we can restore CPU state before raising;
           from plain C there is no state to unwind */
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}
0573fbfc
TS
3919
3920
3921/* Secure Virtual Machine helpers */
3922
/* STGI: set the SVM global interrupt flag (interrupts deliverable). */
void helper_stgi(void)
{
    env->hflags |= HF_GIF_MASK;
}

/* CLGI: clear the SVM global interrupt flag (interrupts held off). */
void helper_clgi(void)
{
    env->hflags &= ~HF_GIF_MASK;
}
3932
3933#if defined(CONFIG_USER_ONLY)
3934
/* SVM is meaningless in user-mode emulation: all hypervisor helpers
   are no-ops and no intercept ever triggers. */
void helper_vmrun(target_ulong addr) { }
void helper_vmmcall(void) { }
void helper_vmload(target_ulong addr) { }
void helper_vmsave(target_ulong addr) { }
void helper_skinit(void) { }
void helper_invlpga(void) { }
void vmexit(uint64_t exit_code, uint64_t exit_info_1) { }
int svm_check_intercept_param(uint32_t type, uint64_t param)
{
    return 0;
}
3946
3947#else
3948
/* Expand the packed 16-bit VMCB segment attributes (plus the base and
   limit bits that live in the descriptor's high word) into QEMU's
   32-bit attribute format. */
static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
{
    uint32_t attrib;

    attrib  = ((uint32_t)(vmcb_attrib & 0x00ff)) << 8;  /* Type, S, DPL, P */
    attrib |= ((uint32_t)(vmcb_attrib & 0x0f00)) << 12; /* AVL, L, DB, G */
    attrib |= (vmcb_base >> 16) & 0xff;                 /* Base 23-16 */
    attrib |= vmcb_base & 0xff000000;                   /* Base 31-24 */
    attrib |= vmcb_limit & 0xf0000;                     /* Limit 19-16 */
    return attrib;
}
3958
/* Pack QEMU's 32-bit segment attributes back into the 16-bit VMCB
   attribute field (inverse of vmcb2cpu_attrib's attribute part). */
static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
{
    uint16_t low_bits  = (cpu_attrib >> 8) & 0xff;      /* Type, S, DPL, P */
    uint16_t high_bits = (cpu_attrib & 0xf00000) >> 12; /* AVL, L, DB, G */

    return low_bits | high_bits;
}
3964
3965extern uint8_t *phys_ram_base;
/* VMRUN: enter guest mode.  Saves the host state into the hsave page,
   loads the guest state and intercept configuration from the VMCB at
   `addr`, optionally injects a pending event, and re-enters the main
   loop as the guest. */
void helper_vmrun(target_ulong addr)
{
    uint32_t event_inj;
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;
    regs_to_env();

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    /* We shift all the intercept bits so we can OR them with the TB
       flags later on */
    env->intercept = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
    env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        /* virtual interrupt masking: guest gets its own TPR, and the
           host interrupt-enable state is remembered in HF_HIF */
        env->cr[8] = int_ctl & V_TPR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
    }

#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
#endif
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
    CC_DST = 0xffffffff;

    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    helper_stgi();

    regs_to_env();

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        /* consume the injection request */
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = -1;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "INTR");
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "NMI");
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "EXEPT");
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "SOFT");
            break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
    if (int_ctl & V_IRQ_MASK)
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;

    cpu_loop_exit();
}
4128
/* VMMCALL: hypercall entry point; currently only logged, no
   hypervisor service is implemented. */
void helper_vmmcall(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmmcall!\n");
}
4134
/* VMLOAD: load the extra guest state (FS/GS/TR/LDTR, syscall and
   sysenter MSRs, and on x86-64 KERNEL_GS_BASE/LSTAR/CSTAR/SFMASK)
   from the VMCB at addr. */
void helper_vmload(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
    SVM_LOAD_SEG2(addr, tr, tr);
    SVM_LOAD_SEG2(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}
4158
/* VMSAVE: store into the VMCB at physical address 'addr' the same slice
 * of state that helper_vmload reads -- FS, GS, TR, LDTR and the
 * syscall/sysenter MSR state.  Keep the field list in sync with
 * helper_vmload. */
void helper_vmsave(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    /* Segment state (selector, base, limit, attributes) via the
       SVM_SAVE_SEG helper macro. */
    SVM_SAVE_SEG(addr, segs[R_FS], fs);
    SVM_SAVE_SEG(addr, segs[R_GS], gs);
    SVM_SAVE_SEG(addr, tr, tr);
    SVM_SAVE_SEG(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    /* Long-mode syscall MSRs exist only on x86-64 targets. */
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}
4182
4183void helper_skinit(void)
4184{
4185 if (loglevel & CPU_LOG_TB_IN_ASM)
4186 fprintf(logfile,"skinit!\n");
4187}
4188
/* INVLPGA: architecturally invalidates the TLB entry for the virtual
 * address in rAX under the ASID in ECX.  ASIDs and per-page invalidation
 * are not modelled here, so we conservatively flush the whole TLB --
 * correct, just slower than necessary. */
void helper_invlpga(void)
{
    tlb_flush(env, 0);
}
4193
4194int svm_check_intercept_param(uint32_t type, uint64_t param)
4195{
4196 switch(type) {
4197 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
4198 if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
4199 vmexit(type, param);
4200 return 1;
4201 }
4202 break;
4203 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
4204 if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
4205 vmexit(type, param);
4206 return 1;
4207 }
4208 break;
4209 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
4210 if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
4211 vmexit(type, param);
4212 return 1;
4213 }
4214 break;
4215 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
4216 if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
4217 vmexit(type, param);
4218 return 1;
4219 }
4220 break;
4221 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
4222 if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
4223 vmexit(type, param);
4224 return 1;
4225 }
4226 break;
4227 case SVM_EXIT_IOIO:
4228 if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
4229 /* FIXME: this should be read in at vmrun (faster this way?) */
4230 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
4231 uint16_t port = (uint16_t) (param >> 16);
4232
4233 if(ldub_phys(addr + port / 8) & (1 << (port % 8)))
4234 vmexit(type, param);
4235 }
4236 break;
4237
4238 case SVM_EXIT_MSR:
4239 if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
4240 /* FIXME: this should be read in at vmrun (faster this way?) */
4241 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
4242 switch((uint32_t)ECX) {
4243 case 0 ... 0x1fff:
4244 T0 = (ECX * 2) % 8;
4245 T1 = ECX / 8;
4246 break;
4247 case 0xc0000000 ... 0xc0001fff:
4248 T0 = (8192 + ECX - 0xc0000000) * 2;
4249 T1 = (T0 / 8);
4250 T0 %= 8;
4251 break;
4252 case 0xc0010000 ... 0xc0011fff:
4253 T0 = (16384 + ECX - 0xc0010000) * 2;
4254 T1 = (T0 / 8);
4255 T0 %= 8;
4256 break;
4257 default:
4258 vmexit(type, param);
4259 return 1;
4260 }
4261 if (ldub_phys(addr + T1) & ((1 << param) << T0))
4262 vmexit(type, param);
4263 return 1;
4264 }
4265 break;
4266 default:
4267 if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
4268 vmexit(type, param);
4269 return 1;
4270 }
4271 break;
4272 }
4273 return 0;
4274}
4275
/* #VMEXIT: leave guest mode.  Saves the current (guest) CPU state back
 * into the VMCB at env->vm_vmcb, records the exit reason, restores the
 * host state previously stashed at env->vm_hsave, and longjmps back to
 * the cpu loop via cpu_loop_exit().  Never returns.
 *
 * exit_code   - SVM exit reason, stored split into control.exit_code
 *               (low 32 bits) and control.exit_code_hi
 * exit_info_1 - exit qualification, stored in control.exit_info_1
 */
void vmexit(uint64_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmexit(%016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    /* Save the VM state in the vmcb */
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    /* When virtual interrupt masking is active, reflect the guest's TPR
       (CR8) back into the VMCB's V_TPR field.  int_ctl is read here and
       also consulted further below when restoring the host CR8. */
    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
    }

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    /* CR0.PE is forced on: #VMEXIT always lands in protected mode. */
    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* NOTE(review): int_ctl here still holds the *guest* VMCB value read
       above (possibly with V_TPR rewritten) -- confirm that keying the
       host CR8 restore off the guest's V_INTR_MASKING bit is intended. */
    if (int_ctl & V_INTR_MASKING_MASK)
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
    /* we need to set the efer after the crs so the hidden flags get set properly */
#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
#endif

    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    SVM_LOAD_SEG(env->vm_hsave, ES, es);
    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
    SVM_LOAD_SEG(env->vm_hsave, DS, ds);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    /* exit_code is 64-bit; store it as two 32-bit halves. */
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code_hi), (uint32_t)(exit_code >> 32));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    helper_clgi();
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host.) */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    regs_to_env();
    cpu_loop_exit();
}
4397
4398#endif
This page took 0.743782 seconds and 4 git commands to generate.