1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#define CPU_NO_GLOBAL_REGS
21#include "exec.h"
22#include "host-utils.h"
23
24//#define DEBUG_PCALL
25
26#if 0
27#define raise_exception_err(a, b)\
28do {\
29 if (logfile)\
30 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
31 (raise_exception_err)(a, b);\
32} while (0)
33#endif
34
35const uint8_t parity_table[256] = {
36 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
37 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
38 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
39 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
40 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
41 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
42 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
43 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
44 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
45 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
46 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
47 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
48 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
49 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
51 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
52 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68};
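/* PF is set when the low byte of a result has an even number of 1 bits;
   the table above simply precomputes that predicate for all 256 byte
   values. A sketch of how one entry could be regenerated (illustration
   only, not part of the build; parity_entry is a hypothetical helper):
*/
#if 0
static uint8_t parity_entry(unsigned int v)
{
    int bits = 0;
    while (v) {
        bits += v & 1;   /* count set bits in the byte */
        v >>= 1;
    }
    return (bits & 1) ? 0 : CC_P;  /* even population count => PF set */
}
#endif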
69
70/* modulo 17 table */
71const uint8_t rclw_table[32] = {
72 0, 1, 2, 3, 4, 5, 6, 7,
73 8, 9,10,11,12,13,14,15,
74 16, 0, 1, 2, 3, 4, 5, 6,
75 7, 8, 9,10,11,12,13,14,
76};
77
78/* modulo 9 table */
79const uint8_t rclb_table[32] = {
80 0, 1, 2, 3, 4, 5, 6, 7,
81 8, 0, 1, 2, 3, 4, 5, 6,
82 7, 8, 0, 1, 2, 3, 4, 5,
83 6, 7, 8, 0, 1, 2, 3, 4,
84};
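/* RCL rotates through CF, so the effective rotation width is one bit
   larger than the operand: 17 bits for words, 9 bits for bytes. The
   tables above reduce a 5-bit shift count modulo that width, e.g.
   rclw_table[20] == 20 % 17 == 3 and rclb_table[10] == 10 % 9 == 1. */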
85
86const CPU86_LDouble f15rk[7] =
87{
88 0.00000000000000000000L,
89 1.00000000000000000000L,
90 3.14159265358979323851L, /*pi*/
91 0.30102999566398119523L, /*lg2*/
92 0.69314718055994530943L, /*ln2*/
93 1.44269504088896340739L, /*l2e*/
94 3.32192809488736234781L, /*l2t*/
95};
96
97/* broken thread support */
98
99spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
100
101void helper_lock(void)
102{
103 spin_lock(&global_cpu_lock);
104}
105
106void helper_unlock(void)
107{
108 spin_unlock(&global_cpu_lock);
109}
110
111void helper_write_eflags(target_ulong t0, uint32_t update_mask)
112{
113 load_eflags(t0, update_mask);
114}
115
116target_ulong helper_read_eflags(void)
117{
118 uint32_t eflags;
119 eflags = cc_table[CC_OP].compute_all();
120 eflags |= (DF & DF_MASK);
121 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
122 return eflags;
123}
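/* Condition codes are evaluated lazily: cc_table[CC_OP].compute_all()
   rebuilds OF/SF/ZF/AF/PF/CF from the operands of the last flag-setting
   operation, DF is kept in its own variable, and the remaining bits
   live in env->eflags, as assembled above. */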
124
125/* return non-zero on error */
126static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
127 int selector)
128{
129 SegmentCache *dt;
130 int index;
131 target_ulong ptr;
132
133 if (selector & 0x4)
134 dt = &env->ldt;
135 else
136 dt = &env->gdt;
137 index = selector & ~7;
138 if ((index + 7) > dt->limit)
139 return -1;
140 ptr = dt->base + index;
141 *e1_ptr = ldl_kernel(ptr);
142 *e2_ptr = ldl_kernel(ptr + 4);
143 return 0;
144}
145
146static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
147{
148 unsigned int limit;
149 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
150 if (e2 & DESC_G_MASK)
151 limit = (limit << 12) | 0xfff;
152 return limit;
153}
154
155static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
156{
157 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
158}
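/* Worked example: the classic flat code descriptor e1 = 0x0000ffff,
   e2 = 0x00cf9a00 decodes to base = 0 and limit = 0xfffff with
   DESC_G_MASK set, i.e. an effective limit of
   (0xfffff << 12) | 0xfff = 0xffffffff. */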
159
160static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
161{
162 sc->base = get_seg_base(e1, e2);
163 sc->limit = get_seg_limit(e1, e2);
164 sc->flags = e2;
165}
166
167/* init the segment cache in vm86 mode. */
168static inline void load_seg_vm(int seg, int selector)
169{
170 selector &= 0xffff;
171 cpu_x86_load_seg_cache(env, seg, selector,
172 (selector << 4), 0xffff, 0);
173}
174
175static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
176 uint32_t *esp_ptr, int dpl)
177{
178 int type, index, shift;
179
180#if 0
181 {
182 int i;
183 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
184 for(i=0;i<env->tr.limit;i++) {
185 printf("%02x ", env->tr.base[i]);
186 if ((i & 7) == 7) printf("\n");
187 }
188 printf("\n");
189 }
190#endif
191
192 if (!(env->tr.flags & DESC_P_MASK))
193 cpu_abort(env, "invalid tss");
194 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
195 if ((type & 7) != 1)
196 cpu_abort(env, "invalid tss type");
197 shift = type >> 3;
198 index = (dpl * 4 + 2) << shift;
199 if (index + (4 << shift) - 1 > env->tr.limit)
200 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
201 if (shift == 0) {
202 *esp_ptr = lduw_kernel(env->tr.base + index);
203 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
204 } else {
205 *esp_ptr = ldl_kernel(env->tr.base + index);
206 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
207 }
208}
209
210/* XXX: merge with load_seg() */
211static void tss_load_seg(int seg_reg, int selector)
212{
213 uint32_t e1, e2;
214 int rpl, dpl, cpl;
215
216 if ((selector & 0xfffc) != 0) {
217 if (load_segment(&e1, &e2, selector) != 0)
218 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
219 if (!(e2 & DESC_S_MASK))
220 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
221 rpl = selector & 3;
222 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
223 cpl = env->hflags & HF_CPL_MASK;
224 if (seg_reg == R_CS) {
225 if (!(e2 & DESC_CS_MASK))
226 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
227 /* XXX: is it correct ? */
228 if (dpl != rpl)
229 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
230 if ((e2 & DESC_C_MASK) && dpl > rpl)
231 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
232 } else if (seg_reg == R_SS) {
233 /* SS must be writable data */
234 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
235 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
236 if (dpl != cpl || dpl != rpl)
237 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
238 } else {
239 /* not readable code */
240 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
241 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
242 /* if data or non-conforming code, check the rights */
243 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
244 if (dpl < cpl || dpl < rpl)
245 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
246 }
247 }
248 if (!(e2 & DESC_P_MASK))
249 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
250 cpu_x86_load_seg_cache(env, seg_reg, selector,
251 get_seg_base(e1, e2),
252 get_seg_limit(e1, e2),
253 e2);
254 } else {
255 if (seg_reg == R_SS || seg_reg == R_CS)
256 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
257 }
258}
259
260#define SWITCH_TSS_JMP 0
261#define SWITCH_TSS_IRET 1
262#define SWITCH_TSS_CALL 2
263
264/* XXX: restore CPU state in registers (PowerPC case) */
265static void switch_tss(int tss_selector,
266 uint32_t e1, uint32_t e2, int source,
267 uint32_t next_eip)
268{
269 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
270 target_ulong tss_base;
271 uint32_t new_regs[8], new_segs[6];
272 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
273 uint32_t old_eflags, eflags_mask;
274 SegmentCache *dt;
275 int index;
276 target_ulong ptr;
277
278 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
279#ifdef DEBUG_PCALL
280 if (loglevel & CPU_LOG_PCALL)
281 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
282#endif
283
284 /* if it is a task gate, read and load the TSS segment */
285 if (type == 5) {
286 if (!(e2 & DESC_P_MASK))
287 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
288 tss_selector = e1 >> 16;
289 if (tss_selector & 4)
290 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
291 if (load_segment(&e1, &e2, tss_selector) != 0)
292 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
293 if (e2 & DESC_S_MASK)
294 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
295 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
296 if ((type & 7) != 1)
297 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
298 }
299
300 if (!(e2 & DESC_P_MASK))
301 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
302
303 if (type & 8)
304 tss_limit_max = 103;
305 else
306 tss_limit_max = 43;
307 tss_limit = get_seg_limit(e1, e2);
308 tss_base = get_seg_base(e1, e2);
309 if ((tss_selector & 4) != 0 ||
310 tss_limit < tss_limit_max)
311 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
312 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
313 if (old_type & 8)
314 old_tss_limit_max = 103;
315 else
316 old_tss_limit_max = 43;
317
318 /* read all the registers from the new TSS */
319 if (type & 8) {
320 /* 32 bit */
321 new_cr3 = ldl_kernel(tss_base + 0x1c);
322 new_eip = ldl_kernel(tss_base + 0x20);
323 new_eflags = ldl_kernel(tss_base + 0x24);
324 for(i = 0; i < 8; i++)
325 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
326 for(i = 0; i < 6; i++)
327 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
328 new_ldt = lduw_kernel(tss_base + 0x60);
329 new_trap = ldl_kernel(tss_base + 0x64);
330 } else {
331 /* 16 bit */
332 new_cr3 = 0;
333 new_eip = lduw_kernel(tss_base + 0x0e);
334 new_eflags = lduw_kernel(tss_base + 0x10);
335 for(i = 0; i < 8; i++)
336 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
337 for(i = 0; i < 4; i++)
338 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
339 new_ldt = lduw_kernel(tss_base + 0x2a);
340 new_segs[R_FS] = 0;
341 new_segs[R_GS] = 0;
342 new_trap = 0;
343 }
344
345 /* NOTE: we must avoid memory exceptions during the task switch,
346 so we make dummy accesses before */
347 /* XXX: it can still fail in some cases, so a bigger hack is
348 necessary to validate the TLB after having done the accesses */
349
350 v1 = ldub_kernel(env->tr.base);
351 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
352 stb_kernel(env->tr.base, v1);
353 stb_kernel(env->tr.base + old_tss_limit_max, v2);
354
355 /* clear busy bit (it is restartable) */
356 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
357 target_ulong ptr;
358 uint32_t e2;
359 ptr = env->gdt.base + (env->tr.selector & ~7);
360 e2 = ldl_kernel(ptr + 4);
361 e2 &= ~DESC_TSS_BUSY_MASK;
362 stl_kernel(ptr + 4, e2);
363 }
364 old_eflags = compute_eflags();
365 if (source == SWITCH_TSS_IRET)
366 old_eflags &= ~NT_MASK;
367
368 /* save the current state in the old TSS */
369 if (type & 8) {
370 /* 32 bit */
371 stl_kernel(env->tr.base + 0x20, next_eip);
372 stl_kernel(env->tr.base + 0x24, old_eflags);
373 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
374 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
375 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
376 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
377 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
378 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
379 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
380 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
381 for(i = 0; i < 6; i++)
382 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
383 } else {
384 /* 16 bit */
385 stw_kernel(env->tr.base + 0x0e, next_eip);
386 stw_kernel(env->tr.base + 0x10, old_eflags);
387 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
388 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
389 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
390 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
391 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
392 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
393 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
394 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
395 for(i = 0; i < 4; i++)
396 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
397 }
398
399 /* now if an exception occurs, it will occur in the next task
400 context */
401
402 if (source == SWITCH_TSS_CALL) {
403 stw_kernel(tss_base, env->tr.selector);
404 new_eflags |= NT_MASK;
405 }
406
407 /* set busy bit */
408 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
409 target_ulong ptr;
410 uint32_t e2;
411 ptr = env->gdt.base + (tss_selector & ~7);
412 e2 = ldl_kernel(ptr + 4);
413 e2 |= DESC_TSS_BUSY_MASK;
414 stl_kernel(ptr + 4, e2);
415 }
416
417 /* set the new CPU state */
418 /* from this point, any exception which occurs can give problems */
419 env->cr[0] |= CR0_TS_MASK;
420 env->hflags |= HF_TS_MASK;
421 env->tr.selector = tss_selector;
422 env->tr.base = tss_base;
423 env->tr.limit = tss_limit;
424 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
425
426 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
427 cpu_x86_update_cr3(env, new_cr3);
428 }
429
430 /* load all registers without an exception, then reload them with
431 possible exception */
432 env->eip = new_eip;
433 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
434 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
435 if (!(type & 8))
436 eflags_mask &= 0xffff;
437 load_eflags(new_eflags, eflags_mask);
438 /* XXX: what to do in 16 bit case ? */
439 EAX = new_regs[0];
440 ECX = new_regs[1];
441 EDX = new_regs[2];
442 EBX = new_regs[3];
443 ESP = new_regs[4];
444 EBP = new_regs[5];
445 ESI = new_regs[6];
446 EDI = new_regs[7];
447 if (new_eflags & VM_MASK) {
448 for(i = 0; i < 6; i++)
449 load_seg_vm(i, new_segs[i]);
450 /* in vm86, CPL is always 3 */
451 cpu_x86_set_cpl(env, 3);
452 } else {
453 /* CPL is set to the RPL of CS */
454 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
455 /* load the selectors first, as the rest may trigger exceptions */
456 for(i = 0; i < 6; i++)
457 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
458 }
459
460 env->ldt.selector = new_ldt & ~4;
461 env->ldt.base = 0;
462 env->ldt.limit = 0;
463 env->ldt.flags = 0;
464
465 /* load the LDT */
466 if (new_ldt & 4)
467 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
468
469 if ((new_ldt & 0xfffc) != 0) {
470 dt = &env->gdt;
471 index = new_ldt & ~7;
472 if ((index + 7) > dt->limit)
473 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
474 ptr = dt->base + index;
475 e1 = ldl_kernel(ptr);
476 e2 = ldl_kernel(ptr + 4);
477 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
478 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
479 if (!(e2 & DESC_P_MASK))
480 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
481 load_seg_cache_raw_dt(&env->ldt, e1, e2);
482 }
483
484 /* load the segments */
485 if (!(new_eflags & VM_MASK)) {
486 tss_load_seg(R_CS, new_segs[R_CS]);
487 tss_load_seg(R_SS, new_segs[R_SS]);
488 tss_load_seg(R_ES, new_segs[R_ES]);
489 tss_load_seg(R_DS, new_segs[R_DS]);
490 tss_load_seg(R_FS, new_segs[R_FS]);
491 tss_load_seg(R_GS, new_segs[R_GS]);
492 }
493
494 /* check that EIP is in the CS segment limits */
495 if (new_eip > env->segs[R_CS].limit) {
496 /* XXX: different exception if CALL ? */
497 raise_exception_err(EXCP0D_GPF, 0);
498 }
499}
500
501/* check if Port I/O is allowed in TSS */
502static inline void check_io(int addr, int size)
503{
504 int io_offset, val, mask;
505
506 /* TSS must be a valid 32 bit one */
507 if (!(env->tr.flags & DESC_P_MASK) ||
508 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
509 env->tr.limit < 103)
510 goto fail;
511 io_offset = lduw_kernel(env->tr.base + 0x66);
512 io_offset += (addr >> 3);
513 /* Note: the check needs two bytes */
514 if ((io_offset + 1) > env->tr.limit)
515 goto fail;
516 val = lduw_kernel(env->tr.base + io_offset);
517 val >>= (addr & 7);
518 mask = (1 << size) - 1;
519 /* all bits must be zero to allow the I/O */
520 if ((val & mask) != 0) {
521 fail:
522 raise_exception_err(EXCP0D_GPF, 0);
523 }
524}
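/* Worked example: a 2-byte access to port 0x3f9 reads the bitmap word
   at tr.base + io_offset + (0x3f9 >> 3), shifts it right by
   0x3f9 & 7 = 1 and requires the low two bits to be clear; any set bit
   in the range raises #GP. */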
525
526void helper_check_iob(uint32_t t0)
527{
528 check_io(t0, 1);
529}
530
531void helper_check_iow(uint32_t t0)
532{
533 check_io(t0, 2);
534}
535
536void helper_check_iol(uint32_t t0)
537{
538 check_io(t0, 4);
539}
540
541void helper_outb(uint32_t port, uint32_t data)
542{
543 cpu_outb(env, port, data & 0xff);
544}
545
546target_ulong helper_inb(uint32_t port)
547{
548 return cpu_inb(env, port);
549}
550
551void helper_outw(uint32_t port, uint32_t data)
552{
553 cpu_outw(env, port, data & 0xffff);
554}
555
556target_ulong helper_inw(uint32_t port)
557{
558 return cpu_inw(env, port);
559}
560
561void helper_outl(uint32_t port, uint32_t data)
562{
563 cpu_outl(env, port, data);
564}
565
566target_ulong helper_inl(uint32_t port)
567{
568 return cpu_inl(env, port);
569}
570
571static inline unsigned int get_sp_mask(unsigned int e2)
572{
573 if (e2 & DESC_B_MASK)
574 return 0xffffffff;
575 else
576 return 0xffff;
577}
578
579#ifdef TARGET_X86_64
580#define SET_ESP(val, sp_mask)\
581do {\
582 if ((sp_mask) == 0xffff)\
583 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
584 else if ((sp_mask) == 0xffffffffLL)\
585 ESP = (uint32_t)(val);\
586 else\
587 ESP = (val);\
588} while (0)
589#else
590#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
591#endif
592
593/* in 64-bit machines, this can overflow. So this segment addition macro
594 * can be used to trim the value to 32 bits whenever needed */
595#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
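/* E.g. with ssp = 0xffff0000 and sp = 0x00010000 the sum is
   0x100000000; a 64-bit target_ulong would not wrap, so the uint32_t
   cast reproduces the 32-bit wrap-around a real CPU performs. */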
596
597/* XXX: add an is_user flag to have proper security support */
598#define PUSHW(ssp, sp, sp_mask, val)\
599{\
600 sp -= 2;\
601 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
602}
603
604#define PUSHL(ssp, sp, sp_mask, val)\
605{\
606 sp -= 4;\
607 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
608}
609
610#define POPW(ssp, sp, sp_mask, val)\
611{\
612 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
613 sp += 2;\
614}
615
616#define POPL(ssp, sp, sp_mask, val)\
617{\
618 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
619 sp += 4;\
620}
621
622/* protected mode interrupt */
623static void do_interrupt_protected(int intno, int is_int, int error_code,
624 unsigned int next_eip, int is_hw)
625{
626 SegmentCache *dt;
627 target_ulong ptr, ssp;
628 int type, dpl, selector, ss_dpl, cpl;
629 int has_error_code, new_stack, shift;
630 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
631 uint32_t old_eip, sp_mask;
632
633 has_error_code = 0;
634 if (!is_int && !is_hw) {
635 switch(intno) {
636 case 8:
637 case 10:
638 case 11:
639 case 12:
640 case 13:
641 case 14:
642 case 17:
643 has_error_code = 1;
644 break;
645 }
646 }
647 if (is_int)
648 old_eip = next_eip;
649 else
650 old_eip = env->eip;
651
652 dt = &env->idt;
653 if (intno * 8 + 7 > dt->limit)
654 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
655 ptr = dt->base + intno * 8;
656 e1 = ldl_kernel(ptr);
657 e2 = ldl_kernel(ptr + 4);
658 /* check gate type */
659 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
660 switch(type) {
661 case 5: /* task gate */
662 /* this check must be done here to return the correct error code */
663 if (!(e2 & DESC_P_MASK))
664 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
665 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
666 if (has_error_code) {
667 int type;
668 uint32_t mask;
669 /* push the error code */
670 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
671 shift = type >> 3;
672 if (env->segs[R_SS].flags & DESC_B_MASK)
673 mask = 0xffffffff;
674 else
675 mask = 0xffff;
676 esp = (ESP - (2 << shift)) & mask;
677 ssp = env->segs[R_SS].base + esp;
678 if (shift)
679 stl_kernel(ssp, error_code);
680 else
681 stw_kernel(ssp, error_code);
682 SET_ESP(esp, mask);
683 }
684 return;
685 case 6: /* 286 interrupt gate */
686 case 7: /* 286 trap gate */
687 case 14: /* 386 interrupt gate */
688 case 15: /* 386 trap gate */
689 break;
690 default:
691 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
692 break;
693 }
694 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
695 cpl = env->hflags & HF_CPL_MASK;
696 /* check privilege if software int */
697 if (is_int && dpl < cpl)
698 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
699 /* check valid bit */
700 if (!(e2 & DESC_P_MASK))
701 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
702 selector = e1 >> 16;
703 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
704 if ((selector & 0xfffc) == 0)
705 raise_exception_err(EXCP0D_GPF, 0);
706
707 if (load_segment(&e1, &e2, selector) != 0)
708 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
709 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
710 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
711 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
712 if (dpl > cpl)
713 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
714 if (!(e2 & DESC_P_MASK))
715 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
716 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
717 /* to inner privilege */
718 get_ss_esp_from_tss(&ss, &esp, dpl);
719 if ((ss & 0xfffc) == 0)
720 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
721 if ((ss & 3) != dpl)
722 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
723 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
724 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
725 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
726 if (ss_dpl != dpl)
727 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
728 if (!(ss_e2 & DESC_S_MASK) ||
729 (ss_e2 & DESC_CS_MASK) ||
730 !(ss_e2 & DESC_W_MASK))
731 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
732 if (!(ss_e2 & DESC_P_MASK))
733 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
734 new_stack = 1;
735 sp_mask = get_sp_mask(ss_e2);
736 ssp = get_seg_base(ss_e1, ss_e2);
737 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
738 /* to same privilege */
739 if (env->eflags & VM_MASK)
740 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
741 new_stack = 0;
742 sp_mask = get_sp_mask(env->segs[R_SS].flags);
743 ssp = env->segs[R_SS].base;
744 esp = ESP;
745 dpl = cpl;
746 } else {
747 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
748 new_stack = 0; /* avoid warning */
749 sp_mask = 0; /* avoid warning */
750 ssp = 0; /* avoid warning */
751 esp = 0; /* avoid warning */
752 }
753
754 shift = type >> 3;
755
756#if 0
757 /* XXX: check that enough room is available */
758 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
759 if (env->eflags & VM_MASK)
760 push_size += 8;
761 push_size <<= shift;
762#endif
763 if (shift == 1) {
764 if (new_stack) {
765 if (env->eflags & VM_MASK) {
766 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
767 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
768 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
769 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
770 }
771 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
772 PUSHL(ssp, esp, sp_mask, ESP);
773 }
774 PUSHL(ssp, esp, sp_mask, compute_eflags());
775 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
776 PUSHL(ssp, esp, sp_mask, old_eip);
777 if (has_error_code) {
778 PUSHL(ssp, esp, sp_mask, error_code);
779 }
780 } else {
781 if (new_stack) {
782 if (env->eflags & VM_MASK) {
783 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
784 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
785 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
786 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
787 }
788 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
789 PUSHW(ssp, esp, sp_mask, ESP);
790 }
791 PUSHW(ssp, esp, sp_mask, compute_eflags());
792 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
793 PUSHW(ssp, esp, sp_mask, old_eip);
794 if (has_error_code) {
795 PUSHW(ssp, esp, sp_mask, error_code);
796 }
797 }
798
799 if (new_stack) {
800 if (env->eflags & VM_MASK) {
801 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
802 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
803 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
804 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
805 }
806 ss = (ss & ~3) | dpl;
807 cpu_x86_load_seg_cache(env, R_SS, ss,
808 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
809 }
810 SET_ESP(esp, sp_mask);
811
812 selector = (selector & ~3) | dpl;
813 cpu_x86_load_seg_cache(env, R_CS, selector,
814 get_seg_base(e1, e2),
815 get_seg_limit(e1, e2),
816 e2);
817 cpu_x86_set_cpl(env, dpl);
818 env->eip = offset;
819
820 /* an interrupt gate clears the IF flag */
821 if ((type & 1) == 0) {
822 env->eflags &= ~IF_MASK;
823 }
824 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
825}
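/* For a 32-bit gate taken to an inner privilege level the code above
   builds the following frame on the new stack (highest address first):
   [GS FS DS ES, only when arriving from vm86], old SS, old ESP,
   EFLAGS, old CS, old EIP, and optionally the error code. */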
826
827#ifdef TARGET_X86_64
828
829#define PUSHQ(sp, val)\
830{\
831 sp -= 8;\
832 stq_kernel(sp, (val));\
833}
834
835#define POPQ(sp, val)\
836{\
837 val = ldq_kernel(sp);\
838 sp += 8;\
839}
840
841static inline target_ulong get_rsp_from_tss(int level)
842{
843 int index;
844
845#if 0
846 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
847 env->tr.base, env->tr.limit);
848#endif
849
850 if (!(env->tr.flags & DESC_P_MASK))
851 cpu_abort(env, "invalid tss");
852 index = 8 * level + 4;
853 if ((index + 7) > env->tr.limit)
854 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
855 return ldq_kernel(env->tr.base + index);
856}
857
858/* 64 bit interrupt */
859static void do_interrupt64(int intno, int is_int, int error_code,
860 target_ulong next_eip, int is_hw)
861{
862 SegmentCache *dt;
863 target_ulong ptr;
864 int type, dpl, selector, cpl, ist;
865 int has_error_code, new_stack;
866 uint32_t e1, e2, e3, ss;
867 target_ulong old_eip, esp, offset;
868
869 has_error_code = 0;
870 if (!is_int && !is_hw) {
871 switch(intno) {
872 case 8:
873 case 10:
874 case 11:
875 case 12:
876 case 13:
877 case 14:
878 case 17:
879 has_error_code = 1;
880 break;
881 }
882 }
883 if (is_int)
884 old_eip = next_eip;
885 else
886 old_eip = env->eip;
887
888 dt = &env->idt;
889 if (intno * 16 + 15 > dt->limit)
890 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
891 ptr = dt->base + intno * 16;
892 e1 = ldl_kernel(ptr);
893 e2 = ldl_kernel(ptr + 4);
894 e3 = ldl_kernel(ptr + 8);
895 /* check gate type */
896 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
897 switch(type) {
898 case 14: /* 386 interrupt gate */
899 case 15: /* 386 trap gate */
900 break;
901 default:
902 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
903 break;
904 }
905 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
906 cpl = env->hflags & HF_CPL_MASK;
907 /* check privilege if software int */
908 if (is_int && dpl < cpl)
909 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
910 /* check valid bit */
911 if (!(e2 & DESC_P_MASK))
912 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
913 selector = e1 >> 16;
914 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
915 ist = e2 & 7;
916 if ((selector & 0xfffc) == 0)
917 raise_exception_err(EXCP0D_GPF, 0);
918
919 if (load_segment(&e1, &e2, selector) != 0)
920 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
921 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
922 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
923 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
924 if (dpl > cpl)
925 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
926 if (!(e2 & DESC_P_MASK))
927 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
928 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
929 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
930 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
931 /* to inner privilege */
932 if (ist != 0)
933 esp = get_rsp_from_tss(ist + 3);
934 else
935 esp = get_rsp_from_tss(dpl);
936 esp &= ~0xfLL; /* align stack */
937 ss = 0;
938 new_stack = 1;
939 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
940 /* to same privilege */
941 if (env->eflags & VM_MASK)
942 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
943 new_stack = 0;
944 if (ist != 0)
945 esp = get_rsp_from_tss(ist + 3);
946 else
947 esp = ESP;
948 esp &= ~0xfLL; /* align stack */
949 dpl = cpl;
950 } else {
951 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
952 new_stack = 0; /* avoid warning */
953 esp = 0; /* avoid warning */
954 }
955
956 PUSHQ(esp, env->segs[R_SS].selector);
957 PUSHQ(esp, ESP);
958 PUSHQ(esp, compute_eflags());
959 PUSHQ(esp, env->segs[R_CS].selector);
960 PUSHQ(esp, old_eip);
961 if (has_error_code) {
962 PUSHQ(esp, error_code);
963 }
964
965 if (new_stack) {
966 ss = 0 | dpl;
967 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
968 }
969 ESP = esp;
970
971 selector = (selector & ~3) | dpl;
972 cpu_x86_load_seg_cache(env, R_CS, selector,
973 get_seg_base(e1, e2),
974 get_seg_limit(e1, e2),
975 e2);
976 cpu_x86_set_cpl(env, dpl);
977 env->eip = offset;
978
979 /* an interrupt gate clears the IF flag */
980 if ((type & 1) == 0) {
981 env->eflags &= ~IF_MASK;
982 }
983 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
984}
985#endif
986
987#if defined(CONFIG_USER_ONLY)
988void helper_syscall(int next_eip_addend)
989{
990 env->exception_index = EXCP_SYSCALL;
991 env->exception_next_eip = env->eip + next_eip_addend;
992 cpu_loop_exit();
993}
994#else
995void helper_syscall(int next_eip_addend)
996{
997 int selector;
998
999 if (!(env->efer & MSR_EFER_SCE)) {
1000 raise_exception_err(EXCP06_ILLOP, 0);
1001 }
1002 selector = (env->star >> 32) & 0xffff;
1003#ifdef TARGET_X86_64
1004 if (env->hflags & HF_LMA_MASK) {
1005 int code64;
1006
1007 ECX = env->eip + next_eip_addend;
1008 env->regs[11] = compute_eflags();
1009
1010 code64 = env->hflags & HF_CS64_MASK;
1011
1012 cpu_x86_set_cpl(env, 0);
1013 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1014 0, 0xffffffff,
1015 DESC_G_MASK | DESC_P_MASK |
1016 DESC_S_MASK |
1017 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1018 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1019 0, 0xffffffff,
1020 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1021 DESC_S_MASK |
1022 DESC_W_MASK | DESC_A_MASK);
1023 env->eflags &= ~env->fmask;
1024 load_eflags(env->eflags, 0);
1025 if (code64)
1026 env->eip = env->lstar;
1027 else
1028 env->eip = env->cstar;
1029 } else
1030#endif
1031 {
1032 ECX = (uint32_t)(env->eip + next_eip_addend);
1033
1034 cpu_x86_set_cpl(env, 0);
1035 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1036 0, 0xffffffff,
1037 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1038 DESC_S_MASK |
1039 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1040 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1041 0, 0xffffffff,
1042 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1043 DESC_S_MASK |
1044 DESC_W_MASK | DESC_A_MASK);
1045 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1046 env->eip = (uint32_t)env->star;
1047 }
1048}
1049#endif
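/* MSR_STAR layout as used here and in helper_sysret below:
   bits 63..48 hold the SYSRET CS selector base, bits 47..32 the
   SYSCALL CS selector base, and bits 31..0 the legacy-mode SYSCALL
   entry EIP; SS is always loaded as the respective selector base + 8. */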
1050
1051void helper_sysret(int dflag)
1052{
1053 int cpl, selector;
1054
1055 if (!(env->efer & MSR_EFER_SCE)) {
1056 raise_exception_err(EXCP06_ILLOP, 0);
1057 }
1058 cpl = env->hflags & HF_CPL_MASK;
1059 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1060 raise_exception_err(EXCP0D_GPF, 0);
1061 }
1062 selector = (env->star >> 48) & 0xffff;
1063#ifdef TARGET_X86_64
1064 if (env->hflags & HF_LMA_MASK) {
1065 if (dflag == 2) {
1066 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1067 0, 0xffffffff,
1068 DESC_G_MASK | DESC_P_MASK |
1069 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1070 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1071 DESC_L_MASK);
1072 env->eip = ECX;
1073 } else {
1074 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1075 0, 0xffffffff,
1076 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1077 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1078 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1079 env->eip = (uint32_t)ECX;
1080 }
1081 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1082 0, 0xffffffff,
1083 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1084 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1085 DESC_W_MASK | DESC_A_MASK);
1086 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1087 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1088 cpu_x86_set_cpl(env, 3);
1089 } else
1090#endif
1091 {
1092 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1093 0, 0xffffffff,
1094 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1095 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1096 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1097 env->eip = (uint32_t)ECX;
1098 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1099 0, 0xffffffff,
1100 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1101 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1102 DESC_W_MASK | DESC_A_MASK);
1103 env->eflags |= IF_MASK;
1104 cpu_x86_set_cpl(env, 3);
1105 }
1106#ifdef USE_KQEMU
1107 if (kqemu_is_ok(env)) {
1108 if (env->hflags & HF_LMA_MASK)
1109 CC_OP = CC_OP_EFLAGS;
1110 env->exception_index = -1;
1111 cpu_loop_exit();
1112 }
1113#endif
1114}
1115
1116/* real mode interrupt */
1117static void do_interrupt_real(int intno, int is_int, int error_code,
1118 unsigned int next_eip)
1119{
1120 SegmentCache *dt;
1121 target_ulong ptr, ssp;
1122 int selector;
1123 uint32_t offset, esp;
1124 uint32_t old_cs, old_eip;
1125
1126 /* real mode (simpler!) */
1127 dt = &env->idt;
1128 if (intno * 4 + 3 > dt->limit)
1129 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1130 ptr = dt->base + intno * 4;
1131 offset = lduw_kernel(ptr);
1132 selector = lduw_kernel(ptr + 2);
1133 esp = ESP;
1134 ssp = env->segs[R_SS].base;
1135 if (is_int)
1136 old_eip = next_eip;
1137 else
1138 old_eip = env->eip;
1139 old_cs = env->segs[R_CS].selector;
1140 /* XXX: use SS segment size ? */
1141 PUSHW(ssp, esp, 0xffff, compute_eflags());
1142 PUSHW(ssp, esp, 0xffff, old_cs);
1143 PUSHW(ssp, esp, 0xffff, old_eip);
1144
1145 /* update processor state */
1146 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1147 env->eip = offset;
1148 env->segs[R_CS].selector = selector;
1149 env->segs[R_CS].base = (selector << 4);
1150 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1151}
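/* The real-mode IVT packs one far pointer per vector: the handler
   offset word sits at base + intno * 4 and the segment word at
   base + intno * 4 + 2, which is exactly what the two lduw_kernel()
   calls above fetch. */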
1152
1153/* fake user mode interrupt */
1154void do_interrupt_user(int intno, int is_int, int error_code,
1155 target_ulong next_eip)
1156{
1157 SegmentCache *dt;
1158 target_ulong ptr;
1159 int dpl, cpl, shift;
1160 uint32_t e2;
1161
1162 dt = &env->idt;
1163 if (env->hflags & HF_LMA_MASK) {
1164 shift = 4;
1165 } else {
1166 shift = 3;
1167 }
1168 ptr = dt->base + (intno << shift);
1169 e2 = ldl_kernel(ptr + 4);
1170
1171 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1172 cpl = env->hflags & HF_CPL_MASK;
1173 /* check privilege if software int */
1174 if (is_int && dpl < cpl)
1175 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1176
1177 /* Since we emulate only user space, we cannot do more than
1178 exit the emulation with the suitable exception and error
1179 code */
1180 if (is_int)
1181 EIP = next_eip;
1182}
1183
1184/*
1185 * Begin execution of an interrupt. is_int is TRUE if coming from
1186 * the int instruction. next_eip is the EIP value AFTER the interrupt
1187 * instruction. It is only relevant if is_int is TRUE.
1188 */
1189void do_interrupt(int intno, int is_int, int error_code,
1190 target_ulong next_eip, int is_hw)
1191{
1192 if (loglevel & CPU_LOG_INT) {
1193 if ((env->cr[0] & CR0_PE_MASK)) {
1194 static int count;
1195 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1196 count, intno, error_code, is_int,
1197 env->hflags & HF_CPL_MASK,
1198 env->segs[R_CS].selector, EIP,
1199 (int)env->segs[R_CS].base + EIP,
1200 env->segs[R_SS].selector, ESP);
1201 if (intno == 0x0e) {
1202 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1203 } else {
1204 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1205 }
1206 fprintf(logfile, "\n");
1207 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1208#if 0
1209 {
1210 int i;
1211 uint8_t *ptr;
1212 fprintf(logfile, " code=");
1213 ptr = env->segs[R_CS].base + env->eip;
1214 for(i = 0; i < 16; i++) {
1215 fprintf(logfile, " %02x", ldub(ptr + i));
1216 }
1217 fprintf(logfile, "\n");
1218 }
1219#endif
1220 count++;
1221 }
1222 }
1223 if (env->cr[0] & CR0_PE_MASK) {
1224#ifdef TARGET_X86_64
1225 if (env->hflags & HF_LMA_MASK) {
1226 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1227 } else
1228#endif
1229 {
1230 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1231 }
1232 } else {
1233 do_interrupt_real(intno, is_int, error_code, next_eip);
1234 }
1235}
1236
1237/*
1238 * Check nested exceptions and change to double or triple fault if
1239 * needed. It should only be called if this is not an interrupt.
1240 * Returns the new exception number.
1241 */
1242static int check_exception(int intno, int *error_code)
1243{
1244 int first_contributory = env->old_exception == 0 ||
1245 (env->old_exception >= 10 &&
1246 env->old_exception <= 13);
1247 int second_contributory = intno == 0 ||
1248 (intno >= 10 && intno <= 13);
1249
1250 if (loglevel & CPU_LOG_INT)
1251 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1252 env->old_exception, intno);
1253
1254 if (env->old_exception == EXCP08_DBLE)
1255 cpu_abort(env, "triple fault");
1256
1257 if ((first_contributory && second_contributory)
1258 || (env->old_exception == EXCP0E_PAGE &&
1259 (second_contributory || (intno == EXCP0E_PAGE)))) {
1260 intno = EXCP08_DBLE;
1261 *error_code = 0;
1262 }
1263
1264 if (second_contributory || (intno == EXCP0E_PAGE) ||
1265 (intno == EXCP08_DBLE))
1266 env->old_exception = intno;
1267
1268 return intno;
1269}
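/* Example: a #GP (13) raised while a contributory fault such as #NP
   (11) is being delivered is promoted to #DF (8); a further fault
   while delivering the #DF triggers the triple-fault abort above. */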
1270
1271/*
1272 * Signal an interrupt. It is executed in the main CPU loop.
1273 * is_int is TRUE if coming from the int instruction. next_eip is the
1274 * EIP value AFTER the interrupt instruction. It is only relevant if
1275 * is_int is TRUE.
1276 */
1277void raise_interrupt(int intno, int is_int, int error_code,
1278 int next_eip_addend)
1279{
1280 if (!is_int) {
1281 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1282 intno = check_exception(intno, &error_code);
1283 } else {
1284 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1285 }
1286
1287 env->exception_index = intno;
1288 env->error_code = error_code;
1289 env->exception_is_int = is_int;
1290 env->exception_next_eip = env->eip + next_eip_addend;
1291 cpu_loop_exit();
1292}
1293
1294/* shortcuts to generate exceptions */
1295
1296void (raise_exception_err)(int exception_index, int error_code)
1297{
1298 raise_interrupt(exception_index, 0, error_code, 0);
1299}
1300
1301void raise_exception(int exception_index)
1302{
1303 raise_interrupt(exception_index, 0, 0, 0);
1304}
1305
1306/* SMM support */
1307
1308#if defined(CONFIG_USER_ONLY)
1309
1310void do_smm_enter(void)
1311{
1312}
1313
1314void helper_rsm(void)
1315{
1316}
1317
1318#else
1319
1320#ifdef TARGET_X86_64
1321#define SMM_REVISION_ID 0x00020064
1322#else
1323#define SMM_REVISION_ID 0x00020000
1324#endif
1325
1326void do_smm_enter(void)
1327{
1328 target_ulong sm_state;
1329 SegmentCache *dt;
1330 int i, offset;
1331
1332 if (loglevel & CPU_LOG_INT) {
1333 fprintf(logfile, "SMM: enter\n");
1334 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1335 }
1336
1337 env->hflags |= HF_SMM_MASK;
1338 cpu_smm_update(env);
1339
1340 sm_state = env->smbase + 0x8000;
1341
1342#ifdef TARGET_X86_64
1343 for(i = 0; i < 6; i++) {
1344 dt = &env->segs[i];
1345 offset = 0x7e00 + i * 16;
1346 stw_phys(sm_state + offset, dt->selector);
1347 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1348 stl_phys(sm_state + offset + 4, dt->limit);
1349 stq_phys(sm_state + offset + 8, dt->base);
1350 }
1351
1352 stq_phys(sm_state + 0x7e68, env->gdt.base);
1353 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1354
1355 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1356 stq_phys(sm_state + 0x7e78, env->ldt.base);
1357 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1358 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1359
1360 stq_phys(sm_state + 0x7e88, env->idt.base);
1361 stl_phys(sm_state + 0x7e84, env->idt.limit);
1362
1363 stw_phys(sm_state + 0x7e90, env->tr.selector);
1364 stq_phys(sm_state + 0x7e98, env->tr.base);
1365 stl_phys(sm_state + 0x7e94, env->tr.limit);
1366 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1367
1368 stq_phys(sm_state + 0x7ed0, env->efer);
1369
1370 stq_phys(sm_state + 0x7ff8, EAX);
1371 stq_phys(sm_state + 0x7ff0, ECX);
1372 stq_phys(sm_state + 0x7fe8, EDX);
1373 stq_phys(sm_state + 0x7fe0, EBX);
1374 stq_phys(sm_state + 0x7fd8, ESP);
1375 stq_phys(sm_state + 0x7fd0, EBP);
1376 stq_phys(sm_state + 0x7fc8, ESI);
1377 stq_phys(sm_state + 0x7fc0, EDI);
1378 for(i = 8; i < 16; i++)
1379 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1380 stq_phys(sm_state + 0x7f78, env->eip);
1381 stl_phys(sm_state + 0x7f70, compute_eflags());
1382 stl_phys(sm_state + 0x7f68, env->dr[6]);
1383 stl_phys(sm_state + 0x7f60, env->dr[7]);
1384
1385 stl_phys(sm_state + 0x7f48, env->cr[4]);
1386 stl_phys(sm_state + 0x7f50, env->cr[3]);
1387 stl_phys(sm_state + 0x7f58, env->cr[0]);
1388
1389 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1390 stl_phys(sm_state + 0x7f00, env->smbase);
1391#else
1392 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1393 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1394 stl_phys(sm_state + 0x7ff4, compute_eflags());
1395 stl_phys(sm_state + 0x7ff0, env->eip);
1396 stl_phys(sm_state + 0x7fec, EDI);
1397 stl_phys(sm_state + 0x7fe8, ESI);
1398 stl_phys(sm_state + 0x7fe4, EBP);
1399 stl_phys(sm_state + 0x7fe0, ESP);
1400 stl_phys(sm_state + 0x7fdc, EBX);
1401 stl_phys(sm_state + 0x7fd8, EDX);
1402 stl_phys(sm_state + 0x7fd4, ECX);
1403 stl_phys(sm_state + 0x7fd0, EAX);
1404 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1405 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1406
1407 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1408 stl_phys(sm_state + 0x7f64, env->tr.base);
1409 stl_phys(sm_state + 0x7f60, env->tr.limit);
1410 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1411
1412 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1413 stl_phys(sm_state + 0x7f80, env->ldt.base);
1414 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1415 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1416
1417 stl_phys(sm_state + 0x7f74, env->gdt.base);
1418 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1419
1420 stl_phys(sm_state + 0x7f58, env->idt.base);
1421 stl_phys(sm_state + 0x7f54, env->idt.limit);
1422
1423 for(i = 0; i < 6; i++) {
1424 dt = &env->segs[i];
1425 if (i < 3)
1426 offset = 0x7f84 + i * 12;
1427 else
1428 offset = 0x7f2c + (i - 3) * 12;
1429 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1430 stl_phys(sm_state + offset + 8, dt->base);
1431 stl_phys(sm_state + offset + 4, dt->limit);
1432 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1433 }
1434 stl_phys(sm_state + 0x7f14, env->cr[4]);
1435
1436 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1437 stl_phys(sm_state + 0x7ef8, env->smbase);
1438#endif
1439 /* init SMM cpu state */
1440
1441#ifdef TARGET_X86_64
1442 cpu_load_efer(env, 0);
1443#endif
1444 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1445 env->eip = 0x00008000;
1446 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1447 0xffffffff, 0);
1448 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1449 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1450 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1451 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1452 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1453
1454 cpu_x86_update_cr0(env,
1455 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1456 cpu_x86_update_cr4(env, 0);
1457 env->dr[7] = 0x00000400;
1458 CC_OP = CC_OP_EFLAGS;
1459}
1460
1461void helper_rsm(void)
1462{
1463 target_ulong sm_state;
1464 int i, offset;
1465 uint32_t val;
1466
1467 sm_state = env->smbase + 0x8000;
1468#ifdef TARGET_X86_64
1469 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1470
1471 for(i = 0; i < 6; i++) {
1472 offset = 0x7e00 + i * 16;
1473 cpu_x86_load_seg_cache(env, i,
1474 lduw_phys(sm_state + offset),
1475 ldq_phys(sm_state + offset + 8),
1476 ldl_phys(sm_state + offset + 4),
1477 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1478 }
1479
1480 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1481 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1482
1483 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1484 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1485 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1486 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1487
1488 env->idt.base = ldq_phys(sm_state + 0x7e88);
1489 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1490
1491 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1492 env->tr.base = ldq_phys(sm_state + 0x7e98);
1493 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1494 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1495
1496 EAX = ldq_phys(sm_state + 0x7ff8);
1497 ECX = ldq_phys(sm_state + 0x7ff0);
1498 EDX = ldq_phys(sm_state + 0x7fe8);
1499 EBX = ldq_phys(sm_state + 0x7fe0);
1500 ESP = ldq_phys(sm_state + 0x7fd8);
1501 EBP = ldq_phys(sm_state + 0x7fd0);
1502 ESI = ldq_phys(sm_state + 0x7fc8);
1503 EDI = ldq_phys(sm_state + 0x7fc0);
1504 for(i = 8; i < 16; i++)
1505 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1506 env->eip = ldq_phys(sm_state + 0x7f78);
1507 load_eflags(ldl_phys(sm_state + 0x7f70),
1508 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1509 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1510 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1511
1512 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1513 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1514 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1515
1516 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1517 if (val & 0x20000) {
1518 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1519 }
1520#else
1521 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1522 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1523 load_eflags(ldl_phys(sm_state + 0x7ff4),
1524 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1525 env->eip = ldl_phys(sm_state + 0x7ff0);
1526 EDI = ldl_phys(sm_state + 0x7fec);
1527 ESI = ldl_phys(sm_state + 0x7fe8);
1528 EBP = ldl_phys(sm_state + 0x7fe4);
1529 ESP = ldl_phys(sm_state + 0x7fe0);
1530 EBX = ldl_phys(sm_state + 0x7fdc);
1531 EDX = ldl_phys(sm_state + 0x7fd8);
1532 ECX = ldl_phys(sm_state + 0x7fd4);
1533 EAX = ldl_phys(sm_state + 0x7fd0);
1534 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1535 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1536
1537 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1538 env->tr.base = ldl_phys(sm_state + 0x7f64);
1539 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1540 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1541
1542 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1543 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1544 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1545 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1546
1547 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1548 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1549
1550 env->idt.base = ldl_phys(sm_state + 0x7f58);
1551 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1552
1553 for(i = 0; i < 6; i++) {
1554 if (i < 3)
1555 offset = 0x7f84 + i * 12;
1556 else
1557 offset = 0x7f2c + (i - 3) * 12;
1558 cpu_x86_load_seg_cache(env, i,
1559 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1560 ldl_phys(sm_state + offset + 8),
1561 ldl_phys(sm_state + offset + 4),
1562 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1563 }
1564 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1565
1566 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1567 if (val & 0x20000) {
1568 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1569 }
1570#endif
1571 CC_OP = CC_OP_EFLAGS;
1572 env->hflags &= ~HF_SMM_MASK;
1573 cpu_smm_update(env);
1574
1575 if (loglevel & CPU_LOG_INT) {
1576 fprintf(logfile, "SMM: after RSM\n");
1577 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1578 }
1579}
1580
1581#endif /* !CONFIG_USER_ONLY */
1582
1583
1584/* division, flags are undefined */
1585
1586void helper_divb_AL(target_ulong t0)
1587{
1588 unsigned int num, den, q, r;
1589
1590 num = (EAX & 0xffff);
1591 den = (t0 & 0xff);
1592 if (den == 0) {
1593 raise_exception(EXCP00_DIVZ);
1594 }
1595 q = (num / den);
1596 if (q > 0xff)
1597 raise_exception(EXCP00_DIVZ);
1598 q &= 0xff;
1599 r = (num % den) & 0xff;
1600 EAX = (EAX & ~0xffff) | (r << 8) | q;
1601}
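/* Worked example: with AX = 100 and a divisor of 7 the helper stores
   quotient 14 in AL and remainder 2 in AH, so AX becomes 0x020e; a
   quotient above 0xff raises #DE, the same exception used for a zero
   divisor. */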
1602
1603void helper_idivb_AL(target_ulong t0)
1604{
1605 int num, den, q, r;
1606
1607 num = (int16_t)EAX;
1608 den = (int8_t)t0;
1609 if (den == 0) {
1610 raise_exception(EXCP00_DIVZ);
1611 }
1612 q = (num / den);
1613 if (q != (int8_t)q)
1614 raise_exception(EXCP00_DIVZ);
1615 q &= 0xff;
1616 r = (num % den) & 0xff;
1617 EAX = (EAX & ~0xffff) | (r << 8) | q;
1618}
1619
1620void helper_divw_AX(target_ulong t0)
1621{
1622 unsigned int num, den, q, r;
1623
1624 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1625 den = (t0 & 0xffff);
1626 if (den == 0) {
1627 raise_exception(EXCP00_DIVZ);
1628 }
1629 q = (num / den);
1630 if (q > 0xffff)
1631 raise_exception(EXCP00_DIVZ);
1632 q &= 0xffff;
1633 r = (num % den) & 0xffff;
1634 EAX = (EAX & ~0xffff) | q;
1635 EDX = (EDX & ~0xffff) | r;
1636}
1637
1638void helper_idivw_AX(target_ulong t0)
1639{
1640 int num, den, q, r;
1641
1642 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1643 den = (int16_t)t0;
1644 if (den == 0) {
1645 raise_exception(EXCP00_DIVZ);
1646 }
1647 q = (num / den);
1648 if (q != (int16_t)q)
1649 raise_exception(EXCP00_DIVZ);
1650 q &= 0xffff;
1651 r = (num % den) & 0xffff;
1652 EAX = (EAX & ~0xffff) | q;
1653 EDX = (EDX & ~0xffff) | r;
1654}
1655
1656void helper_divl_EAX(target_ulong t0)
1657{
1658 unsigned int den, r;
1659 uint64_t num, q;
1660
1661 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1662 den = t0;
1663 if (den == 0) {
1664 raise_exception(EXCP00_DIVZ);
1665 }
1666 q = (num / den);
1667 r = (num % den);
1668 if (q > 0xffffffff)
1669 raise_exception(EXCP00_DIVZ);
1670 EAX = (uint32_t)q;
1671 EDX = (uint32_t)r;
1672}
1673
1674void helper_idivl_EAX(target_ulong t0)
1675{
1676 int den, r;
1677 int64_t num, q;
1678
1679 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1680 den = t0;
1681 if (den == 0) {
1682 raise_exception(EXCP00_DIVZ);
1683 }
1684 q = (num / den);
1685 r = (num % den);
1686 if (q != (int32_t)q)
1687 raise_exception(EXCP00_DIVZ);
1688 EAX = (uint32_t)q;
1689 EDX = (uint32_t)r;
1690}
1691
1692/* bcd */
1693
1694/* XXX: exception */
1695void helper_aam(int base)
1696{
1697 int al, ah;
1698 al = EAX & 0xff;
1699 ah = al / base;
1700 al = al % base;
1701 EAX = (EAX & ~0xffff) | al | (ah << 8);
1702 CC_DST = al;
1703}
1704
1705void helper_aad(int base)
1706{
1707 int al, ah;
1708 al = EAX & 0xff;
1709 ah = (EAX >> 8) & 0xff;
1710 al = ((ah * base) + al) & 0xff;
1711 EAX = (EAX & ~0xffff) | al;
1712 CC_DST = al;
1713}
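/* Worked example (base 10): AAM with AL = 43 yields AH = 4, AL = 3;
   AAD reverses it, folding AH * 10 + AL = 43 back into AL and
   clearing AH. */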
1714
1715void helper_aaa(void)
1716{
1717 int icarry;
1718 int al, ah, af;
1719 int eflags;
1720
1721 eflags = cc_table[CC_OP].compute_all();
1722 af = eflags & CC_A;
1723 al = EAX & 0xff;
1724 ah = (EAX >> 8) & 0xff;
1725
1726 icarry = (al > 0xf9);
1727 if (((al & 0x0f) > 9 ) || af) {
1728 al = (al + 6) & 0x0f;
1729 ah = (ah + 1 + icarry) & 0xff;
1730 eflags |= CC_C | CC_A;
1731 } else {
1732 eflags &= ~(CC_C | CC_A);
1733 al &= 0x0f;
1734 }
1735 EAX = (EAX & ~0xffff) | al | (ah << 8);
1736 CC_SRC = eflags;
1737 FORCE_RET();
1738}
1739
1740void helper_aas(void)
1741{
1742 int icarry;
1743 int al, ah, af;
1744 int eflags;
1745
1746 eflags = cc_table[CC_OP].compute_all();
1747 af = eflags & CC_A;
1748 al = EAX & 0xff;
1749 ah = (EAX >> 8) & 0xff;
1750
1751 icarry = (al < 6);
1752 if (((al & 0x0f) > 9 ) || af) {
1753 al = (al - 6) & 0x0f;
1754 ah = (ah - 1 - icarry) & 0xff;
1755 eflags |= CC_C | CC_A;
1756 } else {
1757 eflags &= ~(CC_C | CC_A);
1758 al &= 0x0f;
1759 }
1760 EAX = (EAX & ~0xffff) | al | (ah << 8);
1761 CC_SRC = eflags;
1762 FORCE_RET();
1763}
1764
1765void helper_daa(void)
1766{
1767 int al, af, cf;
1768 int eflags;
1769
1770 eflags = cc_table[CC_OP].compute_all();
1771 cf = eflags & CC_C;
1772 af = eflags & CC_A;
1773 al = EAX & 0xff;
1774
1775 eflags = 0;
1776 if (((al & 0x0f) > 9 ) || af) {
1777 al = (al + 6) & 0xff;
1778 eflags |= CC_A;
1779 }
1780 if ((al > 0x9f) || cf) {
1781 al = (al + 0x60) & 0xff;
1782 eflags |= CC_C;
1783 }
1784 EAX = (EAX & ~0xff) | al;
1785 /* well, speed is not an issue here, so we compute the flags by hand */
1786 eflags |= (al == 0) << 6; /* zf */
1787 eflags |= parity_table[al]; /* pf */
1788 eflags |= (al & 0x80); /* sf */
1789 CC_SRC = eflags;
1790 FORCE_RET();
1791}
1792
1793void helper_das(void)
1794{
1795 int al, al1, af, cf;
1796 int eflags;
1797
1798 eflags = cc_table[CC_OP].compute_all();
1799 cf = eflags & CC_C;
1800 af = eflags & CC_A;
1801 al = EAX & 0xff;
1802
1803 eflags = 0;
1804 al1 = al;
1805 if (((al & 0x0f) > 9 ) || af) {
1806 eflags |= CC_A;
1807 if (al < 6 || cf)
1808 eflags |= CC_C;
1809 al = (al - 6) & 0xff;
1810 }
1811 if ((al1 > 0x99) || cf) {
1812 al = (al - 0x60) & 0xff;
1813 eflags |= CC_C;
1814 }
1815 EAX = (EAX & ~0xff) | al;
1816 /* well, speed is not an issue here, so we compute the flags by hand */
1817 eflags |= (al == 0) << 6; /* zf */
1818 eflags |= parity_table[al]; /* pf */
1819 eflags |= (al & 0x80); /* sf */
1820 CC_SRC = eflags;
1821 FORCE_RET();
1822}
1823
1824void helper_into(int next_eip_addend)
1825{
1826 int eflags;
1827 eflags = cc_table[CC_OP].compute_all();
1828 if (eflags & CC_O) {
1829 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
1830 }
1831}
1832
1833void helper_cmpxchg8b(target_ulong a0)
1834{
1835 uint64_t d;
1836 int eflags;
1837
1838 eflags = cc_table[CC_OP].compute_all();
1839 d = ldq(a0);
1840 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1841 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1842 eflags |= CC_Z;
1843 } else {
1844 /* always do the store */
1845 stq(a0, d);
1846 EDX = (uint32_t)(d >> 32);
1847 EAX = (uint32_t)d;
1848 eflags &= ~CC_Z;
1849 }
1850 CC_SRC = eflags;
1851}
1852
1853#ifdef TARGET_X86_64
1854void helper_cmpxchg16b(target_ulong a0)
1855{
1856 uint64_t d0, d1;
1857 int eflags;
1858
1859 if ((a0 & 0xf) != 0)
1860 raise_exception(EXCP0D_GPF);
1861 eflags = cc_table[CC_OP].compute_all();
1862 d0 = ldq(a0);
1863 d1 = ldq(a0 + 8);
1864 if (d0 == EAX && d1 == EDX) {
1865 stq(a0, EBX);
1866 stq(a0 + 8, ECX);
1867 eflags |= CC_Z;
1868 } else {
1869 /* always do the store */
1870 stq(a0, d0);
1871 stq(a0 + 8, d1);
1872 EDX = d1;
1873 EAX = d0;
1874 eflags &= ~CC_Z;
1875 }
1876 CC_SRC = eflags;
1877}
1878#endif
1879
1880void helper_single_step(void)
1881{
1882 env->dr[6] |= 0x4000;
1883 raise_exception(EXCP01_SSTP);
1884}
1885
1886void helper_cpuid(void)
1887{
1888 uint32_t index;
1889
1890 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1891
1892 index = (uint32_t)EAX;
1893 /* test if maximum index reached */
1894 if (index & 0x80000000) {
1895 if (index > env->cpuid_xlevel)
1896 index = env->cpuid_level;
1897 } else {
1898 if (index > env->cpuid_level)
1899 index = env->cpuid_level;
1900 }
1901
1902 switch(index) {
1903 case 0:
1904 EAX = env->cpuid_level;
1905 EBX = env->cpuid_vendor1;
1906 EDX = env->cpuid_vendor2;
1907 ECX = env->cpuid_vendor3;
1908 break;
1909 case 1:
1910 EAX = env->cpuid_version;
1911 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1912 ECX = env->cpuid_ext_features;
1913 EDX = env->cpuid_features;
1914 break;
1915 case 2:
1916 /* cache info: needed for Pentium Pro compatibility */
1917 EAX = 1;
1918 EBX = 0;
1919 ECX = 0;
1920 EDX = 0x2c307d;
1921 break;
1922 case 4:
1923 /* cache info: needed for Core compatibility */
1924 switch (ECX) {
1925 case 0: /* L1 dcache info */
1926 EAX = 0x0000121;
1927 EBX = 0x1c0003f;
1928 ECX = 0x000003f;
1929 EDX = 0x0000001;
1930 break;
1931 case 1: /* L1 icache info */
1932 EAX = 0x0000122;
1933 EBX = 0x1c0003f;
1934 ECX = 0x000003f;
1935 EDX = 0x0000001;
1936 break;
1937 case 2: /* L2 cache info */
1938 EAX = 0x0000143;
1939 EBX = 0x3c0003f;
1940 ECX = 0x0000fff;
1941 EDX = 0x0000001;
1942 break;
1943 default: /* end of info */
1944 EAX = 0;
1945 EBX = 0;
1946 ECX = 0;
1947 EDX = 0;
1948 break;
1949 }
1950
1951 break;
1952 case 5:
1953 /* mwait info: needed for Core compatibility */
1954 EAX = 0; /* Smallest monitor-line size in bytes */
1955 EBX = 0; /* Largest monitor-line size in bytes */
1956 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
1957 EDX = 0;
1958 break;
1959 case 6:
1960 /* Thermal and Power Leaf */
1961 EAX = 0;
1962 EBX = 0;
1963 ECX = 0;
1964 EDX = 0;
1965 break;
1966 case 9:
1967 /* Direct Cache Access Information Leaf */
1968 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
1969 EBX = 0;
1970 ECX = 0;
1971 EDX = 0;
1972 break;
1973 case 0xA:
1974 /* Architectural Performance Monitoring Leaf */
1975 EAX = 0;
1976 EBX = 0;
1977 ECX = 0;
1978 EDX = 0;
1979 break;
1980 case 0x80000000:
1981 EAX = env->cpuid_xlevel;
1982 EBX = env->cpuid_vendor1;
1983 EDX = env->cpuid_vendor2;
1984 ECX = env->cpuid_vendor3;
1985 break;
1986 case 0x80000001:
1987 EAX = env->cpuid_features;
1988 EBX = 0;
1989 ECX = env->cpuid_ext3_features;
1990 EDX = env->cpuid_ext2_features;
1991 break;
1992 case 0x80000002:
1993 case 0x80000003:
1994 case 0x80000004:
1995 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1996 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1997 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1998 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1999 break;
2000 case 0x80000005:
2001 /* cache info (L1 cache) */
2002 EAX = 0x01ff01ff;
2003 EBX = 0x01ff01ff;
2004 ECX = 0x40020140;
2005 EDX = 0x40020140;
2006 break;
2007 case 0x80000006:
2008 /* cache info (L2 cache) */
2009 EAX = 0;
2010 EBX = 0x42004200;
2011 ECX = 0x02008140;
2012 EDX = 0;
2013 break;
2014 case 0x80000008:
2015 /* virtual & phys address size in low 2 bytes. */
2016/* XXX: This value must match the one used in the MMU code. */
2017 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2018 /* 64 bit processor */
2019#if defined(USE_KQEMU)
2020 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2021#else
2022/* XXX: The physical address space is limited to 42 bits in exec.c. */
2023 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2024#endif
2025 } else {
2026#if defined(USE_KQEMU)
2027 EAX = 0x00000020; /* 32 bits physical */
2028#else
2029        EAX = 0x00000024; /* 36 bits physical */
2030#endif
2031        }
2032 EBX = 0;
2033 ECX = 0;
2034 EDX = 0;
2035 break;
2036 case 0x8000000A:
2037 EAX = 0x00000001;
2038 EBX = 0;
2039 ECX = 0;
2040 EDX = 0;
2041 break;
2042 default:
2043 /* reserved values: zero */
2044 EAX = 0;
2045 EBX = 0;
2046 ECX = 0;
2047 EDX = 0;
2048 break;
2049 }
2050}
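/* Illustrative only: how a guest consumes leaf 0 as emulated above --
   EBX, EDX and ECX concatenate, in that order, into the 12-byte
   vendor string.  Hypothetical sketch, not used by this file: */
#if 0
static void cpuid_vendor(char vendor[13])
{
    uint32_t a, b, c, d;
    __asm__ volatile ("cpuid"
                      : "=a" (a), "=b" (b), "=c" (c), "=d" (d)
                      : "a" (0));
    memcpy(vendor + 0, &b, 4);
    memcpy(vendor + 4, &d, 4);
    memcpy(vendor + 8, &c, 4);
    vendor[12] = '\0';
}
#endif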
2051
2052void helper_enter_level(int level, int data32, target_ulong t1)
2053{
2054 target_ulong ssp;
2055 uint32_t esp_mask, esp, ebp;
2056
2057 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2058 ssp = env->segs[R_SS].base;
2059 ebp = EBP;
2060 esp = ESP;
2061 if (data32) {
2062 /* 32 bit */
2063 esp -= 4;
2064 while (--level) {
2065 esp -= 4;
2066 ebp -= 4;
2067 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2068 }
2069 esp -= 4;
2070 stl(ssp + (esp & esp_mask), t1);
2071 } else {
2072 /* 16 bit */
2073 esp -= 2;
2074 while (--level) {
2075 esp -= 2;
2076 ebp -= 2;
2077 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2078 }
2079 esp -= 2;
2080 stw(ssp + (esp & esp_mask), t1);
2081 }
2082}
2083
2084#ifdef TARGET_X86_64
2085void helper_enter64_level(int level, int data64, target_ulong t1)
2086{
2087 target_ulong esp, ebp;
2088 ebp = EBP;
2089 esp = ESP;
2090
2091 if (data64) {
2092 /* 64 bit */
2093 esp -= 8;
2094 while (--level) {
2095 esp -= 8;
2096 ebp -= 8;
2097 stq(esp, ldq(ebp));
2098 }
2099 esp -= 8;
2100 stq(esp, t1);
2101 } else {
2102 /* 16 bit */
2103 esp -= 2;
2104 while (--level) {
2105 esp -= 2;
2106 ebp -= 2;
2107 stw(esp, lduw(ebp));
2108 }
2109 esp -= 2;
2110 stw(esp, t1);
2111 }
2112}
2113#endif
2114
2115void helper_lldt(int selector)
2116{
2117 SegmentCache *dt;
2118 uint32_t e1, e2;
2119 int index, entry_limit;
2120 target_ulong ptr;
2121
2122 selector &= 0xffff;
2123 if ((selector & 0xfffc) == 0) {
2124 /* XXX: NULL selector case: invalid LDT */
2125 env->ldt.base = 0;
2126 env->ldt.limit = 0;
2127 } else {
2128 if (selector & 0x4)
2129 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2130 dt = &env->gdt;
2131 index = selector & ~7;
2132#ifdef TARGET_X86_64
2133 if (env->hflags & HF_LMA_MASK)
2134 entry_limit = 15;
2135 else
2136#endif
2137 entry_limit = 7;
2138 if ((index + entry_limit) > dt->limit)
2139 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2140 ptr = dt->base + index;
2141 e1 = ldl_kernel(ptr);
2142 e2 = ldl_kernel(ptr + 4);
2143 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2144 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2145 if (!(e2 & DESC_P_MASK))
2146 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2147#ifdef TARGET_X86_64
2148 if (env->hflags & HF_LMA_MASK) {
2149 uint32_t e3;
2150 e3 = ldl_kernel(ptr + 8);
2151 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2152 env->ldt.base |= (target_ulong)e3 << 32;
2153 } else
2154#endif
2155 {
2156 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2157 }
2158 }
2159 env->ldt.selector = selector;
2160}
2161
2162void helper_ltr(int selector)
2163{
2164 SegmentCache *dt;
2165 uint32_t e1, e2;
2166 int index, type, entry_limit;
2167 target_ulong ptr;
2168
2169 selector &= 0xffff;
2170 if ((selector & 0xfffc) == 0) {
2171 /* NULL selector case: invalid TR */
2172 env->tr.base = 0;
2173 env->tr.limit = 0;
2174 env->tr.flags = 0;
2175 } else {
2176 if (selector & 0x4)
2177 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2178 dt = &env->gdt;
2179 index = selector & ~7;
2180#ifdef TARGET_X86_64
2181 if (env->hflags & HF_LMA_MASK)
2182 entry_limit = 15;
2183 else
2184#endif
2185 entry_limit = 7;
2186 if ((index + entry_limit) > dt->limit)
2187 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2188 ptr = dt->base + index;
2189 e1 = ldl_kernel(ptr);
2190 e2 = ldl_kernel(ptr + 4);
2191 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2192 if ((e2 & DESC_S_MASK) ||
2193 (type != 1 && type != 9))
2194 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2195 if (!(e2 & DESC_P_MASK))
2196 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2197#ifdef TARGET_X86_64
2198 if (env->hflags & HF_LMA_MASK) {
2199 uint32_t e3, e4;
2200 e3 = ldl_kernel(ptr + 8);
2201 e4 = ldl_kernel(ptr + 12);
2202 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2203 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2204 load_seg_cache_raw_dt(&env->tr, e1, e2);
2205 env->tr.base |= (target_ulong)e3 << 32;
2206 } else
2207#endif
2208 {
2209 load_seg_cache_raw_dt(&env->tr, e1, e2);
2210 }
2211 e2 |= DESC_TSS_BUSY_MASK;
2212 stl_kernel(ptr + 4, e2);
2213 }
2214 env->tr.selector = selector;
2215}
2216
2217/* only works in protected mode, not VM86. seg_reg must be != R_CS */
2218void helper_load_seg(int seg_reg, int selector)
2219{
2220 uint32_t e1, e2;
2221 int cpl, dpl, rpl;
2222 SegmentCache *dt;
2223 int index;
2224 target_ulong ptr;
2225
2226 selector &= 0xffff;
2227 cpl = env->hflags & HF_CPL_MASK;
2228 if ((selector & 0xfffc) == 0) {
2229 /* null selector case */
2230 if (seg_reg == R_SS
2231#ifdef TARGET_X86_64
2232 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2233#endif
2234 )
2235 raise_exception_err(EXCP0D_GPF, 0);
2236 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2237 } else {
2238
2239 if (selector & 0x4)
2240 dt = &env->ldt;
2241 else
2242 dt = &env->gdt;
2243 index = selector & ~7;
2244 if ((index + 7) > dt->limit)
2245 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2246 ptr = dt->base + index;
2247 e1 = ldl_kernel(ptr);
2248 e2 = ldl_kernel(ptr + 4);
2249
2250 if (!(e2 & DESC_S_MASK))
2251 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2252 rpl = selector & 3;
2253 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2254 if (seg_reg == R_SS) {
2255 /* must be writable segment */
2256 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2257 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2258 if (rpl != cpl || dpl != cpl)
2259 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2260 } else {
2261 /* must be readable segment */
2262 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2263 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2264
2265 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2266 /* if not conforming code, test rights */
2267 if (dpl < cpl || dpl < rpl)
2268 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2269 }
2270 }
2271
2272 if (!(e2 & DESC_P_MASK)) {
2273 if (seg_reg == R_SS)
2274 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2275 else
2276 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2277 }
2278
2279 /* set the access bit if not already set */
2280 if (!(e2 & DESC_A_MASK)) {
2281 e2 |= DESC_A_MASK;
2282 stl_kernel(ptr + 4, e2);
2283 }
2284
2285 cpu_x86_load_seg_cache(env, seg_reg, selector,
2286 get_seg_base(e1, e2),
2287 get_seg_limit(e1, e2),
2288 e2);
2289#if 0
2290 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2291 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2292#endif
2293 }
2294}
2295
2296/* protected mode jump */
2297void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2298 int next_eip_addend)
2299{
2300 int gate_cs, type;
2301 uint32_t e1, e2, cpl, dpl, rpl, limit;
2302 target_ulong next_eip;
2303
2304 if ((new_cs & 0xfffc) == 0)
2305 raise_exception_err(EXCP0D_GPF, 0);
2306 if (load_segment(&e1, &e2, new_cs) != 0)
2307 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2308 cpl = env->hflags & HF_CPL_MASK;
2309 if (e2 & DESC_S_MASK) {
2310 if (!(e2 & DESC_CS_MASK))
2311 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2312 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2313 if (e2 & DESC_C_MASK) {
2314 /* conforming code segment */
2315 if (dpl > cpl)
2316 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2317 } else {
2318 /* non conforming code segment */
2319 rpl = new_cs & 3;
2320 if (rpl > cpl)
2321 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2322 if (dpl != cpl)
2323 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2324 }
2325 if (!(e2 & DESC_P_MASK))
2326 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2327 limit = get_seg_limit(e1, e2);
2328 if (new_eip > limit &&
2329 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2330 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2331 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2332 get_seg_base(e1, e2), limit, e2);
2333 EIP = new_eip;
2334 } else {
2335 /* jump to call or task gate */
2336 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2337 rpl = new_cs & 3;
2338 cpl = env->hflags & HF_CPL_MASK;
2339 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2340 switch(type) {
2341 case 1: /* 286 TSS */
2342 case 9: /* 386 TSS */
2343 case 5: /* task gate */
2344 if (dpl < cpl || dpl < rpl)
2345 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2346 next_eip = env->eip + next_eip_addend;
2347 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2348 CC_OP = CC_OP_EFLAGS;
2349 break;
2350 case 4: /* 286 call gate */
2351 case 12: /* 386 call gate */
2352 if ((dpl < cpl) || (dpl < rpl))
2353 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2354 if (!(e2 & DESC_P_MASK))
2355 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2356 gate_cs = e1 >> 16;
2357 new_eip = (e1 & 0xffff);
2358 if (type == 12)
2359 new_eip |= (e2 & 0xffff0000);
2360 if (load_segment(&e1, &e2, gate_cs) != 0)
2361 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2362 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2363 /* must be code segment */
2364 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2365 (DESC_S_MASK | DESC_CS_MASK)))
2366 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2367 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2368 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2369 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2370 if (!(e2 & DESC_P_MASK))
2371 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2372 limit = get_seg_limit(e1, e2);
2373 if (new_eip > limit)
2374 raise_exception_err(EXCP0D_GPF, 0);
2375 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2376 get_seg_base(e1, e2), limit, e2);
2377 EIP = new_eip;
2378 break;
2379 default:
2380 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2381 break;
2382 }
2383 }
2384}
2385
2386/* real mode call */
2387void helper_lcall_real(int new_cs, target_ulong new_eip1,
2388 int shift, int next_eip)
2389{
2390 int new_eip;
2391 uint32_t esp, esp_mask;
2392 target_ulong ssp;
2393
2394 new_eip = new_eip1;
2395 esp = ESP;
2396 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2397 ssp = env->segs[R_SS].base;
2398 if (shift) {
2399 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2400 PUSHL(ssp, esp, esp_mask, next_eip);
2401 } else {
2402 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2403 PUSHW(ssp, esp, esp_mask, next_eip);
2404 }
2405
2406 SET_ESP(esp, esp_mask);
2407 env->eip = new_eip;
2408 env->segs[R_CS].selector = new_cs;
2409 env->segs[R_CS].base = (new_cs << 4);
2410}
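/* Illustrative only: the CS base update above follows real-mode
   segmentation, where a linear address is segment * 16 + offset;
   e.g. CS=0x1234 with IP=0x0010 executes from linear 0x12350. */
#if 0
static uint32_t real_mode_linear(uint16_t seg, uint16_t off)
{
    return ((uint32_t)seg << 4) + off;
}
#endif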
2411
2412/* protected mode call */
2413void helper_lcall_protected(int new_cs, target_ulong new_eip,
2414 int shift, int next_eip_addend)
2415{
2416 int new_stack, i;
2417 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2418 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2419 uint32_t val, limit, old_sp_mask;
2420 target_ulong ssp, old_ssp, next_eip;
2421
2422 next_eip = env->eip + next_eip_addend;
2423#ifdef DEBUG_PCALL
2424 if (loglevel & CPU_LOG_PCALL) {
2425 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2426 new_cs, (uint32_t)new_eip, shift);
2427 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2428 }
2429#endif
2430 if ((new_cs & 0xfffc) == 0)
2431 raise_exception_err(EXCP0D_GPF, 0);
2432 if (load_segment(&e1, &e2, new_cs) != 0)
2433 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2434 cpl = env->hflags & HF_CPL_MASK;
2435#ifdef DEBUG_PCALL
2436 if (loglevel & CPU_LOG_PCALL) {
2437 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2438 }
2439#endif
2440 if (e2 & DESC_S_MASK) {
2441 if (!(e2 & DESC_CS_MASK))
2442 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2443 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2444 if (e2 & DESC_C_MASK) {
2445 /* conforming code segment */
2446 if (dpl > cpl)
2447 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2448 } else {
2449 /* non conforming code segment */
2450 rpl = new_cs & 3;
2451 if (rpl > cpl)
2452 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2453 if (dpl != cpl)
2454 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2455 }
2456 if (!(e2 & DESC_P_MASK))
2457 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2458
2459#ifdef TARGET_X86_64
2460 /* XXX: check 16/32 bit cases in long mode */
2461 if (shift == 2) {
2462 target_ulong rsp;
2463 /* 64 bit case */
2464 rsp = ESP;
2465 PUSHQ(rsp, env->segs[R_CS].selector);
2466 PUSHQ(rsp, next_eip);
2467 /* from this point, not restartable */
2468 ESP = rsp;
2469 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2470 get_seg_base(e1, e2),
2471 get_seg_limit(e1, e2), e2);
2472 EIP = new_eip;
2473 } else
2474#endif
2475 {
2476 sp = ESP;
2477 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2478 ssp = env->segs[R_SS].base;
2479 if (shift) {
2480 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2481 PUSHL(ssp, sp, sp_mask, next_eip);
2482 } else {
2483 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2484 PUSHW(ssp, sp, sp_mask, next_eip);
2485 }
2486
2487 limit = get_seg_limit(e1, e2);
2488 if (new_eip > limit)
2489 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2490 /* from this point, not restartable */
2491 SET_ESP(sp, sp_mask);
2492 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2493 get_seg_base(e1, e2), limit, e2);
2494 EIP = new_eip;
2495 }
2496 } else {
2497 /* check gate type */
2498 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2499 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2500 rpl = new_cs & 3;
2501 switch(type) {
2502 case 1: /* available 286 TSS */
2503 case 9: /* available 386 TSS */
2504 case 5: /* task gate */
2505 if (dpl < cpl || dpl < rpl)
2506 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2507 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2508 CC_OP = CC_OP_EFLAGS;
2509 return;
2510 case 4: /* 286 call gate */
2511 case 12: /* 386 call gate */
2512 break;
2513 default:
2514 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2515 break;
2516 }
2517 shift = type >> 3;
2518
2519 if (dpl < cpl || dpl < rpl)
2520 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2521 /* check valid bit */
2522 if (!(e2 & DESC_P_MASK))
2523 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2524 selector = e1 >> 16;
2525 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2526 param_count = e2 & 0x1f;
2527 if ((selector & 0xfffc) == 0)
2528 raise_exception_err(EXCP0D_GPF, 0);
2529
2530 if (load_segment(&e1, &e2, selector) != 0)
2531 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2532 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2533 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2534 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2535 if (dpl > cpl)
2536 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2537 if (!(e2 & DESC_P_MASK))
2538 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2539
2540 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2541 /* to inner privilege */
2542 get_ss_esp_from_tss(&ss, &sp, dpl);
2543#ifdef DEBUG_PCALL
2544 if (loglevel & CPU_LOG_PCALL)
2545 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2546 ss, sp, param_count, ESP);
2547#endif
2548 if ((ss & 0xfffc) == 0)
2549 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2550 if ((ss & 3) != dpl)
2551 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2552 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2553 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2554 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2555 if (ss_dpl != dpl)
2556 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2557 if (!(ss_e2 & DESC_S_MASK) ||
2558 (ss_e2 & DESC_CS_MASK) ||
2559 !(ss_e2 & DESC_W_MASK))
2560 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2561 if (!(ss_e2 & DESC_P_MASK))
2562 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2563
2564 // push_size = ((param_count * 2) + 8) << shift;
2565
2566 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2567 old_ssp = env->segs[R_SS].base;
2568
2569 sp_mask = get_sp_mask(ss_e2);
2570 ssp = get_seg_base(ss_e1, ss_e2);
2571 if (shift) {
2572 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2573 PUSHL(ssp, sp, sp_mask, ESP);
2574 for(i = param_count - 1; i >= 0; i--) {
2575 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2576 PUSHL(ssp, sp, sp_mask, val);
2577 }
2578 } else {
2579 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2580 PUSHW(ssp, sp, sp_mask, ESP);
2581 for(i = param_count - 1; i >= 0; i--) {
2582 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2583 PUSHW(ssp, sp, sp_mask, val);
2584 }
2585 }
2586 new_stack = 1;
2587 } else {
2588 /* to same privilege */
2589 sp = ESP;
2590 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2591 ssp = env->segs[R_SS].base;
2592 // push_size = (4 << shift);
2593 new_stack = 0;
2594 }
2595
2596 if (shift) {
2597 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2598 PUSHL(ssp, sp, sp_mask, next_eip);
2599 } else {
2600 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2601 PUSHW(ssp, sp, sp_mask, next_eip);
2602 }
2603
2604 /* from this point, not restartable */
2605
2606 if (new_stack) {
2607 ss = (ss & ~3) | dpl;
2608 cpu_x86_load_seg_cache(env, R_SS, ss,
2609 ssp,
2610 get_seg_limit(ss_e1, ss_e2),
2611 ss_e2);
2612 }
2613
2614 selector = (selector & ~3) | dpl;
2615 cpu_x86_load_seg_cache(env, R_CS, selector,
2616 get_seg_base(e1, e2),
2617 get_seg_limit(e1, e2),
2618 e2);
2619 cpu_x86_set_cpl(env, dpl);
2620 SET_ESP(sp, sp_mask);
2621 EIP = offset;
2622 }
2623#ifdef USE_KQEMU
2624 if (kqemu_is_ok(env)) {
2625 env->exception_index = -1;
2626 cpu_loop_exit();
2627 }
2628#endif
2629}
2630
2631/* real and vm86 mode iret */
2632void helper_iret_real(int shift)
2633{
2634 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2635 target_ulong ssp;
2636 int eflags_mask;
2637
2638    sp_mask = 0xffff; /* XXX: use SS segment size? */
2639 sp = ESP;
2640 ssp = env->segs[R_SS].base;
2641 if (shift == 1) {
2642 /* 32 bits */
2643 POPL(ssp, sp, sp_mask, new_eip);
2644 POPL(ssp, sp, sp_mask, new_cs);
2645 new_cs &= 0xffff;
2646 POPL(ssp, sp, sp_mask, new_eflags);
2647 } else {
2648 /* 16 bits */
2649 POPW(ssp, sp, sp_mask, new_eip);
2650 POPW(ssp, sp, sp_mask, new_cs);
2651 POPW(ssp, sp, sp_mask, new_eflags);
2652 }
2653 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2654 load_seg_vm(R_CS, new_cs);
2655 env->eip = new_eip;
2656 if (env->eflags & VM_MASK)
2657 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2658 else
2659 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2660 if (shift == 0)
2661 eflags_mask &= 0xffff;
2662 load_eflags(new_eflags, eflags_mask);
2663    env->hflags2 &= ~HF2_NMI_MASK;
2664}
2665
2666static inline void validate_seg(int seg_reg, int cpl)
2667{
2668 int dpl;
2669 uint32_t e2;
2670
2671 /* XXX: on x86_64, we do not want to nullify FS and GS because
2672 they may still contain a valid base. I would be interested to
2673 know how a real x86_64 CPU behaves */
2674 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2675 (env->segs[seg_reg].selector & 0xfffc) == 0)
2676 return;
2677
2678 e2 = env->segs[seg_reg].flags;
2679 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2680 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2681 /* data or non conforming code segment */
2682 if (dpl < cpl) {
2683 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2684 }
2685 }
2686}
2687
2688/* protected mode iret */
2689static inline void helper_ret_protected(int shift, int is_iret, int addend)
2690{
2691 uint32_t new_cs, new_eflags, new_ss;
2692 uint32_t new_es, new_ds, new_fs, new_gs;
2693 uint32_t e1, e2, ss_e1, ss_e2;
2694 int cpl, dpl, rpl, eflags_mask, iopl;
2695 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2696
2697#ifdef TARGET_X86_64
2698 if (shift == 2)
2699 sp_mask = -1;
2700 else
2701#endif
2702 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2703 sp = ESP;
2704 ssp = env->segs[R_SS].base;
2705 new_eflags = 0; /* avoid warning */
2706#ifdef TARGET_X86_64
2707 if (shift == 2) {
2708 POPQ(sp, new_eip);
2709 POPQ(sp, new_cs);
2710 new_cs &= 0xffff;
2711 if (is_iret) {
2712 POPQ(sp, new_eflags);
2713 }
2714 } else
2715#endif
2716 if (shift == 1) {
2717 /* 32 bits */
2718 POPL(ssp, sp, sp_mask, new_eip);
2719 POPL(ssp, sp, sp_mask, new_cs);
2720 new_cs &= 0xffff;
2721 if (is_iret) {
2722 POPL(ssp, sp, sp_mask, new_eflags);
2723 if (new_eflags & VM_MASK)
2724 goto return_to_vm86;
2725 }
2726 } else {
2727 /* 16 bits */
2728 POPW(ssp, sp, sp_mask, new_eip);
2729 POPW(ssp, sp, sp_mask, new_cs);
2730 if (is_iret)
2731 POPW(ssp, sp, sp_mask, new_eflags);
2732 }
2733#ifdef DEBUG_PCALL
2734 if (loglevel & CPU_LOG_PCALL) {
2735 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2736 new_cs, new_eip, shift, addend);
2737 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2738 }
2739#endif
2740 if ((new_cs & 0xfffc) == 0)
2741 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2742 if (load_segment(&e1, &e2, new_cs) != 0)
2743 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2744 if (!(e2 & DESC_S_MASK) ||
2745 !(e2 & DESC_CS_MASK))
2746 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2747 cpl = env->hflags & HF_CPL_MASK;
2748 rpl = new_cs & 3;
2749 if (rpl < cpl)
2750 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2751 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2752 if (e2 & DESC_C_MASK) {
2753 if (dpl > rpl)
2754 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2755 } else {
2756 if (dpl != rpl)
2757 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2758 }
2759 if (!(e2 & DESC_P_MASK))
2760 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2761
2762 sp += addend;
2763 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2764 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2765        /* return to same privilege level */
2766 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2767 get_seg_base(e1, e2),
2768 get_seg_limit(e1, e2),
2769 e2);
2770 } else {
2771 /* return to different privilege level */
2772#ifdef TARGET_X86_64
2773 if (shift == 2) {
2774 POPQ(sp, new_esp);
2775 POPQ(sp, new_ss);
2776 new_ss &= 0xffff;
2777 } else
2778#endif
2779 if (shift == 1) {
2780 /* 32 bits */
2781 POPL(ssp, sp, sp_mask, new_esp);
2782 POPL(ssp, sp, sp_mask, new_ss);
2783 new_ss &= 0xffff;
2784 } else {
2785 /* 16 bits */
2786 POPW(ssp, sp, sp_mask, new_esp);
2787 POPW(ssp, sp, sp_mask, new_ss);
2788 }
2789#ifdef DEBUG_PCALL
2790 if (loglevel & CPU_LOG_PCALL) {
2791 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2792 new_ss, new_esp);
2793 }
2794#endif
2795 if ((new_ss & 0xfffc) == 0) {
2796#ifdef TARGET_X86_64
2797 /* NULL ss is allowed in long mode if cpl != 3*/
2798 /* XXX: test CS64 ? */
2799 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2800 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2801 0, 0xffffffff,
2802 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2803 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2804 DESC_W_MASK | DESC_A_MASK);
2805 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2806 } else
2807#endif
2808 {
2809 raise_exception_err(EXCP0D_GPF, 0);
2810 }
2811 } else {
2812 if ((new_ss & 3) != rpl)
2813 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2814 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2815 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2816 if (!(ss_e2 & DESC_S_MASK) ||
2817 (ss_e2 & DESC_CS_MASK) ||
2818 !(ss_e2 & DESC_W_MASK))
2819 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2820 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2821 if (dpl != rpl)
2822 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2823 if (!(ss_e2 & DESC_P_MASK))
2824 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2825 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2826 get_seg_base(ss_e1, ss_e2),
2827 get_seg_limit(ss_e1, ss_e2),
2828 ss_e2);
2829 }
2830
2831 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2832 get_seg_base(e1, e2),
2833 get_seg_limit(e1, e2),
2834 e2);
2835 cpu_x86_set_cpl(env, rpl);
2836 sp = new_esp;
2837#ifdef TARGET_X86_64
2838 if (env->hflags & HF_CS64_MASK)
2839 sp_mask = -1;
2840 else
2841#endif
2842 sp_mask = get_sp_mask(ss_e2);
2843
2844 /* validate data segments */
2845 validate_seg(R_ES, rpl);
2846 validate_seg(R_DS, rpl);
2847 validate_seg(R_FS, rpl);
2848 validate_seg(R_GS, rpl);
2849
2850 sp += addend;
2851 }
2852 SET_ESP(sp, sp_mask);
2853 env->eip = new_eip;
2854 if (is_iret) {
2855 /* NOTE: 'cpl' is the _old_ CPL */
2856 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2857 if (cpl == 0)
2858 eflags_mask |= IOPL_MASK;
2859 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2860 if (cpl <= iopl)
2861 eflags_mask |= IF_MASK;
2862 if (shift == 0)
2863 eflags_mask &= 0xffff;
2864 load_eflags(new_eflags, eflags_mask);
2865 }
2866 return;
2867
2868 return_to_vm86:
2869 POPL(ssp, sp, sp_mask, new_esp);
2870 POPL(ssp, sp, sp_mask, new_ss);
2871 POPL(ssp, sp, sp_mask, new_es);
2872 POPL(ssp, sp, sp_mask, new_ds);
2873 POPL(ssp, sp, sp_mask, new_fs);
2874 POPL(ssp, sp, sp_mask, new_gs);
2875
2876 /* modify processor state */
2877 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2878 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2879 load_seg_vm(R_CS, new_cs & 0xffff);
2880 cpu_x86_set_cpl(env, 3);
2881 load_seg_vm(R_SS, new_ss & 0xffff);
2882 load_seg_vm(R_ES, new_es & 0xffff);
2883 load_seg_vm(R_DS, new_ds & 0xffff);
2884 load_seg_vm(R_FS, new_fs & 0xffff);
2885 load_seg_vm(R_GS, new_gs & 0xffff);
2886
2887 env->eip = new_eip & 0xffff;
2888 ESP = new_esp;
2889}
2890
2891void helper_iret_protected(int shift, int next_eip)
2892{
2893 int tss_selector, type;
2894 uint32_t e1, e2;
2895
2896 /* specific case for TSS */
2897 if (env->eflags & NT_MASK) {
2898#ifdef TARGET_X86_64
2899 if (env->hflags & HF_LMA_MASK)
2900 raise_exception_err(EXCP0D_GPF, 0);
2901#endif
2902 tss_selector = lduw_kernel(env->tr.base + 0);
2903 if (tss_selector & 4)
2904 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2905 if (load_segment(&e1, &e2, tss_selector) != 0)
2906 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2907 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2908 /* NOTE: we check both segment and busy TSS */
2909 if (type != 3)
2910 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2911 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2912 } else {
2913 helper_ret_protected(shift, 1, 0);
2914 }
2915    env->hflags2 &= ~HF2_NMI_MASK;
2916#ifdef USE_KQEMU
2917 if (kqemu_is_ok(env)) {
2918 CC_OP = CC_OP_EFLAGS;
2919 env->exception_index = -1;
2920 cpu_loop_exit();
2921 }
2922#endif
2923}
2924
2925void helper_lret_protected(int shift, int addend)
2926{
2927 helper_ret_protected(shift, 0, addend);
2928#ifdef USE_KQEMU
2929 if (kqemu_is_ok(env)) {
2930 env->exception_index = -1;
2931 cpu_loop_exit();
2932 }
2933#endif
2934}
2935
2936void helper_sysenter(void)
2937{
2938 if (env->sysenter_cs == 0) {
2939 raise_exception_err(EXCP0D_GPF, 0);
2940 }
2941 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2942 cpu_x86_set_cpl(env, 0);
2943
2944#ifdef TARGET_X86_64
2945 if (env->hflags & HF_LMA_MASK) {
2946 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2947 0, 0xffffffff,
2948 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2949 DESC_S_MASK |
2950 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2951 } else
2952#endif
2953 {
2954 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2955 0, 0xffffffff,
2956 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2957 DESC_S_MASK |
2958 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2959 }
2960 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2961 0, 0xffffffff,
2962 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2963 DESC_S_MASK |
2964 DESC_W_MASK | DESC_A_MASK);
2965 ESP = env->sysenter_esp;
2966 EIP = env->sysenter_eip;
2967}
2968
2969void helper_sysexit(int dflag)
2970{
2971 int cpl;
2972
2973 cpl = env->hflags & HF_CPL_MASK;
2974 if (env->sysenter_cs == 0 || cpl != 0) {
2975 raise_exception_err(EXCP0D_GPF, 0);
2976 }
2977 cpu_x86_set_cpl(env, 3);
2978#ifdef TARGET_X86_64
2979 if (dflag == 2) {
2980 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2981 0, 0xffffffff,
2982 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2983 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2984 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2985 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2986 0, 0xffffffff,
2987 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2988 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2989 DESC_W_MASK | DESC_A_MASK);
2990 } else
2991#endif
2992 {
2993 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2994 0, 0xffffffff,
2995 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2996 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2997 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2998 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2999 0, 0xffffffff,
3000 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3001 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3002 DESC_W_MASK | DESC_A_MASK);
3003 }
3004 ESP = ECX;
3005 EIP = EDX;
3006#ifdef USE_KQEMU
3007 if (kqemu_is_ok(env)) {
3008 env->exception_index = -1;
3009 cpu_loop_exit();
3010 }
3011#endif
3012}
3013
3014#if defined(CONFIG_USER_ONLY)
3015target_ulong helper_read_crN(int reg)
3016{
3017 return 0;
3018}
3019
3020void helper_write_crN(int reg, target_ulong t0)
3021{
3022}
3023#else
3024target_ulong helper_read_crN(int reg)
3025{
3026 target_ulong val;
3027
3028 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3029 switch(reg) {
3030 default:
3031 val = env->cr[reg];
3032 break;
3033 case 8:
3034 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3035 val = cpu_get_apic_tpr(env);
3036 } else {
3037 val = env->v_tpr;
3038 }
3039 break;
3040 }
3041 return val;
3042}
3043
3044void helper_write_crN(int reg, target_ulong t0)
3045{
3046 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3047 switch(reg) {
3048 case 0:
3049 cpu_x86_update_cr0(env, t0);
3050 break;
3051 case 3:
3052 cpu_x86_update_cr3(env, t0);
3053 break;
3054 case 4:
3055 cpu_x86_update_cr4(env, t0);
3056 break;
3057 case 8:
3058 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3059 cpu_set_apic_tpr(env, t0);
3060 }
3061 env->v_tpr = t0 & 0x0f;
3062 break;
3063 default:
3064 env->cr[reg] = t0;
3065 break;
3066 }
3067}
3068#endif
3069
3070void helper_lmsw(target_ulong t0)
3071{
3072 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3073 if already set to one. */
3074 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3075    helper_write_crN(0, t0);
3076}
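/* Note on the mask above: ~0xe preserves bit 0 (PE) of the old CR0,
   so LMSW can set PE via the low four operand bits but can never
   clear it once set. */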
3077
3078void helper_clts(void)
3079{
3080 env->cr[0] &= ~CR0_TS_MASK;
3081 env->hflags &= ~HF_TS_MASK;
3082}
3083
3084/* XXX: do more */
3085void helper_movl_drN_T0(int reg, target_ulong t0)
3086{
3087 env->dr[reg] = t0;
3088}
3089
3090void helper_invlpg(target_ulong addr)
3091{
3092    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3093    tlb_flush_page(env, addr);
3094}
3095
3096void helper_rdtsc(void)
3097{
3098 uint64_t val;
3099
3100 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3101 raise_exception(EXCP0D_GPF);
3102 }
3103 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3104
3105    val = cpu_get_tsc(env) + env->tsc_offset;
3106 EAX = (uint32_t)(val);
3107 EDX = (uint32_t)(val >> 32);
3108}
3109
3110void helper_rdpmc(void)
3111{
3112 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3113 raise_exception(EXCP0D_GPF);
3114 }
3115 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3116
3117 /* currently unimplemented */
3118 raise_exception_err(EXCP06_ILLOP, 0);
3119}
3120
3121#if defined(CONFIG_USER_ONLY)
3122void helper_wrmsr(void)
3123{
3124}
3125
3126void helper_rdmsr(void)
3127{
3128}
3129#else
3130void helper_wrmsr(void)
3131{
3132 uint64_t val;
3133
3134 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3135
3136 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3137
3138 switch((uint32_t)ECX) {
3139 case MSR_IA32_SYSENTER_CS:
3140 env->sysenter_cs = val & 0xffff;
3141 break;
3142 case MSR_IA32_SYSENTER_ESP:
3143 env->sysenter_esp = val;
3144 break;
3145 case MSR_IA32_SYSENTER_EIP:
3146 env->sysenter_eip = val;
3147 break;
3148 case MSR_IA32_APICBASE:
3149 cpu_set_apic_base(env, val);
3150 break;
3151 case MSR_EFER:
3152 {
3153 uint64_t update_mask;
3154 update_mask = 0;
3155 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3156 update_mask |= MSR_EFER_SCE;
3157 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3158 update_mask |= MSR_EFER_LME;
3159 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3160 update_mask |= MSR_EFER_FFXSR;
3161 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3162 update_mask |= MSR_EFER_NXE;
3163 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3164 update_mask |= MSR_EFER_SVME;
3165 cpu_load_efer(env, (env->efer & ~update_mask) |
3166 (val & update_mask));
3167 }
3168 break;
3169 case MSR_STAR:
3170 env->star = val;
3171 break;
3172 case MSR_PAT:
3173 env->pat = val;
3174 break;
3175 case MSR_VM_HSAVE_PA:
3176 env->vm_hsave = val;
3177 break;
3178 case MSR_IA32_PERF_STATUS:
3179 /* tsc_increment_by_tick */
3180 val = 1000ULL;
3181 /* CPU multiplier */
3182 val |= (((uint64_t)4ULL) << 40);
3183 break;
3184#ifdef TARGET_X86_64
3185 case MSR_LSTAR:
3186 env->lstar = val;
3187 break;
3188 case MSR_CSTAR:
3189 env->cstar = val;
3190 break;
3191 case MSR_FMASK:
3192 env->fmask = val;
3193 break;
3194 case MSR_FSBASE:
3195 env->segs[R_FS].base = val;
3196 break;
3197 case MSR_GSBASE:
3198 env->segs[R_GS].base = val;
3199 break;
3200 case MSR_KERNELGSBASE:
3201 env->kernelgsbase = val;
3202 break;
3203#endif
3204 default:
3205 /* XXX: exception ? */
3206 break;
3207 }
3208}
3209
3210void helper_rdmsr(void)
3211{
3212 uint64_t val;
3213
3214 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3215
3216 switch((uint32_t)ECX) {
3217 case MSR_IA32_SYSENTER_CS:
3218 val = env->sysenter_cs;
3219 break;
3220 case MSR_IA32_SYSENTER_ESP:
3221 val = env->sysenter_esp;
3222 break;
3223 case MSR_IA32_SYSENTER_EIP:
3224 val = env->sysenter_eip;
3225 break;
3226 case MSR_IA32_APICBASE:
3227 val = cpu_get_apic_base(env);
3228 break;
3229 case MSR_EFER:
3230 val = env->efer;
3231 break;
3232 case MSR_STAR:
3233 val = env->star;
3234 break;
3235 case MSR_PAT:
3236 val = env->pat;
3237 break;
3238 case MSR_VM_HSAVE_PA:
3239 val = env->vm_hsave;
3240 break;
3241#ifdef TARGET_X86_64
3242 case MSR_LSTAR:
3243 val = env->lstar;
3244 break;
3245 case MSR_CSTAR:
3246 val = env->cstar;
3247 break;
3248 case MSR_FMASK:
3249 val = env->fmask;
3250 break;
3251 case MSR_FSBASE:
3252 val = env->segs[R_FS].base;
3253 break;
3254 case MSR_GSBASE:
3255 val = env->segs[R_GS].base;
3256 break;
3257 case MSR_KERNELGSBASE:
3258 val = env->kernelgsbase;
3259 break;
3260#endif
3261#ifdef USE_KQEMU
3262 case MSR_QPI_COMMBASE:
3263 if (env->kqemu_enabled) {
3264 val = kqemu_comm_base;
3265 } else {
3266 val = 0;
3267 }
3268 break;
3269#endif
3270 default:
3271 /* XXX: exception ? */
3272 val = 0;
3273 break;
3274 }
3275 EAX = (uint32_t)(val);
3276 EDX = (uint32_t)(val >> 32);
3277}
3278#endif
3279
3280target_ulong helper_lsl(target_ulong selector1)
3281{
3282 unsigned int limit;
3283 uint32_t e1, e2, eflags, selector;
3284 int rpl, dpl, cpl, type;
3285
3286 selector = selector1 & 0xffff;
3287 eflags = cc_table[CC_OP].compute_all();
3288 if (load_segment(&e1, &e2, selector) != 0)
3289 goto fail;
3290 rpl = selector & 3;
3291 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3292 cpl = env->hflags & HF_CPL_MASK;
3293 if (e2 & DESC_S_MASK) {
3294 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3295 /* conforming */
3296 } else {
3297 if (dpl < cpl || dpl < rpl)
3298 goto fail;
3299 }
3300 } else {
3301 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3302 switch(type) {
3303 case 1:
3304 case 2:
3305 case 3:
3306 case 9:
3307 case 11:
3308 break;
3309 default:
3310 goto fail;
3311 }
3312 if (dpl < cpl || dpl < rpl) {
3313 fail:
3314 CC_SRC = eflags & ~CC_Z;
3315 return 0;
3316 }
3317 }
3318 limit = get_seg_limit(e1, e2);
3319 CC_SRC = eflags | CC_Z;
3320 return limit;
3321}
3322
3323target_ulong helper_lar(target_ulong selector1)
3324{
3325 uint32_t e1, e2, eflags, selector;
3326 int rpl, dpl, cpl, type;
3327
3328 selector = selector1 & 0xffff;
3329 eflags = cc_table[CC_OP].compute_all();
3330 if ((selector & 0xfffc) == 0)
3331 goto fail;
3332 if (load_segment(&e1, &e2, selector) != 0)
3333 goto fail;
3334 rpl = selector & 3;
3335 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3336 cpl = env->hflags & HF_CPL_MASK;
3337 if (e2 & DESC_S_MASK) {
3338 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3339 /* conforming */
3340 } else {
3341 if (dpl < cpl || dpl < rpl)
3342 goto fail;
3343 }
3344 } else {
3345 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3346 switch(type) {
3347 case 1:
3348 case 2:
3349 case 3:
3350 case 4:
3351 case 5:
3352 case 9:
3353 case 11:
3354 case 12:
3355 break;
3356 default:
3357 goto fail;
3358 }
3359 if (dpl < cpl || dpl < rpl) {
3360 fail:
3361 CC_SRC = eflags & ~CC_Z;
3362 return 0;
3363 }
3364 }
3365 CC_SRC = eflags | CC_Z;
3366 return e2 & 0x00f0ff00;
3367}
3368
3369void helper_verr(target_ulong selector1)
3370{
3371 uint32_t e1, e2, eflags, selector;
3372 int rpl, dpl, cpl;
3373
3374 selector = selector1 & 0xffff;
3375 eflags = cc_table[CC_OP].compute_all();
3376 if ((selector & 0xfffc) == 0)
3377 goto fail;
3378 if (load_segment(&e1, &e2, selector) != 0)
3379 goto fail;
3380 if (!(e2 & DESC_S_MASK))
3381 goto fail;
3382 rpl = selector & 3;
3383 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3384 cpl = env->hflags & HF_CPL_MASK;
3385 if (e2 & DESC_CS_MASK) {
3386 if (!(e2 & DESC_R_MASK))
3387 goto fail;
3388 if (!(e2 & DESC_C_MASK)) {
3389 if (dpl < cpl || dpl < rpl)
3390 goto fail;
3391 }
3392 } else {
3393 if (dpl < cpl || dpl < rpl) {
3394 fail:
3395 CC_SRC = eflags & ~CC_Z;
3396 return;
3397 }
3398 }
3399 CC_SRC = eflags | CC_Z;
3400}
3401
3402void helper_verw(target_ulong selector1)
3403{
3404 uint32_t e1, e2, eflags, selector;
3405 int rpl, dpl, cpl;
3406
3407 selector = selector1 & 0xffff;
3408 eflags = cc_table[CC_OP].compute_all();
3409 if ((selector & 0xfffc) == 0)
3410 goto fail;
3411 if (load_segment(&e1, &e2, selector) != 0)
3412 goto fail;
3413 if (!(e2 & DESC_S_MASK))
3414 goto fail;
3415 rpl = selector & 3;
3416 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3417 cpl = env->hflags & HF_CPL_MASK;
3418 if (e2 & DESC_CS_MASK) {
3419 goto fail;
3420 } else {
3421 if (dpl < cpl || dpl < rpl)
3422 goto fail;
3423 if (!(e2 & DESC_W_MASK)) {
3424 fail:
3425 CC_SRC = eflags & ~CC_Z;
3426 return;
3427 }
3428 }
3429 CC_SRC = eflags | CC_Z;
3430}
3431
3432/* x87 FPU helpers */
3433
3434static void fpu_set_exception(int mask)
3435{
3436 env->fpus |= mask;
3437 if (env->fpus & (~env->fpuc & FPUC_EM))
3438 env->fpus |= FPUS_SE | FPUS_B;
3439}
3440
3441static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3442{
3443 if (b == 0.0)
3444 fpu_set_exception(FPUS_ZE);
3445 return a / b;
3446}
3447
3448void fpu_raise_exception(void)
3449{
3450 if (env->cr[0] & CR0_NE_MASK) {
3451 raise_exception(EXCP10_COPR);
3452 }
3453#if !defined(CONFIG_USER_ONLY)
3454 else {
3455 cpu_set_ferr(env);
3456 }
3457#endif
3458}
3459
3460void helper_flds_FT0(uint32_t val)
3461{
3462 union {
3463 float32 f;
3464 uint32_t i;
3465 } u;
3466 u.i = val;
3467 FT0 = float32_to_floatx(u.f, &env->fp_status);
3468}
3469
3470void helper_fldl_FT0(uint64_t val)
3471{
3472 union {
3473 float64 f;
3474 uint64_t i;
3475 } u;
3476 u.i = val;
3477 FT0 = float64_to_floatx(u.f, &env->fp_status);
3478}
3479
3480void helper_fildl_FT0(int32_t val)
3481{
3482 FT0 = int32_to_floatx(val, &env->fp_status);
3483}
3484
3485void helper_flds_ST0(uint32_t val)
3486{
3487 int new_fpstt;
3488 union {
3489 float32 f;
3490 uint32_t i;
3491 } u;
3492 new_fpstt = (env->fpstt - 1) & 7;
3493 u.i = val;
3494 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3495 env->fpstt = new_fpstt;
3496 env->fptags[new_fpstt] = 0; /* validate stack entry */
3497}
3498
3499void helper_fldl_ST0(uint64_t val)
3500{
3501 int new_fpstt;
3502 union {
3503 float64 f;
3504 uint64_t i;
3505 } u;
3506 new_fpstt = (env->fpstt - 1) & 7;
3507 u.i = val;
3508 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3509 env->fpstt = new_fpstt;
3510 env->fptags[new_fpstt] = 0; /* validate stack entry */
3511}
3512
3513void helper_fildl_ST0(int32_t val)
3514{
3515 int new_fpstt;
3516 new_fpstt = (env->fpstt - 1) & 7;
3517 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3518 env->fpstt = new_fpstt;
3519 env->fptags[new_fpstt] = 0; /* validate stack entry */
3520}
3521
3522void helper_fildll_ST0(int64_t val)
3523{
3524 int new_fpstt;
3525 new_fpstt = (env->fpstt - 1) & 7;
3526 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3527 env->fpstt = new_fpstt;
3528 env->fptags[new_fpstt] = 0; /* validate stack entry */
3529}
3530
3531uint32_t helper_fsts_ST0(void)
3532{
3533 union {
3534 float32 f;
3535 uint32_t i;
3536 } u;
3537 u.f = floatx_to_float32(ST0, &env->fp_status);
3538 return u.i;
3539}
3540
3541uint64_t helper_fstl_ST0(void)
3542{
3543 union {
3544 float64 f;
3545 uint64_t i;
3546 } u;
3547 u.f = floatx_to_float64(ST0, &env->fp_status);
3548 return u.i;
3549}
3550
3551int32_t helper_fist_ST0(void)
3552{
3553 int32_t val;
3554 val = floatx_to_int32(ST0, &env->fp_status);
3555 if (val != (int16_t)val)
3556 val = -32768;
3557 return val;
3558}
3559
3560int32_t helper_fistl_ST0(void)
3561{
3562 int32_t val;
3563 val = floatx_to_int32(ST0, &env->fp_status);
3564 return val;
3565}
3566
3567int64_t helper_fistll_ST0(void)
3568{
3569 int64_t val;
3570 val = floatx_to_int64(ST0, &env->fp_status);
3571 return val;
3572}
3573
3574int32_t helper_fistt_ST0(void)
3575{
3576 int32_t val;
3577 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3578 if (val != (int16_t)val)
3579 val = -32768;
3580 return val;
3581}
3582
3583int32_t helper_fisttl_ST0(void)
3584{
3585 int32_t val;
3586 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3587 return val;
3588}
3589
3590int64_t helper_fisttll_ST0(void)
3591{
3592 int64_t val;
3593 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3594 return val;
3595}
3596
3597void helper_fldt_ST0(target_ulong ptr)
3598{
3599 int new_fpstt;
3600 new_fpstt = (env->fpstt - 1) & 7;
3601 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3602 env->fpstt = new_fpstt;
3603 env->fptags[new_fpstt] = 0; /* validate stack entry */
3604}
3605
3606void helper_fstt_ST0(target_ulong ptr)
3607{
3608 helper_fstt(ST0, ptr);
3609}
3610
3611void helper_fpush(void)
3612{
3613 fpush();
3614}
3615
3616void helper_fpop(void)
3617{
3618 fpop();
3619}
3620
3621void helper_fdecstp(void)
3622{
3623 env->fpstt = (env->fpstt - 1) & 7;
3624 env->fpus &= (~0x4700);
3625}
3626
3627void helper_fincstp(void)
3628{
3629 env->fpstt = (env->fpstt + 1) & 7;
3630 env->fpus &= (~0x4700);
3631}
3632
3633/* FPU move */
3634
3635void helper_ffree_STN(int st_index)
3636{
3637 env->fptags[(env->fpstt + st_index) & 7] = 1;
3638}
3639
3640void helper_fmov_ST0_FT0(void)
3641{
3642 ST0 = FT0;
3643}
3644
3645void helper_fmov_FT0_STN(int st_index)
3646{
3647 FT0 = ST(st_index);
3648}
3649
3650void helper_fmov_ST0_STN(int st_index)
3651{
3652 ST0 = ST(st_index);
3653}
3654
3655void helper_fmov_STN_ST0(int st_index)
3656{
3657 ST(st_index) = ST0;
3658}
3659
3660void helper_fxchg_ST0_STN(int st_index)
3661{
3662 CPU86_LDouble tmp;
3663 tmp = ST(st_index);
3664 ST(st_index) = ST0;
3665 ST0 = tmp;
3666}
3667
3668/* FPU operations */
3669
3670static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3671
3672void helper_fcom_ST0_FT0(void)
3673{
3674 int ret;
3675
3676 ret = floatx_compare(ST0, FT0, &env->fp_status);
3677 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3678 FORCE_RET();
3679}
3680
3681void helper_fucom_ST0_FT0(void)
3682{
3683 int ret;
3684
3685 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3686    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3687 FORCE_RET();
3688}
3689
3690static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3691
3692void helper_fcomi_ST0_FT0(void)
3693{
3694 int eflags;
3695 int ret;
3696
3697 ret = floatx_compare(ST0, FT0, &env->fp_status);
3698 eflags = cc_table[CC_OP].compute_all();
3699 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3700 CC_SRC = eflags;
3701 FORCE_RET();
3702}
3703
3704void helper_fucomi_ST0_FT0(void)
3705{
3706 int eflags;
3707 int ret;
3708
3709 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3710 eflags = cc_table[CC_OP].compute_all();
3711 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3712 CC_SRC = eflags;
3713 FORCE_RET();
3714}
3715
3716void helper_fadd_ST0_FT0(void)
3717{
3718 ST0 += FT0;
3719}
3720
3721void helper_fmul_ST0_FT0(void)
3722{
3723 ST0 *= FT0;
3724}
3725
3726void helper_fsub_ST0_FT0(void)
3727{
3728 ST0 -= FT0;
3729}
3730
3731void helper_fsubr_ST0_FT0(void)
3732{
3733 ST0 = FT0 - ST0;
3734}
3735
3736void helper_fdiv_ST0_FT0(void)
3737{
3738 ST0 = helper_fdiv(ST0, FT0);
3739}
3740
3741void helper_fdivr_ST0_FT0(void)
3742{
3743 ST0 = helper_fdiv(FT0, ST0);
3744}
3745
3746/* fp operations between STN and ST0 */
3747
3748void helper_fadd_STN_ST0(int st_index)
3749{
3750 ST(st_index) += ST0;
3751}
3752
3753void helper_fmul_STN_ST0(int st_index)
3754{
3755 ST(st_index) *= ST0;
3756}
3757
3758void helper_fsub_STN_ST0(int st_index)
3759{
3760 ST(st_index) -= ST0;
3761}
3762
3763void helper_fsubr_STN_ST0(int st_index)
3764{
3765 CPU86_LDouble *p;
3766 p = &ST(st_index);
3767 *p = ST0 - *p;
3768}
3769
3770void helper_fdiv_STN_ST0(int st_index)
3771{
3772 CPU86_LDouble *p;
3773 p = &ST(st_index);
3774 *p = helper_fdiv(*p, ST0);
3775}
3776
3777void helper_fdivr_STN_ST0(int st_index)
3778{
3779 CPU86_LDouble *p;
3780 p = &ST(st_index);
3781 *p = helper_fdiv(ST0, *p);
3782}
3783
3784/* misc FPU operations */
3785void helper_fchs_ST0(void)
3786{
3787 ST0 = floatx_chs(ST0);
3788}
3789
3790void helper_fabs_ST0(void)
3791{
3792 ST0 = floatx_abs(ST0);
3793}
3794
3795void helper_fld1_ST0(void)
3796{
3797 ST0 = f15rk[1];
3798}
3799
3800void helper_fldl2t_ST0(void)
3801{
3802 ST0 = f15rk[6];
3803}
3804
3805void helper_fldl2e_ST0(void)
3806{
3807 ST0 = f15rk[5];
3808}
3809
3810void helper_fldpi_ST0(void)
3811{
3812 ST0 = f15rk[2];
3813}
3814
3815void helper_fldlg2_ST0(void)
3816{
3817 ST0 = f15rk[3];
3818}
3819
3820void helper_fldln2_ST0(void)
3821{
3822 ST0 = f15rk[4];
3823}
3824
3825void helper_fldz_ST0(void)
3826{
3827 ST0 = f15rk[0];
3828}
3829
3830void helper_fldz_FT0(void)
3831{
3832 FT0 = f15rk[0];
3833}
3834
3835uint32_t helper_fnstsw(void)
3836{
3837 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3838}
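/* Bits 11..13 of the FPU status word hold TOP, the top-of-stack
   index; 0x3800 masks exactly that field, so the expression above
   splices env->fpstt into an otherwise unchanged status word. */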
3839
3840uint32_t helper_fnstcw(void)
3841{
3842 return env->fpuc;
3843}
3844
3845static void update_fp_status(void)
3846{
3847 int rnd_type;
3848
3849 /* set rounding mode */
3850 switch(env->fpuc & RC_MASK) {
3851 default:
3852 case RC_NEAR:
3853 rnd_type = float_round_nearest_even;
3854 break;
3855 case RC_DOWN:
3856 rnd_type = float_round_down;
3857 break;
3858 case RC_UP:
3859 rnd_type = float_round_up;
3860 break;
3861 case RC_CHOP:
3862 rnd_type = float_round_to_zero;
3863 break;
3864 }
3865 set_float_rounding_mode(rnd_type, &env->fp_status);
3866#ifdef FLOATX80
3867 switch((env->fpuc >> 8) & 3) {
3868 case 0:
3869 rnd_type = 32;
3870 break;
3871 case 2:
3872 rnd_type = 64;
3873 break;
3874 case 3:
3875 default:
3876 rnd_type = 80;
3877 break;
3878 }
3879 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3880#endif
3881}
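/* Layout note: RC_MASK above selects bits 10..11 of the control word
   (rounding control), and (env->fpuc >> 8) & 3 on the FLOATX80 path
   reads bits 8..9, the precision-control field (24/53/64-bit
   significands). */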
3882
3883void helper_fldcw(uint32_t val)
3884{
3885 env->fpuc = val;
3886 update_fp_status();
3887}
3888
3889void helper_fclex(void)
3890{
3891 env->fpus &= 0x7f00;
3892}
3893
3894void helper_fwait(void)
3895{
3896 if (env->fpus & FPUS_SE)
3897 fpu_raise_exception();
3898 FORCE_RET();
3899}
3900
3901void helper_fninit(void)
3902{
3903 env->fpus = 0;
3904 env->fpstt = 0;
3905 env->fpuc = 0x37f;
3906 env->fptags[0] = 1;
3907 env->fptags[1] = 1;
3908 env->fptags[2] = 1;
3909 env->fptags[3] = 1;
3910 env->fptags[4] = 1;
3911 env->fptags[5] = 1;
3912 env->fptags[6] = 1;
3913 env->fptags[7] = 1;
3914}
3915
3916/* BCD ops */
3917
3918void helper_fbld_ST0(target_ulong ptr)
3919{
3920 CPU86_LDouble tmp;
3921 uint64_t val;
3922 unsigned int v;
3923 int i;
3924
3925 val = 0;
3926 for(i = 8; i >= 0; i--) {
3927 v = ldub(ptr + i);
3928 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3929 }
3930 tmp = val;
3931 if (ldub(ptr + 9) & 0x80)
3932 tmp = -tmp;
3933 fpush();
3934 ST0 = tmp;
3935}
3936
3937void helper_fbst_ST0(target_ulong ptr)
3938{
3939 int v;
3940 target_ulong mem_ref, mem_end;
3941 int64_t val;
3942
3943 val = floatx_to_int64(ST0, &env->fp_status);
3944 mem_ref = ptr;
3945 mem_end = mem_ref + 9;
3946 if (val < 0) {
3947 stb(mem_end, 0x80);
3948 val = -val;
3949 } else {
3950 stb(mem_end, 0x00);
3951 }
3952 while (mem_ref < mem_end) {
3953 if (val == 0)
3954 break;
3955 v = val % 100;
3956 val = val / 100;
3957 v = ((v / 10) << 4) | (v % 10);
3958 stb(mem_ref++, v);
3959 }
3960 while (mem_ref < mem_end) {
3961 stb(mem_ref++, 0);
3962 }
3963}
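/* Illustrative only: the packed-BCD layout used above stores two
   decimal digits per byte, least significant pair first, with the
   sign in bit 7 of byte 9; e.g. -1234 becomes 34 12 00 ... 00 80.
   A minimal stand-alone encoder mirroring the loop in
   helper_fbst_ST0 (hypothetical, not used by this file): */
#if 0
static void bcd_pack(uint64_t val, int negative, uint8_t out[10])
{
    int i;
    for (i = 0; i < 9; i++) {
        out[i] = (uint8_t)((((val / 10) % 10) << 4) | (val % 10));
        val /= 100;
    }
    out[9] = negative ? 0x80 : 0x00; /* sign byte */
}
#endif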
3964
3965void helper_f2xm1(void)
3966{
3967 ST0 = pow(2.0,ST0) - 1.0;
3968}
3969
3970void helper_fyl2x(void)
3971{
3972 CPU86_LDouble fptemp;
3973
3974 fptemp = ST0;
3975 if (fptemp>0.0){
3976 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3977 ST1 *= fptemp;
3978 fpop();
3979 } else {
3980 env->fpus &= (~0x4700);
3981 env->fpus |= 0x400;
3982 }
3983}
3984
3985void helper_fptan(void)
3986{
3987 CPU86_LDouble fptemp;
3988
3989 fptemp = ST0;
3990 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3991 env->fpus |= 0x400;
3992 } else {
3993 ST0 = tan(fptemp);
3994 fpush();
3995 ST0 = 1.0;
3996 env->fpus &= (~0x400); /* C2 <-- 0 */
3997 /* the above code is for |arg| < 2**52 only */
3998 }
3999}
4000
4001void helper_fpatan(void)
4002{
4003 CPU86_LDouble fptemp, fpsrcop;
4004
4005 fpsrcop = ST1;
4006 fptemp = ST0;
4007 ST1 = atan2(fpsrcop,fptemp);
4008 fpop();
4009}
4010
4011void helper_fxtract(void)
4012{
4013 CPU86_LDoubleU temp;
4014 unsigned int expdif;
4015
4016 temp.d = ST0;
4017 expdif = EXPD(temp) - EXPBIAS;
4018 /*DP exponent bias*/
4019 ST0 = expdif;
4020 fpush();
4021 BIASEXPONENT(temp);
4022 ST0 = temp.d;
4023}
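/* Worked example: FXTRACT on ST0 = 6.0 (1.5 * 2^2) leaves ST1
   holding the unbiased exponent 2.0 and ST0 the significand 1.5. */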
4024
4025void helper_fprem1(void)
4026{
4027 CPU86_LDouble dblq, fpsrcop, fptemp;
4028 CPU86_LDoubleU fpsrcop1, fptemp1;
4029 int expdif;
4030 signed long long int q;
4031
4032 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4033 ST0 = 0.0 / 0.0; /* NaN */
4034 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4035 return;
4036 }
4037
4038 fpsrcop = ST0;
4039 fptemp = ST1;
4040 fpsrcop1.d = fpsrcop;
4041 fptemp1.d = fptemp;
4042 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4043
4044 if (expdif < 0) {
4045 /* optimisation? taken from the AMD docs */
4046 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4047 /* ST0 is unchanged */
4048 return;
4049 }
4050
4051 if (expdif < 53) {
4052 dblq = fpsrcop / fptemp;
4053 /* round dblq towards nearest integer */
4054 dblq = rint(dblq);
4055 ST0 = fpsrcop - fptemp * dblq;
4056
4057 /* convert dblq to q by truncating towards zero */
4058 if (dblq < 0.0)
4059 q = (signed long long int)(-dblq);
4060 else
4061 q = (signed long long int)dblq;
4062
4063 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4064 /* (C0,C3,C1) <-- (q2,q1,q0) */
4065 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4066 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4067 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4068 } else {
4069 env->fpus |= 0x400; /* C2 <-- 1 */
4070 fptemp = pow(2.0, expdif - 50);
4071 fpsrcop = (ST0 / ST1) / fptemp;
4072 /* fpsrcop = integer obtained by chopping */
4073 fpsrcop = (fpsrcop < 0.0) ?
4074 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4075 ST0 -= (ST1 * fpsrcop * fptemp);
4076 }
4077}
4078
4079void helper_fprem(void)
4080{
4081 CPU86_LDouble dblq, fpsrcop, fptemp;
4082 CPU86_LDoubleU fpsrcop1, fptemp1;
4083 int expdif;
4084 signed long long int q;
4085
4086 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4087 ST0 = 0.0 / 0.0; /* NaN */
4088 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4089 return;
4090 }
4091
4092 fpsrcop = (CPU86_LDouble)ST0;
4093 fptemp = (CPU86_LDouble)ST1;
4094 fpsrcop1.d = fpsrcop;
4095 fptemp1.d = fptemp;
4096 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4097
4098 if (expdif < 0) {
4099 /* optimisation? taken from the AMD docs */
4100 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4101 /* ST0 is unchanged */
4102 return;
4103 }
4104
4105 if ( expdif < 53 ) {
4106 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4107 /* round dblq towards zero */
4108 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4109 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4110
4111 /* convert dblq to q by truncating towards zero */
4112 if (dblq < 0.0)
4113 q = (signed long long int)(-dblq);
4114 else
4115 q = (signed long long int)dblq;
4116
4117 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4118 /* (C0,C3,C1) <-- (q2,q1,q0) */
4119 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4120 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4121 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4122 } else {
4123 int N = 32 + (expdif % 32); /* as per AMD docs */
4124 env->fpus |= 0x400; /* C2 <-- 1 */
4125 fptemp = pow(2.0, (double)(expdif - N));
4126 fpsrcop = (ST0 / ST1) / fptemp;
4127 /* fpsrcop = integer obtained by chopping */
4128 fpsrcop = (fpsrcop < 0.0) ?
4129 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4130 ST0 -= (ST1 * fpsrcop * fptemp);
4131 }
4132}
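/* Worked example for the quotient bits set above: FPREM with
   ST0 = 17.0, ST1 = 5.0 leaves ST0 = 2.0 and q = 3 (binary 011),
   so C0 = 0, C3 = 1, C1 = 1, with C2 = 0 marking the reduction
   complete. */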
4133
4134void helper_fyl2xp1(void)
4135{
4136 CPU86_LDouble fptemp;
4137
4138 fptemp = ST0;
4139 if ((fptemp+1.0)>0.0) {
4140 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4141 ST1 *= fptemp;
4142 fpop();
4143 } else {
4144 env->fpus &= (~0x4700);
4145 env->fpus |= 0x400;
4146 }
4147}
4148
4149void helper_fsqrt(void)
4150{
4151 CPU86_LDouble fptemp;
4152
4153 fptemp = ST0;
4154 if (fptemp<0.0) {
4155 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4156 env->fpus |= 0x400;
4157 }
4158 ST0 = sqrt(fptemp);
4159}
4160
4161void helper_fsincos(void)
4162{
4163 CPU86_LDouble fptemp;
4164
4165 fptemp = ST0;
4166 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4167 env->fpus |= 0x400;
4168 } else {
4169 ST0 = sin(fptemp);
4170 fpush();
4171 ST0 = cos(fptemp);
4172 env->fpus &= (~0x400); /* C2 <-- 0 */
4173 /* the above code is for |arg| < 2**63 only */
4174 }
4175}
4176
4177void helper_frndint(void)
4178{
4179 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4180}
4181
4182void helper_fscale(void)
4183{
4184 ST0 = ldexp (ST0, (int)(ST1));
4185}
4186
4187void helper_fsin(void)
4188{
4189 CPU86_LDouble fptemp;
4190
4191 fptemp = ST0;
4192 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4193 env->fpus |= 0x400;
4194 } else {
4195 ST0 = sin(fptemp);
4196 env->fpus &= (~0x400); /* C2 <-- 0 */
4197 /* the above code is for |arg| < 2**63 only */
4198 }
4199}
4200
4201void helper_fcos(void)
4202{
4203 CPU86_LDouble fptemp;
4204
4205 fptemp = ST0;
4206 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4207 env->fpus |= 0x400;
4208 } else {
4209 ST0 = cos(fptemp);
4210 env->fpus &= (~0x400); /* C2 <-- 0 */
4211 /* the above code is for |arg| < 2**63 only */
4212 }
4213}
4214
4215void helper_fxam_ST0(void)
4216{
4217 CPU86_LDoubleU temp;
4218 int expdif;
4219
4220 temp.d = ST0;
4221
4222 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4223 if (SIGND(temp))
4224 env->fpus |= 0x200; /* C1 <-- 1 */
4225
4226 /* XXX: test fptags too */
4227 expdif = EXPD(temp);
4228 if (expdif == MAXEXPD) {
4229#ifdef USE_X86LDOUBLE
4230 if (MANTD(temp) == 0x8000000000000000ULL)
4231#else
4232 if (MANTD(temp) == 0)
4233#endif
4234 env->fpus |= 0x500 /*Infinity*/;
4235 else
4236 env->fpus |= 0x100 /*NaN*/;
4237 } else if (expdif == 0) {
4238 if (MANTD(temp) == 0)
4239 env->fpus |= 0x4000 /*Zero*/;
4240 else
4241 env->fpus |= 0x4400 /*Denormal*/;
4242 } else {
4243 env->fpus |= 0x400;
4244 }
4245}
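
The classification above lands in C3,C2,C0 (bits 14, 10, 8) as: 0x100 for NaN, 0x500 for infinity, 0x4000 for zero, 0x4400 for a denormal and 0x400 for an ordinary finite number, with C1 (0x200) carrying the sign; the XXX note means empty registers are not yet reported. A small host-side cross-check of the same encoding using C99 fpclassify (an illustration over plain doubles, not the emulated 80-bit format):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Map a host double onto the FXAM C3/C2/C0 encoding used above. */
static uint16_t fxam_bits(double d)
{
    uint16_t fpus = signbit(d) ? 0x200 : 0;   /* C1 <-- sign */
    switch (fpclassify(d)) {
    case FP_NAN:       return fpus | 0x100;   /* C0 */
    case FP_INFINITE:  return fpus | 0x500;   /* C2|C0 */
    case FP_ZERO:      return fpus | 0x4000;  /* C3 */
    case FP_SUBNORMAL: return fpus | 0x4400;  /* C3|C2 */
    default:           return fpus | 0x400;   /* C2: normal finite */
    }
}

int main(void)
{
    printf("%#x %#x\n", fxam_bits(-0.0), fxam_bits(1.0)); /* 0x4200 0x400 */
    return 0;
}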
4246
4247void helper_fstenv(target_ulong ptr, int data32)
4248{
4249 int fpus, fptag, exp, i;
4250 uint64_t mant;
4251 CPU86_LDoubleU tmp;
4252
4253 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4254 fptag = 0;
4255 for (i=7; i>=0; i--) {
4256 fptag <<= 2;
4257 if (env->fptags[i]) {
4258 fptag |= 3;
4259 } else {
4260 tmp.d = env->fpregs[i].d;
4261 exp = EXPD(tmp);
4262 mant = MANTD(tmp);
4263 if (exp == 0 && mant == 0) {
4264 /* zero */
4265 fptag |= 1;
4266 } else if (exp == 0 || exp == MAXEXPD
4267#ifdef USE_X86LDOUBLE
4268 || (mant & (1LL << 63)) == 0
4269#endif
4270 ) {
4271 /* NaNs, infinity, denormal */
4272 fptag |= 2;
4273 }
4274 }
4275 }
4276 if (data32) {
4277 /* 32 bit */
4278 stl(ptr, env->fpuc);
4279 stl(ptr + 4, fpus);
4280 stl(ptr + 8, fptag);
4281 stl(ptr + 12, 0); /* fpip */
4282 stl(ptr + 16, 0); /* fpcs */
4283 stl(ptr + 20, 0); /* fpoo */
4284 stl(ptr + 24, 0); /* fpos */
4285 } else {
4286 /* 16 bit */
4287 stw(ptr, env->fpuc);
4288 stw(ptr + 2, fpus);
4289 stw(ptr + 4, fptag);
4290 stw(ptr + 6, 0);
4291 stw(ptr + 8, 0);
4292 stw(ptr + 10, 0);
4293 stw(ptr + 12, 0);
4294 }
4295}
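
The loop above rebuilds the 16-bit x87 tag word two bits per register: 00 for a valid value, 01 for zero, 10 for a special value (NaN, infinity, denormal) and 11 for empty. helper_fldenv below keeps only the empty/in-use distinction, and a tiny sketch of that decoding (local names) is:

#include <assert.h>

/* x87 tag-word decoding as in helper_fldenv: QEMU tracks only
   empty (11) versus in-use, so each 2-bit field collapses to one
   boolean per register. */
static void decode_tags(unsigned fptag, int fptags[8])
{
    int i;
    for (i = 0; i < 8; i++) {
        fptags[i] = ((fptag & 3) == 3);  /* 1 means empty */
        fptag >>= 2;
    }
}

int main(void)
{
    int t[8], i;
    decode_tags(0xffff, t);  /* all empty, the state FNINIT leaves */
    for (i = 0; i < 8; i++)
        assert(t[i] == 1);
    return 0;
}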
4296
4297void helper_fldenv(target_ulong ptr, int data32)
4298{
4299 int i, fpus, fptag;
4300
4301 if (data32) {
4302 env->fpuc = lduw(ptr);
4303 fpus = lduw(ptr + 4);
4304 fptag = lduw(ptr + 8);
4305 }
4306 else {
4307 env->fpuc = lduw(ptr);
4308 fpus = lduw(ptr + 2);
4309 fptag = lduw(ptr + 4);
4310 }
4311 env->fpstt = (fpus >> 11) & 7;
4312 env->fpus = fpus & ~0x3800;
4313 for(i = 0;i < 8; i++) {
4314 env->fptags[i] = ((fptag & 3) == 3);
4315 fptag >>= 2;
4316 }
4317}
4318
4319void helper_fsave(target_ulong ptr, int data32)
4320{
4321 CPU86_LDouble tmp;
4322 int i;
4323
4324 helper_fstenv(ptr, data32);
4325
4326 ptr += (14 << data32);
4327 for(i = 0;i < 8; i++) {
4328 tmp = ST(i);
4329 helper_fstt(tmp, ptr);
4330 ptr += 10;
4331 }
4332
4333 /* fninit */
4334 env->fpus = 0;
4335 env->fpstt = 0;
4336 env->fpuc = 0x37f;
4337 env->fptags[0] = 1;
4338 env->fptags[1] = 1;
4339 env->fptags[2] = 1;
4340 env->fptags[3] = 1;
4341 env->fptags[4] = 1;
4342 env->fptags[5] = 1;
4343 env->fptags[6] = 1;
4344 env->fptags[7] = 1;
4345}
4346
4347void helper_frstor(target_ulong ptr, int data32)
4348{
4349 CPU86_LDouble tmp;
4350 int i;
4351
4352 helper_fldenv(ptr, data32);
4353 ptr += (14 << data32);
4354
4355 for(i = 0;i < 8; i++) {
4356 tmp = helper_fldt(ptr);
4357 ST(i) = tmp;
4358 ptr += 10;
4359 }
4360}
4361
4362void helper_fxsave(target_ulong ptr, int data64)
4363{
4364 int fpus, fptag, i, nb_xmm_regs;
4365 CPU86_LDouble tmp;
4366 target_ulong addr;
4367
4368 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4369 fptag = 0;
4370 for(i = 0; i < 8; i++) {
4371 fptag |= (env->fptags[i] << i);
4372 }
4373 stw(ptr, env->fpuc);
4374 stw(ptr + 2, fpus);
4375 stw(ptr + 4, fptag ^ 0xff);
4376#ifdef TARGET_X86_64
4377 if (data64) {
4378 stq(ptr + 0x08, 0); /* rip */
4379 stq(ptr + 0x10, 0); /* rdp */
4380 } else
4381#endif
4382 {
4383 stl(ptr + 0x08, 0); /* eip */
4384 stl(ptr + 0x0c, 0); /* sel */
4385 stl(ptr + 0x10, 0); /* dp */
4386 stl(ptr + 0x14, 0); /* sel */
4387 }
4388
4389 addr = ptr + 0x20;
4390 for(i = 0;i < 8; i++) {
4391 tmp = ST(i);
4392 helper_fstt(tmp, addr);
4393 addr += 16;
4394 }
4395
4396 if (env->cr[4] & CR4_OSFXSR_MASK) {
4397 /* XXX: finish it */
4398 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4399 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4400 if (env->hflags & HF_CS64_MASK)
4401 nb_xmm_regs = 16;
4402 else
4403 nb_xmm_regs = 8;
4404 addr = ptr + 0xa0;
4405 for(i = 0; i < nb_xmm_regs; i++) {
4406 stq(addr, env->xmm_regs[i].XMM_Q(0));
4407 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4408 addr += 16;
4409 }
4410 }
4411}
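
For orientation, the fixed offsets written above are the leading fields of the 512-byte FXSAVE image; a sketch of that layout as a struct (field names are descriptive choices for this example, not identifiers from the QEMU source, and the bytes after xmm[] up to 512 are reserved):

#include <stdint.h>

struct fxsave_area {
    uint16_t fcw;          /* +0x00 control word */
    uint16_t fsw;          /* +0x02 status word, TOP folded back in */
    uint16_t ftw;          /* +0x04 abridged tag: 1 bit per register */
    uint16_t fop;          /* +0x06 (not written by the helper above) */
    uint32_t fip, fcs;     /* +0x08, +0x0c (64-bit rip/rdp in long mode) */
    uint32_t fdp, fds;     /* +0x10, +0x14 */
    uint32_t mxcsr;        /* +0x18 */
    uint32_t mxcsr_mask;   /* +0x1c */
    uint8_t st_mm[8][16];  /* +0x20 x87/MMX, one 16-byte lane each */
    uint8_t xmm[16][16];   /* +0xa0 XMM; only 8 saved outside long mode */
};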
4412
4413void helper_fxrstor(target_ulong ptr, int data64)
4414{
4415 int i, fpus, fptag, nb_xmm_regs;
4416 CPU86_LDouble tmp;
4417 target_ulong addr;
4418
4419 env->fpuc = lduw(ptr);
4420 fpus = lduw(ptr + 2);
4421 fptag = lduw(ptr + 4);
4422 env->fpstt = (fpus >> 11) & 7;
4423 env->fpus = fpus & ~0x3800;
4424 fptag ^= 0xff;
4425 for(i = 0;i < 8; i++) {
4426 env->fptags[i] = ((fptag >> i) & 1);
4427 }
4428
4429 addr = ptr + 0x20;
4430 for(i = 0;i < 8; i++) {
4431 tmp = helper_fldt(addr);
4432 ST(i) = tmp;
4433 addr += 16;
4434 }
4435
4436 if (env->cr[4] & CR4_OSFXSR_MASK) {
4437 /* XXX: finish it */
4438 env->mxcsr = ldl(ptr + 0x18);
4439 //ldl(ptr + 0x1c);
4440 if (env->hflags & HF_CS64_MASK)
4441 nb_xmm_regs = 16;
4442 else
4443 nb_xmm_regs = 8;
4444 addr = ptr + 0xa0;
4445 for(i = 0; i < nb_xmm_regs; i++) {
4446 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4447 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4448 addr += 16;
4449 }
4450 }
4451}
4452
4453#ifndef USE_X86LDOUBLE
4454
4455void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4456{
4457 CPU86_LDoubleU temp;
4458 int e;
4459
4460 temp.d = f;
4461 /* mantissa */
4462 *pmant = (MANTD(temp) << 11) | (1LL << 63);
4463 /* exponent + sign */
4464 e = EXPD(temp) - EXPBIAS + 16383;
4465 e |= SIGND(temp) >> 16;
4466 *pexp = e;
4467}
4468
4469CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4470{
4471 CPU86_LDoubleU temp;
4472 int e;
4473 uint64_t ll;
4474
4475 /* XXX: handle overflow ? */
4476 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4477 e |= (upper >> 4) & 0x800; /* sign */
4478 ll = (mant >> 11) & ((1LL << 52) - 1);
4479#ifdef __arm__
4480 temp.l.upper = (e << 20) | (ll >> 32);
4481 temp.l.lower = ll;
4482#else
4483 temp.ll = ll | ((uint64_t)e << 52);
4484#endif
4485 return temp.d;
4486}
4487
4488#else
4489
4490void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4491{
4492 CPU86_LDoubleU temp;
4493
4494 temp.d = f;
4495 *pmant = temp.l.lower;
4496 *pexp = temp.l.upper;
4497}
4498
4499CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4500{
4501 CPU86_LDoubleU temp;
4502
4503 temp.l.upper = upper;
4504 temp.l.lower = mant;
4505 return temp.d;
4506}
4507#endif
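
In the !USE_X86LDOUBLE configuration above, the conversion re-biases the exponent (1023 for doubles, 16383 for the 80-bit format) and shifts the 52-bit fraction into a 64-bit mantissa whose integer bit (bit 63) is explicit rather than implied. A hedged host-side demonstration of that explicit integer bit, assuming IEEE-754 doubles:

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    double d = 1.5;
    uint64_t bits, mant80;

    memcpy(&bits, &d, sizeof(bits));  /* raw IEEE-754 image */
    /* 52-bit fraction shifted up 11, implicit leading 1 made
       explicit in bit 63, exactly as cpu_get_fp80 does */
    mant80 = ((bits & ((1ULL << 52) - 1)) << 11) | (1ULL << 63);
    assert(mant80 == 0xc000000000000000ULL);  /* 1.5 = 1.1 binary */
    return 0;
}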
4508
4509#ifdef TARGET_X86_64
4510
4511//#define DEBUG_MULDIV
4512
4513static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4514{
4515 *plow += a;
4516 /* carry test */
4517 if (*plow < a)
4518 (*phigh)++;
4519 *phigh += b;
4520}
4521
4522static void neg128(uint64_t *plow, uint64_t *phigh)
4523{
4524 *plow = ~ *plow;
4525 *phigh = ~ *phigh;
4526 add128(plow, phigh, 1, 0);
4527}
4528
4529/* return TRUE if overflow */
4530static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4531{
4532 uint64_t q, r, a1, a0;
4533 int i, qb, ab;
4534
4535 a0 = *plow;
4536 a1 = *phigh;
4537 if (a1 == 0) {
4538 q = a0 / b;
4539 r = a0 % b;
4540 *plow = q;
4541 *phigh = r;
4542 } else {
4543 if (a1 >= b)
4544 return 1;
4545 /* XXX: use a better algorithm */
4546 for(i = 0; i < 64; i++) {
4547 ab = a1 >> 63;
4548 a1 = (a1 << 1) | (a0 >> 63);
4549 if (ab || a1 >= b) {
4550 a1 -= b;
4551 qb = 1;
4552 } else {
4553 qb = 0;
4554 }
4555 a0 = (a0 << 1) | qb;
4556 }
4557#if defined(DEBUG_MULDIV)
4558 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4559 *phigh, *plow, b, a0, a1);
4560#endif
4561 *plow = a0;
4562 *phigh = a1;
4563 }
4564 return 0;
4565}
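
div64 is a schoolbook 128-by-64 division, one quotient bit per iteration (the XXX comment concedes it is slow). Where the compiler offers unsigned __int128 (a GCC/Clang extension, assumed here purely for illustration), the result can be cross-checked directly:

#include <assert.h>
#include <stdint.h>

/* Reference 128/64 division; returns nonzero on overflow, mirroring
   div64's contract that the quotient fit in 64 bits. b must be
   nonzero, as the callers below guarantee. */
static int div64_ref(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    unsigned __int128 a;

    if (*phigh >= b)  /* quotient would not fit in 64 bits */
        return 1;
    a = ((unsigned __int128)*phigh << 64) | *plow;
    *plow = (uint64_t)(a / b);
    *phigh = (uint64_t)(a % b);
    return 0;
}

int main(void)
{
    uint64_t lo = 7, hi = 1;  /* value = 2^64 + 7 */
    assert(div64_ref(&lo, &hi, 3) == 0);
    assert(lo == 0x5555555555555557ULL && hi == 2);  /* q and remainder */
    return 0;
}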
4566
4567/* return TRUE if overflow */
4568static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4569{
4570 int sa, sb;
4571 sa = ((int64_t)*phigh < 0);
4572 if (sa)
4573 neg128(plow, phigh);
4574 sb = (b < 0);
4575 if (sb)
4576 b = -b;
4577 if (div64(plow, phigh, b) != 0)
4578 return 1;
4579 if (sa ^ sb) {
4580 if (*plow > (1ULL << 63))
4581 return 1;
4582 *plow = - *plow;
4583 } else {
4584 if (*plow >= (1ULL << 63))
4585 return 1;
4586 }
4587 if (sa)
4588 *phigh = - *phigh;
4589 return 0;
4590}
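
idiv64 above divides magnitudes and patches the signs afterwards, and its asymmetric checks encode the usual two's-complement edge: a quotient magnitude of exactly 2^63 is acceptable only when the result is negative. A tiny standalone statement of that rule:

#include <assert.h>
#include <stdint.h>

/* The quotient magnitude 2^63 fits only when the result is negative:
   the negative path rejects magnitudes > 2^63, the positive path
   rejects magnitudes >= 2^63, exactly as in idiv64. */
static int quotient_fits(uint64_t magnitude, int negative)
{
    return negative ? magnitude <= (1ULL << 63) : magnitude < (1ULL << 63);
}

int main(void)
{
    assert(quotient_fits(1ULL << 63, 1));   /* -2^63: representable */
    assert(!quotient_fits(1ULL << 63, 0));  /*  2^63: overflows */
    return 0;
}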
4591
4592void helper_mulq_EAX_T0(target_ulong t0)
4593{
4594 uint64_t r0, r1;
4595
4596 mulu64(&r0, &r1, EAX, t0);
4597 EAX = r0;
4598 EDX = r1;
4599 CC_DST = r0;
4600 CC_SRC = r1;
4601}
4602
4603void helper_imulq_EAX_T0(target_ulong t0)
4604{
4605 uint64_t r0, r1;
4606
4607 muls64(&r0, &r1, EAX, t0);
4608 EAX = r0;
4609 EDX = r1;
4610 CC_DST = r0;
4611 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4612}
4613
4614target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4615{
4616 uint64_t r0, r1;
4617
4618 muls64(&r0, &r1, t0, t1);
4619 CC_DST = r0;
4620 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4621 return r0;
4622}
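
For the two signed multiplies, CF/OF must be set exactly when the 128-bit product no longer fits in 64 bits, i.e. when the high half differs from the sign-extension of the low half -- the (int64_t)r1 != ((int64_t)r0 >> 63) test above. A quick sketch checking that rule against __int128 arithmetic (again a compiler extension used only for illustration):

#include <assert.h>
#include <stdint.h>

/* IMUL overflow rule: the product overflows 64 bits iff the high
   half differs from the sign-extension of the low half. */
static int imul64_overflows(int64_t a, int64_t b)
{
    __int128 p = (__int128)a * b;
    int64_t r0 = (int64_t)p;           /* low 64 bits */
    int64_t r1 = (int64_t)(p >> 64);   /* high 64 bits */
    return r1 != (r0 >> 63);
}

int main(void)
{
    assert(!imul64_overflows(-3, 5));        /* -15 fits */
    assert(imul64_overflows(INT64_MAX, 2));  /* wraps */
    return 0;
}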
4623
4624void helper_divq_EAX(target_ulong t0)
4625{
4626 uint64_t r0, r1;
4627 if (t0 == 0) {
4628 raise_exception(EXCP00_DIVZ);
4629 }
4630 r0 = EAX;
4631 r1 = EDX;
4632 if (div64(&r0, &r1, t0))
4633 raise_exception(EXCP00_DIVZ);
4634 EAX = r0;
4635 EDX = r1;
4636}
4637
4638void helper_idivq_EAX(target_ulong t0)
4639{
4640 uint64_t r0, r1;
4641 if (t0 == 0) {
4642 raise_exception(EXCP00_DIVZ);
4643 }
4644 r0 = EAX;
4645 r1 = EDX;
4646 if (idiv64(&r0, &r1, t0))
4647 raise_exception(EXCP00_DIVZ);
4648 EAX = r0;
4649 EDX = r1;
4650}
4651#endif
4652
4653static void do_hlt(void)
4654{
4655 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4656 env->halted = 1;
4657 env->exception_index = EXCP_HLT;
4658 cpu_loop_exit();
4659}
4660
4661void helper_hlt(int next_eip_addend)
4662{
4663 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4664 EIP += next_eip_addend;
4665
4666 do_hlt();
4667}
4668
4669void helper_monitor(target_ulong ptr)
4670{
4671 if ((uint32_t)ECX != 0)
4672 raise_exception(EXCP0D_GPF);
4673 /* XXX: store address ? */
4674 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4675}
4676
4677void helper_mwait(int next_eip_addend)
4678{
4679 if ((uint32_t)ECX != 0)
4680 raise_exception(EXCP0D_GPF);
4681 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4682 EIP += next_eip_addend;
4683
4684 /* XXX: not complete but not completely erroneous */
4685 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4686 /* more than one CPU: do not sleep because another CPU may
4687 wake this one */
4688 } else {
4689 do_hlt();
4690 }
4691}
4692
4693void helper_debug(void)
4694{
4695 env->exception_index = EXCP_DEBUG;
4696 cpu_loop_exit();
4697}
4698
4699void helper_raise_interrupt(int intno, int next_eip_addend)
4700{
4701 raise_interrupt(intno, 1, 0, next_eip_addend);
4702}
4703
4704void helper_raise_exception(int exception_index)
4705{
4706 raise_exception(exception_index);
4707}
4708
4709void helper_cli(void)
4710{
4711 env->eflags &= ~IF_MASK;
4712}
4713
4714void helper_sti(void)
4715{
4716 env->eflags |= IF_MASK;
4717}
4718
4719#if 0
4720/* vm86plus instructions */
4721void helper_cli_vm(void)
4722{
4723 env->eflags &= ~VIF_MASK;
4724}
4725
4726void helper_sti_vm(void)
4727{
4728 env->eflags |= VIF_MASK;
4729 if (env->eflags & VIP_MASK) {
4730 raise_exception(EXCP0D_GPF);
4731 }
4732}
4733#endif
4734
4735void helper_set_inhibit_irq(void)
4736{
4737 env->hflags |= HF_INHIBIT_IRQ_MASK;
4738}
4739
4740void helper_reset_inhibit_irq(void)
4741{
4742 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4743}
4744
4745void helper_boundw(target_ulong a0, int v)
4746{
4747 int low, high;
4748 low = ldsw(a0);
4749 high = ldsw(a0 + 2);
4750 v = (int16_t)v;
4751 if (v < low || v > high) {
4752 raise_exception(EXCP05_BOUND);
4753 }
4754 FORCE_RET();
4755}
4756
4757void helper_boundl(target_ulong a0, int v)
4758{
4759 int low, high;
4760 low = ldl(a0);
4761 high = ldl(a0 + 4);
4762 if (v < low || v > high) {
4763 raise_exception(EXCP05_BOUND);
4764 }
4765 FORCE_RET();
4766}
4767
4768static float approx_rsqrt(float a)
4769{
4770 return 1.0 / sqrt(a);
4771}
4772
4773static float approx_rcp(float a)
4774{
4775 return 1.0 / a;
4776}
4777
4778#if !defined(CONFIG_USER_ONLY)
4779
4780#define MMUSUFFIX _mmu
4781
4782#define SHIFT 0
4783#include "softmmu_template.h"
4784
4785#define SHIFT 1
4786#include "softmmu_template.h"
4787
4788#define SHIFT 2
4789#include "softmmu_template.h"
4790
4791#define SHIFT 3
4792#include "softmmu_template.h"
4793
4794#endif
4795
4796/* try to fill the TLB and return an exception if error. If retaddr is
4797 NULL, it means that the function was called in C code (i.e. not
4798 from generated code or from helper.c) */
4799/* XXX: fix it to restore all registers */
4800void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4801{
4802 TranslationBlock *tb;
4803 int ret;
4804 unsigned long pc;
4805 CPUX86State *saved_env;
4806
4807 /* XXX: hack to restore env in all cases, even if not called from
4808 generated code */
4809 saved_env = env;
4810 env = cpu_single_env;
4811
4812 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4813 if (ret) {
4814 if (retaddr) {
4815 /* now we have a real cpu fault */
4816 pc = (unsigned long)retaddr;
4817 tb = tb_find_pc(pc);
4818 if (tb) {
4819 /* the PC is inside the translated code. It means that we have
4820 a virtual CPU fault */
4821 cpu_restore_state(tb, env, pc, NULL);
4822 }
4823 }
4824 raise_exception_err(env->exception_index, env->error_code);
4825 }
4826 env = saved_env;
4827}
4828
4829
4830/* Secure Virtual Machine helpers */
4831
4832#if defined(CONFIG_USER_ONLY)
4833
4834void helper_vmrun(int aflag, int next_eip_addend)
4835{
4836}
4837void helper_vmmcall(void)
4838{
4839}
4840void helper_vmload(int aflag)
4841{
4842}
4843void helper_vmsave(int aflag)
4844{
4845}
4846void helper_stgi(void)
4847{
4848}
4849void helper_clgi(void)
4850{
4851}
4852void helper_skinit(void)
4853{
4854}
4855void helper_invlpga(int aflag)
4856{
4857}
4858void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4859{
4860}
4861void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4862{
4863}
4864
4865void helper_svm_check_io(uint32_t port, uint32_t param,
4866 uint32_t next_eip_addend)
4867{
4868}
4869#else
4870
4871static inline void svm_save_seg(target_phys_addr_t addr,
4872 const SegmentCache *sc)
4873{
4874 stw_phys(addr + offsetof(struct vmcb_seg, selector),
4875 sc->selector);
4876 stq_phys(addr + offsetof(struct vmcb_seg, base),
4877 sc->base);
4878 stl_phys(addr + offsetof(struct vmcb_seg, limit),
4879 sc->limit);
4880 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
4881 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4882}
4883
4884static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4885{
4886 unsigned int flags;
4887
4888 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4889 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4890 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4891 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4892 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4893}
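
The VMCB keeps segment attributes in a packed 12-bit form: descriptor flag bits 8..15 map to attrib bits 0..7 and flag bits 20..23 to attrib bits 8..11, which is what the shift pairs in svm_save_seg and svm_load_seg implement. A standalone round-trip sketch with local types:

#include <assert.h>
#include <stdint.h>

/* VMCB attrib packing: flag bits 8..15 -> attrib bits 0..7,
   flag bits 20..23 -> attrib bits 8..11, and the inverse. */
static uint16_t pack_attrib(uint32_t flags)
{
    return ((flags >> 8) & 0xff) | ((flags >> 12) & 0x0f00);
}

static uint32_t unpack_attrib(uint16_t attrib)
{
    return ((uint32_t)(attrib & 0xff) << 8) | ((uint32_t)(attrib & 0x0f00) << 12);
}

int main(void)
{
    uint32_t flags = 0x00c09b00;  /* a typical 32-bit code segment */
    assert(unpack_attrib(pack_attrib(flags)) == flags);
    return 0;
}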
4894
4895static inline void svm_load_seg_cache(target_phys_addr_t addr,
4896 CPUState *env, int seg_reg)
4897{
4898 SegmentCache sc1, *sc = &sc1;
4899 svm_load_seg(addr, sc);
4900 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4901 sc->base, sc->limit, sc->flags);
4902}
4903
4904void helper_vmrun(int aflag, int next_eip_addend)
4905{
4906 target_ulong addr;
4907 uint32_t event_inj;
4908 uint32_t int_ctl;
4909
4910 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4911
4912 if (aflag == 2)
4913 addr = EAX;
4914 else
4915 addr = (uint32_t)EAX;
4916
4917 if (loglevel & CPU_LOG_TB_IN_ASM)
4918 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
4919
4920 env->vm_vmcb = addr;
4921
4922 /* save the current CPU state in the hsave page */
4923 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4924 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4925
4926 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4927 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4928
4929 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4930 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4931 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4932 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4933 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4934 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4935
4936 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4937 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4938
4939 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4940 &env->segs[R_ES]);
4941 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
4942 &env->segs[R_CS]);
4943 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
4944 &env->segs[R_SS]);
4945 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
4946 &env->segs[R_DS]);
4947
4948 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4949 EIP + next_eip_addend);
4950 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4951 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4952
4953 /* load the interception bitmaps so we do not need to access the
4954 vmcb in svm mode */
4955 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4956 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4957 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4958 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4959 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4960 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4961
4962 /* enable intercepts */
4963 env->hflags |= HF_SVMI_MASK;
4964
4965 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4966
4967 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4968 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4969
4970 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4971 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4972
4973 /* clear exit_info_2 so we behave like the real hardware */
4974 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4975
4976 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4977 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4978 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4979 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4980 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4981 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4982 if (int_ctl & V_INTR_MASKING_MASK) {
4983 env->v_tpr = int_ctl & V_TPR_MASK;
4984 env->hflags2 |= HF2_VINTR_MASK;
4985 if (env->eflags & IF_MASK)
4986 env->hflags2 |= HF2_HIF_MASK;
4987 }
4988
4989 cpu_load_efer(env,
4990 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
4991 env->eflags = 0;
4992 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4993 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4994 CC_OP = CC_OP_EFLAGS;
4995
4996 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
4997 env, R_ES);
4998 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4999 env, R_CS);
5000 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5001 env, R_SS);
5002 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5003 env, R_DS);
5004
5005 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
5006 env->eip = EIP;
5007 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
5008 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
5009 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
5010 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
5011 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
5012
5013 /* FIXME: guest state consistency checks */
5014
5015 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
5016 case TLB_CONTROL_DO_NOTHING:
5017 break;
5018 case TLB_CONTROL_FLUSH_ALL_ASID:
5019 /* FIXME: this is not 100% correct but should work for now */
5020 tlb_flush(env, 1);
5021 break;
5022 }
5023
5024 env->hflags2 |= HF2_GIF_MASK;
5025
5026 if (int_ctl & V_IRQ_MASK) {
5027 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5028 }
5029
5030 /* maybe we need to inject an event */
5031 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
5032 if (event_inj & SVM_EVTINJ_VALID) {
5033 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
5034 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
5035 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
5036 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
5037
5038 if (loglevel & CPU_LOG_TB_IN_ASM)
5039 fprintf(logfile, "Injecting(%#hx): ", valid_err);
5040 /* FIXME: need to implement valid_err */
5041 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5042 case SVM_EVTINJ_TYPE_INTR:
5043 env->exception_index = vector;
5044 env->error_code = event_inj_err;
5045 env->exception_is_int = 0;
5046 env->exception_next_eip = -1;
5047 if (loglevel & CPU_LOG_TB_IN_ASM)
5048 fprintf(logfile, "INTR");
5049 /* XXX: is it always correct ? */
5050 do_interrupt(vector, 0, 0, 0, 1);
5051 break;
5052 case SVM_EVTINJ_TYPE_NMI:
5053 env->exception_index = EXCP02_NMI;
5054 env->error_code = event_inj_err;
5055 env->exception_is_int = 0;
5056 env->exception_next_eip = EIP;
5057 if (loglevel & CPU_LOG_TB_IN_ASM)
5058 fprintf(logfile, "NMI");
5059 cpu_loop_exit();
5060 break;
5061 case SVM_EVTINJ_TYPE_EXEPT:
5062 env->exception_index = vector;
5063 env->error_code = event_inj_err;
5064 env->exception_is_int = 0;
5065 env->exception_next_eip = -1;
5066 if (loglevel & CPU_LOG_TB_IN_ASM)
5067 fprintf(logfile, "EXEPT");
5068 cpu_loop_exit();
5069 break;
5070 case SVM_EVTINJ_TYPE_SOFT:
5071 env->exception_index = vector;
5072 env->error_code = event_inj_err;
5073 env->exception_is_int = 1;
5074 env->exception_next_eip = EIP;
5075 if (loglevel & CPU_LOG_TB_IN_ASM)
5076 fprintf(logfile, "SOFT");
5077 cpu_loop_exit();
5078 break;
5079 }
5080 if (loglevel & CPU_LOG_TB_IN_ASM)
5081 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
5082 }
5083}
5084
5085void helper_vmmcall(void)
5086{
5087 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5088 raise_exception(EXCP06_ILLOP);
eaa728ee
FB
5089}
5090
5091void helper_vmload(int aflag)
5092{
5093 target_ulong addr;
5094 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5095
5096 if (aflag == 2)
5097 addr = EAX;
5098 else
5099 addr = (uint32_t)EAX;
5100
5101 if (loglevel & CPU_LOG_TB_IN_ASM)
5102 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5103 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5104 env->segs[R_FS].base);
5105
5106 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5107 env, R_FS);
5108 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5109 env, R_GS);
5110 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5111 &env->tr);
5112 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5113 &env->ldt);
5114
5115#ifdef TARGET_X86_64
5116 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5117 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5118 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5119 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5120#endif
5121 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5122 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5123 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5124 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5125}
5126
5127void helper_vmsave(int aflag)
5128{
5129 target_ulong addr;
5130 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5131
5132 if (aflag == 2)
5133 addr = EAX;
5134 else
5135 addr = (uint32_t)EAX;
5136
5137 if (loglevel & CPU_LOG_TB_IN_ASM)
5138 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5139 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5140 env->segs[R_FS].base);
5141
5142 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5143 &env->segs[R_FS]);
5144 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5145 &env->segs[R_GS]);
5146 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5147 &env->tr);
5148 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5149 &env->ldt);
5150
5151#ifdef TARGET_X86_64
5152 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5153 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5154 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5155 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5156#endif
5157 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5158 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5159 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5160 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5161}
5162
5163void helper_stgi(void)
5164{
5165 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5166 env->hflags2 |= HF2_GIF_MASK;
5167}
5168
5169void helper_clgi(void)
5170{
5171 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5172 env->hflags2 &= ~HF2_GIF_MASK;
5173}
5174
5175void helper_skinit(void)
5176{
5177 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5178 /* XXX: not implemented */
5179 raise_exception(EXCP06_ILLOP);
5180}
5181
5182void helper_invlpga(int aflag)
5183{
5184 target_ulong addr;
5185 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5186
5187 if (aflag == 2)
5188 addr = EAX;
5189 else
5190 addr = (uint32_t)EAX;
5191
5192 /* XXX: could use the ASID to see if it is needed to do the
5193 flush */
5194 tlb_flush_page(env, addr);
5195}
5196
5197void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5198{
5199 if (likely(!(env->hflags & HF_SVMI_MASK)))
5200 return;
5201 switch(type) {
5202 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5203 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5204 helper_vmexit(type, param);
5205 }
5206 break;
5207 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5208 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5209 helper_vmexit(type, param);
5210 }
5211 break;
5212 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5213 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5214 helper_vmexit(type, param);
5215 }
5216 break;
5217 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5218 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5219 helper_vmexit(type, param);
5220 }
5221 break;
5222 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5223 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5224 helper_vmexit(type, param);
5225 }
5226 break;
5227 case SVM_EXIT_MSR:
5228 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5229 /* FIXME: this should be read in at vmrun (faster this way?) */
5230 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5231 uint32_t t0, t1;
5232 switch((uint32_t)ECX) {
5233 case 0 ... 0x1fff:
5234 t0 = (ECX * 2) % 8;
5235 t1 = (ECX * 2) / 8; /* two bits per MSR, as in the cases below */
5236 break;
5237 case 0xc0000000 ... 0xc0001fff:
5238 t0 = (8192 + ECX - 0xc0000000) * 2;
5239 t1 = (t0 / 8);
5240 t0 %= 8;
5241 break;
5242 case 0xc0010000 ... 0xc0011fff:
5243 t0 = (16384 + ECX - 0xc0010000) * 2;
5244 t1 = (t0 / 8);
5245 t0 %= 8;
5246 break;
5247 default:
5248 helper_vmexit(type, param);
5249 t0 = 0;
5250 t1 = 0;
5251 break;
5252 }
5253 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5254 helper_vmexit(type, param);
5255 }
5256 break;
5257 default:
5258 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5259 helper_vmexit(type, param);
5260 }
5261 break;
5262 }
5263}
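
The SVM_EXIT_MSR case above indexes the MSR permission map, which devotes two adjacent bits (read, then write) to each MSR across three 2-Kbyte regions: MSRs 0..0x1fff, 0xc0000000..0xc0001fff and 0xc0010000..0xc0011fff. A standalone sketch of the offset arithmetic for all three ranges (param is 0 for a read and 1 for a write, matching the (1 << param) << t0 test):

#include <assert.h>
#include <stdint.h>

/* Byte offset and bit position of the permission pair for one MSR.
   Returns -1 for MSRs outside the three mapped ranges. */
static int msrpm_locate(uint32_t msr, unsigned *byte, unsigned *bit)
{
    uint32_t idx;

    if (msr <= 0x1fff)
        idx = msr;
    else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
        idx = 8192 + (msr - 0xc0000000);
    else if (msr >= 0xc0010000 && msr <= 0xc0011fff)
        idx = 16384 + (msr - 0xc0010000);
    else
        return -1;

    *byte = (idx * 2) / 8;  /* two bits per MSR */
    *bit = (idx * 2) % 8;
    return 0;
}

int main(void)
{
    unsigned byte, bit;
    assert(msrpm_locate(0xc0000080 /* EFER */, &byte, &bit) == 0);
    assert(byte == 2080 && bit == 0);  /* (8192 + 0x80) * 2 / 8 */
    return 0;
}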
5264
5265void helper_svm_check_io(uint32_t port, uint32_t param,
5266 uint32_t next_eip_addend)
5267{
5268 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5269 /* FIXME: this should be read in at vmrun (faster this way?) */
5270 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5271 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5272 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5273 /* next EIP */
5274 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5275 env->eip + next_eip_addend);
5276 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5277 }
5278 }
5279}
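
helper_svm_check_io consults the I/O permission map: one bit per port, with an access of n bytes intercepted if any of the n covered bits is set, hence the (1 << size) - 1 mask shifted to the port's bit offset and the 16-bit load that lets the window straddle a byte boundary. A worked sketch over a local bitmap (the values are hypothetical; the real map lives in guest-physical memory at iopm_base_pa):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* One permission bit per I/O port; an access of `size` bytes at
   `port` is intercepted if any covered bit is set. */
static int io_intercepted(const uint8_t *iopm, unsigned port, unsigned size)
{
    uint16_t window;
    uint16_t mask = (1 << size) - 1;

    memcpy(&window, iopm + port / 8, sizeof(window)); /* little-endian host assumed */
    return (window & (mask << (port & 7))) != 0;
}

int main(void)
{
    uint8_t iopm[8192 + 2] = { 0 };  /* +2 pads the 16-bit read at the edge */
    iopm[0x70 / 8] |= 1 << (0x70 & 7);      /* intercept port 0x70 */
    assert(io_intercepted(iopm, 0x70, 1));
    assert(!io_intercepted(iopm, 0x71, 1));
    assert(io_intercepted(iopm, 0x6f, 2));  /* 2-byte access touches 0x70 */
    return 0;
}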
5280
5281/* Note: currently only 32 bits of exit_code are used */
5282void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5283{
5284 uint32_t int_ctl;
5285
5286 if (loglevel & CPU_LOG_TB_IN_ASM)
5287 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5288 exit_code, exit_info_1,
5289 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5290 EIP);
5291
5292 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5293 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5294 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5295 } else {
5296 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5297 }
5298
5299 /* Save the VM state in the vmcb */
5300 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
5301 &env->segs[R_ES]);
5302 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5303 &env->segs[R_CS]);
5304 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5305 &env->segs[R_SS]);
5306 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5307 &env->segs[R_DS]);
5308
5309 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5310 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5311
5312 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5313 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5314
5315 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5316 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5317 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5318 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5319 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5320
5321 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5322 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5323 int_ctl |= env->v_tpr & V_TPR_MASK;
5324 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5325 int_ctl |= V_IRQ_MASK;
5326 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5327
5328 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5329 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5330 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5331 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5332 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5333 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5334 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5335
5336 /* Reload the host state from vm_hsave */
5337 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5338 env->hflags &= ~HF_SVMI_MASK;
5339 env->intercept = 0;
5340 env->intercept_exceptions = 0;
5341 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5342 env->tsc_offset = 0;
5343
5344 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5345 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5346
5347 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5348 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5349
5350 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5351 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5352 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5353 /* we need to set the efer after the crs so the hidden flags get
5354 set properly */
5355 cpu_load_efer(env,
5356 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5357 env->eflags = 0;
5358 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5359 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5360 CC_OP = CC_OP_EFLAGS;
5361
5362 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5363 env, R_ES);
5364 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5365 env, R_CS);
5366 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5367 env, R_SS);
5368 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5369 env, R_DS);
5370
5371 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5372 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5373 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5374
5375 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5376 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5377
5378 /* other setups */
5379 cpu_x86_set_cpl(env, 0);
5380 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5381 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5382
5383 env->hflags2 &= ~HF2_GIF_MASK;
5384 /* FIXME: Resets the current ASID register to zero (host ASID). */
5385
5386 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5387
5388 /* Clears the TSC_OFFSET inside the processor. */
5389
5390 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5391 from the page table indicated the host's CR3. If the PDPEs contain
5392 illegal state, the processor causes a shutdown. */
5393
5394 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5395 env->cr[0] |= CR0_PE_MASK;
5396 env->eflags &= ~VM_MASK;
5397
5398 /* Disables all breakpoints in the host DR7 register. */
5399
5400 /* Checks the reloaded host state for consistency. */
5401
5402 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5403 host's code segment or non-canonical (in the case of long mode), a
5404 #GP fault is delivered inside the host. */
5405
5406 /* remove any pending exception */
5407 env->exception_index = -1;
5408 env->error_code = 0;
5409 env->old_exception = -1;
5410
5411 cpu_loop_exit();
5412}
5413
5414#endif
5415
5416/* MMX/SSE */
5417/* XXX: optimize by storing fptt and fptags in the static cpu state */
5418void helper_enter_mmx(void)
5419{
5420 env->fpstt = 0;
5421 *(uint32_t *)(env->fptags) = 0;
5422 *(uint32_t *)(env->fptags + 4) = 0;
5423}
5424
5425void helper_emms(void)
5426{
5427 /* set to empty state */
5428 *(uint32_t *)(env->fptags) = 0x01010101;
5429 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5430}
5431
5432/* XXX: suppress */
5433void helper_movq(uint64_t *d, uint64_t *s)
5434{
5435 *d = *s;
5436}
5437
5438#define SHIFT 0
5439#include "ops_sse.h"
5440
5441#define SHIFT 1
5442#include "ops_sse.h"
5443
5444#define SHIFT 0
5445#include "helper_template.h"
5446#undef SHIFT
5447
5448#define SHIFT 1
5449#include "helper_template.h"
5450#undef SHIFT
5451
5452#define SHIFT 2
5453#include "helper_template.h"
5454#undef SHIFT
5455
5456#ifdef TARGET_X86_64
5457
5458#define SHIFT 3
5459#include "helper_template.h"
5460#undef SHIFT
5461
5462#endif
5463
5464/* bit operations */
5465target_ulong helper_bsf(target_ulong t0)
5466{
5467 int count;
5468 target_ulong res;
5469
5470 res = t0;
5471 count = 0;
5472 while ((res & 1) == 0) {
5473 count++;
5474 res >>= 1;
5475 }
5476 return count;
5477}
5478
5479target_ulong helper_bsr(target_ulong t0)
5480{
5481 int count;
5482 target_ulong res, mask;
5483
5484 res = t0;
5485 count = TARGET_LONG_BITS - 1;
5486 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5487 while ((res & mask) == 0) {
5488 count--;
5489 res <<= 1;
5490 }
5491 return count;
5492}
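
Both scans above assume a nonzero operand -- BSF/BSR leave the destination undefined for a zero source, the translated code is responsible for that case, and helper_bsf as written would loop forever on 0. As an aside, on GCC/Clang the same scans collapse to single builtins with the same nonzero precondition:

#include <assert.h>

int main(void)
{
    unsigned long v = 0x50;
    /* __builtin_ctzl/clzl are undefined for 0, matching the
       helpers' nonzero precondition. */
    assert(__builtin_ctzl(v) == 4);                        /* BSF */
    assert((8 * sizeof(v) - 1) - __builtin_clzl(v) == 6);  /* BSR */
    return 0;
}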
5493
5494
5495static int compute_all_eflags(void)
5496{
5497 return CC_SRC;
5498}
5499
5500static int compute_c_eflags(void)
5501{
5502 return CC_SRC & CC_C;
5503}
5504
5505CCTable cc_table[CC_OP_NB] = {
5506 [CC_OP_DYNAMIC] = { /* should never happen */ },
5507
5508 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
5509
5510 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
5511 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
5512 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
5513
5514 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
5515 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
5516 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
5517
5518 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
5519 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
5520 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
5521
5522 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
5523 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
5524 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
5525
5526 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
5527 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
5528 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
5529
5530 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
5531 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
5532 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
5533
5534 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
5535 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
5536 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
5537
5538 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
5539 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
5540 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
5541
5542 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
5543 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
5544 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
5545
5546 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
5547 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
5548 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
5549
5550#ifdef TARGET_X86_64
5551 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
5552
5553 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
5554
5555 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
5556
5557 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
5558
5559 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
5560
5561 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
5562
5563 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
5564
5565 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
5566
5567 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
5568
5569 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
5570#endif
5571};
5572
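cc_table is the core of the lazy condition-code scheme: the translator records the last flag-setting operation in CC_OP and its operands in CC_SRC/CC_DST, and the flags are only materialized on demand through that operation's compute_all (every flag) or compute_c (carry only) hook. A stripped-down sketch of the idea with a two-entry table (local names and simplified flag math, not the QEMU types):

#include <assert.h>
#include <stdint.h>

/* Lazily evaluated flags: remember what the last op was and its
   inputs, derive the flags only when someone asks for them. */
enum { OP_LOGIC, OP_ADD, OP_NB };
static uint32_t cc_src, cc_dst;

static int all_logic(void) { return cc_dst == 0 ? 0x40 /* ZF */ : 0; }
static int c_logic(void)   { return 0; }               /* logic clears CF */
static int all_add(void)   { return (cc_dst < cc_src) | (cc_dst == 0 ? 0x40 : 0); }
static int c_add(void)     { return cc_dst < cc_src; } /* unsigned wrap */

static struct { int (*all)(void); int (*c)(void); } table[OP_NB] = {
    [OP_LOGIC] = { all_logic, c_logic },
    [OP_ADD]   = { all_add, c_add },
};

int main(void)
{
    cc_src = 1;                         /* last op: result = x + 1 ... */
    cc_dst = 0;                         /* ... which wrapped to 0 */
    assert(table[OP_ADD].c() == 1);     /* carry out */
    assert(table[OP_ADD].all() & 0x40); /* and ZF set */
    return 0;
}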