/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu)                                  \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif

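/* The template expansions below provide the cpu_ld*_kernel() and
   cpu_st*_kernel() accessors used throughout this file to access
   descriptor tables, TSS contents and stacks with kernel privileges. */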
#ifndef CONFIG_USER_ONLY
#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif

/* return non-zero on error */
static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel(env, ptr);
    *e2_ptr = cpu_ldl_kernel(env, ptr + 4);
    return 0;
}

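/* The descriptor limit is 20 bits; if the granularity bit is set it is
   scaled to 4K units with the low 12 bits forced to 1. */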
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

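/* Fetch the SS:ESP pair for privilege level 'dpl' from the current TSS
   (16-bit or 32-bit layout depending on the TSS type). */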
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 2);
    } else {
        *esp_ptr = cpu_ldl_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 4);
    }
}

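/* Load one segment register while switching tasks; any check that fails
   raises #TS (or #NP if the segment is not present) with the offending
   selector. */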
static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(env, &e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
    }
}

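/* Perform a hardware task switch: validate the target TSS, save the
   outgoing CPU state into the current TSS, update the busy bits and the
   back link/NT flag according to 'source' (jump, call/interrupt or IRET),
   then load registers, segment selectors and the LDT from the new TSS. */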
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel(env, tss_base + 0x1c);
        new_eip = cpu_ldl_kernel(env, tss_base + 0x20);
        new_eflags = cpu_ldl_kernel(env, tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel(env, tss_base + (0x28 + i * 4));
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x48 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x60);
        new_trap = cpu_ldl_kernel(env, tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel(env, tss_base + 0x0e);
        new_eflags = cpu_lduw_kernel(env, tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel(env, tss_base + (0x12 + i * 2)) |
                0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x22 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = cpu_ldub_kernel(env, env->tr.base);
    v2 = cpu_ldub_kernel(env, env->tr.base + old_tss_limit_max);
    cpu_stb_kernel(env, env->tr.base, v1);
    cpu_stb_kernel(env, env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        cpu_stl_kernel(env, env->tr.base + 0x20, next_eip);
        cpu_stl_kernel(env, env->tr.base + 0x24, old_eflags);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel(env, env->tr.base + (0x48 + i * 4),
                           env->segs[i].selector);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel(env, env->tr.base + 0x0e, next_eip);
        cpu_stw_kernel(env, env->tr.base + 0x10, old_eflags);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel(env, env->tr.base + (0x22 + i * 4),
                           env->segs[i].selector);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel(env, tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        for (i = 0; i < DR7_MAX_BP; i++) {
            if (hw_local_breakpoint_enabled(env->dr[7], i) &&
                !hw_global_breakpoint_enabled(env->dr[7], i)) {
                hw_breakpoint_remove(env, i);
            }
        }
        env->dr[7] &= ~DR7_LOCAL_BP_MASK;
    }
#endif
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

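/* Vectors that push an error code: #DF(8), #TS(10), #NP(11), #SS(12),
   #GP(13), #PF(14) and #AC(17). */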
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

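/* SET_ESP updates the stack pointer while preserving the bits not covered
   by the current stack-size mask: a 16-bit stack keeps the upper bits of
   ESP/RSP, and a 32-bit stack zero-extends into RSP on x86-64 targets. */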
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)                             \
    {                                                            \
        sp -= 2;                                                 \
        cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val));    \
    }

#define PUSHL(ssp, sp, sp_mask, val)                                    \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
    }

#define POPW(ssp, sp, sp_mask, val)                              \
    {                                                            \
        val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask)));    \
        sp += 2;                                                 \
    }

#define POPL(ssp, sp, sp_mask, val)                                     \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \
        sp += 4;                                                        \
    }

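/* Deliver an interrupt or exception through the protected-mode IDT: a task
   gate triggers a task switch, while interrupt/trap gates may switch to an
   inner-privilege stack (or leave VM86 mode) before pushing the return
   frame and loading the new CS:EIP. */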
/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)                          \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel(env, sp, (val));         \
    }

#define POPQ(sp, val)                           \
    {                                           \
        val = cpu_ldq_kernel(env, sp);          \
        sp += 8;                                \
    }

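/* Fetch a stack pointer from the 64-bit TSS: levels 0-2 select RSP0-RSP2;
   interrupt handlers pass ist + 3 so that IST1-IST7 are selected. */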
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0) {
            esp = get_rsp_from_tss(env, ist + 3);
        } else {
            esp = get_rsp_from_tss(env, dpl);
        }
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        if (ist != 0) {
            esp = get_rsp_from_tss(env, ist + 3);
        } else {
            esp = env->regs[R_ESP];
        }
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif

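/* SYSCALL/SYSRET fast system call helpers: the CS/SS selectors come from
   MSR_STAR, the target RIP from LSTAR/CSTAR in long mode (or the low
   32 bits of STAR in legacy mode), and in long mode RFLAGS is masked
   with MSR_FMASK on entry. */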
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cs->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(cs);
}
#else
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
#endif

/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = cpu_ldl_kernel(env, ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
    if (is_int || intno == EXCP_SYSCALL) {
        env->eip = next_eip;
    }
}

#else

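/* While the CPU is executing inside an SVM guest (HF_SVMI_MASK), record
   the event being delivered in the VMCB EVENTINJ field, unless an event
   is already pending there. */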
static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                     control.event_inj_err),
                     error_code);
        }
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj);
    }
}
#endif

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.event_inj));

        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, cs->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(cpu, cs->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
}

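/* Check for and deliver pending hardware events, roughly in priority order:
   SIPI first, then (if GIF is set) SMI, NMI, MCE, external interrupts and
   finally SVM virtual interrupts.  Returns true when an event was delivered
   and the program flow has changed. */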
bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    bool ret = false;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        do_cpu_sipi(cpu);
    } else if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
            cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
            do_smm_enter(cpu);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
            env->hflags2 |= HF2_NMI_MASK;
            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
            ret = true;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            int intno;
            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
            cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                       CPU_INTERRUPT_VIRQ);
            intno = cpu_get_pic_interrupt(env);
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            ret = true;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            int intno;
            /* FIXME: this should respect TPR */
            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
            intno = ldl_phys(cs->as, env->vm_vmcb
                             + offsetof(struct vmcb, control.int_vector));
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing virtual hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
            ret = true;
#endif
        }
    }

    return ret;
}

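/* helper_enter_level()/helper_enter64_level() implement the nesting-level
   part of the ENTER instruction: copy the chain of saved frame pointers to
   the new frame and then push the new frame pointer value t1. */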
void helper_enter_level(CPUX86State *env, int level, int data32,
                        target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            cpu_stl_data(env, ssp + (esp & esp_mask),
                         cpu_ldl_data(env, ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        cpu_stl_data(env, ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data(env, ssp + (esp & esp_mask),
                         cpu_lduw_data(env, ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        cpu_stw_data(env, ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(CPUX86State *env, int level, int data64,
                          target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            cpu_stq_data(env, esp, cpu_ldq_data(env, ebp));
        }
        esp -= 8;
        cpu_stq_data(env, esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data(env, esp, cpu_lduw_data(env, ebp));
        }
        esp -= 2;
        cpu_stw_data(env, esp, t1);
    }
}
#endif

void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel(env, ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel(env, ptr + 8);
            e4 = cpu_ldl_kernel(env, ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
            } else {
                raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel(env, ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

1624/* protected mode jump */
2999a0b2 1625void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
eaa728ee
FB
1626 int next_eip_addend)
1627{
1628 int gate_cs, type;
1629 uint32_t e1, e2, cpl, dpl, rpl, limit;
1630 target_ulong next_eip;
1631
20054ef0 1632 if ((new_cs & 0xfffc) == 0) {
77b2bc2c 1633 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 1634 }
2999a0b2 1635 if (load_segment(env, &e1, &e2, new_cs) != 0) {
77b2bc2c 1636 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1637 }
1638 cpl = env->hflags & HF_CPL_MASK;
1639 if (e2 & DESC_S_MASK) {
20054ef0 1640 if (!(e2 & DESC_CS_MASK)) {
77b2bc2c 1641 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1642 }
1643 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1644 if (e2 & DESC_C_MASK) {
1645 /* conforming code segment */
20054ef0 1646 if (dpl > cpl) {
77b2bc2c 1647 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1648 }
1649 } else {
1650 /* non conforming code segment */
1651 rpl = new_cs & 3;
20054ef0 1652 if (rpl > cpl) {
77b2bc2c 1653 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1654 }
1655 if (dpl != cpl) {
77b2bc2c 1656 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1657 }
eaa728ee 1658 }
20054ef0 1659 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 1660 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
20054ef0 1661 }
1662 limit = get_seg_limit(e1, e2);
1663 if (new_eip > limit &&
20054ef0 1664 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
77b2bc2c 1665 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1666 }
1667 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1668 get_seg_base(e1, e2), limit, e2);
a78d0eab 1669 env->eip = new_eip;
1670 } else {
1671 /* jump to call or task gate */
1672 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1673 rpl = new_cs & 3;
1674 cpl = env->hflags & HF_CPL_MASK;
1675 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 1676 switch (type) {
1677 case 1: /* 286 TSS */
1678 case 9: /* 386 TSS */
1679 case 5: /* task gate */
20054ef0 1680 if (dpl < cpl || dpl < rpl) {
77b2bc2c 1681 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1682 }
eaa728ee 1683 next_eip = env->eip + next_eip_addend;
2999a0b2 1684 switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
1685 break;
1686 case 4: /* 286 call gate */
1687 case 12: /* 386 call gate */
20054ef0 1688 if ((dpl < cpl) || (dpl < rpl)) {
77b2bc2c 1689 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1690 }
1691 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 1692 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
20054ef0 1693 }
1694 gate_cs = e1 >> 16;
1695 new_eip = (e1 & 0xffff);
20054ef0 1696 if (type == 12) {
eaa728ee 1697 new_eip |= (e2 & 0xffff0000);
20054ef0 1698 }
2999a0b2 1699 if (load_segment(env, &e1, &e2, gate_cs) != 0) {
77b2bc2c 1700 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
20054ef0 1701 }
1702 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1703 /* must be code segment */
1704 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
20054ef0 1705 (DESC_S_MASK | DESC_CS_MASK))) {
77b2bc2c 1706 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
20054ef0 1707 }
eaa728ee 1708 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
20054ef0 1709 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
77b2bc2c 1710 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1711 }
1712 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 1713 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
20054ef0 1714 }
eaa728ee 1715 limit = get_seg_limit(e1, e2);
20054ef0 1716 if (new_eip > limit) {
77b2bc2c 1717 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 1718 }
1719 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1720 get_seg_base(e1, e2), limit, e2);
a78d0eab 1721 env->eip = new_eip;
1722 break;
1723 default:
77b2bc2c 1724 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1725 break;
1726 }
1727 }
1728}
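/*
 * Gate descriptor layout as consumed above (sample words assumed for
 * illustration): with e1 = 0x00081234 and e2 = 0x0040ec00, the target
 * selector is e1 >> 16 = 0x0008, the offset is
 * (e2 & 0xffff0000) | (e1 & 0xffff) = 0x00401234 for a 386 gate
 * (type 12), DPL = 3, P = 1, and the low five bits of e2 hold the
 * parameter count that helper_lcall_protected() uses below.
 */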
1729
1730/* real mode call */
2999a0b2 1731void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1732 int shift, int next_eip)
1733{
1734 int new_eip;
1735 uint32_t esp, esp_mask;
1736 target_ulong ssp;
1737
1738 new_eip = new_eip1;
08b3ded6 1739 esp = env->regs[R_ESP];
1740 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1741 ssp = env->segs[R_SS].base;
1742 if (shift) {
1743 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
1744 PUSHL(ssp, esp, esp_mask, next_eip);
1745 } else {
1746 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
1747 PUSHW(ssp, esp, esp_mask, next_eip);
1748 }
1749
1750 SET_ESP(esp, esp_mask);
1751 env->eip = new_eip;
1752 env->segs[R_CS].selector = new_cs;
1753 env->segs[R_CS].base = (new_cs << 4);
1754}
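/*
 * Example of the frame built above (segment value assumed): a 16-bit
 * "lcall 0x2000:0x0100" pushes the old CS and the return IP (4 bytes,
 * shift == 0; a 32-bit call pushes 8 bytes), then sets
 * CS.base = 0x2000 << 4 = 0x20000 and EIP = 0x0100, with no descriptor
 * checks in real mode.
 */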
1755
1756/* protected mode call */
2999a0b2 1757void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1758 int shift, int next_eip_addend)
1759{
1760 int new_stack, i;
1761 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1c918eba 1762 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
1763 uint32_t val, limit, old_sp_mask;
1764 target_ulong ssp, old_ssp, next_eip;
1765
1766 next_eip = env->eip + next_eip_addend;
d12d51d5 1767 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
8995b7a0 1768 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
20054ef0 1769 if ((new_cs & 0xfffc) == 0) {
77b2bc2c 1770 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 1771 }
2999a0b2 1772 if (load_segment(env, &e1, &e2, new_cs) != 0) {
77b2bc2c 1773 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1774 }
eaa728ee 1775 cpl = env->hflags & HF_CPL_MASK;
d12d51d5 1776 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
eaa728ee 1777 if (e2 & DESC_S_MASK) {
20054ef0 1778 if (!(e2 & DESC_CS_MASK)) {
77b2bc2c 1779 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1780 }
1781 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1782 if (e2 & DESC_C_MASK) {
1783 /* conforming code segment */
20054ef0 1784 if (dpl > cpl) {
77b2bc2c 1785 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1786 }
1787 } else {
1788 /* non conforming code segment */
1789 rpl = new_cs & 3;
20054ef0 1790 if (rpl > cpl) {
77b2bc2c 1791 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1792 }
1793 if (dpl != cpl) {
77b2bc2c 1794 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1795 }
eaa728ee 1796 }
20054ef0 1797 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 1798 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
20054ef0 1799 }
1800
1801#ifdef TARGET_X86_64
1802 /* XXX: check 16/32 bit cases in long mode */
1803 if (shift == 2) {
1804 target_ulong rsp;
20054ef0 1805
eaa728ee 1806 /* 64 bit case */
08b3ded6 1807 rsp = env->regs[R_ESP];
1808 PUSHQ(rsp, env->segs[R_CS].selector);
1809 PUSHQ(rsp, next_eip);
1810 /* from this point, not restartable */
08b3ded6 1811 env->regs[R_ESP] = rsp;
1812 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1813 get_seg_base(e1, e2),
1814 get_seg_limit(e1, e2), e2);
a78d0eab 1815 env->eip = new_eip;
1816 } else
1817#endif
1818 {
08b3ded6 1819 sp = env->regs[R_ESP];
1820 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1821 ssp = env->segs[R_SS].base;
1822 if (shift) {
1823 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
1824 PUSHL(ssp, sp, sp_mask, next_eip);
1825 } else {
1826 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
1827 PUSHW(ssp, sp, sp_mask, next_eip);
1828 }
1829
1830 limit = get_seg_limit(e1, e2);
20054ef0 1831 if (new_eip > limit) {
77b2bc2c 1832 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1833 }
1834 /* from this point, not restartable */
1835 SET_ESP(sp, sp_mask);
1836 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1837 get_seg_base(e1, e2), limit, e2);
a78d0eab 1838 env->eip = new_eip;
1839 }
1840 } else {
1841 /* check gate type */
1842 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1843 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1844 rpl = new_cs & 3;
20054ef0 1845 switch (type) {
1846 case 1: /* available 286 TSS */
1847 case 9: /* available 386 TSS */
1848 case 5: /* task gate */
20054ef0 1849 if (dpl < cpl || dpl < rpl) {
77b2bc2c 1850 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1851 }
2999a0b2 1852 switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
1853 return;
1854 case 4: /* 286 call gate */
1855 case 12: /* 386 call gate */
1856 break;
1857 default:
77b2bc2c 1858 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1859 break;
1860 }
1861 shift = type >> 3;
1862
20054ef0 1863 if (dpl < cpl || dpl < rpl) {
77b2bc2c 1864 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1865 }
eaa728ee 1866 /* check valid bit */
20054ef0 1867 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 1868 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
20054ef0 1869 }
1870 selector = e1 >> 16;
1871 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1872 param_count = e2 & 0x1f;
20054ef0 1873 if ((selector & 0xfffc) == 0) {
77b2bc2c 1874 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 1875 }
eaa728ee 1876
2999a0b2 1877 if (load_segment(env, &e1, &e2, selector) != 0) {
77b2bc2c 1878 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1879 }
1880 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
77b2bc2c 1881 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1882 }
eaa728ee 1883 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 1884 if (dpl > cpl) {
77b2bc2c 1885 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1886 }
1887 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 1888 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
20054ef0 1889 }
1890
1891 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1892 /* to inner privilege */
2999a0b2 1893 get_ss_esp_from_tss(env, &ss, &sp, dpl);
1894 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1895 TARGET_FMT_lx "\n", ss, sp, param_count,
1896 env->regs[R_ESP]);
20054ef0 1897 if ((ss & 0xfffc) == 0) {
77b2bc2c 1898 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1899 }
1900 if ((ss & 3) != dpl) {
77b2bc2c 1901 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 1902 }
2999a0b2 1903 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
77b2bc2c 1904 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 1905 }
eaa728ee 1906 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 1907 if (ss_dpl != dpl) {
77b2bc2c 1908 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 1909 }
1910 if (!(ss_e2 & DESC_S_MASK) ||
1911 (ss_e2 & DESC_CS_MASK) ||
20054ef0 1912 !(ss_e2 & DESC_W_MASK)) {
77b2bc2c 1913 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1914 }
1915 if (!(ss_e2 & DESC_P_MASK)) {
77b2bc2c 1916 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 1917 }
eaa728ee 1918
20054ef0 1919 /* push_size = ((param_count * 2) + 8) << shift; */
1920
1921 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1922 old_ssp = env->segs[R_SS].base;
1923
1924 sp_mask = get_sp_mask(ss_e2);
1925 ssp = get_seg_base(ss_e1, ss_e2);
1926 if (shift) {
1927 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
08b3ded6 1928 PUSHL(ssp, sp, sp_mask, env->regs[R_ESP]);
20054ef0 1929 for (i = param_count - 1; i >= 0; i--) {
1930 val = cpu_ldl_kernel(env, old_ssp +
1931 ((env->regs[R_ESP] + i * 4) &
1932 old_sp_mask));
1933 PUSHL(ssp, sp, sp_mask, val);
1934 }
1935 } else {
1936 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
08b3ded6 1937 PUSHW(ssp, sp, sp_mask, env->regs[R_ESP]);
20054ef0 1938 for (i = param_count - 1; i >= 0; i--) {
1939 val = cpu_lduw_kernel(env, old_ssp +
1940 ((env->regs[R_ESP] + i * 2) &
1941 old_sp_mask));
1942 PUSHW(ssp, sp, sp_mask, val);
1943 }
1944 }
1945 new_stack = 1;
1946 } else {
1947 /* to same privilege */
08b3ded6 1948 sp = env->regs[R_ESP];
1949 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1950 ssp = env->segs[R_SS].base;
20054ef0 1951 /* push_size = (4 << shift); */
1952 new_stack = 0;
1953 }
1954
1955 if (shift) {
1956 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
1957 PUSHL(ssp, sp, sp_mask, next_eip);
1958 } else {
1959 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
1960 PUSHW(ssp, sp, sp_mask, next_eip);
1961 }
1962
1963 /* from this point, not restartable */
1964
1965 if (new_stack) {
1966 ss = (ss & ~3) | dpl;
1967 cpu_x86_load_seg_cache(env, R_SS, ss,
1968 ssp,
1969 get_seg_limit(ss_e1, ss_e2),
1970 ss_e2);
1971 }
1972
1973 selector = (selector & ~3) | dpl;
1974 cpu_x86_load_seg_cache(env, R_CS, selector,
1975 get_seg_base(e1, e2),
1976 get_seg_limit(e1, e2),
1977 e2);
eaa728ee 1978 SET_ESP(sp, sp_mask);
a78d0eab 1979 env->eip = offset;
eaa728ee 1980 }
1981}
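/*
 * Stack produced above for a call through a more-privileged 32-bit call
 * gate with param_count == N (the layout follows directly from the
 * pushes above, highest address first): old SS, old ESP,
 * param[N-1] ... param[0] copied from the caller's stack, old CS,
 * return EIP; SS:ESP itself comes from the TSS entry for the gate's
 * DPL. For a same-privilege call only CS and the return EIP are pushed.
 */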
1982
1983/* real and vm86 mode iret */
2999a0b2 1984void helper_iret_real(CPUX86State *env, int shift)
1985{
1986 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
1987 target_ulong ssp;
1988 int eflags_mask;
1989
20054ef0 1990 sp_mask = 0xffff; /* XXXX: use SS segment size? */
08b3ded6 1991 sp = env->regs[R_ESP];
1992 ssp = env->segs[R_SS].base;
1993 if (shift == 1) {
1994 /* 32 bits */
1995 POPL(ssp, sp, sp_mask, new_eip);
1996 POPL(ssp, sp, sp_mask, new_cs);
1997 new_cs &= 0xffff;
1998 POPL(ssp, sp, sp_mask, new_eflags);
1999 } else {
2000 /* 16 bits */
2001 POPW(ssp, sp, sp_mask, new_eip);
2002 POPW(ssp, sp, sp_mask, new_cs);
2003 POPW(ssp, sp, sp_mask, new_eflags);
2004 }
08b3ded6 2005 env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
bdadc0b5 2006 env->segs[R_CS].selector = new_cs;
2007 env->segs[R_CS].base = (new_cs << 4);
eaa728ee 2008 env->eip = new_eip;
2009 if (env->eflags & VM_MASK) {
2010 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2011 NT_MASK;
2012 } else {
2013 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2014 RF_MASK | NT_MASK;
2015 }
2016 if (shift == 0) {
eaa728ee 2017 eflags_mask &= 0xffff;
20054ef0 2018 }
997ff0d9 2019 cpu_load_eflags(env, new_eflags, eflags_mask);
db620f46 2020 env->hflags2 &= ~HF2_NMI_MASK;
2021}
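/*
 * Frame consumed above: a 16-bit iret pops IP, CS and FLAGS (6 bytes),
 * a 32-bit iret pops 12 bytes with the upper half of CS ignored. Note
 * that in VM86 mode the mask above deliberately leaves IOPL unchanged.
 */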
2022
2999a0b2 2023static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
2024{
2025 int dpl;
2026 uint32_t e2;
2027
2028 /* XXX: on x86_64, we do not want to nullify FS and GS because
2029 they may still contain a valid base. I would be interested to
2030 know how a real x86_64 CPU behaves */
2031 if ((seg_reg == R_FS || seg_reg == R_GS) &&
20054ef0 2032 (env->segs[seg_reg].selector & 0xfffc) == 0) {
eaa728ee 2033 return;
20054ef0 2034 }
2035
2036 e2 = env->segs[seg_reg].flags;
2037 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2038 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2039 /* data or non conforming code segment */
2040 if (dpl < cpl) {
2041 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2042 }
2043 }
2044}
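/*
 * Example: returning from CPL 0 to CPL 3 with DS still holding a DPL-0
 * data-segment selector nulls DS above, while an already-null FS or GS
 * is left alone so a 64-bit base programmed through the FS/GS base MSRs
 * can survive the transition.
 */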
2045
2046/* protected mode iret */
2047static inline void helper_ret_protected(CPUX86State *env, int shift,
2048 int is_iret, int addend)
2049{
2050 uint32_t new_cs, new_eflags, new_ss;
2051 uint32_t new_es, new_ds, new_fs, new_gs;
2052 uint32_t e1, e2, ss_e1, ss_e2;
2053 int cpl, dpl, rpl, eflags_mask, iopl;
2054 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2055
2056#ifdef TARGET_X86_64
20054ef0 2057 if (shift == 2) {
eaa728ee 2058 sp_mask = -1;
20054ef0 2059 } else
eaa728ee 2060#endif
20054ef0 2061 {
eaa728ee 2062 sp_mask = get_sp_mask(env->segs[R_SS].flags);
20054ef0 2063 }
08b3ded6 2064 sp = env->regs[R_ESP];
2065 ssp = env->segs[R_SS].base;
2066 new_eflags = 0; /* avoid warning */
2067#ifdef TARGET_X86_64
2068 if (shift == 2) {
2069 POPQ(sp, new_eip);
2070 POPQ(sp, new_cs);
2071 new_cs &= 0xffff;
2072 if (is_iret) {
2073 POPQ(sp, new_eflags);
2074 }
2075 } else
2076#endif
2077 {
2078 if (shift == 1) {
2079 /* 32 bits */
2080 POPL(ssp, sp, sp_mask, new_eip);
2081 POPL(ssp, sp, sp_mask, new_cs);
2082 new_cs &= 0xffff;
2083 if (is_iret) {
2084 POPL(ssp, sp, sp_mask, new_eflags);
2085 if (new_eflags & VM_MASK) {
2086 goto return_to_vm86;
2087 }
2088 }
2089 } else {
2090 /* 16 bits */
2091 POPW(ssp, sp, sp_mask, new_eip);
2092 POPW(ssp, sp, sp_mask, new_cs);
2093 if (is_iret) {
2094 POPW(ssp, sp, sp_mask, new_eflags);
2095 }
eaa728ee 2096 }
eaa728ee 2097 }
2098 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2099 new_cs, new_eip, shift, addend);
8995b7a0 2100 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
20054ef0 2101 if ((new_cs & 0xfffc) == 0) {
77b2bc2c 2102 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2103 }
2999a0b2 2104 if (load_segment(env, &e1, &e2, new_cs) != 0) {
77b2bc2c 2105 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2106 }
eaa728ee 2107 if (!(e2 & DESC_S_MASK) ||
20054ef0 2108 !(e2 & DESC_CS_MASK)) {
77b2bc2c 2109 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2110 }
2111 cpl = env->hflags & HF_CPL_MASK;
2112 rpl = new_cs & 3;
20054ef0 2113 if (rpl < cpl) {
77b2bc2c 2114 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2115 }
2116 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2117 if (e2 & DESC_C_MASK) {
20054ef0 2118 if (dpl > rpl) {
77b2bc2c 2119 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2120 }
eaa728ee 2121 } else {
20054ef0 2122 if (dpl != rpl) {
77b2bc2c 2123 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2124 }
eaa728ee 2125 }
20054ef0 2126 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 2127 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
20054ef0 2128 }
2129
2130 sp += addend;
2131 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2132 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
1235fc06 2133 /* return to same privilege level */
2134 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2135 get_seg_base(e1, e2),
2136 get_seg_limit(e1, e2),
2137 e2);
2138 } else {
2139 /* return to different privilege level */
2140#ifdef TARGET_X86_64
2141 if (shift == 2) {
2142 POPQ(sp, new_esp);
2143 POPQ(sp, new_ss);
2144 new_ss &= 0xffff;
2145 } else
2146#endif
2147 {
2148 if (shift == 1) {
2149 /* 32 bits */
2150 POPL(ssp, sp, sp_mask, new_esp);
2151 POPL(ssp, sp, sp_mask, new_ss);
2152 new_ss &= 0xffff;
2153 } else {
2154 /* 16 bits */
2155 POPW(ssp, sp, sp_mask, new_esp);
2156 POPW(ssp, sp, sp_mask, new_ss);
2157 }
eaa728ee 2158 }
d12d51d5 2159 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
20054ef0 2160 new_ss, new_esp);
2161 if ((new_ss & 0xfffc) == 0) {
2162#ifdef TARGET_X86_64
2163 /* NULL ss is allowed in long mode if cpl != 3 */
2164 /* XXX: test CS64? */
2165 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2166 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2167 0, 0xffffffff,
2168 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2169 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2170 DESC_W_MASK | DESC_A_MASK);
20054ef0 2171 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2172 } else
2173#endif
2174 {
77b2bc2c 2175 raise_exception_err(env, EXCP0D_GPF, 0);
2176 }
2177 } else {
20054ef0 2178 if ((new_ss & 3) != rpl) {
77b2bc2c 2179 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
20054ef0 2180 }
2999a0b2 2181 if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) {
77b2bc2c 2182 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
20054ef0 2183 }
2184 if (!(ss_e2 & DESC_S_MASK) ||
2185 (ss_e2 & DESC_CS_MASK) ||
20054ef0 2186 !(ss_e2 & DESC_W_MASK)) {
77b2bc2c 2187 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
20054ef0 2188 }
eaa728ee 2189 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 2190 if (dpl != rpl) {
77b2bc2c 2191 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2192 }
2193 if (!(ss_e2 & DESC_P_MASK)) {
77b2bc2c 2194 raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
20054ef0 2195 }
2196 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2197 get_seg_base(ss_e1, ss_e2),
2198 get_seg_limit(ss_e1, ss_e2),
2199 ss_e2);
2200 }
2201
2202 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2203 get_seg_base(e1, e2),
2204 get_seg_limit(e1, e2),
2205 e2);
2206 sp = new_esp;
2207#ifdef TARGET_X86_64
20054ef0 2208 if (env->hflags & HF_CS64_MASK) {
eaa728ee 2209 sp_mask = -1;
20054ef0 2210 } else
eaa728ee 2211#endif
20054ef0 2212 {
eaa728ee 2213 sp_mask = get_sp_mask(ss_e2);
20054ef0 2214 }
2215
2216 /* validate data segments */
2217 validate_seg(env, R_ES, rpl);
2218 validate_seg(env, R_DS, rpl);
2219 validate_seg(env, R_FS, rpl);
2220 validate_seg(env, R_GS, rpl);
2221
2222 sp += addend;
2223 }
2224 SET_ESP(sp, sp_mask);
2225 env->eip = new_eip;
2226 if (is_iret) {
2227 /* NOTE: 'cpl' is the _old_ CPL */
2228 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
20054ef0 2229 if (cpl == 0) {
eaa728ee 2230 eflags_mask |= IOPL_MASK;
20054ef0 2231 }
eaa728ee 2232 iopl = (env->eflags >> IOPL_SHIFT) & 3;
20054ef0 2233 if (cpl <= iopl) {
eaa728ee 2234 eflags_mask |= IF_MASK;
2235 }
2236 if (shift == 0) {
eaa728ee 2237 eflags_mask &= 0xffff;
20054ef0 2238 }
997ff0d9 2239 cpu_load_eflags(env, new_eflags, eflags_mask);
2240 }
2241 return;
2242
2243 return_to_vm86:
2244 POPL(ssp, sp, sp_mask, new_esp);
2245 POPL(ssp, sp, sp_mask, new_ss);
2246 POPL(ssp, sp, sp_mask, new_es);
2247 POPL(ssp, sp, sp_mask, new_ds);
2248 POPL(ssp, sp, sp_mask, new_fs);
2249 POPL(ssp, sp, sp_mask, new_gs);
2250
2251 /* modify processor state */
2252 cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2253 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2254 VIP_MASK);
2999a0b2 2255 load_seg_vm(env, R_CS, new_cs & 0xffff);
2256 load_seg_vm(env, R_SS, new_ss & 0xffff);
2257 load_seg_vm(env, R_ES, new_es & 0xffff);
2258 load_seg_vm(env, R_DS, new_ds & 0xffff);
2259 load_seg_vm(env, R_FS, new_fs & 0xffff);
2260 load_seg_vm(env, R_GS, new_gs & 0xffff);
2261
2262 env->eip = new_eip & 0xffff;
08b3ded6 2263 env->regs[R_ESP] = new_esp;
2264}
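/*
 * Frame consumed above when returning to an outer level with shift == 1:
 * EIP, CS, EFLAGS (iret only), then ESP and SS, 32 bits each with the
 * selectors truncated to 16 bits; shift == 2 pops 64-bit words instead.
 * For lret, 'addend' (the immediate of "lret $n") releases the callee's
 * stack parameters before the outer SS:ESP is loaded.
 */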
2265
2999a0b2 2266void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2267{
2268 int tss_selector, type;
2269 uint32_t e1, e2;
2270
2271 /* specific case for TSS */
2272 if (env->eflags & NT_MASK) {
2273#ifdef TARGET_X86_64
20054ef0 2274 if (env->hflags & HF_LMA_MASK) {
77b2bc2c 2275 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 2276 }
eaa728ee 2277#endif
329e607d 2278 tss_selector = cpu_lduw_kernel(env, env->tr.base + 0);
20054ef0 2279 if (tss_selector & 4) {
77b2bc2c 2280 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
20054ef0 2281 }
2999a0b2 2282 if (load_segment(env, &e1, &e2, tss_selector) != 0) {
77b2bc2c 2283 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
20054ef0 2284 }
2285 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2286 /* NOTE: we check both segment and busy TSS */
20054ef0 2287 if (type != 3) {
77b2bc2c 2288 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
20054ef0 2289 }
2999a0b2 2290 switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
eaa728ee 2291 } else {
2999a0b2 2292 helper_ret_protected(env, shift, 1, 0);
eaa728ee 2293 }
db620f46 2294 env->hflags2 &= ~HF2_NMI_MASK;
2295}
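/*
 * Nested-task case above: with EFLAGS.NT set, the selector of the
 * previous task is taken from the back-link word at offset 0 of the
 * current TSS and must name a busy TSS in the GDT (type 3 after the
 * 0x17 mask covers both the 286 and 386 busy encodings).
 */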
2296
2999a0b2 2297void helper_lret_protected(CPUX86State *env, int shift, int addend)
eaa728ee 2298{
2999a0b2 2299 helper_ret_protected(env, shift, 0, addend);
2300}
2301
2999a0b2 2302void helper_sysenter(CPUX86State *env)
2303{
2304 if (env->sysenter_cs == 0) {
77b2bc2c 2305 raise_exception_err(env, EXCP0D_GPF, 0);
2306 }
2307 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2308
2309#ifdef TARGET_X86_64
2310 if (env->hflags & HF_LMA_MASK) {
2311 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2312 0, 0xffffffff,
2313 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2314 DESC_S_MASK |
2315 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2316 DESC_L_MASK);
2317 } else
2318#endif
2319 {
2320 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2321 0, 0xffffffff,
2322 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2323 DESC_S_MASK |
2324 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2325 }
2326 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2327 0, 0xffffffff,
2328 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2329 DESC_S_MASK |
2330 DESC_W_MASK | DESC_A_MASK);
08b3ded6 2331 env->regs[R_ESP] = env->sysenter_esp;
a78d0eab 2332 env->eip = env->sysenter_eip;
2333}
2334
2999a0b2 2335void helper_sysexit(CPUX86State *env, int dflag)
2336{
2337 int cpl;
2338
2339 cpl = env->hflags & HF_CPL_MASK;
2340 if (env->sysenter_cs == 0 || cpl != 0) {
77b2bc2c 2341 raise_exception_err(env, EXCP0D_GPF, 0);
eaa728ee 2342 }
2343#ifdef TARGET_X86_64
2344 if (dflag == 2) {
2345 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2346 3, 0, 0xffffffff,
2347 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2348 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2349 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2350 DESC_L_MASK);
2351 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2352 3, 0, 0xffffffff,
2353 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2354 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2355 DESC_W_MASK | DESC_A_MASK);
2356 } else
2357#endif
2358 {
2359 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2360 3, 0, 0xffffffff,
2361 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2362 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2363 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2364 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2365 3, 0, 0xffffffff,
2366 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2367 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2368 DESC_W_MASK | DESC_A_MASK);
2369 }
08b3ded6 2370 env->regs[R_ESP] = env->regs[R_ECX];
a78d0eab 2371 env->eip = env->regs[R_EDX];
2372}
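/*
 * Segment selectors derived from IA32_SYSENTER_CS by the two helpers
 * above (RPL forced to 3 on the exit path):
 *
 *   sysenter:        CS = sysenter_cs        SS = sysenter_cs + 8
 *   32-bit sysexit:  CS = sysenter_cs + 16   SS = sysenter_cs + 24
 *   64-bit sysexit:  CS = sysenter_cs + 32   SS = sysenter_cs + 40
 *
 * sysenter takes ESP/EIP from the SYSENTER_ESP/EIP MSRs; sysexit takes
 * them from ECX/EDX.
 */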
2373
2999a0b2 2374target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2375{
2376 unsigned int limit;
2377 uint32_t e1, e2, eflags, selector;
2378 int rpl, dpl, cpl, type;
2379
2380 selector = selector1 & 0xffff;
f0967a1a 2381 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2382 if ((selector & 0xfffc) == 0) {
dc1ded53 2383 goto fail;
20054ef0 2384 }
2999a0b2 2385 if (load_segment(env, &e1, &e2, selector) != 0) {
eaa728ee 2386 goto fail;
20054ef0 2387 }
2388 rpl = selector & 3;
2389 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2390 cpl = env->hflags & HF_CPL_MASK;
2391 if (e2 & DESC_S_MASK) {
2392 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2393 /* conforming */
2394 } else {
20054ef0 2395 if (dpl < cpl || dpl < rpl) {
eaa728ee 2396 goto fail;
20054ef0 2397 }
2398 }
2399 } else {
2400 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 2401 switch (type) {
2402 case 1:
2403 case 2:
2404 case 3:
2405 case 9:
2406 case 11:
2407 break;
2408 default:
2409 goto fail;
2410 }
2411 if (dpl < cpl || dpl < rpl) {
2412 fail:
2413 CC_SRC = eflags & ~CC_Z;
2414 return 0;
2415 }
2416 }
2417 limit = get_seg_limit(e1, e2);
2418 CC_SRC = eflags | CC_Z;
2419 return limit;
2420}
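/*
 * Example (using the assumed flat descriptor from the note after
 * helper_load_seg): lsl on that selector returns 0xffffffff as the
 * limit and sets ZF; a null selector, a gate type not listed above, or
 * a privilege failure clears ZF and returns 0 instead of faulting.
 */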
2421
2999a0b2 2422target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2423{
2424 uint32_t e1, e2, eflags, selector;
2425 int rpl, dpl, cpl, type;
2426
2427 selector = selector1 & 0xffff;
f0967a1a 2428 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2429 if ((selector & 0xfffc) == 0) {
eaa728ee 2430 goto fail;
20054ef0 2431 }
2999a0b2 2432 if (load_segment(env, &e1, &e2, selector) != 0) {
eaa728ee 2433 goto fail;
20054ef0 2434 }
2435 rpl = selector & 3;
2436 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2437 cpl = env->hflags & HF_CPL_MASK;
2438 if (e2 & DESC_S_MASK) {
2439 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2440 /* conforming */
2441 } else {
20054ef0 2442 if (dpl < cpl || dpl < rpl) {
eaa728ee 2443 goto fail;
20054ef0 2444 }
2445 }
2446 } else {
2447 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 2448 switch (type) {
2449 case 1:
2450 case 2:
2451 case 3:
2452 case 4:
2453 case 5:
2454 case 9:
2455 case 11:
2456 case 12:
2457 break;
2458 default:
2459 goto fail;
2460 }
2461 if (dpl < cpl || dpl < rpl) {
2462 fail:
2463 CC_SRC = eflags & ~CC_Z;
2464 return 0;
2465 }
2466 }
2467 CC_SRC = eflags | CC_Z;
2468 return e2 & 0x00f0ff00;
2469}
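/*
 * The value returned above is e2 & 0x00f0ff00, i.e. the access-rights
 * byte (type, S, DPL, P) in bits 8-15 plus the AVL/L/D-B/G nibble in
 * bits 20-23. For the assumed sample e2 = 0x00cf9a00 this is
 * 0x00c09a00.
 */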
2470
2999a0b2 2471void helper_verr(CPUX86State *env, target_ulong selector1)
2472{
2473 uint32_t e1, e2, eflags, selector;
2474 int rpl, dpl, cpl;
2475
2476 selector = selector1 & 0xffff;
f0967a1a 2477 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2478 if ((selector & 0xfffc) == 0) {
eaa728ee 2479 goto fail;
20054ef0 2480 }
2999a0b2 2481 if (load_segment(env, &e1, &e2, selector) != 0) {
eaa728ee 2482 goto fail;
2483 }
2484 if (!(e2 & DESC_S_MASK)) {
eaa728ee 2485 goto fail;
20054ef0 2486 }
2487 rpl = selector & 3;
2488 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2489 cpl = env->hflags & HF_CPL_MASK;
2490 if (e2 & DESC_CS_MASK) {
20054ef0 2491 if (!(e2 & DESC_R_MASK)) {
eaa728ee 2492 goto fail;
20054ef0 2493 }
eaa728ee 2494 if (!(e2 & DESC_C_MASK)) {
20054ef0 2495 if (dpl < cpl || dpl < rpl) {
eaa728ee 2496 goto fail;
20054ef0 2497 }
2498 }
2499 } else {
2500 if (dpl < cpl || dpl < rpl) {
2501 fail:
2502 CC_SRC = eflags & ~CC_Z;
2503 return;
2504 }
2505 }
2506 CC_SRC = eflags | CC_Z;
2507}
2508
2999a0b2 2509void helper_verw(CPUX86State *env, target_ulong selector1)
2510{
2511 uint32_t e1, e2, eflags, selector;
2512 int rpl, dpl, cpl;
2513
2514 selector = selector1 & 0xffff;
f0967a1a 2515 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2516 if ((selector & 0xfffc) == 0) {
eaa728ee 2517 goto fail;
20054ef0 2518 }
2999a0b2 2519 if (load_segment(env, &e1, &e2, selector) != 0) {
eaa728ee 2520 goto fail;
2521 }
2522 if (!(e2 & DESC_S_MASK)) {
eaa728ee 2523 goto fail;
20054ef0 2524 }
2525 rpl = selector & 3;
2526 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2527 cpl = env->hflags & HF_CPL_MASK;
2528 if (e2 & DESC_CS_MASK) {
2529 goto fail;
2530 } else {
20054ef0 2531 if (dpl < cpl || dpl < rpl) {
eaa728ee 2532 goto fail;
20054ef0 2533 }
2534 if (!(e2 & DESC_W_MASK)) {
2535 fail:
2536 CC_SRC = eflags & ~CC_Z;
2537 return;
2538 }
2539 }
2540 CC_SRC = eflags | CC_Z;
2541}
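/*
 * Example: with CPL = RPL = 3, verr on a DPL-3 readable code or data
 * segment sets ZF, verw sets ZF only for a DPL-3 writable data segment,
 * and both simply clear ZF (no fault) for system descriptors, segments
 * that fail the checks, or selectors outside the descriptor tables.
 */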
2542
f299f437 2543#if defined(CONFIG_USER_ONLY)
2999a0b2 2544void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
eaa728ee 2545{
f299f437 2546 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
b98dbc90 2547 int dpl = (env->eflags & VM_MASK) ? 3 : 0;
2548 selector &= 0xffff;
2549 cpu_x86_load_seg_cache(env, seg_reg, selector,
2550 (selector << 4), 0xffff,
2551 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2552 DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
f299f437 2553 } else {
2999a0b2 2554 helper_load_seg(env, seg_reg, selector);
13822781 2555 }
eaa728ee 2556}
eaa728ee 2557#endif
2558
2559/* check if Port I/O is allowed in TSS */
2560static inline void check_io(CPUX86State *env, int addr, int size)
2561{
2562 int io_offset, val, mask;
2563
2564 /* TSS must be a valid 32 bit one */
2565 if (!(env->tr.flags & DESC_P_MASK) ||
2566 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2567 env->tr.limit < 103) {
2568 goto fail;
2569 }
2570 io_offset = cpu_lduw_kernel(env, env->tr.base + 0x66);
2571 io_offset += (addr >> 3);
2572 /* Note: the check needs two bytes */
2573 if ((io_offset + 1) > env->tr.limit) {
2574 goto fail;
2575 }
2576 val = cpu_lduw_kernel(env, env->tr.base + io_offset);
2577 val >>= (addr & 7);
2578 mask = (1 << size) - 1;
2579 /* all bits must be zero to allow the I/O */
2580 if ((val & mask) != 0) {
2581 fail:
2582 raise_exception_err(env, EXCP0D_GPF, 0);
2583 }
2584}
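/*
 * Worked example for the lookup above (port and map base are assumed
 * sample values): a 2-byte access to port 0x3f9 with an I/O map base of
 * 0x88 reads the word at tr.base + 0x88 + (0x3f9 >> 3) = tr.base +
 * 0x107, shifts it right by 0x3f9 & 7 = 1 and tests the low two bits
 * ((1 << 2) - 1); the access is allowed only if both are clear.
 */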
2585
2586void helper_check_iob(CPUX86State *env, uint32_t t0)
2587{
2588 check_io(env, t0, 1);
2589}
2590
2591void helper_check_iow(CPUX86State *env, uint32_t t0)
2592{
2593 check_io(env, t0, 2);
2594}
2595
2596void helper_check_iol(CPUX86State *env, uint32_t t0)
2597{
2598 check_io(env, t0, 4);
2599}