/*
 * x86 misc helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "dyngen-exec.h"
#include "ioport.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

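/*
 * I/O privilege checking for IN/OUT/INS/OUTS, when the TSS bitmap check
 * applies (CPL > IOPL or virtual-8086 mode), uses the I/O permission bitmap
 * of the current 32-bit TSS: the 16-bit word at TSS offset 0x66 gives the
 * bitmap base, and each bit covers one port (0 = access allowed).
 * check_io() below loads two bytes so that an access spanning a byte
 * boundary is still fully covered, then tests 'size' consecutive bits
 * starting at the port's bit position.
 *
 * Rough worked example (illustrative values only): for a 2-byte access to
 * port 0x3F9, io_offset = bitmap base + (0x3F9 >> 3) = base + 0x7F, the word
 * read from the TSS is shifted right by (0x3F9 & 7) = 1, and the mask is
 * (1 << 2) - 1 = 3; if either of those two bits is set, #GP(0) is raised.
 */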
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

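/*
 * Port I/O proper: the out helpers truncate the data to the access width
 * before handing it to the ioport layer, and the in helpers return the
 * unsigned value zero-extended into a target_ulong.
 */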
void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

void helper_into(int next_eip_addend)
{
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

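/*
 * Trap-flag single stepping: in system emulation the hardware breakpoint
 * state is folded into DR6 and the Single Step bit (DR6_BS) is set before
 * the #DB exception is raised, matching what a real CPU reports in DR6.
 */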
void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(env, EXCP01_DB);
}

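/*
 * CPUID: EAX selects the leaf and ECX the sub-leaf; cpu_x86_cpuid() fills in
 * the four result values, which are then written back to EAX/EBX/ECX/EDX.
 * The SVM intercept check runs first, so a #VMEXIT, if CPUID is intercepted,
 * is taken before any guest register is clobbered.
 */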
void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}

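/*
 * Control register access.  In user-mode emulation these are stubs; the
 * privilege checks done at translation time should prevent them from ever
 * being reached.  In system emulation CR8 is special: architecturally it
 * mirrors bits 7:4 of the local APIC task priority register, so reads and
 * writes are forwarded to the APIC unless SVM virtual-interrupt masking is
 * in effect (HF2_VINTR_MASK), in which case the guest sees the shadow value
 * kept in env->v_tpr instead.
 */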
#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0);
    switch (reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env->apic_state);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0);
    switch (reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env->apic_state, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++) {
            hw_breakpoint_remove(env, i);
        }
        env->dr[7] = t0;
        for (i = 0; i < 4; i++) {
            hw_breakpoint_insert(env, i);
        }
    } else {
        env->dr[reg] = t0;
    }
}
#endif

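/*
 * Worked out, the mask arithmetic in helper_lmsw() below is:
 *   (cr0 & ~0xe) keeps bit 0 (PE) and everything above bit 3,
 *   | (t0 & 0xf) supplies the new PE/MP/EM/TS bits,
 * so PE ends up as old_PE | new_PE, i.e. it can be set but never cleared,
 * which matches the architectural LMSW behaviour.
 */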
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_invlpg(target_ulong addr)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

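/*
 * RDTSC/RDTSCP: when CR4.TSD is set the instructions are restricted to
 * CPL 0 and raise #GP otherwise.  The 64-bit counter (plus any SVM TSC
 * offset) is returned split across EDX:EAX, and RDTSCP additionally returns
 * the TSC_AUX MSR in ECX.
 */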
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(env, EXCP0D_GPF);
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdtscp(void)
{
    helper_rdtsc();
    ECX = (uint32_t)(env->tsc_aux);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(env, EXCP0D_GPF);
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
    raise_exception_err(env, EXCP06_ILLOP, 0);
}

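/*
 * WRMSR/RDMSR: ECX selects the MSR and the 64-bit value travels split
 * across EDX:EAX, high half in EDX.  helper_wrmsr() therefore reassembles
 * it as
 *   val = (uint32_t)EAX | ((uint64_t)(uint32_t)EDX << 32);
 * e.g. (illustrative values) EDX = 0x00000001 and EAX = 0x00000100 encode
 * the 64-bit value 0x0000000100000100.  helper_rdmsr() performs the inverse
 * split at the end of the function.  The SVM intercept uses parameter 1 for
 * writes and 0 for reads, matching the EXITINFO1 convention.
 */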
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch ((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env->apic_state, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;

            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL) {
                update_mask |= MSR_EFER_SCE;
            }
            if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
                update_mask |= MSR_EFER_LME;
            }
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) {
                update_mask |= MSR_EFER_FFXSR;
            }
            if (env->cpuid_ext2_features & CPUID_EXT2_NX) {
                update_mask |= MSR_EFER_NXE;
            }
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
                update_mask |= MSR_EFER_SVME;
            }
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) {
                update_mask |= MSR_EFER_FFXSR;
            }
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0)) {
            env->mcg_ctl = val;
        }
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    case MSR_IA32_MISC_ENABLE:
        env->msr_ia32_misc_enable = val;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0)) {
                env->mce_banks[offset] = val;
            }
            break;
        }
        /* XXX: exception? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0);

    switch ((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR) {
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
                MSR_MTRRcap_WC_SUPPORTED;
        } else {
            /* XXX: exception? */
            val = 0;
        }
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P) {
            val = env->mcg_ctl;
        } else {
            val = 0;
        }
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

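/*
 * HLT and MWAIT both funnel into do_hlt(): the interrupt-shadow flag left
 * by a preceding STI is cleared, the CPU is marked halted and the execution
 * loop is exited with EXCP_HLT so the main loop can wait for an interrupt.
 * MWAIT only really sleeps in a single-CPU configuration here; with more
 * than one CPU it falls through, as the comment in helper_mwait() notes.
 */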
static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit(env);
}

void helper_hlt(int next_eip_addend)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    /* XXX: store address? */
    cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit(env);
}