/*
* qemu user main
*
- * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2003-2008 Fabrice Bellard
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
+ * MA 02110-1301, USA.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
+#include <sys/mman.h>
#include "qemu.h"
+#include "qemu-common.h"
+#include "cache-utils.h"
+/* For tb_lock */
+#include "exec-all.h"
+
+
+#include "envlist.h"
#define DEBUG_LOGFILE "/tmp/qemu.log"
+char *exec_path;
+
static const char *interp_prefix = CONFIG_QEMU_PREFIX;
const char *qemu_uname_release = CONFIG_UNAME_RELEASE;
"__init_array_end:\n"
"__fini_array_start:\n"
"__fini_array_end:\n"
- ".long 0\n");
+ ".long 0\n"
+ ".previous\n");
#endif
/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
return 0;
}
+#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUState *env)
{
return -1;
}
+#endif
/* timers for rdtsc */
#endif
+#if defined(USE_NPTL)
+/***********************************************************/
+/* Helper routines for implementing atomic operations. */
+
+/* To implement exclusive operations we force all cpus to synchronise.
+ We don't require a full sync, only that no cpus are executing guest code.
+ The alternative is to map target atomic ops onto host equivalents,
+ which requires quite a lot of per host/target work. */
+static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
+static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
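+/* pending_cpus is zero when no exclusive operation is in progress; otherwise
+   it counts the initiating thread plus every cpu still executing guest code,
+   and cpu_exec_end() decrements it until only the initiator remains. */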
+static int pending_cpus;
+
+/* Make sure everything is in a consistent state for calling fork(). */
+void fork_start(void)
+{
+ mmap_fork_start();
+ pthread_mutex_lock(&tb_lock);
+ pthread_mutex_lock(&exclusive_lock);
+}
+
+void fork_end(int child)
+{
+ if (child) {
+ /* Child processes created by fork() only have a single thread.
+ Discard information about the parent threads. */
+ first_cpu = thread_env;
+ thread_env->next_cpu = NULL;
+ pending_cpus = 0;
+ pthread_mutex_init(&exclusive_lock, NULL);
+ pthread_cond_init(&exclusive_cond, NULL);
+ pthread_cond_init(&exclusive_resume, NULL);
+ pthread_mutex_init(&tb_lock, NULL);
+ gdbserver_fork(thread_env);
+ } else {
+ pthread_mutex_unlock(&exclusive_lock);
+ pthread_mutex_unlock(&tb_lock);
+ }
+ mmap_fork_end(child);
+}
+
+/* Wait for pending exclusive operations to complete. The exclusive lock
+ must be held. */
+static inline void exclusive_idle(void)
+{
+ while (pending_cpus) {
+ pthread_cond_wait(&exclusive_resume, &exclusive_lock);
+ }
+}
+
+/* Start an exclusive operation.
+ Must only be called from outside cpu_arm_exec. */
+static inline void start_exclusive(void)
+{
+ CPUState *other;
+ pthread_mutex_lock(&exclusive_lock);
+ exclusive_idle();
+
+ pending_cpus = 1;
+ /* Make all other cpus stop executing. */
+ for (other = first_cpu; other; other = other->next_cpu) {
+ if (other->running) {
+ pending_cpus++;
+ cpu_interrupt(other, CPU_INTERRUPT_EXIT);
+ }
+ }
+ if (pending_cpus > 1) {
+ pthread_cond_wait(&exclusive_cond, &exclusive_lock);
+ }
+}
+
+/* Finish an exclusive operation. */
+static inline void end_exclusive(void)
+{
+ pending_cpus = 0;
+ pthread_cond_broadcast(&exclusive_resume);
+ pthread_mutex_unlock(&exclusive_lock);
+}
+
+/* Wait for exclusive ops to finish, and begin cpu execution. */
+static inline void cpu_exec_start(CPUState *env)
+{
+ pthread_mutex_lock(&exclusive_lock);
+ exclusive_idle();
+ env->running = 1;
+ pthread_mutex_unlock(&exclusive_lock);
+}
+
+/* Mark cpu as not executing, and release pending exclusive ops. */
+static inline void cpu_exec_end(CPUState *env)
+{
+ pthread_mutex_lock(&exclusive_lock);
+ env->running = 0;
+ if (pending_cpus > 1) {
+ pending_cpus--;
+ if (pending_cpus == 1) {
+ pthread_cond_signal(&exclusive_cond);
+ }
+ }
+ exclusive_idle();
+ pthread_mutex_unlock(&exclusive_lock);
+}
+#else /* if !USE_NPTL */
+/* These are no-ops because we are not threadsafe. */
+static inline void cpu_exec_start(CPUState *env)
+{
+}
+
+static inline void cpu_exec_end(CPUState *env)
+{
+}
+
+static inline void start_exclusive(void)
+{
+}
+
+static inline void end_exclusive(void)
+{
+}
+
+void fork_start(void)
+{
+}
+
+void fork_end(int child)
+{
+ if (child) {
+ gdbserver_fork(thread_env);
+ }
+}
+#endif
+
+
#ifdef TARGET_I386
/***********************************************************/
/* CPUX86 core interface */
e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
e2 |= flags;
p = ptr;
- p[0] = tswapl(e1);
- p[1] = tswapl(e2);
+ p[0] = tswap32(e1);
+ p[1] = tswap32(e2);
}
+static uint64_t *idt_table;
+#ifdef TARGET_X86_64
+static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
+ uint64_t addr, unsigned int sel)
+{
+ uint32_t *p, e1, e2;
+ e1 = (addr & 0xffff) | (sel << 16);
+ e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
+ p = ptr;
+ p[0] = tswap32(e1);
+ p[1] = tswap32(e2);
+ p[2] = tswap32(addr >> 32);
+ p[3] = 0;
+}
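+/* A 64-bit IDT entry is 16 bytes wide (two uint64_t slots), hence the
+   idt_table + n * 2 indexing in set_idt() below. */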
+/* only dpl matters as we do only user space emulation */
+static void set_idt(int n, unsigned int dpl)
+{
+ set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
+}
+#else
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
- unsigned long addr, unsigned int sel)
+ uint32_t addr, unsigned int sel)
{
- unsigned int e1, e2;
- uint32_t *p;
+ uint32_t *p, e1, e2;
e1 = (addr & 0xffff) | (sel << 16);
e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
p = ptr;
- p[0] = tswapl(e1);
- p[1] = tswapl(e2);
+ p[0] = tswap32(e1);
+ p[1] = tswap32(e2);
}
-uint64_t gdt_table[6];
-uint64_t idt_table[256];
-
/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
set_gate(idt_table + n, 0, dpl, 0, 0);
}
+#endif
void cpu_loop(CPUX86State *env)
{
trapnr = cpu_x86_exec(env);
switch(trapnr) {
case 0x80:
- /* linux syscall */
+ /* linux syscall from int $0x80 */
env->regs[R_EAX] = do_syscall(env,
env->regs[R_EAX],
env->regs[R_EBX],
env->regs[R_EDI],
env->regs[R_EBP]);
break;
+#ifndef TARGET_ABI32
+ case EXCP_SYSCALL:
+ /* linux syscall from syscall instruction */
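+ /* arguments follow the x86-64 syscall convention: rdi, rsi, rdx, r10, r8, r9 */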
+ env->regs[R_EAX] = do_syscall(env,
+ env->regs[R_EAX],
+ env->regs[R_EDI],
+ env->regs[R_ESI],
+ env->regs[R_EDX],
+ env->regs[10],
+ env->regs[8],
+ env->regs[9]);
+ env->eip = env->exception_next_eip;
+ break;
+#endif
case EXCP0B_NOSEG:
case EXCP0C_STACK:
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = TARGET_SI_KERNEL;
info._sifields._sigfault._addr = 0;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
break;
case EXCP0D_GPF:
+ /* XXX: potential problem if ABI32 */
#ifndef TARGET_X86_64
if (env->eflags & VM_MASK) {
handle_vm86_fault(env);
info.si_errno = 0;
info.si_code = TARGET_SI_KERNEL;
info._sifields._sigfault._addr = 0;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
}
break;
case EXCP0E_PAGE:
else
info.si_code = TARGET_SEGV_ACCERR;
info._sifields._sigfault._addr = env->cr[2];
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
break;
case EXCP00_DIVZ:
#ifndef TARGET_X86_64
info.si_errno = 0;
info.si_code = TARGET_FPE_INTDIV;
info._sifields._sigfault._addr = env->eip;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
}
break;
- case EXCP01_SSTP:
+ case EXCP01_DB:
case EXCP03_INT3:
#ifndef TARGET_X86_64
if (env->eflags & VM_MASK) {
{
info.si_signo = SIGTRAP;
info.si_errno = 0;
- if (trapnr == EXCP01_SSTP) {
+ if (trapnr == EXCP01_DB) {
info.si_code = TARGET_TRAP_BRKPT;
info._sifields._sigfault._addr = env->eip;
} else {
info.si_code = TARGET_SI_KERNEL;
info._sifields._sigfault._addr = 0;
}
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
}
break;
case EXCP04_INTO:
info.si_errno = 0;
info.si_code = TARGET_SI_KERNEL;
info._sifields._sigfault._addr = 0;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
}
break;
case EXCP06_ILLOP:
info.si_errno = 0;
info.si_code = TARGET_ILL_ILLOPN;
info._sifields._sigfault._addr = env->eip;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
break;
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
info.si_signo = sig;
info.si_errno = 0;
info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
}
}
break;
#ifdef TARGET_ARM
-/* XXX: find a better solution */
-extern void tb_invalidate_page_range(abi_ulong start, abi_ulong end);
-
static void arm_cache_flush(abi_ulong start, abi_ulong last)
{
abi_ulong addr, last1;
}
}
+/* Handle a jump to the kernel code page. */
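+/* The fixed addresses handled below are the "kuser helpers" that the Linux
+   ARM kernel maps into every process at the top of the address space. */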
+static int
+do_kernel_trap(CPUARMState *env)
+{
+ uint32_t addr;
+ uint32_t cpsr;
+ uint32_t val;
+
+ switch (env->regs[15]) {
+ case 0xffff0fa0: /* __kernel_memory_barrier */
+ /* ??? No-op. Will need to do better for SMP. */
+ break;
+ case 0xffff0fc0: /* __kernel_cmpxchg */
+ /* XXX: This only works between threads, not between processes.
+ It's probably possible to implement this with native host
+ operations. However things like ldrex/strex are much harder so
+ there's not much point trying. */
+ start_exclusive();
+ cpsr = cpsr_read(env);
+ addr = env->regs[2];
+ /* FIXME: This should SEGV if the access fails. */
+ if (get_user_u32(val, addr))
+ val = ~env->regs[0];
+ if (val == env->regs[0]) {
+ val = env->regs[1];
+ /* FIXME: Check for segfaults. */
+ put_user_u32(val, addr);
+ env->regs[0] = 0;
+ cpsr |= CPSR_C;
+ } else {
+ env->regs[0] = -1;
+ cpsr &= ~CPSR_C;
+ }
+ cpsr_write(env, cpsr, CPSR_C);
+ end_exclusive();
+ break;
+ case 0xffff0fe0: /* __kernel_get_tls */
+ env->regs[0] = env->cp15.c13_tls2;
+ break;
+ default:
+ return 1;
+ }
+ /* Jump back to the caller. */
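+ /* Bit 0 of lr selects Thumb state, just as it does for a bx instruction. */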
+ addr = env->regs[14];
+ if (addr & 1) {
+ env->thumb = 1;
+ addr &= ~1;
+ }
+ env->regs[15] = addr;
+
+ return 0;
+}
+
void cpu_loop(CPUARMState *env)
{
int trapnr;
uint32_t addr;
for(;;) {
+ cpu_exec_start(env);
trapnr = cpu_arm_exec(env);
+ cpu_exec_end(env);
switch(trapnr) {
case EXCP_UDEF:
{
TaskState *ts = env->opaque;
uint32_t opcode;
+ int rc;
/* we handle the FPU emulation here, as Linux does */
/* we get the opcode */
- opcode = tget32(env->regs[15]);
+ /* FIXME - what to do if get_user() fails? */
+ get_user_u32(opcode, env->regs[15]);
- if (EmulateAll(opcode, &ts->fpa, env) == 0) {
+ rc = EmulateAll(opcode, &ts->fpa, env);
+ if (rc == 0) { /* illegal instruction */
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = TARGET_ILL_ILLOPN;
info._sifields._sigfault._addr = env->regs[15];
- queue_signal(info.si_signo, &info);
- } else {
+ queue_signal(env, info.si_signo, &info);
+ } else if (rc < 0) { /* FP exception */
+ int arm_fpe = 0;
+
+ /* translate softfloat flags to FPSR flags */
+ if (-rc & float_flag_invalid)
+ arm_fpe |= BIT_IOC;
+ if (-rc & float_flag_divbyzero)
+ arm_fpe |= BIT_DZC;
+ if (-rc & float_flag_overflow)
+ arm_fpe |= BIT_OFC;
+ if (-rc & float_flag_underflow)
+ arm_fpe |= BIT_UFC;
+ if (-rc & float_flag_inexact)
+ arm_fpe |= BIT_IXC;
+
+ FPSR fpsr = ts->fpa.fpsr;
+ //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
+
+ if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+
+ /* ordered by priority, least first */
+ if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES;
+ if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND;
+ if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF;
+ if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV;
+ if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV;
+
+ info._sifields._sigfault._addr = env->regs[15];
+ queue_signal(env, info.si_signo, &info);
+ } else {
+ env->regs[15] += 4;
+ }
+
+ /* accumulate unenabled exceptions */
+ if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC))
+ fpsr |= BIT_IXC;
+ if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC))
+ fpsr |= BIT_UFC;
+ if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC))
+ fpsr |= BIT_OFC;
+ if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC))
+ fpsr |= BIT_DZC;
+ if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC))
+ fpsr |= BIT_IOC;
+ ts->fpa.fpsr = fpsr;
+ } else { /* everything OK */
/* increment PC */
env->regs[15] += 4;
}
/* system call */
if (trapnr == EXCP_BKPT) {
if (env->thumb) {
- insn = tget16(env->regs[15]);
+ /* FIXME - what to do if get_user() fails? */
+ get_user_u16(insn, env->regs[15]);
n = insn & 0xff;
env->regs[15] += 2;
} else {
- insn = tget32(env->regs[15]);
+ /* FIXME - what to do if get_user() fails? */
+ get_user_u32(insn, env->regs[15]);
n = (insn & 0xf) | ((insn >> 4) & 0xff0);
env->regs[15] += 4;
}
} else {
if (env->thumb) {
- insn = tget16(env->regs[15] - 2);
+ /* FIXME - what to do if get_user() fails? */
+ get_user_u16(insn, env->regs[15] - 2);
n = insn & 0xff;
} else {
- insn = tget32(env->regs[15] - 4);
+ /* FIXME - what to do if get_user() fails? */
+ get_user_u32(insn, env->regs[15] - 4);
n = insn & 0xffffff;
}
}
n -= ARM_SYSCALL_BASE;
env->eabi = 0;
}
- env->regs[0] = do_syscall(env,
- n,
- env->regs[0],
- env->regs[1],
- env->regs[2],
- env->regs[3],
- env->regs[4],
- env->regs[5]);
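+ /* Private ARM syscalls above ARM_NR_BASE (cacheflush, set_tls, ...) are
+    handled here; everything else is passed on to do_syscall(). */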
+ if (n > ARM_NR_BASE) {
+ switch (n) {
+ case ARM_NR_cacheflush:
+ arm_cache_flush(env->regs[0], env->regs[1]);
+ break;
+ case ARM_NR_set_tls:
+ cpu_set_tls(env, env->regs[0]);
+ env->regs[0] = 0;
+ break;
+ default:
+ gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
+ n);
+ env->regs[0] = -TARGET_ENOSYS;
+ break;
+ }
+ } else {
+ env->regs[0] = do_syscall(env,
+ n,
+ env->regs[0],
+ env->regs[1],
+ env->regs[2],
+ env->regs[3],
+ env->regs[4],
+ env->regs[5]);
+ }
} else {
goto error;
}
/* just indicate that signals should be handled asap */
break;
case EXCP_PREFETCH_ABORT:
- addr = env->cp15.c6_data;
+ addr = env->cp15.c6_insn;
goto do_segv;
case EXCP_DATA_ABORT:
- addr = env->cp15.c6_insn;
+ addr = env->cp15.c6_data;
goto do_segv;
do_segv:
{
/* XXX: check env->error_code */
info.si_code = TARGET_SEGV_MAPERR;
info._sifields._sigfault._addr = addr;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
}
break;
case EXCP_DEBUG:
info.si_signo = sig;
info.si_errno = 0;
info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
}
}
break;
+ case EXCP_KERNEL_TRAP:
+ if (do_kernel_trap(env))
+ goto error;
+ break;
default:
error:
fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
#endif
#ifdef TARGET_SPARC
+#define SPARC64_STACK_BIAS 2047
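+/* The SPARC V9 ABI biases the 64-bit stack pointer by 2047 bytes; an %sp that
+   is not 4-byte aligned therefore denotes a biased 64-bit frame and has to be
+   corrected before the window registers are spilled or filled. */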
//#define DEBUG_WIN
can be found at http://www.sics.se/~psm/sparcstack.html */
static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
{
- index = (index + cwp * 16) & (16 * NWINDOWS - 1);
+ index = (index + cwp * 16) % (16 * env->nwindows);
/* wrap handling : if cwp is on the last window, then we use the
registers 'after' the end */
- if (index < 8 && env->cwp == (NWINDOWS - 1))
- index += (16 * NWINDOWS);
+ if (index < 8 && env->cwp == env->nwindows - 1)
+ index += 16 * env->nwindows;
return index;
}
abi_ulong sp_ptr;
sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
+#ifdef TARGET_SPARC64
+ if (sp_ptr & 3)
+ sp_ptr += SPARC64_STACK_BIAS;
+#endif
#if defined(DEBUG_WIN)
- printf("win_overflow: sp_ptr=0x%x save_cwp=%d\n",
- (int)sp_ptr, cwp1);
+ printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",
+ sp_ptr, cwp1);
#endif
for(i = 0; i < 16; i++) {
- tputl(sp_ptr, env->regbase[get_reg_index(env, cwp1, 8 + i)]);
+ /* FIXME - what to do if put_user() fails? */
+ put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
sp_ptr += sizeof(abi_ulong);
}
}
{
#ifndef TARGET_SPARC64
unsigned int new_wim;
- new_wim = ((env->wim >> 1) | (env->wim << (NWINDOWS - 1))) &
- ((1LL << NWINDOWS) - 1);
- save_window_offset(env, (env->cwp - 2) & (NWINDOWS - 1));
+ new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) &
+ ((1LL << env->nwindows) - 1);
+ save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
env->wim = new_wim;
#else
- save_window_offset(env, (env->cwp - 2) & (NWINDOWS - 1));
+ save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
env->cansave++;
env->canrestore--;
#endif
static void restore_window(CPUSPARCState *env)
{
- unsigned int new_wim, i, cwp1;
+#ifndef TARGET_SPARC64
+ unsigned int new_wim;
+#endif
+ unsigned int i, cwp1;
abi_ulong sp_ptr;
- new_wim = ((env->wim << 1) | (env->wim >> (NWINDOWS - 1))) &
- ((1LL << NWINDOWS) - 1);
+#ifndef TARGET_SPARC64
+ new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) &
+ ((1LL << env->nwindows) - 1);
+#endif
/* restore the invalid window */
- cwp1 = (env->cwp + 1) & (NWINDOWS - 1);
+ cwp1 = cpu_cwp_inc(env, env->cwp + 1);
sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
+#ifdef TARGET_SPARC64
+ if (sp_ptr & 3)
+ sp_ptr += SPARC64_STACK_BIAS;
+#endif
#if defined(DEBUG_WIN)
- printf("win_underflow: sp_ptr=0x%x load_cwp=%d\n",
- (int)sp_ptr, cwp1);
+ printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n",
+ sp_ptr, cwp1);
#endif
for(i = 0; i < 16; i++) {
- env->regbase[get_reg_index(env, cwp1, 8 + i)] = tgetl(sp_ptr);
+ /* FIXME - what to do if get_user() fails? */
+ get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
sp_ptr += sizeof(abi_ulong);
}
- env->wim = new_wim;
#ifdef TARGET_SPARC64
env->canrestore++;
- if (env->cleanwin < NWINDOWS - 1)
- env->cleanwin++;
+ if (env->cleanwin < env->nwindows - 1)
+ env->cleanwin++;
env->cansave--;
+#else
+ env->wim = new_wim;
#endif
}
offset = 1;
for(;;) {
/* if restore would invoke restore_window(), then we can stop */
- cwp1 = (env->cwp + offset) & (NWINDOWS - 1);
+ cwp1 = cpu_cwp_inc(env, env->cwp + offset);
+#ifndef TARGET_SPARC64
if (env->wim & (1 << cwp1))
break;
+#else
+ if (env->canrestore == 0)
+ break;
+ env->cansave++;
+ env->canrestore--;
+#endif
save_window_offset(env, cwp1);
offset++;
}
+ cwp1 = cpu_cwp_inc(env, env->cwp + 1);
+#ifndef TARGET_SPARC64
/* set wim so that restore will reload the registers */
- cwp1 = (env->cwp + 1) & (NWINDOWS - 1);
env->wim = 1 << cwp1;
+#endif
#if defined(DEBUG_WIN)
printf("flush_windows: nb=%d\n", offset - 1);
#endif
/* XXX: check env->error_code */
info.si_code = TARGET_SEGV_MAPERR;
info._sifields._sigfault._addr = env->mmuregs[4];
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
}
break;
#else
if (trapnr == TT_DFAULT)
info._sifields._sigfault._addr = env->dmmuregs[4];
else
- info._sifields._sigfault._addr = env->tpc[env->tl];
- queue_signal(info.si_signo, &info);
+ info._sifields._sigfault._addr = env->tsptr->tpc;
+ queue_signal(env, info.si_signo, &info);
}
break;
+#ifndef TARGET_ABI32
case 0x16e:
flush_windows(env);
sparc64_get_context(env);
flush_windows(env);
sparc64_set_context(env);
break;
+#endif
#endif
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
info.si_signo = sig;
info.si_errno = 0;
info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
}
}
break;
do { \
fprintf(stderr, fmt , ##args); \
cpu_dump_state(env, stderr, fprintf, 0); \
- if (loglevel != 0) { \
- fprintf(logfile, fmt , ##args); \
- cpu_dump_state(env, logfile, fprintf, 0); \
- } \
+ qemu_log(fmt, ##args); \
+ log_cpu_state(env, 0); \
} while (0)
void cpu_loop(CPUPPCState *env)
break;
}
info._sifields._sigfault._addr = env->nip;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
break;
case POWERPC_EXCP_ISI: /* Instruction storage exception */
EXCP_DUMP(env, "Invalid instruction fetch: 0x\n" ADDRX "\n",
break;
}
info._sifields._sigfault._addr = env->nip - 4;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
break;
case POWERPC_EXCP_EXTERNAL: /* External input */
cpu_abort(env, "External interrupt while in user mode. "
info.si_errno = 0;
info.si_code = TARGET_BUS_ADRALN;
info._sifields._sigfault._addr = env->nip - 4;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
break;
case POWERPC_EXCP_PROGRAM: /* Program exception */
/* XXX: check this */
break;
}
info._sifields._sigfault._addr = env->nip - 4;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
break;
case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
EXCP_DUMP(env, "No floating point allowed\n");
info.si_errno = 0;
info.si_code = TARGET_ILL_COPROC;
info._sifields._sigfault._addr = env->nip - 4;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
break;
case POWERPC_EXCP_SYSCALL: /* System call exception */
cpu_abort(env, "Syscall exception while in user mode. "
info.si_errno = 0;
info.si_code = TARGET_ILL_COPROC;
info._sifields._sigfault._addr = env->nip - 4;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
break;
case POWERPC_EXCP_DECR: /* Decrementer exception */
cpu_abort(env, "Decrementer interrupt while in user mode. "
cpu_abort(env, "Instruction TLB exception while in user mode. "
"Aborting\n");
break;
- case POWERPC_EXCP_DEBUG: /* Debug interrupt */
- /* XXX: check this */
- {
- int sig;
-
- sig = gdb_handlesig(env, TARGET_SIGTRAP);
- if (sig) {
- info.si_signo = sig;
- info.si_errno = 0;
- info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(info.si_signo, &info);
- }
- }
- break;
-#if defined(TARGET_PPCEMB)
case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */
EXCP_DUMP(env, "No SPE/floating-point instruction allowed\n");
info.si_signo = TARGET_SIGILL;
info.si_errno = 0;
info.si_code = TARGET_ILL_COPROC;
info._sifields._sigfault._addr = env->nip - 4;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
break;
case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */
cpu_abort(env, "Embedded floating-point data IRQ not handled\n");
cpu_abort(env, "Reset interrupt while in user mode. "
"Aborting\n");
break;
-#endif /* defined(TARGET_PPCEMB) */
-#if defined(TARGET_PPC64) && !defined(TARGET_ABI32) /* PowerPC 64 */
case POWERPC_EXCP_DSEG: /* Data segment exception */
cpu_abort(env, "Data segment exception while in user mode. "
"Aborting\n");
cpu_abort(env, "Instruction segment exception "
"while in user mode. Aborting\n");
break;
-#endif /* defined(TARGET_PPC64) && !defined(TARGET_ABI32) */
-#if defined(TARGET_PPC64H) && !defined(TARGET_ABI32)
/* PowerPC 64 with hypervisor mode support */
case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
cpu_abort(env, "Hypervisor decrementer interrupt "
"while in user mode. Aborting\n");
break;
-#endif /* defined(TARGET_PPC64H) && !defined(TARGET_ABI32) */
case POWERPC_EXCP_TRACE: /* Trace exception */
/* Nothing to do:
* we use this exception to emulate step-by-step execution mode.
*/
break;
-#if defined(TARGET_PPC64H) && !defined(TARGET_ABI32)
/* PowerPC 64 with hypervisor mode support */
case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
cpu_abort(env, "Hypervisor data storage exception "
cpu_abort(env, "Hypervisor instruction segment exception "
"while in user mode. Aborting\n");
break;
-#endif /* defined(TARGET_PPC64H) && !defined(TARGET_ABI32) */
case POWERPC_EXCP_VPU: /* Vector unavailable exception */
EXCP_DUMP(env, "No Altivec instructions allowed\n");
info.si_signo = TARGET_SIGILL;
info.si_errno = 0;
info.si_code = TARGET_ILL_COPROC;
info._sifields._sigfault._addr = env->nip - 4;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
break;
case POWERPC_EXCP_PIT: /* Programmable interval timer IRQ */
cpu_abort(env, "Programable interval timer interrupt "
printf("syscall returned 0x%08x (%d)\n", ret, ret);
#endif
break;
+ case EXCP_DEBUG:
+ {
+ int sig;
+
+ sig = gdb_handlesig(env, TARGET_SIGTRAP);
+ if (sig) {
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = TARGET_TRAP_BRKPT;
+ queue_signal(env, info.si_signo, &info);
+ }
+ }
+ break;
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
break;
trapnr = cpu_mips_exec(env);
switch(trapnr) {
case EXCP_SYSCALL:
- syscall_num = env->gpr[2][env->current_tc] - 4000;
- env->PC[env->current_tc] += 4;
+ syscall_num = env->active_tc.gpr[2] - 4000;
+ env->active_tc.PC += 4;
if (syscall_num >= sizeof(mips_syscall_args)) {
ret = -ENOSYS;
} else {
abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;
nb_args = mips_syscall_args[syscall_num];
- sp_reg = env->gpr[29][env->current_tc];
+ sp_reg = env->active_tc.gpr[29];
switch (nb_args) {
/* these arguments are taken from the stack */
- case 8: arg8 = tgetl(sp_reg + 28);
- case 7: arg7 = tgetl(sp_reg + 24);
- case 6: arg6 = tgetl(sp_reg + 20);
- case 5: arg5 = tgetl(sp_reg + 16);
+ /* FIXME - what to do if get_user() fails? */
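+ /* each case deliberately falls through to load the remaining stack arguments */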
+ case 8: get_user_ual(arg8, sp_reg + 28);
+ case 7: get_user_ual(arg7, sp_reg + 24);
+ case 6: get_user_ual(arg6, sp_reg + 20);
+ case 5: get_user_ual(arg5, sp_reg + 16);
default:
break;
}
- ret = do_syscall(env, env->gpr[2][env->current_tc],
- env->gpr[4][env->current_tc],
- env->gpr[5][env->current_tc],
- env->gpr[6][env->current_tc],
- env->gpr[7][env->current_tc],
+ ret = do_syscall(env, env->active_tc.gpr[2],
+ env->active_tc.gpr[4],
+ env->active_tc.gpr[5],
+ env->active_tc.gpr[6],
+ env->active_tc.gpr[7],
arg5, arg6/*, arg7, arg8*/);
}
if ((unsigned int)ret >= (unsigned int)(-1133)) {
- env->gpr[7][env->current_tc] = 1; /* error flag */
+ env->active_tc.gpr[7] = 1; /* error flag */
ret = -ret;
} else {
- env->gpr[7][env->current_tc] = 0; /* error flag */
+ env->active_tc.gpr[7] = 0; /* error flag */
}
- env->gpr[2][env->current_tc] = ret;
+ env->active_tc.gpr[2] = ret;
break;
case EXCP_TLBL:
case EXCP_TLBS:
info.si_signo = TARGET_SIGILL;
info.si_errno = 0;
info.si_code = 0;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
break;
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
info.si_signo = sig;
info.si_errno = 0;
info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
}
}
break;
switch (trapnr) {
case 0x160:
+ env->pc += 2;
ret = do_syscall(env,
env->gregs[3],
env->gregs[4],
env->gregs[6],
env->gregs[7],
env->gregs[0],
- 0);
+ env->gregs[1]);
env->gregs[0] = ret;
- env->pc += 2;
+ break;
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
break;
case EXCP_DEBUG:
{
info.si_signo = sig;
info.si_errno = 0;
info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
}
}
break;
+ case 0xa0:
+ case 0xc0:
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = TARGET_SEGV_MAPERR;
+ info._sifields._sigfault._addr = env->tea;
+ queue_signal(env, info.si_signo, &info);
+ break;
+
default:
printf ("Unhandled trap: 0x%x\n", trapnr);
cpu_dump_state(env, stderr, fprintf, 0);
info.si_errno = 0;
/* XXX: check env->error_code */
info.si_code = TARGET_SEGV_MAPERR;
- info._sifields._sigfault._addr = env->debug1;
- queue_signal(info.si_signo, &info);
+ info._sifields._sigfault._addr = env->pregs[PR_EDA];
+ queue_signal(env, info.si_signo, &info);
}
break;
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
+ break;
case EXCP_BREAK:
ret = do_syscall(env,
env->regs[9],
env->pregs[7],
env->pregs[11]);
env->regs[10] = ret;
- env->pc += 2;
break;
case EXCP_DEBUG:
{
info.si_signo = sig;
info.si_errno = 0;
info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
}
}
break;
info.si_errno = 0;
info.si_code = TARGET_ILL_ILLOPN;
info._sifields._sigfault._addr = env->pc;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
break;
case EXCP_TRAP0:
{
env->dregs[3],
env->dregs[4],
env->dregs[5],
- env->dregs[6]);
+ env->aregs[0]);
}
break;
case EXCP_INTERRUPT:
/* XXX: check env->error_code */
info.si_code = TARGET_SEGV_MAPERR;
info._sifields._sigfault._addr = env->mmu.ar;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
}
break;
case EXCP_DEBUG:
info.si_signo = sig;
info.si_errno = 0;
info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
}
}
break;
exit(1);
break;
case EXCP_CALL_PAL ... (EXCP_CALL_PALP - 1):
- fprintf(stderr, "Call to PALcode\n");
call_pal(env, (trapnr >> 6) | 0x80);
break;
case EXCP_CALL_PALP ... (EXCP_CALL_PALE - 1):
info.si_signo = sig;
info.si_errno = 0;
info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(info.si_signo, &info);
+ queue_signal(env, info.si_signo, &info);
}
}
break;
}
#endif /* TARGET_ALPHA */
-void usage(void)
+static void usage(void)
{
- printf("qemu-" TARGET_ARCH " version " QEMU_VERSION ", Copyright (c) 2003-2007 Fabrice Bellard\n"
- "usage: qemu-" TARGET_ARCH " [-h] [-g] [-d opts] [-L path] [-s size] [-cpu model] program [arguments...]\n"
+ printf("qemu-" TARGET_ARCH " version " QEMU_VERSION ", Copyright (c) 2003-2008 Fabrice Bellard\n"
+ "usage: qemu-" TARGET_ARCH " [options] program [arguments...]\n"
"Linux CPU emulator (compiled for %s emulation)\n"
"\n"
+ "Standard options:\n"
"-h print this help\n"
"-g port wait gdb connection to port\n"
"-L path set the elf interpreter prefix (default=%s)\n"
"-s size set the stack size in bytes (default=%ld)\n"
"-cpu model select CPU (-cpu ? for list)\n"
"-drop-ld-preload drop LD_PRELOAD for target process\n"
+ "-E var=value sets/modifies targets environment variable(s)\n"
+ "-U var unsets targets environment variable(s)\n"
"\n"
- "debug options:\n"
+ "Debug options:\n"
"-d options activate log (logfile=%s)\n"
"-p pagesize set the host page size to 'pagesize'\n"
- "-strace log system calls\n",
+ "-strace log system calls\n"
+ "\n"
+ "Environment variables:\n"
+ "QEMU_STRACE Print system calls and arguments similar to the\n"
+ " 'strace' program. Enable by setting to any value.\n"
+ "You can use -E and -U options to set/unset environment variables\n"
+ "for target process. It is possible to provide several variables\n"
+ "by repeating the option. For example:\n"
+ " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
+ "Note that if you provide several changes to single variable\n"
+ "last change will stay in effect.\n"
+ ,
TARGET_ARCH,
interp_prefix,
x86_stack_size,
_exit(1);
}
-/* XXX: currently only used for async signals (see signal.c) */
-CPUState *global_env;
-
-/* used to free thread contexts */
-TaskState *first_task_state;
+THREAD CPUState *thread_env;
-int main(int argc, char **argv)
+/* Assumes contents are already zeroed. */
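+/* Links sigqueue_table into the per-task free list that the signal queueing
+   code draws pending-signal entries from. */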
+void init_task_state(TaskState *ts)
+{
+ int i;
+
+ ts->used = 1;
+ ts->first_free = ts->sigqueue_table;
+ for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
+ ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1];
+ }
+ ts->sigqueue_table[i].next = NULL;
+}
+
+int main(int argc, char **argv, char **envp)
{
const char *filename;
const char *cpu_model;
int optind;
const char *r;
int gdbstub_port = 0;
- int drop_ld_preload = 0, environ_count = 0;
- char **target_environ, **wrk, **dst;
+ char **target_environ, **wrk;
+ envlist_t *envlist = NULL;
if (argc <= 1)
usage();
+ qemu_cache_utils_init(envp);
+
/* init debug */
cpu_set_log_filename(DEBUG_LOGFILE);
+ if ((envlist = envlist_create()) == NULL) {
+ (void) fprintf(stderr, "Unable to allocate envlist\n");
+ exit(1);
+ }
+
+ /* add current environment into the list */
+ for (wrk = environ; *wrk != NULL; wrk++) {
+ (void) envlist_setenv(envlist, *wrk);
+ }
+
cpu_model = NULL;
optind = 1;
for(;;) {
break;
} else if (!strcmp(r, "d")) {
int mask;
- CPULogItem *item;
+ const CPULogItem *item;
if (optind >= argc)
break;
exit(1);
}
cpu_set_log(mask);
+ } else if (!strcmp(r, "E")) {
+ r = argv[optind++];
+ if (envlist_setenv(envlist, r) != 0)
+ usage();
+ } else if (!strcmp(r, "U")) {
+ r = argv[optind++];
+ if (envlist_unsetenv(envlist, r) != 0)
+ usage();
} else if (!strcmp(r, "s")) {
+ if (optind >= argc)
+ break;
r = argv[optind++];
x86_stack_size = strtol(r, (char **)&r, 0);
if (x86_stack_size <= 0)
} else if (!strcmp(r, "L")) {
interp_prefix = argv[optind++];
} else if (!strcmp(r, "p")) {
+ if (optind >= argc)
+ break;
qemu_host_page_size = atoi(argv[optind++]);
if (qemu_host_page_size == 0 ||
(qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
exit(1);
}
} else if (!strcmp(r, "g")) {
+ if (optind >= argc)
+ break;
gdbstub_port = atoi(argv[optind++]);
} else if (!strcmp(r, "r")) {
qemu_uname_release = argv[optind++];
} else if (!strcmp(r, "cpu")) {
cpu_model = argv[optind++];
- if (strcmp(cpu_model, "?") == 0) {
+ if (cpu_model == NULL || strcmp(cpu_model, "?") == 0) {
/* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
cpu_list(stdout, &fprintf);
_exit(1);
}
} else if (!strcmp(r, "drop-ld-preload")) {
- drop_ld_preload = 1;
+ (void) envlist_unsetenv(envlist, "LD_PRELOAD");
} else if (!strcmp(r, "strace")) {
do_strace = 1;
} else
if (optind >= argc)
usage();
filename = argv[optind];
+ exec_path = argv[optind];
/* Zero out regs */
memset(regs, 0, sizeof(struct target_pt_regs));
cpu_model = "24Kf";
#endif
#elif defined(TARGET_PPC)
+#ifdef TARGET_PPC64
+ cpu_model = "970";
+#else
cpu_model = "750";
+#endif
#else
cpu_model = "any";
#endif
}
+ cpu_exec_init_all(0);
/* NOTE: we need to init the CPU at this stage to get
qemu_host_page_size */
env = cpu_init(cpu_model);
fprintf(stderr, "Unable to find CPU definition\n");
exit(1);
}
- global_env = env;
+ thread_env = env;
if (getenv("QEMU_STRACE")) {
do_strace = 1;
}
- wrk = environ;
- while (*(wrk++))
- environ_count++;
-
- target_environ = malloc((environ_count + 1) * sizeof(char *));
- if (!target_environ)
- abort();
- for (wrk = environ, dst = target_environ; *wrk; wrk++) {
- if (drop_ld_preload && !strncmp(*wrk, "LD_PRELOAD=", 11))
- continue;
- *(dst++) = strdup(*wrk);
- }
- *dst = NULL; /* NULL terminate target_environ */
+ target_environ = envlist_to_environ(envlist, NULL);
+ envlist_free(envlist);
if (loader_exec(filename, argv+optind, target_environ, regs, info) != 0) {
printf("Error loading %s\n", filename);
free(target_environ);
- if (loglevel) {
- page_dump(logfile);
-
- fprintf(logfile, "start_brk 0x" TARGET_FMT_lx "\n", info->start_brk);
- fprintf(logfile, "end_code 0x" TARGET_FMT_lx "\n", info->end_code);
- fprintf(logfile, "start_code 0x" TARGET_FMT_lx "\n",
- info->start_code);
- fprintf(logfile, "start_data 0x" TARGET_FMT_lx "\n",
- info->start_data);
- fprintf(logfile, "end_data 0x" TARGET_FMT_lx "\n", info->end_data);
- fprintf(logfile, "start_stack 0x" TARGET_FMT_lx "\n",
- info->start_stack);
- fprintf(logfile, "brk 0x" TARGET_FMT_lx "\n", info->brk);
- fprintf(logfile, "entry 0x" TARGET_FMT_lx "\n", info->entry);
+ if (qemu_log_enabled()) {
+ log_page_dump();
+
+ qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
+ qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code);
+ qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n",
+ info->start_code);
+ qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n",
+ info->start_data);
+ qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data);
+ qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
+ info->start_stack);
+ qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk);
+ qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry);
}
target_set_brk(info->brk);
/* build Task State */
memset(ts, 0, sizeof(TaskState));
- env->opaque = ts;
- ts->used = 1;
+ init_task_state(ts);
ts->info = info;
- env->user_mode_only = 1;
+ env->opaque = ts;
#if defined(TARGET_I386)
cpu_x86_set_cpl(env, 3);
env->cr[4] |= CR4_OSFXSR_MASK;
env->hflags |= HF_OSFXSR_MASK;
}
+#ifndef TARGET_ABI32
+ /* enable 64 bit mode if possible */
+ if (!(env->cpuid_ext2_features & CPUID_EXT2_LM)) {
+ fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
+ exit(1);
+ }
+ env->cr[4] |= CR4_PAE_MASK;
+ env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
+ env->hflags |= HF_LMA_MASK;
+#endif
/* flags setup : we activate the IRQs by default as in user mode */
env->eflags |= IF_MASK;
/* linux register setup */
-#if defined(TARGET_X86_64)
+#ifndef TARGET_ABI32
env->regs[R_EAX] = regs->rax;
env->regs[R_EBX] = regs->rbx;
env->regs[R_ECX] = regs->rcx;
#endif
/* linux interrupt setup */
- env->idt.base = h2g(idt_table);
- env->idt.limit = sizeof(idt_table) - 1;
+#ifndef TARGET_ABI32
+ env->idt.limit = 511;
+#else
+ env->idt.limit = 255;
+#endif
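+ /* The IDT has to live at a guest address, so allocate it with target_mmap()
+    and keep a host pointer to it (via g2h) for filling in the entries. */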
+ env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
+ PROT_READ|PROT_WRITE,
+ MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ idt_table = g2h(env->idt.base);
set_idt(0, 0);
set_idt(1, 0);
set_idt(2, 0);
set_idt(3, 3);
set_idt(4, 3);
- set_idt(5, 3);
+ set_idt(5, 0);
set_idt(6, 0);
set_idt(7, 0);
set_idt(8, 0);
set_idt(0x80, 3);
/* linux segment setup */
- env->gdt.base = h2g(gdt_table);
- env->gdt.limit = sizeof(gdt_table) - 1;
- write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
- (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
- write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
- (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
+ {
+ uint64_t *gdt_table;
+ env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
+ PROT_READ|PROT_WRITE,
+ MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
+ gdt_table = g2h(env->gdt.base);
+#ifdef TARGET_ABI32
+ write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
+ (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
+#else
+ /* 64 bit code segment */
+ write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
+ DESC_L_MASK |
+ (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
+#endif
+ write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
+ (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
+ }
cpu_x86_load_seg(env, R_CS, __USER_CS);
+ cpu_x86_load_seg(env, R_SS, __USER_DS);
+#ifdef TARGET_ABI32
cpu_x86_load_seg(env, R_DS, __USER_DS);
cpu_x86_load_seg(env, R_ES, __USER_DS);
- cpu_x86_load_seg(env, R_SS, __USER_DS);
cpu_x86_load_seg(env, R_FS, __USER_DS);
cpu_x86_load_seg(env, R_GS, __USER_DS);
-
/* This hack makes Wine work... */
env->segs[R_FS].selector = 0;
+#else
+ cpu_x86_load_seg(env, R_DS, 0);
+ cpu_x86_load_seg(env, R_ES, 0);
+ cpu_x86_load_seg(env, R_FS, 0);
+ cpu_x86_load_seg(env, R_GS, 0);
+#endif
#elif defined(TARGET_ARM)
{
int i;
int i;
for(i = 0; i < 32; i++) {
- env->gpr[i][env->current_tc] = regs->regs[i];
+ env->active_tc.gpr[i] = regs->regs[i];
}
- env->PC[env->current_tc] = regs->cp0_epc;
+ env->active_tc.PC = regs->cp0_epc;
}
#elif defined(TARGET_SH4)
{