/*
 * x86 gdb server stub
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 * Copyright (c) 2013 SUSE LINUX Products GmbH
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifdef TARGET_X86_64
/*
 * Map gdb's x86-64 register numbers (rax, rbx, rcx, rdx, rsi, rdi,
 * rbp, rsp, r8..r15) onto QEMU's env->regs[] indices.  The first
 * eight differ from QEMU's hardware-encoding order, hence the table.
 */
static const int gpr_map[16] = {
    R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
    8, 9, 10, 11, 12, 13, 14, 15
};
#else
#define gpr_map gpr_map32
#endif
/* gdb's i386 numbering happens to match env->regs[] directly (identity map). */
static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

/*
 * gdb register-number layout: GPRs first, then eip, eflags, six segment
 * selectors, sixteen FPU slots (8 data regs + 8 status/control words),
 * the XMM registers, and finally mxcsr.
 * NOTE(review): values presumably mirror gdb's i386/x86-64 target
 * descriptions -- confirm against gdb's register XML if changing.
 */
#define IDX_IP_REG CPU_NB_REGS
#define IDX_FLAGS_REG (IDX_IP_REG + 1)
#define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
#define IDX_FP_REGS (IDX_SEG_REGS + 6)
#define IDX_XMM_REGS (IDX_FP_REGS + 16)
#define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
37 | ||
/*
 * Copy the value of guest register n into mem_buf in gdb wire format.
 *
 * Returns the number of bytes written to mem_buf, or 0 for a register
 * index this stub does not handle.
 *
 * NOTE(review): GET_REG32()/GET_REG64() are gdbstub macros defined in
 * the including file; they appear to store the value into mem_buf and
 * return its size, which is why the switch below needs no break
 * statements -- confirm against the macro definitions before changing
 * case order.
 */
static int cpu_gdb_read_register(CPUX86State *env, uint8_t *mem_buf, int n)
{
    if (n < CPU_NB_REGS) {
        /* General-purpose registers. */
        if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
            /* Running 64-bit code: report the full-width register. */
            GET_REG64(env->regs[gpr_map[n]]);
        } else if (n < CPU_NB_REGS32) {
            GET_REG32(env->regs[gpr_map32[n]]);
        }
        /* r8..r15 requested while in 32-bit mode: fall through to 0. */
    } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
        /* x87 data registers: 10 bytes (80-bit extended precision) each. */
#ifdef USE_X86LDOUBLE
        /* FIXME: byteswap float values - after fixing fpregs layout. */
        memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
#else
        /* Softfloat layout differs; report zeros rather than garbage. */
        memset(mem_buf, 0, 10);
#endif
        return 10;
    } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
        n -= IDX_XMM_REGS;
        /* xmm8..xmm15 are only visible from 64-bit code. */
        if (n < CPU_NB_REGS32 ||
            (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
            stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
            stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
            return 16;
        }
    } else {
        switch (n) {
        case IDX_IP_REG:
            if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
                GET_REG64(env->eip);
            } else {
                GET_REG32(env->eip);
            }
        case IDX_FLAGS_REG:
            GET_REG32(env->eflags);

        /* Segment selectors are sent as 32-bit values on the wire. */
        case IDX_SEG_REGS:
            GET_REG32(env->segs[R_CS].selector);
        case IDX_SEG_REGS + 1:
            GET_REG32(env->segs[R_SS].selector);
        case IDX_SEG_REGS + 2:
            GET_REG32(env->segs[R_DS].selector);
        case IDX_SEG_REGS + 3:
            GET_REG32(env->segs[R_ES].selector);
        case IDX_SEG_REGS + 4:
            GET_REG32(env->segs[R_FS].selector);
        case IDX_SEG_REGS + 5:
            GET_REG32(env->segs[R_GS].selector);

        case IDX_FP_REGS + 8:
            GET_REG32(env->fpuc);
        case IDX_FP_REGS + 9:
            /* Reassemble fsw: TOP (fpstt) lives in bits 11-13. */
            GET_REG32((env->fpus & ~0x3800) |
                      (env->fpstt & 0x7) << 11);
        /* The remaining x87 words are not tracked; report zero. */
        case IDX_FP_REGS + 10:
            GET_REG32(0); /* ftag */
        case IDX_FP_REGS + 11:
            GET_REG32(0); /* fiseg */
        case IDX_FP_REGS + 12:
            GET_REG32(0); /* fioff */
        case IDX_FP_REGS + 13:
            GET_REG32(0); /* foseg */
        case IDX_FP_REGS + 14:
            GET_REG32(0); /* fooff */
        case IDX_FP_REGS + 15:
            GET_REG32(0); /* fop */

        case IDX_MXCSR_REG:
            GET_REG32(env->mxcsr);
        }
    }
    /* Unrecognised register. */
    return 0;
}
110 | ||
111 | static int cpu_x86_gdb_load_seg(CPUX86State *env, int sreg, uint8_t *mem_buf) | |
112 | { | |
113 | uint16_t selector = ldl_p(mem_buf); | |
114 | ||
115 | if (selector != env->segs[sreg].selector) { | |
116 | #if defined(CONFIG_USER_ONLY) | |
117 | cpu_x86_load_seg(env, sreg, selector); | |
118 | #else | |
119 | unsigned int limit, flags; | |
120 | target_ulong base; | |
121 | ||
122 | if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) { | |
123 | base = selector << 4; | |
124 | limit = 0xffff; | |
125 | flags = 0; | |
126 | } else { | |
127 | if (!cpu_x86_get_descr_debug(env, selector, &base, &limit, | |
128 | &flags)) { | |
129 | return 4; | |
130 | } | |
131 | } | |
132 | cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags); | |
133 | #endif | |
134 | } | |
135 | return 4; | |
136 | } | |
137 | ||
138 | static int cpu_gdb_write_register(CPUX86State *env, uint8_t *mem_buf, int n) | |
139 | { | |
140 | uint32_t tmp; | |
141 | ||
142 | if (n < CPU_NB_REGS) { | |
143 | if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) { | |
144 | env->regs[gpr_map[n]] = ldtul_p(mem_buf); | |
145 | return sizeof(target_ulong); | |
146 | } else if (n < CPU_NB_REGS32) { | |
147 | n = gpr_map32[n]; | |
148 | env->regs[n] &= ~0xffffffffUL; | |
149 | env->regs[n] |= (uint32_t)ldl_p(mem_buf); | |
150 | return 4; | |
151 | } | |
152 | } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) { | |
153 | #ifdef USE_X86LDOUBLE | |
154 | /* FIXME: byteswap float values - after fixing fpregs layout. */ | |
155 | memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10); | |
156 | #endif | |
157 | return 10; | |
158 | } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) { | |
159 | n -= IDX_XMM_REGS; | |
160 | if (n < CPU_NB_REGS32 || | |
161 | (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) { | |
162 | env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf); | |
163 | env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8); | |
164 | return 16; | |
165 | } | |
166 | } else { | |
167 | switch (n) { | |
168 | case IDX_IP_REG: | |
169 | if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) { | |
170 | env->eip = ldq_p(mem_buf); | |
171 | return 8; | |
172 | } else { | |
173 | env->eip &= ~0xffffffffUL; | |
174 | env->eip |= (uint32_t)ldl_p(mem_buf); | |
175 | return 4; | |
176 | } | |
177 | case IDX_FLAGS_REG: | |
178 | env->eflags = ldl_p(mem_buf); | |
179 | return 4; | |
180 | ||
181 | case IDX_SEG_REGS: | |
182 | return cpu_x86_gdb_load_seg(env, R_CS, mem_buf); | |
183 | case IDX_SEG_REGS + 1: | |
184 | return cpu_x86_gdb_load_seg(env, R_SS, mem_buf); | |
185 | case IDX_SEG_REGS + 2: | |
186 | return cpu_x86_gdb_load_seg(env, R_DS, mem_buf); | |
187 | case IDX_SEG_REGS + 3: | |
188 | return cpu_x86_gdb_load_seg(env, R_ES, mem_buf); | |
189 | case IDX_SEG_REGS + 4: | |
190 | return cpu_x86_gdb_load_seg(env, R_FS, mem_buf); | |
191 | case IDX_SEG_REGS + 5: | |
192 | return cpu_x86_gdb_load_seg(env, R_GS, mem_buf); | |
193 | ||
194 | case IDX_FP_REGS + 8: | |
195 | env->fpuc = ldl_p(mem_buf); | |
196 | return 4; | |
197 | case IDX_FP_REGS + 9: | |
198 | tmp = ldl_p(mem_buf); | |
199 | env->fpstt = (tmp >> 11) & 7; | |
200 | env->fpus = tmp & ~0x3800; | |
201 | return 4; | |
202 | case IDX_FP_REGS + 10: /* ftag */ | |
203 | return 4; | |
204 | case IDX_FP_REGS + 11: /* fiseg */ | |
205 | return 4; | |
206 | case IDX_FP_REGS + 12: /* fioff */ | |
207 | return 4; | |
208 | case IDX_FP_REGS + 13: /* foseg */ | |
209 | return 4; | |
210 | case IDX_FP_REGS + 14: /* fooff */ | |
211 | return 4; | |
212 | case IDX_FP_REGS + 15: /* fop */ | |
213 | return 4; | |
214 | ||
215 | case IDX_MXCSR_REG: | |
216 | env->mxcsr = ldl_p(mem_buf); | |
217 | return 4; | |
218 | } | |
219 | } | |
220 | /* Unrecognised register. */ | |
221 | return 0; | |
222 | } |