/*
 * Target-specific parts of the CPU object
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qapi/error.h"

#include "exec/target_page.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "qemu/error-report.h"
#include "migration/vmstate.h"
#ifdef CONFIG_USER_ONLY
#include "qemu.h"
#else
#include "exec/address-spaces.h"
#endif
#include "sysemu/tcg.h"
#include "sysemu/kvm.h"
#include "sysemu/replay.h"
#include "exec/translate-all.h"
#include "exec/log.h"

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

#ifndef CONFIG_USER_ONLY
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu);

    /* loadvm has just updated the content of RAM, bypassing the
     * usual mechanisms that ensure we flush TBs for writes to
     * memory we've translated code from. So we must flush all TBs,
     * which will now be stale.
     */
    tb_flush(cpu);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
#endif

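/*
 * Common realize step shared by all CPU models: add the vCPU to the
 * global CPU list, set up per-vCPU TCG state when TCG is in use and,
 * in softmmu builds, register the common (and, if the target provides
 * one, the per-target) vmstate for migration. Typically called early
 * from a target's DeviceClass realize hook.
 */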
void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_add(cpu);

#ifdef CONFIG_TCG
    /* NB: errp parameter is unused currently */
    if (tcg_enabled()) {
        tcg_exec_realizefn(cpu, errp);
    }
#endif /* CONFIG_TCG */

#ifdef CONFIG_USER_ONLY
    assert(cc->vmsd == NULL);
#else
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif /* CONFIG_USER_ONLY */
}

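/*
 * Mirror of cpu_exec_realizefn(), with the steps performed in reverse
 * order: unregister vmstate, tear down per-vCPU TCG state, and drop
 * the vCPU from the global CPU list.
 */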
void cpu_exec_unrealizefn(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

#ifdef CONFIG_USER_ONLY
    assert(cc->vmsd == NULL);
#else
    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
#endif
#ifdef CONFIG_TCG
    if (tcg_enabled()) {
        tcg_exec_unrealizefn(cpu);
    }
#endif /* CONFIG_TCG */

    cpu_list_remove(cpu);
}

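/*
 * Early instance initialization shared by all CPU models: clear the
 * address space fields and, in softmmu builds, record the creating
 * thread and take a reference on the system memory region that the
 * vCPU will use.
 */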
void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
    cpu->memory = get_system_memory();
    object_ref(OBJECT(cpu->memory));
#endif
}

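/*
 * parse_cpu_option() turns a "-cpu" command line argument of the form
 * "model[,feature,...]" into a resolved QOM CPU type name, handing any
 * feature string to the CPU class's parse_features hook (which exits
 * via &error_fatal on bad input). Illustrative example: with
 * "-cpu cortex-a57,aarch64=off", "cortex-a57" is resolved to the QOM
 * type name and "aarch64=off" is passed on for feature parsing.
 */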
const char *parse_cpu_option(const char *cpu_option)
{
    ObjectClass *oc;
    CPUClass *cc;
    gchar **model_pieces;
    const char *cpu_type;

    model_pieces = g_strsplit(cpu_option, ",", 2);
    if (!model_pieces[0]) {
        error_report("-cpu option cannot be empty");
        exit(1);
    }

    oc = cpu_class_by_name(CPU_RESOLVING_TYPE, model_pieces[0]);
    if (oc == NULL) {
        error_report("unable to find CPU model '%s'", model_pieces[0]);
        g_strfreev(model_pieces);
        exit(EXIT_FAILURE);
    }

    cpu_type = object_class_get_name(oc);
    cc = CPU_CLASS(oc);
    cc->parse_features(cpu_type, model_pieces[1], &error_fatal);
    g_strfreev(model_pieces);
    return cpu_type;
}

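/*
 * TB invalidation helpers, used among other things by the breakpoint
 * code below. In user-only mode a guest address can be invalidated
 * directly; in softmmu mode it must first be translated to a
 * ram_addr_t through the target AddressSpace.
 */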
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr)
{
    mmap_lock();
    tb_invalidate_phys_page_range(addr, addr + 1);
    mmap_unlock();
}

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_addr(pc);
}
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    if (!tcg_enabled()) {
        return;
    }

    RCU_READ_LOCK_GUARD();
    mr = address_space_translate(as, addr, &addr, &l, false, attrs);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
}

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    /*
     * There may not be a virtual to physical translation for the pc
     * right now, but there may exist cached TB for this pc.
     * Flush the whole TB cache to force re-translation of such TBs.
     * This is heavyweight, but we're debugging anyway.
     */
    tb_flush(cpu);
}
#endif

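/*
 * Guest-debug breakpoints. The list hanging off cpu->breakpoints is
 * consulted by the TCG translator (to raise EXCP_DEBUG) and by the
 * gdbstub; every insertion or removal therefore invalidates any TB
 * translated from the affected pc.
 */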
/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

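/*
 * Usage sketch (illustrative, not code from this file): a debugger
 * front end such as the gdbstub drives the API above roughly like so:
 *
 *     CPUBreakpoint *bp;
 *     if (cpu_breakpoint_insert(cpu, pc, BP_GDB, &bp) == 0) {
 *         ...run until the CPU loop reports EXCP_DEBUG...
 *         cpu_breakpoint_remove(cpu, pc, BP_GDB);
 *     }
 *
 * and calls cpu_breakpoint_remove_all(cpu, BP_GDB) when the debugger
 * detaches.
 */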
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

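/*
 * Fatal-error helper for target code: prints the formatted message and
 * a CPU state dump to stderr (and to the QEMU log when a separate log
 * file is active), then abort()s. In user-only mode the SIGABRT
 * handler is reset first, so a handler installed by the guest cannot
 * intercept the abort.
 */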
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        FILE *logfile = qemu_log_lock();
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_unlock(logfile);
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        act.sa_flags = 0;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        void *ptr, target_ulong len, bool is_write)
{
    int flags;
    target_ulong l, page;
    void *p;
    uint8_t *buf = ptr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            p = lock_user(VERIFY_WRITE, addr, l, 0);
            if (!p) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            p = lock_user(VERIFY_READ, addr, l, 1);
            if (!p) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

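/*
 * qemu_host_page_size ends up as the larger of the real host page size
 * and TARGET_PAGE_SIZE; the mask is derived by negation, since negating
 * a power of two yields its alignment mask. Illustrative example: for
 * a 4096-byte page, -(intptr_t)4096 is ~0xfff, so ANDing an address
 * with the mask clears the low 12 bits and rounds it down to the start
 * of its page.
 */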
void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}