/*
 * QEMU CPU model
 *
 * Copyright (c) 2012-2014 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qom/cpu.h"
#include "sysemu/hw_accel.h"
#include "qemu/notify.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "trace-root.h"

CPUInterruptHandler cpu_interrupt_handler;

CPUState *cpu_by_arch_id(int64_t id)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->get_arch_id(cpu) == id) {
            return cpu;
        }
    }
    return NULL;
}

bool cpu_exists(int64_t id)
{
    return !!cpu_by_arch_id(id);
}

CPUState *cpu_create(const char *typename)
{
    Error *err = NULL;
    CPUState *cpu = CPU(object_new(typename));
    object_property_set_bool(OBJECT(cpu), true, "realized", &err);
    if (err != NULL) {
        error_report_err(err);
        object_unref(OBJECT(cpu));
        exit(EXIT_FAILURE);
    }
    return cpu;
}

bool cpu_paging_enabled(const CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return cc->get_paging_enabled(cpu);
}

static bool cpu_common_get_paging_enabled(const CPUState *cpu)
{
    return false;
}

void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->get_memory_mapping(cpu, list, errp);
}

static void cpu_common_get_memory_mapping(CPUState *cpu,
                                          MemoryMappingList *list,
                                          Error **errp)
{
    error_setg(errp, "Obtaining memory mappings is unsupported on this CPU.");
}

/* Resetting the IRQ comes from across the code base so we take the
 * BQL here if we need to.  cpu_interrupt assumes it is held. */
void cpu_reset_interrupt(CPUState *cpu, int mask)
{
    bool need_lock = !qemu_mutex_iothread_locked();

    if (need_lock) {
        qemu_mutex_lock_iothread();
    }
    cpu->interrupt_request &= ~mask;
    if (need_lock) {
        qemu_mutex_unlock_iothread();
    }
}

void cpu_exit(CPUState *cpu)
{
    atomic_set(&cpu->exit_request, 1);
    /* Ensure cpu_exec will see the exit request after TCG has exited. */
    smp_wmb();
    atomic_set(&cpu->icount_decr_ptr->u16.high, -1);
}

int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf32_qemunote)(f, cpu, opaque);
}

static int cpu_common_write_elf32_qemunote(WriteCoreDumpFunction f,
                                           CPUState *cpu, void *opaque)
{
    return 0;
}

int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf32_note)(f, cpu, cpuid, opaque);
}

static int cpu_common_write_elf32_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}

int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf64_qemunote)(f, cpu, opaque);
}

static int cpu_common_write_elf64_qemunote(WriteCoreDumpFunction f,
                                           CPUState *cpu, void *opaque)
{
    return 0;
}

int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf64_note)(f, cpu, cpuid, opaque);
}

static int cpu_common_write_elf64_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}


static int cpu_common_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}

static int cpu_common_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}

static bool cpu_common_debug_check_watchpoint(CPUState *cpu, CPUWatchpoint *wp)
{
    /* If no extra check is required, QEMU watchpoint match can be considered
     * as an architectural match.
     */
    return true;
}

static bool cpu_common_virtio_is_big_endian(CPUState *cpu)
{
    return target_words_bigendian();
}

static void cpu_common_noop(CPUState *cpu)
{
}

static bool cpu_common_exec_interrupt(CPUState *cpu, int int_req)
{
    return false;
}

GuestPanicInformation *cpu_get_crash_info(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    GuestPanicInformation *res = NULL;

    if (cc->get_crash_info) {
        res = cc->get_crash_info(cpu);
    }
    return res;
}

void cpu_dump_state(CPUState *cpu, FILE *f, int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->dump_state) {
        cpu_synchronize_state(cpu);
        cc->dump_state(cpu, f, flags);
    }
}

void cpu_dump_statistics(CPUState *cpu, int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->dump_statistics) {
        cc->dump_statistics(cpu, flags);
    }
}

void cpu_reset(CPUState *cpu)
{
    CPUClass *klass = CPU_GET_CLASS(cpu);

    if (klass->reset != NULL) {
        (*klass->reset)(cpu);
    }

    trace_guest_cpu_reset(cpu);
}

static void cpu_common_reset(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index);
        log_cpu_state(cpu, cc->reset_dump_flags);
    }

    cpu->interrupt_request = 0;
    cpu->halted = 0;
    cpu->mem_io_pc = 0;
    cpu->mem_io_vaddr = 0;
    cpu->icount_extra = 0;
    atomic_set(&cpu->icount_decr_ptr->u32, 0);
    cpu->can_do_io = 1;
    cpu->exception_index = -1;
    cpu->crash_occurred = false;
    cpu->cflags_next_tb = -1;

    if (tcg_enabled()) {
        cpu_tb_jmp_cache_clear(cpu);

        tcg_flush_softmmu_tlb(cpu);
    }
}

static bool cpu_common_has_work(CPUState *cs)
{
    return false;
}

ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model)
{
    CPUClass *cc = CPU_CLASS(object_class_by_name(typename));

    assert(cpu_model && cc->class_by_name);
    return cc->class_by_name(cpu_model);
}

static void cpu_common_parse_features(const char *typename, char *features,
                                      Error **errp)
{
    char *val;
    static bool cpu_globals_initialized;
    /* Single "key=value" string being parsed */
    char *featurestr = features ? strtok(features, ",") : NULL;

    /* should be called only once, catch invalid users */
    assert(!cpu_globals_initialized);
    cpu_globals_initialized = true;

    while (featurestr) {
        val = strchr(featurestr, '=');
        if (val) {
            GlobalProperty *prop = g_new0(typeof(*prop), 1);
            *val = 0;
            val++;
            prop->driver = typename;
            prop->property = g_strdup(featurestr);
            prop->value = g_strdup(val);
            qdev_prop_register_global(prop);
        } else {
            error_setg(errp, "Expected key=value format, found %s.",
                       featurestr);
            return;
        }
        featurestr = strtok(NULL, ",");
    }
}

static void cpu_common_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);
    Object *machine = qdev_get_machine();

    /* qdev_get_machine() can return something that's not TYPE_MACHINE
     * if this is one of the user-only emulators; in that case there's
     * no need to check the ignore_memory_transaction_failures board flag.
     */
    if (object_dynamic_cast(machine, TYPE_MACHINE)) {
        ObjectClass *oc = object_get_class(machine);
        MachineClass *mc = MACHINE_CLASS(oc);

        if (mc) {
            cpu->ignore_memory_transaction_failures =
                mc->ignore_memory_transaction_failures;
        }
    }

    if (dev->hotplugged) {
        cpu_synchronize_post_init(cpu);
        cpu_resume(cpu);
    }

    /* NOTE: latest generic point where the cpu is fully realized */
    trace_init_vcpu(cpu);
}

static void cpu_common_unrealizefn(DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);
    /* NOTE: latest generic point before the cpu is fully unrealized */
    trace_fini_vcpu(cpu);
    cpu_exec_unrealizefn(cpu);
}

static void cpu_common_initfn(Object *obj)
{
    CPUState *cpu = CPU(obj);
    CPUClass *cc = CPU_GET_CLASS(obj);

    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu->cluster_index = UNASSIGNED_CLUSTER_INDEX;
    cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs;
    /* *-user doesn't have configurable SMP topology */
    /* the default value is changed by qemu_init_vcpu() for softmmu */
    cpu->nr_cores = 1;
    cpu->nr_threads = 1;

    qemu_mutex_init(&cpu->work_mutex);
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);

    cpu_exec_initfn(cpu);
}

static void cpu_common_finalize(Object *obj)
{
    CPUState *cpu = CPU(obj);

    qemu_mutex_destroy(&cpu->work_mutex);
}

static int64_t cpu_common_get_arch_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

static vaddr cpu_adjust_watchpoint_address(CPUState *cpu, vaddr addr, int len)
{
    return addr;
}

static void generic_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

CPUInterruptHandler cpu_interrupt_handler = generic_handle_interrupt;

static void cpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    CPUClass *k = CPU_CLASS(klass);

    k->parse_features = cpu_common_parse_features;
    k->reset = cpu_common_reset;
    k->get_arch_id = cpu_common_get_arch_id;
    k->has_work = cpu_common_has_work;
    k->get_paging_enabled = cpu_common_get_paging_enabled;
    k->get_memory_mapping = cpu_common_get_memory_mapping;
    k->write_elf32_qemunote = cpu_common_write_elf32_qemunote;
    k->write_elf32_note = cpu_common_write_elf32_note;
    k->write_elf64_qemunote = cpu_common_write_elf64_qemunote;
    k->write_elf64_note = cpu_common_write_elf64_note;
    k->gdb_read_register = cpu_common_gdb_read_register;
    k->gdb_write_register = cpu_common_gdb_write_register;
    k->virtio_is_big_endian = cpu_common_virtio_is_big_endian;
    k->debug_excp_handler = cpu_common_noop;
    k->debug_check_watchpoint = cpu_common_debug_check_watchpoint;
    k->cpu_exec_enter = cpu_common_noop;
    k->cpu_exec_exit = cpu_common_noop;
    k->cpu_exec_interrupt = cpu_common_exec_interrupt;
    k->adjust_watchpoint_address = cpu_adjust_watchpoint_address;
    set_bit(DEVICE_CATEGORY_CPU, dc->categories);
    dc->realize = cpu_common_realizefn;
    dc->unrealize = cpu_common_unrealizefn;
    dc->props = cpu_common_props;
    /*
     * Reason: CPUs still need special care by board code: wiring up
     * IRQs, adding reset handlers, halting non-first CPUs, ...
     */
    dc->user_creatable = false;
}

static const TypeInfo cpu_type_info = {
    .name = TYPE_CPU,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(CPUState),
    .instance_init = cpu_common_initfn,
    .instance_finalize = cpu_common_finalize,
    .abstract = true,
    .class_size = sizeof(CPUClass),
    .class_init = cpu_class_init,
};

static void cpu_register_types(void)
{
    type_register_static(&cpu_type_info);
}

type_init(cpu_register_types)