qemu.git: hw/core/cpu.c
/*
 * QEMU CPU model
 *
 * Copyright (c) 2012-2014 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/core/cpu.h"
#include "sysemu/hw_accel.h"
#include "qemu/notify.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "sysemu/tcg.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "trace-root.h"
#include "qemu/plugin.h"

CPUInterruptHandler cpu_interrupt_handler;

CPUState *cpu_by_arch_id(int64_t id)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->get_arch_id(cpu) == id) {
            return cpu;
        }
    }
    return NULL;
}

bool cpu_exists(int64_t id)
{
    return !!cpu_by_arch_id(id);
}

CPUState *cpu_create(const char *typename)
{
    Error *err = NULL;
    CPUState *cpu = CPU(object_new(typename));
    object_property_set_bool(OBJECT(cpu), true, "realized", &err);
    if (err != NULL) {
        error_report_err(err);
        object_unref(OBJECT(cpu));
        exit(EXIT_FAILURE);
    }
    return cpu;
}
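/*
 * Illustrative use only (not part of this file): a board model would
 * typically instantiate its vCPUs with something like
 *
 *     CPUState *cs = cpu_create(machine->cpu_type);
 *
 * where machine->cpu_type names a concrete subclass of TYPE_CPU.
 */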

bool cpu_paging_enabled(const CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return cc->get_paging_enabled(cpu);
}

static bool cpu_common_get_paging_enabled(const CPUState *cpu)
{
    return false;
}

void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->get_memory_mapping(cpu, list, errp);
}

static void cpu_common_get_memory_mapping(CPUState *cpu,
                                          MemoryMappingList *list,
                                          Error **errp)
{
    error_setg(errp, "Obtaining memory mappings is unsupported on this CPU.");
}

/*
 * Resetting the IRQ comes from across the code base so we take the
 * BQL here if we need to.  cpu_interrupt assumes it is held.
 */
void cpu_reset_interrupt(CPUState *cpu, int mask)
{
    bool need_lock = !qemu_mutex_iothread_locked();

    if (need_lock) {
        qemu_mutex_lock_iothread();
    }
    cpu->interrupt_request &= ~mask;
    if (need_lock) {
        qemu_mutex_unlock_iothread();
    }
}
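/*
 * Illustrative use only (not part of this file): a device model deasserting
 * a level-triggered interrupt line would call something like
 *
 *     cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
 *
 * and, unlike cpu_interrupt(), may do so without already holding the BQL.
 */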

void cpu_exit(CPUState *cpu)
{
    atomic_set(&cpu->exit_request, 1);
    /* Ensure cpu_exec will see the exit request after TCG has exited.  */
    smp_wmb();
    atomic_set(&cpu->icount_decr_ptr->u16.high, -1);
}
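/*
 * Note: storing -1 to icount_decr.u16.high makes the TCG-generated code take
 * its exit/interrupt check at the next translation-block boundary; the main
 * execution loop then observes exit_request, which is why the write barrier
 * above orders the two stores.
 */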

int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf32_qemunote)(f, cpu, opaque);
}

static int cpu_common_write_elf32_qemunote(WriteCoreDumpFunction f,
                                           CPUState *cpu, void *opaque)
{
    return 0;
}

int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf32_note)(f, cpu, cpuid, opaque);
}

static int cpu_common_write_elf32_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}

int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf64_qemunote)(f, cpu, opaque);
}

static int cpu_common_write_elf64_qemunote(WriteCoreDumpFunction f,
                                           CPUState *cpu, void *opaque)
{
    return 0;
}

int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf64_note)(f, cpu, cpuid, opaque);
}

static int cpu_common_write_elf64_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}


static int cpu_common_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}

static int cpu_common_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}

static bool cpu_common_debug_check_watchpoint(CPUState *cpu, CPUWatchpoint *wp)
{
    /* If no extra check is required, QEMU watchpoint match can be considered
     * as an architectural match.
     */
    return true;
}

static bool cpu_common_virtio_is_big_endian(CPUState *cpu)
{
    return target_words_bigendian();
}

static void cpu_common_noop(CPUState *cpu)
{
}

static bool cpu_common_exec_interrupt(CPUState *cpu, int int_req)
{
    return false;
}

GuestPanicInformation *cpu_get_crash_info(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    GuestPanicInformation *res = NULL;

    if (cc->get_crash_info) {
        res = cc->get_crash_info(cpu);
    }
    return res;
}

void cpu_dump_state(CPUState *cpu, FILE *f, int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->dump_state) {
        cpu_synchronize_state(cpu);
        cc->dump_state(cpu, f, flags);
    }
}

void cpu_dump_statistics(CPUState *cpu, int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->dump_statistics) {
        cc->dump_statistics(cpu, flags);
    }
}

void cpu_class_set_parent_reset(CPUClass *cc,
                                void (*child_reset)(CPUState *cpu),
                                void (**parent_reset)(CPUState *cpu))
{
    *parent_reset = cc->reset;
    cc->reset = child_reset;
}
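/*
 * Illustrative use only (not part of this file): a target's class_init
 * typically saves the parent implementation before overriding it, e.g.
 *
 *     cpu_class_set_parent_reset(cc, arm_cpu_reset, &acc->parent_reset);
 *
 * so that the subclass reset handler can chain to acc->parent_reset() first.
 */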

void cpu_reset(CPUState *cpu)
{
    CPUClass *klass = CPU_GET_CLASS(cpu);

    if (klass->reset != NULL) {
        (*klass->reset)(cpu);
    }

    trace_guest_cpu_reset(cpu);
}

static void cpu_common_reset(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index);
        log_cpu_state(cpu, cc->reset_dump_flags);
    }

    cpu->interrupt_request = 0;
    cpu->halted = 0;
    cpu->mem_io_pc = 0;
    cpu->icount_extra = 0;
    atomic_set(&cpu->icount_decr_ptr->u32, 0);
    cpu->can_do_io = 1;
    cpu->exception_index = -1;
    cpu->crash_occurred = false;
    cpu->cflags_next_tb = -1;

    if (tcg_enabled()) {
        cpu_tb_jmp_cache_clear(cpu);

        tcg_flush_softmmu_tlb(cpu);
    }
}

static bool cpu_common_has_work(CPUState *cs)
{
    return false;
}

ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model)
{
    CPUClass *cc = CPU_CLASS(object_class_by_name(typename));

    assert(cpu_model && cc->class_by_name);
    return cc->class_by_name(cpu_model);
}

static void cpu_common_parse_features(const char *typename, char *features,
                                      Error **errp)
{
    char *val;
    static bool cpu_globals_initialized;
    /* Single "key=value" string being parsed */
    char *featurestr = features ? strtok(features, ",") : NULL;

    /* should be called only once, catch invalid users */
    assert(!cpu_globals_initialized);
    cpu_globals_initialized = true;

    while (featurestr) {
        val = strchr(featurestr, '=');
        if (val) {
            GlobalProperty *prop = g_new0(typeof(*prop), 1);
            *val = 0;
            val++;
            prop->driver = typename;
            prop->property = g_strdup(featurestr);
            prop->value = g_strdup(val);
            qdev_prop_register_global(prop);
        } else {
            error_setg(errp, "Expected key=value format, found %s.",
                       featurestr);
            return;
        }
        featurestr = strtok(NULL, ",");
    }
}
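/*
 * Note: for a command line such as "-cpu foo,key1=on,key2=3" the feature
 * string "key1=on,key2=3" is split at the commas and each key=value pair is
 * registered as a global property on the CPU type, to be applied when the
 * CPU object is later created.
 */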

static void cpu_common_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);
    Object *machine = qdev_get_machine();

    /* qdev_get_machine() can return something that's not TYPE_MACHINE
     * if this is one of the user-only emulators; in that case there's
     * no need to check the ignore_memory_transaction_failures board flag.
     */
    if (object_dynamic_cast(machine, TYPE_MACHINE)) {
        ObjectClass *oc = object_get_class(machine);
        MachineClass *mc = MACHINE_CLASS(oc);

        if (mc) {
            cpu->ignore_memory_transaction_failures =
                mc->ignore_memory_transaction_failures;
        }
    }

    if (dev->hotplugged) {
        cpu_synchronize_post_init(cpu);
        cpu_resume(cpu);
    }

    /* NOTE: latest generic point where the cpu is fully realized */
    trace_init_vcpu(cpu);
}

static void cpu_common_unrealizefn(DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);
    /* NOTE: latest generic point before the cpu is fully unrealized */
    trace_fini_vcpu(cpu);
    qemu_plugin_vcpu_exit_hook(cpu);
    cpu_exec_unrealizefn(cpu);
}

static void cpu_common_initfn(Object *obj)
{
    CPUState *cpu = CPU(obj);
    CPUClass *cc = CPU_GET_CLASS(obj);

    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu->cluster_index = UNASSIGNED_CLUSTER_INDEX;
    cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs;
    /* *-user doesn't have configurable SMP topology */
    /* the default value is changed by qemu_init_vcpu() for softmmu */
    cpu->nr_cores = 1;
    cpu->nr_threads = 1;

    qemu_mutex_init(&cpu->work_mutex);
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);

    cpu_exec_initfn(cpu);
}

static void cpu_common_finalize(Object *obj)
{
    CPUState *cpu = CPU(obj);

    qemu_mutex_destroy(&cpu->work_mutex);
}

static int64_t cpu_common_get_arch_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

static vaddr cpu_adjust_watchpoint_address(CPUState *cpu, vaddr addr, int len)
{
    return addr;
}

static void generic_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

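/*
 * Default cpu_interrupt() behaviour: record the pending interrupt and, if
 * the request did not come from the vCPU's own thread, kick that thread so
 * it notices the new interrupt_request bits promptly.  The handler variable
 * is a non-static global, so it can be replaced with an accelerator-specific
 * implementation.
 */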
CPUInterruptHandler cpu_interrupt_handler = generic_handle_interrupt;

static void cpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    CPUClass *k = CPU_CLASS(klass);

    k->parse_features = cpu_common_parse_features;
    k->reset = cpu_common_reset;
    k->get_arch_id = cpu_common_get_arch_id;
    k->has_work = cpu_common_has_work;
    k->get_paging_enabled = cpu_common_get_paging_enabled;
    k->get_memory_mapping = cpu_common_get_memory_mapping;
    k->write_elf32_qemunote = cpu_common_write_elf32_qemunote;
    k->write_elf32_note = cpu_common_write_elf32_note;
    k->write_elf64_qemunote = cpu_common_write_elf64_qemunote;
    k->write_elf64_note = cpu_common_write_elf64_note;
    k->gdb_read_register = cpu_common_gdb_read_register;
    k->gdb_write_register = cpu_common_gdb_write_register;
    k->virtio_is_big_endian = cpu_common_virtio_is_big_endian;
    k->debug_excp_handler = cpu_common_noop;
    k->debug_check_watchpoint = cpu_common_debug_check_watchpoint;
    k->cpu_exec_enter = cpu_common_noop;
    k->cpu_exec_exit = cpu_common_noop;
    k->cpu_exec_interrupt = cpu_common_exec_interrupt;
    k->adjust_watchpoint_address = cpu_adjust_watchpoint_address;
    set_bit(DEVICE_CATEGORY_CPU, dc->categories);
    dc->realize = cpu_common_realizefn;
    dc->unrealize = cpu_common_unrealizefn;
    device_class_set_props(dc, cpu_common_props);
    /*
     * Reason: CPUs still need special care by board code: wiring up
     * IRQs, adding reset handlers, halting non-first CPUs, ...
     */
    dc->user_creatable = false;
}

static const TypeInfo cpu_type_info = {
    .name = TYPE_CPU,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(CPUState),
    .instance_init = cpu_common_initfn,
    .instance_finalize = cpu_common_finalize,
    .abstract = true,
    .class_size = sizeof(CPUClass),
    .class_init = cpu_class_init,
};
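/*
 * Illustrative use only (not part of this file): TYPE_CPU is abstract, so
 * each target registers a concrete subclass with its own TypeInfo, e.g.
 *
 *     static const TypeInfo my_cpu_type_info = {
 *         .name = TYPE_MY_CPU,              // hypothetical type name
 *         .parent = TYPE_CPU,
 *         .instance_size = sizeof(MyCPU),   // hypothetical state struct
 *         .class_init = my_cpu_class_init,
 *     };
 *
 * and overrides the cpu_common_* defaults installed by cpu_class_init().
 */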

static void cpu_register_types(void)
{
    type_register_static(&cpu_type_info);
}

type_init(cpu_register_types)