/*
 * ARM implementation of KVM hooks
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <stdio.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "hw/arm/arm.h"

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
                                      int *fdarray,
                                      struct kvm_vcpu_init *init)
{
    int ret, kvmfd = -1, vmfd = -1, cpufd = -1;

    kvmfd = qemu_open("/dev/kvm", O_RDWR);
    if (kvmfd < 0) {
        goto err;
    }
    vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
    if (vmfd < 0) {
        goto err;
    }
    cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
    if (cpufd < 0) {
        goto err;
    }

    ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, init);
    if (ret >= 0) {
        ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
        if (ret < 0) {
            goto err;
        }
    } else {
        /* Old kernel which doesn't know about the
         * PREFERRED_TARGET ioctl: we know it will only support
         * creating one kind of guest CPU which is its preferred
         * CPU type.
         */
        while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) {
            init->target = *cpus_to_try++;
            memset(init->features, 0, sizeof(init->features));
            ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
            if (ret >= 0) {
                break;
            }
        }
        if (ret < 0) {
            goto err;
        }
    }

    fdarray[0] = kvmfd;
    fdarray[1] = vmfd;
    fdarray[2] = cpufd;

    return true;

err:
    if (cpufd >= 0) {
        close(cpufd);
    }
    if (vmfd >= 0) {
        close(vmfd);
    }
    if (kvmfd >= 0) {
        close(kvmfd);
    }

    return false;
}

void kvm_arm_destroy_scratch_host_vcpu(int *fdarray)
{
    int i;

    for (i = 2; i >= 0; i--) {
        close(fdarray[i]);
    }
}

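/* Example (an illustrative sketch, not taken from a real caller): probing
 * the host's preferred CPU target with a scratch VCPU might look roughly
 * like this, where cpus_to_try is a hypothetical
 * QEMU_KVM_ARM_TARGET_NONE terminated list of fallback targets:
 *
 *     int fdarray[3];
 *     struct kvm_vcpu_init init;
 *
 *     if (kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
 *         ... query the scratch vcpu fd in fdarray[2] as needed ...
 *         kvm_arm_destroy_scratch_host_vcpu(fdarray);
 *     }
 */
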
static void kvm_arm_host_cpu_class_init(ObjectClass *oc, void *data)
{
    ARMHostCPUClass *ahcc = ARM_HOST_CPU_CLASS(oc);

    /* All we really need to set up for the 'host' CPU
     * is the feature bits -- we rely on the fact that the
     * various ID register values in ARMCPU are only used for
     * TCG CPUs.
     */
    if (!kvm_arm_get_host_cpu_features(ahcc)) {
        fprintf(stderr, "Failed to retrieve host CPU features!\n");
        abort();
    }
}

static void kvm_arm_host_cpu_initfn(Object *obj)
{
    ARMHostCPUClass *ahcc = ARM_HOST_CPU_GET_CLASS(obj);
    ARMCPU *cpu = ARM_CPU(obj);
    CPUARMState *env = &cpu->env;

    cpu->kvm_target = ahcc->target;
    cpu->dtb_compatible = ahcc->dtb_compatible;
    env->features = ahcc->features;
}

static const TypeInfo host_arm_cpu_type_info = {
    .name = TYPE_ARM_HOST_CPU,
#ifdef TARGET_AARCH64
    .parent = TYPE_AARCH64_CPU,
#else
    .parent = TYPE_ARM_CPU,
#endif
    .instance_init = kvm_arm_host_cpu_initfn,
    .class_init = kvm_arm_host_cpu_class_init,
    .class_size = sizeof(ARMHostCPUClass),
};

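/* The type registered above provides the "host" CPU model, i.e. what a
 * command line along the lines of "qemu-system-arm -enable-kvm -cpu host ..."
 * selects when KVM is in use (an illustrative invocation, not exhaustive).
 */
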
int kvm_arch_init(KVMState *s)
{
    /* For ARM, interrupt delivery is always asynchronous,
     * whether we are using an in-kernel VGIC or not.
     */
    kvm_async_interrupts_allowed = true;

    type_register_static(&host_arm_cpu_type_info);

    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

/* We track all the KVM devices which need their memory addresses
 * passed to the kernel in a list of these structures.
 * When board init is complete we run through the list and
 * tell the kernel the base addresses of the memory regions.
 * We use a MemoryListener to track mapping and unmapping of
 * the regions during board creation, so the board models don't
 * need to do anything special for the KVM case.
 */
typedef struct KVMDevice {
    struct kvm_arm_device_addr kda;
    MemoryRegion *mr;
    QSLIST_ENTRY(KVMDevice) entries;
} KVMDevice;

static QSLIST_HEAD(kvm_devices_head, KVMDevice) kvm_devices_head;

static void kvm_arm_devlistener_add(MemoryListener *listener,
                                    MemoryRegionSection *section)
{
    KVMDevice *kd;

    QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
        if (section->mr == kd->mr) {
            kd->kda.addr = section->offset_within_address_space;
        }
    }
}

static void kvm_arm_devlistener_del(MemoryListener *listener,
                                    MemoryRegionSection *section)
{
    KVMDevice *kd;

    QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
        if (section->mr == kd->mr) {
            kd->kda.addr = -1;
        }
    }
}

static MemoryListener devlistener = {
    .region_add = kvm_arm_devlistener_add,
    .region_del = kvm_arm_devlistener_del,
};

static void kvm_arm_machine_init_done(Notifier *notifier, void *data)
{
    KVMDevice *kd, *tkd;

    memory_listener_unregister(&devlistener);
    QSLIST_FOREACH_SAFE(kd, &kvm_devices_head, entries, tkd) {
        if (kd->kda.addr != -1) {
            if (kvm_vm_ioctl(kvm_state, KVM_ARM_SET_DEVICE_ADDR,
                             &kd->kda) < 0) {
                fprintf(stderr, "KVM_ARM_SET_DEVICE_ADDRESS failed: %s\n",
                        strerror(errno));
                abort();
            }
        }
        memory_region_unref(kd->mr);
        g_free(kd);
    }
}

static Notifier notify = {
    .notify = kvm_arm_machine_init_done,
};

void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid)
{
    KVMDevice *kd;

    if (!kvm_irqchip_in_kernel()) {
        return;
    }

    if (QSLIST_EMPTY(&kvm_devices_head)) {
        memory_listener_register(&devlistener, NULL);
        qemu_add_machine_init_done_notifier(&notify);
    }
    kd = g_new0(KVMDevice, 1);
    kd->mr = mr;
    kd->kda.id = devid;
    kd->kda.addr = -1;
    QSLIST_INSERT_HEAD(&kvm_devices_head, kd, entries);
    memory_region_ref(kd->mr);
}

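/* Illustrative sketch only (the constants come from the kernel's ARM KVM
 * ABI headers; the MemoryRegion field name is hypothetical): an in-kernel
 * VGIC device model would register its distributor region with something
 * like
 *
 *     kvm_arm_register_device(&s->distributor_mr,
 *                             (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT)
 *                             | KVM_VGIC_V2_ADDR_TYPE_DIST);
 *
 * and the region's base address is then reported to the kernel
 * automatically once the board has mapped it and machine init completes.
 */
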
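/* Copy the current value of every register listed in cpu->cpreg_indexes[]
 * out of the kernel (one KVM_GET_ONE_REG call per entry) into
 * cpu->cpreg_values[]. Returns true on success, false if any register
 * could not be read.
 */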
bool write_kvmstate_to_list(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        struct kvm_one_reg r;
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        r.id = regidx;

        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            r.addr = (uintptr_t)&v32;
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
            if (!ret) {
                cpu->cpreg_values[i] = v32;
            }
            break;
        case KVM_REG_SIZE_U64:
            r.addr = (uintptr_t)(cpu->cpreg_values + i);
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
            break;
        default:
            abort();
        }
        if (ret) {
            ok = false;
        }
    }
    return ok;
}

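/* Push every value in cpu->cpreg_values[] back into the kernel with
 * KVM_SET_ONE_REG. Returns true on success, false if the kernel rejected
 * any of the registers.
 */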
bool write_list_to_kvmstate(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        struct kvm_one_reg r;
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        r.id = regidx;
        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            v32 = cpu->cpreg_values[i];
            r.addr = (uintptr_t)&v32;
            break;
        case KVM_REG_SIZE_U64:
            r.addr = (uintptr_t)(cpu->cpreg_values + i);
            break;
        default:
            abort();
        }
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            /* We might fail for "unknown register" and also for
             * "you tried to set a register which is constant with
             * a different value from what it actually contains".
             */
            ok = false;
        }
    }
    return ok;
}

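/* Illustrative round trip (a sketch, not lifted from a real caller): code
 * that wants to inspect or adjust the coprocessor register state would
 * typically refresh the list from the kernel, modify it, and push it back:
 *
 *     if (!write_kvmstate_to_list(cpu)) {
 *         ... some registers could not be read ...
 *     }
 *     ... change entries in cpu->cpreg_values[] ...
 *     if (!write_list_to_kvmstate(cpu)) {
 *         ... the kernel rejected some of the values ...
 *     }
 */
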
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}

void kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    return 0;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return 0;
}

int kvm_arch_on_sigbus_vcpu(CPUState *cs, int code, void *addr)
{
    return 1;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}

void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs,
                                  struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs,
                                  struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}