/*
 * ARM implementation of KVM hooks
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
11 #include "qemu/osdep.h"
12 #include <sys/ioctl.h>
14 #include <linux/kvm.h>
16 #include "qemu-common.h"
17 #include "qemu/timer.h"
18 #include "qemu/error-report.h"
19 #include "qemu/main-loop.h"
20 #include "qom/object.h"
21 #include "qapi/error.h"
22 #include "sysemu/sysemu.h"
23 #include "sysemu/kvm.h"
24 #include "sysemu/kvm_int.h"
28 #include "internals.h"
29 #include "hw/pci/pci.h"
30 #include "exec/memattrs.h"
31 #include "exec/address-spaces.h"
32 #include "hw/boards.h"
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static bool cap_has_mp_state;
static bool cap_has_inject_serror_esr;

static ARMHostCPUFeatures arm_host_cpu_features;
int kvm_arm_vcpu_init(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    struct kvm_vcpu_init init;

    init.target = cpu->kvm_target;
    memcpy(init.features, cpu->kvm_init_features, sizeof(init.features));

    return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
}
int kvm_arm_vcpu_finalize(CPUState *cs, int feature)
{
    return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_FINALIZE, &feature);
}
void kvm_arm_init_serror_injection(CPUState *cs)
{
    cap_has_inject_serror_esr = kvm_check_extension(cs->kvm_state,
                                    KVM_CAP_ARM_INJECT_SERROR_ESR);
}
bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
                                      int *fdarray,
                                      struct kvm_vcpu_init *init)
{
    int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1;

    kvmfd = qemu_open("/dev/kvm", O_RDWR);
    if (kvmfd < 0) {
        goto err;
    }
    vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
    if (vmfd < 0) {
        goto err;
    }
    cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
    if (cpufd < 0) {
        goto err;
    }

    if (!init) {
        /* Caller doesn't want the VCPU to be initialized, so skip it */
        goto finish;
    }

    if (init->target == -1) {
        struct kvm_vcpu_init preferred;

        ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, &preferred);
        if (!ret) {
            init->target = preferred.target;
        }
    }
    if (ret >= 0) {
        ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
        if (ret < 0) {
            goto err;
        }
    } else if (cpus_to_try) {
        /* Old kernel which doesn't know about the
         * PREFERRED_TARGET ioctl: we know it will only support
         * creating one kind of guest CPU which is its preferred
         * CPU type.
         */
        struct kvm_vcpu_init try;

        while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) {
            try.target = *cpus_to_try++;
            memcpy(try.features, init->features, sizeof(init->features));
            ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, &try);
            if (ret >= 0) {
                break;
            }
        }
        if (ret < 0) {
            goto err;
        }
        init->target = try.target;
    } else {
        /* Treat a NULL cpus_to_try argument the same as an empty
         * list, which means we will fail the call since this must
         * be an old kernel which doesn't support PREFERRED_TARGET.
         */
        goto err;
    }

finish:
    fdarray[0] = kvmfd;
    fdarray[1] = vmfd;
    fdarray[2] = cpufd;

    return true;

err:
    if (cpufd >= 0) {
        close(cpufd);
    }
    if (vmfd >= 0) {
        close(vmfd);
    }
    if (kvmfd >= 0) {
        close(kvmfd);
    }

    return false;
}
void kvm_arm_destroy_scratch_host_vcpu(int *fdarray)
{
    int i;

    for (i = 2; i >= 0; i--) {
        close(fdarray[i]);
    }
}
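/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * this is roughly the calling pattern a probe routine such as
 * kvm_arm_get_host_cpu_features() uses with the scratch-VCPU helpers above.
 * The target list and the register reads are placeholders; only the
 * create/probe/destroy shape is the point.
 *
 *     static const uint32_t cpus_to_try[] = {
 *         KVM_ARM_TARGET_CORTEX_A57,
 *         QEMU_KVM_ARM_TARGET_NONE
 *     };
 *     struct kvm_vcpu_init init = { .target = -1, };
 *     int fdarray[3];
 *
 *     if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
 *         return false;
 *     }
 *     ... issue KVM_GET_ONE_REG ioctls against fdarray[2], the VCPU fd ...
 *     kvm_arm_destroy_scratch_host_vcpu(fdarray);
 */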
void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;

    if (!arm_host_cpu_features.dtb_compatible) {
        if (!kvm_enabled() ||
            !kvm_arm_get_host_cpu_features(&arm_host_cpu_features)) {
            /* We can't report this error yet, so flag that we need to
             * in arm_cpu_realizefn().
             */
            cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
            cpu->host_cpu_probe_failed = true;
            return;
        }
    }

    cpu->kvm_target = arm_host_cpu_features.target;
    cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
    cpu->isar = arm_host_cpu_features.isar;
    env->features = arm_host_cpu_features.features;
}
static bool kvm_no_adjvtime_get(Object *obj, Error **errp)
{
    return !ARM_CPU(obj)->kvm_adjvtime;
}

static void kvm_no_adjvtime_set(Object *obj, bool value, Error **errp)
{
    ARM_CPU(obj)->kvm_adjvtime = !value;
}
/* KVM VCPU properties should be prefixed with "kvm-". */
void kvm_arm_add_vcpu_properties(Object *obj)
{
    if (!kvm_enabled()) {
        return;
    }

    ARM_CPU(obj)->kvm_adjvtime = true;
    object_property_add_bool(obj, "kvm-no-adjvtime", kvm_no_adjvtime_get,
                             kvm_no_adjvtime_set);
    object_property_set_description(obj, "kvm-no-adjvtime",
                                    "Set on to disable the adjustment of "
                                    "the virtual counter. VM stopped time "
                                    "will be counted.");
}
bool kvm_arm_pmu_supported(CPUState *cpu)
{
    return kvm_check_extension(cpu->kvm_state, KVM_CAP_ARM_PMU_V3);
}
int kvm_arm_get_max_vm_ipa_size(MachineState *ms)
{
    KVMState *s = KVM_STATE(ms->accelerator);
    int ret;

    ret = kvm_check_extension(s, KVM_CAP_ARM_VM_IPA_SIZE);
    return ret > 0 ? ret : 40;
}
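/*
 * Example (editorial addition, hedged): KVM_CAP_ARM_VM_IPA_SIZE reports the
 * widest guest physical address space the host can provide; when the
 * capability is absent the legacy fixed 40-bit IPA limit applies. A machine
 * model would typically use the result as an upper bound, for instance
 *
 *     if (required_ipa_bits > kvm_arm_get_max_vm_ipa_size(ms)) {
 *         error_report("RAM size too large for this host's IPA range");
 *     }
 *
 * where required_ipa_bits is a hypothetical value the board computes from
 * its memory map.
 */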
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    int ret = 0;
    /* For ARM interrupt delivery is always asynchronous,
     * whether we are using an in-kernel VGIC or not.
     */
    kvm_async_interrupts_allowed = true;

    /*
     * PSCI wakes up secondary cores, so we always need to
     * have vCPUs waiting in kernel space
     */
    kvm_halt_in_kernel_allowed = true;

    cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);

    if (ms->smp.cpus > 256 &&
        !kvm_check_extension(s, KVM_CAP_ARM_IRQ_LINE_LAYOUT_2)) {
        error_report("Using more than 256 vcpus requires a host kernel "
                     "with KVM_CAP_ARM_IRQ_LINE_LAYOUT_2");
        ret = -EINVAL;
    }

    return ret;
}
unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}
/* We track all the KVM devices which need their memory addresses
 * passing to the kernel in a list of these structures.
 * When board init is complete we run through the list and
 * tell the kernel the base addresses of the memory regions.
 * We use a MemoryListener to track mapping and unmapping of
 * the regions during board creation, so the board models don't
 * need to do anything special for the KVM case.
 *
 * Sometimes the address must be OR'ed with some other fields
 * (for example for KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION).
 * @kda_addr_ormask aims at storing the value of those fields.
 */
typedef struct KVMDevice {
    struct kvm_arm_device_addr kda;
    struct kvm_device_attr kdattr;
    uint64_t kda_addr_ormask;
    MemoryRegion *mr;
    QSLIST_ENTRY(KVMDevice) entries;
    int dev_fd;
} KVMDevice;

static QSLIST_HEAD(, KVMDevice) kvm_devices_head;
static void kvm_arm_devlistener_add(MemoryListener *listener,
                                    MemoryRegionSection *section)
{
    KVMDevice *kd;

    QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
        if (section->mr == kd->mr) {
            kd->kda.addr = section->offset_within_address_space;
        }
    }
}
static void kvm_arm_devlistener_del(MemoryListener *listener,
                                    MemoryRegionSection *section)
{
    KVMDevice *kd;

    QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
        if (section->mr == kd->mr) {
            kd->kda.addr = -1;
        }
    }
}
static MemoryListener devlistener = {
    .region_add = kvm_arm_devlistener_add,
    .region_del = kvm_arm_devlistener_del,
};
static void kvm_arm_set_device_addr(KVMDevice *kd)
{
    struct kvm_device_attr *attr = &kd->kdattr;
    int ret;

    /* If the device control API is available and we have a device fd on the
     * KVMDevice struct, let's use the newer API
     */
    if (kd->dev_fd >= 0) {
        uint64_t addr = kd->kda.addr;

        addr |= kd->kda_addr_ormask;
        attr->addr = (uintptr_t)&addr;
        ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr);
    } else {
        ret = kvm_vm_ioctl(kvm_state, KVM_ARM_SET_DEVICE_ADDR, &kd->kda);
    }

    if (ret < 0) {
        fprintf(stderr, "Failed to set device address: %s\n",
                strerror(-ret));
        abort();
    }
}
static void kvm_arm_machine_init_done(Notifier *notifier, void *data)
{
    KVMDevice *kd, *tkd;

    QSLIST_FOREACH_SAFE(kd, &kvm_devices_head, entries, tkd) {
        if (kd->kda.addr != -1) {
            kvm_arm_set_device_addr(kd);
        }
        memory_region_unref(kd->mr);
        QSLIST_REMOVE_HEAD(&kvm_devices_head, entries);
        g_free(kd);
    }
    memory_listener_unregister(&devlistener);
}
static Notifier notify = {
    .notify = kvm_arm_machine_init_done,
};
void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
                             uint64_t attr, int dev_fd, uint64_t addr_ormask)
{
    KVMDevice *kd;

    if (!kvm_irqchip_in_kernel()) {
        return;
    }

    if (QSLIST_EMPTY(&kvm_devices_head)) {
        memory_listener_register(&devlistener, &address_space_memory);
        qemu_add_machine_init_done_notifier(&notify);
    }
    kd = g_new0(KVMDevice, 1);
    kd->mr = mr;
    kd->kda.id = devid;
    kd->kda.addr = -1;
    kd->kdattr.flags = 0;
    kd->kdattr.group = group;
    kd->kdattr.attr = attr;
    kd->dev_fd = dev_fd;
    kd->kda_addr_ormask = addr_ormask;
    QSLIST_INSERT_HEAD(&kvm_devices_head, kd, entries);
    memory_region_ref(kd->mr);
}
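/*
 * Illustrative example (editorial addition): an in-kernel GIC device model
 * registers each of its regions roughly like this (s->iomem_dist and
 * s->dev_fd are hypothetical fields of the device state):
 *
 *     kvm_arm_register_device(&s->iomem_dist,
 *                             (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT)
 *                             | KVM_VGIC_V2_ADDR_TYPE_DIST,
 *                             KVM_DEV_ARM_VGIC_GRP_ADDR,
 *                             KVM_VGIC_V2_ADDR_TYPE_DIST,
 *                             s->dev_fd, 0);
 *
 * The guest physical address itself is filled in later by the MemoryListener
 * above and pushed to the kernel from kvm_arm_machine_init_done().
 */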
static int compare_u64(const void *a, const void *b)
{
    if (*(uint64_t *)a > *(uint64_t *)b) {
        return 1;
    }
    if (*(uint64_t *)a < *(uint64_t *)b) {
        return -1;
    }
    return 0;
}
/*
 * cpreg_values are sorted in ascending order by KVM register ID
 * (see kvm_arm_init_cpreg_list). This allows us to cheaply find
 * the storage for a KVM register by ID with a binary search.
 */
static uint64_t *kvm_arm_get_cpreg_ptr(ARMCPU *cpu, uint64_t regidx)
{
    uint64_t *res;

    res = bsearch(&regidx, cpu->cpreg_indexes, cpu->cpreg_array_len,
                  sizeof(uint64_t), compare_u64);
    assert(res);

    return &cpu->cpreg_values[res - cpu->cpreg_indexes];
}
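/*
 * Example (editorial addition): cpreg_indexes[] and cpreg_values[] are
 * parallel arrays, so the pointer arithmetic above converts a hit in the
 * index array into the matching slot of the value array. A caller can then
 * read or update a register's shadow value by ID, e.g.
 *
 *     uint64_t *p = kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT);
 *     *p = cpu->kvm_vtime;    (as kvm_arm_cpu_pre_save() does below)
 *
 * which is only valid for IDs present in the kernel-supplied list, since the
 * lookup asserts on a miss.
 */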
/* Initialize the ARMCPU cpreg list according to the kernel's
 * definition of what CPU registers it knows about (and throw away
 * the previous TCG-created cpreg list).
 */
int kvm_arm_init_cpreg_list(ARMCPU *cpu)
{
    struct kvm_reg_list rl;
    struct kvm_reg_list *rlp;
    int i, ret, arraylen;
    CPUState *cs = CPU(cpu);

    rl.n = 0;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
    if (ret != -E2BIG) {
        return ret;
    }
    rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
    rlp->n = rl.n;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
    if (ret) {
        goto out;
    }
    /* Sort the list we get back from the kernel, since cpreg_tuples
     * must be in strictly ascending order.
     */
    qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);

    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        if (!kvm_arm_reg_syncs_via_cpreg_list(rlp->reg[i])) {
            continue;
        }
        switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
        case KVM_REG_SIZE_U64:
            break;
        default:
            fprintf(stderr, "Can't handle size of register in kernel list\n");
            ret = -EINVAL;
            goto out;
        }

        arraylen++;
    }

    cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
    cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
    cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
                                         arraylen);
    cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
                                        arraylen);
    cpu->cpreg_array_len = arraylen;
    cpu->cpreg_vmstate_array_len = arraylen;

    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        uint64_t regidx = rlp->reg[i];
        if (!kvm_arm_reg_syncs_via_cpreg_list(regidx)) {
            continue;
        }
        cpu->cpreg_indexes[arraylen] = regidx;
        arraylen++;
    }
    assert(cpu->cpreg_array_len == arraylen);

    if (!write_kvmstate_to_list(cpu)) {
        /* Shouldn't happen unless kernel is inconsistent about
         * what registers exist.
         */
        fprintf(stderr, "Initial read of kernel register state failed\n");
        ret = -EINVAL;
        goto out;
    }

    ret = 0;
out:
    g_free(rlp);
    return ret;
}
bool write_kvmstate_to_list(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        struct kvm_one_reg r;
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        r.id = regidx;

        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            r.addr = (uintptr_t)&v32;
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
            if (!ret) {
                cpu->cpreg_values[i] = v32;
            }
            break;
        case KVM_REG_SIZE_U64:
            r.addr = (uintptr_t)(cpu->cpreg_values + i);
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
            break;
        default:
            abort();
        }
        if (ret) {
            ok = false;
        }
    }
    return ok;
}
bool write_list_to_kvmstate(ARMCPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        struct kvm_one_reg r;
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        if (kvm_arm_cpreg_level(regidx) > level) {
            continue;
        }

        r.id = regidx;
        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            v32 = cpu->cpreg_values[i];
            r.addr = (uintptr_t)&v32;
            break;
        case KVM_REG_SIZE_U64:
            r.addr = (uintptr_t)(cpu->cpreg_values + i);
            break;
        default:
            abort();
        }
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            /* We might fail for "unknown register" and also for
             * "you tried to set a register which is constant with
             * a different value from what it actually contains".
             */
            ok = false;
        }
    }
    return ok;
}
void kvm_arm_cpu_pre_save(ARMCPU *cpu)
{
    /* KVM virtual time adjustment */
    if (cpu->kvm_vtime_dirty) {
        *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT) = cpu->kvm_vtime;
    }
}
void kvm_arm_cpu_post_load(ARMCPU *cpu)
{
    /* KVM virtual time adjustment */
    if (cpu->kvm_adjvtime) {
        cpu->kvm_vtime = *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT);
        cpu->kvm_vtime_dirty = true;
    }
}
void kvm_arm_reset_vcpu(ARMCPU *cpu)
{
    int ret;

    /* Re-init VCPU so that all registers are set to
     * their respective reset values.
     */
    ret = kvm_arm_vcpu_init(CPU(cpu));
    if (ret < 0) {
        fprintf(stderr, "kvm_arm_vcpu_init failed: %s\n", strerror(-ret));
        abort();
    }
    if (!write_kvmstate_to_list(cpu)) {
        fprintf(stderr, "write_kvmstate_to_list failed\n");
        abort();
    }
    /*
     * Sync the reset values also into the CPUState. This is necessary
     * because the next thing we do will be a kvm_arch_put_registers()
     * which will update the list values from the CPUState before copying
     * the list values back to KVM. It's OK to ignore failure returns here
     * for the same reason we do so in kvm_arch_get_registers().
     */
    write_list_to_cpustate(cpu);
}
/*
 * Update KVM's MP_STATE based on what QEMU thinks it is
 */
int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu)
{
    if (cap_has_mp_state) {
        struct kvm_mp_state mp_state = {
            .mp_state = (cpu->power_state == PSCI_OFF) ?
            KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE
        };
        int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
        if (ret) {
            fprintf(stderr, "%s: failed to set MP_STATE %d/%s\n",
                    __func__, ret, strerror(-ret));
            abort();
        }
    }

    return 0;
}
/*
 * Sync the KVM MP_STATE into QEMU
 */
int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
{
    if (cap_has_mp_state) {
        struct kvm_mp_state mp_state;
        int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MP_STATE, &mp_state);
        if (ret) {
            fprintf(stderr, "%s: failed to get MP_STATE %d/%s\n",
                    __func__, ret, strerror(-ret));
            abort();
        }
        cpu->power_state = (mp_state.mp_state == KVM_MP_STATE_STOPPED) ?
            PSCI_OFF : PSCI_ON;
    }

    return 0;
}
void kvm_arm_get_virtual_time(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    struct kvm_one_reg reg = {
        .id = KVM_REG_ARM_TIMER_CNT,
        .addr = (uintptr_t)&cpu->kvm_vtime,
    };
    int ret;

    if (cpu->kvm_vtime_dirty) {
        return;
    }

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        error_report("Failed to get KVM_REG_ARM_TIMER_CNT");
        abort();
    }

    cpu->kvm_vtime_dirty = true;
}
void kvm_arm_put_virtual_time(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    struct kvm_one_reg reg = {
        .id = KVM_REG_ARM_TIMER_CNT,
        .addr = (uintptr_t)&cpu->kvm_vtime,
    };
    int ret;

    if (!cpu->kvm_vtime_dirty) {
        return;
    }

    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        error_report("Failed to set KVM_REG_ARM_TIMER_CNT");
        abort();
    }

    cpu->kvm_vtime_dirty = false;
}
int kvm_put_vcpu_events(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    events.exception.serror_pending = env->serror.pending;

    /* Inject SError to guest with specified syndrome if host kernel
     * supports it, otherwise inject SError without syndrome.
     */
    if (cap_has_inject_serror_esr) {
        events.exception.serror_has_esr = env->serror.has_esr;
        events.exception.serror_esr = env->serror.esr;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
    if (ret) {
        error_report("failed to put vcpu events");
    }

    return ret;
}
int kvm_get_vcpu_events(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    if (ret) {
        error_report("failed to get vcpu events");
        return ret;
    }

    env->serror.pending = events.exception.serror_pending;
    env->serror.has_esr = events.exception.serror_has_esr;
    env->serror.esr = events.exception.serror_esr;

    return 0;
}
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    ARMCPU *cpu;
    uint32_t switched_level;

    if (kvm_irqchip_in_kernel()) {
        /*
         * We only need to sync timer states with user-space interrupt
         * controllers, so return early and save cycles if we don't.
         */
        return MEMTXATTRS_UNSPECIFIED;
    }

    cpu = ARM_CPU(cs);

    /* Synchronize our shadowed in-kernel device irq lines with the kvm ones */
    if (run->s.regs.device_irq_level != cpu->device_irq_level) {
        switched_level = cpu->device_irq_level ^ run->s.regs.device_irq_level;

        qemu_mutex_lock_iothread();

        if (switched_level & KVM_ARM_DEV_EL1_VTIMER) {
            qemu_set_irq(cpu->gt_timer_outputs[GTIMER_VIRT],
                         !!(run->s.regs.device_irq_level &
                            KVM_ARM_DEV_EL1_VTIMER));
            switched_level &= ~KVM_ARM_DEV_EL1_VTIMER;
        }

        if (switched_level & KVM_ARM_DEV_EL1_PTIMER) {
            qemu_set_irq(cpu->gt_timer_outputs[GTIMER_PHYS],
                         !!(run->s.regs.device_irq_level &
                            KVM_ARM_DEV_EL1_PTIMER));
            switched_level &= ~KVM_ARM_DEV_EL1_PTIMER;
        }

        if (switched_level & KVM_ARM_DEV_PMU) {
            qemu_set_irq(cpu->pmu_interrupt,
                         !!(run->s.regs.device_irq_level & KVM_ARM_DEV_PMU));
            switched_level &= ~KVM_ARM_DEV_PMU;
        }

        if (switched_level) {
            qemu_log_mask(LOG_UNIMP, "%s: unhandled in-kernel device IRQ %x\n",
                          __func__, switched_level);
        }

        /* We also mark unknown levels as processed to not waste cycles */
        cpu->device_irq_level = run->s.regs.device_irq_level;
        qemu_mutex_unlock_iothread();
    }

    return MEMTXATTRS_UNSPECIFIED;
}
void kvm_arm_vm_state_change(void *opaque, int running, RunState state)
{
    CPUState *cs = opaque;
    ARMCPU *cpu = ARM_CPU(cs);

    if (running) {
        if (cpu->kvm_adjvtime) {
            kvm_arm_put_virtual_time(cs);
        }
    } else {
        if (cpu->kvm_adjvtime) {
            kvm_arm_get_virtual_time(cs);
        }
    }
}
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_DEBUG:
        if (kvm_arm_handle_debug(cs, &run->debug.arch)) {
            ret = EXCP_DEBUG;
        } /* otherwise return to guest */
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
                      __func__, run->exit_reason);
        break;
    }
    return ret;
}
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}
int kvm_arch_process_async_events(CPUState *cs)
{
    return 0;
}
/* The #ifdef protections are until 32bit headers are imported and can
 * be removed once both 32 and 64 bit reach feature parity.
 */
void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
{
#ifdef KVM_GUESTDBG_USE_SW_BP
    if (kvm_sw_breakpoints_active(cs)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
#endif
#ifdef KVM_GUESTDBG_USE_HW
    if (kvm_arm_hw_debug_active(cs)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW;
        kvm_arm_copy_hw_debug_data(&dbg->arch);
    }
#endif
}
void kvm_arch_init_irq_routing(KVMState *s)
{
}
int kvm_arch_irqchip_create(KVMState *s)
{
    if (kvm_kernel_irqchip_split()) {
        perror("-machine kernel_irqchip=split is not supported on ARM.");
        exit(1);
    }

    /* If we can create the VGIC using the newer device control API, we
     * let the device do this when it initializes itself, otherwise we
     * fall back to the old API */
    return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
}
int kvm_arm_vgic_probe(void)
{
    int val = 0;

    if (kvm_create_device(kvm_state,
                          KVM_DEV_TYPE_ARM_VGIC_V3, true) == 0) {
        val |= KVM_ARM_VGIC_V3;
    }
    if (kvm_create_device(kvm_state,
                          KVM_DEV_TYPE_ARM_VGIC_V2, true) == 0) {
        val |= KVM_ARM_VGIC_V2;
    }
    return val;
}
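/*
 * Illustrative use (editorial addition, hedged): a board that supports
 * "gic-version=host" can probe the host and pick a GIC model accordingly,
 * roughly along these lines (the real selection logic in hw/arm/virt.c is
 * more involved):
 *
 *     int probed = kvm_arm_vgic_probe();
 *
 *     if (probed & KVM_ARM_VGIC_V3) {
 *         ... create the in-kernel GICv3 model ...
 *     } else if (probed & KVM_ARM_VGIC_V2) {
 *         ... create the in-kernel GICv2 model ...
 *     } else {
 *         error_report("Unable to determine GIC version supported by host");
 *     }
 */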
int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level)
{
    int kvm_irq = (irqtype << KVM_ARM_IRQ_TYPE_SHIFT) | irq;
    int cpu_idx1 = cpu % 256;
    int cpu_idx2 = cpu / 256;

    kvm_irq |= (cpu_idx1 << KVM_ARM_IRQ_VCPU_SHIFT) |
               (cpu_idx2 << KVM_ARM_IRQ_VCPU2_SHIFT);

    return kvm_set_irq(kvm_state, kvm_irq, !!level);
}
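/*
 * Worked example (editorial addition): for a level change on SPI 40 the
 * caller passes cpu = 0 and the word handed to KVM_IRQ_LINE is
 *
 *     kvm_irq = (KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT) | 40;
 *
 * For a per-vcpu PPI on vcpu 300, cpu_idx1 = 300 % 256 = 44 and
 * cpu_idx2 = 300 / 256 = 1; the second index only has somewhere to go if the
 * host knows about the VCPU2 field, which is why kvm_arch_init() rejects
 * more than 256 vcpus without KVM_CAP_ARM_IRQ_LINE_LAYOUT_2.
 */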
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    AddressSpace *as = pci_device_iommu_address_space(dev);
    hwaddr xlat, len, doorbell_gpa;
    MemoryRegionSection mrs;
    MemoryRegion *mr;
    int ret = 1;

    if (as == &address_space_memory) {
        return 0;
    }

    /* MSI doorbell address is translated by an IOMMU */

    rcu_read_lock();
    mr = address_space_translate(as, address, &xlat, &len, true,
                                 MEMTXATTRS_UNSPECIFIED);
    if (!mr) {
        goto unlock;
    }
    mrs = memory_region_find(mr, xlat, 1);
    if (!mrs.mr) {
        goto unlock;
    }

    doorbell_gpa = mrs.offset_within_address_space;
    memory_region_unref(mrs.mr);

    route->u.msi.address_lo = doorbell_gpa;
    route->u.msi.address_hi = doorbell_gpa >> 32;

    trace_kvm_arm_fixup_msi_route(address, doorbell_gpa);

    ret = 0;

unlock:
    rcu_read_unlock();
    return ret;
}
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    return (data - 32) & 0xffff;
}
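/*
 * Example (editorial addition): GIC SPIs are numbered from 32 upwards, while
 * the GSI space used for MSI routing is a zero-based SPI offset, so an MSI
 * data value targeting interrupt ID 74 maps to GSI (74 - 32) & 0xffff = 42.
 */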