/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_KVM_H
#define QEMU_KVM_H

#include <errno.h>
#include "config-host.h"
#include "qemu/queue.h"
#include "qom/cpu.h"

#ifdef CONFIG_KVM
#include <linux/kvm.h>
#include <linux/kvm_para.h>
#else
/* These constants must never be used at runtime if kvm_enabled() is false.
 * They exist so we don't need #ifdefs around KVM-specific code that already
 * checks kvm_enabled() properly.
 */
#define KVM_CPUID_SIGNATURE 0
#define KVM_CPUID_FEATURES 0
#define KVM_FEATURE_CLOCKSOURCE 0
#define KVM_FEATURE_NOP_IO_DELAY 0
#define KVM_FEATURE_MMU_OP 0
#define KVM_FEATURE_CLOCKSOURCE2 0
#define KVM_FEATURE_ASYNC_PF 0
#define KVM_FEATURE_STEAL_TIME 0
#define KVM_FEATURE_PV_EOI 0
#define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 0
#endif

extern bool kvm_allowed;
extern bool kvm_kernel_irqchip;
extern bool kvm_async_interrupts_allowed;
extern bool kvm_halt_in_kernel_allowed;
extern bool kvm_irqfds_allowed;
extern bool kvm_msi_via_irqfd_allowed;
extern bool kvm_gsi_routing_allowed;
extern bool kvm_readonly_mem_allowed;

#if defined CONFIG_KVM || !defined NEED_CPU_H
#define kvm_enabled() (kvm_allowed)
/**
 * kvm_irqchip_in_kernel:
 *
 * Returns: true if the user asked us to create an in-kernel
 * irqchip via the "kernel_irqchip=on" machine option.
 * What this actually means is architecture and machine model
 * specific: on PC, for instance, it means that the LAPIC,
 * IOAPIC and PIT are all in kernel. This function should never
 * be used from generic target-independent code: use one of the
 * following functions or some other specific check instead.
 */
#define kvm_irqchip_in_kernel() (kvm_kernel_irqchip)

/**
 * kvm_async_interrupts_enabled:
 *
 * Returns: true if we can deliver interrupts to KVM
 * asynchronously (ie by ioctl from any thread at any time)
 * rather than having to do interrupt delivery synchronously
 * (where the vcpu must be stopped at a suitable point first).
 */
#define kvm_async_interrupts_enabled() (kvm_async_interrupts_allowed)

/**
 * kvm_halt_in_kernel:
 *
 * Returns: true if halted cpus should still get a KVM_RUN ioctl to run
 * inside of kernel space. This only works if MP state is implemented.
 */
#define kvm_halt_in_kernel() (kvm_halt_in_kernel_allowed)

/**
 * kvm_irqfds_enabled:
 *
 * Returns: true if we can use irqfds to inject interrupts into
 * a KVM CPU (ie the kernel supports irqfds and we are running
 * with a configuration where it is meaningful to use them).
 */
#define kvm_irqfds_enabled() (kvm_irqfds_allowed)

/**
 * kvm_msi_via_irqfd_enabled:
 *
 * Returns: true if we can route a PCI MSI (Message Signaled Interrupt)
 * to a KVM CPU via an irqfd. This requires that the kernel supports
 * this and that we're running in a configuration that permits it.
 */
#define kvm_msi_via_irqfd_enabled() (kvm_msi_via_irqfd_allowed)

/**
 * kvm_gsi_routing_enabled:
 *
 * Returns: true if GSI routing is enabled (ie the kernel supports
 * it and we're running in a configuration that permits it).
 */
#define kvm_gsi_routing_enabled() (kvm_gsi_routing_allowed)

/**
 * kvm_readonly_mem_enabled:
 *
 * Returns: true if KVM readonly memory is enabled (ie the kernel
 * supports it and we're running in a configuration that permits it).
 */
#define kvm_readonly_mem_enabled() (kvm_readonly_mem_allowed)

#else
#define kvm_enabled() (0)
#define kvm_irqchip_in_kernel() (false)
#define kvm_async_interrupts_enabled() (false)
#define kvm_halt_in_kernel() (false)
#define kvm_irqfds_enabled() (false)
#define kvm_msi_via_irqfd_enabled() (false)
#define kvm_gsi_routing_enabled() (false)
#define kvm_readonly_mem_enabled() (false)
#endif
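
/*
 * Illustrative usage (an assumption, not lifted verbatim from a particular
 * caller): generic code guards KVM-only paths with these predicates rather
 * than with CONFIG_KVM #ifdefs, e.g.
 *
 *     if (kvm_enabled() && kvm_irqchip_in_kernel()) {
 *         kvm_irqchip_commit_routes(kvm_state);
 *     }
 */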

struct kvm_run;
struct kvm_lapic_state;

typedef struct KVMCapabilityInfo {
    const char *name;
    int value;
} KVMCapabilityInfo;

#define KVM_CAP_INFO(CAP) { "KVM_CAP_" stringify(CAP), KVM_CAP_##CAP }
#define KVM_CAP_LAST_INFO { NULL, 0 }

struct KVMState;
typedef struct KVMState KVMState;
extern KVMState *kvm_state;

/* external API */

int kvm_init(void);

int kvm_has_sync_mmu(void);
int kvm_has_vcpu_events(void);
int kvm_has_robust_singlestep(void);
int kvm_has_debugregs(void);
int kvm_has_xsave(void);
int kvm_has_xcrs(void);
int kvm_has_pit_state2(void);
int kvm_has_many_ioeventfds(void);
int kvm_has_gsi_routing(void);
int kvm_has_intx_set_mask(void);

int kvm_init_vcpu(CPUState *cpu);
int kvm_cpu_exec(CPUState *cpu);
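
/*
 * Illustrative sketch (an assumption, condensed from the per-vcpu thread
 * loop in cpus.c; "vcpu_should_run" is a hypothetical stand-in for the real
 * run/stop bookkeeping):
 *
 *     kvm_init_vcpu(cpu);
 *     while (vcpu_should_run) {
 *         int r = kvm_cpu_exec(cpu);
 *         if (r == EXCP_DEBUG) {
 *             cpu_handle_guest_debug(cpu);
 *         }
 *     }
 */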

#ifdef NEED_CPU_H

void kvm_setup_guest_memory(void *start, size_t size);
void kvm_flush_coalesced_mmio_buffer(void);

int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type);
int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type);
void kvm_remove_all_breakpoints(CPUState *cpu);
int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap);
#ifndef _WIN32
int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset);
#endif

int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
int kvm_on_sigbus(int code, void *addr);

/* internal API */

int kvm_ioctl(KVMState *s, int type, ...);

int kvm_vm_ioctl(KVMState *s, int type, ...);

int kvm_vcpu_ioctl(CPUState *cpu, int type, ...);

/* Arch specific hooks */

extern const KVMCapabilityInfo kvm_arch_required_capabilities[];
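
/*
 * Illustrative sketch (an assumption, modeled on how a target's kvm.c might
 * populate this table; the exact capabilities differ per architecture):
 *
 *     const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
 *         KVM_CAP_INFO(USER_MEMORY),
 *         KVM_CAP_LAST_INFO
 *     };
 */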

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run);
void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);

int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run);

int kvm_arch_process_async_events(CPUState *cpu);

int kvm_arch_get_registers(CPUState *cpu);

/* state subset only touched by the VCPU itself during runtime */
#define KVM_PUT_RUNTIME_STATE 1
/* state subset modified during VCPU reset */
#define KVM_PUT_RESET_STATE 2
/* full state set, modified during initialization or on vmload */
#define KVM_PUT_FULL_STATE 3

int kvm_arch_put_registers(CPUState *cpu, int level);
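
/*
 * Illustrative note (an assumption about the typical call sites, which live
 * in the kvm_cpu_synchronize_* helpers): a write-back after reset would be
 *
 *     kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
 *
 * whereas runtime updates pass KVM_PUT_RUNTIME_STATE so the arch code can
 * skip state the guest cannot have modified on its own.
 */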

int kvm_arch_init(KVMState *s);

int kvm_arch_init_vcpu(CPUState *cpu);

/* Returns VCPU ID to be used on KVM_CREATE_VCPU ioctl() */
unsigned long kvm_arch_vcpu_id(CPUState *cpu);

void kvm_arch_reset_vcpu(CPUState *cpu);

int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
int kvm_arch_on_sigbus(int code, void *addr);

void kvm_arch_init_irq_routing(KVMState *s);

int kvm_set_irq(KVMState *s, int irq, int level);
int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg);

void kvm_irqchip_add_irq_route(KVMState *s, int gsi, int irqchip, int pin);
void kvm_irqchip_commit_routes(KVMState *s);

void kvm_put_apic_state(DeviceState *d, struct kvm_lapic_state *kapic);
void kvm_get_apic_state(DeviceState *d, struct kvm_lapic_state *kapic);

struct kvm_guest_debug;
struct kvm_debug_exit_arch;

struct kvm_sw_breakpoint {
    target_ulong pc;
    target_ulong saved_insn;
    int use_count;
    QTAILQ_ENTRY(kvm_sw_breakpoint) entry;
};

QTAILQ_HEAD(kvm_sw_breakpoint_head, kvm_sw_breakpoint);
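
/*
 * Illustrative sketch (an assumption: KVMState keeps a list head of this
 * type; walking it with the queue.h helpers looks roughly like):
 *
 *     struct kvm_sw_breakpoint *bp;
 *
 *     QTAILQ_FOREACH(bp, &s->kvm_sw_breakpoints, entry) {
 *         if (bp->pc == pc) {
 *             return bp;
 *         }
 *     }
 */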

struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
                                                 target_ulong pc);

int kvm_sw_breakpoints_active(CPUState *cpu);

int kvm_arch_insert_sw_breakpoint(CPUState *cpu,
                                  struct kvm_sw_breakpoint *bp);
int kvm_arch_remove_sw_breakpoint(CPUState *cpu,
                                  struct kvm_sw_breakpoint *bp);
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type);
int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type);
void kvm_arch_remove_all_hw_breakpoints(void);

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg);

bool kvm_arch_stop_on_emulation_error(CPUState *cpu);

int kvm_check_extension(KVMState *s, unsigned int extension);

uint32_t kvm_arch_get_supported_cpuid(KVMState *env, uint32_t function,
                                      uint32_t index, int reg);
void kvm_cpu_synchronize_state(CPUState *cpu);

/* generic hooks - to be moved/refactored once there are more users */

static inline void cpu_synchronize_state(CPUState *cpu)
{
    if (kvm_enabled()) {
        kvm_cpu_synchronize_state(cpu);
    }
}

#if !defined(CONFIG_USER_ONLY)
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
                                       hwaddr *phys_addr);
#endif

#endif /* NEED_CPU_H */

void kvm_cpu_synchronize_post_reset(CPUState *cpu);
void kvm_cpu_synchronize_post_init(CPUState *cpu);

static inline void cpu_synchronize_post_reset(CPUState *cpu)
{
    if (kvm_enabled()) {
        kvm_cpu_synchronize_post_reset(cpu);
    }
}

static inline void cpu_synchronize_post_init(CPUState *cpu)
{
    if (kvm_enabled()) {
        kvm_cpu_synchronize_post_init(cpu);
    }
}

int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg);
int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg);
void kvm_irqchip_release_virq(KVMState *s, int virq);

int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
                                   EventNotifier *rn, int virq);
int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq);
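
/*
 * Illustrative lifecycle (an assumption, condensed from how MSI-capable
 * devices typically wire an EventNotifier to the in-kernel irqchip):
 *
 *     virq = kvm_irqchip_add_msi_route(kvm_state, msg);
 *     if (virq >= 0) {
 *         kvm_irqchip_add_irqfd_notifier(kvm_state, n, NULL, virq);
 *         ...                 use the route; update it if the MSI is remapped
 *         kvm_irqchip_remove_irqfd_notifier(kvm_state, n, virq);
 *         kvm_irqchip_release_virq(kvm_state, virq);
 *     }
 */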

void kvm_pc_gsi_handler(void *opaque, int n, int level);
void kvm_pc_setup_irq_routing(bool pci_enabled);
void kvm_init_irq_routing(KVMState *s);
#endif