/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/fpu/xstate.h>
#include <asm/mce.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"

struct kvm_caps {
        /* control of guest tsc rate supported? */
        bool has_tsc_control;
        /* maximum supported tsc_khz for guests */
        u32 max_guest_tsc_khz;
        /* number of bits of the fractional part of the TSC scaling ratio */
        u8 tsc_scaling_ratio_frac_bits;
        /* maximum allowed value of TSC scaling ratio */
        u64 max_tsc_scaling_ratio;
        /* 1ull << kvm_caps.tsc_scaling_ratio_frac_bits */
        u64 default_tsc_scaling_ratio;
        /* bus lock detection supported? */
        bool has_bus_lock_exit;
        /* notify VM exit supported? */
        bool has_notify_vmexit;
        /* bit mask of VM types */
        u32 supported_vm_types;

        u64 supported_mce_cap;
        u64 supported_xcr0;
        u64 supported_xss;
        u64 supported_perf_cap;
};

struct kvm_host_values {
        /*
         * The host's raw MAXPHYADDR, i.e. the number of non-reserved physical
         * address bits irrespective of features that repurpose legal bits,
         * e.g. MKTME.
         */
        u8 maxphyaddr;

        u64 efer;
        u64 xcr0;
        u64 xss;
        u64 arch_capabilities;
};

void kvm_spurious_fault(void);

#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)        \
({                                                                      \
        bool failed = (consistency_check);                              \
        if (failed)                                                     \
                trace_kvm_nested_vmenter_failed(#consistency_check, 0); \
        failed;                                                         \
})
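/*
 * Illustrative usage (an assumption, not defined in this header): nested
 * VM-Enter code typically aliases the check to a short name and wraps each
 * individual consistency check in it, e.g.
 *
 *        #define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
 *        if (CC(vmcs12->guest_cr3 & ~guest_cr3_mask))
 *                return -EINVAL;
 *
 * (field and mask names here are illustrative), so a failing check is traced
 * with the stringified expression.
 */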

/*
 * The first...last VMX feature MSRs that are emulated by KVM.  This may or may
 * not cover all known VMX MSRs, as KVM doesn't emulate an MSR until there's an
 * associated feature that KVM supports for nested virtualization.
 */
#define KVM_FIRST_EMULATED_VMX_MSR      MSR_IA32_VMX_BASIC
#define KVM_LAST_EMULATED_VMX_MSR       MSR_IA32_VMX_VMFUNC

#define KVM_DEFAULT_PLE_GAP             128
#define KVM_VMX_DEFAULT_PLE_WINDOW      4096
#define KVM_DEFAULT_PLE_WINDOW_GROW     2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK   0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX  UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX  USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW      3000

static inline unsigned int __grow_ple_window(unsigned int val,
                unsigned int base, unsigned int modifier, unsigned int max)
{
        u64 ret = val;

        if (modifier < 1)
                return base;

        if (modifier < base)
                ret *= modifier;
        else
                ret += modifier;

        return min(ret, (u64)max);
}
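/*
 * Worked example (illustrative values, not mandated by this header): with
 * val = base = 4096, modifier = KVM_DEFAULT_PLE_WINDOW_GROW = 2 and
 * max = UINT_MAX, the window grows multiplicatively to 8192; a modifier that
 * is >= base is added instead, and the result is always clamped to max.
 */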

static inline unsigned int __shrink_ple_window(unsigned int val,
                unsigned int base, unsigned int modifier, unsigned int min)
{
        if (modifier < 1)
                return base;

        if (modifier < base)
                val /= modifier;
        else
                val -= modifier;

        return max(val, min);
}
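/*
 * Worked example (illustrative): shrinking a window of 8192 with a
 * multiplicative modifier of 2 halves it to 4096 (subject to the min clamp),
 * while a modifier of 0 (KVM_DEFAULT_PLE_WINDOW_SHRINK) resets the window
 * back to base.
 */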

#define MSR_IA32_CR_PAT_DEFAULT \
        PAT_VALUE(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC)
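/*
 * For reference (assuming the standard PAT_VALUE() encoding): this expands to
 * 0x0007040600070406, the architectural power-on default of IA32_PAT
 * (PA0 = WB, PA1 = WT, PA2 = UC-, PA3 = UC, mirrored in PA4..PA7).
 */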

void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);

static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.last_vmentry_cpu != -1;
}

static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.exception.pending ||
               vcpu->arch.exception_vmexit.pending ||
               kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
        vcpu->arch.exception.pending = false;
        vcpu->arch.exception.injected = false;
        vcpu->arch.exception_vmexit.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
                                       bool soft)
{
        vcpu->arch.interrupt.injected = true;
        vcpu->arch.interrupt.soft = soft;
        vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
        vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
               vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
        return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
        return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE);
}

static inline bool is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return !!(vcpu->arch.efer & EFER_LMA);
#else
        return false;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
        int cs_db, cs_l;

        WARN_ON_ONCE(vcpu->arch.guest_state_protected);

        if (!is_long_mode(vcpu))
                return false;
        kvm_x86_call(get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
        return cs_l;
}

static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
{
        /*
         * If running with protected guest state, the CS register is not
         * accessible.  The hypercall register values must have been
         * provided in 64-bit mode, so assume the guest is in 64-bit mode.
         */
        return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
}

static inline bool x86_exception_has_error_code(unsigned int vector)
{
        static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
                        BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
                        BIT(PF_VECTOR) | BIT(AC_VECTOR);

        return (1U << vector) & exception_has_error_code;
}
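/*
 * Example (illustrative): GP_VECTOR (13) is in the mask, so a #GP pushes an
 * error code; UD_VECTOR (6) is not, so a #UD does not.
 */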

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline bool is_pae(struct kvm_vcpu *vcpu)
{
        return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE);
}

static inline bool is_pse(struct kvm_vcpu *vcpu)
{
        return kvm_is_cr4_bit_set(vcpu, X86_CR4_PSE);
}

static inline bool is_paging(struct kvm_vcpu *vcpu)
{
        return likely(kvm_is_cr0_bit_set(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
        return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
        return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
        return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
}
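/*
 * Example (illustrative): with 4-level paging (48 virtual address bits),
 * 0xffff800000000000 is canonical (bits 63:47 all equal bit 47) while
 * 0x0000800000000000 is not, so is_noncanonical_address() returns true for
 * the latter.
 */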

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
                                        gva_t gva, gfn_t gfn, unsigned access)
{
        u64 gen = kvm_memslots(vcpu->kvm)->generation;

        if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
                return;

        /*
         * If this is a shadow nested page table, the "GVA" is
         * actually a nGPA.
         */
        vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
        vcpu->arch.mmio_access = access;
        vcpu->arch.mmio_gfn = gfn;
        vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
        if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
                return;

        vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
        if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
            vcpu->arch.mmio_gva == (gva & PAGE_MASK))
                return true;

        return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
            vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
                return true;

        return false;
}

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
        unsigned long val = kvm_register_read_raw(vcpu, reg);

        return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
                                      int reg, unsigned long val)
{
        if (!is_64_bit_mode(vcpu))
                val = (u32)val;
        return kvm_register_write_raw(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
        return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

u64 get_kvmclock_ns(struct kvm *kvm);
uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm);
bool kvm_get_monotonic_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp);

ce14e868 316int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
064aea77
NHE
317 gva_t addr, void *val, unsigned int bytes,
318 struct x86_exception *exception);
319
ce14e868 320int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
6a4d7550
NHE
321 gva_t addr, void *val, unsigned int bytes,
322 struct x86_exception *exception);
323
082d06ed
WL
324int handle_ud(struct kvm_vcpu *vcpu);
325
d4963e31
SC
326void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
327 struct kvm_queued_exception *ex);
da998b46 328
ff53604b
XG
329int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
330int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
52004014 331bool kvm_vector_hashing_enabled(void);
89786147 332void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
4aa2691d
WH
333int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
334 void *insn, int insn_len);
736c291c 335int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
c60658d1 336 int emulation_type, void *insn, int insn_len);
404d5d7b 337fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
4566654b 338
938c8745 339extern struct kvm_caps kvm_caps;
7974c064 340extern struct kvm_host_values kvm_host;
938c8745 341
4732f244 342extern bool enable_pmu;
4ff41732 343
/*
 * Get a filtered version of KVM's supported XCR0 that strips out dynamic
 * features for which the current process doesn't (yet) have permission to use.
 * This is intended to be used only when enumerating support to userspace,
 * e.g. in KVM_GET_SUPPORTED_CPUID and KVM_CAP_XSAVE2; it does NOT need to be
 * used to check/restrict guest behavior, as KVM rejects KVM_SET_CPUID{2} if
 * userspace attempts to enable unpermitted features.
 */
static inline u64 kvm_get_filtered_xcr0(void)
{
        u64 permitted_xcr0 = kvm_caps.supported_xcr0;

        BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA);

        if (permitted_xcr0 & XFEATURE_MASK_USER_DYNAMIC) {
                permitted_xcr0 &= xstate_get_guest_group_perm();

                /*
                 * Treat XTILE_CFG as unsupported if the current process isn't
                 * allowed to use XTILE_DATA, as attempting to set XTILE_CFG in
                 * XCR0 without setting XTILE_DATA is architecturally illegal.
                 */
                if (!(permitted_xcr0 & XFEATURE_MASK_XTILE_DATA))
                        permitted_xcr0 &= ~XFEATURE_MASK_XTILE_CFG;
        }
        return permitted_xcr0;
}
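/*
 * Illustrative scenario (an assumption about typical use): if supported_xcr0
 * advertises XTILE_DATA and XTILE_CFG but the calling process has not been
 * granted dynamic XSTATE (AMX) permission for guests, both XTILE bits are
 * stripped from the returned mask, so userspace enumeration never reports
 * features the process cannot actually enable.
 */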

static inline bool kvm_mpx_supported(void)
{
        return (kvm_caps.supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
                == (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern bool report_ignored_msrs;

extern bool eager_page_split;

static inline void kvm_pr_unimpl_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        if (report_ignored_msrs)
                vcpu_unimpl(vcpu, "Unhandled WRMSR(0x%x) = 0x%llx\n", msr, data);
}

static inline void kvm_pr_unimpl_rdmsr(struct kvm_vcpu *vcpu, u32 msr)
{
        if (report_ignored_msrs)
                vcpu_unimpl(vcpu, "Unhandled RDMSR(0x%x)\n", msr);
}

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
        return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
                                   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)                                 \
        ({                                                      \
                u32 __quot, __rem;                              \
                asm("divl %2" : "=a" (__quot), "=d" (__rem)     \
                        : "rm" (base), "0" (0), "1" ((u32) n)); \
                n = __quot;                                     \
                __rem;                                          \
        })
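/*
 * Worked example (illustrative): with n = 3 and base = 1000 the division is
 * (3 << 32) / 1000, so n becomes 12884901 and the macro evaluates to the
 * remainder, 888.
 */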

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
        return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
        return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
        return kvm->arch.pause_in_guest;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
        return kvm->arch.cstate_in_guest;
}

static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)
{
        return kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_ENABLED;
}

static __always_inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
                                                 enum kvm_intr_type intr)
{
        WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
}

static __always_inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
        WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
}

static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
}

static inline bool kvm_pat_valid(u64 data)
{
        if (data & 0xF8F8F8F8F8F8F8F8ull)
                return false;
        /* 0, 1, 4, 5, 6, 7 are valid values. */
        return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}
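/*
 * How the check works (illustrative): each PAT entry is one byte and must be
 * a memory type in {0, 1, 4, 5, 6, 7}; 2 and 3 are reserved.  The first test
 * rejects any byte above 7.  A reserved value (2 or 3) has bit 1 set without
 * bit 2, so OR-ing in (bit 1 << 1) changes the value and the equality fails,
 * e.g. 0x0007040600070406 passes while 0x0000000000000003 does not.
 */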

static inline bool kvm_dr7_valid(u64 data)
{
        /* Bits [63:32] are reserved */
        return !(data >> 32);
}

static inline bool kvm_dr6_valid(u64 data)
{
        /* Bits [63:32] are reserved */
        return !(data >> 32);
}

/*
 * Trigger a machine check on the host.  We assume all the MSRs are already set
 * up by the CPU and that we still run on the same CPU the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to always be treated like user space, no matter what context
 * it used internally.
 */
static inline void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE)
        struct pt_regs regs = {
                .cs = 3, /* Fake ring 3 no matter what the guest ran on */
                .flags = X86_EFLAGS_IF,
        };

        do_machine_check(&regs);
#endif
}

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
                              struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

/*
 * Internal error codes used to indicate that MSR emulation encountered an
 * error that should result in #GP in the guest, unless userspace handles it.
 */
#define KVM_MSR_RET_INVALID     2       /* in-kernel MSR emulation #GP condition */
#define KVM_MSR_RET_FILTERED    3       /* #GP due to userspace MSR filter */

#define __cr4_reserved_bits(__cpu_has, __c)             \
({                                                      \
        u64 __reserved_bits = CR4_RESERVED_BITS;        \
                                                        \
        if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
                __reserved_bits |= X86_CR4_OSXSAVE;     \
        if (!__cpu_has(__c, X86_FEATURE_SMEP))          \
                __reserved_bits |= X86_CR4_SMEP;        \
        if (!__cpu_has(__c, X86_FEATURE_SMAP))          \
                __reserved_bits |= X86_CR4_SMAP;        \
        if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))      \
                __reserved_bits |= X86_CR4_FSGSBASE;    \
        if (!__cpu_has(__c, X86_FEATURE_PKU))           \
                __reserved_bits |= X86_CR4_PKE;         \
        if (!__cpu_has(__c, X86_FEATURE_LA57))          \
                __reserved_bits |= X86_CR4_LA57;        \
        if (!__cpu_has(__c, X86_FEATURE_UMIP))          \
                __reserved_bits |= X86_CR4_UMIP;        \
        if (!__cpu_has(__c, X86_FEATURE_VMX))           \
                __reserved_bits |= X86_CR4_VMXE;        \
        if (!__cpu_has(__c, X86_FEATURE_PCID))          \
                __reserved_bits |= X86_CR4_PCIDE;       \
        if (!__cpu_has(__c, X86_FEATURE_LAM))           \
                __reserved_bits |= X86_CR4_LAM_SUP;     \
        __reserved_bits;                                \
})
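/*
 * Illustrative usage (an assumption based on how the two parameters pair up):
 * the macro can be evaluated once against host capabilities, e.g.
 * __cr4_reserved_bits(cpu_has, c), and once against a guest's CPUID, e.g.
 * __cr4_reserved_bits(guest_cpuid_has, vcpu), so a CR4 bit is treated as
 * reserved whenever the corresponding feature is absent on that side.
 */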

int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
                          void *dst);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
                         void *dst);
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
                         unsigned int port, void *data, unsigned int count,
                         int in);

#endif