// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/fpu/xstate.h>
/*
 * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
 * aligned to sizeof(unsigned long) because it's not accessed via bitops.
 */
u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
EXPORT_SYMBOL_GPL(kvm_cpu_caps);
static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XFEATURE_MASK_EXTEND;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			u32 eax, ebx, ecx, edx, offset;
			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
			/* Standard format: offset from EBX; compacted: packed. */
			offset = compacted ? ret : ebx;
			ret = max(ret, offset + eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}
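/*
 * Illustrative sketch (not part of KVM): a standalone userspace analogue of
 * xstate_required_size() built on GCC's <cpuid.h>.  It shows the same logic:
 * in the standard XSAVE format each component's offset comes from
 * CPUID.0xD.i:EBX, while in the compacted format components are packed
 * back-to-back so the running size is itself the next offset.  Like the
 * helper above, it ignores the 64-byte alignment flag in ECX[1] that newer
 * code honors.  XSAVE_LEGACY_PLUS_HDR and xstate_size() are names made up
 * for the sketch.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <cpuid.h>

/* 512-byte legacy XSAVE region + 64-byte XSAVE header. */
#define XSAVE_LEGACY_PLUS_HDR	576

static uint32_t xstate_size(uint64_t xstate_bv, int compacted)
{
	uint32_t ret = XSAVE_LEGACY_PLUS_HDR;
	int bit;

	for (bit = 2; bit < 64; bit++) {	/* bits 0/1 (FP/SSE) are legacy */
		uint32_t eax, ebx, ecx, edx, offset;

		if (!(xstate_bv & (1ULL << bit)))
			continue;
		/* EAX = component size, EBX = offset in the standard format. */
		__cpuid_count(0xD, bit, eax, ebx, ecx, edx);
		offset = compacted ? ret : ebx;
		if (offset + eax > ret)
			ret = offset + eax;
	}
	return ret;
}

int main(void)
{
	/* XSTATE_BV = x87 | SSE | AVX. */
	printf("standard:  %u bytes\n", xstate_size(0x7, 0));
	printf("compacted: %u bytes\n", xstate_size(0x7, 1));
	return 0;
}
#endif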
/*
 * This one is tied to SSB in the user API, and not
 * visible in /proc/cpuinfo.
 */
#define KVM_X86_FEATURE_PSFD	(13*32+28) /* Predictive Store Forwarding Disable */

#define F feature_bit
/* Scattered features: advertise the bit only if the host kernel has it set. */
#define SF(name) (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0)
static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
	struct kvm_cpuid_entry2 *entries, int nent, u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *e;
	int i;

	for (i = 0; i < nent; i++) {
		e = &entries[i];

		/* The index is ignored unless the entry flags it significant. */
		if (e->function == function &&
		    (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index))
			return e;
	}

	return NULL;
}
static int kvm_check_cpuid(struct kvm_cpuid_entry2 *entries, int nent)
{
	struct kvm_cpuid_entry2 *best;

	/*
	 * The existing code assumes the virtual address is 48-bit or 57-bit
	 * in the canonical address checks; reject anything else.
	 */
	best = cpuid_entry2_find(entries, nent, 0x80000008, 0);
	if (best) {
		int vaddr_bits = (best->eax & 0xff00) >> 8;

		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
			return -EINVAL;
	}

	return 0;
}
static void kvm_update_kvm_cpuid_base(struct kvm_vcpu *vcpu)
{
	u32 function;
	struct kvm_cpuid_entry2 *entry;

	vcpu->arch.kvm_cpuid_base = 0;

	for_each_possible_hypervisor_cpuid_base(function) {
		entry = kvm_find_cpuid_entry(vcpu, function, 0);

		if (entry) {
			u32 signature[3];

			signature[0] = entry->ebx;
			signature[1] = entry->ecx;
			signature[2] = entry->edx;

			BUILD_BUG_ON(sizeof(signature) > sizeof(KVM_SIGNATURE));
			if (!memcmp(signature, KVM_SIGNATURE, sizeof(signature))) {
				vcpu->arch.kvm_cpuid_base = function;
				break;
			}
		}
	}
}
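/*
 * Illustrative sketch (not part of KVM): the guest-side counterpart of the
 * scan above.  A guest walks the hypervisor CPUID range in 0x100 steps and
 * compares EBX/ECX/EDX of each candidate base leaf against the 12-byte
 * signature, which is "KVMKVMKVM\0\0\0" for KVM.  Userspace sketch meant to
 * run inside a guest; find_kvm_base() is a name made up for the example.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <cpuid.h>

static uint32_t find_kvm_base(void)
{
	uint32_t base;

	/* Hypervisor leaves live in 0x40000000 - 0x4fffffff, 0x100 apart. */
	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		uint32_t eax, sig[3];

		__cpuid_count(base, 0, eax, sig[0], sig[1], sig[2]);
		if (!memcmp(sig, "KVMKVMKVM\0\0\0", 12))
			return base;
	}
	return 0;
}

int main(void)
{
	uint32_t base = find_kvm_base();

	if (base)
		printf("KVM signature found at base 0x%x\n", base);
	else
		printf("not running on KVM\n");
	return 0;
}
#endif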
static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
{
	u32 base = vcpu->arch.kvm_cpuid_base;

	if (!base)
		return NULL;

	return kvm_find_cpuid_entry(vcpu, base | KVM_CPUID_FEATURES, 0);
}
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best = kvm_find_kvm_cpuid_features(vcpu);

	/*
	 * Save the feature bitmap to avoid a CPUID lookup for every PV
	 * operation.
	 */
	if (best)
		vcpu->arch.pv_cpuid.features = best->eax;
}
void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (best) {
		/* Update OSXSAVE bit */
		if (boot_cpu_has(X86_FEATURE_XSAVE))
			cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
					   kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));

		cpuid_entry_change(best, X86_FEATURE_APIC,
				   vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
	}

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
		cpuid_entry_change(best, X86_FEATURE_OSPKE,
				   kvm_read_cr4_bits(vcpu, X86_CR4_PKE));

	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (best)
		best->ebx = xstate_required_size(vcpu->arch.xcr0, false);

	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
	if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
		     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	best = kvm_find_kvm_cpuid_features(vcpu);
	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
	    (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);

	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
		best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
		if (best)
			cpuid_entry_change(best, X86_FEATURE_MWAIT,
					   vcpu->arch.ia32_misc_enable_msr &
					   MSR_IA32_MISC_ENABLE_MWAIT);
	}
}
EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (best && apic) {
		if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;

		kvm_apic_set_version(vcpu);
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (!best)
		vcpu->arch.guest_supported_xcr0 = 0;
	else
		vcpu->arch.guest_supported_xcr0 =
			(best->eax | ((u64)best->edx << 32)) & supported_xcr0;

	/*
	 * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate
	 * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's
	 * requested XCR0 value.  The enclave's XFRM must be a subset of XCR0
	 * at the time of EENTER, thus adjust the allowed XFRM by the guest's
	 * supported XCR0.  Similar to XCR0 handling, FP and SSE are forced to
	 * '1' even on CPUs that don't support XSAVE.
	 */
	best = kvm_find_cpuid_entry(vcpu, 0x12, 0x1);
	if (best) {
		best->ecx &= vcpu->arch.guest_supported_xcr0 & 0xffffffff;
		best->edx &= vcpu->arch.guest_supported_xcr0 >> 32;
		best->ecx |= XFEATURE_MASK_FPSSE;
	}
	kvm_update_pv_runtime(vcpu);

	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
	vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);

	kvm_pmu_refresh(vcpu);
	vcpu->arch.cr4_guest_rsvd_bits =
		__cr4_reserved_bits(guest_cpuid_has, vcpu);

	kvm_hv_set_cpuid(vcpu);

	/* Invoke the vendor callback only after the above state is updated. */
	static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);

	/*
	 * Update the MMU last, as it needs to account for any vendor-specific
	 * adjustments to the reserved GPA bits made by the callback above.
	 */
	kvm_mmu_after_set_cpuid(vcpu);
}
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;	/* legacy default when the leaf is absent */
}
/*
 * This "raw" version returns the reserved GPA bits without any adjustments for
 * encryption technologies that usurp bits.  The raw mask should be used if and
 * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs.
 */
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
{
	return rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
}
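/*
 * Illustrative sketch (not part of KVM): deriving MAXPHYADDR the way
 * cpuid_query_maxphyaddr() does and expanding rsvd_bits(maxphyaddr, 63) by
 * hand.  E.g. MAXPHYADDR = 48 yields 0xffff000000000000, i.e. bits 63:48 are
 * reserved in any legal GPA.  Userspace sketch using GCC's <cpuid.h>:
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <cpuid.h>

int main(void)
{
	uint32_t eax, ebx, ecx, edx;
	unsigned int maxphyaddr = 36;	/* fallback, as in the function above */
	uint64_t rsvd;

	__cpuid_count(0x80000000, 0, eax, ebx, ecx, edx);
	if (eax >= 0x80000008) {
		__cpuid_count(0x80000008, 0, eax, ebx, ecx, edx);
		maxphyaddr = eax & 0xff;	/* EAX[7:0] = MAXPHYADDR */
	}

	/* rsvd_bits(s, 63) sets every bit from s through 63. */
	rsvd = ~0ULL << maxphyaddr;
	printf("MAXPHYADDR %u, raw reserved GPA mask 0x%016llx\n",
	       maxphyaddr, (unsigned long long)rsvd);
	return 0;
}
#endif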
static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
			 int nent)
{
	int r;

	r = kvm_check_cpuid(e2, nent);
	if (r)
		return r;

	kvfree(vcpu->arch.cpuid_entries);
	vcpu->arch.cpuid_entries = e2;
	vcpu->arch.cpuid_nent = nent;

	kvm_update_kvm_cpuid_base(vcpu);
	kvm_update_cpuid_runtime(vcpu);
	kvm_vcpu_after_set_cpuid(vcpu);

	return 0;
}
/* Legacy ioctl: accept the old kvm_cpuid_entry layout from userspace. */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *e = NULL;
	struct kvm_cpuid_entry2 *e2 = NULL;

	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		return -E2BIG;

	if (cpuid->nent) {
		e = vmemdup_user(entries, array_size(sizeof(*e), cpuid->nent));
		if (IS_ERR(e))
			return PTR_ERR(e);

		e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT);
		if (!e2) {
			r = -ENOMEM;
			goto out_free_cpuid;
		}
	}
	for (i = 0; i < cpuid->nent; i++) {
		e2[i].function = e[i].function;
		e2[i].eax = e[i].eax;
		e2[i].ebx = e[i].ebx;
		e2[i].ecx = e[i].ecx;
		e2[i].edx = e[i].edx;
		e2[i].index = 0;
		e2[i].flags = 0;
		e2[i].padding[0] = 0;
		e2[i].padding[1] = 0;
		e2[i].padding[2] = 0;
	}

	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
	if (r)
		kvfree(e2);

out_free_cpuid:
	kvfree(e);

	return r;
}
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *e2 = NULL;
	int r;

	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		return -E2BIG;

	if (cpuid->nent) {
		e2 = vmemdup_user(entries, array_size(sizeof(*e2), cpuid->nent));
		if (IS_ERR(e2))
			return PTR_ERR(e2);
	}

	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
	if (r)
		kvfree(e2);

	return r;
}
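/*
 * Illustrative sketch (not part of KVM): the userspace half of this ioctl.
 * A VMM hands KVM_SET_CPUID2 a struct kvm_cpuid2 header followed by nent
 * entries, here a single hand-rolled leaf 0.  set_minimal_cpuid() and its
 * vcpu_fd parameter (a KVM_CREATE_VCPU fd) are names made up for the sketch;
 * error handling is elided.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_minimal_cpuid(int vcpu_fd)
{
	struct {
		struct kvm_cpuid2 hdr;
		struct kvm_cpuid_entry2 entries[1];
	} cpuid;

	memset(&cpuid, 0, sizeof(cpuid));
	cpuid.hdr.nent = 1;
	cpuid.entries[0].function = 0;
	cpuid.entries[0].eax = 1;		/* max basic leaf */
	cpuid.entries[0].ebx = 0x756e6547;	/* "Genu" */
	cpuid.entries[0].edx = 0x49656e69;	/* "ineI" */
	cpuid.entries[0].ecx = 0x6c65746e;	/* "ntel" */

	return ioctl(vcpu_fd, KVM_SET_CPUID2, &cpuid);
}
#endif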
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	r = 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}
/* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */
static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
	struct kvm_cpuid_entry2 entry;

	reverse_cpuid_check(leaf);

	cpuid_count(cpuid.function, cpuid.index,
		    &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);

	kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
}

static __always_inline
void kvm_cpu_cap_init_scattered(enum kvm_only_cpuid_leafs leaf, u32 mask)
{
	/* Use kvm_cpu_cap_mask for non-scattered leafs. */
	BUILD_BUG_ON(leaf < NCAPINTS);

	kvm_cpu_caps[leaf] = mask;

	__kvm_cpu_cap_mask(leaf);
}

static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
{
	/* Use kvm_cpu_cap_init_scattered for scattered leafs. */
	BUILD_BUG_ON(leaf >= NCAPINTS);

	kvm_cpu_caps[leaf] &= mask;

	__kvm_cpu_cap_mask(leaf);
}
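/*
 * Illustrative sketch (not part of KVM): the reverse-CPUID idea the helpers
 * above depend on.  Each feature word maps to a (function, index, register)
 * triple; masking a word means running CPUID for that triple and AND-ing the
 * selected register into the software-defined mask, which is exactly the
 * shape of __kvm_cpu_cap_mask().  The table and names below are made up for
 * the sketch and cover only two words.
 */
#if 0
#include <stdint.h>
#include <cpuid.h>

enum reg { REG_EAX, REG_EBX, REG_ECX, REG_EDX };

struct leaf_map {
	uint32_t function;
	uint32_t index;
	enum reg reg;
};

/* word 0 ~ CPUID_1_EDX, word 1 ~ CPUID_7_0_EBX (tiny subset of KVM's table) */
static const struct leaf_map table[] = {
	{ 0x1, 0, REG_EDX },
	{ 0x7, 0, REG_EBX },
};

static uint32_t mask_word(unsigned int word, uint32_t sw_mask)
{
	uint32_t r[4];

	__cpuid_count(table[word].function, table[word].index,
		      r[REG_EAX], r[REG_EBX], r[REG_ECX], r[REG_EDX]);
	return sw_mask & r[table[word].reg];	/* hardware AND policy */
}
#endif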
void kvm_set_cpu_caps(void)
{
#ifdef CONFIG_X86_64
	unsigned int f_gbpages = F(GBPAGES);
	unsigned int f_lm = F(LM);
#else
	unsigned int f_gbpages = 0;
	unsigned int f_lm = 0;
#endif
	memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));

	BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) >
		     sizeof(boot_cpu_data.x86_capability));
	memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
	       sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)));

	kvm_cpu_cap_mask(CPUID_1_ECX,
		/*
		 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
		 * advertised to guests via CPUID!
		 */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND)
	);
	/* KVM emulates x2apic in software irrespective of host support. */
	kvm_cpu_cap_set(X86_FEATURE_X2APIC);
	kvm_cpu_cap_mask(CPUID_1_EDX,
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */
	);
	kvm_cpu_cap_mask(CPUID_7_0_EBX,
		F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
		F(BMI2) | F(ERMS) | F(INVPCID) | F(RTM) | 0 /*MPX*/ | F(RDSEED) |
		F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
		F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
		F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | 0 /*INTEL_PT*/
	);
	kvm_cpu_cap_mask(CPUID_7_ECX,
		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
		F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ |
		F(SGX_LC) | F(BUS_LOCK_DETECT)
	);
	/* Set LA57 based on hardware capability. */
	if (cpuid_ecx(7) & F(LA57))
		kvm_cpu_cap_set(X86_FEATURE_LA57);

	/*
	 * PKU not yet implemented for shadow paging and requires OSPKE
	 * to be set on the host.  Clear it if that is not the case.
	 */
	if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
		kvm_cpu_cap_clear(X86_FEATURE_PKU);
	kvm_cpu_cap_mask(CPUID_7_EDX,
		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
		F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
		F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16)
	);

	/* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
	kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST);
	kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES);

	if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP);
	if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);
	kvm_cpu_cap_mask(CPUID_7_1_EAX,
		F(AVX_VNNI) | F(AVX512_BF16)
	);

	kvm_cpu_cap_mask(CPUID_D_1_EAX,
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES)
	);

	kvm_cpu_cap_init_scattered(CPUID_12_EAX,
		SF(SGX1) | SF(SGX2)
	);
	kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
		F(TOPOEXT) | F(PERFCTR_CORE)
	);

	kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
	);

	if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64))
		kvm_cpu_cap_set(X86_FEATURE_GBPAGES);

	kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
		F(CLZERO) | F(XSAVEERPTR) |
		F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
		F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) |
		__feature_bit(KVM_X86_FEATURE_PSFD)
	);
	/*
	 * AMD has separate bits for each SPEC_CTRL bit.
	 * arch/x86/kernel/cpu/bugs.c is kind enough to
	 * record that in cpufeatures so use them.
	 */
	if (boot_cpu_has(X86_FEATURE_IBPB))
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB);
	if (boot_cpu_has(X86_FEATURE_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP);
	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD);
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO);

	/*
	 * The preference is to use the SPEC_CTRL MSR instead of the
	 * VIRT_SPEC MSR.
	 */
	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
	/*
	 * Hide all SVM features by default, SVM will set the cap bits for
	 * features it emulates and/or exposes for L1.
	 */
	kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);

	kvm_cpu_cap_mask(CPUID_8000_001F_EAX,
		0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) |
		F(SME_COHERENT)
	);

	kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN)
	);
	/*
	 * Hide RDTSCP and RDPID if either feature is reported as supported but
	 * probing MSR_TSC_AUX failed.  This is purely a sanity check and
	 * should never happen, but the guest will likely crash if RDTSCP or
	 * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in
	 * the past.  For example, the sanity check may fire if this instance
	 * of KVM is running as L1 on top of an older, broken KVM.
	 */
	if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) ||
		     kvm_cpu_cap_has(X86_FEATURE_RDPID)) &&
		    !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) {
		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
		kvm_cpu_cap_clear(X86_FEATURE_RDPID);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);
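/*
 * Illustrative sketch (not part of KVM): how the caps computed above reach
 * userspace.  kvm_set_cpu_caps() feeds KVM_GET_SUPPORTED_CPUID, which a VMM
 * issues on the /dev/kvm fd, growing the entry array until the ioctl stops
 * failing with E2BIG.  Error handling is abbreviated.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	struct kvm_cpuid2 *cpuid;
	unsigned int i, nent = 64;

	for (;;) {
		cpuid = calloc(1, sizeof(*cpuid) +
				  nent * sizeof(struct kvm_cpuid_entry2));
		cpuid->nent = nent;
		if (!ioctl(kvm, KVM_GET_SUPPORTED_CPUID, cpuid))
			break;
		free(cpuid);
		if (errno != E2BIG)	/* E2BIG: array too small, grow it */
			return 1;
		nent *= 2;
	}

	for (i = 0; i < cpuid->nent; i++)
		printf("0x%08x.%u: %08x %08x %08x %08x\n",
		       cpuid->entries[i].function, cpuid->entries[i].index,
		       cpuid->entries[i].eax, cpuid->entries[i].ebx,
		       cpuid->entries[i].ecx, cpuid->entries[i].edx);
	return 0;
}
#endif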
struct kvm_cpuid_array {
	struct kvm_cpuid_entry2 *entries;
	int maxnent;
	int nent;
};

static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
					      u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *entry;

	if (array->nent >= array->maxnent)
		return NULL;

	entry = &array->entries[array->nent++];

	memset(entry, 0, sizeof(*entry));
	entry->function = function;
	entry->index = index;

	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);

	/* Flag the leaves whose sub-leaf (index) is architecturally meaningful. */
	switch (function) {
	case 4: case 7: case 0xb: case 0xd: case 0xf: case 0x10:
	case 0x12: case 0x14: case 0x17: case 0x18: case 0x1f:
	case 0x8000001d:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		break;
	}

	return entry;
}
static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
{
	struct kvm_cpuid_entry2 *entry;

	if (array->nent >= array->maxnent)
		return -E2BIG;

	entry = &array->entries[array->nent];
	entry->function = func;
	entry->index = 0;
	entry->flags = 0;

	switch (func) {
	case 0:
		entry->eax = 7;
		++array->nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++array->nent;
		break;
	case 7:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		entry->eax = 0;
		if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
			entry->ecx = F(RDPID);
		++array->nent;
		break;
	default:
		break;
	}

	return 0;
}
static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
{
	struct kvm_cpuid_entry2 *entry;
	int r, i, max_idx;

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	entry = do_host_cpuid(array, function, 0);
	if (!entry)
		goto out;

	switch (function) {
	case 0:
		/* Limited to the highest leaf implemented in KVM. */
		entry->eax = min(entry->eax, 0x1fU);
		break;
	case 1:
		cpuid_entry_override(entry, CPUID_1_EDX);
		cpuid_entry_override(entry, CPUID_1_ECX);
		break;
	case 2:
		/*
		 * On ancient CPUs, function 2 entries are STATEFUL.  That is,
		 * CPUID(function=2, index=0) may return different results each
		 * time, with the least-significant byte in EAX enumerating the
		 * number of times software should do CPUID(2, 0).
		 *
		 * Modern CPUs, i.e. every CPU KVM has *ever* run on, are less
		 * idiotic.  Intel's SDM states that EAX & 0xff "will always
		 * return 01H. Software should ignore this value and not
		 * interpret it as an informational descriptor", while AMD's
		 * APM states that CPUID(2) is reserved.
		 *
		 * WARN if a frankenstein CPU that supports virtualization and
		 * a stateful CPUID.0x2 is encountered.
		 */
		WARN_ON_ONCE((entry->eax & 0xff) > 1);
		break;
	/* functions 4 and 0x8000001d have additional index. */
	case 4:
	case 0x8000001d:
		/*
		 * Read entries until the cache type in the previous entry is
		 * zero, i.e. indicates an invalid entry.
		 */
		for (i = 1; entry->eax & 0x1f; ++i) {
			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;
		}
		break;
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	/* function 7 has additional index. */
	case 7:
		entry->eax = min(entry->eax, 1u);
		cpuid_entry_override(entry, CPUID_7_0_EBX);
		cpuid_entry_override(entry, CPUID_7_ECX);
		cpuid_entry_override(entry, CPUID_7_EDX);

		/* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */
		if (entry->eax == 1) {
			entry = do_host_cpuid(array, function, 1);
			if (!entry)
				goto out;

			cpuid_entry_override(entry, CPUID_7_1_EAX);
			entry->ebx = 0;
			entry->ecx = 0;
			entry->edx = 0;
		}
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		struct x86_pmu_capability cap;
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		perf_get_x86_pmu_capability(&cap);

		/*
		 * Only support guest architectural pmu on a host
		 * with architectural pmu.
		 */
		if (!cap.version)
			memset(&cap, 0, sizeof(cap));

		eax.split.version_id = min(cap.version, 2);
		eax.split.num_counters = cap.num_counters_gp;
		eax.split.bit_width = cap.bit_width_gp;
		eax.split.mask_length = cap.events_mask_len;

		edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS);
		edx.split.bit_width_fixed = cap.bit_width_fixed;
		if (cap.version)
			edx.split.anythread_deprecated = 1;
		edx.split.reserved1 = 0;
		edx.split.reserved2 = 0;

		entry->eax = eax.full;
		entry->ebx = cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
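	/*
	 * Illustrative sketch (not part of KVM): decoding the leaf 0xA layout
	 * built above from the guest side.  The same bitfields that
	 * cpuid10_eax/cpuid10_edx name can be extracted by hand; userspace
	 * sketch using GCC's <cpuid.h>:
	 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <cpuid.h>

int main(void)
{
	uint32_t eax, ebx, ecx, edx;

	__cpuid_count(0xa, 0, eax, ebx, ecx, edx);
	printf("PMU version:      %u\n", eax & 0xff);		/* EAX[7:0]   */
	printf("GP counters:      %u\n", (eax >> 8) & 0xff);	/* EAX[15:8]  */
	printf("GP counter width: %u\n", (eax >> 16) & 0xff);	/* EAX[23:16] */
	printf("fixed counters:   %u\n", edx & 0x1f);		/* EDX[4:0]   */
	printf("fixed ctr width:  %u\n", (edx >> 5) & 0xff);	/* EDX[12:5]  */
	return 0;
}
#endif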
	/*
	 * Per Intel's SDM, the 0x1f is a superset of 0xb,
	 * thus they can be handled by common code.
	 */
	case 0x1f:
	case 0xb:
		/*
		 * Populate entries until the level type (ECX[15:8]) of the
		 * previous entry is zero.  Note, CPUID EAX.{0x1f,0xb}.0 is
		 * the starting entry, filled by the primary do_host_cpuid().
		 */
		for (i = 1; entry->ecx & 0xff00; ++i) {
			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;
		}
		break;
	case 0xd:
		entry->eax &= supported_xcr0;
		entry->ebx = xstate_required_size(supported_xcr0, false);
		entry->ecx = entry->ebx;
		entry->edx &= supported_xcr0 >> 32;
		if (!supported_xcr0)
			break;

		entry = do_host_cpuid(array, function, 1);
		if (!entry)
			goto out;

		cpuid_entry_override(entry, CPUID_D_1_EAX);
		if (entry->eax & (F(XSAVES)|F(XSAVEC)))
			entry->ebx = xstate_required_size(supported_xcr0 | supported_xss,
							  true);
		else {
			WARN_ON_ONCE(supported_xss != 0);
			entry->ebx = 0;
		}
		entry->ecx &= supported_xss;
		entry->edx &= supported_xss >> 32;

		for (i = 2; i < 64; ++i) {
			bool s_state;
			if (supported_xcr0 & BIT_ULL(i))
				s_state = false;
			else if (supported_xss & BIT_ULL(i))
				s_state = true;
			else
				continue;

			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;

			/*
			 * The supported check above should have filtered out
			 * invalid sub-leafs.  Only valid sub-leafs should
			 * reach this point, and they should have a non-zero
			 * save state size.  Furthermore, check whether the
			 * processor agrees with supported_xcr0/supported_xss
			 * on whether this is an XCR0- or IA32_XSS-managed area.
			 */
			if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
				--array->nent;
				continue;
			}
			entry->edx = 0;
		}
		break;
	case 0x12:
		/* Intel SGX */
		if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		/*
		 * Index 0: Sub-features, MISCSELECT (a.k.a extended features)
		 * and max enclave sizes.  The SGX sub-features and MISCSELECT
		 * are restricted by kernel and KVM capabilities (like most
		 * feature flags), while enclave size is unrestricted.
		 */
		cpuid_entry_override(entry, CPUID_12_EAX);
		entry->ebx &= SGX_MISC_EXINFO;

		entry = do_host_cpuid(array, function, 1);
		if (!entry)
			goto out;

		/*
		 * Index 1: SECS.ATTRIBUTES.  ATTRIBUTES are restricted a la
		 * feature flags.  Advertise all supported flags, including
		 * privileged attributes that require explicit opt-in from
		 * userspace.  ATTRIBUTES.XFRM is not adjusted as userspace is
		 * expected to derive it from supported XCR0.
		 */
		entry->eax &= SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT |
			      SGX_ATTR_PROVISIONKEY | SGX_ATTR_EINITTOKENKEY |
			      SGX_ATTR_KSS;
		entry->ebx &= 0;
		break;
	case 0x14:
		/* Intel PT */
		if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
			if (!do_host_cpuid(array, function, i))
				goto out;
		}
		break;
	case KVM_CPUID_SIGNATURE: {
		const u32 *sigptr = (const u32 *)KVM_SIGNATURE;
		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT) |
			     (1 << KVM_FEATURE_PV_TLB_FLUSH) |
			     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
			     (1 << KVM_FEATURE_PV_SEND_IPI) |
			     (1 << KVM_FEATURE_POLL_CONTROL) |
			     (1 << KVM_FEATURE_PV_SCHED_YIELD) |
			     (1 << KVM_FEATURE_ASYNC_PF_INT);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
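	/*
	 * Illustrative sketch (not part of KVM): consuming the feature word
	 * built above from inside a guest.  EAX of leaf base + 0x1 (base is
	 * typically 0x40000000, and KVM_CPUID_FEATURES is defined as
	 * 0x40000001) carries the KVM_FEATURE_* bits.  Sketch assuming the
	 * default base; pair it with the signature scan shown earlier:
	 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <cpuid.h>

#define KVM_FEATURE_CLOCKSOURCE2	3	/* from uapi asm/kvm_para.h */

int main(void)
{
	uint32_t eax, ebx, ecx, edx;

	__cpuid_count(0x40000001, 0, eax, ebx, ecx, edx);
	printf("kvmclock (CLOCKSOURCE2): %s\n",
	       (eax & (1u << KVM_FEATURE_CLOCKSOURCE2)) ? "yes" : "no");
	return 0;
}
#endif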
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001f);
		break;
	case 0x80000001:
		cpuid_entry_override(entry, CPUID_8000_0001_EDX);
		cpuid_entry_override(entry, CPUID_8000_0001_ECX);
		break;
	case 0x80000006:
		/* L2 cache and TLB: pass through host info. */
		break;
	case 0x80000007: /* Advanced power management */
		/* invariant TSC is CPUID.80000007H:EDX[8] */
		entry->edx &= (1 << 8);
		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;

		/*
		 * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as
		 * the guest operates in the same PA space as the host, i.e.
		 * reductions in MAXPHYADDR for memory encryption affect shadow
		 * paging, too.
		 *
		 * If TDP is enabled but an explicit guest MAXPHYADDR is not
		 * provided, use the raw bare metal MAXPHYADDR as reductions to
		 * the HPAs do not affect GPAs.
		 */
		if (!tdp_enabled)
			g_phys_as = boot_cpu_data.x86_phys_bits;
		else if (!g_phys_as)
			g_phys_as = phys_as;

		entry->eax = g_phys_as | (virt_as << 8);
		entry->edx = 0;
		cpuid_entry_override(entry, CPUID_8000_0008_EBX);
		break;
	}
	case 0x8000000A:
		if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}
		entry->eax = 1; /* SVM revision 1 */
		entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
				   ASID emulation to nested SVM */
		entry->ecx = 0; /* Reserved */
		cpuid_entry_override(entry, CPUID_8000_000A_EDX);
		break;
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
	case 0x8000001e:
		break;
	case 0x8000001F:
		if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		} else {
			cpuid_entry_override(entry, CPUID_8000_001F_EAX);

			/*
			 * Enumerate '0' for "PA bits reduction", the adjusted
			 * MAXPHYADDR is enumerated directly (see 0x80000008).
			 */
			entry->ebx &= ~GENMASK(11, 6);
		}
		break;
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 for now. */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		cpuid_entry_override(entry, CPUID_C000_0001_EDX);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	r = 0;

out:
	put_cpu();

	return r;
}
static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			 unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_func_emulated(array, func);

	return __do_cpuid_func(array, func);
}
#define CENTAUR_CPUID_SIGNATURE 0xC0000000

static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			  unsigned int type)
{
	u32 limit;
	int r;

	if (func == CENTAUR_CPUID_SIGNATURE &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
		return 0;

	r = do_cpuid_func(array, func, type);
	if (r)
		return r;

	limit = array->entries[array->nent - 1].eax;
	for (func = func + 1; func <= limit; ++func) {
		r = do_cpuid_func(array, func, type);
		if (r)
			break;
	}

	return r;
}
static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID, so we have
	 * to content ourselves with checking only the emulated side. /me
	 * sheds a tear.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}
	return false;
}
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	static const u32 funcs[] = {
		0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE,
	};

	struct kvm_cpuid_array array = {
		.nent = 0,
	};
	int r, i;

	if (cpuid->nent < 1)
		return -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	array.entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2),
					   cpuid->nent));
	if (!array.entries)
		return -ENOMEM;

	array.maxnent = cpuid->nent;

	for (i = 0; i < ARRAY_SIZE(funcs); i++) {
		r = get_cpuid_func(&array, funcs[i], type);
		if (r)
			goto out_free;
	}
	cpuid->nent = array.nent;

	if (copy_to_user(entries, array.entries,
			 array.nent * sizeof(struct kvm_cpuid_entry2)))
		r = -EFAULT;

out_free:
	vfree(array.entries);
	return r;
}
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
				 function, index);
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
/*
 * Intel CPUID semantics treats any query for an out-of-range leaf as if the
 * highest basic leaf (i.e. CPUID.0H:EAX) were requested.  AMD CPUID semantics
 * returns all zeroes for any undefined leaf, whether or not the leaf is in
 * range.  Centaur/VIA follows Intel semantics.
 *
 * A leaf is considered out-of-range if its function is higher than the maximum
 * supported leaf of its associated class or if its associated class does not
 * exist.
 *
 * There are four primary classes to be considered, with their respective
 * ranges described as "<base> - <top>[,<base2> - <top2>]" inclusive.  A primary
 * class exists if a guest CPUID entry for its <base> leaf exists.  For a given
 * class, CPUID.<base>.EAX contains the max supported leaf for the class.
 *
 *  - Basic:      0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
 *  - Hypervisor: 0x40000000 - 0x4fffffff
 *  - Extended:   0x80000000 - 0xbfffffff
 *  - Centaur:    0xc0000000 - 0xcfffffff
 *
 * The Hypervisor class is further subdivided into sub-classes that each act as
 * their own independent class associated with a 0x100 byte range.  E.g. if Qemu
 * is advertising support for both HyperV and KVM, the resulting Hypervisor
 * CPUID sub-classes are:
 *
 *  - HyperV:     0x40000000 - 0x400000ff
 *  - KVM:        0x40000100 - 0x400001ff
 */
static struct kvm_cpuid_entry2 *
get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
{
	struct kvm_cpuid_entry2 *basic, *class;
	u32 function = *fn_ptr;

	basic = kvm_find_cpuid_entry(vcpu, 0, 0);
	if (!basic)
		return NULL;

	if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
	    is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
		return NULL;

	if (function >= 0x40000000 && function <= 0x4fffffff)
		class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00, 0);
	else if (function >= 0xc0000000)
		class = kvm_find_cpuid_entry(vcpu, 0xc0000000, 0);
	else
		class = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);

	if (class && function <= class->eax)
		return NULL;

	/*
	 * Leaf specific adjustments are also applied when redirecting to the
	 * max basic entry, e.g. if the max basic leaf is 0xb but there is no
	 * entry for CPUID.0xb.index (see below), then the output value for EDX
	 * needs to be pulled from CPUID.0xb.1.
	 */
	*fn_ptr = basic->eax;

	/*
	 * The class does not exist or the requested function is out of range;
	 * the effective CPUID entry is the max basic leaf.  Note, the index of
	 * the original requested leaf is observed!
	 */
	return kvm_find_cpuid_entry(vcpu, basic->eax, index);
}
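/*
 * Illustrative sketch (not part of KVM): observing the Intel redirect
 * semantics this helper emulates.  On an Intel CPU (or a guest with an Intel
 * vendor string), a far out-of-range leaf returns the max basic leaf's
 * values; on AMD it returns zeros, which is why the helper bails for AMD and
 * Hygon guests above.  Raw-CPUID userspace sketch; output is host-dependent:
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <cpuid.h>

int main(void)
{
	uint32_t max, a, b, c, d, a2, b2, c2, d2;

	__cpuid_count(0, 0, max, b, c, d);		/* EAX = max basic leaf */
	__cpuid_count(0x12345678, 0, a, b, c, d);	/* far out of range */
	__cpuid_count(max, 0, a2, b2, c2, d2);		/* the max basic leaf */

	printf("out-of-range: %08x %08x %08x %08x\n", a, b, c, d);
	printf("max basic:    %08x %08x %08x %08x\n", a2, b2, c2, d2);
	/* Intel: the two lines match (same index 0); AMD: first line is 0. */
	return 0;
}
#endif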
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only)
{
	u32 orig_function = *eax, function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *entry;
	bool exact, used_max_basic = false;

	entry = kvm_find_cpuid_entry(vcpu, function, index);
	exact = !!entry;

	if (!entry && !exact_only) {
		entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
		used_max_basic = !!entry;
	}

	if (entry) {
		*eax = entry->eax;
		*ebx = entry->ebx;
		*ecx = entry->ecx;
		*edx = entry->edx;
		if (function == 7 && index == 0) {
			u64 data;
			if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
			    (data & TSX_CTRL_CPUID_CLEAR))
				*ebx &= ~(F(RTM) | F(HLE));
		}
	} else {
		*eax = *ebx = *ecx = *edx = 0;
		/*
		 * When leaf 0BH or 1FH is defined, CL is pass-through
		 * and EDX is always the x2APIC ID, even for undefined
		 * subleaves.  Index 1 will exist iff the leaf is
		 * implemented, so we pass through CL iff leaf 1
		 * exists.  EDX can be copied from any existing index.
		 */
		if (function == 0xb || function == 0x1f) {
			entry = kvm_find_cpuid_entry(vcpu, function, 1);
			if (entry) {
				*ecx = index & 0xff;
				*edx = entry->edx;
			}
		}
	}
	trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
			used_max_basic);
	return exact;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 eax, ebx, ecx, edx;

	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
		return 1;

	eax = kvm_rax_read(vcpu);
	ecx = kvm_rcx_read(vcpu);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
	kvm_rax_write(vcpu, eax);
	kvm_rbx_write(vcpu, ebx);
	kvm_rcx_write(vcpu, ecx);
	kvm_rdx_write(vcpu, edx);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
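/*
 * Illustrative sketch (not part of KVM): the guest-side instruction that
 * lands in kvm_emulate_cpuid() when CPUID exits to the hypervisor.  EAX/ECX
 * select the leaf and sub-leaf on input and all four GPRs are outputs,
 * mirroring the register reads/writes above.  raw_cpuid() is a name made up
 * for the sketch:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static void raw_cpuid(uint32_t leaf, uint32_t subleaf, uint32_t *eax,
		      uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	asm volatile("cpuid"
		     : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		     : "a" (leaf), "c" (subleaf));
}

int main(void)
{
	uint32_t a, b, c, d;

	raw_cpuid(1, 0, &a, &b, &c, &d);
	printf("family/model/stepping word: %08x\n", a);
	return 0;
}
#endif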