x86/cpu: KVM: Add common defines for architectural memory types (PAT, MTRRs, etc.)
author Sean Christopherson <seanjc@google.com>
Wed, 5 Jun 2024 23:19:09 +0000 (16:19 -0700)
committer Sean Christopherson <seanjc@google.com>
Thu, 22 Aug 2024 18:25:46 +0000 (11:25 -0700)
Add defines for the architectural memory types that can be shoved into
various MSRs and registers, e.g. MTRRs, PAT, VMX capabilities MSRs, EPTPs,
etc.  While most MSRs/registers support only a subset of all memory types,
the values themselves are architectural and identical across all users.

Leave the goofy MTRR_TYPE_* definitions as-is since they are in a uapi
header, but add compile-time assertions to connect the dots (and sanity
check that the msr-index.h values didn't get fat-fingered).

Keep the VMX_EPTP_MT_* defines so that it's slightly more obvious that the
EPTP holds a single memory type in 3 of its 64 bits; those bits just
happen to be 2:0, i.e. don't need to be shifted.

Opportunistically use X86_MEMTYPE_WB instead of an open coded '6' in
setup_vmcs_config().

No functional change intended.

Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Kai Huang <kai.huang@intel.com>
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Link: https://lore.kernel.org/r/20240605231918.2915961-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/vmx.h
arch/x86/kernel/cpu/mtrr/mtrr.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/mm/pat/memtype.c

index 82c6a4d350e09e6d8f98f11189cc5cb8f5e58a7c..bb43a07616e64f6124f41f2c2c1a5a4791cba3df 100644 (file)
 #define EFER_FFXSR             (1<<_EFER_FFXSR)
 #define EFER_AUTOIBRS          (1<<_EFER_AUTOIBRS)
 
+/*
+ * Architectural memory types that are common to MTRRs, PAT, VMX MSRs, etc.
+ * Most MSRs support/allow only a subset of memory types, but the values
+ * themselves are common across all relevant MSRs.
+ */
+#define X86_MEMTYPE_UC         0ull    /* Uncacheable, a.k.a. Strong Uncacheable */
+#define X86_MEMTYPE_WC         1ull    /* Write Combining */
+/* RESERVED                    2 */
+/* RESERVED                    3 */
+#define X86_MEMTYPE_WT         4ull    /* Write Through */
+#define X86_MEMTYPE_WP         5ull    /* Write Protected */
+#define X86_MEMTYPE_WB         6ull    /* Write Back */
+#define X86_MEMTYPE_UC_MINUS   7ull    /* Weak Uncacheable (PAT only) */
+
 /* FRED MSRs */
 #define MSR_IA32_FRED_RSP0     0x1cc                   /* Level 0 stack pointer */
 #define MSR_IA32_FRED_RSP1     0x1cd                   /* Level 1 stack pointer */
 #define VMX_BASIC_64           0x0001000000000000LLU
 #define VMX_BASIC_MEM_TYPE_SHIFT       50
 #define VMX_BASIC_MEM_TYPE_MASK        0x003c000000000000LLU
-#define VMX_BASIC_MEM_TYPE_WB  6LLU
 #define VMX_BASIC_INOUT                0x0040000000000000LLU
 
 /* Resctrl MSRs: */
index d77a31039f241e0333a78c42fcbb1eacb0dc27f6..e531d8d80a112cd9c7bdca3f9164e43bdade69db 100644 (file)
@@ -508,9 +508,10 @@ enum vmcs_field {
 #define VMX_EPTP_PWL_4                         0x18ull
 #define VMX_EPTP_PWL_5                         0x20ull
 #define VMX_EPTP_AD_ENABLE_BIT                 (1ull << 6)
+/* The EPTP memtype is encoded in bits 2:0, i.e. doesn't need to be shifted. */
 #define VMX_EPTP_MT_MASK                       0x7ull
-#define VMX_EPTP_MT_WB                         0x6ull
-#define VMX_EPTP_MT_UC                         0x0ull
+#define VMX_EPTP_MT_WB                         X86_MEMTYPE_WB
+#define VMX_EPTP_MT_UC                         X86_MEMTYPE_UC
 #define VMX_EPT_READABLE_MASK                  0x1ull
 #define VMX_EPT_WRITABLE_MASK                  0x2ull
 #define VMX_EPT_EXECUTABLE_MASK                        0x4ull
index 2a2fc14955cd3b2b0302486d82b542a280bd36c6..989d368be04fccebdffd7da5a4a2ef029823c2b3 100644 (file)
 
 #include "mtrr.h"
 
+static_assert(X86_MEMTYPE_UC == MTRR_TYPE_UNCACHABLE);
+static_assert(X86_MEMTYPE_WC == MTRR_TYPE_WRCOMB);
+static_assert(X86_MEMTYPE_WT == MTRR_TYPE_WRTHROUGH);
+static_assert(X86_MEMTYPE_WP == MTRR_TYPE_WRPROT);
+static_assert(X86_MEMTYPE_WB == MTRR_TYPE_WRBACK);
+
 /* arch_phys_wc_add returns an MTRR register index plus this offset. */
 #define MTRR_TO_PHYS_WC_OFFSET 1000
 
index 2392a7ef254df2ce3964e36fc6362b9830135253..504fe5ffd47b705e87aa882e6184e3a6b974b5ef 100644 (file)
@@ -7070,7 +7070,7 @@ static void nested_vmx_setup_basic(struct nested_vmx_msrs *msrs)
                VMCS12_REVISION |
                VMX_BASIC_TRUE_CTLS |
                ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
-               (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
+               (X86_MEMTYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
 
        if (cpu_has_vmx_basic_inout())
                msrs->basic |= VMX_BASIC_INOUT;
index f18c2d8c7476e180005cef514953ab0e2ecb80bb..a4d077db04cf6e3ae2b7c091a121c22852349974 100644 (file)
@@ -2747,7 +2747,7 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
 #endif
 
        /* Require Write-Back (WB) memory type for VMCS accesses. */
-       if (((vmx_msr_high >> 18) & 15) != 6)
+       if (((vmx_msr_high >> 18) & 15) != X86_MEMTYPE_WB)
                return -EIO;
 
        rdmsrl(MSR_IA32_VMX_MISC, misc_msr);
index bdc2a240c2aae67e253565636fe279fdea93e907..15b888ebaf17e139f5cc5d1f1283382825080235 100644 (file)
@@ -176,15 +176,6 @@ static inline void set_page_memtype(struct page *pg,
 }
 #endif
 
-enum {
-       PAT_UC = 0,             /* uncached */
-       PAT_WC = 1,             /* Write combining */
-       PAT_WT = 4,             /* Write Through */
-       PAT_WP = 5,             /* Write Protected */
-       PAT_WB = 6,             /* Write Back (default) */
-       PAT_UC_MINUS = 7,       /* UC, but can be overridden by MTRR */
-};
-
 #define CM(c) (_PAGE_CACHE_MODE_ ## c)
 
 static enum page_cache_mode __init pat_get_cache_mode(unsigned int pat_val,
@@ -194,13 +185,13 @@ static enum page_cache_mode __init pat_get_cache_mode(unsigned int pat_val,
        char *cache_mode;
 
        switch (pat_val) {
-       case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
-       case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
-       case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
-       case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
-       case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
-       case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
-       default:           cache = CM(WB);       cache_mode = "WB  "; break;
+       case X86_MEMTYPE_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
+       case X86_MEMTYPE_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
+       case X86_MEMTYPE_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
+       case X86_MEMTYPE_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
+       case X86_MEMTYPE_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
+       case X86_MEMTYPE_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
+       default:                   cache = CM(WB);       cache_mode = "WB  "; break;
        }
 
        memcpy(msg, cache_mode, 4);
@@ -257,11 +248,11 @@ void pat_cpu_init(void)
 void __init pat_bp_init(void)
 {
        struct cpuinfo_x86 *c = &boot_cpu_data;
-#define PAT(p0, p1, p2, p3, p4, p5, p6, p7)                    \
-       (((u64)PAT_ ## p0) | ((u64)PAT_ ## p1 << 8) |           \
-       ((u64)PAT_ ## p2 << 16) | ((u64)PAT_ ## p3 << 24) |     \
-       ((u64)PAT_ ## p4 << 32) | ((u64)PAT_ ## p5 << 40) |     \
-       ((u64)PAT_ ## p6 << 48) | ((u64)PAT_ ## p7 << 56))
+#define PAT(p0, p1, p2, p3, p4, p5, p6, p7)                            \
+       ((X86_MEMTYPE_ ## p0)      | (X86_MEMTYPE_ ## p1 << 8)  |       \
+       (X86_MEMTYPE_ ## p2 << 16) | (X86_MEMTYPE_ ## p3 << 24) |       \
+       (X86_MEMTYPE_ ## p4 << 32) | (X86_MEMTYPE_ ## p5 << 40) |       \
+       (X86_MEMTYPE_ ## p6 << 48) | (X86_MEMTYPE_ ## p7 << 56))
 
 
        if (!IS_ENABLED(CONFIG_X86_PAT))
This page took 0.127844 seconds and 4 git commands to generate.