Say N otherwise.
config MICROCODE
- bool "CPU microcode loading support"
- default y
+ def_bool y
depends on CPU_SUP_AMD || CPU_SUP_INTEL
- help
- If you say Y here, you will be able to update the microcode on
- Intel and AMD processors. The Intel support is for the IA32 family,
- e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4, Xeon etc. The
- AMD support is for families 0x10 and later. You will obviously need
- the actual microcode binary data itself which is not shipped with
- the Linux kernel.
-
- The preferred method to load microcode from a detached initrd is described
- in Documentation/arch/x86/microcode.rst. For that you need to enable
- CONFIG_BLK_DEV_INITRD in order for the loader to be able to scan the
- initrd for microcode blobs.
-
- In addition, you can build the microcode into the kernel. For that you
- need to add the vendor-supplied microcode to the CONFIG_EXTRA_FIRMWARE
- config option.
-
- config MICROCODE_INTEL
- bool "Intel microcode loading support"
- depends on CPU_SUP_INTEL && MICROCODE
- default MICROCODE
- help
- This options enables microcode patch loading support for Intel
- processors.
-
- For the current Intel microcode data package go to
- <https://downloadcenter.intel.com> and search for
- 'Linux Processor Microcode Data File'.
-
- config MICROCODE_AMD
- bool "AMD microcode loading support"
- depends on CPU_SUP_AMD && MICROCODE
- help
- If you select this option, microcode patch loading support for AMD
- processors will be enabled.
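As the removed help text notes, microcode can still be built into the kernel image via CONFIG_EXTRA_FIRMWARE. A minimal .config sketch, assuming the vendor blob already sits under the firmware directory (the blob filename here is illustrative only):

    CONFIG_EXTRA_FIRMWARE="intel-ucode/06-8e-09"
    CONFIG_EXTRA_FIRMWARE_DIR="/lib/firmware"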
config MICROCODE_LATE_LOADING
bool "Late microcode loading (DANGEROUS)"
This mitigates both spectre_v2 and retbleed at great cost to
performance.
+config CPU_SRSO
+ bool "Mitigate speculative RAS overflow on AMD"
+ depends on CPU_SUP_AMD && X86_64 && RETHUNK
+ default y
+ help
+ Enable the SRSO mitigation needed on AMD Zen1-4 machines.
+
config SLS
bool "Mitigate Straight-Line-Speculation"
depends on CC_HAS_SLS && X86_64
against straight line speculation. The kernel image might be slightly
larger.
+config GDS_FORCE_MITIGATION
+ bool "Force GDS Mitigation"
+ depends on CPU_SUP_INTEL
+ default n
+ help
+ Gather Data Sampling (GDS) is a hardware vulnerability which allows
+ unprivileged speculative access to data which was previously stored in
+ vector registers.
+
+ This option is equivalent to setting gather_data_sampling=force on the
+ command line. The microcode mitigation is used if present, otherwise
+ AVX is disabled as a mitigation. On affected systems that are missing
+ the microcode any userspace code that unconditionally uses AVX will
+ break with this option set.
+
+ Setting this option on systems not vulnerable to GDS has no effect.
+
+ If in doubt, say N.
+
endif
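The GDS_FORCE_MITIGATION help above is explicitly equivalent to booting with gather_data_sampling=force. A sketch of such a kernel command line, where everything except the GDS parameter is illustrative context:

    vmlinuz root=/dev/sda1 ro gather_data_sampling=force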
config ARCH_HAS_ADD_PAGES
CONFIG_PARAVIRT=y
CONFIG_NR_CPUS=8
CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
- CONFIG_MICROCODE_AMD=y
CONFIG_X86_MSR=y
CONFIG_X86_CPUID=y
CONFIG_X86_CHECK_BIOS_CORRUPTION=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_QFMT_V2=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_HYPERVISOR_GUEST=y
CONFIG_PARAVIRT=y
CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
- CONFIG_MICROCODE_AMD=y
CONFIG_X86_MSR=y
CONFIG_X86_CPUID=y
CONFIG_NUMA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_QFMT_V2=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
#ifndef _ASM_X86_MICROCODE_H
#define _ASM_X86_MICROCODE_H
- #include <asm/cpu.h>
- #include <linux/earlycpio.h>
- #include <linux/initrd.h>
- #include <asm/microcode_amd.h>
-
- struct ucode_patch {
- struct list_head plist;
- void *data; /* Intel uses only this one */
- unsigned int size;
- u32 patch_id;
- u16 equiv_cpu;
- };
-
- extern struct list_head microcode_cache;
-
struct cpu_signature {
unsigned int sig;
unsigned int pf;
unsigned int rev;
};
- struct device;
-
- enum ucode_state {
- UCODE_OK = 0,
- UCODE_NEW,
- UCODE_UPDATED,
- UCODE_NFOUND,
- UCODE_ERROR,
+ struct ucode_cpu_info {
+ struct cpu_signature cpu_sig;
+ void *mc;
};
- struct microcode_ops {
- enum ucode_state (*request_microcode_fw) (int cpu, struct device *);
-
- void (*microcode_fini_cpu) (int cpu);
+ #ifdef CONFIG_MICROCODE
+ void load_ucode_bsp(void);
+ void load_ucode_ap(void);
+ void microcode_bsp_resume(void);
+ #else
+ static inline void load_ucode_bsp(void) { }
+ static inline void load_ucode_ap(void) { }
+ static inline void microcode_bsp_resume(void) { }
+ #endif
- /*
- * The generic 'microcode_core' part guarantees that
- * the callbacks below run on a target cpu when they
- * are being called.
- * See also the "Synchronization" section in microcode_core.c.
- */
- enum ucode_state (*apply_microcode) (int cpu);
- int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
+ #ifdef CONFIG_CPU_SUP_INTEL
+ /* Intel specific microcode defines. Public for IFS */
+ struct microcode_header_intel {
+ unsigned int hdrver;
+ unsigned int rev;
+ unsigned int date;
+ unsigned int sig;
+ unsigned int cksum;
+ unsigned int ldrver;
+ unsigned int pf;
+ unsigned int datasize;
+ unsigned int totalsize;
+ unsigned int metasize;
+ unsigned int reserved[2];
};
- struct ucode_cpu_info {
- struct cpu_signature cpu_sig;
- void *mc;
+ struct microcode_intel {
+ struct microcode_header_intel hdr;
+ unsigned int bits[];
};
- extern struct ucode_cpu_info ucode_cpu_info[];
- struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa);
- #ifdef CONFIG_MICROCODE_INTEL
- extern struct microcode_ops * __init init_intel_microcode(void);
- #else
- static inline struct microcode_ops * __init init_intel_microcode(void)
- {
- return NULL;
- }
- #endif /* CONFIG_MICROCODE_INTEL */
+ #define DEFAULT_UCODE_DATASIZE (2000)
+ #define MC_HEADER_SIZE (sizeof(struct microcode_header_intel))
+ #define MC_HEADER_TYPE_MICROCODE 1
+ #define MC_HEADER_TYPE_IFS 2
- #ifdef CONFIG_MICROCODE_AMD
- extern struct microcode_ops * __init init_amd_microcode(void);
- extern void __exit exit_amd_microcode(void);
- #else
- static inline struct microcode_ops * __init init_amd_microcode(void)
+ static inline int intel_microcode_get_datasize(struct microcode_header_intel *hdr)
{
- return NULL;
+ return hdr->datasize ? : DEFAULT_UCODE_DATASIZE;
}
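The `? :` fallback above encodes the SDM convention that a zero datasize field means the legacy 2000-byte data area. A hedged sketch of the matching total-size calculation; this helper is illustrative and not part of the patch:

    static inline int intel_microcode_get_totalsize(struct microcode_header_intel *hdr)
    {
    	/* Zero totalsize likewise means the default: data area plus the 48-byte header. */
    	return hdr->totalsize ? : intel_microcode_get_datasize(hdr) + MC_HEADER_SIZE;
    }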
- static inline void __exit exit_amd_microcode(void) {}
- #endif
- #define MAX_UCODE_COUNT 128
-
- #define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
- #define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
- #define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
- #define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
- #define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
- #define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
- #define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')
-
- #define CPUID_IS(a, b, c, ebx, ecx, edx) \
- (!((ebx ^ (a))|(edx ^ (b))|(ecx ^ (c))))
-
- /*
- * In early loading microcode phase on BSP, boot_cpu_data is not set up yet.
- * x86_cpuid_vendor() gets vendor id for BSP.
- *
- * In 32 bit AP case, accessing boot_cpu_data needs linear address. To simplify
- * coding, we still use x86_cpuid_vendor() to get vendor id for AP.
- *
- * x86_cpuid_vendor() gets vendor information directly from CPUID.
- */
- static inline int x86_cpuid_vendor(void)
+ static inline u32 intel_get_microcode_revision(void)
{
- u32 eax = 0x00000000;
- u32 ebx, ecx = 0, edx;
+ u32 rev, dummy;
- native_cpuid(&eax, &ebx, &ecx, &edx);
+ native_wrmsrl(MSR_IA32_UCODE_REV, 0);
- if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
- return X86_VENDOR_INTEL;
+ /* As documented in the SDM: Do a CPUID 1 here */
+ native_cpuid_eax(1);
- if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
- return X86_VENDOR_AMD;
+ /* get the current revision from MSR 0x8B */
+ native_rdmsr(MSR_IA32_UCODE_REV, dummy, rev);
- return X86_VENDOR_UNKNOWN;
+ return rev;
}
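The sequence above (zero MSR_IA32_UCODE_REV, serialize with CPUID leaf 1, then read the MSR back) is the SDM-documented way to get a reliable revision. A minimal usage sketch; the helper name and message are illustrative:

    static void report_ucode_rev(void)
    {
    	u32 rev = intel_get_microcode_revision();

    	pr_info("microcode: current revision: 0x%08x\n", rev);
    }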
- static inline unsigned int x86_cpuid_family(void)
- {
- u32 eax = 0x00000001;
- u32 ebx, ecx = 0, edx;
-
- native_cpuid(&eax, &ebx, &ecx, &edx);
+ void show_ucode_info_early(void);
- return x86_family(eax);
- }
+ #else /* CONFIG_CPU_SUP_INTEL */
+ static inline void show_ucode_info_early(void) { }
+ #endif /* !CONFIG_CPU_SUP_INTEL */
- #ifdef CONFIG_MICROCODE
- extern void __init load_ucode_bsp(void);
- extern void load_ucode_ap(void);
- void reload_early_microcode(unsigned int cpu);
- extern bool initrd_gone;
- void microcode_bsp_resume(void);
- #else
- static inline void __init load_ucode_bsp(void) { }
- static inline void load_ucode_ap(void) { }
- static inline void reload_early_microcode(unsigned int cpu) { }
- static inline void microcode_bsp_resume(void) { }
+#ifdef CONFIG_CPU_SUP_AMD
+void amd_check_microcode(void);
+#else /* CONFIG_CPU_SUP_AMD */
+static inline void amd_check_microcode(void) {}
+#endif
+
#endif /* _ASM_X86_MICROCODE_H */
#include <asm/cacheinfo.h>
#include <asm/memtype.h>
#include <asm/microcode.h>
- #include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/uv/uv.h>
#define RETBLEED BIT(3)
/* CPU is affected by SMT (cross-thread) return predictions */
#define SMT_RSB BIT(4)
+/* CPU is affected by SRSO */
+#define SRSO BIT(5)
+/* CPU is affected by GDS */
+#define GDS BIT(6)
static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED),
- VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED),
+ VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED),
- VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED),
- VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED),
+ VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED | GDS),
VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED),
- VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
- VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO),
- VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO),
- VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
+ VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS),
+ VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS),
+ VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED),
- VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
+ VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS),
+ VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS),
VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
- VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED),
+ VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS),
VULNBL_AMD(0x15, RETBLEED),
VULNBL_AMD(0x16, RETBLEED),
- VULNBL_AMD(0x17, RETBLEED | SMT_RSB),
+ VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
VULNBL_HYGON(0x18, RETBLEED | SMT_RSB),
+ VULNBL_AMD(0x19, SRSO),
{}
};
if (cpu_matches(cpu_vuln_blacklist, SMT_RSB))
setup_force_cpu_bug(X86_BUG_SMT_RSB);
+ if (!cpu_has(c, X86_FEATURE_SRSO_NO)) {
+ if (cpu_matches(cpu_vuln_blacklist, SRSO))
+ setup_force_cpu_bug(X86_BUG_SRSO);
+ }
+
+ /*
+ * Check if CPU is vulnerable to GDS. If running in a virtual machine on
+ * an affected processor, the VMM may have disabled the use of GATHER by
+ * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
+ * which means that AVX will be disabled.
+ */
+ if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
+ boot_cpu_has(X86_FEATURE_AVX))
+ setup_force_cpu_bug(X86_BUG_GDS);
+
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;
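The comment in the GDS hunk above observes that a VMM can only suppress GATHER by clearing XCR0[2], which disables all of AVX. A userspace sketch of observing that from a guest; illustrative only, assumes CPUID reports OSXSAVE and is compiled with gcc -mxsave:

    #include <stdint.h>
    #include <stdio.h>
    #include <immintrin.h>

    int main(void)
    {
    	/* XCR0 bit 2 covers YMM (AVX) state; a GDS-mitigating VMM may clear it. */
    	uint64_t xcr0 = _xgetbv(0);

    	printf("AVX state %s in XCR0\n",
    	       (xcr0 & (1ull << 2)) ? "enabled" : "disabled");
    	return 0;
    }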
validate_apic_and_package_id(c);
x86_spec_ctrl_setup_ap();
update_srbds_msr();
+ if (boot_cpu_has_bug(X86_BUG_GDS))
+ update_gds_msr();
tsx_ap_init();
}
* @prev_info: CPU capabilities stored before an update.
*
* The microcode loader calls this upon late microcode load to recheck features,
- * only when microcode has been updated. Caller holds microcode_mutex and CPU
- * hotplug lock.
+ * only when microcode has been updated. Caller holds the CPU hotplug lock.
*
* Return: None
*/
perf_check_microcode();
+ amd_check_microcode();
+
store_cpu_caps(&curr_info);
if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,
* identify_boot_cpu() initialized SMT support information, let the
* core code know.
*/
- cpu_smt_check_topology();
+ cpu_smt_set_num_threads(smp_num_siblings, smp_num_siblings);
if (!IS_ENABLED(CONFIG_SMP)) {
pr_info("CPU: ");