Merge tag 'x86-cpu-2024-05-13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author     Linus Torvalds <[email protected]>
           Tue, 14 May 2024 01:44:44 +0000 (18:44 -0700)
committer  Linus Torvalds <[email protected]>
           Tue, 14 May 2024 01:44:44 +0000 (18:44 -0700)
Pull x86 cpu updates from Ingo Molnar:

 - Rework the x86 CPU vendor/family/model code: introduce the 'VFM'
   value, an 8+8+8 bit concatenation of the vendor/family/model
   values, and add macros that work on VFM values (see the sketch
   after this list). This simplifies the addition of new Intel
   models & families, and simplifies existing enumeration & quirk
   code.

 - Add support for the AMD 0x80000026 CPUID leaf to better parse
   topology information

 - Optimize the NUMA allocation layout of more per-CPU data structures

 - Improve the workaround for AMD erratum 1386

 - Clear TME from /proc/cpuinfo as well when it is disabled by the firmware

 - Improve x86 self-tests

 - Extend the mce_record tracepoint with the ::ppin and ::microcode fields

 - Implement recovery for MCE errors in TDX/SEAM non-root mode

 - Misc cleanups and fixes
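
The VFM packing mentioned above can be illustrated with a small standalone
sketch. The helper names below are illustrative only, not necessarily the
macros added by this series; the byte layout follows the x86_vfm union added
to struct cpuinfo_x86 further down: model in bits 0-7, family in bits 8-15,
vendor in bits 16-23.

    /* Hypothetical, self-contained illustration of 8+8+8 VFM packing. */
    #include <stdint.h>
    #include <stdio.h>

    #define VFM_MAKE(vendor, family, model) \
            (((uint32_t)(vendor) << 16) | ((uint32_t)(family) << 8) | (uint32_t)(model))
    #define VFM_MODEL(vfm)   ((vfm) & 0xff)
    #define VFM_FAMILY(vfm)  (((vfm) >> 8) & 0xff)
    #define VFM_VENDOR(vfm)  (((vfm) >> 16) & 0xff)

    int main(void)
    {
            /* Vendor 0 (Intel), family 6, model 0x55 (Skylake-X) */
            uint32_t vfm = VFM_MAKE(0, 6, 0x55);

            printf("vendor=%u family=%u model=0x%x\n",
                   (unsigned)VFM_VENDOR(vfm), (unsigned)VFM_FAMILY(vfm),
                   (unsigned)VFM_MODEL(vfm));
            return 0;
    }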

* tag 'x86-cpu-2024-05-13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (34 commits)
  x86/mm: Switch to new Intel CPU model defines
  x86/tsc_msr: Switch to new Intel CPU model defines
  x86/tsc: Switch to new Intel CPU model defines
  x86/cpu: Switch to new Intel CPU model defines
  x86/resctrl: Switch to new Intel CPU model defines
  x86/microcode/intel: Switch to new Intel CPU model defines
  x86/mce: Switch to new Intel CPU model defines
  x86/cpu: Switch to new Intel CPU model defines
  x86/cpu/intel_epb: Switch to new Intel CPU model defines
  x86/aperfmperf: Switch to new Intel CPU model defines
  x86/apic: Switch to new Intel CPU model defines
  perf/x86/msr: Switch to new Intel CPU model defines
  perf/x86/intel/uncore: Switch to new Intel CPU model defines
  perf/x86/intel/pt: Switch to new Intel CPU model defines
  perf/x86/lbr: Switch to new Intel CPU model defines
  perf/x86/intel/cstate: Switch to new Intel CPU model defines
  x86/bugs: Switch to new Intel CPU model defines
  x86/bugs: Switch to new Intel CPU model defines
  x86/cpu/vfm: Update arch/x86/include/asm/intel-family.h
  x86/cpu/vfm: Add new macros to work with (vendor/family/model) values
  ...

arch/x86/events/intel/lbr.c
arch/x86/include/asm/processor.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/mce/severity.c
arch/x86/kernel/cpu/topology_amd.c
arch/x86/kernel/setup.c
arch/x86/kernel/tsc.c

index 4367aa77cb8d9f6f18adabebe39d3ef0ddc4dcf5,86277196ffaddab598c4226726b95fd53f51422c..dc641b50814e21e35df66d56874ef7610b2fd1bc
@@@ -2,6 -2,7 +2,7 @@@
  #include <linux/perf_event.h>
  #include <linux/types.h>
  
+ #include <asm/cpu_device_id.h>
  #include <asm/perf_event.h>
  #include <asm/msr.h>
  
@@@ -1457,7 -1458,7 +1458,7 @@@ void __init intel_pmu_lbr_init_atom(voi
         * to have an operational LBR which can freeze
         * on PMU interrupt
         */
-       if (boot_cpu_data.x86_model == 28
+       if (boot_cpu_data.x86_vfm == INTEL_ATOM_BONNELL
            && boot_cpu_data.x86_stepping < 10) {
                pr_cont("LBR disabled due to erratum");
                return;
@@@ -1693,7 -1694,6 +1694,7 @@@ void x86_perf_get_lbr(struct x86_pmu_lb
        lbr->from = x86_pmu.lbr_from;
        lbr->to = x86_pmu.lbr_to;
        lbr->info = x86_pmu.lbr_info;
 +      lbr->has_callstack = x86_pmu_has_lbr_callstack();
  }
  EXPORT_SYMBOL_GPL(x86_perf_get_lbr);
  
index 57e1c91420f79c32cdd2f3b4bd5767b84f2eb283,e0f9251e105b36981356b2f94974be73a126ba3e..cb4f6c513c484349294afd7fc758349a3c62d141
@@@ -108,9 -108,23 +108,23 @@@ struct cpuinfo_topology 
  };
  
  struct cpuinfo_x86 {
-       __u8                    x86;            /* CPU family */
-       __u8                    x86_vendor;     /* CPU vendor */
-       __u8                    x86_model;
+       union {
+               /*
+                * The particular ordering (low-to-high) of (vendor,
+                * family, model) is done in case range of models, like
+                * it is usually done on AMD, need to be compared.
+                */
+               struct {
+                       __u8    x86_model;
+                       /* CPU family */
+                       __u8    x86;
+                       /* CPU vendor */
+                       __u8    x86_vendor;
+                       __u8    x86_reserved;
+               };
+               /* combined vendor, family, model */
+               __u32           x86_vfm;
+       };
        __u8                    x86_stepping;
  #ifdef CONFIG_X86_64
        /* Number of 4K pages in DTLB/ITLB combined(in pages): */
@@@ -472,6 -486,7 +486,6 @@@ struct thread_struct 
        unsigned long           iopl_emul;
  
        unsigned int            iopl_warn:1;
 -      unsigned int            sig_on_uaccess_err:1;
  
        /*
         * Protection Keys Register for Userspace.  Loaded immediately on
@@@ -586,7 -601,7 +600,7 @@@ extern char                        ignore_fpu_irq
  # define BASE_PREFETCH                ""
  # define ARCH_HAS_PREFETCH
  #else
 -# define BASE_PREFETCH                "prefetcht0 %P1"
 +# define BASE_PREFETCH                "prefetcht0 %1"
  #endif
  
  /*
   */
  static inline void prefetch(const void *x)
  {
 -      alternative_input(BASE_PREFETCH, "prefetchnta %P1",
 +      alternative_input(BASE_PREFETCH, "prefetchnta %1",
                          X86_FEATURE_XMM,
                          "m" (*(const char *)x));
  }
   */
  static __always_inline void prefetchw(const void *x)
  {
 -      alternative_input(BASE_PREFETCH, "prefetchw %P1",
 +      alternative_input(BASE_PREFETCH, "prefetchw %1",
                          X86_FEATURE_3DNOWPREFETCH,
                          "m" (*(const char *)x));
  }
  #define KSTK_ESP(task)                (task_pt_regs(task)->sp)
  
  #else
 -extern unsigned long __end_init_task[];
 +extern unsigned long __top_init_kernel_stack[];
  
  #define INIT_THREAD {                                                 \
 -      .sp     = (unsigned long)&__end_init_task -                     \
 -                TOP_OF_KERNEL_STACK_PADDING -                         \
 -                sizeof(struct pt_regs),                               \
 +      .sp     = (unsigned long)&__top_init_kernel_stack,              \
  }
  
  extern unsigned long KSTK_ESP(struct task_struct *task);
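
A brief illustration of why the union above orders the bytes low-to-high as
(model, family, vendor): with the model in the least significant byte, a plain
numeric comparison of whole x86_vfm values can express "this vendor and
family, models M_lo through M_hi" as a single range check, as is commonly
needed for AMD model ranges. The helper below is a hypothetical sketch under
that layout, not the kernel's API:

    /*
     * Hypothetical sketch: check whether a packed VFM value falls in a
     * model range for a fixed vendor/family (reserved top byte assumed 0).
     */
    #include <stdbool.h>
    #include <stdint.h>

    static inline bool vfm_in_model_range(uint32_t vfm, uint8_t vendor,
                                          uint8_t family, uint8_t model_lo,
                                          uint8_t model_hi)
    {
            uint32_t lo = ((uint32_t)vendor << 16) | ((uint32_t)family << 8) | model_lo;
            uint32_t hi = ((uint32_t)vendor << 16) | ((uint32_t)family << 8) | model_hi;

            return vfm >= lo && vfm <= hi;
    }
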
index 803dcfb0e3469c6393a81e0ada1675ea56451eb7,f76aaf5216f3dcf241abb48f461e2624c31ae0cf..0d22aefbde7f327f4a6fab3efe436593de6922d7
@@@ -497,32 -497,32 +497,32 @@@ static struct clock_event_device lapic_
  static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
  
  static const struct x86_cpu_id deadline_match[] __initconst = {
-       X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */
-       X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x4, 0x4), 0x0f), /* EX */
+       X86_MATCH_VFM_STEPPINGS(INTEL_HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */
+       X86_MATCH_VFM_STEPPINGS(INTEL_HASWELL_X, X86_STEPPINGS(0x4, 0x4), 0x0f), /* EX */
  
-       X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_X,        0x0b000020),
+       X86_MATCH_VFM(INTEL_BROADWELL_X,        0x0b000020),
  
-       X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x2, 0x2), 0x00000011),
-       X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x3, 0x3), 0x0700000e),
-       X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x4, 0x4), 0x0f00000c),
-       X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x5, 0x5), 0x0e000003),
+       X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x2, 0x2), 0x00000011),
+       X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x3, 0x3), 0x0700000e),
+       X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x4, 0x4), 0x0f00000c),
+       X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x5, 0x5), 0x0e000003),
  
-       X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x3, 0x3), 0x01000136),
-       X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x4, 0x4), 0x02000014),
-       X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x5, 0xf), 0),
+       X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x3, 0x3), 0x01000136),
+       X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x4, 0x4), 0x02000014),
+       X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x5, 0xf), 0),
  
-       X86_MATCH_INTEL_FAM6_MODEL( HASWELL,            0x22),
-       X86_MATCH_INTEL_FAM6_MODEL( HASWELL_L,          0x20),
-       X86_MATCH_INTEL_FAM6_MODEL( HASWELL_G,          0x17),
+       X86_MATCH_VFM(INTEL_HASWELL,            0x22),
+       X86_MATCH_VFM(INTEL_HASWELL_L,          0x20),
+       X86_MATCH_VFM(INTEL_HASWELL_G,          0x17),
  
-       X86_MATCH_INTEL_FAM6_MODEL( BROADWELL,          0x25),
-       X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_G,        0x17),
+       X86_MATCH_VFM(INTEL_BROADWELL,          0x25),
+       X86_MATCH_VFM(INTEL_BROADWELL_G,        0x17),
  
-       X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE_L,          0xb2),
-       X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE,            0xb2),
+       X86_MATCH_VFM(INTEL_SKYLAKE_L,          0xb2),
+       X86_MATCH_VFM(INTEL_SKYLAKE,            0xb2),
  
-       X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE_L,         0x52),
-       X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE,           0x52),
+       X86_MATCH_VFM(INTEL_KABYLAKE_L,         0x52),
+       X86_MATCH_VFM(INTEL_KABYLAKE,           0x52),
  
        {},
  };
@@@ -1771,7 -1771,7 +1771,7 @@@ void x2apic_setup(void
        __x2apic_enable();
  }
  
 -static __init void apic_set_fixmap(void);
 +static __init void apic_set_fixmap(bool read_apic);
  
  static __init void x2apic_disable(void)
  {
        }
  
        __x2apic_disable();
 -      apic_set_fixmap();
 +      /*
 +       * Don't reread the APIC ID as it was already done from
 +       * check_x2apic() and the APIC driver still is a x2APIC variant,
 +       * which fails to do the read after x2APIC was disabled.
 +       */
 +      apic_set_fixmap(false);
  }
  
  static __init void x2apic_enable(void)
@@@ -2062,14 -2057,13 +2062,14 @@@ void __init init_apic_mappings(void
        }
  }
  
 -static __init void apic_set_fixmap(void)
 +static __init void apic_set_fixmap(bool read_apic)
  {
        set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
        apic_mmio_base = APIC_BASE;
        apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
                    apic_mmio_base, mp_lapic_addr);
 -      apic_read_boot_cpu_id(false);
 +      if (read_apic)
 +              apic_read_boot_cpu_id(false);
  }
  
  void __init register_lapic_address(unsigned long address)
        mp_lapic_addr = address;
  
        if (!x2apic_mode)
 -              apic_set_fixmap();
 +              apic_set_fixmap(true);
  }
  
  /*
index 307302af0aeee22d0f3871d06ee117bd6c82818d,39f316d50ae44ac5526a2870cc95555c9729b387..44df3f11e7319b1de6046281d9db5b2231d9fb97
@@@ -13,6 -13,7 +13,7 @@@
  #include <asm/apic.h>
  #include <asm/cacheinfo.h>
  #include <asm/cpu.h>
+ #include <asm/cpu_device_id.h>
  #include <asm/spec-ctrl.h>
  #include <asm/smp.h>
  #include <asm/numa.h>
@@@ -459,7 -460,8 +460,7 @@@ static void bsp_init_amd(struct cpuinfo
  
        case 0x1a:
                switch (c->x86_model) {
 -              case 0x00 ... 0x0f:
 -              case 0x20 ... 0x2f:
 +              case 0x00 ... 0x2f:
                case 0x40 ... 0x4f:
                case 0x70 ... 0x7f:
                        setup_force_cpu_cap(X86_FEATURE_ZEN5);
@@@ -794,6 -796,11 +795,11 @@@ static void init_amd_bd(struct cpuinfo_
        clear_rdrand_cpuid_bit(c);
  }
  
+ static const struct x86_cpu_desc erratum_1386_microcode[] = {
+       AMD_CPU_DESC(0x17,  0x1, 0x2, 0x0800126e),
+       AMD_CPU_DESC(0x17, 0x31, 0x0, 0x08301052),
+ };
  static void fix_erratum_1386(struct cpuinfo_x86 *c)
  {
        /*
         *
         * Affected parts all have no supervisor XSAVE states, meaning that
         * the XSAVEC instruction (which works fine) is equivalent.
+        *
+        * Clear the feature flag only on microcode revisions which
+        * don't have the fix.
         */
+       if (x86_cpu_has_min_microcode_rev(erratum_1386_microcode))
+               return;
        clear_cpu_cap(c, X86_FEATURE_XSAVES);
  }
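
The erratum 1386 change above keys the XSAVES quirk off a table of minimum
microcode revisions. The snippet below is a rough, hypothetical sketch of
that lookup pattern; the struct and field names are made up for illustration
and do not reflect the kernel's x86_cpu_desc definition or helper:

    /* Hypothetical sketch of a "minimum microcode revision" lookup. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct min_ucode_entry {
            uint8_t  family;
            uint8_t  model;
            uint8_t  stepping;
            uint32_t min_rev;
    };

    static bool has_min_microcode(const struct min_ucode_entry *tbl, size_t n,
                                  uint8_t family, uint8_t model,
                                  uint8_t stepping, uint32_t rev)
    {
            for (size_t i = 0; i < n; i++) {
                    if (tbl[i].family == family && tbl[i].model == model &&
                        tbl[i].stepping == stepping)
                            return rev >= tbl[i].min_rev;
            }
            /* No table entry: assume the fixed microcode is not present. */
            return false;
    }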
  
index ab18185894dfd5e9c3f09f5fa39ac4c8ef72e7f4,32d86dd976c0f494752868ef898a4a1331ebda65..b6f927f6c567e1a0086f5b31ea86abd1ea6af84f
@@@ -26,7 -26,7 +26,7 @@@
  #include <asm/msr.h>
  #include <asm/vmx.h>
  #include <asm/paravirt.h>
- #include <asm/intel-family.h>
+ #include <asm/cpu_device_id.h>
  #include <asm/e820/api.h>
  #include <asm/hypervisor.h>
  #include <asm/tlbflush.h>
@@@ -1652,8 -1652,7 +1652,8 @@@ static void __init bhi_select_mitigatio
                return;
  
        /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
 -      if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
 +      if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
 +          !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
                spec_ctrl_disable_kernel_rrsba();
                if (rrsba_disabled)
                        return;
@@@ -2391,20 -2390,20 +2391,20 @@@ static void override_cache_bits(struct 
        if (c->x86 != 6)
                return;
  
-       switch (c->x86_model) {
-       case INTEL_FAM6_NEHALEM:
-       case INTEL_FAM6_WESTMERE:
-       case INTEL_FAM6_SANDYBRIDGE:
-       case INTEL_FAM6_IVYBRIDGE:
-       case INTEL_FAM6_HASWELL:
-       case INTEL_FAM6_HASWELL_L:
-       case INTEL_FAM6_HASWELL_G:
-       case INTEL_FAM6_BROADWELL:
-       case INTEL_FAM6_BROADWELL_G:
-       case INTEL_FAM6_SKYLAKE_L:
-       case INTEL_FAM6_SKYLAKE:
-       case INTEL_FAM6_KABYLAKE_L:
-       case INTEL_FAM6_KABYLAKE:
+       switch (c->x86_vfm) {
+       case INTEL_NEHALEM:
+       case INTEL_WESTMERE:
+       case INTEL_SANDYBRIDGE:
+       case INTEL_IVYBRIDGE:
+       case INTEL_HASWELL:
+       case INTEL_HASWELL_L:
+       case INTEL_HASWELL_G:
+       case INTEL_BROADWELL:
+       case INTEL_BROADWELL_G:
+       case INTEL_SKYLAKE_L:
+       case INTEL_SKYLAKE:
+       case INTEL_KABYLAKE_L:
+       case INTEL_KABYLAKE:
                if (c->x86_cache_bits < 44)
                        c->x86_cache_bits = 44;
                break;
@@@ -2805,13 -2804,11 +2805,13 @@@ static const char *spectre_bhi_state(vo
  {
        if (!boot_cpu_has_bug(X86_BUG_BHI))
                return "; BHI: Not affected";
 -      else if  (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
 +      else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
                return "; BHI: BHI_DIS_S";
 -      else if  (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
 +      else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
                return "; BHI: SW loop, KVM: SW loop";
 -      else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && rrsba_disabled)
 +      else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
 +               !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
 +               rrsba_disabled)
                return "; BHI: Retpoline";
        else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
                return "; BHI: Vulnerable, KVM: SW loop";
index bca780fa5e5777cfc11a2a163074799ddcb9af86,e7892f11c70fe5e8ec5b284285a49e048af3c200..dac4d64dfb2a8e14e9a5df1b7093e859c0a4a565
@@@ -12,7 -12,7 +12,7 @@@
  #include <linux/uaccess.h>
  
  #include <asm/mce.h>
- #include <asm/intel-family.h>
+ #include <asm/cpu_device_id.h>
  #include <asm/traps.h>
  #include <asm/insn.h>
  #include <asm/insn-eval.h>
@@@ -39,20 -39,20 +39,20 @@@ static struct severity 
        u64 mask;
        u64 result;
        unsigned char sev;
-       unsigned char mcgmask;
-       unsigned char mcgres;
+       unsigned short mcgmask;
+       unsigned short mcgres;
        unsigned char ser;
        unsigned char context;
        unsigned char excp;
        unsigned char covered;
-       unsigned char cpu_model;
+       unsigned int cpu_vfm;
        unsigned char cpu_minstepping;
        unsigned char bank_lo, bank_hi;
        char *msg;
  } severities[] = {
  #define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
  #define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h
- #define MODEL_STEPPING(m, s) .cpu_model = m, .cpu_minstepping = s
+ #define VFM_STEPPING(m, s) .cpu_vfm = m, .cpu_minstepping = s
  #define  KERNEL               .context = IN_KERNEL
  #define  USER         .context = IN_USER
  #define  KERNEL_RECOV .context = IN_KERNEL_RECOV
        MCESEV(
                AO, "Uncorrected Patrol Scrub Error",
                SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0),
-               MODEL_STEPPING(INTEL_FAM6_SKYLAKE_X, 4), BANK_RANGE(13, 18)
+               VFM_STEPPING(INTEL_SKYLAKE_X, 4), BANK_RANGE(13, 18)
        ),
  
        /* ignore OVER for UCNA */
                SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
                USER
                ),
+       MCESEV(
+               AR, "Data load error in SEAM non-root mode",
+               SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
+               MCGMASK(MCG_STATUS_SEAM_NR, MCG_STATUS_SEAM_NR),
+               KERNEL
+               ),
+       MCESEV(
+               AR, "Instruction fetch error in SEAM non-root mode",
+               SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
+               MCGMASK(MCG_STATUS_SEAM_NR, MCG_STATUS_SEAM_NR),
+               KERNEL
+               ),
        MCESEV(
                PANIC, "Data load in unrecoverable area of kernel",
                SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
@@@ -290,6 -302,7 +302,6 @@@ static noinstr int error_context(struc
  
        switch (fixup_type) {
        case EX_TYPE_UACCESS:
 -      case EX_TYPE_COPY:
                if (!copy_user)
                        return IN_KERNEL;
                m->kflags |= MCE_IN_KERNEL_COPYIN;
@@@ -385,7 -398,7 +397,7 @@@ static noinstr int mce_severity_intel(s
                        continue;
                if (s->excp && excp != s->excp)
                        continue;
-               if (s->cpu_model && boot_cpu_data.x86_model != s->cpu_model)
+               if (s->cpu_vfm && boot_cpu_data.x86_vfm != s->cpu_vfm)
                        continue;
                if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping)
                        continue;
index ce2d507c3b076652f9ec1cd39e1b86f1cda5fbc2,c3a3e9225c46d14b07ad2f6ec41bb87cbfa4085e..d419deed6a4884a387f5babc999fd599c5a7314e
@@@ -58,7 -58,7 +58,7 @@@ static void store_node(struct topo_sca
        tscan->amd_node_id = node_id;
  }
  
- static bool parse_8000_001e(struct topo_scan *tscan, bool has_0xb)
+ static bool parse_8000_001e(struct topo_scan *tscan, bool has_topoext)
  {
        struct {
                // eax
@@@ -86,7 -86,7 +86,7 @@@
         * If leaf 0xb is available, then the domain shifts are set
         * already and nothing to do here.
         */
-       if (!has_0xb) {
+       if (!has_topoext) {
                /*
                 * Leaf 0x80000008 set the CORE domain shift already.
                 * Update the SMT domain, but do not propagate it.
        return true;
  }
  
 -static bool parse_fam10h_node_id(struct topo_scan *tscan)
 +static void parse_fam10h_node_id(struct topo_scan *tscan)
  {
        union {
                struct {
        } nid;
  
        if (!boot_cpu_has(X86_FEATURE_NODEID_MSR))
 -              return false;
 +              return;
  
        rdmsrl(MSR_FAM10H_NODE_ID, nid.msr);
        store_node(tscan, nid.nodes_per_pkg + 1, nid.node_id);
        tscan->c->topo.llc_id = nid.node_id;
 -      return true;
  }
  
  static void legacy_set_llc(struct topo_scan *tscan)
  {
        unsigned int apicid = tscan->c->topo.initial_apicid;
  
 -      /* parse_8000_0008() set everything up except llc_id */
 -      tscan->c->topo.llc_id = apicid >> tscan->dom_shifts[TOPO_CORE_DOMAIN];
 +      /* If none of the parsers set LLC ID then use the die ID for it. */
 +      if (tscan->c->topo.llc_id == BAD_APICID)
 +              tscan->c->topo.llc_id = apicid >> tscan->dom_shifts[TOPO_CORE_DOMAIN];
  }
  
  static void topoext_fixup(struct topo_scan *tscan)
  
  static void parse_topology_amd(struct topo_scan *tscan)
  {
-       bool has_0xb = false;
+       bool has_topoext = false;
  
        /*
         * If the extended topology leaf 0x8000_001e is available
-        * try to get SMT and CORE shift from leaf 0xb first, then
-        * try to get the CORE shift from leaf 0x8000_0008.
+        * try to get SMT, CORE, TILE, and DIE shifts from extended
+        * CPUID leaf 0x8000_0026 on supported processors first. If
+        * extended CPUID leaf 0x8000_0026 is not supported, try to
+        * get SMT and CORE shift from leaf 0xb first, then try to
+        * get the CORE shift from leaf 0x8000_0008.
         */
        if (cpu_feature_enabled(X86_FEATURE_TOPOEXT))
-               has_0xb = cpu_parse_topology_ext(tscan);
+               has_topoext = cpu_parse_topology_ext(tscan);
  
-       if (!has_0xb && !parse_8000_0008(tscan))
+       if (!has_topoext && !parse_8000_0008(tscan))
                return;
  
        /* Prefer leaf 0x8000001e if available */
-       if (parse_8000_001e(tscan, has_0xb))
+       if (parse_8000_001e(tscan, has_topoext))
                return;
  
        /* Try the NODEID MSR */
 -      if (parse_fam10h_node_id(tscan))
 -              return;
 -
 -      legacy_set_llc(tscan);
 +      parse_fam10h_node_id(tscan);
  }
  
  void cpu_parse_topology_amd(struct topo_scan *tscan)
        tscan->amd_nodes_per_pkg = 1;
        topoext_fixup(tscan);
        parse_topology_amd(tscan);
 +      legacy_set_llc(tscan);
  
        if (tscan->amd_nodes_per_pkg > 1)
                set_cpu_cap(tscan->c, X86_FEATURE_AMD_DCM);
diff --combined arch/x86/kernel/setup.c
index d8975a9fb68a40f307bccc8e0ad91263235645aa,678369e4626328db4df9e79e4973bedecb8e3d8c..55a1fc332e20cb0606ea726db8423888563559e6
@@@ -7,6 -7,7 +7,7 @@@
   */
  #include <linux/acpi.h>
  #include <linux/console.h>
+ #include <linux/cpu.h>
  #include <linux/crash_dump.h>
  #include <linux/dma-map-ops.h>
  #include <linux/efi.h>
@@@ -753,22 -754,6 +754,22 @@@ void __init setup_arch(char **cmdline_p
        boot_cpu_data.x86_phys_bits = MAX_PHYSMEM_BITS;
  #endif
  
 +#ifdef CONFIG_CMDLINE_BOOL
 +#ifdef CONFIG_CMDLINE_OVERRIDE
 +      strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
 +#else
 +      if (builtin_cmdline[0]) {
 +              /* append boot loader cmdline to builtin */
 +              strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
 +              strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
 +              strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
 +      }
 +#endif
 +#endif
 +
 +      strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
 +      *cmdline_p = command_line;
 +
        /*
         * If we have OLPC OFW, we might end up relocating the fixmap due to
         * reserve_top(), so do this before touching the ioremap area.
        bss_resource.start = __pa_symbol(__bss_start);
        bss_resource.end = __pa_symbol(__bss_stop)-1;
  
 -#ifdef CONFIG_CMDLINE_BOOL
 -#ifdef CONFIG_CMDLINE_OVERRIDE
 -      strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
 -#else
 -      if (builtin_cmdline[0]) {
 -              /* append boot loader cmdline to builtin */
 -              strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
 -              strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
 -              strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
 -      }
 -#endif
 -#endif
 -
 -      strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
 -      *cmdline_p = command_line;
 -
        /*
         * x86_configure_nx() is called before parse_early_param() to detect
         * whether hardware doesn't support NX (so that the early EHCI debug
@@@ -1218,3 -1219,10 +1219,10 @@@ static int __init register_kernel_offse
        return 0;
  }
  __initcall(register_kernel_offset_dumper);
+ #ifdef CONFIG_HOTPLUG_CPU
+ bool arch_cpu_is_hotpluggable(int cpu)
+ {
+       return cpu > 0;
+ }
+ #endif /* CONFIG_HOTPLUG_CPU */
diff --combined arch/x86/kernel/tsc.c
index 0f7624ed1d1d09dc8ca80e8d266e10ee0852e4e0,c4745141dd1788c69e33897600338939d817ebf0..06b170759e5bf581d78a0420ae887cf5d139d6d8
@@@ -26,7 -26,7 +26,7 @@@
  #include <asm/x86_init.h>
  #include <asm/geode.h>
  #include <asm/apic.h>
- #include <asm/intel-family.h>
+ #include <asm/cpu_device_id.h>
  #include <asm/i8259.h>
  #include <asm/uv/uv.h>
  
@@@ -44,7 -44,7 +44,7 @@@ EXPORT_SYMBOL(tsc_khz)
  static int __read_mostly tsc_unstable;
  static unsigned int __initdata tsc_early_khz;
  
 -static DEFINE_STATIC_KEY_FALSE(__use_tsc);
 +static DEFINE_STATIC_KEY_FALSE_RO(__use_tsc);
  
  int tsc_clocksource_reliable;
  
@@@ -682,7 -682,7 +682,7 @@@ unsigned long native_calibrate_tsc(void
         * clock.
         */
        if (crystal_khz == 0 &&
-                       boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT_D)
+                       boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT_D)
                crystal_khz = 25000;
  
        /*
         * For Atom SoCs TSC is the only reliable clocksource.
         * Mark TSC reliable so no watchdog on it.
         */
-       if (boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT)
+       if (boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT)
                setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
  
  #ifdef CONFIG_X86_LOCAL_APIC