Git Repo - linux.git/commitdiff
Merge tag 'x86-urgent-2020-06-11' of git://git.kernel.org/pub/scm/linux/kernel/git...
authorLinus Torvalds <[email protected]>
Thu, 11 Jun 2020 22:54:31 +0000 (15:54 -0700)
committerLinus Torvalds <[email protected]>
Thu, 11 Jun 2020 22:54:31 +0000 (15:54 -0700)
Pull more x86 updates from Thomas Gleixner:
 "A set of fixes and updates for x86:

   - Unbreak paravirt VDSO clocks.

     While the VDSO code was moved into lib for sharing a subtle check
     for the validity of paravirt clocks got replaced. While the
     replacement works perfectly fine for bare metal as the update of
     the VDSO clock mode is synchronous, it fails for paravirt clocks
     because the hypervisor can invalidate them asynchronously.

     Bring it back as an optional function so it does not inflict this
     on architectures which are free of PV damage.

   - Fix the jiffies to jiffies64 mapping on 64bit so it does not
     trigger an ODR violation on newer compilers

   - Three fixes for the SSBD and *IB* speculation mitigation maze to
     ensure consistency, not disabling of some *IB* variants wrongly and
     to prevent a rogue cross process shutdown of SSBD. All marked for
     stable.

   - Add yet more CPU models to the splitlock detection capable list
     !@#%$!

   - Bring the pr_info() back which tells that TSC deadline timer is
     enabled.

   - Reboot quirk for MacBook6,1"

* tag 'x86-urgent-2020-06-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/vdso: Unbreak paravirt VDSO clocks
  lib/vdso: Provide sanity check for cycles (again)
  clocksource: Remove obsolete ifdef
  x86_64: Fix jiffies ODR violation
  x86/speculation: PR_SPEC_FORCE_DISABLE enforcement for indirect branches.
  x86/speculation: Prevent rogue cross-process SSBD shutdown
  x86/speculation: Avoid force-disabling IBPB based on STIBP and enhanced IBRS.
  x86/cpu: Add Sapphire Rapids CPU model number
  x86/split_lock: Add Icelake microserver and Tigerlake CPU models
  x86/apic: Make TSC deadline timer detection message visible
  x86/reboot/quirks: Add MacBook6,1 reboot quirk

1  2 
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/process.c
arch/x86/kernel/reboot.c
arch/x86/kernel/time.c
lib/vdso/gettimeofday.c

index 4b1d31be50b4a1aba2816c2a51bc89affbe96f14,21d2f1de1057879d8f1a1cbd625c4f1f2dbf5933..bf4acb0b5365d35f38cc7d9f95ca36f60726de2f
@@@ -544,20 -544,46 +544,20 @@@ static struct clock_event_device lapic_
  };
  static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
  
 -static __init u32 hsx_deadline_rev(void)
 -{
 -      switch (boot_cpu_data.x86_stepping) {
 -      case 0x02: return 0x3a; /* EP */
 -      case 0x04: return 0x0f; /* EX */
 -      }
 -
 -      return ~0U;
 -}
 -
 -static __init u32 bdx_deadline_rev(void)
 -{
 -      switch (boot_cpu_data.x86_stepping) {
 -      case 0x02: return 0x00000011;
 -      case 0x03: return 0x0700000e;
 -      case 0x04: return 0x0f00000c;
 -      case 0x05: return 0x0e000003;
 -      }
 -
 -      return ~0U;
 -}
 -
 -static __init u32 skx_deadline_rev(void)
 -{
 -      switch (boot_cpu_data.x86_stepping) {
 -      case 0x03: return 0x01000136;
 -      case 0x04: return 0x02000014;
 -      }
 +static const struct x86_cpu_id deadline_match[] __initconst = {
 +      X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */
 +      X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x4, 0x4), 0x0f), /* EX */
  
 -      if (boot_cpu_data.x86_stepping > 4)
 -              return 0;
 +      X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_X,        0x0b000020),
  
 -      return ~0U;
 -}
 +      X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x2, 0x2), 0x00000011),
 +      X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x3, 0x3), 0x0700000e),
 +      X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x4, 0x4), 0x0f00000c),
 +      X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x5, 0x5), 0x0e000003),
  
 -static const struct x86_cpu_id deadline_match[] __initconst = {
 -      X86_MATCH_INTEL_FAM6_MODEL( HASWELL_X,          &hsx_deadline_rev),
 -      X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_X,        0x0b000020),
 -      X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_D,        &bdx_deadline_rev),
 -      X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE_X,          &skx_deadline_rev),
 +      X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x3, 0x3), 0x01000136),
 +      X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x4, 0x4), 0x02000014),
 +      X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x5, 0xf), 0),
  
        X86_MATCH_INTEL_FAM6_MODEL( HASWELL,            0x22),
        X86_MATCH_INTEL_FAM6_MODEL( HASWELL_L,          0x20),
@@@ -589,7 -615,14 +589,7 @@@ static __init bool apic_validate_deadli
        if (!m)
                return true;
  
 -      /*
 -       * Function pointers will have the MSB set due to address layout,
 -       * immediate revisions will not.
 -       */
 -      if ((long)m->driver_data < 0)
 -              rev = ((u32 (*)(void))(m->driver_data))();
 -      else
 -              rev = (u32)m->driver_data;
 +      rev = (u32)m->driver_data;
  
        if (boot_cpu_data.microcode >= rev)
                return true;
@@@ -2060,7 -2093,7 +2060,7 @@@ void __init init_apic_mappings(void
        unsigned int new_apicid;
  
        if (apic_validate_deadline_timer())
-               pr_debug("TSC deadline timer available\n");
+               pr_info("TSC deadline timer available\n");
  
        if (x2apic_mode) {
                boot_cpu_physical_apicid = read_apic_id();
index b6f887be440c51290cfd65641eff9fa9b00421c1,56f573aa764f40f17e04f46ffa29ffb5bed2ffec..0b71970d2d3d2a65848a6377d6026d9ea45929be
@@@ -15,7 -15,6 +15,7 @@@
  #include <linux/nospec.h>
  #include <linux/prctl.h>
  #include <linux/sched/smt.h>
 +#include <linux/pgtable.h>
  
  #include <asm/spec-ctrl.h>
  #include <asm/cmdline.h>
@@@ -27,6 -26,7 +27,6 @@@
  #include <asm/vmx.h>
  #include <asm/paravirt.h>
  #include <asm/alternative.h>
 -#include <asm/pgtable.h>
  #include <asm/set_memory.h>
  #include <asm/intel-family.h>
  #include <asm/e820/api.h>
@@@ -41,7 -41,6 +41,7 @@@ static void __init l1tf_select_mitigati
  static void __init mds_select_mitigation(void);
  static void __init mds_print_mitigation(void);
  static void __init taa_select_mitigation(void);
 +static void __init srbds_select_mitigation(void);
  
  /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
  u64 x86_spec_ctrl_base;
@@@ -109,7 -108,6 +109,7 @@@ void __init check_bugs(void
        l1tf_select_mitigation();
        mds_select_mitigation();
        taa_select_mitigation();
 +      srbds_select_mitigation();
  
        /*
         * As MDS and TAA mitigations are inter-related, print MDS
@@@ -399,97 -397,6 +399,97 @@@ static int __init tsx_async_abort_parse
  }
  early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
  
 +#undef pr_fmt
 +#define pr_fmt(fmt)   "SRBDS: " fmt
 +
 +enum srbds_mitigations {
 +      SRBDS_MITIGATION_OFF,
 +      SRBDS_MITIGATION_UCODE_NEEDED,
 +      SRBDS_MITIGATION_FULL,
 +      SRBDS_MITIGATION_TSX_OFF,
 +      SRBDS_MITIGATION_HYPERVISOR,
 +};
 +
 +static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
 +
 +static const char * const srbds_strings[] = {
 +      [SRBDS_MITIGATION_OFF]          = "Vulnerable",
 +      [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
 +      [SRBDS_MITIGATION_FULL]         = "Mitigation: Microcode",
 +      [SRBDS_MITIGATION_TSX_OFF]      = "Mitigation: TSX disabled",
 +      [SRBDS_MITIGATION_HYPERVISOR]   = "Unknown: Dependent on hypervisor status",
 +};
 +
 +static bool srbds_off;
 +
 +void update_srbds_msr(void)
 +{
 +      u64 mcu_ctrl;
 +
 +      if (!boot_cpu_has_bug(X86_BUG_SRBDS))
 +              return;
 +
 +      if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 +              return;
 +
 +      if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
 +              return;
 +
 +      rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
 +
 +      switch (srbds_mitigation) {
 +      case SRBDS_MITIGATION_OFF:
 +      case SRBDS_MITIGATION_TSX_OFF:
 +              mcu_ctrl |= RNGDS_MITG_DIS;
 +              break;
 +      case SRBDS_MITIGATION_FULL:
 +              mcu_ctrl &= ~RNGDS_MITG_DIS;
 +              break;
 +      default:
 +              break;
 +      }
 +
 +      wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
 +}
 +
 +static void __init srbds_select_mitigation(void)
 +{
 +      u64 ia32_cap;
 +
 +      if (!boot_cpu_has_bug(X86_BUG_SRBDS))
 +              return;
 +
 +      /*
 +       * Check to see if this is one of the MDS_NO systems supporting
 +       * TSX that are only exposed to SRBDS when TSX is enabled.
 +       */
 +      ia32_cap = x86_read_arch_cap_msr();
 +      if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
 +              srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
 +      else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 +              srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
 +      else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
 +              srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
 +      else if (cpu_mitigations_off() || srbds_off)
 +              srbds_mitigation = SRBDS_MITIGATION_OFF;
 +
 +      update_srbds_msr();
 +      pr_info("%s\n", srbds_strings[srbds_mitigation]);
 +}
 +
 +static int __init srbds_parse_cmdline(char *str)
 +{
 +      if (!str)
 +              return -EINVAL;
 +
 +      if (!boot_cpu_has_bug(X86_BUG_SRBDS))
 +              return 0;
 +
 +      srbds_off = !strcmp(str, "off");
 +      return 0;
 +}
 +early_param("srbds", srbds_parse_cmdline);
 +
  #undef pr_fmt
  #define pr_fmt(fmt)     "Spectre V1 : " fmt
  
@@@ -588,7 -495,9 +588,9 @@@ early_param("nospectre_v1", nospectre_v
  static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
        SPECTRE_V2_NONE;
  
- static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
+ static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
+       SPECTRE_V2_USER_NONE;
+ static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
        SPECTRE_V2_USER_NONE;
  
  #ifdef CONFIG_RETPOLINE
@@@ -734,15 -643,6 +736,6 @@@ spectre_v2_user_select_mitigation(enum 
                break;
        }
  
-       /*
-        * At this point, an STIBP mode other than "off" has been set.
-        * If STIBP support is not being forced, check if STIBP always-on
-        * is preferred.
-        */
-       if (mode != SPECTRE_V2_USER_STRICT &&
-           boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
-               mode = SPECTRE_V2_USER_STRICT_PREFERRED;
        /* Initialize Indirect Branch Prediction Barrier */
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
                pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
                        static_key_enabled(&switch_mm_always_ibpb) ?
                        "always-on" : "conditional");
+               spectre_v2_user_ibpb = mode;
        }
  
-       /* If enhanced IBRS is enabled no STIBP required */
-       if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+       /*
+        * If enhanced IBRS is enabled or SMT impossible, STIBP is not
+        * required.
+        */
+       if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return;
  
        /*
-        * If SMT is not possible or STIBP is not available clear the STIBP
-        * mode.
+        * At this point, an STIBP mode other than "off" has been set.
+        * If STIBP support is not being forced, check if STIBP always-on
+        * is preferred.
         */
-       if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
+       if (mode != SPECTRE_V2_USER_STRICT &&
+           boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
+               mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+       /*
+        * If STIBP is not available, clear the STIBP mode.
+        */
+       if (!boot_cpu_has(X86_FEATURE_STIBP))
                mode = SPECTRE_V2_USER_NONE;
+       spectre_v2_user_stibp = mode;
  set_mode:
-       spectre_v2_user = mode;
-       /* Only print the STIBP mode when SMT possible */
-       if (smt_possible)
-               pr_info("%s\n", spectre_v2_user_strings[mode]);
+       pr_info("%s\n", spectre_v2_user_strings[mode]);
  }
  
  static const char * const spectre_v2_strings[] = {
@@@ -1014,7 -927,7 +1020,7 @@@ void cpu_bugs_smt_update(void
  {
        mutex_lock(&spec_ctrl_mutex);
  
-       switch (spectre_v2_user) {
+       switch (spectre_v2_user_stibp) {
        case SPECTRE_V2_USER_NONE:
                break;
        case SPECTRE_V2_USER_STRICT:
@@@ -1257,14 -1170,19 +1263,19 @@@ static int ib_prctl_set(struct task_str
  {
        switch (ctrl) {
        case PR_SPEC_ENABLE:
-               if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+               if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+                   spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
                        return 0;
                /*
                 * Indirect branch speculation is always disabled in strict
-                * mode.
+                * mode. It can neither be enabled if it was force-disabled
+                * by a  previous prctl call.
                 */
-               if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
-                   spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+               if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+                   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+                   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
+                   task_spec_ib_force_disable(task))
                        return -EPERM;
                task_clear_spec_ib_disable(task);
                task_update_spec_tif(task);
                 * Indirect branch speculation is always allowed when
                 * mitigation is force disabled.
                 */
-               if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+               if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+                   spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
                        return -EPERM;
-               if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
-                   spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+               if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+                   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+                   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
                        return 0;
                task_set_spec_ib_disable(task);
                if (ctrl == PR_SPEC_FORCE_DISABLE)
@@@ -1309,7 -1229,8 +1322,8 @@@ void arch_seccomp_spec_mitigate(struct 
  {
        if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
                ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
-       if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
+       if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+           spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
                ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
  }
  #endif
@@@ -1340,22 -1261,24 +1354,24 @@@ static int ib_prctl_get(struct task_str
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                return PR_SPEC_NOT_AFFECTED;
  
-       switch (spectre_v2_user) {
-       case SPECTRE_V2_USER_NONE:
+       if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+           spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
                return PR_SPEC_ENABLE;
-       case SPECTRE_V2_USER_PRCTL:
-       case SPECTRE_V2_USER_SECCOMP:
+       else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+           spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+           spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+               return PR_SPEC_DISABLE;
+       else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
+           spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+           spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
+           spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
                if (task_spec_ib_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ib_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
-       case SPECTRE_V2_USER_STRICT:
-       case SPECTRE_V2_USER_STRICT_PREFERRED:
-               return PR_SPEC_DISABLE;
-       default:
+       } else
                return PR_SPEC_NOT_AFFECTED;
-       }
  }
  
  int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
@@@ -1594,7 -1517,7 +1610,7 @@@ static char *stibp_state(void
        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return "";
  
-       switch (spectre_v2_user) {
+       switch (spectre_v2_user_stibp) {
        case SPECTRE_V2_USER_NONE:
                return ", STIBP: disabled";
        case SPECTRE_V2_USER_STRICT:
@@@ -1621,11 -1544,6 +1637,11 @@@ static char *ibpb_state(void
        return "";
  }
  
 +static ssize_t srbds_show_state(char *buf)
 +{
 +      return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
 +}
 +
  static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
  {
        case X86_BUG_ITLB_MULTIHIT:
                return itlb_multihit_show_state(buf);
  
 +      case X86_BUG_SRBDS:
 +              return srbds_show_state(buf);
 +
        default:
                break;
        }
@@@ -1719,9 -1634,4 +1735,9 @@@ ssize_t cpu_show_itlb_multihit(struct d
  {
        return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
  }
 +
 +ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
 +{
 +      return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
 +}
  #endif
index 63926c94eb5fc19dc9970df431cb24bb049541fb,6abbcc774b82186d9ddfca92d170c51cfda482bc..c25a67a34bd3d9a9b60b9cf65d76e3833baa282f
@@@ -1,6 -1,5 +1,6 @@@
  // SPDX-License-Identifier: GPL-2.0
  #include <linux/kernel.h>
 +#include <linux/pgtable.h>
  
  #include <linux/string.h>
  #include <linux/bitops.h>
@@@ -12,6 -11,7 +12,6 @@@
  #include <linux/uaccess.h>
  
  #include <asm/cpufeature.h>
 -#include <asm/pgtable.h>
  #include <asm/msr.h>
  #include <asm/bugs.h>
  #include <asm/cpu.h>
@@@ -22,7 -22,6 +22,7 @@@
  #include <asm/cpu_device_id.h>
  #include <asm/cmdline.h>
  #include <asm/traps.h>
 +#include <asm/resctrl.h>
  
  #ifdef CONFIG_X86_64
  #include <linux/topology.h>
@@@ -323,11 -322,6 +323,11 @@@ static void early_init_intel(struct cpu
                detect_ht_early(c);
  }
  
 +static void bsp_init_intel(struct cpuinfo_x86 *c)
 +{
 +      resctrl_cpu_detect(c);
 +}
 +
  #ifdef CONFIG_X86_32
  /*
   *    Early probe support logic for ppro memory erratum #50
@@@ -967,7 -961,6 +967,7 @@@ static const struct cpu_dev intel_cpu_d
  #endif
        .c_detect_tlb   = intel_detect_tlb,
        .c_early_init   = early_init_intel,
 +      .c_bsp_init     = bsp_init_intel,
        .c_init         = init_intel,
        .c_x86_vendor   = X86_VENDOR_INTEL,
  };
@@@ -1142,9 -1135,12 +1142,12 @@@ void switch_to_sld(unsigned long tifn
  static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,           0),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,           0),
+       X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,           0),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,        1),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,      1),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,      1),
+       X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,         1),
+       X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,           1),
        {}
  };
  
index 8e3d0347b664924befef4f081b5e23a551544cda,8f4533c1a4ec27c9fce69ef0ea255e58d0a1aa3c..f362ce0d5ac0f621b901c6d728445efe7f66c107
@@@ -191,7 -191,7 +191,7 @@@ void flush_thread(void
        flush_ptrace_hw_breakpoint(tsk);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
  
 -      fpu__clear(&tsk->thread.fpu);
 +      fpu__clear_all(&tsk->thread.fpu);
  }
  
  void disable_TSC(void)
@@@ -545,28 -545,20 +545,20 @@@ static __always_inline void __speculati
  
        lockdep_assert_irqs_disabled();
  
-       /*
-        * If TIF_SSBD is different, select the proper mitigation
-        * method. Note that if SSBD mitigation is disabled or permanentely
-        * enabled this branch can't be taken because nothing can set
-        * TIF_SSBD.
-        */
-       if (tif_diff & _TIF_SSBD) {
-               if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+       /* Handle change of TIF_SSBD depending on the mitigation method. */
+       if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+               if (tif_diff & _TIF_SSBD)
                        amd_set_ssb_virt_state(tifn);
-               } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+       } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+               if (tif_diff & _TIF_SSBD)
                        amd_set_core_ssb_state(tifn);
-               } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
-                          static_cpu_has(X86_FEATURE_AMD_SSBD)) {
-                       msr |= ssbd_tif_to_spec_ctrl(tifn);
-                       updmsr  = true;
-               }
+       } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+                  static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+               updmsr |= !!(tif_diff & _TIF_SSBD);
+               msr |= ssbd_tif_to_spec_ctrl(tifn);
        }
  
-       /*
-        * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
-        * otherwise avoid the MSR write.
-        */
+       /* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
        if (IS_ENABLED(CONFIG_SMP) &&
            static_branch_unlikely(&switch_to_cond_stibp)) {
                updmsr |= !!(tif_diff & _TIF_SPEC_IB);
@@@ -612,17 -604,6 +604,17 @@@ void speculation_ctrl_update_current(vo
        preempt_enable();
  }
  
 +static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
 +{
 +      unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);
 +
 +      newval = cr4 ^ mask;
 +      if (newval != cr4) {
 +              this_cpu_write(cpu_tlbstate.cr4, newval);
 +              __write_cr4(newval);
 +      }
 +}
 +
  void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
  {
        unsigned long tifp, tifn;
diff --combined arch/x86/kernel/reboot.c
index e040ba6be27b6b025c4dcb8bdb813a86f38ee454,8b8cebfd32989c2dbf9a0ed6390706a6013837a3..0ec7ced727fe859f7c6cd616348bfca6e156b1cb
  #include <linux/tboot.h>
  #include <linux/delay.h>
  #include <linux/frame.h>
 +#include <linux/pgtable.h>
  #include <acpi/reboot.h>
  #include <asm/io.h>
  #include <asm/apic.h>
  #include <asm/io_apic.h>
  #include <asm/desc.h>
  #include <asm/hpet.h>
 -#include <asm/pgtable.h>
  #include <asm/proto.h>
  #include <asm/reboot_fixups.h>
  #include <asm/reboot.h>
@@@ -197,6 -197,14 +197,14 @@@ static const struct dmi_system_id reboo
                        DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
                },
        },
+       {       /* Handle problems with rebooting on Apple MacBook6,1 */
+               .callback = set_pci_reboot,
+               .ident = "Apple MacBook6,1",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"),
+               },
+       },
        {       /* Handle problems with rebooting on Apple MacBookPro5 */
                .callback = set_pci_reboot,
                .ident = "Apple MacBookPro5",
diff --combined arch/x86/kernel/time.c
index 371a6b348e44725dfcba3e4a25a4b55b91f8a811,f395729826357582ba34d11c03272fbd8992e1f2..e42faa792c07931083988931a5061b5e21429499
  #include <asm/hpet.h>
  #include <asm/time.h>
  
- #ifdef CONFIG_X86_64
- __visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
- #endif
  unsigned long profile_pc(struct pt_regs *regs)
  {
        unsigned long pc = instruction_pointer(regs);
@@@ -103,9 -99,6 +99,9 @@@ static __init void x86_late_time_init(v
         */
        x86_init.irqs.intr_mode_init();
        tsc_init();
 +
 +      if (static_cpu_has(X86_FEATURE_WAITPKG))
 +              use_tpause_delay();
  }
  
  /*
diff --combined lib/vdso/gettimeofday.c
index 7938d3c4901d1b657cccda9cf1fb10eab3d2eb4e,3bb82a6cc5aa22e3d7a17e9943d74054d42b9439..bcc9a98a052461cc26e3a8dede9bcdc5537a853c
@@@ -38,6 -38,13 +38,13 @@@ static inline bool vdso_clocksource_ok(
  }
  #endif
  
+ #ifndef vdso_cycles_ok
+ static inline bool vdso_cycles_ok(u64 cycles)
+ {
+       return true;
+ }
+ #endif
  #ifdef CONFIG_TIME_NS
  static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
                          struct __kernel_timespec *ts)
@@@ -62,6 -69,8 +69,8 @@@
                        return -1;
  
                cycles = __arch_get_hw_counter(vd->clock_mode);
+               if (unlikely(!vdso_cycles_ok(cycles)))
+                       return -1;
                ns = vdso_ts->nsec;
                last = vd->cycle_last;
                ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
@@@ -130,6 -139,8 +139,8 @@@ static __always_inline int do_hres(cons
                        return -1;
  
                cycles = __arch_get_hw_counter(vd->clock_mode);
+               if (unlikely(!vdso_cycles_ok(cycles)))
+                       return -1;
                ns = vdso_ts->nsec;
                last = vd->cycle_last;
                ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
@@@ -210,7 -221,7 +221,7 @@@ static __always_inline int do_coarse(co
        return 0;
  }
  
 -static __maybe_unused int
 +static __always_inline int
  __cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
                             struct __kernel_timespec *ts)
  {
This page took 0.087953 seconds and 4 git commands to generate.