Merge tag 'x86_fpu_for_v5.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <[email protected]>
Tue, 24 May 2022 01:49:16 +0000 (18:49 -0700)
committer Linus Torvalds <[email protected]>
Tue, 24 May 2022 01:49:16 +0000 (18:49 -0700)
Pull x86 fpu updates from Borislav Petkov:

 - Add support for XSAVEC - the Compacted XSTATE saving variant - and
   thus allow for guests to use this compacted XSTATE variant when the
   hypervisor exports that support

 - A variable shadowing cleanup

* tag 'x86_fpu_for_v5.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/fpu: Cleanup variable shadowing
  x86/fpu/xsave: Support XSAVEC in the kernel
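
For anyone who wants to poke at the same CPU bits this series keys off: the patch forces a new synthetic flag, X86_FEATURE_XCOMPACTED, whenever XSAVEC or XSAVES is available, and sizes the compacted buffer from CPUID leaf 0xD, sub-leaf 1. The following stand-alone user-space sketch (not kernel code and not part of this merge) probes those bits the same way: EAX bit 1 advertises XSAVEC, bit 3 advertises XSAVES, and EBX reports the compacted XSAVE area size for the currently enabled components.

/* xsavec-probe.c - user-space sketch, not part of the patch.
 * Build: cc -o xsavec-probe xsavec-probe.c (x86 only).
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* CPUID.(EAX=0DH, ECX=1): extended XSAVE feature enumeration */
        if (!__get_cpuid_count(0x0d, 1, &eax, &ebx, &ecx, &edx)) {
                puts("CPUID leaf 0xD not supported");
                return 1;
        }

        int xsaveopt = !!(eax & (1u << 0));
        int xsavec   = !!(eax & (1u << 1));
        int xsaves   = !!(eax & (1u << 3));

        /* Mirrors the new setup_force_cpu_cap(X86_FEATURE_XCOMPACTED):
         * compacted format is used when either XSAVEC or XSAVES exists. */
        printf("XSAVEOPT=%d XSAVEC=%d XSAVES=%d -> %s format\n",
               xsaveopt, xsavec, xsaves,
               (xsavec || xsaves) ? "compacted" : "standard");

        /* EBX here is the compacted size for the enabled XCR0 | IA32_XSS
         * components, which is what get_compacted_size() in the patch reads. */
        printf("compacted XSAVE area size: %u bytes\n", ebx);
        return 0;
}

On a guest whose hypervisor exposes XSAVEC but not XSAVES, this reports the compacted format with no supervisor states included, which is exactly the case the series enables.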

arch/x86/include/asm/cpufeatures.h
arch/x86/kernel/fpu/xstate.c

diff --combined arch/x86/include/asm/cpufeatures.h
index 20df73b510253a540823cf406b79ce4b7480b97e,ff08da857847f5e5d54c3ab4772a18def6fcbad7..6e0dbbf847f33ed92d1514a4085cc25ceb41fcb2
  #define X86_FEATURE_INVPCID_SINGLE    ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
  #define X86_FEATURE_HW_PSTATE         ( 7*32+ 8) /* AMD HW-PState */
  #define X86_FEATURE_PROC_FEEDBACK     ( 7*32+ 9) /* AMD ProcFeedbackInterface */
- /* FREE!                                ( 7*32+10) */
+ #define X86_FEATURE_XCOMPACTED                ( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */
  #define X86_FEATURE_PTI                       ( 7*32+11) /* Kernel Page Table Isolation enabled */
  #define X86_FEATURE_RETPOLINE         ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
  #define X86_FEATURE_RETPOLINE_LFENCE  ( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */
  #define X86_FEATURE_VMW_VMMCALL               ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
  #define X86_FEATURE_PVUNLOCK          ( 8*32+20) /* "" PV unlock function */
  #define X86_FEATURE_VCPUPREEMPT               ( 8*32+21) /* "" PV vcpu_is_preempted function */
 +#define X86_FEATURE_TDX_GUEST         ( 8*32+22) /* Intel Trust Domain Extensions Guest */
  
  /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
  #define X86_FEATURE_FSGSBASE          ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
diff --combined arch/x86/kernel/fpu/xstate.c
index 1b016a186c11121fa0f6707eaee03cd6ef5425e9,81fcd04247dec6d7fd83b7f87f10f645114a286b..c8340156bfd2aadc490b5d0a663879ded2bb991f
@@@ -142,7 -142,8 +142,8 @@@ static unsigned int xfeature_get_offset
         * Non-compacted format and legacy features use the cached fixed
         * offsets.
         */
-       if (!cpu_feature_enabled(X86_FEATURE_XSAVES) || xfeature <= XFEATURE_SSE)
+       if (!cpu_feature_enabled(X86_FEATURE_XCOMPACTED) ||
+           xfeature <= XFEATURE_SSE)
                return xstate_offsets[xfeature];
  
        /*
@@@ -369,12 -370,12 +370,12 @@@ static void __init setup_init_fpu_buf(v
        /*
         * All components are now in init state. Read the state back so
         * that init_fpstate contains all non-zero init state. This only
-        * works with XSAVE, but not with XSAVEOPT and XSAVES because
+        * works with XSAVE, but not with XSAVEOPT and XSAVEC/S because
         * those use the init optimization which skips writing data for
         * components in init state.
         *
         * XSAVE could be used, but that would require to reshuffle the
-        * data when XSAVES is available because XSAVES uses xstate
+        * data when XSAVEC/S is available because XSAVEC/S uses xstate
         * compaction. But doing so is a pointless exercise because most
         * components have an all zeros init state except for the legacy
         * ones (FP and SSE). Those can be saved with FXSAVE into the
@@@ -584,7 -585,8 +585,8 @@@ static unsigned int xstate_calculate_si
   */
  static bool __init paranoid_xstate_size_valid(unsigned int kernel_size)
  {
-       bool compacted = cpu_feature_enabled(X86_FEATURE_XSAVES);
+       bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED);
+       bool xsaves = cpu_feature_enabled(X86_FEATURE_XSAVES);
        unsigned int size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
        int i;
  
                 * Supervisor state components can be managed only by
                 * XSAVES.
                 */
-               if (!compacted && xfeature_is_supervisor(i)) {
+               if (!xsaves && xfeature_is_supervisor(i)) {
                        XSTATE_WARN_ON(1);
                        return false;
                }
   * the size of the *user* states.  If we use it to size a buffer
   * that we use 'XSAVES' on, we could potentially overflow the
   * buffer because 'XSAVES' saves system states too.
+  *
+  * This also takes compaction into account. So this works for
+  * XSAVEC as well.
   */
- static unsigned int __init get_xsaves_size(void)
+ static unsigned int __init get_compacted_size(void)
  {
        unsigned int eax, ebx, ecx, edx;
        /*
         *    containing all the state components
         *    corresponding to bits currently set in
         *    XCR0 | IA32_XSS.
+        *
+        * When XSAVES is not available but XSAVEC is (virt), then there
+        * are no supervisor states, but XSAVEC still uses compacted
+        * format.
         */
        cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
        return ebx;
   * Get the total size of the enabled xstates without the independent supervisor
   * features.
   */
- static unsigned int __init get_xsaves_size_no_independent(void)
+ static unsigned int __init get_xsave_compacted_size(void)
  {
        u64 mask = xfeatures_mask_independent();
        unsigned int size;
  
        if (!mask)
-               return get_xsaves_size();
+               return get_compacted_size();
  
        /* Disable independent features. */
        wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor());
         * Ask the hardware what size is required of the buffer.
         * This is the size required for the task->fpu buffer.
         */
-       size = get_xsaves_size();
+       size = get_compacted_size();
  
        /* Re-enable independent features so XSAVES will work on them again. */
        wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask);
@@@ -687,20 -696,21 +696,21 @@@ static int __init init_xstate_size(void
  {
        /* Recompute the context size for enabled features: */
        unsigned int user_size, kernel_size, kernel_default_size;
-       bool compacted = cpu_feature_enabled(X86_FEATURE_XSAVES);
+       bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED);
  
        /* Uncompacted user space size */
        user_size = get_xsave_size_user();
  
        /*
-        * XSAVES kernel size includes supervisor states and
-        * uses compacted format when available.
+        * XSAVES kernel size includes supervisor states and uses compacted
+        * format. XSAVEC uses compacted format, but does not save
+        * supervisor states.
         *
-        * XSAVE does not support supervisor states so
-        * kernel and user size is identical.
+        * XSAVE[OPT] do not support supervisor states so kernel and user
+        * size is identical.
         */
        if (compacted)
-               kernel_size = get_xsaves_size_no_independent();
+               kernel_size = get_xsave_compacted_size();
        else
                kernel_size = user_size;
  
@@@ -813,8 -823,11 +823,11 @@@ void __init fpu__init_system_xstate(uns
        if (!cpu_feature_enabled(X86_FEATURE_XFD))
                fpu_kernel_cfg.max_features &= ~XFEATURE_MASK_USER_DYNAMIC;
  
-       fpu_kernel_cfg.max_features &= XFEATURE_MASK_USER_SUPPORTED |
-                             XFEATURE_MASK_SUPERVISOR_SUPPORTED;
+       if (!cpu_feature_enabled(X86_FEATURE_XSAVES))
+               fpu_kernel_cfg.max_features &= XFEATURE_MASK_USER_SUPPORTED;
+       else
+               fpu_kernel_cfg.max_features &= XFEATURE_MASK_USER_SUPPORTED |
+                                       XFEATURE_MASK_SUPERVISOR_SUPPORTED;
  
        fpu_user_cfg.max_features = fpu_kernel_cfg.max_features;
        fpu_user_cfg.max_features &= XFEATURE_MASK_USER_SUPPORTED;
         */
        init_fpstate.xfd = fpu_user_cfg.max_features & XFEATURE_MASK_USER_DYNAMIC;
  
+       /* Set up compaction feature bit */
+       if (cpu_feature_enabled(X86_FEATURE_XSAVEC) ||
+           cpu_feature_enabled(X86_FEATURE_XSAVES))
+               setup_force_cpu_cap(X86_FEATURE_XCOMPACTED);
        /* Enable xstate instructions to be able to continue with initialization: */
        fpu__init_cpu_xstate();
  
        pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
                fpu_kernel_cfg.max_features,
                fpu_kernel_cfg.max_size,
-               boot_cpu_has(X86_FEATURE_XSAVES) ? "compacted" : "standard");
+               boot_cpu_has(X86_FEATURE_XCOMPACTED) ? "compacted" : "standard");
        return;
  
  out_disable:
@@@ -917,7 -935,7 +935,7 @@@ static void *__raw_xsave_addr(struct xr
        if (WARN_ON_ONCE(!xfeature_enabled(xfeature_nr)))
                return NULL;
  
-       if (cpu_feature_enabled(X86_FEATURE_XSAVES)) {
+       if (cpu_feature_enabled(X86_FEATURE_XCOMPACTED)) {
                if (WARN_ON_ONCE(!(xcomp_bv & BIT_ULL(xfeature_nr))))
                        return NULL;
        }
@@@ -1215,7 -1233,7 +1233,7 @@@ static int copy_uabi_to_xstate(struct f
        }
  
        for (i = 0; i < XFEATURE_MAX; i++) {
-               u64 mask = ((u64)1 << i);
+               mask = BIT_ULL(i);
  
                if (hdr.xfeatures & mask) {
                        void *dst = __raw_xsave_addr(xsave, i);
@@@ -1525,7 -1543,7 +1543,7 @@@ static int __xstate_request_perm(u64 pe
         * vendors into extending XFD for the pre AMX states, especially
         * AVX512.
         */
-       bool compacted = cpu_feature_enabled(X86_FEATURE_XSAVES);
+       bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED);
        struct fpu *fpu = &current->group_leader->thread.fpu;
        struct fpu_state_perm *perm;
        unsigned int ksize, usize;
@@@ -1687,13 -1705,16 +1705,13 @@@ EXPORT_SYMBOL_GPL(xstate_get_guest_grou
   * e.g. for AMX which requires XFEATURE_XTILE_CFG(17) and
   * XFEATURE_XTILE_DATA(18) this would be XFEATURE_XTILE_DATA(18).
   */
 -long fpu_xstate_prctl(struct task_struct *tsk, int option, unsigned long arg2)
 +long fpu_xstate_prctl(int option, unsigned long arg2)
  {
        u64 __user *uptr = (u64 __user *)arg2;
        u64 permitted, supported;
        unsigned long idx = arg2;
        bool guest = false;
  
 -      if (tsk != current)
 -              return -EPERM;
 -
        switch (option) {
        case ARCH_GET_XCOMP_SUPP:
                supported = fpu_user_cfg.max_features | fpu_user_cfg.legacy_features;
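
The ARCH_GET_XCOMP_SUPP case shown above is what the arch_prctl() XSTATE interface hands back to user space. A hedged illustration of calling it (the ARCH_GET_XCOMP_SUPP value 0x1021 and the raw arch_prctl syscall come from the uapi headers, not from this diff, so treat them as assumptions):

/* xcomp-supp.c - user-space sketch, not part of the patch. */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef ARCH_GET_XCOMP_SUPP
#define ARCH_GET_XCOMP_SUPP 0x1021  /* from arch/x86/include/uapi/asm/prctl.h */
#endif

int main(void)
{
        uint64_t supported = 0;

        /* Lands in fpu_xstate_prctl(), case ARCH_GET_XCOMP_SUPP above:
         * the kernel returns fpu_user_cfg.max_features | legacy_features. */
        if (syscall(SYS_arch_prctl, ARCH_GET_XCOMP_SUPP, &supported)) {
                perror("arch_prctl(ARCH_GET_XCOMP_SUPP)");
                return 1;
        }

        printf("supported xfeatures: 0x%llx\n", (unsigned long long)supported);
        return 0;
}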