Git Repo - linux.git/commitdiff
Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <[email protected]>
Thu, 27 Dec 2018 01:37:51 +0000 (17:37 -0800)
committer Linus Torvalds <[email protected]>
Thu, 27 Dec 2018 01:37:51 +0000 (17:37 -0800)
Pull x86 fpu updates from Ingo Molnar:
 "Misc preparatory changes for an upcoming FPU optimization that will
  delay the loading of FPU registers to return-to-userspace"

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/fpu: Don't export __kernel_fpu_{begin,end}()
  x86/fpu: Update comment for __raw_xsave_addr()
  x86/fpu: Add might_fault() to user_insn()
  x86/pkeys: Make init_pkru_value static
  x86/thread_info: Remove _TIF_ALLWORK_MASK
  x86/process/32: Remove asm/math_emu.h include
  x86/fpu: Use unsigned long long shift in xfeature_uncompacted_offset()

1  2 
arch/x86/include/asm/efi.h
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/process_32.c

index 42982a6cc6cf966edb03d264a0fc235712ce5051,45864898f7e50e887874de094f38a34d60a28dc4..107283b1eb1e4f007f228e3699fa2eac65c6decc
@@@ -19,7 -19,7 +19,7 @@@
   * This is the main reason why we're doing stable VA mappings for RT
   * services.
   *
 - * This flag is used in conjuction with a chicken bit called
 + * This flag is used in conjunction with a chicken bit called
   * "efi=old_map" which can be used as a fallback to the old runtime
   * services mapping method in case there's some b0rkage with a
   * particular EFI implementation (haha, it is hard to hold up the
@@@ -82,8 -82,7 +82,7 @@@ struct efi_scratch 
  #define arch_efi_call_virt_setup()                                    \
  ({                                                                    \
        efi_sync_low_kernel_mappings();                                 \
-       preempt_disable();                                              \
-       __kernel_fpu_begin();                                           \
+       kernel_fpu_begin();                                             \
        firmware_restrict_branch_speculation_start();                   \
                                                                        \
        if (!efi_enabled(EFI_OLD_MEMMAP))                               \
@@@ -99,8 -98,7 +98,7 @@@
                efi_switch_mm(efi_scratch.prev_mm);                     \
                                                                        \
        firmware_restrict_branch_speculation_end();                     \
-       __kernel_fpu_end();                                             \
-       preempt_enable();                                               \
+       kernel_fpu_end();                                               \
  })
  
  extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
@@@ -141,8 -139,6 +139,8 @@@ extern int __init efi_reuse_config(u64 
  extern void efi_delete_dummy_variable(void);
  extern void efi_switch_mm(struct mm_struct *mm);
  extern void efi_recover_from_page_fault(unsigned long phys_addr);
 +extern void efi_free_boot_services(void);
 +extern void efi_reserve_boot_services(void);
  
  struct efi_setup_data {
        u64 fw_vendor;
index cd3956fc8158d58d00033173d0c0ea11253f14d3,359564beab5c38b1869cb26d617ffafbe7318d0f..9cc108456d0be4b2dc52c4e2b4a98c66ddd101eb
@@@ -444,7 -444,7 +444,7 @@@ static int xfeature_uncompacted_offset(
         * format. Checking a supervisor state's uncompacted offset is
         * an error.
         */
-       if (XFEATURE_MASK_SUPERVISOR & (1 << xfeature_nr)) {
+       if (XFEATURE_MASK_SUPERVISOR & BIT_ULL(xfeature_nr)) {
                WARN_ONCE(1, "No fixed offset for xstate %d\n", xfeature_nr);
                return -1;
        }
@@@ -808,10 -808,8 +808,8 @@@ void fpu__resume_cpu(void
   * Given an xstate feature mask, calculate where in the xsave
   * buffer the state is.  Callers should ensure that the buffer
   * is valid.
-  *
-  * Note: does not work for compacted buffers.
   */
 -void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask)
 +static void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask)
  {
        int feature_nr = fls64(xstate_feature_mask) - 1;
  
index 9d08f0510620f1d5da4400a2f659a7ca0ea29606,575654db463ec798ad807ad8781536e6ab679da5..e471d8e6f0b248951a44654f5222ef217fd4dd2e
@@@ -44,9 -44,6 +44,6 @@@
  #include <asm/processor.h>
  #include <asm/fpu/internal.h>
  #include <asm/desc.h>
- #ifdef CONFIG_MATH_EMULATION
- #include <asm/math_emu.h>
- #endif
  
  #include <linux/err.h>
  
@@@ -56,7 -53,7 +53,7 @@@
  #include <asm/debugreg.h>
  #include <asm/switch_to.h>
  #include <asm/vm86.h>
 -#include <asm/intel_rdt_sched.h>
 +#include <asm/resctrl_sched.h>
  #include <asm/proto.h>
  
  #include "process.h"
@@@ -298,7 -295,7 +295,7 @@@ __switch_to(struct task_struct *prev_p
        this_cpu_write(current_task, next_p);
  
        /* Load the Intel cache allocation PQR MSR. */
 -      intel_rdt_sched_in();
 +      resctrl_sched_in();
  
        return prev_p;
  }
This page took 0.075157 seconds and 4 git commands to generate.