x86/fpu: Split old_fpu & new_fpu handling into separate functions
author Rik van Riel <[email protected]>
Fri, 14 Oct 2016 12:15:31 +0000 (08:15 -0400)
committer Ingo Molnar <[email protected]>
Sun, 16 Oct 2016 09:38:41 +0000 (11:38 +0200)
By moving all of the new_fpu state handling into switch_fpu_finish(),
the code can be simplified some more.

This gets rid of the prefetch, but given the size of the FPU register
state on modern CPUs, and the amount of work done by __switch_to()
in between the two functions, the value of a single cache line
prefetch seems somewhat dubious anyway.
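For illustration, a minimal sketch of the call pattern that results from
this patch, using the names from arch/x86/kernel/process_32.c as shown in
the diff below (the elided middle stands for the rest of __switch_to()):

	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();

	/* Runs in the context of the old task: save its FPU registers. */
	switch_fpu_prepare(prev_fpu, cpu);

	/* ... segment, TLS and stack switching ... */

	/*
	 * Runs for the new task: restore its FPU registers, unless they
	 * are still valid on this CPU (fpregs_state_valid()).
	 */
	switch_fpu_finish(next_fpu, cpu);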

Signed-off-by: Rik van Riel <[email protected]>
Acked-by: Dave Hansen <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: Fenghua Yu <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Quentin Casasnovas <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
arch/x86/include/asm/fpu/internal.h
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 590f27488fcaf68feb8b043264a46b6920606336..d4a684997497bbb431c8d7ead98ed3a438cba0fd 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -552,27 +552,15 @@ static inline int fpregs_active(void)
  *
  * This is a two-stage process:
  *
- *  - switch_fpu_prepare() saves the old state and
- *    sets the new state of the CR0.TS bit. This is
- *    done within the context of the old process.
+ *  - switch_fpu_prepare() saves the old state.
+ *    This is done within the context of the old process.
  *
  *  - switch_fpu_finish() restores the new state as
  *    necessary.
  */
-typedef struct { int preload; } fpu_switch_t;
-
-static inline fpu_switch_t
-switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
+static inline void
+switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-       fpu_switch_t fpu;
-
-       /*
-        * If the task has used the math, pre-load the FPU on xsave processors
-        * or if the past 5 consecutive context-switches used math.
-        */
-       fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
-                     new_fpu->fpstate_active;
-
        if (old_fpu->fpregs_active) {
                if (!copy_fpregs_to_fpstate(old_fpu))
                        old_fpu->last_cpu = -1;
@@ -584,16 +572,6 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
                trace_x86_fpu_regs_deactivated(old_fpu);
        } else
                old_fpu->last_cpu = -1;
-
-       if (fpu.preload) {
-               if (fpregs_state_valid(new_fpu, cpu))
-                       fpu.preload = 0;
-               else
-                       prefetch(&new_fpu->state);
-               fpregs_activate(new_fpu);
-       }
-
-       return fpu;
 }
 
 /*
@@ -601,15 +579,19 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
  */
 
 /*
- * By the time this gets called, we've already cleared CR0.TS and
- * given the process the FPU if we are going to preload the FPU
- * state - all we need to do is to conditionally restore the register
- * state itself.
+ * Set up the userspace FPU context for the new task, if the task
+ * has used the FPU.
  */
-static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
+static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
 {
-       if (fpu_switch.preload)
-               copy_kernel_to_fpregs(&new_fpu->state);
+       bool preload = static_cpu_has(X86_FEATURE_FPU) &&
+                      new_fpu->fpstate_active;
+
+       if (preload) {
+               if (!fpregs_state_valid(new_fpu, cpu))
+                       copy_kernel_to_fpregs(&new_fpu->state);
+               fpregs_activate(new_fpu);
+       }
 }
 
 /*
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index bd7be8efdc4ce7ae49f155b7f701f6d6d800293f..7dc8c9c3d8019e21315f54e97f7b25bffe1de7e0 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -232,11 +232,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
-       fpu_switch_t fpu_switch;
 
        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
-       fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
+       switch_fpu_prepare(prev_fpu, cpu);
 
        /*
         * Save away %gs. No need to save %fs, as it was saved on the
@@ -295,7 +294,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        if (prev->gs | next->gs)
                lazy_load_gs(next->gs);
 
-       switch_fpu_finish(next_fpu, fpu_switch);
+       switch_fpu_finish(next_fpu, cpu);
 
        this_cpu_write(current_task, next_p);
 
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ee944bd2310d8b0017954e558d6067c3e85b3001..705669efb7624c19f22c1fcbc15205ca82dc79a6 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -264,9 +264,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
        unsigned prev_fsindex, prev_gsindex;
-       fpu_switch_t fpu_switch;
 
-       fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
+       switch_fpu_prepare(prev_fpu, cpu);
 
        /* We must save %fs and %gs before load_TLS() because
         * %fs and %gs may be cleared by load_TLS().
@@ -416,7 +415,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                prev->gsbase = 0;
        prev->gsindex = prev_gsindex;
 
-       switch_fpu_finish(next_fpu, fpu_switch);
+       switch_fpu_finish(next_fpu, cpu);
 
        /*
         * Switch the PDA and FPU contexts.