]> Git Repo - linux.git/blob - arch/x86/kernel/fpu/init.c
mptcp: factor out mptcp_connect()
[linux.git] / arch / x86 / kernel / fpu / init.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * x86 FPU boot time init code:
4  */
5 #include <asm/fpu/api.h>
6 #include <asm/tlbflush.h>
7 #include <asm/setup.h>
8
9 #include <linux/sched.h>
10 #include <linux/sched/task.h>
11 #include <linux/init.h>
12
13 #include "internal.h"
14 #include "legacy.h"
15 #include "xstate.h"
16
17 /*
18  * Initialize the registers found in all CPUs, CR0 and CR4:
19  */
/*
 * Initialize the registers found in all CPUs, CR0 and CR4:
 * enable FPU-related instruction support in CR4 and clear the
 * CR0 bits that would trap FPU instructions.
 */
static void fpu__init_cpu_generic(void)
{
	unsigned long cr0;
	unsigned long cr4_mask = 0;

	/* Enable FXSAVE/FXRSTOR support if the CPU has it: */
	if (boot_cpu_has(X86_FEATURE_FXSR))
		cr4_mask |= X86_CR4_OSFXSR;
	/* Enable unmasked SSE exceptions (#XM) if SSE is present: */
	if (boot_cpu_has(X86_FEATURE_XMM))
		cr4_mask |= X86_CR4_OSXMMEXCPT;
	if (cr4_mask)
		cr4_set_bits(cr4_mask);

	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
	if (!boot_cpu_has(X86_FEATURE_FPU))
		cr0 |= X86_CR0_EM;	/* no FPU: trap FPU insns so they can be emulated */
	write_cr0(cr0);

	/* Flush out any pending x87 state: */
#ifdef CONFIG_MATH_EMULATION
	/* No hardware FPU: initialize the software-emulated state instead. */
	if (!boot_cpu_has(X86_FEATURE_FPU))
		fpstate_init_soft(&current->thread.fpu.fpstate->regs.soft);
	else
#endif
		asm volatile ("fninit");
}
46
47 /*
48  * Enable all supported FPU features. Called when a CPU is brought online:
49  */
/*
 * Enable all supported FPU features. Called when a CPU is brought online:
 * first the generic CR0/CR4 setup, then the per-CPU xstate setup.
 */
void fpu__init_cpu(void)
{
	fpu__init_cpu_generic();
	fpu__init_cpu_xstate();
}
55
/*
 * Probe for an x87 FPU on CPUs that lack CPUID: execute FNINIT and
 * check that the status and control words read back with their
 * architecturally defined post-FNINIT values.
 *
 * Returns true if an FPU responded to the probe.
 */
static bool fpu__probe_without_cpuid(void)
{
	unsigned long cr0;
	u16 fsw, fcw;

	/* Poison both words: a non-responding FPU leaves them unchanged. */
	fsw = fcw = 0xffff;

	/* Clear TS and EM so the FNINIT/FNSTSW/FNSTCW below do not trap: */
	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
	write_cr0(cr0);

	asm volatile("fninit ; fnstsw %0 ; fnstcw %1" : "+m" (fsw), "+m" (fcw));

	pr_info("x86/fpu: Probing for FPU: FSW=0x%04hx FCW=0x%04hx\n", fsw, fcw);

	/* After FNINIT: FSW must be 0 and the checked FCW bits must be 0x003f. */
	return fsw == 0 && (fcw & 0x103f) == 0x003f;
}
73
/*
 * Early boot FPU detection. If CPUID is unavailable (and the FPU
 * capability was not already cleared, e.g. on the command line),
 * probe for the FPU by hand and force or clear the capability bit
 * accordingly. (@c is currently unused.)
 *
 * Without math emulation configured, a missing FPU is fatal:
 * print a message and halt forever.
 */
static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
{
	if (!boot_cpu_has(X86_FEATURE_CPUID) &&
	    !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
		if (fpu__probe_without_cpuid())
			setup_force_cpu_cap(X86_FEATURE_FPU);
		else
			setup_clear_cpu_cap(X86_FEATURE_FPU);
	}

#ifndef CONFIG_MATH_EMULATION
	if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_FPU)) {
		pr_emerg("x86/fpu: Giving up, no FPU found and no math emulation present\n");
		for (;;)
			asm volatile("hlt");
	}
#endif
}
92
93 /*
94  * Boot time FPU feature detection code:
95  */
/*
 * Mask of the MXCSR bits this CPU supports: starts out all-set and is
 * narrowed by fpu__init_system_mxcsr() below during boot.
 */
unsigned int mxcsr_feature_mask __ro_after_init = 0xffffffffu;
EXPORT_SYMBOL_GPL(mxcsr_feature_mask);
98
/*
 * Determine the set of MXCSR bits the CPU supports by reading the
 * mxcsr_mask field out of an FXSAVE image, and narrow
 * mxcsr_feature_mask with it.
 */
static void __init fpu__init_system_mxcsr(void)
{
	unsigned int mask = 0;

	if (boot_cpu_has(X86_FEATURE_FXSR)) {
		/* Static because GCC does not get 16-byte stack alignment right: */
		static struct fxregs_state fxregs __initdata;

		asm volatile("fxsave %0" : "+m" (fxregs));

		mask = fxregs.mxcsr_mask;

		/*
		 * If zero then use the default features mask,
		 * which has all features set, except the
		 * denormals-are-zero feature bit:
		 */
		if (mask == 0)
			mask = 0x0000ffbf;
	}
	/* Without FXSR, mask stays 0: no MXCSR bits are supported. */
	mxcsr_feature_mask &= mask;
}
121
122 /*
123  * Once per bootup FPU initialization sequences that will run on most x86 CPUs:
124  */
/*
 * Once per bootup FPU initialization sequences that will run on most x86 CPUs:
 * set up the legacy init FPU context and the MXCSR feature mask.
 */
static void __init fpu__init_system_generic(void)
{
	/*
	 * Set up the legacy init FPU context. Will be updated when the
	 * CPU supports XSAVE[S].
	 */
	fpstate_init_user(&init_fpstate);

	fpu__init_system_mxcsr();
}
135
/*
 * Get alignment of the TYPE: offsetof() on an anonymous struct with a
 * single leading char yields the alignment the compiler gives TYPE.
 */
#define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test)

/*
 * Enforce that 'MEMBER' is the last field of 'TYPE'.
 *
 * Align the computed size with alignment of the TYPE,
 * because that's how C aligns structs (tail padding up to the
 * type's alignment is part of sizeof()).
 */
#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
	BUILD_BUG_ON(sizeof(TYPE) != ALIGN(offsetofend(TYPE, MEMBER), \
					   TYPE_ALIGN(TYPE)))
148
149 /*
150  * We append the 'struct fpu' to the task_struct:
151  */
/*
 * We append the 'struct fpu' to the task_struct:
 * compute the real task_struct size by swapping the static register
 * state size for the dynamically-detected one, and publish it in
 * arch_task_struct_size.
 */
static void __init fpu__init_task_struct_size(void)
{
	int task_size = sizeof(struct task_struct);

	/*
	 * Subtract off the static size of the register state.
	 * It potentially has a bunch of padding.
	 */
	task_size -= sizeof(current->thread.fpu.__fpstate.regs);

	/*
	 * Add back the dynamically-calculated register state
	 * size.
	 */
	task_size += fpu_kernel_cfg.default_size;

	/*
	 * We dynamically size 'struct fpu', so we require that
	 * it be at the end of 'thread_struct' and that
	 * 'thread_struct' be at the end of 'task_struct'.  If
	 * you hit a compile error here, check the structure to
	 * see if something got added to the end.
	 */
	CHECK_MEMBER_AT_END_OF(struct fpu, __fpstate);
	CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu);
	CHECK_MEMBER_AT_END_OF(struct task_struct, thread);

	arch_task_struct_size = task_size;
}
181
182 /*
183  * Set up the user and kernel xstate sizes based on the legacy FPU context size.
184  *
185  * We set this up first, and later it will be overwritten by
186  * fpu__init_system_xstate() if the CPU knows about xstates.
187  */
/*
 * Set up the user and kernel xstate sizes based on the legacy FPU context size.
 *
 * We set this up first, and later it will be overwritten by
 * fpu__init_system_xstate() if the CPU knows about xstates.
 */
static void __init fpu__init_system_xstate_size_legacy(void)
{
	unsigned int size;

	/*
	 * Note that the size configuration might be overwritten later
	 * during fpu__init_system_xstate().
	 */
	if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
		/* No hardware FPU: only the software-emulation state is kept. */
		size = sizeof(struct swregs_state);
	} else if (cpu_feature_enabled(X86_FEATURE_FXSR)) {
		/* FXSAVE layout: x87 + SSE state. */
		size = sizeof(struct fxregs_state);
		fpu_user_cfg.legacy_features = XFEATURE_MASK_FPSSE;
	} else {
		/* Plain FSAVE layout: x87 state only. */
		size = sizeof(struct fregs_state);
		fpu_user_cfg.legacy_features = XFEATURE_MASK_FP;
	}

	/* Legacy formats have a single fixed size for all four limits: */
	fpu_kernel_cfg.max_size = size;
	fpu_kernel_cfg.default_size = size;
	fpu_user_cfg.max_size = size;
	fpu_user_cfg.default_size = size;
	fpstate_reset(&current->thread.fpu);
}
212
213 /*
214  * Called on the boot CPU once per system bootup, to set up the initial
215  * FPU state that is later cloned into all processes:
216  */
/*
 * Called on the boot CPU once per system bootup, to set up the initial
 * FPU state that is later cloned into all processes:
 *
 * NOTE: the ordering below is load-bearing — detection must precede
 * CPU enablement, which must precede the system-wide setup steps.
 */
void __init fpu__init_system(struct cpuinfo_x86 *c)
{
	fpstate_reset(&current->thread.fpu);
	fpu__init_system_early_generic(c);

	/*
	 * The FPU has to be operational for some of the
	 * later FPU init activities:
	 */
	fpu__init_cpu();

	fpu__init_system_generic();
	fpu__init_system_xstate_size_legacy();
	fpu__init_system_xstate(fpu_kernel_cfg.max_size);
	fpu__init_task_struct_size();
}
This page took 0.048004 seconds and 4 git commands to generate.