]> Git Repo - linux.git/blob - arch/x86/kernel/cpu/common.c
Merge tag 'x86_microcode_for_v6.3_rc1' of git://git.kernel.org/pub/scm/linux/kernel...
[linux.git] / arch / x86 / kernel / cpu / common.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* cpu_feature_enabled() cannot be used this early */
3 #define USE_EARLY_PGTABLE_L5
4
5 #include <linux/memblock.h>
6 #include <linux/linkage.h>
7 #include <linux/bitops.h>
8 #include <linux/kernel.h>
9 #include <linux/export.h>
10 #include <linux/percpu.h>
11 #include <linux/string.h>
12 #include <linux/ctype.h>
13 #include <linux/delay.h>
14 #include <linux/sched/mm.h>
15 #include <linux/sched/clock.h>
16 #include <linux/sched/task.h>
17 #include <linux/sched/smt.h>
18 #include <linux/init.h>
19 #include <linux/kprobes.h>
20 #include <linux/kgdb.h>
21 #include <linux/smp.h>
22 #include <linux/io.h>
23 #include <linux/syscore_ops.h>
24 #include <linux/pgtable.h>
25 #include <linux/stackprotector.h>
26
27 #include <asm/cmdline.h>
28 #include <asm/perf_event.h>
29 #include <asm/mmu_context.h>
30 #include <asm/doublefault.h>
31 #include <asm/archrandom.h>
32 #include <asm/hypervisor.h>
33 #include <asm/processor.h>
34 #include <asm/tlbflush.h>
35 #include <asm/debugreg.h>
36 #include <asm/sections.h>
37 #include <asm/vsyscall.h>
38 #include <linux/topology.h>
39 #include <linux/cpumask.h>
40 #include <linux/atomic.h>
41 #include <asm/proto.h>
42 #include <asm/setup.h>
43 #include <asm/apic.h>
44 #include <asm/desc.h>
45 #include <asm/fpu/api.h>
46 #include <asm/mtrr.h>
47 #include <asm/hwcap2.h>
48 #include <linux/numa.h>
49 #include <asm/numa.h>
50 #include <asm/asm.h>
51 #include <asm/bugs.h>
52 #include <asm/cpu.h>
53 #include <asm/mce.h>
54 #include <asm/msr.h>
55 #include <asm/cacheinfo.h>
56 #include <asm/memtype.h>
57 #include <asm/microcode.h>
58 #include <asm/microcode_intel.h>
59 #include <asm/intel-family.h>
60 #include <asm/cpu_device_id.h>
61 #include <asm/uv/uv.h>
62 #include <asm/sigframe.h>
63 #include <asm/traps.h>
64 #include <asm/sev.h>
65
66 #include "cpu.h"
67
68 u32 elf_hwcap2 __read_mostly;
69
70 /* all of these masks are initialized in setup_cpu_local_masks() */
71 cpumask_var_t cpu_initialized_mask;
72 cpumask_var_t cpu_callout_mask;
73 cpumask_var_t cpu_callin_mask;
74
75 /* representing cpus for which sibling maps can be computed */
76 cpumask_var_t cpu_sibling_setup_mask;
77
78 /* Number of siblings per CPU package */
79 int smp_num_siblings = 1;
80 EXPORT_SYMBOL(smp_num_siblings);
81
82 /* Last level cache ID of each logical CPU */
83 DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
84
85 u16 get_llc_id(unsigned int cpu)
86 {
87         return per_cpu(cpu_llc_id, cpu);
88 }
89 EXPORT_SYMBOL_GPL(get_llc_id);
90
91 /* L2 cache ID of each logical CPU */
92 DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_l2c_id) = BAD_APICID;
93
94 static struct ppin_info {
95         int     feature;
96         int     msr_ppin_ctl;
97         int     msr_ppin;
98 } ppin_info[] = {
99         [X86_VENDOR_INTEL] = {
100                 .feature = X86_FEATURE_INTEL_PPIN,
101                 .msr_ppin_ctl = MSR_PPIN_CTL,
102                 .msr_ppin = MSR_PPIN
103         },
104         [X86_VENDOR_AMD] = {
105                 .feature = X86_FEATURE_AMD_PPIN,
106                 .msr_ppin_ctl = MSR_AMD_PPIN_CTL,
107                 .msr_ppin = MSR_AMD_PPIN
108         },
109 };
110
111 static const struct x86_cpu_id ppin_cpuids[] = {
112         X86_MATCH_FEATURE(X86_FEATURE_AMD_PPIN, &ppin_info[X86_VENDOR_AMD]),
113         X86_MATCH_FEATURE(X86_FEATURE_INTEL_PPIN, &ppin_info[X86_VENDOR_INTEL]),
114
115         /* Legacy models without CPUID enumeration */
116         X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &ppin_info[X86_VENDOR_INTEL]),
117         X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &ppin_info[X86_VENDOR_INTEL]),
118         X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &ppin_info[X86_VENDOR_INTEL]),
119         X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &ppin_info[X86_VENDOR_INTEL]),
120         X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &ppin_info[X86_VENDOR_INTEL]),
121         X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &ppin_info[X86_VENDOR_INTEL]),
122         X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &ppin_info[X86_VENDOR_INTEL]),
123         X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
124         X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &ppin_info[X86_VENDOR_INTEL]),
125         X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &ppin_info[X86_VENDOR_INTEL]),
126
127         {}
128 };
129
130 static void ppin_init(struct cpuinfo_x86 *c)
131 {
132         const struct x86_cpu_id *id;
133         unsigned long long val;
134         struct ppin_info *info;
135
136         id = x86_match_cpu(ppin_cpuids);
137         if (!id)
138                 return;
139
140         /*
141          * Testing the presence of the MSR is not enough. Need to check
142          * that the PPIN_CTL allows reading of the PPIN.
143          */
144         info = (struct ppin_info *)id->driver_data;
145
146         if (rdmsrl_safe(info->msr_ppin_ctl, &val))
147                 goto clear_ppin;
148
149         if ((val & 3UL) == 1UL) {
150                 /* PPIN locked in disabled mode */
151                 goto clear_ppin;
152         }
153
154         /* If PPIN is disabled, try to enable */
155         if (!(val & 2UL)) {
156                 wrmsrl_safe(info->msr_ppin_ctl,  val | 2UL);
157                 rdmsrl_safe(info->msr_ppin_ctl, &val);
158         }
159
160         /* Is the enable bit set? */
161         if (val & 2UL) {
162                 c->ppin = __rdmsr(info->msr_ppin);
163                 set_cpu_cap(c, info->feature);
164                 return;
165         }
166
167 clear_ppin:
168         clear_cpu_cap(c, info->feature);
169 }
170
171 /* correctly size the local cpu masks */
172 void __init setup_cpu_local_masks(void)
173 {
174         alloc_bootmem_cpumask_var(&cpu_initialized_mask);
175         alloc_bootmem_cpumask_var(&cpu_callin_mask);
176         alloc_bootmem_cpumask_var(&cpu_callout_mask);
177         alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
178 }
179
180 static void default_init(struct cpuinfo_x86 *c)
181 {
182 #ifdef CONFIG_X86_64
183         cpu_detect_cache_sizes(c);
184 #else
185         /* Not much we can do here... */
186         /* Check if at least it has cpuid */
187         if (c->cpuid_level == -1) {
188                 /* No cpuid. It must be an ancient CPU */
189                 if (c->x86 == 4)
190                         strcpy(c->x86_model_id, "486");
191                 else if (c->x86 == 3)
192                         strcpy(c->x86_model_id, "386");
193         }
194 #endif
195 }
196
197 static const struct cpu_dev default_cpu = {
198         .c_init         = default_init,
199         .c_vendor       = "Unknown",
200         .c_x86_vendor   = X86_VENDOR_UNKNOWN,
201 };
202
203 static const struct cpu_dev *this_cpu = &default_cpu;
204
205 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
206 #ifdef CONFIG_X86_64
207         /*
208          * We need valid kernel segments for data and code in long mode too
209          * IRET will check the segment types  kkeil 2000/10/28
210          * Also sysret mandates a special GDT layout
211          *
212          * TLS descriptors are currently at a different place compared to i386.
213          * Hopefully nobody expects them at a fixed place (Wine?)
214          */
215         [GDT_ENTRY_KERNEL32_CS]         = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
216         [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
217         [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
218         [GDT_ENTRY_DEFAULT_USER32_CS]   = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
219         [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
220         [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
221 #else
222         [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
223         [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
224         [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
225         [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
226         /*
227          * Segments used for calling PnP BIOS have byte granularity.
228          * They code segments and data segments have fixed 64k limits,
229          * the transfer segment sizes are set at run time.
230          */
231         /* 32-bit code */
232         [GDT_ENTRY_PNPBIOS_CS32]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
233         /* 16-bit code */
234         [GDT_ENTRY_PNPBIOS_CS16]        = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
235         /* 16-bit data */
236         [GDT_ENTRY_PNPBIOS_DS]          = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
237         /* 16-bit data */
238         [GDT_ENTRY_PNPBIOS_TS1]         = GDT_ENTRY_INIT(0x0092, 0, 0),
239         /* 16-bit data */
240         [GDT_ENTRY_PNPBIOS_TS2]         = GDT_ENTRY_INIT(0x0092, 0, 0),
241         /*
242          * The APM segments have byte granularity and their bases
243          * are set at run time.  All have 64k limits.
244          */
245         /* 32-bit code */
246         [GDT_ENTRY_APMBIOS_BASE]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
247         /* 16-bit code */
248         [GDT_ENTRY_APMBIOS_BASE+1]      = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
249         /* data */
250         [GDT_ENTRY_APMBIOS_BASE+2]      = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
251
252         [GDT_ENTRY_ESPFIX_SS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
253         [GDT_ENTRY_PERCPU]              = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
254 #endif
255 } };
256 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
257
258 #ifdef CONFIG_X86_64
259 static int __init x86_nopcid_setup(char *s)
260 {
261         /* nopcid doesn't accept parameters */
262         if (s)
263                 return -EINVAL;
264
265         /* do not emit a message if the feature is not present */
266         if (!boot_cpu_has(X86_FEATURE_PCID))
267                 return 0;
268
269         setup_clear_cpu_cap(X86_FEATURE_PCID);
270         pr_info("nopcid: PCID feature disabled\n");
271         return 0;
272 }
273 early_param("nopcid", x86_nopcid_setup);
274 #endif
275
276 static int __init x86_noinvpcid_setup(char *s)
277 {
278         /* noinvpcid doesn't accept parameters */
279         if (s)
280                 return -EINVAL;
281
282         /* do not emit a message if the feature is not present */
283         if (!boot_cpu_has(X86_FEATURE_INVPCID))
284                 return 0;
285
286         setup_clear_cpu_cap(X86_FEATURE_INVPCID);
287         pr_info("noinvpcid: INVPCID feature disabled\n");
288         return 0;
289 }
290 early_param("noinvpcid", x86_noinvpcid_setup);
291
292 #ifdef CONFIG_X86_32
293 static int cachesize_override = -1;
294 static int disable_x86_serial_nr = 1;
295
296 static int __init cachesize_setup(char *str)
297 {
298         get_option(&str, &cachesize_override);
299         return 1;
300 }
301 __setup("cachesize=", cachesize_setup);
302
303 /* Standard macro to see if a specific flag is changeable */
304 static inline int flag_is_changeable_p(u32 flag)
305 {
306         u32 f1, f2;
307
308         /*
309          * Cyrix and IDT cpus allow disabling of CPUID
310          * so the code below may return different results
311          * when it is executed before and after enabling
312          * the CPUID. Add "volatile" to not allow gcc to
313          * optimize the subsequent calls to this function.
314          */
315         asm volatile ("pushfl           \n\t"
316                       "pushfl           \n\t"
317                       "popl %0          \n\t"
318                       "movl %0, %1      \n\t"
319                       "xorl %2, %0      \n\t"
320                       "pushl %0         \n\t"
321                       "popfl            \n\t"
322                       "pushfl           \n\t"
323                       "popl %0          \n\t"
324                       "popfl            \n\t"
325
326                       : "=&r" (f1), "=&r" (f2)
327                       : "ir" (flag));
328
329         return ((f1^f2) & flag) != 0;
330 }
331
332 /* Probe for the CPUID instruction */
333 int have_cpuid_p(void)
334 {
335         return flag_is_changeable_p(X86_EFLAGS_ID);
336 }
337
338 static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
339 {
340         unsigned long lo, hi;
341
342         if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
343                 return;
344
345         /* Disable processor serial number: */
346
347         rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
348         lo |= 0x200000;
349         wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
350
351         pr_notice("CPU serial number disabled.\n");
352         clear_cpu_cap(c, X86_FEATURE_PN);
353
354         /* Disabling the serial number may affect the cpuid level */
355         c->cpuid_level = cpuid_eax(0);
356 }
357
358 static int __init x86_serial_nr_setup(char *s)
359 {
360         disable_x86_serial_nr = 0;
361         return 1;
362 }
363 __setup("serialnumber", x86_serial_nr_setup);
364 #else
365 static inline int flag_is_changeable_p(u32 flag)
366 {
367         return 1;
368 }
369 static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
370 {
371 }
372 #endif
373
374 static __always_inline void setup_smep(struct cpuinfo_x86 *c)
375 {
376         if (cpu_has(c, X86_FEATURE_SMEP))
377                 cr4_set_bits(X86_CR4_SMEP);
378 }
379
380 static __always_inline void setup_smap(struct cpuinfo_x86 *c)
381 {
382         unsigned long eflags = native_save_fl();
383
384         /* This should have been cleared long ago */
385         BUG_ON(eflags & X86_EFLAGS_AC);
386
387         if (cpu_has(c, X86_FEATURE_SMAP))
388                 cr4_set_bits(X86_CR4_SMAP);
389 }
390
391 static __always_inline void setup_umip(struct cpuinfo_x86 *c)
392 {
393         /* Check the boot processor, plus build option for UMIP. */
394         if (!cpu_feature_enabled(X86_FEATURE_UMIP))
395                 goto out;
396
397         /* Check the current processor's cpuid bits. */
398         if (!cpu_has(c, X86_FEATURE_UMIP))
399                 goto out;
400
401         cr4_set_bits(X86_CR4_UMIP);
402
403         pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");
404
405         return;
406
407 out:
408         /*
409          * Make sure UMIP is disabled in case it was enabled in a
410          * previous boot (e.g., via kexec).
411          */
412         cr4_clear_bits(X86_CR4_UMIP);
413 }
414
415 /* These bits should not change their value after CPU init is finished. */
416 static const unsigned long cr4_pinned_mask =
417         X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP |
418         X86_CR4_FSGSBASE | X86_CR4_CET;
419 static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
420 static unsigned long cr4_pinned_bits __ro_after_init;
421
422 void native_write_cr0(unsigned long val)
423 {
424         unsigned long bits_missing = 0;
425
426 set_register:
427         asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");
428
429         if (static_branch_likely(&cr_pinning)) {
430                 if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
431                         bits_missing = X86_CR0_WP;
432                         val |= bits_missing;
433                         goto set_register;
434                 }
435                 /* Warn after we've set the missing bits. */
436                 WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n");
437         }
438 }
439 EXPORT_SYMBOL(native_write_cr0);
440
441 void __no_profile native_write_cr4(unsigned long val)
442 {
443         unsigned long bits_changed = 0;
444
445 set_register:
446         asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");
447
448         if (static_branch_likely(&cr_pinning)) {
449                 if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
450                         bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
451                         val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
452                         goto set_register;
453                 }
454                 /* Warn after we've corrected the changed bits. */
455                 WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
456                           bits_changed);
457         }
458 }
459 #if IS_MODULE(CONFIG_LKDTM)
460 EXPORT_SYMBOL_GPL(native_write_cr4);
461 #endif
462
463 void cr4_update_irqsoff(unsigned long set, unsigned long clear)
464 {
465         unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);
466
467         lockdep_assert_irqs_disabled();
468
469         newval = (cr4 & ~clear) | set;
470         if (newval != cr4) {
471                 this_cpu_write(cpu_tlbstate.cr4, newval);
472                 __write_cr4(newval);
473         }
474 }
475 EXPORT_SYMBOL(cr4_update_irqsoff);
476
477 /* Read the CR4 shadow. */
478 unsigned long cr4_read_shadow(void)
479 {
480         return this_cpu_read(cpu_tlbstate.cr4);
481 }
482 EXPORT_SYMBOL_GPL(cr4_read_shadow);
483
484 void cr4_init(void)
485 {
486         unsigned long cr4 = __read_cr4();
487
488         if (boot_cpu_has(X86_FEATURE_PCID))
489                 cr4 |= X86_CR4_PCIDE;
490         if (static_branch_likely(&cr_pinning))
491                 cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;
492
493         __write_cr4(cr4);
494
495         /* Initialize cr4 shadow for this CPU. */
496         this_cpu_write(cpu_tlbstate.cr4, cr4);
497 }
498
499 /*
500  * Once CPU feature detection is finished (and boot params have been
501  * parsed), record any of the sensitive CR bits that are set, and
502  * enable CR pinning.
503  */
504 static void __init setup_cr_pinning(void)
505 {
506         cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
507         static_key_enable(&cr_pinning.key);
508 }
509
510 static __init int x86_nofsgsbase_setup(char *arg)
511 {
512         /* Require an exact match without trailing characters. */
513         if (strlen(arg))
514                 return 0;
515
516         /* Do not emit a message if the feature is not present. */
517         if (!boot_cpu_has(X86_FEATURE_FSGSBASE))
518                 return 1;
519
520         setup_clear_cpu_cap(X86_FEATURE_FSGSBASE);
521         pr_info("FSGSBASE disabled via kernel command line\n");
522         return 1;
523 }
524 __setup("nofsgsbase", x86_nofsgsbase_setup);
525
526 /*
527  * Protection Keys are not available in 32-bit mode.
528  */
529 static bool pku_disabled;
530
531 static __always_inline void setup_pku(struct cpuinfo_x86 *c)
532 {
533         if (c == &boot_cpu_data) {
534                 if (pku_disabled || !cpu_feature_enabled(X86_FEATURE_PKU))
535                         return;
536                 /*
537                  * Setting CR4.PKE will cause the X86_FEATURE_OSPKE cpuid
538                  * bit to be set.  Enforce it.
539                  */
540                 setup_force_cpu_cap(X86_FEATURE_OSPKE);
541
542         } else if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) {
543                 return;
544         }
545
546         cr4_set_bits(X86_CR4_PKE);
547         /* Load the default PKRU value */
548         pkru_write_default();
549 }
550
551 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
552 static __init int setup_disable_pku(char *arg)
553 {
554         /*
555          * Do not clear the X86_FEATURE_PKU bit.  All of the
556          * runtime checks are against OSPKE so clearing the
557          * bit does nothing.
558          *
559          * This way, we will see "pku" in cpuinfo, but not
560          * "ospke", which is exactly what we want.  It shows
561          * that the CPU has PKU, but the OS has not enabled it.
562          * This happens to be exactly how a system would look
563          * if we disabled the config option.
564          */
565         pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
566         pku_disabled = true;
567         return 1;
568 }
569 __setup("nopku", setup_disable_pku);
570 #endif /* CONFIG_X86_64 */
571
572 #ifdef CONFIG_X86_KERNEL_IBT
573
574 __noendbr u64 ibt_save(void)
575 {
576         u64 msr = 0;
577
578         if (cpu_feature_enabled(X86_FEATURE_IBT)) {
579                 rdmsrl(MSR_IA32_S_CET, msr);
580                 wrmsrl(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN);
581         }
582
583         return msr;
584 }
585
586 __noendbr void ibt_restore(u64 save)
587 {
588         u64 msr;
589
590         if (cpu_feature_enabled(X86_FEATURE_IBT)) {
591                 rdmsrl(MSR_IA32_S_CET, msr);
592                 msr &= ~CET_ENDBR_EN;
593                 msr |= (save & CET_ENDBR_EN);
594                 wrmsrl(MSR_IA32_S_CET, msr);
595         }
596 }
597
598 #endif
599
600 static __always_inline void setup_cet(struct cpuinfo_x86 *c)
601 {
602         u64 msr = CET_ENDBR_EN;
603
604         if (!HAS_KERNEL_IBT ||
605             !cpu_feature_enabled(X86_FEATURE_IBT))
606                 return;
607
608         wrmsrl(MSR_IA32_S_CET, msr);
609         cr4_set_bits(X86_CR4_CET);
610
611         if (!ibt_selftest()) {
612                 pr_err("IBT selftest: Failed!\n");
613                 wrmsrl(MSR_IA32_S_CET, 0);
614                 setup_clear_cpu_cap(X86_FEATURE_IBT);
615                 return;
616         }
617 }
618
619 __noendbr void cet_disable(void)
620 {
621         if (cpu_feature_enabled(X86_FEATURE_IBT))
622                 wrmsrl(MSR_IA32_S_CET, 0);
623 }
624
625 /*
626  * Some CPU features depend on higher CPUID levels, which may not always
627  * be available due to CPUID level capping or broken virtualization
628  * software.  Add those features to this table to auto-disable them.
629  */
630 struct cpuid_dependent_feature {
631         u32 feature;
632         u32 level;
633 };
634
635 static const struct cpuid_dependent_feature
636 cpuid_dependent_features[] = {
637         { X86_FEATURE_MWAIT,            0x00000005 },
638         { X86_FEATURE_DCA,              0x00000009 },
639         { X86_FEATURE_XSAVE,            0x0000000d },
640         { 0, 0 }
641 };
642
643 static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
644 {
645         const struct cpuid_dependent_feature *df;
646
647         for (df = cpuid_dependent_features; df->feature; df++) {
648
649                 if (!cpu_has(c, df->feature))
650                         continue;
651                 /*
652                  * Note: cpuid_level is set to -1 if unavailable, but
653                  * extended_extended_level is set to 0 if unavailable
654                  * and the legitimate extended levels are all negative
655                  * when signed; hence the weird messing around with
656                  * signs here...
657                  */
658                 if (!((s32)df->level < 0 ?
659                      (u32)df->level > (u32)c->extended_cpuid_level :
660                      (s32)df->level > (s32)c->cpuid_level))
661                         continue;
662
663                 clear_cpu_cap(c, df->feature);
664                 if (!warn)
665                         continue;
666
667                 pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
668                         x86_cap_flag(df->feature), df->level);
669         }
670 }
671
672 /*
673  * Naming convention should be: <Name> [(<Codename>)]
674  * This table only is used unless init_<vendor>() below doesn't set it;
675  * in particular, if CPUID levels 0x80000002..4 are supported, this
676  * isn't used
677  */
678
679 /* Look up CPU names by table lookup. */
680 static const char *table_lookup_model(struct cpuinfo_x86 *c)
681 {
682 #ifdef CONFIG_X86_32
683         const struct legacy_cpu_model_info *info;
684
685         if (c->x86_model >= 16)
686                 return NULL;    /* Range check */
687
688         if (!this_cpu)
689                 return NULL;
690
691         info = this_cpu->legacy_models;
692
693         while (info->family) {
694                 if (info->family == c->x86)
695                         return info->model_names[c->x86_model];
696                 info++;
697         }
698 #endif
699         return NULL;            /* Not found */
700 }
701
702 /* Aligned to unsigned long to avoid split lock in atomic bitmap ops */
703 __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
704 __u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
705
706 #ifdef CONFIG_X86_32
707 /* The 32-bit entry code needs to find cpu_entry_area. */
708 DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
709 #endif
710
711 /* Load the original GDT from the per-cpu structure */
712 void load_direct_gdt(int cpu)
713 {
714         struct desc_ptr gdt_descr;
715
716         gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
717         gdt_descr.size = GDT_SIZE - 1;
718         load_gdt(&gdt_descr);
719 }
720 EXPORT_SYMBOL_GPL(load_direct_gdt);
721
722 /* Load a fixmap remapping of the per-cpu GDT */
723 void load_fixmap_gdt(int cpu)
724 {
725         struct desc_ptr gdt_descr;
726
727         gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
728         gdt_descr.size = GDT_SIZE - 1;
729         load_gdt(&gdt_descr);
730 }
731 EXPORT_SYMBOL_GPL(load_fixmap_gdt);
732
733 /**
734  * switch_gdt_and_percpu_base - Switch to direct GDT and runtime per CPU base
735  * @cpu:        The CPU number for which this is invoked
736  *
737  * Invoked during early boot to switch from early GDT and early per CPU to
738  * the direct GDT and the runtime per CPU area. On 32-bit the percpu base
739  * switch is implicit by loading the direct GDT. On 64bit this requires
740  * to update GSBASE.
741  */
742 void __init switch_gdt_and_percpu_base(int cpu)
743 {
744         load_direct_gdt(cpu);
745
746 #ifdef CONFIG_X86_64
747         /*
748          * No need to load %gs. It is already correct.
749          *
750          * Writing %gs on 64bit would zero GSBASE which would make any per
751          * CPU operation up to the point of the wrmsrl() fault.
752          *
753          * Set GSBASE to the new offset. Until the wrmsrl() happens the
754          * early mapping is still valid. That means the GSBASE update will
755          * lose any prior per CPU data which was not copied over in
756          * setup_per_cpu_areas().
757          *
758          * This works even with stackprotector enabled because the
759          * per CPU stack canary is 0 in both per CPU areas.
760          */
761         wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
762 #else
763         /*
764          * %fs is already set to __KERNEL_PERCPU, but after switching GDT
765          * it is required to load FS again so that the 'hidden' part is
766          * updated from the new GDT. Up to this point the early per CPU
767          * translation is active. Any content of the early per CPU data
768          * which was not copied over in setup_per_cpu_areas() is lost.
769          */
770         loadsegment(fs, __KERNEL_PERCPU);
771 #endif
772 }
773
774 static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
775
776 static void get_model_name(struct cpuinfo_x86 *c)
777 {
778         unsigned int *v;
779         char *p, *q, *s;
780
781         if (c->extended_cpuid_level < 0x80000004)
782                 return;
783
784         v = (unsigned int *)c->x86_model_id;
785         cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
786         cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
787         cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
788         c->x86_model_id[48] = 0;
789
790         /* Trim whitespace */
791         p = q = s = &c->x86_model_id[0];
792
793         while (*p == ' ')
794                 p++;
795
796         while (*p) {
797                 /* Note the last non-whitespace index */
798                 if (!isspace(*p))
799                         s = q;
800
801                 *q++ = *p++;
802         }
803
804         *(s + 1) = '\0';
805 }
806
807 void detect_num_cpu_cores(struct cpuinfo_x86 *c)
808 {
809         unsigned int eax, ebx, ecx, edx;
810
811         c->x86_max_cores = 1;
812         if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
813                 return;
814
815         cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
816         if (eax & 0x1f)
817                 c->x86_max_cores = (eax >> 26) + 1;
818 }
819
820 void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
821 {
822         unsigned int n, dummy, ebx, ecx, edx, l2size;
823
824         n = c->extended_cpuid_level;
825
826         if (n >= 0x80000005) {
827                 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
828                 c->x86_cache_size = (ecx>>24) + (edx>>24);
829 #ifdef CONFIG_X86_64
830                 /* On K8 L1 TLB is inclusive, so don't count it */
831                 c->x86_tlbsize = 0;
832 #endif
833         }
834
835         if (n < 0x80000006)     /* Some chips just has a large L1. */
836                 return;
837
838         cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
839         l2size = ecx >> 16;
840
841 #ifdef CONFIG_X86_64
842         c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
843 #else
844         /* do processor-specific cache resizing */
845         if (this_cpu->legacy_cache_size)
846                 l2size = this_cpu->legacy_cache_size(c, l2size);
847
848         /* Allow user to override all this if necessary. */
849         if (cachesize_override != -1)
850                 l2size = cachesize_override;
851
852         if (l2size == 0)
853                 return;         /* Again, no L2 cache is possible */
854 #endif
855
856         c->x86_cache_size = l2size;
857 }
858
859 u16 __read_mostly tlb_lli_4k[NR_INFO];
860 u16 __read_mostly tlb_lli_2m[NR_INFO];
861 u16 __read_mostly tlb_lli_4m[NR_INFO];
862 u16 __read_mostly tlb_lld_4k[NR_INFO];
863 u16 __read_mostly tlb_lld_2m[NR_INFO];
864 u16 __read_mostly tlb_lld_4m[NR_INFO];
865 u16 __read_mostly tlb_lld_1g[NR_INFO];
866
867 static void cpu_detect_tlb(struct cpuinfo_x86 *c)
868 {
869         if (this_cpu->c_detect_tlb)
870                 this_cpu->c_detect_tlb(c);
871
872         pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
873                 tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
874                 tlb_lli_4m[ENTRIES]);
875
876         pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
877                 tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
878                 tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
879 }
880
881 int detect_ht_early(struct cpuinfo_x86 *c)
882 {
883 #ifdef CONFIG_SMP
884         u32 eax, ebx, ecx, edx;
885
886         if (!cpu_has(c, X86_FEATURE_HT))
887                 return -1;
888
889         if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
890                 return -1;
891
892         if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
893                 return -1;
894
895         cpuid(1, &eax, &ebx, &ecx, &edx);
896
897         smp_num_siblings = (ebx & 0xff0000) >> 16;
898         if (smp_num_siblings == 1)
899                 pr_info_once("CPU0: Hyper-Threading is disabled\n");
900 #endif
901         return 0;
902 }
903
904 void detect_ht(struct cpuinfo_x86 *c)
905 {
906 #ifdef CONFIG_SMP
907         int index_msb, core_bits;
908
909         if (detect_ht_early(c) < 0)
910                 return;
911
912         index_msb = get_count_order(smp_num_siblings);
913         c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
914
915         smp_num_siblings = smp_num_siblings / c->x86_max_cores;
916
917         index_msb = get_count_order(smp_num_siblings);
918
919         core_bits = get_count_order(c->x86_max_cores);
920
921         c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
922                                        ((1 << core_bits) - 1);
923 #endif
924 }
925
926 static void get_cpu_vendor(struct cpuinfo_x86 *c)
927 {
928         char *v = c->x86_vendor_id;
929         int i;
930
931         for (i = 0; i < X86_VENDOR_NUM; i++) {
932                 if (!cpu_devs[i])
933                         break;
934
935                 if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
936                     (cpu_devs[i]->c_ident[1] &&
937                      !strcmp(v, cpu_devs[i]->c_ident[1]))) {
938
939                         this_cpu = cpu_devs[i];
940                         c->x86_vendor = this_cpu->c_x86_vendor;
941                         return;
942                 }
943         }
944
945         pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
946                     "CPU: Your system may be unstable.\n", v);
947
948         c->x86_vendor = X86_VENDOR_UNKNOWN;
949         this_cpu = &default_cpu;
950 }
951
952 void cpu_detect(struct cpuinfo_x86 *c)
953 {
954         /* Get vendor name */
955         cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
956               (unsigned int *)&c->x86_vendor_id[0],
957               (unsigned int *)&c->x86_vendor_id[8],
958               (unsigned int *)&c->x86_vendor_id[4]);
959
960         c->x86 = 4;
961         /* Intel-defined flags: level 0x00000001 */
962         if (c->cpuid_level >= 0x00000001) {
963                 u32 junk, tfms, cap0, misc;
964
965                 cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
966                 c->x86          = x86_family(tfms);
967                 c->x86_model    = x86_model(tfms);
968                 c->x86_stepping = x86_stepping(tfms);
969
970                 if (cap0 & (1<<19)) {
971                         c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
972                         c->x86_cache_alignment = c->x86_clflush_size;
973                 }
974         }
975 }
976
977 static void apply_forced_caps(struct cpuinfo_x86 *c)
978 {
979         int i;
980
981         for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
982                 c->x86_capability[i] &= ~cpu_caps_cleared[i];
983                 c->x86_capability[i] |= cpu_caps_set[i];
984         }
985 }
986
987 static void init_speculation_control(struct cpuinfo_x86 *c)
988 {
989         /*
990          * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
991          * and they also have a different bit for STIBP support. Also,
992          * a hypervisor might have set the individual AMD bits even on
993          * Intel CPUs, for finer-grained selection of what's available.
994          */
995         if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
996                 set_cpu_cap(c, X86_FEATURE_IBRS);
997                 set_cpu_cap(c, X86_FEATURE_IBPB);
998                 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
999         }
1000
1001         if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
1002                 set_cpu_cap(c, X86_FEATURE_STIBP);
1003
1004         if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
1005             cpu_has(c, X86_FEATURE_VIRT_SSBD))
1006                 set_cpu_cap(c, X86_FEATURE_SSBD);
1007
1008         if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
1009                 set_cpu_cap(c, X86_FEATURE_IBRS);
1010                 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1011         }
1012
1013         if (cpu_has(c, X86_FEATURE_AMD_IBPB))
1014                 set_cpu_cap(c, X86_FEATURE_IBPB);
1015
1016         if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
1017                 set_cpu_cap(c, X86_FEATURE_STIBP);
1018                 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1019         }
1020
1021         if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
1022                 set_cpu_cap(c, X86_FEATURE_SSBD);
1023                 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1024                 clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
1025         }
1026 }
1027
1028 void get_cpu_cap(struct cpuinfo_x86 *c)
1029 {
1030         u32 eax, ebx, ecx, edx;
1031
1032         /* Intel-defined flags: level 0x00000001 */
1033         if (c->cpuid_level >= 0x00000001) {
1034                 cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
1035
1036                 c->x86_capability[CPUID_1_ECX] = ecx;
1037                 c->x86_capability[CPUID_1_EDX] = edx;
1038         }
1039
1040         /* Thermal and Power Management Leaf: level 0x00000006 (eax) */
1041         if (c->cpuid_level >= 0x00000006)
1042                 c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
1043
1044         /* Additional Intel-defined flags: level 0x00000007 */
1045         if (c->cpuid_level >= 0x00000007) {
1046                 cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
1047                 c->x86_capability[CPUID_7_0_EBX] = ebx;
1048                 c->x86_capability[CPUID_7_ECX] = ecx;
1049                 c->x86_capability[CPUID_7_EDX] = edx;
1050
1051                 /* Check valid sub-leaf index before accessing it */
1052                 if (eax >= 1) {
1053                         cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx);
1054                         c->x86_capability[CPUID_7_1_EAX] = eax;
1055                 }
1056         }
1057
1058         /* Extended state features: level 0x0000000d */
1059         if (c->cpuid_level >= 0x0000000d) {
1060                 cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
1061
1062                 c->x86_capability[CPUID_D_1_EAX] = eax;
1063         }
1064
1065         /* AMD-defined flags: level 0x80000001 */
1066         eax = cpuid_eax(0x80000000);
1067         c->extended_cpuid_level = eax;
1068
1069         if ((eax & 0xffff0000) == 0x80000000) {
1070                 if (eax >= 0x80000001) {
1071                         cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
1072
1073                         c->x86_capability[CPUID_8000_0001_ECX] = ecx;
1074                         c->x86_capability[CPUID_8000_0001_EDX] = edx;
1075                 }
1076         }
1077
1078         if (c->extended_cpuid_level >= 0x80000007) {
1079                 cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
1080
1081                 c->x86_capability[CPUID_8000_0007_EBX] = ebx;
1082                 c->x86_power = edx;
1083         }
1084
1085         if (c->extended_cpuid_level >= 0x80000008) {
1086                 cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
1087                 c->x86_capability[CPUID_8000_0008_EBX] = ebx;
1088         }
1089
1090         if (c->extended_cpuid_level >= 0x8000000a)
1091                 c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
1092
1093         if (c->extended_cpuid_level >= 0x8000001f)
1094                 c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);
1095
1096         init_scattered_cpuid_features(c);
1097         init_speculation_control(c);
1098
1099         /*
1100          * Clear/Set all flags overridden by options, after probe.
1101          * This needs to happen each time we re-probe, which may happen
1102          * several times during CPU initialization.
1103          */
1104         apply_forced_caps(c);
1105 }
1106
1107 void get_cpu_address_sizes(struct cpuinfo_x86 *c)
1108 {
1109         u32 eax, ebx, ecx, edx;
1110
1111         if (c->extended_cpuid_level >= 0x80000008) {
1112                 cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
1113
1114                 c->x86_virt_bits = (eax >> 8) & 0xff;
1115                 c->x86_phys_bits = eax & 0xff;
1116         }
1117 #ifdef CONFIG_X86_32
1118         else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
1119                 c->x86_phys_bits = 36;
1120 #endif
1121         c->x86_cache_bits = c->x86_phys_bits;
1122 }
1123
1124 static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
1125 {
1126 #ifdef CONFIG_X86_32
1127         int i;
1128
1129         /*
1130          * First of all, decide if this is a 486 or higher
1131          * It's a 486 if we can modify the AC flag
1132          */
1133         if (flag_is_changeable_p(X86_EFLAGS_AC))
1134                 c->x86 = 4;
1135         else
1136                 c->x86 = 3;
1137
1138         for (i = 0; i < X86_VENDOR_NUM; i++)
1139                 if (cpu_devs[i] && cpu_devs[i]->c_identify) {
1140                         c->x86_vendor_id[0] = 0;
1141                         cpu_devs[i]->c_identify(c);
1142                         if (c->x86_vendor_id[0]) {
1143                                 get_cpu_vendor(c);
1144                                 break;
1145                         }
1146                 }
1147 #endif
1148 }
1149
1150 #define NO_SPECULATION          BIT(0)
1151 #define NO_MELTDOWN             BIT(1)
1152 #define NO_SSB                  BIT(2)
1153 #define NO_L1TF                 BIT(3)
1154 #define NO_MDS                  BIT(4)
1155 #define MSBDS_ONLY              BIT(5)
1156 #define NO_SWAPGS               BIT(6)
1157 #define NO_ITLB_MULTIHIT        BIT(7)
1158 #define NO_SPECTRE_V2           BIT(8)
1159 #define NO_MMIO                 BIT(9)
1160 #define NO_EIBRS_PBRSB          BIT(10)
1161
1162 #define VULNWL(vendor, family, model, whitelist)        \
1163         X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
1164
1165 #define VULNWL_INTEL(model, whitelist)          \
1166         VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
1167
1168 #define VULNWL_AMD(family, whitelist)           \
1169         VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
1170
1171 #define VULNWL_HYGON(family, whitelist)         \
1172         VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)
1173
1174 static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
1175         VULNWL(ANY,     4, X86_MODEL_ANY,       NO_SPECULATION),
1176         VULNWL(CENTAUR, 5, X86_MODEL_ANY,       NO_SPECULATION),
1177         VULNWL(INTEL,   5, X86_MODEL_ANY,       NO_SPECULATION),
1178         VULNWL(NSC,     5, X86_MODEL_ANY,       NO_SPECULATION),
1179         VULNWL(VORTEX,  5, X86_MODEL_ANY,       NO_SPECULATION),
1180         VULNWL(VORTEX,  6, X86_MODEL_ANY,       NO_SPECULATION),
1181
1182         /* Intel Family 6 */
1183         VULNWL_INTEL(TIGERLAKE,                 NO_MMIO),
1184         VULNWL_INTEL(TIGERLAKE_L,               NO_MMIO),
1185         VULNWL_INTEL(ALDERLAKE,                 NO_MMIO),
1186         VULNWL_INTEL(ALDERLAKE_L,               NO_MMIO),
1187
1188         VULNWL_INTEL(ATOM_SALTWELL,             NO_SPECULATION | NO_ITLB_MULTIHIT),
1189         VULNWL_INTEL(ATOM_SALTWELL_TABLET,      NO_SPECULATION | NO_ITLB_MULTIHIT),
1190         VULNWL_INTEL(ATOM_SALTWELL_MID,         NO_SPECULATION | NO_ITLB_MULTIHIT),
1191         VULNWL_INTEL(ATOM_BONNELL,              NO_SPECULATION | NO_ITLB_MULTIHIT),
1192         VULNWL_INTEL(ATOM_BONNELL_MID,          NO_SPECULATION | NO_ITLB_MULTIHIT),
1193
1194         VULNWL_INTEL(ATOM_SILVERMONT,           NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1195         VULNWL_INTEL(ATOM_SILVERMONT_D,         NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1196         VULNWL_INTEL(ATOM_SILVERMONT_MID,       NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1197         VULNWL_INTEL(ATOM_AIRMONT,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1198         VULNWL_INTEL(XEON_PHI_KNL,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1199         VULNWL_INTEL(XEON_PHI_KNM,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1200
1201         VULNWL_INTEL(CORE_YONAH,                NO_SSB),
1202
1203         VULNWL_INTEL(ATOM_AIRMONT_MID,          NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1204         VULNWL_INTEL(ATOM_AIRMONT_NP,           NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
1205
1206         VULNWL_INTEL(ATOM_GOLDMONT,             NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1207         VULNWL_INTEL(ATOM_GOLDMONT_D,           NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1208         VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
1209
1210         /*
1211          * Technically, swapgs isn't serializing on AMD (despite it previously
1212          * being documented as such in the APM).  But according to AMD, %gs is
1213          * updated non-speculatively, and the issuing of %gs-relative memory
1214          * operands will be blocked until the %gs update completes, which is
1215          * good enough for our purposes.
1216          */
1217
1218         VULNWL_INTEL(ATOM_TREMONT,              NO_EIBRS_PBRSB),
1219         VULNWL_INTEL(ATOM_TREMONT_L,            NO_EIBRS_PBRSB),
1220         VULNWL_INTEL(ATOM_TREMONT_D,            NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
1221
1222         /* AMD Family 0xf - 0x12 */
1223         VULNWL_AMD(0x0f,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1224         VULNWL_AMD(0x10,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1225         VULNWL_AMD(0x11,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1226         VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1227
1228         /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
1229         VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1230         VULNWL_HYGON(X86_FAMILY_ANY,    NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1231
1232         /* Zhaoxin Family 7 */
1233         VULNWL(CENTAUR, 7, X86_MODEL_ANY,       NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
1234         VULNWL(ZHAOXIN, 7, X86_MODEL_ANY,       NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
1235         {}
1236 };
1237
1238 #define VULNBL(vendor, family, model, blacklist)        \
1239         X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist)
1240
1241 #define VULNBL_INTEL_STEPPINGS(model, steppings, issues)                   \
1242         X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6,             \
1243                                             INTEL_FAM6_##model, steppings, \
1244                                             X86_FEATURE_ANY, issues)
1245
1246 #define VULNBL_AMD(family, blacklist)           \
1247         VULNBL(AMD, family, X86_MODEL_ANY, blacklist)
1248
1249 #define VULNBL_HYGON(family, blacklist)         \
1250         VULNBL(HYGON, family, X86_MODEL_ANY, blacklist)
1251
1252 #define SRBDS           BIT(0)
1253 /* CPU is affected by X86_BUG_MMIO_STALE_DATA */
1254 #define MMIO            BIT(1)
1255 /* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
1256 #define MMIO_SBDS       BIT(2)
1257 /* CPU is affected by RETbleed, speculating where you would not expect it */
1258 #define RETBLEED        BIT(3)
1259 /* CPU is affected by SMT (cross-thread) return predictions */
1260 #define SMT_RSB         BIT(4)
1261
1262 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
1263         VULNBL_INTEL_STEPPINGS(IVYBRIDGE,       X86_STEPPING_ANY,               SRBDS),
1264         VULNBL_INTEL_STEPPINGS(HASWELL,         X86_STEPPING_ANY,               SRBDS),
1265         VULNBL_INTEL_STEPPINGS(HASWELL_L,       X86_STEPPING_ANY,               SRBDS),
1266         VULNBL_INTEL_STEPPINGS(HASWELL_G,       X86_STEPPING_ANY,               SRBDS),
1267         VULNBL_INTEL_STEPPINGS(HASWELL_X,       X86_STEPPING_ANY,               MMIO),
1268         VULNBL_INTEL_STEPPINGS(BROADWELL_D,     X86_STEPPING_ANY,               MMIO),
1269         VULNBL_INTEL_STEPPINGS(BROADWELL_G,     X86_STEPPING_ANY,               SRBDS),
1270         VULNBL_INTEL_STEPPINGS(BROADWELL_X,     X86_STEPPING_ANY,               MMIO),
1271         VULNBL_INTEL_STEPPINGS(BROADWELL,       X86_STEPPING_ANY,               SRBDS),
1272         VULNBL_INTEL_STEPPINGS(SKYLAKE_L,       X86_STEPPING_ANY,               SRBDS | MMIO | RETBLEED),
1273         VULNBL_INTEL_STEPPINGS(SKYLAKE_X,       X86_STEPPING_ANY,               MMIO | RETBLEED),
1274         VULNBL_INTEL_STEPPINGS(SKYLAKE,         X86_STEPPING_ANY,               SRBDS | MMIO | RETBLEED),
1275         VULNBL_INTEL_STEPPINGS(KABYLAKE_L,      X86_STEPPING_ANY,               SRBDS | MMIO | RETBLEED),
1276         VULNBL_INTEL_STEPPINGS(KABYLAKE,        X86_STEPPING_ANY,               SRBDS | MMIO | RETBLEED),
1277         VULNBL_INTEL_STEPPINGS(CANNONLAKE_L,    X86_STEPPING_ANY,               RETBLEED),
1278         VULNBL_INTEL_STEPPINGS(ICELAKE_L,       X86_STEPPING_ANY,               MMIO | MMIO_SBDS | RETBLEED),
1279         VULNBL_INTEL_STEPPINGS(ICELAKE_D,       X86_STEPPING_ANY,               MMIO),
1280         VULNBL_INTEL_STEPPINGS(ICELAKE_X,       X86_STEPPING_ANY,               MMIO),
1281         VULNBL_INTEL_STEPPINGS(COMETLAKE,       X86_STEPPING_ANY,               MMIO | MMIO_SBDS | RETBLEED),
1282         VULNBL_INTEL_STEPPINGS(COMETLAKE_L,     X86_STEPPINGS(0x0, 0x0),        MMIO | RETBLEED),
1283         VULNBL_INTEL_STEPPINGS(COMETLAKE_L,     X86_STEPPING_ANY,               MMIO | MMIO_SBDS | RETBLEED),
1284         VULNBL_INTEL_STEPPINGS(LAKEFIELD,       X86_STEPPING_ANY,               MMIO | MMIO_SBDS | RETBLEED),
1285         VULNBL_INTEL_STEPPINGS(ROCKETLAKE,      X86_STEPPING_ANY,               MMIO | RETBLEED),
1286         VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,    X86_STEPPING_ANY,               MMIO | MMIO_SBDS),
1287         VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,  X86_STEPPING_ANY,               MMIO),
1288         VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,  X86_STEPPING_ANY,               MMIO | MMIO_SBDS),
1289
1290         VULNBL_AMD(0x15, RETBLEED),
1291         VULNBL_AMD(0x16, RETBLEED),
1292         VULNBL_AMD(0x17, RETBLEED | SMT_RSB),
1293         VULNBL_HYGON(0x18, RETBLEED | SMT_RSB),
1294         {}
1295 };
1296
1297 static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
1298 {
1299         const struct x86_cpu_id *m = x86_match_cpu(table);
1300
1301         return m && !!(m->driver_data & which);
1302 }
1303
1304 u64 x86_read_arch_cap_msr(void)
1305 {
1306         u64 ia32_cap = 0;
1307
1308         if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
1309                 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
1310
1311         return ia32_cap;
1312 }
1313
1314 static bool arch_cap_mmio_immune(u64 ia32_cap)
1315 {
1316         return (ia32_cap & ARCH_CAP_FBSDP_NO &&
1317                 ia32_cap & ARCH_CAP_PSDP_NO &&
1318                 ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
1319 }
1320
1321 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
1322 {
1323         u64 ia32_cap = x86_read_arch_cap_msr();
1324
1325         /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
1326         if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
1327             !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
1328                 setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
1329
1330         if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
1331                 return;
1332
1333         setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
1334
1335         if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2))
1336                 setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
1337
1338         if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
1339             !(ia32_cap & ARCH_CAP_SSB_NO) &&
1340            !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
1341                 setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
1342
1343         if (ia32_cap & ARCH_CAP_IBRS_ALL)
1344                 setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
1345
1346         if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
1347             !(ia32_cap & ARCH_CAP_MDS_NO)) {
1348                 setup_force_cpu_bug(X86_BUG_MDS);
1349                 if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
1350                         setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
1351         }
1352
1353         if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
1354                 setup_force_cpu_bug(X86_BUG_SWAPGS);
1355
1356         /*
1357          * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when:
1358          *      - TSX is supported or
1359          *      - TSX_CTRL is present
1360          *
1361          * TSX_CTRL check is needed for cases when TSX could be disabled before
1362          * the kernel boot e.g. kexec.
1363          * TSX_CTRL check alone is not sufficient for cases when the microcode
1364          * update is not present or running as guest that don't get TSX_CTRL.
1365          */
1366         if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
1367             (cpu_has(c, X86_FEATURE_RTM) ||
1368              (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
1369                 setup_force_cpu_bug(X86_BUG_TAA);
1370
1371         /*
1372          * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
1373          * in the vulnerability blacklist.
1374          *
1375          * Some of the implications and mitigation of Shared Buffers Data
1376          * Sampling (SBDS) are similar to SRBDS. Give SBDS same treatment as
1377          * SRBDS.
1378          */
1379         if ((cpu_has(c, X86_FEATURE_RDRAND) ||
1380              cpu_has(c, X86_FEATURE_RDSEED)) &&
1381             cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS))
1382                     setup_force_cpu_bug(X86_BUG_SRBDS);
1383
1384         /*
1385          * Processor MMIO Stale Data bug enumeration
1386          *
1387          * Affected CPU list is generally enough to enumerate the vulnerability,
1388          * but for virtualization case check for ARCH_CAP MSR bits also, VMM may
1389          * not want the guest to enumerate the bug.
1390          *
1391          * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
1392          * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
1393          */
1394         if (!arch_cap_mmio_immune(ia32_cap)) {
1395                 if (cpu_matches(cpu_vuln_blacklist, MMIO))
1396                         setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
1397                 else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
1398                         setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
1399         }
1400
1401         if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
1402                 if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
1403                         setup_force_cpu_bug(X86_BUG_RETBLEED);
1404         }
1405
1406         if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
1407             !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
1408             !(ia32_cap & ARCH_CAP_PBRSB_NO))
1409                 setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
1410
1411         if (cpu_matches(cpu_vuln_blacklist, SMT_RSB))
1412                 setup_force_cpu_bug(X86_BUG_SMT_RSB);
1413
1414         if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
1415                 return;
1416
1417         /* Rogue Data Cache Load? No! */
1418         if (ia32_cap & ARCH_CAP_RDCL_NO)
1419                 return;
1420
1421         setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
1422
1423         if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
1424                 return;
1425
1426         setup_force_cpu_bug(X86_BUG_L1TF);
1427 }
1428
1429 /*
1430  * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
1431  * unfortunately, that's not true in practice because of early VIA
1432  * chips and (more importantly) broken virtualizers that are not easy
1433  * to detect. In the latter case it doesn't even *fail* reliably, so
1434  * probing for it doesn't even work. Disable it completely on 32-bit
1435  * unless we can find a reliable way to detect all the broken cases.
1436  * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
1437  */
1438 static void detect_nopl(void)
1439 {
1440 #ifdef CONFIG_X86_32
1441         setup_clear_cpu_cap(X86_FEATURE_NOPL);
1442 #else
1443         setup_force_cpu_cap(X86_FEATURE_NOPL);
1444 #endif
1445 }
1446
1447 /*
1448  * We parse cpu parameters early because fpu__init_system() is executed
1449  * before parse_early_param().
1450  */
1451 static void __init cpu_parse_early_param(void)
1452 {
1453         char arg[128];
1454         char *argptr = arg, *opt;
1455         int arglen, taint = 0;
1456
1457 #ifdef CONFIG_X86_32
1458         if (cmdline_find_option_bool(boot_command_line, "no387"))
1459 #ifdef CONFIG_MATH_EMULATION
1460                 setup_clear_cpu_cap(X86_FEATURE_FPU);
1461 #else
1462                 pr_err("Option 'no387' required CONFIG_MATH_EMULATION enabled.\n");
1463 #endif
1464
1465         if (cmdline_find_option_bool(boot_command_line, "nofxsr"))
1466                 setup_clear_cpu_cap(X86_FEATURE_FXSR);
1467 #endif
1468
1469         if (cmdline_find_option_bool(boot_command_line, "noxsave"))
1470                 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
1471
1472         if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
1473                 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
1474
1475         if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
1476                 setup_clear_cpu_cap(X86_FEATURE_XSAVES);
1477
1478         arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
1479         if (arglen <= 0)
1480                 return;
1481
1482         pr_info("Clearing CPUID bits:");
1483
1484         while (argptr) {
1485                 bool found __maybe_unused = false;
1486                 unsigned int bit;
1487
1488                 opt = strsep(&argptr, ",");
1489
1490                 /*
1491                  * Handle naked numbers first for feature flags which don't
1492                  * have names.
1493                  */
1494                 if (!kstrtouint(opt, 10, &bit)) {
1495                         if (bit < NCAPINTS * 32) {
1496
1497 #ifdef CONFIG_X86_FEATURE_NAMES
1498                                 /* empty-string, i.e., ""-defined feature flags */
1499                                 if (!x86_cap_flags[bit])
1500                                         pr_cont(" " X86_CAP_FMT_NUM, x86_cap_flag_num(bit));
1501                                 else
1502 #endif
1503                                         pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
1504
1505                                 setup_clear_cpu_cap(bit);
1506                                 taint++;
1507                         }
1508                         /*
1509                          * The assumption is that there are no feature names with only
1510                          * numbers in the name thus go to the next argument.
1511                          */
1512                         continue;
1513                 }
1514
1515 #ifdef CONFIG_X86_FEATURE_NAMES
1516                 for (bit = 0; bit < 32 * NCAPINTS; bit++) {
1517                         if (!x86_cap_flag(bit))
1518                                 continue;
1519
1520                         if (strcmp(x86_cap_flag(bit), opt))
1521                                 continue;
1522
1523                         pr_cont(" %s", opt);
1524                         setup_clear_cpu_cap(bit);
1525                         taint++;
1526                         found = true;
1527                         break;
1528                 }
1529
1530                 if (!found)
1531                         pr_cont(" (unknown: %s)", opt);
1532 #endif
1533         }
1534         pr_cont("\n");
1535
1536         if (taint)
1537                 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1538 }
1539
1540 /*
1541  * Do minimum CPU detection early.
1542  * Fields really needed: vendor, cpuid_level, family, model, mask,
1543  * cache alignment.
1544  * The others are not touched to avoid unwanted side effects.
1545  *
1546  * WARNING: this function is only called on the boot CPU.  Don't add code
1547  * here that is supposed to run on all CPUs.
1548  */
1549 static void __init early_identify_cpu(struct cpuinfo_x86 *c)
1550 {
1551 #ifdef CONFIG_X86_64
1552         c->x86_clflush_size = 64;
1553         c->x86_phys_bits = 36;
1554         c->x86_virt_bits = 48;
1555 #else
1556         c->x86_clflush_size = 32;
1557         c->x86_phys_bits = 32;
1558         c->x86_virt_bits = 32;
1559 #endif
1560         c->x86_cache_alignment = c->x86_clflush_size;
1561
1562         memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1563         c->extended_cpuid_level = 0;
1564
1565         if (!have_cpuid_p())
1566                 identify_cpu_without_cpuid(c);
1567
1568         /* cyrix could have cpuid enabled via c_identify()*/
1569         if (have_cpuid_p()) {
1570                 cpu_detect(c);
1571                 get_cpu_vendor(c);
1572                 get_cpu_cap(c);
1573                 get_cpu_address_sizes(c);
1574                 setup_force_cpu_cap(X86_FEATURE_CPUID);
1575                 cpu_parse_early_param();
1576
1577                 if (this_cpu->c_early_init)
1578                         this_cpu->c_early_init(c);
1579
1580                 c->cpu_index = 0;
1581                 filter_cpuid_features(c, false);
1582
1583                 if (this_cpu->c_bsp_init)
1584                         this_cpu->c_bsp_init(c);
1585         } else {
1586                 setup_clear_cpu_cap(X86_FEATURE_CPUID);
1587         }
1588
1589         setup_force_cpu_cap(X86_FEATURE_ALWAYS);
1590
1591         cpu_set_bug_bits(c);
1592
1593         sld_setup(c);
1594
1595         fpu__init_system(c);
1596
1597         init_sigframe_size();
1598
1599 #ifdef CONFIG_X86_32
1600         /*
1601          * Regardless of whether PCID is enumerated, the SDM says
1602          * that it can't be enabled in 32-bit mode.
1603          */
1604         setup_clear_cpu_cap(X86_FEATURE_PCID);
1605 #endif
1606
1607         /*
1608          * Later in the boot process pgtable_l5_enabled() relies on
1609          * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
1610          * enabled by this point we need to clear the feature bit to avoid
1611          * false-positives at the later stage.
1612          *
1613          * pgtable_l5_enabled() can be false here for several reasons:
1614          *  - 5-level paging is disabled compile-time;
1615          *  - it's 32-bit kernel;
1616          *  - machine doesn't support 5-level paging;
1617          *  - user specified 'no5lvl' in kernel command line.
1618          */
1619         if (!pgtable_l5_enabled())
1620                 setup_clear_cpu_cap(X86_FEATURE_LA57);
1621
1622         detect_nopl();
1623 }
1624
1625 void __init early_cpu_init(void)
1626 {
1627         const struct cpu_dev *const *cdev;
1628         int count = 0;
1629
1630 #ifdef CONFIG_PROCESSOR_SELECT
1631         pr_info("KERNEL supported cpus:\n");
1632 #endif
1633
1634         for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
1635                 const struct cpu_dev *cpudev = *cdev;
1636
1637                 if (count >= X86_VENDOR_NUM)
1638                         break;
1639                 cpu_devs[count] = cpudev;
1640                 count++;
1641
1642 #ifdef CONFIG_PROCESSOR_SELECT
1643                 {
1644                         unsigned int j;
1645
1646                         for (j = 0; j < 2; j++) {
1647                                 if (!cpudev->c_ident[j])
1648                                         continue;
1649                                 pr_info("  %s %s\n", cpudev->c_vendor,
1650                                         cpudev->c_ident[j]);
1651                         }
1652                 }
1653 #endif
1654         }
1655         early_identify_cpu(&boot_cpu_data);
1656 }
1657
1658 static bool detect_null_seg_behavior(void)
1659 {
1660         /*
1661          * Empirically, writing zero to a segment selector on AMD does
1662          * not clear the base, whereas writing zero to a segment
1663          * selector on Intel does clear the base.  Intel's behavior
1664          * allows slightly faster context switches in the common case
1665          * where GS is unused by the prev and next threads.
1666          *
1667          * Since neither vendor documents this anywhere that I can see,
1668          * detect it directly instead of hard-coding the choice by
1669          * vendor.
1670          *
1671          * I've designated AMD's behavior as the "bug" because it's
1672          * counterintuitive and less friendly.
1673          */
1674
1675         unsigned long old_base, tmp;
1676         rdmsrl(MSR_FS_BASE, old_base);
1677         wrmsrl(MSR_FS_BASE, 1);
1678         loadsegment(fs, 0);
1679         rdmsrl(MSR_FS_BASE, tmp);
1680         wrmsrl(MSR_FS_BASE, old_base);
1681         return tmp == 0;
1682 }
1683
1684 void check_null_seg_clears_base(struct cpuinfo_x86 *c)
1685 {
1686         /* BUG_NULL_SEG is only relevant with 64bit userspace */
1687         if (!IS_ENABLED(CONFIG_X86_64))
1688                 return;
1689
1690         /* Zen3 CPUs advertise Null Selector Clears Base in CPUID. */
1691         if (c->extended_cpuid_level >= 0x80000021 &&
1692             cpuid_eax(0x80000021) & BIT(6))
1693                 return;
1694
1695         /*
1696          * CPUID bit above wasn't set. If this kernel is still running
1697          * as a HV guest, then the HV has decided not to advertize
1698          * that CPUID bit for whatever reason.  For example, one
1699          * member of the migration pool might be vulnerable.  Which
1700          * means, the bug is present: set the BUG flag and return.
1701          */
1702         if (cpu_has(c, X86_FEATURE_HYPERVISOR)) {
1703                 set_cpu_bug(c, X86_BUG_NULL_SEG);
1704                 return;
1705         }
1706
1707         /*
1708          * Zen2 CPUs also have this behaviour, but no CPUID bit.
1709          * 0x18 is the respective family for Hygon.
1710          */
1711         if ((c->x86 == 0x17 || c->x86 == 0x18) &&
1712             detect_null_seg_behavior())
1713                 return;
1714
1715         /* All the remaining ones are affected */
1716         set_cpu_bug(c, X86_BUG_NULL_SEG);
1717 }
1718
1719 static void generic_identify(struct cpuinfo_x86 *c)
1720 {
1721         c->extended_cpuid_level = 0;
1722
1723         if (!have_cpuid_p())
1724                 identify_cpu_without_cpuid(c);
1725
1726         /* cyrix could have cpuid enabled via c_identify()*/
1727         if (!have_cpuid_p())
1728                 return;
1729
1730         cpu_detect(c);
1731
1732         get_cpu_vendor(c);
1733
1734         get_cpu_cap(c);
1735
1736         get_cpu_address_sizes(c);
1737
1738         if (c->cpuid_level >= 0x00000001) {
1739                 c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
1740 #ifdef CONFIG_X86_32
1741 # ifdef CONFIG_SMP
1742                 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
1743 # else
1744                 c->apicid = c->initial_apicid;
1745 # endif
1746 #endif
1747                 c->phys_proc_id = c->initial_apicid;
1748         }
1749
1750         get_model_name(c); /* Default name */
1751
1752         /*
1753          * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
1754          * systems that run Linux at CPL > 0 may or may not have the
1755          * issue, but, even if they have the issue, there's absolutely
1756          * nothing we can do about it because we can't use the real IRET
1757          * instruction.
1758          *
1759          * NB: For the time being, only 32-bit kernels support
1760          * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
1761          * whether to apply espfix using paravirt hooks.  If any
1762          * non-paravirt system ever shows up that does *not* have the
1763          * ESPFIX issue, we can change this.
1764          */
1765 #ifdef CONFIG_X86_32
1766         set_cpu_bug(c, X86_BUG_ESPFIX);
1767 #endif
1768 }
1769
1770 /*
1771  * Validate that ACPI/mptables have the same information about the
1772  * effective APIC id and update the package map.
1773  */
1774 static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
1775 {
1776 #ifdef CONFIG_SMP
1777         unsigned int apicid, cpu = smp_processor_id();
1778
1779         apicid = apic->cpu_present_to_apicid(cpu);
1780
1781         if (apicid != c->apicid) {
1782                 pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
1783                        cpu, apicid, c->initial_apicid);
1784         }
1785         BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
1786         BUG_ON(topology_update_die_map(c->cpu_die_id, cpu));
1787 #else
1788         c->logical_proc_id = 0;
1789 #endif
1790 }
1791
1792 /*
1793  * This does the hard work of actually picking apart the CPU stuff...
1794  */
1795 static void identify_cpu(struct cpuinfo_x86 *c)
1796 {
1797         int i;
1798
1799         c->loops_per_jiffy = loops_per_jiffy;
1800         c->x86_cache_size = 0;
1801         c->x86_vendor = X86_VENDOR_UNKNOWN;
1802         c->x86_model = c->x86_stepping = 0;     /* So far unknown... */
1803         c->x86_vendor_id[0] = '\0'; /* Unset */
1804         c->x86_model_id[0] = '\0';  /* Unset */
1805         c->x86_max_cores = 1;
1806         c->x86_coreid_bits = 0;
1807         c->cu_id = 0xff;
1808 #ifdef CONFIG_X86_64
1809         c->x86_clflush_size = 64;
1810         c->x86_phys_bits = 36;
1811         c->x86_virt_bits = 48;
1812 #else
1813         c->cpuid_level = -1;    /* CPUID not detected */
1814         c->x86_clflush_size = 32;
1815         c->x86_phys_bits = 32;
1816         c->x86_virt_bits = 32;
1817 #endif
1818         c->x86_cache_alignment = c->x86_clflush_size;
1819         memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1820 #ifdef CONFIG_X86_VMX_FEATURE_NAMES
1821         memset(&c->vmx_capability, 0, sizeof(c->vmx_capability));
1822 #endif
1823
1824         generic_identify(c);
1825
1826         if (this_cpu->c_identify)
1827                 this_cpu->c_identify(c);
1828
1829         /* Clear/Set all flags overridden by options, after probe */
1830         apply_forced_caps(c);
1831
1832 #ifdef CONFIG_X86_64
1833         c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
1834 #endif
1835
1836         /*
1837          * Vendor-specific initialization.  In this section we
1838          * canonicalize the feature flags, meaning if there are
1839          * features a certain CPU supports which CPUID doesn't
1840          * tell us, CPUID claiming incorrect flags, or other bugs,
1841          * we handle them here.
1842          *
1843          * At the end of this section, c->x86_capability better
1844          * indicate the features this CPU genuinely supports!
1845          */
1846         if (this_cpu->c_init)
1847                 this_cpu->c_init(c);
1848
1849         /* Disable the PN if appropriate */
1850         squash_the_stupid_serial_number(c);
1851
1852         /* Set up SMEP/SMAP/UMIP */
1853         setup_smep(c);
1854         setup_smap(c);
1855         setup_umip(c);
1856
1857         /* Enable FSGSBASE instructions if available. */
1858         if (cpu_has(c, X86_FEATURE_FSGSBASE)) {
1859                 cr4_set_bits(X86_CR4_FSGSBASE);
1860                 elf_hwcap2 |= HWCAP2_FSGSBASE;
1861         }
1862
1863         /*
1864          * The vendor-specific functions might have changed features.
1865          * Now we do "generic changes."
1866          */
1867
1868         /* Filter out anything that depends on CPUID levels we don't have */
1869         filter_cpuid_features(c, true);
1870
1871         /* If the model name is still unset, do table lookup. */
1872         if (!c->x86_model_id[0]) {
1873                 const char *p;
1874                 p = table_lookup_model(c);
1875                 if (p)
1876                         strcpy(c->x86_model_id, p);
1877                 else
1878                         /* Last resort... */
1879                         sprintf(c->x86_model_id, "%02x/%02x",
1880                                 c->x86, c->x86_model);
1881         }
1882
1883 #ifdef CONFIG_X86_64
1884         detect_ht(c);
1885 #endif
1886
1887         x86_init_rdrand(c);
1888         setup_pku(c);
1889         setup_cet(c);
1890
1891         /*
1892          * Clear/Set all flags overridden by options, need do it
1893          * before following smp all cpus cap AND.
1894          */
1895         apply_forced_caps(c);
1896
1897         /*
1898          * On SMP, boot_cpu_data holds the common feature set between
1899          * all CPUs; so make sure that we indicate which features are
1900          * common between the CPUs.  The first time this routine gets
1901          * executed, c == &boot_cpu_data.
1902          */
1903         if (c != &boot_cpu_data) {
1904                 /* AND the already accumulated flags with these */
1905                 for (i = 0; i < NCAPINTS; i++)
1906                         boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1907
1908                 /* OR, i.e. replicate the bug flags */
1909                 for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
1910                         c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
1911         }
1912
1913         ppin_init(c);
1914
1915         /* Init Machine Check Exception if available. */
1916         mcheck_cpu_init(c);
1917
1918         select_idle_routine(c);
1919
1920 #ifdef CONFIG_NUMA
1921         numa_add_cpu(smp_processor_id());
1922 #endif
1923 }
1924
1925 /*
1926  * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
1927  * on 32-bit kernels:
1928  */
1929 #ifdef CONFIG_X86_32
1930 void enable_sep_cpu(void)
1931 {
1932         struct tss_struct *tss;
1933         int cpu;
1934
1935         if (!boot_cpu_has(X86_FEATURE_SEP))
1936                 return;
1937
1938         cpu = get_cpu();
1939         tss = &per_cpu(cpu_tss_rw, cpu);
1940
1941         /*
1942          * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
1943          * see the big comment in struct x86_hw_tss's definition.
1944          */
1945
1946         tss->x86_tss.ss1 = __KERNEL_CS;
1947         wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
1948         wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
1949         wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
1950
1951         put_cpu();
1952 }
1953 #endif
1954
1955 void __init identify_boot_cpu(void)
1956 {
1957         identify_cpu(&boot_cpu_data);
1958         if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
1959                 pr_info("CET detected: Indirect Branch Tracking enabled\n");
1960 #ifdef CONFIG_X86_32
1961         sysenter_setup();
1962         enable_sep_cpu();
1963 #endif
1964         cpu_detect_tlb(&boot_cpu_data);
1965         setup_cr_pinning();
1966
1967         tsx_init();
1968 }
1969
1970 void identify_secondary_cpu(struct cpuinfo_x86 *c)
1971 {
1972         BUG_ON(c == &boot_cpu_data);
1973         identify_cpu(c);
1974 #ifdef CONFIG_X86_32
1975         enable_sep_cpu();
1976 #endif
1977         validate_apic_and_package_id(c);
1978         x86_spec_ctrl_setup_ap();
1979         update_srbds_msr();
1980
1981         tsx_ap_init();
1982 }
1983
1984 void print_cpu_info(struct cpuinfo_x86 *c)
1985 {
1986         const char *vendor = NULL;
1987
1988         if (c->x86_vendor < X86_VENDOR_NUM) {
1989                 vendor = this_cpu->c_vendor;
1990         } else {
1991                 if (c->cpuid_level >= 0)
1992                         vendor = c->x86_vendor_id;
1993         }
1994
1995         if (vendor && !strstr(c->x86_model_id, vendor))
1996                 pr_cont("%s ", vendor);
1997
1998         if (c->x86_model_id[0])
1999                 pr_cont("%s", c->x86_model_id);
2000         else
2001                 pr_cont("%d86", c->x86);
2002
2003         pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
2004
2005         if (c->x86_stepping || c->cpuid_level >= 0)
2006                 pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
2007         else
2008                 pr_cont(")\n");
2009 }
2010
2011 /*
2012  * clearcpuid= was already parsed in cpu_parse_early_param().  This dummy
2013  * function prevents it from becoming an environment variable for init.
2014  */
2015 static __init int setup_clearcpuid(char *arg)
2016 {
2017         return 1;
2018 }
2019 __setup("clearcpuid=", setup_clearcpuid);
2020
2021 DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = {
2022         .current_task   = &init_task,
2023         .preempt_count  = INIT_PREEMPT_COUNT,
2024         .top_of_stack   = TOP_OF_INIT_STACK,
2025 };
2026 EXPORT_PER_CPU_SYMBOL(pcpu_hot);
2027
2028 #ifdef CONFIG_X86_64
2029 DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
2030                      fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
2031 EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);
2032
2033 static void wrmsrl_cstar(unsigned long val)
2034 {
2035         /*
2036          * Intel CPUs do not support 32-bit SYSCALL. Writing to MSR_CSTAR
2037          * is so far ignored by the CPU, but raises a #VE trap in a TDX
2038          * guest. Avoid the pointless write on all Intel CPUs.
2039          */
2040         if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
2041                 wrmsrl(MSR_CSTAR, val);
2042 }
2043
2044 /* May not be marked __init: used by software suspend */
2045 void syscall_init(void)
2046 {
2047         wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
2048         wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
2049
2050 #ifdef CONFIG_IA32_EMULATION
2051         wrmsrl_cstar((unsigned long)entry_SYSCALL_compat);
2052         /*
2053          * This only works on Intel CPUs.
2054          * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP.
2055          * This does not cause SYSENTER to jump to the wrong location, because
2056          * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
2057          */
2058         wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
2059         wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
2060                     (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
2061         wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
2062 #else
2063         wrmsrl_cstar((unsigned long)ignore_sysret);
2064         wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
2065         wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
2066         wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
2067 #endif
2068
2069         /*
2070          * Flags to clear on syscall; clear as much as possible
2071          * to minimize user space-kernel interference.
2072          */
2073         wrmsrl(MSR_SYSCALL_MASK,
2074                X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
2075                X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_TF|
2076                X86_EFLAGS_IF|X86_EFLAGS_DF|X86_EFLAGS_OF|
2077                X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_RF|
2078                X86_EFLAGS_AC|X86_EFLAGS_ID);
2079 }
2080
2081 #else   /* CONFIG_X86_64 */
2082
2083 #ifdef CONFIG_STACKPROTECTOR
2084 DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
2085 EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
2086 #endif
2087
2088 #endif  /* CONFIG_X86_64 */
2089
2090 /*
2091  * Clear all 6 debug registers:
2092  */
2093 static void clear_all_debug_regs(void)
2094 {
2095         int i;
2096
2097         for (i = 0; i < 8; i++) {
2098                 /* Ignore db4, db5 */
2099                 if ((i == 4) || (i == 5))
2100                         continue;
2101
2102                 set_debugreg(0, i);
2103         }
2104 }
2105
2106 #ifdef CONFIG_KGDB
2107 /*
2108  * Restore debug regs if using kgdbwait and you have a kernel debugger
2109  * connection established.
2110  */
2111 static void dbg_restore_debug_regs(void)
2112 {
2113         if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
2114                 arch_kgdb_ops.correct_hw_break();
2115 }
2116 #else /* ! CONFIG_KGDB */
2117 #define dbg_restore_debug_regs()
2118 #endif /* ! CONFIG_KGDB */
2119
2120 static void wait_for_master_cpu(int cpu)
2121 {
2122 #ifdef CONFIG_SMP
2123         /*
2124          * wait for ACK from master CPU before continuing
2125          * with AP initialization
2126          */
2127         WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
2128         while (!cpumask_test_cpu(cpu, cpu_callout_mask))
2129                 cpu_relax();
2130 #endif
2131 }
2132
2133 #ifdef CONFIG_X86_64
2134 static inline void setup_getcpu(int cpu)
2135 {
2136         unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
2137         struct desc_struct d = { };
2138
2139         if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
2140                 wrmsr(MSR_TSC_AUX, cpudata, 0);
2141
2142         /* Store CPU and node number in limit. */
2143         d.limit0 = cpudata;
2144         d.limit1 = cpudata >> 16;
2145
2146         d.type = 5;             /* RO data, expand down, accessed */
2147         d.dpl = 3;              /* Visible to user code */
2148         d.s = 1;                /* Not a system segment */
2149         d.p = 1;                /* Present */
2150         d.d = 1;                /* 32-bit */
2151
2152         write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
2153 }
2154
2155 static inline void ucode_cpu_init(int cpu)
2156 {
2157         if (cpu)
2158                 load_ucode_ap();
2159 }
2160
2161 static inline void tss_setup_ist(struct tss_struct *tss)
2162 {
2163         /* Set up the per-CPU TSS IST stacks */
2164         tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
2165         tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
2166         tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
2167         tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
2168         /* Only mapped when SEV-ES is active */
2169         tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
2170 }
2171
2172 #else /* CONFIG_X86_64 */
2173
2174 static inline void setup_getcpu(int cpu) { }
2175
2176 static inline void ucode_cpu_init(int cpu)
2177 {
2178         show_ucode_info_early();
2179 }
2180
2181 static inline void tss_setup_ist(struct tss_struct *tss) { }
2182
2183 #endif /* !CONFIG_X86_64 */
2184
2185 static inline void tss_setup_io_bitmap(struct tss_struct *tss)
2186 {
2187         tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID;
2188
2189 #ifdef CONFIG_X86_IOPL_IOPERM
2190         tss->io_bitmap.prev_max = 0;
2191         tss->io_bitmap.prev_sequence = 0;
2192         memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap));
2193         /*
2194          * Invalidate the extra array entry past the end of the all
2195          * permission bitmap as required by the hardware.
2196          */
2197         tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL;
2198 #endif
2199 }
2200
2201 /*
2202  * Setup everything needed to handle exceptions from the IDT, including the IST
2203  * exceptions which use paranoid_entry().
2204  */
2205 void cpu_init_exception_handling(void)
2206 {
2207         struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
2208         int cpu = raw_smp_processor_id();
2209
2210         /* paranoid_entry() gets the CPU number from the GDT */
2211         setup_getcpu(cpu);
2212
2213         /* IST vectors need TSS to be set up. */
2214         tss_setup_ist(tss);
2215         tss_setup_io_bitmap(tss);
2216         set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
2217
2218         load_TR_desc();
2219
2220         /* GHCB needs to be setup to handle #VC. */
2221         setup_ghcb();
2222
2223         /* Finally load the IDT */
2224         load_current_idt();
2225 }
2226
2227 /*
2228  * cpu_init() initializes state that is per-CPU. Some data is already
2229  * initialized (naturally) in the bootstrap process, such as the GDT.  We
2230  * reload it nevertheless, this function acts as a 'CPU state barrier',
2231  * nothing should get across.
2232  */
2233 void cpu_init(void)
2234 {
2235         struct task_struct *cur = current;
2236         int cpu = raw_smp_processor_id();
2237
2238         wait_for_master_cpu(cpu);
2239
2240         ucode_cpu_init(cpu);
2241
2242 #ifdef CONFIG_NUMA
2243         if (this_cpu_read(numa_node) == 0 &&
2244             early_cpu_to_node(cpu) != NUMA_NO_NODE)
2245                 set_numa_node(early_cpu_to_node(cpu));
2246 #endif
2247         pr_debug("Initializing CPU#%d\n", cpu);
2248
2249         if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) ||
2250             boot_cpu_has(X86_FEATURE_TSC) || boot_cpu_has(X86_FEATURE_DE))
2251                 cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
2252
2253         if (IS_ENABLED(CONFIG_X86_64)) {
2254                 loadsegment(fs, 0);
2255                 memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
2256                 syscall_init();
2257
2258                 wrmsrl(MSR_FS_BASE, 0);
2259                 wrmsrl(MSR_KERNEL_GS_BASE, 0);
2260                 barrier();
2261
2262                 x2apic_setup();
2263         }
2264
2265         mmgrab(&init_mm);
2266         cur->active_mm = &init_mm;
2267         BUG_ON(cur->mm);
2268         initialize_tlbstate_and_flush();
2269         enter_lazy_tlb(&init_mm, cur);
2270
2271         /*
2272          * sp0 points to the entry trampoline stack regardless of what task
2273          * is running.
2274          */
2275         load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
2276
2277         load_mm_ldt(&init_mm);
2278
2279         clear_all_debug_regs();
2280         dbg_restore_debug_regs();
2281
2282         doublefault_init_cpu_tss();
2283
2284         fpu__init_cpu();
2285
2286         if (is_uv_system())
2287                 uv_cpu_init();
2288
2289         load_fixmap_gdt(cpu);
2290 }
2291
2292 #ifdef CONFIG_SMP
2293 void cpu_init_secondary(void)
2294 {
2295         /*
2296          * Relies on the BP having set-up the IDT tables, which are loaded
2297          * on this CPU in cpu_init_exception_handling().
2298          */
2299         cpu_init_exception_handling();
2300         cpu_init();
2301 }
2302 #endif
2303
2304 #ifdef CONFIG_MICROCODE_LATE_LOADING
2305 /**
2306  * store_cpu_caps() - Store a snapshot of CPU capabilities
2307  * @curr_info: Pointer where to store it
2308  *
2309  * Returns: None
2310  */
2311 void store_cpu_caps(struct cpuinfo_x86 *curr_info)
2312 {
2313         /* Reload CPUID max function as it might've changed. */
2314         curr_info->cpuid_level = cpuid_eax(0);
2315
2316         /* Copy all capability leafs and pick up the synthetic ones. */
2317         memcpy(&curr_info->x86_capability, &boot_cpu_data.x86_capability,
2318                sizeof(curr_info->x86_capability));
2319
2320         /* Get the hardware CPUID leafs */
2321         get_cpu_cap(curr_info);
2322 }
2323
2324 /**
2325  * microcode_check() - Check if any CPU capabilities changed after an update.
2326  * @prev_info:  CPU capabilities stored before an update.
2327  *
2328  * The microcode loader calls this upon late microcode load to recheck features,
2329  * only when microcode has been updated. Caller holds microcode_mutex and CPU
2330  * hotplug lock.
2331  *
2332  * Return: None
2333  */
2334 void microcode_check(struct cpuinfo_x86 *prev_info)
2335 {
2336         struct cpuinfo_x86 curr_info;
2337
2338         perf_check_microcode();
2339
2340         store_cpu_caps(&curr_info);
2341
2342         if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,
2343                     sizeof(prev_info->x86_capability)))
2344                 return;
2345
2346         pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
2347         pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
2348 }
2349 #endif
2350
2351 /*
2352  * Invoked from core CPU hotplug code after hotplug operations
2353  */
2354 void arch_smt_update(void)
2355 {
2356         /* Handle the speculative execution misfeatures */
2357         cpu_bugs_smt_update();
2358         /* Check whether IPI broadcasting can be enabled */
2359         apic_smt_update();
2360 }
This page took 0.175353 seconds and 4 git commands to generate.