// SPDX-License-Identifier: GPL-2.0
/*
 * Routines to identify caches on Intel CPU.
 *
 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
 */

#include <linux/slab.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/sysfs.h>
#include <linux/pci.h>
#include <linux/stop_machine.h>

#include <asm/cpufeature.h>
#include <asm/cacheinfo.h>
#include <asm/amd_nb.h>
#include <asm/tlbflush.h>

/* Shared last level cache maps */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);

/* Shared L2 cache maps */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);

/* Kernel controls MTRR and/or PAT MSRs. */
unsigned int memory_caching_control __ro_after_init;

	unsigned char descriptor;

#define MB(x)	((x) * 1024)
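/*
 * All sizes in cache_table below are in KB, so MB(x) simply converts a
 * megabyte count into the table's KB units (e.g. MB(2) == 2048).
 */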
/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */

static const struct _cache_table cache_table[] =
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, MB(2) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, MB(4) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, MB(1) },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, MB(2) },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, MB(4) },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, MB(8) },	/* 8-way set assoc, 64 byte line size */
	{ 0x48, LVL_2, MB(3) },	/* 12-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, MB(6) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, MB(6) },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x80, LVL_2, 512 },	/* 8-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, MB(1) },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, MB(2) },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, MB(2) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, MB(2) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, MB(8) },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, MB(2) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3, MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3, MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3, MB(24) },	/* 24-way set assoc, 64 byte line size */
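	/*
	 * The descriptor lookup loop in init_intel_cacheinfo() below walks
	 * this table until it reaches an entry whose descriptor is zero,
	 * so the table must be terminated by a zero-descriptor sentinel.
	 */
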
union _cpuid4_leaf_eax {
	enum _cache_type	type:5;
	unsigned int		level:3;
	unsigned int		is_self_initializing:1;
	unsigned int		is_fully_associative:1;
	unsigned int		reserved:4;
	unsigned int		num_threads_sharing:12;
	unsigned int		num_cores_on_die:6;

union _cpuid4_leaf_ebx {
	unsigned int		coherency_line_size:12;
	unsigned int		physical_line_partition:10;
	unsigned int		ways_of_associativity:10;

union _cpuid4_leaf_ecx {
	unsigned int		number_of_sets:32;
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	struct amd_northbridge *nb;

static unsigned short num_cache_leaves;

/*
 * AMD doesn't have CPUID4. Emulate it here to report the same
 * information to the user. This makes some assumptions about the machine:
 * L2 not shared, no SMT etc., which is currently true on AMD CPUs.
 *
 * In theory the TLBs could be reported as fake type (they are in "dummy").
 */
	unsigned line_size:8;
	unsigned lines_per_tag:8;
	unsigned size_in_kb:8;

	unsigned line_size:8;
	unsigned lines_per_tag:4;
	unsigned size_in_kb:16;

	unsigned line_size:8;
	unsigned lines_per_tag:4;
	unsigned size_encoded:14;

static const unsigned short assocs[] = {
	[0xf] = 0xffff /* fully associative - no way to show this currently */

static const unsigned char levels[] = { 1, 1, 2, 3 };
static const unsigned char types[] = { 1, 2, 3, 3 };
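/*
 * assocs[] translates the associativity encoding reported by
 * CPUID 0x80000005/0x80000006 into a way count (0xf means fully
 * associative).  levels[] and types[] give the cache level and type
 * (1 = data, 2 = instruction, 3 = unified) for each emulated leaf:
 * leaf 0 is L1d, leaf 1 is L1i, leaf 2 is L2 and leaf 3 is L3.
 */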
static const enum cache_type cache_type_map[] = {
	[CTYPE_NULL] = CACHE_TYPE_NOCACHE,
	[CTYPE_DATA] = CACHE_TYPE_DATA,
	[CTYPE_INST] = CACHE_TYPE_INST,
	[CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
	   union _cpuid4_leaf_ebx *ebx,
	   union _cpuid4_leaf_ecx *ecx)
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l1_cache *l1 = &l1d;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	assoc = assocs[l1->assoc];
	line_size = l1->line_size;
	lines_per_tag = l1->lines_per_tag;
	size_in_kb = l1->size_in_kb;

	assoc = assocs[l2.assoc];
	line_size = l2.line_size;
	lines_per_tag = l2.lines_per_tag;
	/* cpu_data has errata corrections for K7 applied */
	size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);

	assoc = assocs[l3.assoc];
	line_size = l3.line_size;
	lines_per_tag = l3.lines_per_tag;
	size_in_kb = l3.size_encoded * 512;
	if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
		size_in_kb = size_in_kb >> 1;

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;

	eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
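	/*
	 * Example: a 512 KB, 16-way cache with 64-byte lines gives
	 * 512 * 1024 / 64 / 16 = 512 sets, stored as 511, matching the
	 * CPUID(4) convention of reporting "value minus one" fields.
	 */
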
#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)

/*
 * L3 cache descriptors
 */
static void amd_calc_l3_indices(struct amd_northbridge *nb)
	struct amd_l3_cache *l3 = &nb->l3_cache;
	unsigned int sc0, sc1, sc2, sc3;

	pci_read_config_dword(nb->misc, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));

	if (boot_cpu_data.x86 == 0x15) {
		l3->subcaches[0] = sc0 += !(val & BIT(1));
		l3->subcaches[1] = sc1 += !(val & BIT(5));

	l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
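	/*
	 * Each subcaches[i] counts the enabled halves of that subcache,
	 * so the index count is sized from the largest one: e.g. two
	 * enabled halves give (2 << 10) - 1 = 2047 usable indices.
	 */
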
/*
 * check whether a slot used for disabling an L3 index is occupied.
 * @l3: L3 cache descriptor
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if used or negative value if slot free.
 */
static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
	unsigned int reg = 0;

	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

	/* check whether this slot is activated already */
	if (reg & (3UL << 30))
static ssize_t show_cache_disable(struct cacheinfo *this_leaf, char *buf,
	struct amd_northbridge *nb = this_leaf->priv;

	index = amd_get_l3_disable_slot(nb, slot);
	return sprintf(buf, "%d\n", index);
	return sprintf(buf, "FREE\n");

#define SHOW_CACHE_DISABLE(slot) \
cache_disable_##slot##_show(struct device *dev, \
			    struct device_attribute *attr, char *buf) \
	struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
	return show_cache_disable(this_leaf, buf, slot); \
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
				 unsigned slot, unsigned long idx)
	/*
	 * disable index in all 4 subcaches
	 */
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);

		if (!nb->l3_cache.subcaches[i])

		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

		/*
		 * We need to WBINVD on a core on the node containing the L3
		 * cache whose indices we disable, therefore a simple wbinvd()
		 * is not enough.
		 */
		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
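		/*
		 * Disabling an index only takes effect once the slot register
		 * is re-written with BIT(31) set; the upstream flow (assumed
		 * here) flushes the node's L3 via wbinvd_on_cpu(cpu) between
		 * the two writes.
		 */
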
/*
 * disable an L3 cache index by using a disable-slot
 *
 * @l3:    L3 cache descriptor
 * @cpu:   A CPU on the node containing the L3 cache
 * @slot:  slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu,
				   unsigned slot, unsigned long index)
	/* check if @slot is already used or the index is already disabled */
	ret = amd_get_l3_disable_slot(nb, slot);

	if (index > nb->l3_cache.indices)

	/* check whether the other slot has disabled the same index already */
	if (index == amd_get_l3_disable_slot(nb, !slot))

	amd_l3_disable_index(nb, cpu, slot, index);

static ssize_t store_cache_disable(struct cacheinfo *this_leaf,
				   const char *buf, size_t count,
	unsigned long val = 0;
	struct amd_northbridge *nb = this_leaf->priv;

	if (!capable(CAP_SYS_ADMIN))

	cpu = cpumask_first(&this_leaf->shared_cpu_map);

	if (kstrtoul(buf, 10, &val) < 0)

	err = amd_set_l3_disable_slot(nb, cpu, slot, val);
		pr_warn("L3 slot %d in use/index already disabled!\n",

#define STORE_CACHE_DISABLE(slot) \
cache_disable_##slot##_store(struct device *dev, \
			     struct device_attribute *attr, \
			     const char *buf, size_t count) \
	struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
	return store_cache_disable(this_leaf, buf, count, slot); \
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static ssize_t subcaches_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	int cpu = cpumask_first(&this_leaf->shared_cpu_map);

	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));

static ssize_t subcaches_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	int cpu = cpumask_first(&this_leaf->shared_cpu_map);

	if (!capable(CAP_SYS_ADMIN))

	if (kstrtoul(buf, 16, &val) < 0)

	if (amd_set_subcaches(cpu, val))

static DEVICE_ATTR_RW(cache_disable_0);
static DEVICE_ATTR_RW(cache_disable_1);
static DEVICE_ATTR_RW(subcaches);

cache_private_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	umode_t mode = attr->mode;

	if (!this_leaf->priv)

	if ((attr == &dev_attr_subcaches.attr) &&
	    amd_nb_has_feature(AMD_NB_L3_PARTITIONING))

	if ((attr == &dev_attr_cache_disable_0.attr ||
	     attr == &dev_attr_cache_disable_1.attr) &&
	    amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
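	/*
	 * A private attribute is only exposed when the leaf has a
	 * northbridge descriptor attached (i.e. an AMD L3 leaf) and the
	 * northbridge advertises the matching feature: L3 partitioning
	 * for "subcaches", L3 index disable for the "cache_disable_*" files.
	 */
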
static struct attribute_group cache_private_group = {
	.is_visible = cache_private_attrs_is_visible,

static void init_amd_l3_attrs(void)
	static struct attribute **amd_l3_attrs;

	if (amd_l3_attrs) /* already initialized */

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))

	amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL);

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
		amd_l3_attrs[n++] = &dev_attr_cache_disable_0.attr;
		amd_l3_attrs[n++] = &dev_attr_cache_disable_1.attr;

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		amd_l3_attrs[n++] = &dev_attr_subcaches.attr;

	cache_private_group.attrs = amd_l3_attrs;

const struct attribute_group *
cache_get_priv_group(struct cacheinfo *this_leaf)
	struct amd_northbridge *nb = this_leaf->priv;

	if (this_leaf->level < 3 || !nb)

	if (nb && nb->l3_cache.indices)

	return &cache_private_group;

static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
	/* only for L3, and not in virtualized environments */

	node = topology_die_id(smp_processor_id());
	this_leaf->nb = node_to_amd_nb(node);
	if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
		amd_calc_l3_indices(this_leaf->nb);

#define amd_init_l3_cache(x, y)
#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */

cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		if (boot_cpu_has(X86_FEATURE_TOPOEXT))
			cpuid_count(0x8000001d, index, &eax.full,
				    &ebx.full, &ecx.full, &edx);
			amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_init_l3_cache(this_leaf, index);
	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		cpuid_count(0x8000001d, index, &eax.full,
			    &ebx.full, &ecx.full, &edx);
		amd_init_l3_cache(this_leaf, index);
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);

	if (eax.split.type == CTYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets + 1) *
			  (ebx.split.coherency_line_size + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity + 1);
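	/*
	 * Example: 64 sets x 64-byte lines x 1 partition x 8 ways
	 * = 32768 bytes, i.e. a 32 KB cache.
	 */
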
static int find_num_cache_leaves(struct cpuinfo_x86 *c)
	unsigned int eax, ebx, ecx, edx, op;
	union _cpuid4_leaf_eax cache_eax;

	if (c->x86_vendor == X86_VENDOR_AMD ||
	    c->x86_vendor == X86_VENDOR_HYGON)

	/* Do cpuid(op) loop to find out num_cache_leaves */
		cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CTYPE_NULL);
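	/*
	 * Leaves are enumerated until the CPU reports a CTYPE_NULL leaf;
	 * the number of iterations before that is the number of cache
	 * leaves this CPU exposes.
	 */
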
void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu)
	/*
	 * We may have multiple LLCs if L3 caches exist, so check if we
	 * have an L3 cache by looking at the L3 cache CPUID leaf.
	 */
	if (!cpuid_edx(0x80000006))

		/* LLC is at the node level. */
		per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
	} else if (c->x86 == 0x17 && c->x86_model <= 0x1F) {
		/*
		 * LLC is at the core complex level.
		 * Core complex ID is ApicId[3] for these processors.
		 */
		per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
		/*
		 * LLC ID is calculated from the number of threads sharing the
		 * cache.
		 */
		u32 eax, ebx, ecx, edx, num_sharing_cache = 0;
		u32 llc_index = find_num_cache_leaves(c) - 1;

		cpuid_count(0x8000001d, llc_index, &eax, &ebx, &ecx, &edx);

		num_sharing_cache = ((eax >> 14) & 0xfff) + 1;

		if (num_sharing_cache) {
			int bits = get_count_order(num_sharing_cache);

			per_cpu(cpu_llc_id, cpu) = c->apicid >> bits;
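			/*
			 * E.g. eight threads sharing the LLC gives
			 * get_count_order(8) = 3, so they all end up with
			 * the same cpu_llc_id (apicid >> 3).
			 */
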
void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu)
	/*
	 * We may have multiple LLCs if L3 caches exist, so check if we
	 * have an L3 cache by looking at the L3 cache CPUID leaf.
	 */
	if (!cpuid_edx(0x80000006))

	/*
	 * LLC is at the core complex level.
	 * Core complex ID is ApicId[3] for these processors.
	 */
	per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;

void init_amd_cacheinfo(struct cpuinfo_x86 *c)
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		num_cache_leaves = find_num_cache_leaves(c);
	} else if (c->extended_cpuid_level >= 0x80000006) {
		if (cpuid_edx(0x80000006) & 0xf000)
			num_cache_leaves = 4;
			num_cache_leaves = 3;
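	/*
	 * CPUID 0x80000006 EDX[15:12] is the L3 associativity encoding;
	 * a non-zero value means an L3 cache is present, giving four
	 * leaves (L1d, L1i, L2, L3) instead of three.
	 */
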
void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
	num_cache_leaves = find_num_cache_leaves(c);

void init_intel_cacheinfo(struct cpuinfo_x86 *c)
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
	unsigned int cpu = c->cpu_index;

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves(c);

		/*
		 * Whenever possible use cpuid(4), the deterministic cache
		 * parameters leaf, to find the cache details.
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf = {};

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);

			switch (this_leaf.eax.split.level) {
				if (this_leaf.eax.split.type == CTYPE_DATA)
					new_l1d = this_leaf.size/1024;
				else if (this_leaf.eax.split.type == CTYPE_INST)
					new_l1i = this_leaf.size/1024;

				new_l2 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l2_id = c->apicid & ~((1 << index_msb) - 1);

				new_l3 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l3_id = c->apicid & ~((1 << index_msb) - 1);
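				/*
				 * Masking off the low index_msb APIC-ID bits
				 * gives an id that is identical for every
				 * thread sharing the cache, e.g. two threads
				 * sharing an L2 -> index_msb = 1 ->
				 * l2_id = apicid & ~1.
				 */
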
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * the trace cache only.
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2  call */
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;

		if (num_cache_leaves != 0 && c->x86 == 15)

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)

						switch (cache_table[k].cache_type) {
							l1i += cache_table[k].size;
							l1d += cache_table[k].size;
							l2 += cache_table[k].size;
							l3 += cache_table[k].size;
							trace += cache_table[k].size;
		per_cpu(cpu_llc_id, cpu) = l2_id;
		per_cpu(cpu_l2c_id, cpu) = l2_id;

		per_cpu(cpu_llc_id, cpu) = l3_id;

	/*
	 * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
	 * turn means that the only possibility is SMT (as indicated in
	 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
	 * that SMT shares all caches, we can unconditionally set cpu_llc_id
	 * to c->phys_proc_id.
	 */
	if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
		per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	cpu_detect_cache_sizes(c);

static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
				    struct _cpuid4_info_regs *base)
	struct cpu_cacheinfo *this_cpu_ci;
	struct cacheinfo *this_leaf;

	/*
	 * For L3, always use the pre-calculated cpu_llc_shared_mask
	 * to derive shared_cpu_map.
	 */
		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
			this_cpu_ci = get_cpu_cacheinfo(i);
			if (!this_cpu_ci->info_list)
			this_leaf = this_cpu_ci->info_list + index;
			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
				if (!cpu_online(sibling))
				cpumask_set_cpu(sibling,
						&this_leaf->shared_cpu_map);
	} else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		unsigned int apicid, nshared, first, last;

		nshared = base->eax.split.num_threads_sharing + 1;
		apicid = cpu_data(cpu).apicid;
		first = apicid - (apicid % nshared);
		last = first + nshared - 1;
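		/*
		 * E.g. APIC ID 5 with nshared = 4 gives first = 4, last = 7:
		 * the leaf is shared by the CPUs whose APIC IDs are 4..7.
		 */
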
		for_each_online_cpu(i) {
			this_cpu_ci = get_cpu_cacheinfo(i);
			if (!this_cpu_ci->info_list)

			apicid = cpu_data(i).apicid;
			if ((apicid < first) || (apicid > last))

			this_leaf = this_cpu_ci->info_list + index;

			for_each_online_cpu(sibling) {
				apicid = cpu_data(sibling).apicid;
				if ((apicid < first) || (apicid > last))

				cpumask_set_cpu(sibling,
						&this_leaf->shared_cpu_map);

static void __cache_cpumap_setup(unsigned int cpu, int index,
				 struct _cpuid4_info_regs *base)
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if (c->x86_vendor == X86_VENDOR_AMD ||
	    c->x86_vendor == X86_VENDOR_HYGON) {
		if (__cache_amd_cpumap_setup(cpu, index, base))

	this_leaf = this_cpu_ci->info_list + index;
	num_threads_sharing = 1 + base->eax.split.num_threads_sharing;

	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
	if (num_threads_sharing == 1)

	index_msb = get_count_order(num_threads_sharing);

	for_each_online_cpu(i)
		if (cpu_data(i).apicid >> index_msb == c->apicid >> index_msb) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */

			sibling_leaf = sib_cpu_ci->info_list + index;
			cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			cpumask_set_cpu(cpu, &sibling_leaf->shared_cpu_map);

static void ci_leaf_init(struct cacheinfo *this_leaf,
			 struct _cpuid4_info_regs *base)
	this_leaf->id = base->id;
	this_leaf->attributes = CACHE_ID;
	this_leaf->level = base->eax.split.level;
	this_leaf->type = cache_type_map[base->eax.split.type];
	this_leaf->coherency_line_size =
				base->ebx.split.coherency_line_size + 1;
	this_leaf->ways_of_associativity =
				base->ebx.split.ways_of_associativity + 1;
	this_leaf->size = base->size;
	this_leaf->number_of_sets = base->ecx.split.number_of_sets + 1;
	this_leaf->physical_line_partition =
				base->ebx.split.physical_line_partition + 1;
	this_leaf->priv = base->nb;

int init_cache_level(unsigned int cpu)
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	if (!num_cache_leaves)

	this_cpu_ci->num_levels = 3;
	this_cpu_ci->num_leaves = num_cache_leaves;

/*
 * The max shared threads number comes from CPUID.4:EAX[25:14] with input
 * ECX as cache index. The APIC ID is then shifted right by that number's
 * order to get the cache id for this cache node.
 */
static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	unsigned long num_threads_sharing;

	num_threads_sharing = 1 + id4_regs->eax.split.num_threads_sharing;
	index_msb = get_count_order(num_threads_sharing);
	id4_regs->id = c->apicid >> index_msb;
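	/*
	 * E.g. EAX[25:14] = 15 means 16 threads share the cache, so
	 * get_count_order(16) = 4 and the cache id is apicid >> 4.
	 */
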
int populate_cache_leaves(unsigned int cpu)
	unsigned int idx, ret;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	struct _cpuid4_info_regs id4_regs = {};

	for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
		ret = cpuid4_cache_lookup_regs(idx, &id4_regs);
		get_cache_id(cpu, &id4_regs);
		ci_leaf_init(this_leaf++, &id4_regs);
		__cache_cpumap_setup(cpu, idx, &id4_regs);

	this_cpu_ci->cpu_map_populated = true;

/*
 * Disable and enable caches. Needed for changing MTRRs and the PAT MSR.
 *
 * Since we are disabling the cache, don't allow any interrupts;
 * they would run extremely slowly and would only increase the pain.
 *
 * The caller must ensure that local interrupts are disabled and
 * are reenabled after cache_enable() has been called.
 */
static unsigned long saved_cr4;
static DEFINE_RAW_SPINLOCK(cache_disable_lock);

void cache_disable(void) __acquires(cache_disable_lock)
	/*
	 * Note that this is not ideal
	 * since the cache is only flushed/disabled for this CPU while the
	 * MTRRs are changed, but changing this requires more invasive
	 * changes to the way the kernel boots.
	 */
	raw_spin_lock(&cache_disable_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;

	/*
	 * Cache flushing is the most time-consuming step when programming
	 * the MTRRs. Fortunately, as per the Intel Software Development
	 * Manual, we can skip it if the processor supports cache self-
	 * snooping.
	 */
	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_feature_enabled(X86_FEATURE_PGE)) {
		saved_cr4 = __read_cr4();
		__write_cr4(saved_cr4 & ~X86_CR4_PGE);
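	/*
	 * Clearing PGE flushes global TLB entries and prevents new ones
	 * from being created, so the CR3 reload below really flushes every
	 * remaining translation.
	 */
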
	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);

	if (cpu_feature_enabled(X86_FEATURE_MTRR))

	/* Again, only flush caches if we have to. */
	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))

void cache_enable(void) __releases(cache_disable_lock)
	/* Flush TLBs (no need to flush caches - they are disabled) */
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);

	if (cpu_feature_enabled(X86_FEATURE_MTRR))

	write_cr0(read_cr0() & ~X86_CR0_CD);

	/* Restore value of CR4 */
	if (cpu_feature_enabled(X86_FEATURE_PGE))
		__write_cr4(saved_cr4);

	raw_spin_unlock(&cache_disable_lock);

static void cache_cpu_init(void)
	unsigned long flags;

	local_irq_save(flags);

	if (memory_caching_control & CACHE_MTRR)
		mtrr_generic_set_state();

	if (memory_caching_control & CACHE_PAT)

	local_irq_restore(flags);

static bool cache_aps_delayed_init = true;

void set_cache_aps_delayed_init(bool val)
	cache_aps_delayed_init = val;

bool get_cache_aps_delayed_init(void)
	return cache_aps_delayed_init;

static int cache_rendezvous_handler(void *unused)
	if (get_cache_aps_delayed_init() || !cpu_online(smp_processor_id()))

void __init cache_bp_init(void)
	if (memory_caching_control)

void cache_bp_restore(void)
	if (memory_caching_control)

static int cache_ap_init(unsigned int cpu)
	if (!memory_caching_control || get_cache_aps_delayed_init())

	/*
	 * Ideally we should hold mtrr_mutex here to avoid MTRR entries
	 * being changed, but this routine is called at CPU boot time and
	 * holding the lock there breaks it.
	 *
	 * This routine is called in two cases:
	 *
	 *   1. very early in software resume, when there absolutely are
	 *      no MTRR entry changes;
	 *
	 *   2. at CPU hotadd time. We let mtrr_add/del_page hold the
	 *      cpuhotplug lock to prevent MTRR entry changes.
	 */
	stop_machine_from_inactive_cpu(cache_rendezvous_handler, NULL,

/*
 * Delayed cache initialization for all APs
 */
void cache_aps_init(void)
	if (!memory_caching_control || !get_cache_aps_delayed_init())

	stop_machine(cache_rendezvous_handler, NULL, cpu_online_mask);
	set_cache_aps_delayed_init(false);

static int __init cache_ap_register(void)
	cpuhp_setup_state_nocalls(CPUHP_AP_CACHECTRL_STARTING,
				  "x86/cachectrl:starting",
				  cache_ap_init, NULL);

core_initcall(cache_ap_register);