// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}
#ifdef CONFIG_OF
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->fw_token == this_leaf->fw_token;
}
/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
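/*
 * For illustration only: a hypothetical devicetree node that the unified
 * entry above would be parsed from. The property names are the ones queried
 * in the table; the node name and values are made up:
 *
 *	l2-cache {
 *		compatible = "cache";
 *		cache-unified;
 *		cache-size = <0x80000>;
 *		cache-line-size = <64>;
 *		cache-sets = <2048>;
 *	};
 */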
static inline int get_cacheinfo_idx(enum cache_type type)
{
	/* unified caches use entry 0; CACHE_TYPE_INST/DATA index directly */
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}
static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	of_property_read_u32(np, propname, &this_leaf->size);
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}
static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}
static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative (nr_sets == 1), there is no
	 * need to check the other properties.
	 */
	if (nr_sets > 1 && size > 0 && line_size > 0)
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}
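/*
 * Worked example with made-up numbers: a 32 KiB cache (size = 32768) with
 * 64 sets of 64-byte lines yields (32768 / 64) / 64 = 8 ways.
 */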
static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}
static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level must set up the cache level correctly,
	 * overriding the architecturally specified levels, so
	 * if the type is NONE at this stage, it should be unified.
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if fw_token is already populated */
	if (this_cpu_ci->info_list->fw_token)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np); /* cpu node itself */
		if (!np)
			break;
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT/ACPI systems, assume unique level 1 caches,
	 * system-wide shared caches for all other levels. This will be
	 * used only if arch specific code has not populated shared_cpu_map.
	 */
	return this_leaf->level != 1;
}
#endif
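/*
 * Example of the fallback (hypothetical 4-CPU SMP system with no DT/ACPI
 * cache description): each L1 leaf is treated as private to its CPU, and
 * every L2 or higher leaf as shared by all online CPUs.
 */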
int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}

unsigned int coherency_max_size;
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */

			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
		/* record the maximum cache line size */
		if (this_leaf->coherency_line_size > coherency_max_size)
			coherency_max_size = this_leaf->coherency_line_size;
	}

	return 0;
}
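/*
 * Example with a made-up topology: if CPU0 and CPU1 share an L2 leaf, the
 * loop above sets both CPUs' bits in both leaves' shared_cpu_map, keeping
 * the masks symmetric regardless of which CPU is brought up first.
 */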
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		if (of_have_populated_dt())
			of_node_put(this_leaf->fw_token);
	}
}
static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
	cache_leaves(cpu) = 0;
}
int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}
static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	/*
	 * populate_cache_leaves() may completely set up the cache leaves and
	 * shared_cpu_map, or it may leave them partially set up.
	 */
	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, fw_token and
	 * shared_cpu_map will be set up here only if they are not
	 * populated already.
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}
/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])
#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sysfs_emit(buf, "%u\n", this_leaf->object);	\
}
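/*
 * For instance, show_one(level, level) below expands to a level_show()
 * callback that emits this_leaf->level, wired up by DEVICE_ATTR_RO(level)
 * further down.
 */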
show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);
static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}
static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}
static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}
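/*
 * Example outputs for a made-up mask covering CPUs 0 and 1: shared_cpu_map
 * prints the hex bitmap form "3" ("%*pb"), shared_cpu_list the range form
 * "0-1" ("%*pbl").
 */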
static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const char *output;

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		output = "Data";
		break;
	case CACHE_TYPE_INST:
		output = "Instruction";
		break;
	case CACHE_TYPE_UNIFIED:
		output = "Unified";
		break;
	default:
		return -EINVAL;
	}

	return sysfs_emit(buf, "%s\n", output);
}
static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	const char *output;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		output = "ReadWriteAllocate";
	else if (ci_attr & CACHE_READ_ALLOCATE)
		output = "ReadAllocate";
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		output = "WriteAllocate";
	else
		return 0;

	return sysfs_emit(buf, "%s\n", output);
}
static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sysfs_emit(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sysfs_emit(buf, "WriteBack\n");
	return n;
}
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);
static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}
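/*
 * Note: an is_visible() callback that returns 0 hides the attribute, so
 * fields that were never detected (size, sets, policies, ...) simply do
 * not show up under cpuX/cache/indexY in sysfs.
 */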
static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};
const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}
static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}
/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}
static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}
static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}
static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}
static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}
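/*
 * Note: cpuhp_setup_state() below also invokes the "online" callback for
 * every CPU already up at registration time, so the boot CPUs get their
 * cacheinfo and sysfs nodes as well, not just later hotplugged ones.
 */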
static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
				 "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);