/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties.  According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0 /* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D   1 /* d-cache-size, d-cache-block-size, etc. */
#define CACHE_TYPE_INSTRUCTION 2
#define CACHE_TYPE_DATA        3

static const struct cache_type_info cache_type_info[] = {
	{
		/* Embedded systems that use cache-size, cache-block-size,
		 * etc. for the Unified (typically L2) cache. */
		.name            = "Unified",
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	},
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

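/* Initialise a cache object and add it to the global cache_list; the
 * OF node reference taken here is dropped in release_cache(). */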
static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %pOF(%s) refers to cache for %pOF(%s)\n",
			  iter->ofnode,
			  cache_type_string(iter),
			  cache->ofnode,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %pOF\n", cache->level,
		 cache_type_string(cache), cache->ofnode);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

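/* Mark @cpu as sharing every cache on @cache's local chain, from this
 * level down to the last level. */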
static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %pOF(%s)\n",
			  cpu, next->ofnode,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = of_read_number(cache_size, 1);
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const __be32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = of_read_number(line_size, 1);
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = of_read_number(nr_sets, 1);
	return 0;
}

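/* Ways of associativity is derived from the other properties:
 * ways = cache size / (number of sets * line size). */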
static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}

/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED ||
	    cache->type == CACHE_TYPE_UNIFIED_D)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

/*
 * Unified caches can have two different sets of tags.  Most embedded
 * systems use cache-size, etc. for the unified cache size, but Open
 * Firmware systems use d-cache-size, etc.  Check on initialization for
 * which type we have, and return the appropriate structure type.
 * Assume it's embedded if it isn't Open Firmware.  If it's yet a 3rd
 * type, then there will be missing entries in
 * /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
 * to be extended further.
 */
static int cache_is_unified_d(const struct device_node *np)
{
	return of_get_property(np,
		cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}

static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
{
	pr_debug("creating L%d ucache for %pOF\n", level, node);

	return new_cache(cache_is_unified_d(node), level, node);
}

static struct cache *cache_do_one_devnode_split(struct device_node *node,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %pOF\n", level,
		 node);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}

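/* Append @bigger (a larger, higher-level cache) to the end of @smaller's
 * local chain, unless it is already linked. */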
static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;
}

static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ON_ONCE(cache->level != 1);
	WARN_ON_ONCE(!of_node_is_type(cache->ofnode, "cpu"));
}

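/* Walk the next-level cache nodes reachable from the cpu node
 * (of_find_next_cache_node() follows the "l2-cache"/"next-level-cache"
 * phandles) and link each one onto the CPU's local chain. */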
static void do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}

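/* Build (or look up) the full cache chain for a CPU, starting with the
 * level 1 cache(s) described by the cpu node itself, then mark the CPU
 * in each cache's shared_cpu_map. */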
static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

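/* sysfs_ops ->show dispatcher: forward to the embedded kobj_attribute's
 * own show method. */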
static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);

static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

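/* Recover the CPU number by walking up the kobject hierarchy:
 * indexN -> cache -> cpuN device. */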
static unsigned int index_dir_to_cpu(struct cache_index_dir *index)
{
	struct kobject *index_dir_kobj = &index->kobj;
	struct kobject *cache_dir_kobj = index_dir_kobj->parent;
	struct kobject *cpu_dev_kobj = cache_dir_kobj->parent;
	struct device *dev = kobj_to_dev(cpu_dev_kobj);

	return dev->id;
}

/*
 * On big-core systems, each core has two groups of CPUs each of which
 * has its own L1-cache.  The thread-siblings which share l1-cache with
 * @cpu can be obtained via cpu_smallcore_mask().
 */
static const struct cpumask *get_big_core_shared_cpu_map(int cpu, struct cache *cache)
{
	if (cache->level == 1)
		return cpu_smallcore_mask(cpu);

	return &cache->shared_cpu_map;
}

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;
	const struct cpumask *mask;
	int ret, cpu;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	if (has_big_cores) {
		cpu = index_dir_to_cpu(index);
		mask = get_big_core_shared_cpu_map(cpu, cache);
	} else {
		mask = &cache->shared_cpu_map;
	}

	ret = scnprintf(buf, PAGE_SIZE - 1, "%*pb\n",
			cpumask_pr_args(mask));

	return ret;
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};

static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value.  Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%pOF(%s) (rc = %zd)\n",
				 attr->attr.name, cache->ofnode,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %pOF(%s)\n",
				 attr->attr.name, cache->ofnode, cache_type);
	}

	kfree(buf);
}

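/* Create the "indexN" directory for one cache object under the CPU's
 * cache directory and register its optional attributes. */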
static void cacheinfo_create_index_dir(struct cache *cache, int index,
				       struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}

static void cacheinfo_sysfs_populate(unsigned int cpu_id,
				     struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}

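/* Called when a CPU comes online (and at boot): build the CPU's cache
 * objects and expose them in sysfs. */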
void cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}

/* functions needed to remove cache entry for cpu offline or suspend/resume */

#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
    defined(CONFIG_HOTPLUG_CPU)

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache = NULL;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	/* Remove cache dir from sysfs */
	kobject_del(cache_dir->kobj);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

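/* Clear the CPU's bit at each level of its cache chain, releasing any
 * cache object left with an empty shared_cpu_map. */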
static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %pOF(%s)\n",
			  cpu, cache->ofnode,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir) {
		remove_cache_dir(cache_dir);

		per_cpu(cache_dir_pcpu, cpu_id) = NULL;
	}

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}
#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */