Git Repo - linux.git/blob - drivers/base/arch_topology.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Arch specific cpu topology information
4  *
5  * Copyright (C) 2016, ARM Ltd.
6  * Written by: Juri Lelli, ARM Ltd.
7  */
8
9 #include <linux/acpi.h>
10 #include <linux/cpu.h>
11 #include <linux/cpufreq.h>
12 #include <linux/device.h>
13 #include <linux/of.h>
14 #include <linux/slab.h>
15 #include <linux/sched/topology.h>
16 #include <linux/cpuset.h>
17 #include <linux/cpumask.h>
18 #include <linux/init.h>
19 #include <linux/rcupdate.h>
20 #include <linux/sched.h>
21
22 static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
23 static struct cpumask scale_freq_counters_mask;
24 static bool scale_freq_invariant;
25
26 static bool supports_scale_freq_counters(const struct cpumask *cpus)
27 {
28         return cpumask_subset(cpus, &scale_freq_counters_mask);
29 }
30
31 bool topology_scale_freq_invariant(void)
32 {
33         return cpufreq_supports_freq_invariance() ||
34                supports_scale_freq_counters(cpu_online_mask);
35 }
36
37 static void update_scale_freq_invariant(bool status)
38 {
39         if (scale_freq_invariant == status)
40                 return;
41
42         /*
43          * Task scheduler behavior depends on frequency invariance support,
44          * either cpufreq or counter driven. If the support status changes as
45          * a result of counter initialisation and use, retrigger the build of
46          * scheduling domains to ensure the information is propagated properly.
47          */
48         if (topology_scale_freq_invariant() == status) {
49                 scale_freq_invariant = status;
50                 rebuild_sched_domains_energy();
51         }
52 }
53
54 void topology_set_scale_freq_source(struct scale_freq_data *data,
55                                     const struct cpumask *cpus)
56 {
57         struct scale_freq_data *sfd;
58         int cpu;
59
60         /*
61          * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
62          * supported by cpufreq.
63          */
64         if (cpumask_empty(&scale_freq_counters_mask))
65                 scale_freq_invariant = topology_scale_freq_invariant();
66
67         rcu_read_lock();
68
69         for_each_cpu(cpu, cpus) {
70                 sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
71
72                 /* Use ARCH provided counters whenever possible */
73                 if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
74                         rcu_assign_pointer(per_cpu(sft_data, cpu), data);
75                         cpumask_set_cpu(cpu, &scale_freq_counters_mask);
76                 }
77         }
78
79         rcu_read_unlock();
80
81         update_scale_freq_invariant(true);
82 }
83 EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);
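/*
 * Illustrative usage sketch (not part of this file): an architecture driver
 * with per-CPU activity counters could register itself as the preferred
 * frequency-invariance source roughly as below. The callback name and the
 * way the ratio is derived from hardware counters are hypothetical; only
 * struct scale_freq_data, SCALE_FREQ_SOURCE_ARCH, arch_freq_scale and
 * topology_set_scale_freq_source() come from this interface.
 *
 *	static void example_arch_set_freq_scale(void)
 *	{
 *		// example_read_counter_ratio() is a hypothetical helper
 *		// returning a value in the range 0..SCHED_CAPACITY_SCALE
 *		unsigned long scale = example_read_counter_ratio();
 *
 *		this_cpu_write(arch_freq_scale, scale);
 *	}
 *
 *	static struct scale_freq_data example_sfd = {
 *		.source		= SCALE_FREQ_SOURCE_ARCH,
 *		.set_freq_scale	= example_arch_set_freq_scale,
 *	};
 *
 *	topology_set_scale_freq_source(&example_sfd, cpu_present_mask);
 */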
84
85 void topology_clear_scale_freq_source(enum scale_freq_source source,
86                                       const struct cpumask *cpus)
87 {
88         struct scale_freq_data *sfd;
89         int cpu;
90
91         rcu_read_lock();
92
93         for_each_cpu(cpu, cpus) {
94                 sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
95
96                 if (sfd && sfd->source == source) {
97                         rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
98                         cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
99                 }
100         }
101
102         rcu_read_unlock();
103
104         /*
105          * Make sure all references to previous sft_data are dropped to avoid
106          * use-after-free races.
107          */
108         synchronize_rcu();
109
110         update_scale_freq_invariant(false);
111 }
112 EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);
113
114 void topology_scale_freq_tick(void)
115 {
116         struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));
117
118         if (sfd)
119                 sfd->set_freq_scale();
120 }
121
122 DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
123 EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);
124
125 void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
126                              unsigned long max_freq)
127 {
128         unsigned long scale;
129         int i;
130
131         if (WARN_ON_ONCE(!cur_freq || !max_freq))
132                 return;
133
134         /*
135          * If the use of counters for FIE is enabled, just return as we don't
136          * want to update the scale factor with information from CPUFREQ.
137          * Instead the scale factor will be updated from arch_scale_freq_tick.
138          */
139         if (supports_scale_freq_counters(cpus))
140                 return;
141
142         scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
143
144         for_each_cpu(i, cpus)
145                 per_cpu(arch_freq_scale, i) = scale;
146 }
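/*
 * Worked example (illustrative numbers): with SCHED_CAPACITY_SHIFT == 10, a
 * CPU running at cur_freq = 1200000 (kHz) out of max_freq = 2000000 (kHz)
 * gets
 *
 *	scale = (1200000 << 10) / 2000000 = 614
 *
 * i.e. roughly 60% of SCHED_CAPACITY_SCALE (1024). Only the ratio between
 * cur_freq and max_freq matters, so the two just need to use the same unit.
 */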
147
148 DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
149 EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
150
151 void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
152 {
153         per_cpu(cpu_scale, cpu) = capacity;
154 }
155
156 DEFINE_PER_CPU(unsigned long, thermal_pressure);
157
158 void topology_set_thermal_pressure(const struct cpumask *cpus,
159                                unsigned long th_pressure)
160 {
161         int cpu;
162
163         for_each_cpu(cpu, cpus)
164                 WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
165 }
166 EXPORT_SYMBOL_GPL(topology_set_thermal_pressure);
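/*
 * Illustrative note: callers such as CPU frequency cooling typically pass the
 * capacity lost to a thermal cap. As a rough sketch, if a CPU of capacity
 * 1024 is capped from 2.0 GHz to 1.5 GHz, the capped capacity is about
 * 1024 * 1500000 / 2000000 = 768, so th_pressure would be 1024 - 768 = 256.
 * The exact policy is up to the caller; this helper only stores the value per
 * CPU for the scheduler to account for when estimating remaining capacity.
 */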
167
168 static ssize_t cpu_capacity_show(struct device *dev,
169                                  struct device_attribute *attr,
170                                  char *buf)
171 {
172         struct cpu *cpu = container_of(dev, struct cpu, dev);
173
174         return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
175 }
176
177 static void update_topology_flags_workfn(struct work_struct *work);
178 static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
179
180 static DEVICE_ATTR_RO(cpu_capacity);
181
182 static int register_cpu_capacity_sysctl(void)
183 {
184         int i;
185         struct device *cpu;
186
187         for_each_possible_cpu(i) {
188                 cpu = get_cpu_device(i);
189                 if (!cpu) {
190                         pr_err("%s: too early to get CPU%d device!\n",
191                                __func__, i);
192                         continue;
193                 }
194                 device_create_file(cpu, &dev_attr_cpu_capacity);
195         }
196
197         return 0;
198 }
199 subsys_initcall(register_cpu_capacity_sysctl);
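/*
 * The attribute registered above appears per CPU in sysfs; for example
 * (values are illustrative):
 *
 *	$ cat /sys/devices/system/cpu/cpu0/cpu_capacity
 *	384
 *	$ cat /sys/devices/system/cpu/cpu4/cpu_capacity
 *	1024
 */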
200
201 static int update_topology;
202
203 int topology_update_cpu_topology(void)
204 {
205         return update_topology;
206 }
207
208 /*
209  * Updating the sched_domains can't be done directly from cpufreq callbacks
210  * due to locking, so queue the work for later.
211  */
212 static void update_topology_flags_workfn(struct work_struct *work)
213 {
214         update_topology = 1;
215         rebuild_sched_domains();
216         pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
217         update_topology = 0;
218 }
219
220 static DEFINE_PER_CPU(u32, freq_factor) = 1;
221 static u32 *raw_capacity;
222
223 static int free_raw_capacity(void)
224 {
225         kfree(raw_capacity);
226         raw_capacity = NULL;
227
228         return 0;
229 }
230
231 void topology_normalize_cpu_scale(void)
232 {
233         u64 capacity;
234         u64 capacity_scale;
235         int cpu;
236
237         if (!raw_capacity)
238                 return;
239
240         capacity_scale = 1;
241         for_each_possible_cpu(cpu) {
242                 capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
243                 capacity_scale = max(capacity, capacity_scale);
244         }
245
246         pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
247         for_each_possible_cpu(cpu) {
248                 capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
249                 capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
250                         capacity_scale);
251                 topology_set_cpu_scale(cpu, capacity);
252                 pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
253                         cpu, topology_get_cpu_scale(cpu));
254         }
255 }
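/*
 * Worked example (illustrative numbers): with raw_capacity = {512, 1024}
 * from capacity-dmips-mhz and freq_factor = {1500, 2000}, the products are
 * {768000, 2048000}, so capacity_scale = 2048000 and the normalized
 * capacities become
 *
 *	(768000  << 10) / 2048000 = 384
 *	(2048000 << 10) / 2048000 = 1024
 *
 * i.e. the biggest product always normalizes to SCHED_CAPACITY_SCALE and the
 * other CPUs are scaled relative to it.
 */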
256
257 bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
258 {
259         struct clk *cpu_clk;
260         static bool cap_parsing_failed;
261         int ret;
262         u32 cpu_capacity;
263
264         if (cap_parsing_failed)
265                 return false;
266
267         ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
268                                    &cpu_capacity);
269         if (!ret) {
270                 if (!raw_capacity) {
271                         raw_capacity = kcalloc(num_possible_cpus(),
272                                                sizeof(*raw_capacity),
273                                                GFP_KERNEL);
274                         if (!raw_capacity) {
275                                 cap_parsing_failed = true;
276                                 return false;
277                         }
278                 }
279                 raw_capacity[cpu] = cpu_capacity;
280                 pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
281                         cpu_node, raw_capacity[cpu]);
282
283                 /*
284                  * Update freq_factor for calculating early boot cpu capacities.
285                  * For non-clk CPU DVFS mechanisms, there is no way to get the
286                  * frequency value now, so assume the CPUs are running at the same
287                  * frequency (by keeping the initial freq_factor value).
288                  */
289                 cpu_clk = of_clk_get(cpu_node, 0);
290                 if (!PTR_ERR_OR_ZERO(cpu_clk)) {
291                         per_cpu(freq_factor, cpu) =
292                                 clk_get_rate(cpu_clk) / 1000;
293                         clk_put(cpu_clk);
294                 }
295         } else {
296                 if (raw_capacity) {
297                         pr_err("cpu_capacity: missing %pOF raw capacity\n",
298                                 cpu_node);
299                         pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
300                 }
301                 cap_parsing_failed = true;
302                 free_raw_capacity();
303         }
304
305         return !ret;
306 }
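/*
 * Illustrative device tree fragment (node names and values are examples
 * only): the capacity-dmips-mhz property parsed above lives in each cpu
 * node, e.g.
 *
 *	cpu0: cpu@0 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a53";
 *		reg = <0x0>;
 *		capacity-dmips-mhz = <512>;
 *	};
 *
 * See the capacity-dmips-mhz devicetree binding documentation for the full
 * definition of the property.
 */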
307
308 #ifdef CONFIG_CPU_FREQ
309 static cpumask_var_t cpus_to_visit;
310 static void parsing_done_workfn(struct work_struct *work);
311 static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
312
313 static int
314 init_cpu_capacity_callback(struct notifier_block *nb,
315                            unsigned long val,
316                            void *data)
317 {
318         struct cpufreq_policy *policy = data;
319         int cpu;
320
321         if (!raw_capacity)
322                 return 0;
323
324         if (val != CPUFREQ_CREATE_POLICY)
325                 return 0;
326
327         pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
328                  cpumask_pr_args(policy->related_cpus),
329                  cpumask_pr_args(cpus_to_visit));
330
331         cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
332
333         for_each_cpu(cpu, policy->related_cpus)
334                 per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;
335
336         if (cpumask_empty(cpus_to_visit)) {
337                 topology_normalize_cpu_scale();
338                 schedule_work(&update_topology_flags_work);
339                 free_raw_capacity();
340                 pr_debug("cpu_capacity: parsing done\n");
341                 schedule_work(&parsing_done_work);
342         }
343
344         return 0;
345 }
346
347 static struct notifier_block init_cpu_capacity_notifier = {
348         .notifier_call = init_cpu_capacity_callback,
349 };
350
351 static int __init register_cpufreq_notifier(void)
352 {
353         int ret;
354
355         /*
356          * On ACPI-based systems we need to use the default CPU capacity
357          * until we have the necessary code to parse the CPU capacity, so
358          * skip registering the cpufreq notifier.
359          */
360         if (!acpi_disabled || !raw_capacity)
361                 return -EINVAL;
362
363         if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
364                 return -ENOMEM;
365
366         cpumask_copy(cpus_to_visit, cpu_possible_mask);
367
368         ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
369                                         CPUFREQ_POLICY_NOTIFIER);
370
371         if (ret)
372                 free_cpumask_var(cpus_to_visit);
373
374         return ret;
375 }
376 core_initcall(register_cpufreq_notifier);
377
378 static void parsing_done_workfn(struct work_struct *work)
379 {
380         cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
381                                          CPUFREQ_POLICY_NOTIFIER);
382         free_cpumask_var(cpus_to_visit);
383 }
384
385 #else
386 core_initcall(free_raw_capacity);
387 #endif
388
389 #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
390 /*
391  * This function returns the logical CPU number of the node.
392  * There are basically three kinds of return values:
393  * (1) logical CPU number, which is >= 0.
394  * (2) -ENODEV when the device tree (DT) node is valid and found in the DT but
395  * there is no possible logical CPU in the kernel to match. This happens
396  * when CONFIG_NR_CPUS is configured to be smaller than the number of
397  * CPU nodes in the DT. We need to just ignore this case.
398  * (3) -1 if the node does not exist in the device tree.
399  */
400 static int __init get_cpu_for_node(struct device_node *node)
401 {
402         struct device_node *cpu_node;
403         int cpu;
404
405         cpu_node = of_parse_phandle(node, "cpu", 0);
406         if (!cpu_node)
407                 return -1;
408
409         cpu = of_cpu_node_to_id(cpu_node);
410         if (cpu >= 0)
411                 topology_parse_cpu_capacity(cpu_node, cpu);
412         else
413                 pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
414                         cpu_node, cpumask_pr_args(cpu_possible_mask));
415
416         of_node_put(cpu_node);
417         return cpu;
418 }
419
420 static int __init parse_core(struct device_node *core, int package_id,
421                              int core_id)
422 {
423         char name[20];
424         bool leaf = true;
425         int i = 0;
426         int cpu;
427         struct device_node *t;
428
429         do {
430                 snprintf(name, sizeof(name), "thread%d", i);
431                 t = of_get_child_by_name(core, name);
432                 if (t) {
433                         leaf = false;
434                         cpu = get_cpu_for_node(t);
435                         if (cpu >= 0) {
436                                 cpu_topology[cpu].package_id = package_id;
437                                 cpu_topology[cpu].core_id = core_id;
438                                 cpu_topology[cpu].thread_id = i;
439                         } else if (cpu != -ENODEV) {
440                                 pr_err("%pOF: Can't get CPU for thread\n", t);
441                                 of_node_put(t);
442                                 return -EINVAL;
443                         }
444                         of_node_put(t);
445                 }
446                 i++;
447         } while (t);
448
449         cpu = get_cpu_for_node(core);
450         if (cpu >= 0) {
451                 if (!leaf) {
452                         pr_err("%pOF: Core has both threads and CPU\n",
453                                core);
454                         return -EINVAL;
455                 }
456
457                 cpu_topology[cpu].package_id = package_id;
458                 cpu_topology[cpu].core_id = core_id;
459         } else if (leaf && cpu != -ENODEV) {
460                 pr_err("%pOF: Can't get CPU for leaf core\n", core);
461                 return -EINVAL;
462         }
463
464         return 0;
465 }
466
467 static int __init parse_cluster(struct device_node *cluster, int depth)
468 {
469         char name[20];
470         bool leaf = true;
471         bool has_cores = false;
472         struct device_node *c;
473         static int package_id __initdata;
474         int core_id = 0;
475         int i, ret;
476
477         /*
478          * First check for child clusters; we currently ignore any
479          * information about the nesting of clusters and present the
480          * scheduler with a flat list of them.
481          */
482         i = 0;
483         do {
484                 snprintf(name, sizeof(name), "cluster%d", i);
485                 c = of_get_child_by_name(cluster, name);
486                 if (c) {
487                         leaf = false;
488                         ret = parse_cluster(c, depth + 1);
489                         of_node_put(c);
490                         if (ret != 0)
491                                 return ret;
492                 }
493                 i++;
494         } while (c);
495
496         /* Now check for cores */
497         i = 0;
498         do {
499                 snprintf(name, sizeof(name), "core%d", i);
500                 c = of_get_child_by_name(cluster, name);
501                 if (c) {
502                         has_cores = true;
503
504                         if (depth == 0) {
505                                 pr_err("%pOF: cpu-map children should be clusters\n",
506                                        c);
507                                 of_node_put(c);
508                                 return -EINVAL;
509                         }
510
511                         if (leaf) {
512                                 ret = parse_core(c, package_id, core_id++);
513                         } else {
514                                 pr_err("%pOF: Non-leaf cluster with core %s\n",
515                                        cluster, name);
516                                 ret = -EINVAL;
517                         }
518
519                         of_node_put(c);
520                         if (ret != 0)
521                                 return ret;
522                 }
523                 i++;
524         } while (c);
525
526         if (leaf && !has_cores)
527                 pr_warn("%pOF: empty cluster\n", cluster);
528
529         if (leaf)
530                 package_id++;
531
532         return 0;
533 }
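/*
 * Illustrative cpu-map fragment matched by the parsing above (labels and
 * layout are examples only): clusterN nodes contain coreN nodes, which may
 * in turn contain threadN nodes on SMT parts.
 *
 *	cpu-map {
 *		cluster0 {
 *			core0 { cpu = <&cpu0>; };
 *			core1 { cpu = <&cpu1>; };
 *		};
 *		cluster1 {
 *			core0 { cpu = <&cpu2>; };
 *			core1 { cpu = <&cpu3>; };
 *		};
 *	};
 *
 * Nested clusters are accepted but flattened: each leaf cluster simply gets
 * the next package_id.
 */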
534
535 static int __init parse_dt_topology(void)
536 {
537         struct device_node *cn, *map;
538         int ret = 0;
539         int cpu;
540
541         cn = of_find_node_by_path("/cpus");
542         if (!cn) {
543                 pr_err("No CPU information found in DT\n");
544                 return 0;
545         }
546
547         /*
548          * When topology is provided, cpu-map is essentially a root
549          * cluster with restricted subnodes.
550          */
551         map = of_get_child_by_name(cn, "cpu-map");
552         if (!map)
553                 goto out;
554
555         ret = parse_cluster(map, 0);
556         if (ret != 0)
557                 goto out_map;
558
559         topology_normalize_cpu_scale();
560
561         /*
562          * Check that all cores are in the topology; the SMP code will
563          * only mark cores described in the DT as possible.
564          */
565         for_each_possible_cpu(cpu)
566                 if (cpu_topology[cpu].package_id == -1)
567                         ret = -EINVAL;
568
569 out_map:
570         of_node_put(map);
571 out:
572         of_node_put(cn);
573         return ret;
574 }
575 #endif
576
577 /*
578  * cpu topology table
579  */
580 struct cpu_topology cpu_topology[NR_CPUS];
581 EXPORT_SYMBOL_GPL(cpu_topology);
582
583 const struct cpumask *cpu_coregroup_mask(int cpu)
584 {
585         const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
586
587         /* Find the smaller of NUMA, core or LLC siblings */
588         if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
589                 /* not NUMA in package, let's use the package siblings */
590                 core_mask = &cpu_topology[cpu].core_sibling;
591         }
592         if (cpu_topology[cpu].llc_id != -1) {
593                 if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
594                         core_mask = &cpu_topology[cpu].llc_sibling;
595         }
596
597         return core_mask;
598 }
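/*
 * Example (illustrative): on a system where the package spans the whole NUMA
 * node but only four CPUs share the last-level cache, the checks above
 * narrow core_mask from the node mask to the package siblings and then to
 * the four LLC siblings, so the coregroup ends up being the smallest of the
 * three masks.
 */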
599
600 const struct cpumask *cpu_clustergroup_mask(int cpu)
601 {
602         return &cpu_topology[cpu].cluster_sibling;
603 }
604
605 void update_siblings_masks(unsigned int cpuid)
606 {
607         struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
608         int cpu;
609
610         /* update core and thread sibling masks */
611         for_each_online_cpu(cpu) {
612                 cpu_topo = &cpu_topology[cpu];
613
614                 if (cpuid_topo->llc_id == cpu_topo->llc_id) {
615                         cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
616                         cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
617                 }
618
619                 if (cpuid_topo->package_id != cpu_topo->package_id)
620                         continue;
621
622                 if (cpuid_topo->cluster_id == cpu_topo->cluster_id &&
623                     cpuid_topo->cluster_id != -1) {
624                         cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
625                         cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
626                 }
627
628                 cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
629                 cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
630
631                 if (cpuid_topo->core_id != cpu_topo->core_id)
632                         continue;
633
634                 cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
635                 cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
636         }
637 }
638
639 static void clear_cpu_topology(int cpu)
640 {
641         struct cpu_topology *cpu_topo = &cpu_topology[cpu];
642
643         cpumask_clear(&cpu_topo->llc_sibling);
644         cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
645
646         cpumask_clear(&cpu_topo->cluster_sibling);
647         cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);
648
649         cpumask_clear(&cpu_topo->core_sibling);
650         cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
651         cpumask_clear(&cpu_topo->thread_sibling);
652         cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
653 }
654
655 void __init reset_cpu_topology(void)
656 {
657         unsigned int cpu;
658
659         for_each_possible_cpu(cpu) {
660                 struct cpu_topology *cpu_topo = &cpu_topology[cpu];
661
662                 cpu_topo->thread_id = -1;
663                 cpu_topo->core_id = -1;
664                 cpu_topo->cluster_id = -1;
665                 cpu_topo->package_id = -1;
666                 cpu_topo->llc_id = -1;
667
668                 clear_cpu_topology(cpu);
669         }
670 }
671
672 void remove_cpu_topology(unsigned int cpu)
673 {
674         int sibling;
675
676         for_each_cpu(sibling, topology_core_cpumask(cpu))
677                 cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
678         for_each_cpu(sibling, topology_sibling_cpumask(cpu))
679                 cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
680         for_each_cpu(sibling, topology_cluster_cpumask(cpu))
681                 cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
682         for_each_cpu(sibling, topology_llc_cpumask(cpu))
683                 cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
684
685         clear_cpu_topology(cpu);
686 }
687
688 __weak int __init parse_acpi_topology(void)
689 {
690         return 0;
691 }
692
693 #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
694 void __init init_cpu_topology(void)
695 {
696         reset_cpu_topology();
697
698         /*
699          * Discard anything that was parsed if we hit an error so we
700          * don't use partial information.
701          */
702         if (parse_acpi_topology())
703                 reset_cpu_topology();
704         else if (of_have_populated_dt() && parse_dt_topology())
705                 reset_cpu_topology();
706 }
707 #endif