+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_TOPOLOGY_H
#define _ASM_ARM_TOPOLOGY_H
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
+#include <linux/arch_topology.h>
+
+/* Replace task scheduler's default frequency-invariant accounting */
+#define arch_scale_freq_capacity topology_get_freq_scale
+
+/* Replace task scheduler's default cpu-invariant accounting */
+#define arch_scale_cpu_capacity topology_get_cpu_scale
+
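/*
 * Example (not part of the patch): a sketch of how the scheduler's
 * load tracking consumes the two overrides above, assuming the
 * two-argument getter signatures declared in arch_topology.h further
 * down. scale_delta() is a hypothetical helper, not kernel code.
 */
static inline unsigned long scale_delta(unsigned long delta, int cpu)
{
	unsigned long scale_freq = arch_scale_freq_capacity(NULL, cpu);
	unsigned long scale_cpu = arch_scale_cpu_capacity(NULL, cpu);

	/* delta * (cur_freq / max_freq) * (cpu_capacity / 1024) */
	delta = (delta * scale_freq) >> SCHED_CAPACITY_SHIFT;
	return (delta * scale_cpu) >> SCHED_CAPACITY_SHIFT;
}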
#else
static inline void init_cpu_topology(void) { }
#include <linux/string.h>
#include <linux/sched/topology.h>
-static DEFINE_MUTEX(cpu_scale_mutex);
-static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
-unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
+void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
+			 unsigned long max_freq)
{
-	return per_cpu(cpu_scale, cpu);
+	unsigned long scale;
+	int i;
+
+	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
+
+	for_each_cpu(i, cpus)
+		per_cpu(freq_scale, i) = scale;
}
+static DEFINE_MUTEX(cpu_scale_mutex);
+DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+
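/*
 * Worked example (not part of the patch): the shift keeps the math in
 * integer fixed point. With assumed values cur_freq = 600000 kHz and
 * max_freq = 1200000 kHz:
 *
 *	scale = (600000 << 10) / 1200000 = 512
 *
 * i.e. the CPUs in @cpus currently run at 512/1024 = 50% of their
 * maximum frequency, and tracked load is scaled down accordingly.
 */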
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
per_cpu(cpu_scale, cpu) = capacity;
}
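/*
 * Context (not part of the patch): topology_set_cpu_scale() is fed
 * already-normalized values. A sketch of how a caller such as
 * topology_normalize_cpu_scale() could derive them, assuming
 * raw_capacity[] holds the capacity-dmips-mhz values parsed from DT
 * and capacity_scale is the largest of them:
 */
	for_each_possible_cpu(cpu)
		topology_set_cpu_scale(cpu,
			(raw_capacity[cpu] << SCHED_CAPACITY_SHIFT) /
			capacity_scale);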
#ifdef CONFIG_CPU_FREQ
-static cpumask_var_t cpus_to_visit;
-static void parsing_done_workfn(struct work_struct *work);
-static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
+static cpumask_var_t cpus_to_visit __initdata;
+static void __init parsing_done_workfn(struct work_struct *work);
+static __initdata DECLARE_WORK(parsing_done_work, parsing_done_workfn);
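/*
 * Note (not part of the patch): __init/__initdata place these objects
 * in the kernel's .init sections, which are discarded once boot
 * completes; that is safe here because the capacity parsing they
 * support only runs during early boot, before the init memory is
 * freed.
 */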
-static int
+static int __init
init_cpu_capacity_callback(struct notifier_block *nb,
unsigned long val,
void *data)
return 0;
}
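/*
 * Sketch of the callback's core logic (paraphrased, not the verbatim
 * function body), assuming @data carries the struct cpufreq_policy
 * being notified:
 *
 *	cpumask_andnot(cpus_to_visit, cpus_to_visit,
 *		       policy->related_cpus);
 *	if (cpumask_empty(cpus_to_visit)) {
 *		topology_normalize_cpu_scale();
 *		schedule_work(&parsing_done_work);
 *	}
 *
 * i.e. each new policy strips its CPUs from cpus_to_visit; once every
 * CPU has been seen, capacities are normalized and the teardown work
 * is scheduled.
 */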
-static struct notifier_block init_cpu_capacity_notifier = {
+static struct notifier_block init_cpu_capacity_notifier __initdata = {
.notifier_call = init_cpu_capacity_callback,
};
static int __init register_cpufreq_notifier(void)
{
+	int ret;
+
	/*
	 * on ACPI-based systems we need to use the default cpu capacity
	 * until we have the necessary code to parse the cpu capacity, so
	 * skip registering cpufreq notifier.
	 */
	if (!acpi_disabled || !raw_capacity)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

cpumask_copy(cpus_to_visit, cpu_possible_mask);
-	return cpufreq_register_notifier(&init_cpu_capacity_notifier,
-					 CPUFREQ_POLICY_NOTIFIER);
+	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
+					CPUFREQ_POLICY_NOTIFIER);
+
+	if (ret)
+		free_cpumask_var(cpus_to_visit);
+
+	return ret;
}
core_initcall(register_cpufreq_notifier);
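/*
 * Note (not part of the patch): core_initcall() runs at initcall
 * level 1, well before the device/late initcall levels where cpufreq
 * drivers typically register, so the notifier is installed before the
 * first CPUFREQ_POLICY_NOTIFIER event can fire.
 */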
-static void parsing_done_workfn(struct work_struct *work)
+static void __init parsing_done_workfn(struct work_struct *work)
{
cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
CPUFREQ_POLICY_NOTIFIER);
+	free_cpumask_var(cpus_to_visit);
}
#else
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/arch_topology.h - arch specific cpu topology information
*/
#ifndef _LINUX_ARCH_TOPOLOGY_H_
#define _LINUX_ARCH_TOPOLOGY_H_
#include <linux/types.h>
+#include <linux/percpu.h>
void topology_normalize_cpu_scale(void);
struct device_node;
bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
+DECLARE_PER_CPU(unsigned long, cpu_scale);
+
struct sched_domain;
-unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu);
+static inline
+unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
+{
+	return per_cpu(cpu_scale, cpu);
+}
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
+DECLARE_PER_CPU(unsigned long, freq_scale);
+
+static inline
+unsigned long topology_get_freq_scale(struct sched_domain *sd, int cpu)
+{
+	return per_cpu(freq_scale, cpu);
+}
+
#endif /* _LINUX_ARCH_TOPOLOGY_H_ */
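/*
 * Usage example (hypothetical caller, not part of the patch): the two
 * getters compose to give a CPU's current, frequency-adjusted
 * capacity in the same 1024-based fixed point.
 */
static inline unsigned long current_capacity_of(int cpu)
{
	unsigned long cap = topology_get_cpu_scale(NULL, cpu);

	return (cap * topology_get_freq_scale(NULL, cpu))
			>> SCHED_CAPACITY_SHIFT;
}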