drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Uncore Frequency Setting
 * Copyright (c) 2022, Intel Corporation.
 * All rights reserved.
 *
 * Provides an interface to set MSR 0x620 at per-die granularity. On CPU
 * online, one control CPU is identified per die to read/write the limits.
 * This control CPU is changed when it goes offline. When the last CPU in a
 * die goes offline, the sysfs object for that die is removed.
 * The majority of the actual code is related to creating the sysfs
 * attributes and their read/write handlers.
 *
 * Author: Srinivas Pandruvada <[email protected]>
 */
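
/*
 * Note: the sysfs interface itself is created by the shared
 * uncore-frequency-common code. On these systems it typically appears as
 * /sys/devices/system/cpu/intel_uncore_frequency/package_XX_die_YY/ with
 * attributes such as min_freq_khz, max_freq_khz and current_freq_khz
 * (illustrative layout; see uncore-frequency-common.c for the details).
 */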

#include <linux/bitfield.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

#include "uncore-frequency-common.h"

/* Max instances for uncore data, one for each die */
static int uncore_max_entries __read_mostly;
/* Storage for uncore data for all instances */
static struct uncore_data *uncore_instances;
/* Stores the CPU mask of the target CPUs to use during uncore read/write */
static cpumask_t uncore_cpu_mask;
/* CPU online callback register instance */
static enum cpuhp_state uncore_hp_state __read_mostly;

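/*
 * MSR layout as used below: MSR 0x620 (UNCORE_RATIO_LIMIT) holds the maximum
 * ratio limit in bits 6:0 and the minimum ratio limit in bits 14:8;
 * MSR 0x621 (UNCORE_PERF_STATUS) reports the current ratio in bits 6:0.
 * Each ratio unit corresponds to 100 MHz (100000 kHz), hence
 * UNCORE_FREQ_KHZ_MULTIPLIER for the kHz conversions.
 */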
#define MSR_UNCORE_RATIO_LIMIT  0x620
#define MSR_UNCORE_PERF_STATUS  0x621
#define UNCORE_FREQ_KHZ_MULTIPLIER      100000

#define UNCORE_MAX_RATIO_MASK   GENMASK_ULL(6, 0)
#define UNCORE_MIN_RATIO_MASK   GENMASK_ULL(14, 8)

#define UNCORE_CURRENT_RATIO_MASK       GENMASK_ULL(6, 0)

static int uncore_read_control_freq(struct uncore_data *data, unsigned int *value,
                                    enum uncore_index index)
{
        u64 cap;
        int ret;

        if (data->control_cpu < 0)
                return -ENXIO;

        ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
        if (ret)
                return ret;

        if (index == UNCORE_INDEX_MAX_FREQ)
                *value = FIELD_GET(UNCORE_MAX_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER;
        else
                *value = FIELD_GET(UNCORE_MIN_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER;

        return 0;
}

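/*
 * Read-modify-write of MSR_UNCORE_RATIO_LIMIT: only the field selected by
 * @index is updated, the other limit is preserved. @input is in kHz, e.g.
 * 2400000 kHz / UNCORE_FREQ_KHZ_MULTIPLIER = ratio 24.
 */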
static int uncore_write_control_freq(struct uncore_data *data, unsigned int input,
                                     enum uncore_index index)
{
        int ret;
        u64 cap;

        input /= UNCORE_FREQ_KHZ_MULTIPLIER;
        if (!input || input > FIELD_MAX(UNCORE_MAX_RATIO_MASK))
                return -EINVAL;

        if (data->control_cpu < 0)
                return -ENXIO;

        ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
        if (ret)
                return ret;

        if (index == UNCORE_INDEX_MAX_FREQ) {
                cap &= ~UNCORE_MAX_RATIO_MASK;
                cap |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, input);
        } else {
                cap &= ~UNCORE_MIN_RATIO_MASK;
                cap |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input);
        }

        ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
        if (ret)
                return ret;

        data->stored_uncore_data = cap;

        return 0;
}

static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
{
        u64 ratio;
        int ret;

        if (data->control_cpu < 0)
                return -ENXIO;

        ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_PERF_STATUS, &ratio);
        if (ret)
                return ret;

        *freq = FIELD_GET(UNCORE_CURRENT_RATIO_MASK, ratio) * UNCORE_FREQ_KHZ_MULTIPLIER;

        return 0;
}

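/* Dispatch read requests from the common sysfs code to the MSR helpers above. */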
static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index)
{
        switch (index) {
        case UNCORE_INDEX_MIN_FREQ:
        case UNCORE_INDEX_MAX_FREQ:
                return uncore_read_control_freq(data, value, index);

        case UNCORE_INDEX_CURRENT_FREQ:
                return uncore_read_freq(data, value);

        default:
                break;
        }

        return -EOPNOTSUPP;
}

/* Caller provides protection */
static struct uncore_data *uncore_get_instance(unsigned int cpu)
{
        int id = topology_logical_die_id(cpu);

        if (id >= 0 && id < uncore_max_entries)
                return &uncore_instances[id];

        return NULL;
}

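/*
 * The first CPU to come online in a die is elected as the control CPU used
 * for MSR access and registers the sysfs entry for that die.
 */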
static int uncore_event_cpu_online(unsigned int cpu)
{
        struct uncore_data *data;
        int target;

        /* Check if there is an online cpu in the die for uncore MSR access */
        target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
        if (target < nr_cpu_ids)
                return 0;

        /* Use this CPU on this die as a control CPU */
        cpumask_set_cpu(cpu, &uncore_cpu_mask);

        data = uncore_get_instance(cpu);
        if (!data)
                return 0;

        data->package_id = topology_physical_package_id(cpu);
        data->die_id = topology_die_id(cpu);
        data->domain_id = UNCORE_DOMAIN_ID_INVALID;

        return uncore_freq_add_entry(data, cpu);
}

static int uncore_event_cpu_offline(unsigned int cpu)
{
        struct uncore_data *data;
        int target;

        data = uncore_get_instance(cpu);
        if (!data)
                return 0;

        /* Check if this cpu was being used for uncore MSR access */
        if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
                return 0;

        /* Find a new cpu to access the uncore MSRs */
        target = cpumask_any_but(topology_die_cpumask(cpu), cpu);

        if (target < nr_cpu_ids) {
                cpumask_set_cpu(target, &uncore_cpu_mask);
                uncore_freq_add_entry(data, target);
        } else {
                uncore_freq_remove_die_entry(data);
        }

        return 0;
}

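/*
 * The uncore ratio limit MSR is not necessarily preserved across
 * suspend/hibernation, so on resume restore the last value written via
 * sysfs for every die that has one stored.
 */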
static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode,
                            void *_unused)
{
        int i;

        switch (mode) {
        case PM_POST_HIBERNATION:
        case PM_POST_RESTORE:
        case PM_POST_SUSPEND:
                for (i = 0; i < uncore_max_entries; ++i) {
                        struct uncore_data *data = &uncore_instances[i];

                        if (!data || !data->valid || !data->stored_uncore_data)
                                return 0;

                        wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT,
                                      data->stored_uncore_data);
                }
                break;
        default:
                break;
        }
        return 0;
}

static struct notifier_block uncore_pm_nb = {
        .notifier_call = uncore_pm_notify,
};

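/* CPU models known to expose uncore frequency control via MSR 0x620 */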
static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
        X86_MATCH_VFM(INTEL_BROADWELL_G,        NULL),
        X86_MATCH_VFM(INTEL_BROADWELL_X,        NULL),
        X86_MATCH_VFM(INTEL_BROADWELL_D,        NULL),
        X86_MATCH_VFM(INTEL_SKYLAKE_X,  NULL),
        X86_MATCH_VFM(INTEL_ICELAKE_X,  NULL),
        X86_MATCH_VFM(INTEL_ICELAKE_D,  NULL),
        X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, NULL),
        X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, NULL),
        X86_MATCH_VFM(INTEL_KABYLAKE, NULL),
        X86_MATCH_VFM(INTEL_KABYLAKE_L, NULL),
        X86_MATCH_VFM(INTEL_COMETLAKE, NULL),
        X86_MATCH_VFM(INTEL_COMETLAKE_L, NULL),
        X86_MATCH_VFM(INTEL_CANNONLAKE_L, NULL),
        X86_MATCH_VFM(INTEL_ICELAKE, NULL),
        X86_MATCH_VFM(INTEL_ICELAKE_L, NULL),
        X86_MATCH_VFM(INTEL_ROCKETLAKE, NULL),
        X86_MATCH_VFM(INTEL_TIGERLAKE, NULL),
        X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL),
        X86_MATCH_VFM(INTEL_ALDERLAKE, NULL),
        X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL),
        X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL),
        X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL),
        X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL),
        X86_MATCH_VFM(INTEL_METEORLAKE, NULL),
        X86_MATCH_VFM(INTEL_METEORLAKE_L, NULL),
        X86_MATCH_VFM(INTEL_ARROWLAKE, NULL),
        X86_MATCH_VFM(INTEL_ARROWLAKE_H, NULL),
        X86_MATCH_VFM(INTEL_LUNARLAKE_M, NULL),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, intel_uncore_cpu_ids);

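/*
 * Module init: bail out on hypervisors and unsupported CPUs, allocate one
 * uncore_data instance per die, register with the common sysfs code, then
 * set up CPU hotplug and PM notifiers. Errors unwind in reverse order.
 */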
static int __init intel_uncore_init(void)
{
        const struct x86_cpu_id *id;
        int ret;

        if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
                return -ENODEV;

        id = x86_match_cpu(intel_uncore_cpu_ids);
        if (!id)
                return -ENODEV;

        uncore_max_entries = topology_max_packages() *
                                        topology_max_dies_per_package();
        uncore_instances = kcalloc(uncore_max_entries,
                                   sizeof(*uncore_instances), GFP_KERNEL);
        if (!uncore_instances)
                return -ENOMEM;

        ret = uncore_freq_common_init(uncore_read, uncore_write_control_freq);
        if (ret)
                goto err_free;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                                "platform/x86/uncore-freq:online",
                                uncore_event_cpu_online,
                                uncore_event_cpu_offline);
        if (ret < 0)
                goto err_rem_kobj;

        uncore_hp_state = ret;

        ret = register_pm_notifier(&uncore_pm_nb);
        if (ret)
                goto err_rem_state;

        return 0;

err_rem_state:
        cpuhp_remove_state(uncore_hp_state);
err_rem_kobj:
        uncore_freq_common_exit();
err_free:
        kfree(uncore_instances);

        return ret;
}
module_init(intel_uncore_init)

static void __exit intel_uncore_exit(void)
{
        int i;

        unregister_pm_notifier(&uncore_pm_nb);
        cpuhp_remove_state(uncore_hp_state);
        for (i = 0; i < uncore_max_entries; ++i)
                uncore_freq_remove_die_entry(&uncore_instances[i]);
        uncore_freq_common_exit();
        kfree(uncore_instances);
}
module_exit(intel_uncore_exit)

MODULE_IMPORT_NS(INTEL_UNCORE_FREQUENCY);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Uncore Frequency Limits Driver");