// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Uncore Frequency Setting
 * Copyright (c) 2022, Intel Corporation.
 * All rights reserved.
 *
 * Provides an interface to set MSR 0x620 at per-die granularity. On CPU
 * online, one control CPU is identified per die to read/write the limits.
 * If a control CPU goes offline, another online CPU in the same die takes
 * over. When the last CPU in a die goes offline, the sysfs object for that
 * die is removed. The majority of the code deals with creating the sysfs
 * objects and reading/writing their attributes.
 *
 * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 */
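
/*
 * The limits are exposed through sysfs by the common uncore-frequency
 * module. For reference (per Documentation/admin-guide/pm/
 * intel_uncore_frequency_scaling.rst), the attributes typically appear as:
 *
 *   /sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00/
 *       min_freq_khz
 *       max_freq_khz
 *       initial_min_freq_khz
 *       initial_max_freq_khz
 *       current_freq_khz
 *
 * Writes to min/max_freq_khz end up in uncore_write_control_freq() below.
 */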

#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

#include "uncore-frequency-common.h"

/* Max instances for uncore data, one for each die */
static int uncore_max_entries __read_mostly;
/* Storage for uncore data for all instances */
static struct uncore_data *uncore_instances;
/* Stores the CPU mask of the target CPUs to use during uncore read/write */
static cpumask_t uncore_cpu_mask;
/* CPU hotplug state returned by cpuhp_setup_state() registration */
static enum cpuhp_state uncore_hp_state __read_mostly;

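/*
 * MSR_UNCORE_RATIO_LIMIT (0x620) layout, as used below:
 *   bits  6:0  - maximum uncore ratio
 *   bits 14:8  - minimum uncore ratio
 * Each ratio unit corresponds to 100 MHz, hence the kHz multiplier of
 * 100000 used when converting to and from the sysfs values.
 */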
#define MSR_UNCORE_RATIO_LIMIT  0x620
#define MSR_UNCORE_PERF_STATUS  0x621
#define UNCORE_FREQ_KHZ_MULTIPLIER      100000

static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
                                    unsigned int *max)
{
        u64 cap;
        int ret;

        if (data->control_cpu < 0)
                return -ENXIO;

        ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
        if (ret)
                return ret;

        *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
        *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER;

        return 0;
}

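/*
 * Write a new min or max limit to MSR_UNCORE_RATIO_LIMIT. @input is in kHz
 * and is converted to a ratio; a non-zero @min_max selects the max-limit
 * field (bits 6:0), otherwise the min-limit field (bits 14:8) is updated.
 * A read-modify-write preserves the field that is not being changed.
 */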
static int uncore_write_control_freq(struct uncore_data *data, unsigned int input,
                                     unsigned int min_max)
{
        int ret;
        u64 cap;

        input /= UNCORE_FREQ_KHZ_MULTIPLIER;
        if (!input || input > 0x7F)
                return -EINVAL;

        if (data->control_cpu < 0)
                return -ENXIO;

        ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
        if (ret)
                return ret;

        if (min_max) {
                cap &= ~0x7F;
                cap |= input;
        } else {
                cap &= ~GENMASK(14, 8);
                cap |= (input << 8);
        }

        ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
        if (ret)
                return ret;

        data->stored_uncore_data = cap;

        return 0;
}

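/* Read the current uncore frequency (kHz) from MSR_UNCORE_PERF_STATUS */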
static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
{
        u64 ratio;
        int ret;

        if (data->control_cpu < 0)
                return -ENXIO;

        ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_PERF_STATUS, &ratio);
        if (ret)
                return ret;

        *freq = (ratio & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;

        return 0;
}

/* Caller provides protection */
static struct uncore_data *uncore_get_instance(unsigned int cpu)
{
        int id = topology_logical_die_id(cpu);

        if (id >= 0 && id < uncore_max_entries)
                return &uncore_instances[id];

        return NULL;
}

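/*
 * CPU hotplug online callback: the first CPU to come online in a die is
 * elected as the control CPU, and the sysfs entry for that die is created.
 */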
static int uncore_event_cpu_online(unsigned int cpu)
{
        struct uncore_data *data;
        int target;

        /* Check if there is an online cpu in the die for uncore MSR access */
        target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
        if (target < nr_cpu_ids)
                return 0;

        /* Use this CPU on this die as a control CPU */
        cpumask_set_cpu(cpu, &uncore_cpu_mask);

        data = uncore_get_instance(cpu);
        if (!data)
                return 0;

        data->package_id = topology_physical_package_id(cpu);
        data->die_id = topology_die_id(cpu);

        return uncore_freq_add_entry(data, cpu);
}

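/*
 * CPU hotplug offline callback: if the CPU going offline was the control
 * CPU for its die, migrate control to another online CPU in the die, or
 * remove the die's sysfs entry when it was the last one.
 */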
static int uncore_event_cpu_offline(unsigned int cpu)
{
        struct uncore_data *data;
        int target;

        data = uncore_get_instance(cpu);
        if (!data)
                return 0;

        /* Check if this CPU was the control CPU for uncore MSR access */
        if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
                return 0;

        /* Find a new cpu to set uncore MSR */
        target = cpumask_any_but(topology_die_cpumask(cpu), cpu);

        if (target < nr_cpu_ids) {
                cpumask_set_cpu(target, &uncore_cpu_mask);
                uncore_freq_add_entry(data, target);
        } else {
                uncore_freq_remove_die_entry(data);
        }

        return 0;
}

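/*
 * On resume from suspend/hibernation the limit MSRs may have been reset,
 * so restore any limits that user space had set before.
 */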
static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode,
                            void *_unused)
{
        int i;

        switch (mode) {
        case PM_POST_HIBERNATION:
        case PM_POST_RESTORE:
        case PM_POST_SUSPEND:
                for (i = 0; i < uncore_max_entries; ++i) {
                        struct uncore_data *data = &uncore_instances[i];

                        /* Skip instances with nothing to restore */
                        if (!data->valid || !data->stored_uncore_data)
                                continue;

                        wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT,
                                      data->stored_uncore_data);
                }
                break;
        default:
                break;
        }
        return 0;
}

static struct notifier_block uncore_pm_nb = {
        .notifier_call = uncore_pm_notify,
};


static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
        X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, NULL),
        X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL),
        X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, NULL),
        X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,   NULL),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,   NULL),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,   NULL),
        X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
        X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, NULL),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, NULL),
        X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, NULL),
        X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, NULL),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, intel_uncore_cpu_ids);

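/*
 * Module init: check for a supported (non-virtualized) CPU, allocate one
 * instance per die, register the common sysfs interface, then the CPU
 * hotplug callbacks and the PM notifier. Errors unwind in reverse order.
 */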
static int __init intel_uncore_init(void)
{
        const struct x86_cpu_id *id;
        int ret;

        if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
                return -ENODEV;

        id = x86_match_cpu(intel_uncore_cpu_ids);
        if (!id)
                return -ENODEV;

        uncore_max_entries = topology_max_packages() *
                                        topology_max_die_per_package();
        uncore_instances = kcalloc(uncore_max_entries,
                                   sizeof(*uncore_instances), GFP_KERNEL);
        if (!uncore_instances)
                return -ENOMEM;

        ret = uncore_freq_common_init(uncore_read_control_freq, uncore_write_control_freq,
                                      uncore_read_freq);
        if (ret)
                goto err_free;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                                "platform/x86/uncore-freq:online",
                                uncore_event_cpu_online,
                                uncore_event_cpu_offline);
        if (ret < 0)
                goto err_rem_kobj;

        uncore_hp_state = ret;

        ret = register_pm_notifier(&uncore_pm_nb);
        if (ret)
                goto err_rem_state;

        return 0;

err_rem_state:
        cpuhp_remove_state(uncore_hp_state);
err_rem_kobj:
        uncore_freq_common_exit();
err_free:
        kfree(uncore_instances);

        return ret;
}
module_init(intel_uncore_init)

static void __exit intel_uncore_exit(void)
{
        int i;

        unregister_pm_notifier(&uncore_pm_nb);
        cpuhp_remove_state(uncore_hp_state);
        for (i = 0; i < uncore_max_entries; ++i)
                uncore_freq_remove_die_entry(&uncore_instances[i]);
        uncore_freq_common_exit();
        kfree(uncore_instances);
}
module_exit(intel_uncore_exit)

MODULE_IMPORT_NS(INTEL_UNCORE_FREQUENCY);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Uncore Frequency Limits Driver");