1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * amd-pstate.c - AMD Processor P-state Frequency Driver
4  *
5  * Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved.
6  *
7  * Author: Huang Rui <[email protected]>
8  *
9  * AMD P-State introduces a new CPU performance scaling design for AMD
10  * processors using the ACPI Collaborative Performance and Power Control (CPPC)
11  * feature, which works with the AMD SMU firmware to provide a finer grained
12  * frequency control range. It replaces the legacy ACPI P-States control and
13  * provides a flexible, low-latency interface for the Linux kernel to directly
14  * communicate performance hints to the hardware.
15  *
16  * AMD P-State is supported on recent AMD Zen based CPU series, including some
17  * Zen2 and Zen3 processors. _CPC needs to be present in the ACPI tables of an
18  * AMD P-State supported system. There are two types of hardware implementations
19  * for AMD P-State: 1) Full MSR Solution and 2) Shared Memory Solution. The
20  * X86_FEATURE_CPPC CPU feature flag is used to distinguish between the two.
21  */
22
23 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/init.h>
28 #include <linux/smp.h>
29 #include <linux/sched.h>
30 #include <linux/cpufreq.h>
31 #include <linux/compiler.h>
32 #include <linux/dmi.h>
33 #include <linux/slab.h>
34 #include <linux/acpi.h>
35 #include <linux/io.h>
36 #include <linux/delay.h>
37 #include <linux/uaccess.h>
38 #include <linux/static_call.h>
39 #include <linux/topology.h>
40
41 #include <acpi/processor.h>
42 #include <acpi/cppc_acpi.h>
43
44 #include <asm/msr.h>
45 #include <asm/processor.h>
46 #include <asm/cpufeature.h>
47 #include <asm/cpu_device_id.h>
48
49 #include "amd-pstate.h"
50 #include "amd-pstate-trace.h"
51
52 #define AMD_PSTATE_TRANSITION_LATENCY   20000
53 #define AMD_PSTATE_TRANSITION_DELAY     1000
54 #define AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY 600
55
56 #define AMD_CPPC_EPP_PERFORMANCE                0x00
57 #define AMD_CPPC_EPP_BALANCE_PERFORMANCE        0x80
58 #define AMD_CPPC_EPP_BALANCE_POWERSAVE          0xBF
59 #define AMD_CPPC_EPP_POWERSAVE                  0xFF
60
61 static const char * const amd_pstate_mode_string[] = {
62         [AMD_PSTATE_UNDEFINED]   = "undefined",
63         [AMD_PSTATE_DISABLE]     = "disable",
64         [AMD_PSTATE_PASSIVE]     = "passive",
65         [AMD_PSTATE_ACTIVE]      = "active",
66         [AMD_PSTATE_GUIDED]      = "guided",
67         NULL,
68 };
69
70 const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode)
71 {
72         if (mode < 0 || mode >= AMD_PSTATE_MAX)
73                 return NULL;
74         return amd_pstate_mode_string[mode];
75 }
76 EXPORT_SYMBOL_GPL(amd_pstate_get_mode_string);
77
78 struct quirk_entry {
79         u32 nominal_freq;
80         u32 lowest_freq;
81 };
82
83 static struct cpufreq_driver *current_pstate_driver;
84 static struct cpufreq_driver amd_pstate_driver;
85 static struct cpufreq_driver amd_pstate_epp_driver;
86 static int cppc_state = AMD_PSTATE_UNDEFINED;
87 static bool cppc_enabled;
88 static bool amd_pstate_prefcore = true;
89 static struct quirk_entry *quirks;
90
91 /*
92  * AMD Energy Preference Performance (EPP)
93  * The EPP is used in the CCLK DPM controller to drive
94  * the frequency at which a core operates during
95  * short periods of activity. EPP values are used for the
96  * different OS profiles (balanced, performance, power savings);
97  * the display strings corresponding to each EPP index are
98  * listed in energy_perf_strings[]:
99  *      index           String
100  *-------------------------------------
101  *      0               default
102  *      1               performance
103  *      2               balance_performance
104  *      3               balance_power
105  *      4               power
106  */
107 enum energy_perf_value_index {
108         EPP_INDEX_DEFAULT = 0,
109         EPP_INDEX_PERFORMANCE,
110         EPP_INDEX_BALANCE_PERFORMANCE,
111         EPP_INDEX_BALANCE_POWERSAVE,
112         EPP_INDEX_POWERSAVE,
113 };
114
115 static const char * const energy_perf_strings[] = {
116         [EPP_INDEX_DEFAULT] = "default",
117         [EPP_INDEX_PERFORMANCE] = "performance",
118         [EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
119         [EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
120         [EPP_INDEX_POWERSAVE] = "power",
121         NULL
122 };
123
124 static unsigned int epp_values[] = {
125         [EPP_INDEX_DEFAULT] = 0,
126         [EPP_INDEX_PERFORMANCE] = AMD_CPPC_EPP_PERFORMANCE,
127         [EPP_INDEX_BALANCE_PERFORMANCE] = AMD_CPPC_EPP_BALANCE_PERFORMANCE,
128         [EPP_INDEX_BALANCE_POWERSAVE] = AMD_CPPC_EPP_BALANCE_POWERSAVE,
129         [EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
130 };
131
132 typedef int (*cppc_mode_transition_fn)(int);
133
134 static struct quirk_entry quirk_amd_7k62 = {
135         .nominal_freq = 2600,
136         .lowest_freq = 550,
137 };
138
139 static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
140 {
141         /*
142          * Match the broken BIOS for family 17h processors that support CPPC V2:
143          * the broken BIOS lacks the nominal_freq and lowest_freq capability
144          * definitions in the ACPI tables.
145          */
146         if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
147                 quirks = dmi->driver_data;
148                 pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident);
149                 return 1;
150         }
151
152         return 0;
153 }
154
155 static const struct dmi_system_id amd_pstate_quirks_table[] __initconst = {
156         {
157                 .callback = dmi_matched_7k62_bios_bug,
158                 .ident = "AMD EPYC 7K62",
159                 .matches = {
160                         DMI_MATCH(DMI_BIOS_VERSION, "5.14"),
161                         DMI_MATCH(DMI_BIOS_RELEASE, "12/12/2019"),
162                 },
163                 .driver_data = &quirk_amd_7k62,
164         },
165         {}
166 };
167 MODULE_DEVICE_TABLE(dmi, amd_pstate_quirks_table);
168
169 static inline int get_mode_idx_from_str(const char *str, size_t size)
170 {
171         int i;
172
173         for (i = 0; i < AMD_PSTATE_MAX; i++) {
174                 if (!strncmp(str, amd_pstate_mode_string[i], size))
175                         return i;
176         }
177         return -EINVAL;
178 }
179
180 static DEFINE_MUTEX(amd_pstate_limits_lock);
181 static DEFINE_MUTEX(amd_pstate_driver_lock);
182
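/*
 * amd_pstate_get_epp - read the current EPP value for a CPU.
 *
 * On full MSR systems the EPP lives in bits [31:24] of MSR_AMD_CPPC_REQ and is
 * taken from the @cppc_req_cached argument (or read from the MSR when zero is
 * passed). On shared memory systems it is fetched through the ACPI CPPC
 * interface via cppc_get_epp_perf().
 */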
183 static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
184 {
185         u64 epp;
186         int ret;
187
188         if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
189                 if (!cppc_req_cached) {
190                         epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
191                                         &cppc_req_cached);
192                         if (epp)
193                                 return epp;
194                 }
195                 epp = (cppc_req_cached >> 24) & 0xFF;
196         } else {
197                 ret = cppc_get_epp_perf(cpudata->cpu, &epp);
198                 if (ret < 0) {
199                         pr_debug("Could not retrieve energy perf value (%d)\n", ret);
200                         return -EIO;
201                 }
202         }
203
204         return (s16)(epp & 0xff);
205 }
206
207 static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
208 {
209         s16 epp;
210         int index = -EINVAL;
211
212         epp = amd_pstate_get_epp(cpudata, 0);
213         if (epp < 0)
214                 return epp;
215
216         switch (epp) {
217         case AMD_CPPC_EPP_PERFORMANCE:
218                 index = EPP_INDEX_PERFORMANCE;
219                 break;
220         case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
221                 index = EPP_INDEX_BALANCE_PERFORMANCE;
222                 break;
223         case AMD_CPPC_EPP_BALANCE_POWERSAVE:
224                 index = EPP_INDEX_BALANCE_POWERSAVE;
225                 break;
226         case AMD_CPPC_EPP_POWERSAVE:
227                 index = EPP_INDEX_POWERSAVE;
228                 break;
229         default:
230                 break;
231         }
232
233         return index;
234 }
235
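/*
 * pstate_update_perf - issue the CPPC request on full MSR systems.
 *
 * The min/des/max perf arguments have already been packed into
 * cppc_req_cached by the caller; this helper only writes the cached value to
 * MSR_AMD_CPPC_REQ, locally when fast switching or cross-CPU otherwise.
 */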
236 static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
237                                u32 des_perf, u32 max_perf, bool fast_switch)
238 {
239         if (fast_switch)
240                 wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
241         else
242                 wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
243                               READ_ONCE(cpudata->cppc_req_cached));
244 }
245
246 DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
247
248 static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
249                                           u32 min_perf, u32 des_perf,
250                                           u32 max_perf, bool fast_switch)
251 {
252         static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
253                                             max_perf, fast_switch);
254 }
255
256 static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
257 {
258         int ret;
259         struct cppc_perf_ctrls perf_ctrls;
260
261         if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
262                 u64 value = READ_ONCE(cpudata->cppc_req_cached);
263
264                 value &= ~GENMASK_ULL(31, 24);
265                 value |= (u64)epp << 24;
266                 WRITE_ONCE(cpudata->cppc_req_cached, value);
267
268                 ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
269                 if (!ret)
270                         cpudata->epp_cached = epp;
271         } else {
272                 amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
273                                              cpudata->max_limit_perf, false);
274
275                 perf_ctrls.energy_perf = epp;
276                 ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
277                 if (ret) {
278                         pr_debug("failed to set energy perf value (%d)\n", ret);
279                         return ret;
280                 }
281                 cpudata->epp_cached = epp;
282         }
283
284         return ret;
285 }
286
287 static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
288                 int pref_index)
289 {
290         int epp = -EINVAL;
291         int ret;
292
293         if (!pref_index)
294                 epp = cpudata->epp_default;
295
296         if (epp == -EINVAL)
297                 epp = epp_values[pref_index];
298
299         if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
300                 pr_debug("EPP cannot be set under performance policy\n");
301                 return -EBUSY;
302         }
303
304         ret = amd_pstate_set_epp(cpudata, epp);
305
306         return ret;
307 }
308
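/*
 * pstate_enable - toggle CPPC via MSR_AMD_CPPC_ENABLE on full MSR systems.
 *
 * The enable MSR is written only once per logical package;
 * logical_proc_id_mask tracks the packages that have already been handled.
 */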
309 static inline int pstate_enable(bool enable)
310 {
311         int ret, cpu;
312         unsigned long logical_proc_id_mask = 0;
313
314         if (enable == cppc_enabled)
315                 return 0;
316
317         for_each_present_cpu(cpu) {
318                 unsigned long logical_id = topology_logical_package_id(cpu);
319
320                 if (test_bit(logical_id, &logical_proc_id_mask))
321                         continue;
322
323                 set_bit(logical_id, &logical_proc_id_mask);
324
325                 ret = wrmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_ENABLE,
326                                 enable);
327                 if (ret)
328                         return ret;
329         }
330
331         cppc_enabled = enable;
332         return 0;
333 }
334
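/*
 * cppc_enable - shared memory counterpart of pstate_enable().
 *
 * CPPC is enabled per present CPU through the ACPI CPPC interface; in active
 * (EPP) mode the desired perf is additionally set to zero so the firmware
 * controls the operating point autonomously.
 */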
335 static int cppc_enable(bool enable)
336 {
337         int cpu, ret = 0;
338         struct cppc_perf_ctrls perf_ctrls;
339
340         if (enable == cppc_enabled)
341                 return 0;
342
343         for_each_present_cpu(cpu) {
344                 ret = cppc_set_enable(cpu, enable);
345                 if (ret)
346                         return ret;
347
348                 /* Enable autonomous mode for EPP */
349                 if (cppc_state == AMD_PSTATE_ACTIVE) {
350                         /* Set desired perf as zero to allow EPP firmware control */
351                         perf_ctrls.desired_perf = 0;
352                         ret = cppc_set_perf(cpu, &perf_ctrls);
353                         if (ret)
354                                 return ret;
355                 }
356         }
357
358         cppc_enabled = enable;
359         return ret;
360 }
361
362 DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);
363
364 static inline int amd_pstate_enable(bool enable)
365 {
366         return static_call(amd_pstate_enable)(enable);
367 }
368
369 static int pstate_init_perf(struct amd_cpudata *cpudata)
370 {
371         u64 cap1;
372
373         int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
374                                      &cap1);
375         if (ret)
376                 return ret;
377
378         WRITE_ONCE(cpudata->highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
379         WRITE_ONCE(cpudata->max_limit_perf, AMD_CPPC_HIGHEST_PERF(cap1));
380         WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
381         WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
382         WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
383         WRITE_ONCE(cpudata->prefcore_ranking, AMD_CPPC_HIGHEST_PERF(cap1));
384         WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
385         return 0;
386 }
387
388 static int cppc_init_perf(struct amd_cpudata *cpudata)
389 {
390         struct cppc_perf_caps cppc_perf;
391
392         int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
393         if (ret)
394                 return ret;
395
396         WRITE_ONCE(cpudata->highest_perf, cppc_perf.highest_perf);
397         WRITE_ONCE(cpudata->max_limit_perf, cppc_perf.highest_perf);
398         WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
399         WRITE_ONCE(cpudata->lowest_nonlinear_perf,
400                    cppc_perf.lowest_nonlinear_perf);
401         WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
402         WRITE_ONCE(cpudata->prefcore_ranking, cppc_perf.highest_perf);
403         WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
404
405         if (cppc_state == AMD_PSTATE_ACTIVE)
406                 return 0;
407
408         ret = cppc_get_auto_sel_caps(cpudata->cpu, &cppc_perf);
409         if (ret) {
410                 pr_warn("failed to get auto_sel, ret: %d\n", ret);
411                 return 0;
412         }
413
414         ret = cppc_set_auto_sel(cpudata->cpu,
415                         (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
416
417         if (ret)
418                 pr_warn("failed to set auto_sel, ret: %d\n", ret);
419
420         return ret;
421 }
422
423 DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);
424
425 static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
426 {
427         return static_call(amd_pstate_init_perf)(cpudata);
428 }
429
430 static void cppc_update_perf(struct amd_cpudata *cpudata,
431                              u32 min_perf, u32 des_perf,
432                              u32 max_perf, bool fast_switch)
433 {
434         struct cppc_perf_ctrls perf_ctrls;
435
436         perf_ctrls.max_perf = max_perf;
437         perf_ctrls.min_perf = min_perf;
438         perf_ctrls.desired_perf = des_perf;
439
440         cppc_set_perf(cpudata->cpu, &perf_ctrls);
441 }
442
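/*
 * amd_pstate_sample - snapshot APERF/MPERF/TSC deltas for this CPU.
 *
 * Returns false if the counters have not advanced since the previous sample;
 * otherwise updates cpudata->cur/prev and derives the effective frequency as
 * delta_aperf * cpu_khz / delta_mperf.
 */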
443 static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
444 {
445         u64 aperf, mperf, tsc;
446         unsigned long flags;
447
448         local_irq_save(flags);
449         rdmsrl(MSR_IA32_APERF, aperf);
450         rdmsrl(MSR_IA32_MPERF, mperf);
451         tsc = rdtsc();
452
453         if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
454                 local_irq_restore(flags);
455                 return false;
456         }
457
458         local_irq_restore(flags);
459
460         cpudata->cur.aperf = aperf;
461         cpudata->cur.mperf = mperf;
462         cpudata->cur.tsc =  tsc;
463         cpudata->cur.aperf -= cpudata->prev.aperf;
464         cpudata->cur.mperf -= cpudata->prev.mperf;
465         cpudata->cur.tsc -= cpudata->prev.tsc;
466
467         cpudata->prev.aperf = aperf;
468         cpudata->prev.mperf = mperf;
469         cpudata->prev.tsc = tsc;
470
471         cpudata->freq = div64_u64((cpudata->cur.aperf * cpu_khz), cpudata->cur.mperf);
472
473         return true;
474 }
475
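/*
 * amd_pstate_update - pack and issue a CPPC performance request.
 *
 * The min/des/max values are clamped to the current policy limits and packed
 * into the cached CPPC_REQ layout. In guided mode with a dynamic switching
 * governor, the desired perf is folded into min perf and then cleared, leaving
 * the platform to choose the operating point. The MSR/shared memory write is
 * skipped when the packed value is unchanged.
 */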
476 static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
477                               u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags)
478 {
479         unsigned long max_freq;
480         struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu);
481         u64 prev = READ_ONCE(cpudata->cppc_req_cached);
482         u32 nominal_perf = READ_ONCE(cpudata->nominal_perf);
483         u64 value = prev;
484
485         min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
486                         cpudata->max_limit_perf);
487         max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
488                         cpudata->max_limit_perf);
489         des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
490
491         max_freq = READ_ONCE(cpudata->max_limit_freq);
492         policy->cur = div_u64(des_perf * max_freq, max_perf);
493
494         if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
495                 min_perf = des_perf;
496                 des_perf = 0;
497         }
498
499         value &= ~AMD_CPPC_MIN_PERF(~0L);
500         value |= AMD_CPPC_MIN_PERF(min_perf);
501
502         value &= ~AMD_CPPC_DES_PERF(~0L);
503         value |= AMD_CPPC_DES_PERF(des_perf);
504
505         /* limit the max perf when core performance boost feature is disabled */
506         if (!cpudata->boost_supported)
507                 max_perf = min_t(unsigned long, nominal_perf, max_perf);
508
509         value &= ~AMD_CPPC_MAX_PERF(~0L);
510         value |= AMD_CPPC_MAX_PERF(max_perf);
511
512         if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
513                 trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
514                         cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
515                                 cpudata->cpu, (value != prev), fast_switch);
516         }
517
518         if (value == prev)
519                 goto cpufreq_policy_put;
520
521         WRITE_ONCE(cpudata->cppc_req_cached, value);
522
523         amd_pstate_update_perf(cpudata, min_perf, des_perf,
524                                max_perf, fast_switch);
525
526 cpufreq_policy_put:
527         cpufreq_cpu_put(policy);
528 }
529
530 static int amd_pstate_verify(struct cpufreq_policy_data *policy)
531 {
532         cpufreq_verify_within_cpu_limits(policy);
533
534         return 0;
535 }
536
537 static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
538 {
539         u32 max_limit_perf, min_limit_perf, lowest_perf;
540         struct amd_cpudata *cpudata = policy->driver_data;
541
542         max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
543         min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
544
545         lowest_perf = READ_ONCE(cpudata->lowest_perf);
546         if (min_limit_perf < lowest_perf)
547                 min_limit_perf = lowest_perf;
548
549         if (max_limit_perf < min_limit_perf)
550                 max_limit_perf = min_limit_perf;
551
552         WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
553         WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
554         WRITE_ONCE(cpudata->max_limit_freq, policy->max);
555         WRITE_ONCE(cpudata->min_limit_freq, policy->min);
556
557         return 0;
558 }
559
560 static int amd_pstate_update_freq(struct cpufreq_policy *policy,
561                                   unsigned int target_freq, bool fast_switch)
562 {
563         struct cpufreq_freqs freqs;
564         struct amd_cpudata *cpudata = policy->driver_data;
565         unsigned long max_perf, min_perf, des_perf, cap_perf;
566
567         if (!cpudata->max_freq)
568                 return -ENODEV;
569
570         if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
571                 amd_pstate_update_min_max_limit(policy);
572
573         cap_perf = READ_ONCE(cpudata->highest_perf);
574         min_perf = READ_ONCE(cpudata->lowest_perf);
575         max_perf = cap_perf;
576
577         freqs.old = policy->cur;
578         freqs.new = target_freq;
579
580         des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
581                                      cpudata->max_freq);
582
583         WARN_ON(fast_switch && !policy->fast_switch_enabled);
584         /*
585          * If fast_switch is desired, then there aren't any registered
586          * transition notifiers. See comment for
587          * cpufreq_enable_fast_switch().
588          */
589         if (!fast_switch)
590                 cpufreq_freq_transition_begin(policy, &freqs);
591
592         amd_pstate_update(cpudata, min_perf, des_perf,
593                         max_perf, fast_switch, policy->governor->flags);
594
595         if (!fast_switch)
596                 cpufreq_freq_transition_end(policy, &freqs, false);
597
598         return 0;
599 }
600
601 static int amd_pstate_target(struct cpufreq_policy *policy,
602                              unsigned int target_freq,
603                              unsigned int relation)
604 {
605         return amd_pstate_update_freq(policy, target_freq, false);
606 }
607
608 static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
609                                   unsigned int target_freq)
610 {
611         if (!amd_pstate_update_freq(policy, target_freq, true))
612                 return target_freq;
613         return policy->cur;
614 }
615
616 static void amd_pstate_adjust_perf(unsigned int cpu,
617                                    unsigned long _min_perf,
618                                    unsigned long target_perf,
619                                    unsigned long capacity)
620 {
621         unsigned long max_perf, min_perf, des_perf,
622                       cap_perf, lowest_nonlinear_perf;
623         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
624         struct amd_cpudata *cpudata;
625
626         if (!policy)
627                 return;
628
629         cpudata = policy->driver_data;
630
631         if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
632                 amd_pstate_update_min_max_limit(policy);
633
634
635         cap_perf = READ_ONCE(cpudata->highest_perf);
636         lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
637
638         des_perf = cap_perf;
639         if (target_perf < capacity)
640                 des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
641
642         min_perf = READ_ONCE(cpudata->lowest_perf);
643         if (_min_perf < capacity)
644                 min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
645
646         if (min_perf < lowest_nonlinear_perf)
647                 min_perf = lowest_nonlinear_perf;
648
649         max_perf = cap_perf;
650         if (max_perf < min_perf)
651                 max_perf = min_perf;
652
653         des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
654
655         amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
656                         policy->governor->flags);
657         cpufreq_cpu_put(policy);
658 }
659
660 static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
661 {
662         struct amd_cpudata *cpudata = policy->driver_data;
663         struct cppc_perf_ctrls perf_ctrls;
664         u32 highest_perf, nominal_perf, nominal_freq, max_freq;
665         int ret = 0;
666
667         highest_perf = READ_ONCE(cpudata->highest_perf);
668         nominal_perf = READ_ONCE(cpudata->nominal_perf);
669         nominal_freq = READ_ONCE(cpudata->nominal_freq);
670         max_freq = READ_ONCE(cpudata->max_freq);
671
672         if (boot_cpu_has(X86_FEATURE_CPPC)) {
673                 u64 value = READ_ONCE(cpudata->cppc_req_cached);
674
675                 value &= ~GENMASK_ULL(7, 0);
676                 value |= on ? highest_perf : nominal_perf;
677                 WRITE_ONCE(cpudata->cppc_req_cached, value);
678
679                 wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
680         } else {
681                 perf_ctrls.max_perf = on ? highest_perf : nominal_perf;
682                 ret = cppc_set_perf(cpudata->cpu, &perf_ctrls);
683                 if (ret) {
684                         cpufreq_cpu_release(policy);
685                         pr_debug("Failed to set max perf on CPU:%d. ret:%d\n",
686                                 cpudata->cpu, ret);
687                         return ret;
688                 }
689         }
690
691         if (on)
692                 policy->cpuinfo.max_freq = max_freq;
693         else if (policy->cpuinfo.max_freq > nominal_freq * 1000)
694                 policy->cpuinfo.max_freq = nominal_freq * 1000;
695
696         policy->max = policy->cpuinfo.max_freq;
697
698         if (cppc_state == AMD_PSTATE_PASSIVE) {
699                 ret = freq_qos_update_request(&cpudata->req[1], policy->cpuinfo.max_freq);
700                 if (ret < 0)
701                         pr_debug("Failed to update freq constraint: CPU%d\n", cpudata->cpu);
702         }
703
704         return ret < 0 ? ret : 0;
705 }
706
707 static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
708 {
709         struct amd_cpudata *cpudata = policy->driver_data;
710         int ret;
711
712         if (!cpudata->boost_supported) {
713                 pr_err("Boost mode is not supported by this processor or SBIOS\n");
714                 return -EOPNOTSUPP;
715         }
716         mutex_lock(&amd_pstate_driver_lock);
717         ret = amd_pstate_cpu_boost_update(policy, state);
718         WRITE_ONCE(cpudata->boost_state, !ret ? state : false);
719         policy->boost_enabled = !ret ? state : false;
720         refresh_frequency_limits(policy);
721         mutex_unlock(&amd_pstate_driver_lock);
722
723         return ret;
724 }
725
726 static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata)
727 {
728         u64 boost_val;
729         int ret = -1;
730
731         /*
732          * If the platform has no CPB support, or CPB is disabled, initialize the
733          * driver's boost_enabled state to false; this is not an error for the cpufreq core to handle.
734          */
735         if (!cpu_feature_enabled(X86_FEATURE_CPB)) {
736                 pr_debug_once("Boost CPB capabilities not present in the processor\n");
737                 ret = 0;
738                 goto exit_err;
739         }
740
741         /* at least one CPU supports CPB, even if others fail to set it up later on */
742         current_pstate_driver->boost_enabled = true;
743
744         ret = rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
745         if (ret) {
746                 pr_err_once("failed to read initial CPU boost state!\n");
747                 ret = -EIO;
748                 goto exit_err;
749         }
750
751         if (!(boost_val & MSR_K7_HWCR_CPB_DIS))
752                 cpudata->boost_supported = true;
753
754         return 0;
755
756 exit_err:
757         cpudata->boost_supported = false;
758         return ret;
759 }
760
761 static void amd_perf_ctl_reset(unsigned int cpu)
762 {
763         wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
764 }
765
766 /*
767  * Enabling amd-pstate preferred core can't be done directly from cpufreq callbacks
768  * due to locking, so queue the work for later.
769  */
770 static void amd_pstate_sched_prefcore_workfn(struct work_struct *work)
771 {
772         sched_set_itmt_support();
773 }
774 static DECLARE_WORK(sched_prefcore_work, amd_pstate_sched_prefcore_workfn);
775
776 #define CPPC_MAX_PERF   U8_MAX
777
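/*
 * amd_pstate_init_prefcore - register this CPU's ranking with the scheduler.
 *
 * The highest perf value is used as the ITMT core priority so that the
 * scheduler prefers the higher ranked ("preferred") cores; ITMT support itself
 * is enabled from a workqueue because it cannot be done from cpufreq callbacks
 * due to locking.
 */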
778 static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
779 {
780         /* user disabled or not detected */
781         if (!amd_pstate_prefcore)
782                 return;
783
784         cpudata->hw_prefcore = true;
785
786         /*
787          * The priorities can be set regardless of whether or not
788          * sched_set_itmt_support(true) has been called and it is valid to
789          * update them at any time after it has been called.
790          */
791         sched_set_itmt_core_prio((int)READ_ONCE(cpudata->highest_perf), cpudata->cpu);
792
793         schedule_work(&sched_prefcore_work);
794 }
795
796 static void amd_pstate_update_limits(unsigned int cpu)
797 {
798         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
799         struct amd_cpudata *cpudata;
800         u32 prev_high = 0, cur_high = 0;
801         int ret;
802         bool highest_perf_changed = false;
803
804         if (!policy)
805                 return;
806
807         cpudata = policy->driver_data;
808
809         if (!amd_pstate_prefcore)
810                 return;
811
812         mutex_lock(&amd_pstate_driver_lock);
813         ret = amd_get_highest_perf(cpu, &cur_high);
814         if (ret)
815                 goto free_cpufreq_put;
816
817         prev_high = READ_ONCE(cpudata->prefcore_ranking);
818         highest_perf_changed = (prev_high != cur_high);
819         if (highest_perf_changed) {
820                 WRITE_ONCE(cpudata->prefcore_ranking, cur_high);
821
822                 if (cur_high < CPPC_MAX_PERF)
823                         sched_set_itmt_core_prio((int)cur_high, cpu);
824         }
825
826 free_cpufreq_put:
827         cpufreq_cpu_put(policy);
828
829         if (!highest_perf_changed)
830                 cpufreq_update_policy(cpu);
831
832         mutex_unlock(&amd_pstate_driver_lock);
833 }
834
835 /*
836  * Get the pstate transition delay time from the firmware-provided ACPI tables
837  * instead of using a hardcoded value directly.
838  */
839 static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
840 {
841         u32 transition_delay_ns;
842
843         transition_delay_ns = cppc_get_transition_latency(cpu);
844         if (transition_delay_ns == CPUFREQ_ETERNAL) {
845                 if (cpu_feature_enabled(X86_FEATURE_FAST_CPPC))
846                         return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY;
847                 else
848                         return AMD_PSTATE_TRANSITION_DELAY;
849         }
850
851         return transition_delay_ns / NSEC_PER_USEC;
852 }
853
854 /*
855  * Get the pstate transition latency value from the firmware-provided ACPI
856  * tables instead of using a hardcoded value directly.
857  */
858 static u32 amd_pstate_get_transition_latency(unsigned int cpu)
859 {
860         u32 transition_latency;
861
862         transition_latency = cppc_get_transition_latency(cpu);
863         if (transition_latency == CPUFREQ_ETERNAL)
864                 return AMD_PSTATE_TRANSITION_LATENCY;
865
866         return transition_latency;
867 }
868
869 /*
870  * amd_pstate_init_freq: Initialize the max_freq, min_freq,
871  *                       nominal_freq and lowest_nonlinear_freq for
872  *                       the @cpudata object.
873  *
874  *  Requires: highest_perf, lowest_perf, nominal_perf and
875  *            lowest_nonlinear_perf members of @cpudata to be
876  *            initialized.
877  *
878  *  Returns 0 on success, non-zero value on failure.
879  */
880 static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
881 {
882         int ret;
883         u32 min_freq, max_freq;
884         u64 numerator;
885         u32 nominal_perf, nominal_freq;
886         u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
887         u32 boost_ratio, lowest_nonlinear_ratio;
888         struct cppc_perf_caps cppc_perf;
889
890         ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
891         if (ret)
892                 return ret;
893
894         if (quirks && quirks->lowest_freq)
895                 min_freq = quirks->lowest_freq * 1000;
896         else
897                 min_freq = cppc_perf.lowest_freq * 1000;
898
899         if (quirks && quirks->nominal_freq)
900                 nominal_freq = quirks->nominal_freq;
901         else
902                 nominal_freq = cppc_perf.nominal_freq;
903
904         nominal_perf = READ_ONCE(cpudata->nominal_perf);
905
906         ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
907         if (ret)
908                 return ret;
909         boost_ratio = div_u64(numerator << SCHED_CAPACITY_SHIFT, nominal_perf);
910         max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
911
912         lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
913         lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
914                                          nominal_perf);
915         lowest_nonlinear_freq = (nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
916
917         WRITE_ONCE(cpudata->min_freq, min_freq);
918         WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);
919         WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
920         WRITE_ONCE(cpudata->max_freq, max_freq);
921
922         /*
923          * The values below need to be initialized correctly, otherwise the driver will fail to load.
924          * max_freq is calculated as (nominal_freq * highest_perf) / nominal_perf;
925          * lowest_nonlinear_freq is a value in the range [min_freq, nominal_freq].
926          * Check the _CPC objects in the ACPI tables if any value is incorrect.
927          */
928         if (min_freq <= 0 || max_freq <= 0 || nominal_freq <= 0 || min_freq > max_freq) {
929                 pr_err("min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect\n",
930                         min_freq, max_freq, nominal_freq * 1000);
931                 return -EINVAL;
932         }
933
934         if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq * 1000) {
935                 pr_err("lowest_nonlinear_freq(%d) value is out of range [min_freq(%d), nominal_freq(%d)]\n",
936                         lowest_nonlinear_freq, min_freq, nominal_freq * 1000);
937                 return -EINVAL;
938         }
939
940         return 0;
941 }
942
943 static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
944 {
945         int min_freq, max_freq, ret;
946         struct device *dev;
947         struct amd_cpudata *cpudata;
948
949         /*
950          * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
951          * which is ideal for the initialization process.
952          */
953         amd_perf_ctl_reset(policy->cpu);
954         dev = get_cpu_device(policy->cpu);
955         if (!dev)
956                 return -ENODEV;
957
958         cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
959         if (!cpudata)
960                 return -ENOMEM;
961
962         cpudata->cpu = policy->cpu;
963
964         ret = amd_pstate_init_perf(cpudata);
965         if (ret)
966                 goto free_cpudata1;
967
968         amd_pstate_init_prefcore(cpudata);
969
970         ret = amd_pstate_init_freq(cpudata);
971         if (ret)
972                 goto free_cpudata1;
973
974         ret = amd_pstate_init_boost_support(cpudata);
975         if (ret)
976                 goto free_cpudata1;
977
978         min_freq = READ_ONCE(cpudata->min_freq);
979         max_freq = READ_ONCE(cpudata->max_freq);
980
981         policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu);
982         policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu);
983
984         policy->min = min_freq;
985         policy->max = max_freq;
986
987         policy->cpuinfo.min_freq = min_freq;
988         policy->cpuinfo.max_freq = max_freq;
989
990         policy->boost_enabled = READ_ONCE(cpudata->boost_supported);
991
992         /* It will be updated by governor */
993         policy->cur = policy->cpuinfo.min_freq;
994
995         if (cpu_feature_enabled(X86_FEATURE_CPPC))
996                 policy->fast_switch_possible = true;
997
998         ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
999                                    FREQ_QOS_MIN, policy->cpuinfo.min_freq);
1000         if (ret < 0) {
1001                 dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
1002                 goto free_cpudata1;
1003         }
1004
1005         ret = freq_qos_add_request(&policy->constraints, &cpudata->req[1],
1006                                    FREQ_QOS_MAX, policy->cpuinfo.max_freq);
1007         if (ret < 0) {
1008                 dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
1009                 goto free_cpudata2;
1010         }
1011
1012         cpudata->max_limit_freq = max_freq;
1013         cpudata->min_limit_freq = min_freq;
1014
1015         policy->driver_data = cpudata;
1016
1017         if (!current_pstate_driver->adjust_perf)
1018                 current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
1019
1020         return 0;
1021
1022 free_cpudata2:
1023         freq_qos_remove_request(&cpudata->req[0]);
1024 free_cpudata1:
1025         kfree(cpudata);
1026         return ret;
1027 }
1028
1029 static void amd_pstate_cpu_exit(struct cpufreq_policy *policy)
1030 {
1031         struct amd_cpudata *cpudata = policy->driver_data;
1032
1033         freq_qos_remove_request(&cpudata->req[1]);
1034         freq_qos_remove_request(&cpudata->req[0]);
1035         policy->fast_switch_possible = false;
1036         kfree(cpudata);
1037 }
1038
1039 static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
1040 {
1041         int ret;
1042
1043         ret = amd_pstate_enable(true);
1044         if (ret)
1045                 pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
1046
1047         return ret;
1048 }
1049
1050 static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
1051 {
1052         int ret;
1053
1054         ret = amd_pstate_enable(false);
1055         if (ret)
1056                 pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
1057
1058         return ret;
1059 }
1060
1061 /* Sysfs attributes */
1062
1063 /*
1064  * This frequency indicates the maximum hardware frequency.
1065  * If boost is not active but supported, the frequency will be larger than the
1066  * one in cpuinfo.
1067  */
1068 static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
1069                                         char *buf)
1070 {
1071         int max_freq;
1072         struct amd_cpudata *cpudata = policy->driver_data;
1073
1074         max_freq = READ_ONCE(cpudata->max_freq);
1075         if (max_freq < 0)
1076                 return max_freq;
1077
1078         return sysfs_emit(buf, "%u\n", max_freq);
1079 }
1080
1081 static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
1082                                                      char *buf)
1083 {
1084         int freq;
1085         struct amd_cpudata *cpudata = policy->driver_data;
1086
1087         freq = READ_ONCE(cpudata->lowest_nonlinear_freq);
1088         if (freq < 0)
1089                 return freq;
1090
1091         return sysfs_emit(buf, "%u\n", freq);
1092 }
1093
1094 /*
1095  * In some ASICs, the highest_perf is not the one in the _CPC table, so we
1096  * need to expose it to sysfs.
1097  */
1098 static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
1099                                             char *buf)
1100 {
1101         u32 perf;
1102         struct amd_cpudata *cpudata = policy->driver_data;
1103
1104         perf = READ_ONCE(cpudata->highest_perf);
1105
1106         return sysfs_emit(buf, "%u\n", perf);
1107 }
1108
1109 static ssize_t show_amd_pstate_prefcore_ranking(struct cpufreq_policy *policy,
1110                                                 char *buf)
1111 {
1112         u32 perf;
1113         struct amd_cpudata *cpudata = policy->driver_data;
1114
1115         perf = READ_ONCE(cpudata->prefcore_ranking);
1116
1117         return sysfs_emit(buf, "%u\n", perf);
1118 }
1119
1120 static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
1121                                            char *buf)
1122 {
1123         bool hw_prefcore;
1124         struct amd_cpudata *cpudata = policy->driver_data;
1125
1126         hw_prefcore = READ_ONCE(cpudata->hw_prefcore);
1127
1128         return sysfs_emit(buf, "%s\n", str_enabled_disabled(hw_prefcore));
1129 }
1130
1131 static ssize_t show_energy_performance_available_preferences(
1132                                 struct cpufreq_policy *policy, char *buf)
1133 {
1134         int i = 0;
1135         int offset = 0;
1136         struct amd_cpudata *cpudata = policy->driver_data;
1137
1138         if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
1139                 return sysfs_emit_at(buf, offset, "%s\n",
1140                                 energy_perf_strings[EPP_INDEX_PERFORMANCE]);
1141
1142         while (energy_perf_strings[i] != NULL)
1143                 offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
1144
1145         offset += sysfs_emit_at(buf, offset, "\n");
1146
1147         return offset;
1148 }
1149
1150 static ssize_t store_energy_performance_preference(
1151                 struct cpufreq_policy *policy, const char *buf, size_t count)
1152 {
1153         struct amd_cpudata *cpudata = policy->driver_data;
1154         char str_preference[21];
1155         ssize_t ret;
1156
1157         ret = sscanf(buf, "%20s", str_preference);
1158         if (ret != 1)
1159                 return -EINVAL;
1160
1161         ret = match_string(energy_perf_strings, -1, str_preference);
1162         if (ret < 0)
1163                 return -EINVAL;
1164
1165         mutex_lock(&amd_pstate_limits_lock);
1166         ret = amd_pstate_set_energy_pref_index(cpudata, ret);
1167         mutex_unlock(&amd_pstate_limits_lock);
1168
1169         return ret ?: count;
1170 }
1171
1172 static ssize_t show_energy_performance_preference(
1173                                 struct cpufreq_policy *policy, char *buf)
1174 {
1175         struct amd_cpudata *cpudata = policy->driver_data;
1176         int preference;
1177
1178         preference = amd_pstate_get_energy_pref_index(cpudata);
1179         if (preference < 0)
1180                 return preference;
1181
1182         return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
1183 }
1184
1185 static void amd_pstate_driver_cleanup(void)
1186 {
1187         amd_pstate_enable(false);
1188         cppc_state = AMD_PSTATE_DISABLE;
1189         current_pstate_driver = NULL;
1190 }
1191
1192 static int amd_pstate_register_driver(int mode)
1193 {
1194         int ret;
1195
1196         if (mode == AMD_PSTATE_PASSIVE || mode == AMD_PSTATE_GUIDED)
1197                 current_pstate_driver = &amd_pstate_driver;
1198         else if (mode == AMD_PSTATE_ACTIVE)
1199                 current_pstate_driver = &amd_pstate_epp_driver;
1200         else
1201                 return -EINVAL;
1202
1203         cppc_state = mode;
1204         ret = cpufreq_register_driver(current_pstate_driver);
1205         if (ret) {
1206                 amd_pstate_driver_cleanup();
1207                 return ret;
1208         }
1209         return 0;
1210 }
1211
1212 static int amd_pstate_unregister_driver(int dummy)
1213 {
1214         cpufreq_unregister_driver(current_pstate_driver);
1215         amd_pstate_driver_cleanup();
1216         return 0;
1217 }
1218
1219 static int amd_pstate_change_mode_without_dvr_change(int mode)
1220 {
1221         int cpu = 0;
1222
1223         cppc_state = mode;
1224
1225         if (cpu_feature_enabled(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
1226                 return 0;
1227
1228         for_each_present_cpu(cpu) {
1229                 cppc_set_auto_sel(cpu, (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
1230         }
1231
1232         return 0;
1233 }
1234
1235 static int amd_pstate_change_driver_mode(int mode)
1236 {
1237         int ret;
1238
1239         ret = amd_pstate_unregister_driver(0);
1240         if (ret)
1241                 return ret;
1242
1243         ret = amd_pstate_register_driver(mode);
1244         if (ret)
1245                 return ret;
1246
1247         return 0;
1248 }
1249
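/*
 * Mode transition table, indexed by [current cppc_state][requested mode].
 * A NULL entry means the transition is a no-op (same mode); otherwise the
 * stored callback performs the driver register/unregister or the in-place
 * mode change.
 */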
1250 static cppc_mode_transition_fn mode_state_machine[AMD_PSTATE_MAX][AMD_PSTATE_MAX] = {
1251         [AMD_PSTATE_DISABLE]         = {
1252                 [AMD_PSTATE_DISABLE]     = NULL,
1253                 [AMD_PSTATE_PASSIVE]     = amd_pstate_register_driver,
1254                 [AMD_PSTATE_ACTIVE]      = amd_pstate_register_driver,
1255                 [AMD_PSTATE_GUIDED]      = amd_pstate_register_driver,
1256         },
1257         [AMD_PSTATE_PASSIVE]         = {
1258                 [AMD_PSTATE_DISABLE]     = amd_pstate_unregister_driver,
1259                 [AMD_PSTATE_PASSIVE]     = NULL,
1260                 [AMD_PSTATE_ACTIVE]      = amd_pstate_change_driver_mode,
1261                 [AMD_PSTATE_GUIDED]      = amd_pstate_change_mode_without_dvr_change,
1262         },
1263         [AMD_PSTATE_ACTIVE]          = {
1264                 [AMD_PSTATE_DISABLE]     = amd_pstate_unregister_driver,
1265                 [AMD_PSTATE_PASSIVE]     = amd_pstate_change_driver_mode,
1266                 [AMD_PSTATE_ACTIVE]      = NULL,
1267                 [AMD_PSTATE_GUIDED]      = amd_pstate_change_driver_mode,
1268         },
1269         [AMD_PSTATE_GUIDED]          = {
1270                 [AMD_PSTATE_DISABLE]     = amd_pstate_unregister_driver,
1271                 [AMD_PSTATE_PASSIVE]     = amd_pstate_change_mode_without_dvr_change,
1272                 [AMD_PSTATE_ACTIVE]      = amd_pstate_change_driver_mode,
1273                 [AMD_PSTATE_GUIDED]      = NULL,
1274         },
1275 };
1276
1277 static ssize_t amd_pstate_show_status(char *buf)
1278 {
1279         if (!current_pstate_driver)
1280                 return sysfs_emit(buf, "disable\n");
1281
1282         return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
1283 }
1284
1285 int amd_pstate_update_status(const char *buf, size_t size)
1286 {
1287         int mode_idx;
1288
1289         if (size > strlen("passive") || size < strlen("active"))
1290                 return -EINVAL;
1291
1292         mode_idx = get_mode_idx_from_str(buf, size);
1293
1294         if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
1295                 return -EINVAL;
1296
1297         if (mode_state_machine[cppc_state][mode_idx])
1298                 return mode_state_machine[cppc_state][mode_idx](mode_idx);
1299
1300         return 0;
1301 }
1302 EXPORT_SYMBOL_GPL(amd_pstate_update_status);
1303
1304 static ssize_t status_show(struct device *dev,
1305                            struct device_attribute *attr, char *buf)
1306 {
1307         ssize_t ret;
1308
1309         mutex_lock(&amd_pstate_driver_lock);
1310         ret = amd_pstate_show_status(buf);
1311         mutex_unlock(&amd_pstate_driver_lock);
1312
1313         return ret;
1314 }
1315
1316 static ssize_t status_store(struct device *a, struct device_attribute *b,
1317                             const char *buf, size_t count)
1318 {
1319         char *p = memchr(buf, '\n', count);
1320         int ret;
1321
1322         mutex_lock(&amd_pstate_driver_lock);
1323         ret = amd_pstate_update_status(buf, p ? p - buf : count);
1324         mutex_unlock(&amd_pstate_driver_lock);
1325
1326         return ret < 0 ? ret : count;
1327 }
1328
1329 static ssize_t prefcore_show(struct device *dev,
1330                              struct device_attribute *attr, char *buf)
1331 {
1332         return sysfs_emit(buf, "%s\n", str_enabled_disabled(amd_pstate_prefcore));
1333 }
1334
1335 cpufreq_freq_attr_ro(amd_pstate_max_freq);
1336 cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
1337
1338 cpufreq_freq_attr_ro(amd_pstate_highest_perf);
1339 cpufreq_freq_attr_ro(amd_pstate_prefcore_ranking);
1340 cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
1341 cpufreq_freq_attr_rw(energy_performance_preference);
1342 cpufreq_freq_attr_ro(energy_performance_available_preferences);
1343 static DEVICE_ATTR_RW(status);
1344 static DEVICE_ATTR_RO(prefcore);
1345
1346 static struct freq_attr *amd_pstate_attr[] = {
1347         &amd_pstate_max_freq,
1348         &amd_pstate_lowest_nonlinear_freq,
1349         &amd_pstate_highest_perf,
1350         &amd_pstate_prefcore_ranking,
1351         &amd_pstate_hw_prefcore,
1352         NULL,
1353 };
1354
1355 static struct freq_attr *amd_pstate_epp_attr[] = {
1356         &amd_pstate_max_freq,
1357         &amd_pstate_lowest_nonlinear_freq,
1358         &amd_pstate_highest_perf,
1359         &amd_pstate_prefcore_ranking,
1360         &amd_pstate_hw_prefcore,
1361         &energy_performance_preference,
1362         &energy_performance_available_preferences,
1363         NULL,
1364 };
1365
1366 static struct attribute *pstate_global_attributes[] = {
1367         &dev_attr_status.attr,
1368         &dev_attr_prefcore.attr,
1369         NULL
1370 };
1371
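/*
 * Global driver attributes. With the group name "amd_pstate", the status and
 * prefcore files typically show up as /sys/devices/system/cpu/amd_pstate/status
 * and .../prefcore once the group is registered on the cpu subsystem device
 * during driver init (the registration is not part of this excerpt).
 */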
1372 static const struct attribute_group amd_pstate_global_attr_group = {
1373         .name = "amd_pstate",
1374         .attrs = pstate_global_attributes,
1375 };
1376
1377 static bool amd_pstate_acpi_pm_profile_server(void)
1378 {
1379         switch (acpi_gbl_FADT.preferred_profile) {
1380         case PM_ENTERPRISE_SERVER:
1381         case PM_SOHO_SERVER:
1382         case PM_PERFORMANCE_SERVER:
1383                 return true;
1384         }
1385         return false;
1386 }
1387
1388 static bool amd_pstate_acpi_pm_profile_undefined(void)
1389 {
1390         if (acpi_gbl_FADT.preferred_profile == PM_UNSPECIFIED)
1391                 return true;
1392         if (acpi_gbl_FADT.preferred_profile >= NR_PM_PROFILES)
1393                 return true;
1394         return false;
1395 }
1396
1397 static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
1398 {
1399         int min_freq, max_freq, ret;
1400         struct amd_cpudata *cpudata;
1401         struct device *dev;
1402         u64 value;
1403
1404         /*
1405          * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
1406          * which is ideal for the initialization process.
1407          */
1408         amd_perf_ctl_reset(policy->cpu);
1409         dev = get_cpu_device(policy->cpu);
1410         if (!dev)
1411                 return -ENODEV;
1412
1413         cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
1414         if (!cpudata)
1415                 return -ENOMEM;
1416
1417         cpudata->cpu = policy->cpu;
1418         cpudata->epp_policy = 0;
1419
1420         ret = amd_pstate_init_perf(cpudata);
1421         if (ret)
1422                 goto free_cpudata1;
1423
1424         amd_pstate_init_prefcore(cpudata);
1425
1426         ret = amd_pstate_init_freq(cpudata);
1427         if (ret)
1428                 goto free_cpudata1;
1429
1430         ret = amd_pstate_init_boost_support(cpudata);
1431         if (ret)
1432                 goto free_cpudata1;
1433
1434         min_freq = READ_ONCE(cpudata->min_freq);
1435         max_freq = READ_ONCE(cpudata->max_freq);
1436
1437         policy->cpuinfo.min_freq = min_freq;
1438         policy->cpuinfo.max_freq = max_freq;
1439         /* It will be updated by governor */
1440         policy->cur = policy->cpuinfo.min_freq;
1441
1442         policy->driver_data = cpudata;
1443
1444         cpudata->epp_cached = cpudata->epp_default = amd_pstate_get_epp(cpudata, 0);
1445
1446         policy->min = policy->cpuinfo.min_freq;
1447         policy->max = policy->cpuinfo.max_freq;
1448
1449         policy->boost_enabled = READ_ONCE(cpudata->boost_supported);
1450
1451         /*
1452          * Set the policy to provide a valid fallback value in case
1453          * the default cpufreq governor is neither powersave nor performance.
1454          */
1455         if (amd_pstate_acpi_pm_profile_server() ||
1456             amd_pstate_acpi_pm_profile_undefined())
1457                 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
1458         else
1459                 policy->policy = CPUFREQ_POLICY_POWERSAVE;
1460
1461         if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
1462                 ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
1463                 if (ret)
1464                         return ret;
1465                 WRITE_ONCE(cpudata->cppc_req_cached, value);
1466
1467                 ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &value);
1468                 if (ret)
1469                         return ret;
1470                 WRITE_ONCE(cpudata->cppc_cap1_cached, value);
1471         }
1472
1473         return 0;
1474
1475 free_cpudata1:
1476         kfree(cpudata);
1477         return ret;
1478 }
1479
1480 static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
1481 {
1482         struct amd_cpudata *cpudata = policy->driver_data;
1483
1484         if (cpudata) {
1485                 kfree(cpudata);
1486                 policy->driver_data = NULL;
1487         }
1488
1489         pr_debug("CPU %d exiting\n", policy->cpu);
1490 }
1491
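/*
 * amd_pstate_epp_update_limit - apply policy limits in active (EPP) mode.
 *
 * Recomputes the min/max limit perf from policy->min/max, packs them into the
 * cached CPPC_REQ value with desired perf forced to zero (required for
 * autonomous EPP operation), and then programs the EPP value; a performance
 * policy pins min perf to max perf and forces EPP to 0.
 */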
1492 static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
1493 {
1494         struct amd_cpudata *cpudata = policy->driver_data;
1495         u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
1496         u64 value;
1497         s16 epp;
1498
1499         max_perf = READ_ONCE(cpudata->highest_perf);
1500         min_perf = READ_ONCE(cpudata->lowest_perf);
1501         max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
1502         min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
1503
1504         if (min_limit_perf < min_perf)
1505                 min_limit_perf = min_perf;
1506
1507         if (max_limit_perf < min_limit_perf)
1508                 max_limit_perf = min_limit_perf;
1509
1510         WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
1511         WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
1512
1513         max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
1514                         cpudata->max_limit_perf);
1515         min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
1516                         cpudata->max_limit_perf);
1517         value = READ_ONCE(cpudata->cppc_req_cached);
1518
1519         if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
1520                 min_perf = max_perf;
1521
1522         /* Initial min/max values for CPPC Performance Controls Register */
1523         value &= ~AMD_CPPC_MIN_PERF(~0L);
1524         value |= AMD_CPPC_MIN_PERF(min_perf);
1525
1526         value &= ~AMD_CPPC_MAX_PERF(~0L);
1527         value |= AMD_CPPC_MAX_PERF(max_perf);
1528
1529         /* The CPPC EPP feature requires the desired perf field to be set to zero */
1530         value &= ~AMD_CPPC_DES_PERF(~0L);
1531         value |= AMD_CPPC_DES_PERF(0);
1532
1533         cpudata->epp_policy = cpudata->policy;
1534
1535         /* Get BIOS pre-defined epp value */
1536         epp = amd_pstate_get_epp(cpudata, value);
1537         if (epp < 0) {
1538                 /*
1539                  * This return value can only be negative for shared memory
1540                  * systems where EPP register read/write is not supported.
1541                  */
1542                 return epp;
1543         }
1544
1545         if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
1546                 epp = 0;
1547
1548         /* Set initial EPP value */
1549         if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
1550                 value &= ~GENMASK_ULL(31, 24);
1551                 value |= (u64)epp << 24;
1552         }
1553
1554         WRITE_ONCE(cpudata->cppc_req_cached, value);
1555         return amd_pstate_set_epp(cpudata, epp);
1556 }
1557
1558 static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
1559 {
1560         struct amd_cpudata *cpudata = policy->driver_data;
1561         int ret;
1562
1563         if (!policy->cpuinfo.max_freq)
1564                 return -ENODEV;
1565
1566         pr_debug("set_policy: cpuinfo.max %u policy->max %u\n",
1567                                 policy->cpuinfo.max_freq, policy->max);
1568
1569         cpudata->policy = policy->policy;
1570
1571         ret = amd_pstate_epp_update_limit(policy);
1572         if (ret)
1573                 return ret;
1574
1575         /*
1576          * policy->cur is never updated by the amd_pstate_epp driver, but it
1577          * may still be read as a (stale) frequency, so keep it within limits.
1578          */
1579         policy->cur = policy->min;
1580
1581         return 0;
1582 }
1583
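/*
 * Rough summary: re-enable CPPC in firmware and replay the cached request.
 * Systems with the MSR interface write the cached MSR_AMD_CPPC_REQ value back
 * directly; shared-memory systems restore the limits and cached EPP through
 * cppc_set_perf().
 */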
1584 static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
1585 {
1586         struct cppc_perf_ctrls perf_ctrls;
1587         u64 value, max_perf;
1588         int ret;
1589
1590         ret = amd_pstate_enable(true);
1591         if (ret)
1592                 pr_err("failed to enable amd pstate during resume, return %d\n", ret);
1593
1594         value = READ_ONCE(cpudata->cppc_req_cached);
1595         max_perf = READ_ONCE(cpudata->highest_perf);
1596
1597         if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
1598                 wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
1599         } else {
1600                 perf_ctrls.max_perf = max_perf;
1601                 perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
1602                 cppc_set_perf(cpudata->cpu, &perf_ctrls);
1603         }
1604 }
1605
1606 static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
1607 {
1608         struct amd_cpudata *cpudata = policy->driver_data;
1609
1610         pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
1611
1612         if (cppc_state == AMD_PSTATE_ACTIVE) {
1613                 amd_pstate_epp_reenable(cpudata);
1614                 cpudata->suspended = false;
1615         }
1616
1617         return 0;
1618 }
1619
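/*
 * Offline handling, as implemented below: pin the core at its lowest perf
 * level by setting max perf equal to min perf, either through MSR_AMD_CPPC_REQ
 * or, on shared-memory systems, through cppc_set_perf() with a
 * balance-powersave EPP hint.
 */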
1620 static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
1621 {
1622         struct amd_cpudata *cpudata = policy->driver_data;
1623         struct cppc_perf_ctrls perf_ctrls;
1624         int min_perf;
1625         u64 value;
1626
1627         min_perf = READ_ONCE(cpudata->lowest_perf);
1628         value = READ_ONCE(cpudata->cppc_req_cached);
1629
1630         mutex_lock(&amd_pstate_limits_lock);
1631         if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
1632                 cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
1633
1634                 /* Set max perf same as min perf */
1635                 value &= ~AMD_CPPC_MAX_PERF(~0L);
1636                 value |= AMD_CPPC_MAX_PERF(min_perf);
1637                 value &= ~AMD_CPPC_MIN_PERF(~0L);
1638                 value |= AMD_CPPC_MIN_PERF(min_perf);
1639                 wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
1640         } else {
1641                 perf_ctrls.desired_perf = 0;
1642                 perf_ctrls.max_perf = min_perf;
1643                 perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
1644                 cppc_set_perf(cpudata->cpu, &perf_ctrls);
1645         }
1646         mutex_unlock(&amd_pstate_limits_lock);
1647 }
1648
1649 static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
1650 {
1651         struct amd_cpudata *cpudata = policy->driver_data;
1652
1653         pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
1654
1655         if (cpudata->suspended)
1656                 return 0;
1657
1658         if (cppc_state == AMD_PSTATE_ACTIVE)
1659                 amd_pstate_epp_offline(policy);
1660
1661         return 0;
1662 }
1663
1664 static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy)
1665 {
1666         cpufreq_verify_within_cpu_limits(policy);
1667         pr_debug("policy_max=%d, policy_min=%d\n", policy->max, policy->min);
1668         return 0;
1669 }
1670
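/*
 * Suspend/resume pairing: suspend marks cpudata->suspended and disables CPPC
 * in firmware; resume (and CPU online in active mode) re-enables it through
 * amd_pstate_epp_reenable() and clears the flag.
 */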
1671 static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
1672 {
1673         struct amd_cpudata *cpudata = policy->driver_data;
1674         int ret;
1675
1676         /* avoid suspending when EPP is not enabled */
1677         if (cppc_state != AMD_PSTATE_ACTIVE)
1678                 return 0;
1679
1680         /* set this flag so the offline path is skipped while suspended */
1681         cpudata->suspended = true;
1682
1683         /* disable CPPC in low-level firmware */
1684         ret = amd_pstate_enable(false);
1685         if (ret)
1686                 pr_err("failed to suspend, return %d\n", ret);
1687
1688         return 0;
1689 }
1690
1691 static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
1692 {
1693         struct amd_cpudata *cpudata = policy->driver_data;
1694
1695         if (cpudata->suspended) {
1696                 mutex_lock(&amd_pstate_limits_lock);
1697
1698                 /* re-enable amd-pstate when resuming from suspend */
1699                 amd_pstate_epp_reenable(cpudata);
1700
1701                 mutex_unlock(&amd_pstate_limits_lock);
1702
1703                 cpudata->suspended = false;
1704         }
1705
1706         return 0;
1707 }
1708
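/*
 * Callback table used in passive and guided modes: frequency requests arrive
 * through ->target()/->fast_switch(), and amd_pstate_init() additionally wires
 * up ->adjust_perf() on systems with the MSR-based CPPC interface.
 */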
1709 static struct cpufreq_driver amd_pstate_driver = {
1710         .flags          = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
1711         .verify         = amd_pstate_verify,
1712         .target         = amd_pstate_target,
1713         .fast_switch    = amd_pstate_fast_switch,
1714         .init           = amd_pstate_cpu_init,
1715         .exit           = amd_pstate_cpu_exit,
1716         .suspend        = amd_pstate_cpu_suspend,
1717         .resume         = amd_pstate_cpu_resume,
1718         .set_boost      = amd_pstate_set_boost,
1719         .update_limits  = amd_pstate_update_limits,
1720         .name           = "amd-pstate",
1721         .attr           = amd_pstate_attr,
1722 };
1723
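/*
 * Callback table used in active (EPP) mode: there is no ->target() hook;
 * ->setpolicy() only programs the min/max performance limits and the energy
 * performance preference, and the hardware chooses the operating frequency.
 */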
1724 static struct cpufreq_driver amd_pstate_epp_driver = {
1725         .flags          = CPUFREQ_CONST_LOOPS,
1726         .verify         = amd_pstate_epp_verify_policy,
1727         .setpolicy      = amd_pstate_epp_set_policy,
1728         .init           = amd_pstate_epp_cpu_init,
1729         .exit           = amd_pstate_epp_cpu_exit,
1730         .offline        = amd_pstate_epp_cpu_offline,
1731         .online         = amd_pstate_epp_cpu_online,
1732         .suspend        = amd_pstate_epp_suspend,
1733         .resume         = amd_pstate_epp_resume,
1734         .update_limits  = amd_pstate_update_limits,
1735         .set_boost      = amd_pstate_set_boost,
1736         .name           = "amd-pstate-epp",
1737         .attr           = amd_pstate_epp_attr,
1738 };
1739
1740 static int __init amd_pstate_set_driver(int mode_idx)
1741 {
1742         if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
1743                 cppc_state = mode_idx;
1744                 if (cppc_state == AMD_PSTATE_DISABLE)
1745                         pr_info("driver is explicitly disabled\n");
1746
1747                 if (cppc_state == AMD_PSTATE_ACTIVE)
1748                         current_pstate_driver = &amd_pstate_epp_driver;
1749
1750                 if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
1751                         current_pstate_driver = &amd_pstate_driver;
1752
1753                 return 0;
1754         }
1755
1756         return -EINVAL;
1757 }
1758
1759 /*
1760  * CPPC is not supported on family 0x17 processors with model IDs in the 0x10 to 0x2F range.
1761  * Print a debug message so missing CPPC support can be identified when the driver fails to load.
1762  */
1763 static bool amd_cppc_supported(void)
1764 {
1765         struct cpuinfo_x86 *c = &cpu_data(0);
1766         bool warn = false;
1767
1768         if ((boot_cpu_data.x86 == 0x17) && (boot_cpu_data.x86_model < 0x30)) {
1769                 pr_debug_once("CPPC feature is not supported by the processor\n");
1770                 return false;
1771         }
1772
1773         /*
1774          * If the CPPC feature is disabled in the BIOS for processors
1775          * that support MSR-based CPPC, the AMD Pstate driver may not
1776          * function correctly.
1777          *
1778          * For such processors, check the CPPC flag and display a
1779          * warning message if the platform supports CPPC.
1780          *
1781          * Note: the check below does not abort the driver
1782          * registration process because it exists only for
1783          * debugging purposes. Besides, it may still be possible for
1784          * the driver to work using the shared-memory mechanism.
1785          */
1786         if (!cpu_feature_enabled(X86_FEATURE_CPPC)) {
1787                 if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
1788                         switch (c->x86_model) {
1789                         case 0x60 ... 0x6F:
1790                         case 0x80 ... 0xAF:
1791                                 warn = true;
1792                                 break;
1793                         }
1794                 } else if (cpu_feature_enabled(X86_FEATURE_ZEN3) ||
1795                            cpu_feature_enabled(X86_FEATURE_ZEN4)) {
1796                         switch (c->x86_model) {
1797                         case 0x10 ... 0x1F:
1798                         case 0x40 ... 0xAF:
1799                                 warn = true;
1800                                 break;
1801                         }
1802                 } else if (cpu_feature_enabled(X86_FEATURE_ZEN5)) {
1803                         warn = true;
1804                 }
1805         }
1806
1807         if (warn)
1808                 pr_warn_once("The CPPC feature is supported but currently disabled by the BIOS.\n"
1809                                         "Please enable it if your BIOS has the CPPC option.\n");
1810         return true;
1811 }
1812
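/*
 * Rough init sequence implemented below: AMD vendor and CPPC capability
 * checks, _CPC validation, DMI quirk matching, mode selection (command line
 * or CONFIG_X86_AMD_PSTATE_DEFAULT_MODE), static_call rewiring for
 * shared-memory systems, preferred-core detection, enabling CPPC in firmware
 * and, finally, cpufreq driver registration plus the global sysfs attributes.
 */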
1813 static int __init amd_pstate_init(void)
1814 {
1815         struct device *dev_root;
1816         int ret;
1817
1818         if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
1819                 return -ENODEV;
1820
1821         /* bail out (with a debug message) if the processor does not support CPPC */
1822         if (!amd_cppc_supported())
1823                 return -EOPNOTSUPP;
1824
1825         /* show a warning message when the BIOS is broken or ACPI is disabled */
1826         if (!acpi_cpc_valid()) {
1827                 pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
1828                 return -ENODEV;
1829         }
1830
1831         /* don't keep reloading if cpufreq_driver exists */
1832         if (cpufreq_get_current_driver())
1833                 return -EEXIST;
1834
1835         quirks = NULL;
1836
1837         /* check if this machine needs CPPC quirks */
1838         dmi_check_system(amd_pstate_quirks_table);
1839
1840         /*
1841          * Determine the driver mode from the command line or the kernel config.
1842          * If no command line input is provided, cppc_state will be AMD_PSTATE_UNDEFINED.
1843          * Command line options override the kernel config settings.
1844          */
1845
1846         if (cppc_state == AMD_PSTATE_UNDEFINED) {
1847                 /* Disable on the following configs by default:
1848                  * 1. Undefined platforms
1849                  * 2. Server platforms
1850                  */
1851                 if (amd_pstate_acpi_pm_profile_undefined() ||
1852                     amd_pstate_acpi_pm_profile_server()) {
1853                         pr_info("driver load is disabled, boot with specific mode to enable this\n");
1854                         return -ENODEV;
1855                 }
1856                 /* get driver mode from kernel config option [1:4] */
1857                 cppc_state = CONFIG_X86_AMD_PSTATE_DEFAULT_MODE;
1858         }
1859
1860         switch (cppc_state) {
1861         case AMD_PSTATE_DISABLE:
1862                 pr_info("driver load is disabled, boot with specific mode to enable this\n");
1863                 return -ENODEV;
1864         case AMD_PSTATE_PASSIVE:
1865         case AMD_PSTATE_ACTIVE:
1866         case AMD_PSTATE_GUIDED:
1867                 ret = amd_pstate_set_driver(cppc_state);
1868                 if (ret)
1869                         return ret;
1870                 break;
1871         default:
1872                 return -EINVAL;
1873         }
1874
1875         /* capability check */
1876         if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
1877                 pr_debug("AMD CPPC MSR based functionality is supported\n");
1878                 if (cppc_state != AMD_PSTATE_ACTIVE)
1879                         current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
1880         } else {
1881                 pr_debug("AMD CPPC shared memory based functionality is supported\n");
1882                 static_call_update(amd_pstate_enable, cppc_enable);
1883                 static_call_update(amd_pstate_init_perf, cppc_init_perf);
1884                 static_call_update(amd_pstate_update_perf, cppc_update_perf);
1885         }
1886
1887         if (amd_pstate_prefcore) {
1888                 ret = amd_detect_prefcore(&amd_pstate_prefcore);
1889                 if (ret)
1890                         return ret;
1891         }
1892
1893         /* enable amd pstate feature */
1894         ret = amd_pstate_enable(true);
1895         if (ret) {
1896                 pr_err("failed to enable driver mode(%d)\n", cppc_state);
1897                 return ret;
1898         }
1899
1900         ret = cpufreq_register_driver(current_pstate_driver);
1901         if (ret) {
1902                 pr_err("failed to register with return %d\n", ret);
1903                 goto disable_driver;
1904         }
1905
1906         dev_root = bus_get_dev_root(&cpu_subsys);
1907         if (dev_root) {
1908                 ret = sysfs_create_group(&dev_root->kobj, &amd_pstate_global_attr_group);
1909                 put_device(dev_root);
1910                 if (ret) {
1911                         pr_err("sysfs attribute export failed with error %d.\n", ret);
1912                         goto global_attr_free;
1913                 }
1914         }
1915
1916         return ret;
1917
1918 global_attr_free:
1919         cpufreq_unregister_driver(current_pstate_driver);
1920 disable_driver:
1921         amd_pstate_enable(false);
1922         return ret;
1923 }
1924 device_initcall(amd_pstate_init);
1925
1926 static int __init amd_pstate_param(char *str)
1927 {
1928         size_t size;
1929         int mode_idx;
1930
1931         if (!str)
1932                 return -EINVAL;
1933
1934         size = strlen(str);
1935         mode_idx = get_mode_idx_from_str(str, size);
1936
1937         return amd_pstate_set_driver(mode_idx);
1938 }
1939
1940 static int __init amd_prefcore_param(char *str)
1941 {
1942         if (!strcmp(str, "disable"))
1943                 amd_pstate_prefcore = false;
1944
1945         return 0;
1946 }
1947
1948 early_param("amd_pstate", amd_pstate_param);
1949 early_param("amd_prefcore", amd_prefcore_param);
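/*
 * Example kernel command line usage (the amd_pstate mode names are assumed
 * to match those accepted by get_mode_idx_from_str()):
 *   amd_pstate=disable | passive | active | guided
 *   amd_prefcore=disable
 */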
1950
1951 MODULE_AUTHOR("Huang Rui <[email protected]>");
1952 MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");