// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AMD Platform Management Framework Driver
 *
 * Copyright (c) 2022, Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Author: Shyam Sundar S K <[email protected]>
 */

#include <asm/amd_nb.h>
#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include "pmf.h"

/* PMF-SMU communication registers */
#define AMD_PMF_REGISTER_MESSAGE        0xA18
#define AMD_PMF_REGISTER_RESPONSE       0xA78
#define AMD_PMF_REGISTER_ARGUMENT       0xA58

/* Base address of SMU for mapping physical address to virtual address */
#define AMD_PMF_MAPPING_SIZE            0x01000
#define AMD_PMF_BASE_ADDR_OFFSET        0x10000
#define AMD_PMF_BASE_ADDR_LO            0x13B102E8
#define AMD_PMF_BASE_ADDR_HI            0x13B102EC
#define AMD_PMF_BASE_ADDR_LO_MASK       GENMASK(15, 0)
#define AMD_PMF_BASE_ADDR_HI_MASK       GENMASK(31, 20)

/* SMU Response Codes */
#define AMD_PMF_RESULT_OK                    0x01
#define AMD_PMF_RESULT_CMD_REJECT_BUSY       0xFC
#define AMD_PMF_RESULT_CMD_REJECT_PREREQ     0xFD
#define AMD_PMF_RESULT_CMD_UNKNOWN           0xFE
#define AMD_PMF_RESULT_FAILED                0xFF

/* List of supported CPU ids */
#define AMD_CPU_ID_RMB                  0x14b5
#define AMD_CPU_ID_PS                   0x14e8

#define PMF_MSG_DELAY_MIN_US            50
#define RESPONSE_REGISTER_LOOP_MAX      20000

#define DELAY_MIN_US    2000
#define DELAY_MAX_US    3000

/* Metrics Table sampling period in ms (can be overridden via module parameter) */
static int metrics_table_loop_ms = 1000;
module_param(metrics_table_loop_ms, int, 0644);
MODULE_PARM_DESC(metrics_table_loop_ms, "Metrics Table sample size time (default = 1000ms)");
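/*
 * Example override (hypothetical value, assuming the usual amd_pmf module
 * name): pass amd_pmf.metrics_table_loop_ms=2000 on the kernel command line,
 * or write to /sys/module/amd_pmf/parameters/metrics_table_loop_ms at runtime
 * (the parameter permissions are 0644, so it is writable after load).
 */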

/* Force load on supported older platforms */
static bool force_load;
module_param(force_load, bool, 0444);
MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)");

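/*
 * Power source change notifier: on a power_supply PROP_CHANGED event,
 * re-apply the static slider power limits for the new AC/DC source.
 * When Auto Mode or CnQF is active and the platform profile is balanced,
 * skip the update and let those features manage the limits instead.
 */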
static int amd_pmf_pwr_src_notify_call(struct notifier_block *nb, unsigned long event, void *data)
{
        struct amd_pmf_dev *pmf = container_of(nb, struct amd_pmf_dev, pwr_src_notifier);

        if (event != PSY_EVENT_PROP_CHANGED)
                return NOTIFY_OK;

        if (is_apmf_func_supported(pmf, APMF_FUNC_AUTO_MODE) ||
            is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_DC) ||
            is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_AC)) {
                if ((pmf->amt_enabled || pmf->cnqf_enabled) && is_pprof_balanced(pmf))
                        return NOTIFY_DONE;
        }

        amd_pmf_set_sps_power_limits(pmf);

        return NOTIFY_OK;
}

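/*
 * debugfs: dump the slider limits currently in effect for the active power
 * source and platform profile mode (spl, fppt, sppt, sppt_apu_only, stt_min
 * and the APU/HS2 skin temperature limits).
 */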
static int current_power_limits_show(struct seq_file *seq, void *unused)
{
        struct amd_pmf_dev *dev = seq->private;
        struct amd_pmf_static_slider_granular table;
        int mode, src = 0;

        mode = amd_pmf_get_pprof_modes(dev);
        if (mode < 0)
                return mode;

        src = amd_pmf_get_power_source();
        amd_pmf_update_slider(dev, SLIDER_OP_GET, mode, &table);
        seq_printf(seq, "spl:%u fppt:%u sppt:%u sppt_apu_only:%u stt_min:%u stt[APU]:%u stt[HS2]: %u\n",
                   table.prop[src][mode].spl,
                   table.prop[src][mode].fppt,
                   table.prop[src][mode].sppt,
                   table.prop[src][mode].sppt_apu_only,
                   table.prop[src][mode].stt_min,
                   table.prop[src][mode].stt_skin_temp[STT_TEMP_APU],
                   table.prop[src][mode].stt_skin_temp[STT_TEMP_HS2]);
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(current_power_limits);

static void amd_pmf_dbgfs_unregister(struct amd_pmf_dev *dev)
{
        debugfs_remove_recursive(dev->dbgfs_dir);
}

static void amd_pmf_dbgfs_register(struct amd_pmf_dev *dev)
{
        dev->dbgfs_dir = debugfs_create_dir("amd_pmf", NULL);
        debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
                            &current_power_limits_fops);
}

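/* Report AC when the system is running from mains power, DC otherwise */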
int amd_pmf_get_power_source(void)
{
        if (power_supply_is_system_supplied() > 0)
                return POWER_SOURCE_AC;
        else
                return POWER_SOURCE_DC;
}

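/*
 * Periodic metrics worker: ask PMFW to refresh the metrics table in the
 * shared buffer (SET_TRANSFER_TABLE), derive the socket power from the APU
 * and dGPU power figures, feed the Auto Mode and/or CnQF state machines,
 * then re-arm the delayed work for the next sample.
 */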
static void amd_pmf_get_metrics(struct work_struct *work)
{
        struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, work_buffer.work);
        ktime_t time_elapsed_ms;
        int socket_power;

        mutex_lock(&dev->update_mutex);
        /* Transfer table contents */
        memset(dev->buf, 0, sizeof(dev->m_table));
        amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
        memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table));

        time_elapsed_ms = ktime_to_ms(ktime_get()) - dev->start_time;
        /* Calculate the avg SoC power consumption */
        socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power;

        if (dev->amt_enabled) {
                /* Apply the Auto Mode transition */
                amd_pmf_trans_automode(dev, socket_power, time_elapsed_ms);
        }

        if (dev->cnqf_enabled) {
                /* Apply the CnQF transition */
                amd_pmf_trans_cnqf(dev, socket_power, time_elapsed_ms);
        }

        dev->start_time = ktime_to_ms(ktime_get());
        schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms));
        mutex_unlock(&dev->update_mutex);
}

static inline u32 amd_pmf_reg_read(struct amd_pmf_dev *dev, int reg_offset)
{
        return ioread32(dev->regbase + reg_offset);
}

static inline void amd_pmf_reg_write(struct amd_pmf_dev *dev, int reg_offset, u32 val)
{
        iowrite32(val, dev->regbase + reg_offset);
}

static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev)
{
        u32 value;

        value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_RESPONSE);
        dev_dbg(dev->dev, "AMD_PMF_REGISTER_RESPONSE:%x\n", value);

        value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
        dev_dbg(dev->dev, "AMD_PMF_REGISTER_ARGUMENT:%d\n", value);

        value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_MESSAGE);
        dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value);
}

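/*
 * Mailbox handshake with the SMU: wait for the response register to be
 * non-zero (previous command completed), clear it, write the argument and
 * the message ID, then poll for the result. On AMD_PMF_RESULT_OK a "get"
 * command reads the returned value back from the argument register; the
 * other result codes are translated to -EBUSY/-EINVAL/-EIO.
 */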
int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data)
{
        int rc;
        u32 val;

        mutex_lock(&dev->lock);

        /* Wait until we get a valid response */
        rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
                                val, val != 0, PMF_MSG_DELAY_MIN_US,
                                PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
        if (rc) {
                dev_err(dev->dev, "failed to talk to SMU\n");
                goto out_unlock;
        }

        /* Write zero to response register */
        amd_pmf_reg_write(dev, AMD_PMF_REGISTER_RESPONSE, 0);

        /* Write argument into argument register */
        amd_pmf_reg_write(dev, AMD_PMF_REGISTER_ARGUMENT, arg);

        /* Write message ID to message ID register */
        amd_pmf_reg_write(dev, AMD_PMF_REGISTER_MESSAGE, message);

        /* Wait until we get a valid response */
        rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
                                val, val != 0, PMF_MSG_DELAY_MIN_US,
                                PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
        if (rc) {
                dev_err(dev->dev, "SMU response timed out\n");
                goto out_unlock;
        }

        switch (val) {
        case AMD_PMF_RESULT_OK:
                if (get) {
                        /* PMFW may take a longer time to return the data */
                        usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
                        *data = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
                }
                break;
        case AMD_PMF_RESULT_CMD_REJECT_BUSY:
                dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
                rc = -EBUSY;
                goto out_unlock;
        case AMD_PMF_RESULT_CMD_UNKNOWN:
                dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
                rc = -EINVAL;
                goto out_unlock;
        case AMD_PMF_RESULT_CMD_REJECT_PREREQ:
        case AMD_PMF_RESULT_FAILED:
        default:
                dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
                rc = -EIO;
                goto out_unlock;
        }

out_unlock:
        mutex_unlock(&dev->lock);
        amd_pmf_dump_registers(dev);
        return rc;
}

static const struct pci_device_id pmf_pci_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RMB) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
        { }
};

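/*
 * Allocate the metrics table buffer and hand its physical address to PMFW
 * via SET_DRAM_ADDR_HIGH/LOW so the firmware knows where to place the table;
 * sampling is kicked off only after a delay to avoid reading stale data.
 */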
int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
{
        u64 phys_addr;
        u32 hi, low;

        INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);

        /* Get Metrics Table Address */
        dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL);
        if (!dev->buf)
                return -ENOMEM;

        phys_addr = virt_to_phys(dev->buf);
        hi = phys_addr >> 32;
        low = phys_addr & GENMASK(31, 0);

        amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
        amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);

        /*
         * Start collecting the metrics data after a small delay,
         * or else we might end up getting stale values from PMFW.
         */
        schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms * 3));

        return 0;
}

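/*
 * Enable the features advertised by the BIOS through APMF: the static slider
 * (SPS) when supported, plus either Auto Mode or, failing that, CnQF when a
 * dynamic slider is exposed. Auto Mode and CnQF are mutually exclusive here.
 */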
static void amd_pmf_init_features(struct amd_pmf_dev *dev)
{
        int ret;

        /* Enable Static Slider */
        if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
                amd_pmf_init_sps(dev);
                dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
        }

        /* Enable Auto Mode */
        if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
                amd_pmf_init_auto_mode(dev);
                dev_dbg(dev->dev, "Auto Mode Init done\n");
        } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
                   is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
                /* Enable Cool n Quiet Framework (CnQF) */
                ret = amd_pmf_init_cnqf(dev);
                if (ret)
                        dev_warn(dev->dev, "CnQF Init failed\n");
        }
}

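/* Tear down whatever amd_pmf_init_features() enabled, using the same checks */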
static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
{
        if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
                amd_pmf_deinit_sps(dev);

        if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
                amd_pmf_deinit_auto_mode(dev);
        } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
                   is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
                amd_pmf_deinit_cnqf(dev);
        }
}

static const struct acpi_device_id amd_pmf_acpi_ids[] = {
        {"AMDI0100", 0x100},
        {"AMDI0102", 0},
        { }
};
MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);

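/*
 * Probe: match the ACPI ID (AMDI0100 is only bound with force_load), check
 * that the root complex is a supported CPU, read the SMU mailbox base
 * address over SMN, ioremap a 4K window at the mailbox offset, then bring up
 * APMF/ACPI state, the PMF features, debugfs and the power source notifier.
 */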
static int amd_pmf_probe(struct platform_device *pdev)
{
        const struct acpi_device_id *id;
        struct amd_pmf_dev *dev;
        struct pci_dev *rdev;
        u32 base_addr_lo;
        u32 base_addr_hi;
        u64 base_addr;
        u32 val;
        int err;

        id = acpi_match_device(amd_pmf_acpi_ids, &pdev->dev);
        if (!id)
                return -ENODEV;

        if (id->driver_data == 0x100 && !force_load)
                return -ENODEV;

        dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        dev->dev = &pdev->dev;

        rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
        if (!rdev || !pci_match_id(pmf_pci_ids, rdev)) {
                pci_dev_put(rdev);
                return -ENODEV;
        }

        dev->cpu_id = rdev->device;

        err = amd_smn_read(0, AMD_PMF_BASE_ADDR_LO, &val);
        if (err) {
                dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_LO);
                pci_dev_put(rdev);
                return pcibios_err_to_errno(err);
        }

        base_addr_lo = val & AMD_PMF_BASE_ADDR_HI_MASK;

        err = amd_smn_read(0, AMD_PMF_BASE_ADDR_HI, &val);
        if (err) {
                dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_HI);
                pci_dev_put(rdev);
                return pcibios_err_to_errno(err);
        }

        base_addr_hi = val & AMD_PMF_BASE_ADDR_LO_MASK;
        pci_dev_put(rdev);
        base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);

        dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMF_BASE_ADDR_OFFSET,
                                    AMD_PMF_MAPPING_SIZE);
        if (!dev->regbase)
                return -ENOMEM;

        mutex_init(&dev->lock);
        mutex_init(&dev->update_mutex);

        apmf_acpi_init(dev);
        platform_set_drvdata(pdev, dev);
        amd_pmf_init_features(dev);
        apmf_install_handler(dev);
        amd_pmf_dbgfs_register(dev);

        dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
        power_supply_reg_notifier(&dev->pwr_src_notifier);

        dev_info(dev->dev, "registered PMF device successfully\n");

        return 0;
}

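/*
 * Remove: unregister the power source notifier before tearing down the
 * features, APMF/ACPI state and debugfs entries, then destroy the mutexes
 * and free the metrics table buffer.
 */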
static void amd_pmf_remove(struct platform_device *pdev)
{
        struct amd_pmf_dev *dev = platform_get_drvdata(pdev);

        power_supply_unreg_notifier(&dev->pwr_src_notifier);
        amd_pmf_deinit_features(dev);
        apmf_acpi_deinit(dev);
        amd_pmf_dbgfs_unregister(dev);
        mutex_destroy(&dev->lock);
        mutex_destroy(&dev->update_mutex);
        kfree(dev->buf);
}

static const struct attribute_group *amd_pmf_driver_groups[] = {
        &cnqf_feature_attribute_group,
        NULL,
};

static struct platform_driver amd_pmf_driver = {
        .driver = {
                .name = "amd-pmf",
                .acpi_match_table = amd_pmf_acpi_ids,
                .dev_groups = amd_pmf_driver_groups,
        },
        .probe = amd_pmf_probe,
        .remove_new = amd_pmf_remove,
};
module_platform_driver(amd_pmf_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AMD Platform Management Framework Driver");