1 // SPDX-License-Identifier: GPL-2.0-only
3 * Processor thermal device for newer processors
4 * Copyright (c) 2020, Intel Corporation.
7 #include <linux/acpi.h>
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/pci.h>
11 #include <linux/thermal.h>
13 #include "int340x_thermal_zone.h"
14 #include "processor_thermal_device.h"
16 #define DRV_NAME "proc_thermal_pci"
/*
 * Module parameter: when set, probe attempts PCI MSI/MSI-X based
 * interrupts; otherwise the shared legacy IRQ path is used.
 * Writable at runtime (mode 0644).
 */
19 module_param(use_msi, bool, 0644);
20 MODULE_PARM_DESC(use_msi,
21 "Use PCI MSI based interrupts for processor thermal device.");
/* Per-device driver state for one processor thermal PCI device. */
23 struct proc_thermal_pci {
/* Core processor thermal data; holds mmio_base used by the MMIO helpers */
25 struct proc_thermal_device *proc_priv;
/* Thermal zone registered at probe for the package temperature sensor */
26 struct thermal_zone_device *tzone;
/* Delayed work that notifies the thermal core after a threshold IRQ */
27 struct delayed_work work;
/*
 * Indices into proc_thermal_mmio_info[]; each entry names one MMIO
 * field (register offset + shift + mask) accessed via the
 * proc_thermal_mmio_read()/write() helpers.
 */
32 enum proc_thermal_mmio_type {
33 PROC_THERMAL_MMIO_TJMAX,
34 PROC_THERMAL_MMIO_PP0_TEMP,
35 PROC_THERMAL_MMIO_PP1_TEMP,
36 PROC_THERMAL_MMIO_PKG_TEMP,
37 PROC_THERMAL_MMIO_THRES_0,
38 PROC_THERMAL_MMIO_THRES_1,
39 PROC_THERMAL_MMIO_INT_ENABLE_0,
40 PROC_THERMAL_MMIO_INT_ENABLE_1,
41 PROC_THERMAL_MMIO_INT_STATUS_0,
42 PROC_THERMAL_MMIO_INT_STATUS_1,
/*
 * Describes one MMIO field: its type tag plus the register offset,
 * bit shift and mask used to extract/insert the field (see the
 * proc_thermal_mmio_info[] table below).
 */
46 struct proc_thermal_mmio_info {
47 enum proc_thermal_mmio_type mmio_type;
/*
 * MMIO field table, indexed by enum proc_thermal_mmio_type:
 * { type, register offset, bit shift, field mask }.
 */
53 static struct proc_thermal_mmio_info proc_thermal_mmio_info[] = {
54 { PROC_THERMAL_MMIO_TJMAX, 0x599c, 16, 0xff },
55 { PROC_THERMAL_MMIO_PP0_TEMP, 0x597c, 0, 0xff },
56 { PROC_THERMAL_MMIO_PP1_TEMP, 0x5980, 0, 0xff },
57 { PROC_THERMAL_MMIO_PKG_TEMP, 0x5978, 0, 0xff },
58 { PROC_THERMAL_MMIO_THRES_0, 0x5820, 8, 0x7F },
59 { PROC_THERMAL_MMIO_THRES_1, 0x5820, 16, 0x7F },
60 { PROC_THERMAL_MMIO_INT_ENABLE_0, 0x5820, 15, 0x01 },
61 { PROC_THERMAL_MMIO_INT_ENABLE_1, 0x5820, 23, 0x01 },
62 { PROC_THERMAL_MMIO_INT_STATUS_0, 0x7200, 6, 0x01 },
63 { PROC_THERMAL_MMIO_INT_STATUS_1, 0x7200, 8, 0x01 },
/* Delay (ms) before the threshold work notifies the thermal core. */
66 #define B0D4_THERMAL_NOTIFY_DELAY 1000
67 static int notify_delay_ms = B0D4_THERMAL_NOTIFY_DELAY;
/*
 * Read one field from the device MMIO space: load the 32-bit register
 * at the table entry's offset, then shift and mask to extract the
 * field value.
 */
69 static void proc_thermal_mmio_read(struct proc_thermal_pci *pci_info,
70 enum proc_thermal_mmio_type type,
73 *value = ioread32(((u8 __iomem *)pci_info->proc_priv->mmio_base +
74 proc_thermal_mmio_info[type].mmio_addr));
75 *value >>= proc_thermal_mmio_info[type].shift;
76 *value &= proc_thermal_mmio_info[type].mask;
/*
 * Update one field in the device MMIO space via a read-modify-write of
 * the containing 32-bit register; only the bits covered by the table
 * entry's mask/shift are intended to change.
 */
79 static void proc_thermal_mmio_write(struct proc_thermal_pci *pci_info,
80 enum proc_thermal_mmio_type type,
/* Read the current register contents */
86 current_val = ioread32(((u8 __iomem *)pci_info->proc_priv->mmio_base +
87 proc_thermal_mmio_info[type].mmio_addr));
/* Mask positioned over the target field within the register */
88 mask = proc_thermal_mmio_info[type].mask << proc_thermal_mmio_info[type].shift;
/* Bound the new value to the field width and move it into place */
91 value &= proc_thermal_mmio_info[type].mask;
92 value <<= proc_thermal_mmio_info[type].shift;
95 iowrite32(current_val, ((u8 __iomem *)pci_info->proc_priv->mmio_base +
96 proc_thermal_mmio_info[type].mmio_addr));
100 * To avoid sending too many messages to user space, we have 1 second delay.
101 * On interrupt we are disabling interrupt and enabling after 1 second.
102 * This workload function is delayed by 1 second.
/*
 * Delayed work handler: report the threshold crossing to the thermal
 * core, then re-enable the threshold interrupt that the hard IRQ
 * handler disabled.
 */
104 static void proc_thermal_threshold_work_fn(struct work_struct *work)
106 struct delayed_work *delayed_work = to_delayed_work(work);
107 struct proc_thermal_pci *pci_info = container_of(delayed_work,
108 struct proc_thermal_pci, work);
109 struct thermal_zone_device *tzone = pci_info->tzone;
/* Notify the thermal core that the passive trip was violated */
112 thermal_zone_device_update(tzone, THERMAL_TRIP_VIOLATED);
114 /* Enable interrupt flag */
115 proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 1);
/* Schedule the threshold work to run after notify_delay_ms milliseconds. */
118 static void pkg_thermal_schedule_work(struct delayed_work *work)
120 unsigned long ms = msecs_to_jiffies(notify_delay_ms);
122 schedule_delayed_work(work, ms);
/*
 * Clear the SoC workload-hint / power-floor interrupt status bits.
 * No-op unless the device advertises WT_HINT or POWER_FLOOR support.
 */
125 static void proc_thermal_clear_soc_int_status(struct proc_thermal_device *proc_priv)
129 if (!(proc_priv->mmio_feature_mask &
130 (PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR)))
/* Read-modify-write: clear only the status bits covered by the mask */
133 status = readq(proc_priv->mmio_base + SOC_WT_RES_INT_STATUS_OFFSET);
134 writeq(status & ~SOC_WT_RES_INT_STATUS_MASK,
135 proc_priv->mmio_base + SOC_WT_RES_INT_STATUS_OFFSET);
/*
 * Threaded IRQ handler: run the workload-hint and power-floor
 * notification callbacks, then clear the SoC interrupt status so the
 * device can raise the next event.
 */
138 static irqreturn_t proc_thermal_irq_thread_handler(int irq, void *devid)
140 struct proc_thermal_pci *pci_info = devid;
142 proc_thermal_wt_intr_callback(pci_info->pdev, pci_info->proc_priv);
143 proc_thermal_power_floor_intr_callback(pci_info->pdev, pci_info->proc_priv);
144 proc_thermal_clear_soc_int_status(pci_info->proc_priv);
/*
 * Hard IRQ handler.  Distinguishes the interrupt sources: workload
 * hint and power floor events wake the threaded handler; a thermal
 * threshold event disables the threshold interrupt and schedules the
 * delayed notification work instead.
 */
149 static irqreturn_t proc_thermal_irq_handler(int irq, void *devid)
151 struct proc_thermal_pci *pci_info = devid;
152 struct proc_thermal_device *proc_priv;
153 int ret = IRQ_HANDLED;
156 proc_priv = pci_info->proc_priv;
/* Workload-hint event pending? Defer heavy work to the thread */
158 if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_WT_HINT) {
159 if (proc_thermal_check_wt_intr(pci_info->proc_priv))
160 ret = IRQ_WAKE_THREAD;
/* Power-floor event pending? Same treatment */
163 if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_POWER_FLOOR) {
164 if (proc_thermal_check_power_floor_intr(pci_info->proc_priv))
165 ret = IRQ_WAKE_THREAD;
169 * Since now there are two sources of interrupts: one from thermal threshold
170 * and another from workload hint, add a check if there was really a threshold
171 * interrupt before scheduling work function for thermal threshold.
173 proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_INT_STATUS_0, &status);
175 /* Disable the threshold interrupt; the delayed work re-enables it */
176 proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
177 pkg_thermal_schedule_work(&pci_info->work);
/*
 * NOTE(review): config-space write at offset 0xdc presumably acks/EOIs
 * the device interrupt — confirm against the hardware specification.
 */
180 pci_write_config_byte(pci_info->pdev, 0xdc, 0x01);
/*
 * thermal_zone_device_ops.get_temp callback: read the package
 * temperature field and report it scaled by 1000 (the millidegree
 * units the thermal core expects).
 */
185 static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
187 struct proc_thermal_pci *pci_info = thermal_zone_device_priv(tzd);
190 proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_PKG_TEMP, &_temp);
191 *temp = (unsigned long)_temp * 1000;
/*
 * thermal_zone_device_ops.set_trip_temp callback.  The hardware
 * threshold register holds an offset below TjMax in whole degrees,
 * so the requested millidegree value is converted accordingly.
 */
196 static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
198 struct proc_thermal_pci *pci_info = thermal_zone_device_priv(tzd);
/*
 * Tear-down path (presumably taken when temp == 0 — confirm): cancel
 * pending notify work, disable the interrupt and clear the threshold.
 */
202 cancel_delayed_work_sync(&pci_info->work);
203 proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
204 proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, 0);
205 pci_info->stored_thres = 0;
/* Program path: threshold = TjMax - temp (degrees C) */
209 proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_TJMAX, &tjmax);
210 _temp = tjmax - (temp / 1000);
214 proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, _temp);
215 proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 1);
/* Remember the millidegree value so resume can re-program it */
217 pci_info->stored_thres = temp;
/*
 * Read back the programmed threshold (an offset below TjMax) and
 * convert it to millidegrees; returns THERMAL_TEMP_INVALID when no
 * threshold is programmed (presumably when the raw field reads 0 —
 * the guarding condition is above the early return).
 */
222 static int get_trip_temp(struct proc_thermal_pci *pci_info)
226 proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_THRES_0, &temp);
228 return THERMAL_TEMP_INVALID;
230 proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_TJMAX, &tjmax);
231 temp = (tjmax - temp) * 1000;
/* Single passive trip point; its temperature is filled in at probe. */
236 static struct thermal_trip psv_trip = {
237 .type = THERMAL_TRIP_PASSIVE,
/* Thermal zone callbacks: package temperature read, trip programming. */
240 static struct thermal_zone_device_ops tzone_ops = {
241 .get_temp = sys_get_curr_temp,
242 .set_trip_temp = sys_set_trip_temp,
/* Use the user_space governor: threshold events are handled by user space. */
245 static struct thermal_zone_params tzone_params = {
246 .governor_name = "user_space",
/*
 * Probe: enable the PCI device, map the processor thermal MMIO
 * features selected by id->driver_data, register the legacy int340x
 * interface (optional), register the thermal zone, and request either
 * an MSI/MSI-X or shared legacy interrupt depending on use_msi.
 */
250 static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
252 struct proc_thermal_device *proc_priv;
253 struct proc_thermal_pci *pci_info;
254 int irq_flag = 0, irq, ret;
255 bool msi_irq = false;
/* Both allocations are devm-managed; freed automatically on detach */
257 proc_priv = devm_kzalloc(&pdev->dev, sizeof(*proc_priv), GFP_KERNEL);
261 pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL);
265 pci_info->pdev = pdev;
266 ret = pcim_enable_device(pdev);
268 dev_err(&pdev->dev, "error: could not enable device\n");
272 pci_set_master(pdev);
274 INIT_DELAYED_WORK(&pci_info->work, proc_thermal_threshold_work_fn);
/* Cross-link the private structures and stash them as drvdata */
276 proc_priv->priv_data = pci_info;
277 pci_info->proc_priv = proc_priv;
278 pci_set_drvdata(pdev, proc_priv);
/* Map/enable the MMIO features this SoC supports (driver_data mask) */
280 ret = proc_thermal_mmio_add(pdev, proc_priv, id->driver_data);
/* Legacy int340x interface is best-effort; flag and continue on failure */
284 ret = proc_thermal_add(&pdev->dev, proc_priv);
286 dev_err(&pdev->dev, "error: proc_thermal_add, will continue\n");
287 pci_info->no_legacy = 1;
/* Seed the passive trip with whatever threshold is already programmed */
290 psv_trip.temperature = get_trip_temp(pci_info);
292 pci_info->tzone = thermal_zone_device_register_with_trips("TCPU_PCI", &psv_trip,
295 &tzone_params, 0, 0);
296 if (IS_ERR(pci_info->tzone)) {
297 ret = PTR_ERR(pci_info->tzone);
/*
 * NOTE(review): msi_enabled/msix_enabled are normally set only after
 * vector allocation; testing them before pci_alloc_irq_vectors() looks
 * suspect — confirm the intended condition here.
 */
301 if (use_msi && (pdev->msi_enabled || pdev->msix_enabled)) {
302 /* request and enable interrupt */
303 ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
305 dev_err(&pdev->dev, "Failed to allocate vectors!\n");
309 irq = pci_irq_vector(pdev, 0);
/* Legacy path: the line may be shared with other devices */
312 irq_flag = IRQF_SHARED;
316 ret = devm_request_threaded_irq(&pdev->dev, irq,
317 proc_thermal_irq_handler, proc_thermal_irq_thread_handler,
318 irq_flag, KBUILD_MODNAME, pci_info);
/* NOTE(review): message prints pdev->irq though 'irq' was requested */
320 dev_err(&pdev->dev, "Request IRQ %d failed\n", pdev->irq);
321 goto err_free_vectors;
324 ret = thermal_zone_device_enable(pci_info->tzone);
326 goto err_free_vectors;
/* Error unwinding: release resources in reverse order of acquisition */
332 pci_free_irq_vectors(pdev);
334 thermal_zone_device_unregister(pci_info->tzone);
336 if (!pci_info->no_legacy)
337 proc_thermal_remove(proc_priv);
338 proc_thermal_mmio_remove(pdev, proc_priv);
339 pci_disable_device(pdev);
/*
 * Remove: quiesce the hardware (clear threshold, disable interrupt),
 * release the IRQ and vectors, then tear down the thermal zone and
 * MMIO/legacy interfaces in reverse order of probe.
 */
344 static void proc_thermal_pci_remove(struct pci_dev *pdev)
346 struct proc_thermal_device *proc_priv = pci_get_drvdata(pdev);
347 struct proc_thermal_pci *pci_info = proc_priv->priv_data;
/* Stop any pending threshold notification work first */
349 cancel_delayed_work_sync(&pci_info->work);
351 proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, 0);
352 proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
/*
 * NOTE(review): probe may have requested pci_irq_vector(pdev, 0) when
 * MSI/MSI-X was used, which can differ from pdev->irq — verify the
 * correct vector is freed here.
 */
354 devm_free_irq(&pdev->dev, pdev->irq, pci_info);
355 pci_free_irq_vectors(pdev);
357 thermal_zone_device_unregister(pci_info->tzone);
358 proc_thermal_mmio_remove(pdev, pci_info->proc_priv);
359 if (!pci_info->no_legacy)
360 proc_thermal_remove(proc_priv);
361 pci_disable_device(pdev);
364 #ifdef CONFIG_PM_SLEEP
/*
 * System suspend: delegate to the legacy int340x suspend handler, but
 * only when the legacy interface was successfully registered at probe.
 */
365 static int proc_thermal_pci_suspend(struct device *dev)
367 struct pci_dev *pdev = to_pci_dev(dev);
368 struct proc_thermal_device *proc_priv;
369 struct proc_thermal_pci *pci_info;
371 proc_priv = pci_get_drvdata(pdev);
372 pci_info = proc_priv->priv_data;
374 if (!pci_info->no_legacy)
375 return proc_thermal_suspend(dev);
/*
 * System resume: re-program the threshold saved by set_trip_temp
 * (hardware state is lost across suspend) and re-enable its interrupt,
 * then delegate to the legacy resume handler when applicable.
 */
379 static int proc_thermal_pci_resume(struct device *dev)
381 struct pci_dev *pdev = to_pci_dev(dev);
382 struct proc_thermal_device *proc_priv;
383 struct proc_thermal_pci *pci_info;
385 proc_priv = pci_get_drvdata(pdev);
386 pci_info = proc_priv->priv_data;
388 if (pci_info->stored_thres) {
/* stored_thres is in millidegrees; hardware takes whole degrees */
389 proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0,
390 pci_info->stored_thres / 1000);
391 proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 1);
394 if (!pci_info->no_legacy)
395 return proc_thermal_resume(dev);
/* Without CONFIG_PM_SLEEP the PM callbacks compile out to NULL. */
400 #define proc_thermal_pci_suspend NULL
401 #define proc_thermal_pci_resume NULL
404 static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, proc_thermal_pci_suspend,
405 proc_thermal_pci_resume);
/*
 * Supported devices.  driver_data carries the per-SoC feature mask
 * consumed by proc_thermal_mmio_add() at probe.
 */
407 static const struct pci_device_id proc_thermal_pci_ids[] = {
408 { PCI_DEVICE_DATA(INTEL, ADL_THERMAL, PROC_THERMAL_FEATURE_RAPL |
409 PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_WT_REQ) },
410 { PCI_DEVICE_DATA(INTEL, MTLP_THERMAL, PROC_THERMAL_FEATURE_RAPL |
411 PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_DLVR |
412 PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR) },
413 { PCI_DEVICE_DATA(INTEL, ARL_S_THERMAL, PROC_THERMAL_FEATURE_RAPL |
414 PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_WT_HINT) },
415 { PCI_DEVICE_DATA(INTEL, RPL_THERMAL, PROC_THERMAL_FEATURE_RAPL |
416 PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_WT_REQ) },
420 MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids);
/* PCI driver glue: probe/remove callbacks, device table and PM ops. */
422 static struct pci_driver proc_thermal_pci_driver = {
424 .probe = proc_thermal_pci_probe,
425 .remove = proc_thermal_pci_remove,
426 .id_table = proc_thermal_pci_ids,
427 .driver.pm = &proc_thermal_pci_pm,
430 module_pci_driver(proc_thermal_pci_driver);
432 MODULE_IMPORT_NS(INT340X_THERMAL);
435 MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
436 MODULE_LICENSE("GPL v2");