]> Git Repo - J-linux.git/blob - drivers/platform/x86/intel/pmc/core.c
platform/x86: p2sb: Cache correct PCI bar for P2SB on Gemini Lake
[J-linux.git] / drivers / platform / x86 / intel / pmc / core.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Intel Core SoC Power Management Controller Driver
4  *
5  * Copyright (c) 2016, Intel Corporation.
6  * All Rights Reserved.
7  *
8  * Authors: Rajneesh Bhardwaj <[email protected]>
9  *          Vishwanath Somayaji <[email protected]>
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/acpi_pmtmr.h>
15 #include <linux/bitfield.h>
16 #include <linux/debugfs.h>
17 #include <linux/delay.h>
18 #include <linux/dmi.h>
19 #include <linux/io.h>
20 #include <linux/module.h>
21 #include <linux/pci.h>
22 #include <linux/slab.h>
23 #include <linux/suspend.h>
24 #include <linux/units.h>
25
26 #include <asm/cpu_device_id.h>
27 #include <asm/intel-family.h>
28 #include <asm/msr.h>
29 #include <asm/tsc.h>
30
31 #include "core.h"
32 #include "../pmt/telemetry.h"
33
/* Maximum number of modes supported by platforms that have low power mode capability */
const char *pmc_lpm_modes[] = {
	"S0i2.0",
	"S0i2.1",
	"S0i2.2",
	"S0i3.0",
	"S0i3.1",
	"S0i3.2",
	"S0i3.3",
	"S0i3.4",
	/* NULL-terminated so iterators can stop without a separate count */
	NULL
};
46
/* PKGC MSRs are common across Intel Core SoCs */
/* Human-readable name / package C-state residency MSR pairs; empty entry terminates. */
const struct pmc_bit_map msr_map[] = {
	{"Package C2",                  MSR_PKG_C2_RESIDENCY},
	{"Package C3",                  MSR_PKG_C3_RESIDENCY},
	{"Package C6",                  MSR_PKG_C6_RESIDENCY},
	{"Package C7",                  MSR_PKG_C7_RESIDENCY},
	{"Package C8",                  MSR_PKG_C8_RESIDENCY},
	{"Package C9",                  MSR_PKG_C9_RESIDENCY},
	{"Package C10",                 MSR_PKG_C10_RESIDENCY},
	{}
};
58
/* Read a 32-bit register at @reg_offset within the PMC's mapped MMIO window. */
static inline u32 pmc_core_reg_read(struct pmc *pmc, int reg_offset)
{
	return readl(pmc->regbase + reg_offset);
}
63
/* Write @val to the 32-bit register at @reg_offset within the PMC's MMIO window. */
static inline void pmc_core_reg_write(struct pmc *pmc, int reg_offset,
				      u32 val)
{
	writel(val, pmc->regbase + reg_offset);
}
69
70 static inline u64 pmc_core_adjust_slp_s0_step(struct pmc *pmc, u32 value)
71 {
72         /*
73          * ADL PCH does not have the SLP_S0 counter and LPM Residency counters are
74          * used as a workaround which uses 30.5 usec tick. All other client
75          * programs have the legacy SLP_S0 residency counter that is using the 122
76          * usec tick.
77          */
78         const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;
79
80         if (pmc->map == &adl_reg_map)
81                 return (u64)value * GET_X2_COUNTER((u64)lpm_adj_x2);
82         else
83                 return (u64)value * pmc->map->slp_s0_res_counter_step;
84 }
85
/*
 * Trigger a CF9 global reset by setting the ETR3 CF9GR bit.
 *
 * Returns 0 on success, -EOPNOTSUPP when the platform has no ETR3
 * register, -EACCES when CF9 writes are locked by firmware, or -EIO
 * when the written bit does not read back as set.
 */
static int set_etr3(struct pmc_dev *pmcdev)
{
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_reg_map *map = pmc->map;
	u32 reg;

	if (!map->etr3_offset)
		return -EOPNOTSUPP;

	/* Lock released automatically on every return path below. */
	guard(mutex)(&pmcdev->lock);

	/* check if CF9 is locked */
	reg = pmc_core_reg_read(pmc, map->etr3_offset);
	if (reg & ETR3_CF9LOCK)
		return -EACCES;

	/* write CF9 global reset bit */
	reg |= ETR3_CF9GR;
	pmc_core_reg_write(pmc, map->etr3_offset, reg);

	/* Read back to verify the reset bit actually latched. */
	reg = pmc_core_reg_read(pmc, map->etr3_offset);
	if (!(reg & ETR3_CF9GR))
		return -EIO;

	return 0;
}
112 static umode_t etr3_is_visible(struct kobject *kobj,
113                                 struct attribute *attr,
114                                 int idx)
115 {
116         struct device *dev = kobj_to_dev(kobj);
117         struct pmc_dev *pmcdev = dev_get_drvdata(dev);
118         struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
119         const struct pmc_reg_map *map = pmc->map;
120         u32 reg;
121
122         scoped_guard(mutex, &pmcdev->lock)
123                 reg = pmc_core_reg_read(pmc, map->etr3_offset);
124
125         return reg & ETR3_CF9LOCK ? attr->mode & (SYSFS_PREALLOC | 0444) : attr->mode;
126 }
127
/*
 * sysfs show for etr3: report the CF9GR and CF9LOCK bits of the ETR3
 * register as a hex word.
 *
 * NOTE(review): the output carries no trailing newline; sysfs convention
 * usually appends one — confirm intent before changing the format string.
 */
static ssize_t etr3_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_reg_map *map = pmc->map;
	u32 reg;

	if (!map->etr3_offset)
		return -EOPNOTSUPP;

	scoped_guard(mutex, &pmcdev->lock) {
		reg = pmc_core_reg_read(pmc, map->etr3_offset);
		/* Expose only the two bits user space cares about. */
		reg &= ETR3_CF9GR | ETR3_CF9LOCK;
	}

	return sysfs_emit(buf, "0x%08x", reg);
}
146
147 static ssize_t etr3_store(struct device *dev,
148                                   struct device_attribute *attr,
149                                   const char *buf, size_t len)
150 {
151         struct pmc_dev *pmcdev = dev_get_drvdata(dev);
152         int err;
153         u32 reg;
154
155         err = kstrtouint(buf, 16, &reg);
156         if (err)
157                 return err;
158
159         /* allow only CF9 writes */
160         if (reg != ETR3_CF9GR)
161                 return -EINVAL;
162
163         err = set_etr3(pmcdev);
164         if (err)
165                 return err;
166
167         return len;
168 }
static DEVICE_ATTR_RW(etr3);

/* sysfs attributes for the PMC device; visibility is gated by etr3_is_visible(). */
static struct attribute *pmc_attrs[] = {
	&dev_attr_etr3.attr,
	NULL
};

static const struct attribute_group pmc_attr_group = {
	.attrs = pmc_attrs,
	.is_visible = etr3_is_visible,
};

/* NULL-terminated group list handed to the device core at registration. */
static const struct attribute_group *pmc_dev_groups[] = {
	&pmc_attr_group,
	NULL
};
185
/*
 * debugfs getter: raw SLP_S0 residency counter converted to time units
 * via the per-platform step (see pmc_core_adjust_slp_s0_step()).
 */
static int pmc_core_dev_state_get(void *data, u64 *val)
{
	struct pmc *pmc = data;
	const struct pmc_reg_map *map = pmc->map;
	u32 value;

	value = pmc_core_reg_read(pmc, map->slp_s0_offset);
	*val = pmc_core_adjust_slp_s0_step(pmc, value);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");
199
/*
 * debugfs getter: PSON residency counter scaled by the platform's
 * counter step into an absolute value.
 */
static int pmc_core_pson_residency_get(void *data, u64 *val)
{
	struct pmc *pmc = data;
	const struct pmc_reg_map *map = pmc->map;
	u32 value;

	value = pmc_core_reg_read(pmc, map->pson_residency_offset);
	*val = (u64)value * map->pson_residency_counter_step;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_pson_residency, pmc_core_pson_residency_get, NULL, "%llu\n");
213
214 static int pmc_core_check_read_lock_bit(struct pmc *pmc)
215 {
216         u32 value;
217
218         value = pmc_core_reg_read(pmc, pmc->map->pm_cfg_offset);
219         return value & BIT(pmc->map->pm_read_disable_bit);
220 }
221
222 static void pmc_core_slps0_display(struct pmc *pmc, struct device *dev,
223                                    struct seq_file *s)
224 {
225         const struct pmc_bit_map **maps = pmc->map->slps0_dbg_maps;
226         const struct pmc_bit_map *map;
227         int offset = pmc->map->slps0_dbg_offset;
228         u32 data;
229
230         while (*maps) {
231                 map = *maps;
232                 data = pmc_core_reg_read(pmc, offset);
233                 offset += 4;
234                 while (map->name) {
235                         if (dev)
236                                 dev_info(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
237                                         map->name,
238                                         data & map->bit_mask ? "Yes" : "No");
239                         if (s)
240                                 seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
241                                            map->name,
242                                            data & map->bit_mask ? "Yes" : "No");
243                         ++map;
244                 }
245                 ++maps;
246         }
247 }
248
/* Count the entries in a NULL-terminated array of bit maps. */
static unsigned int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps)
{
	unsigned int count = 0;

	while (maps[count])
		count++;

	return count;
}
258
259 static void pmc_core_lpm_display(struct pmc *pmc, struct device *dev,
260                                  struct seq_file *s, u32 offset, int pmc_index,
261                                  const char *str,
262                                  const struct pmc_bit_map **maps)
263 {
264         unsigned int index, idx, len = 32, arr_size;
265         u32 bit_mask, *lpm_regs;
266
267         arr_size = pmc_core_lpm_get_arr_size(maps);
268         lpm_regs = kmalloc_array(arr_size, sizeof(*lpm_regs), GFP_KERNEL);
269         if (!lpm_regs)
270                 return;
271
272         for (index = 0; index < arr_size; index++) {
273                 lpm_regs[index] = pmc_core_reg_read(pmc, offset);
274                 offset += 4;
275         }
276
277         for (idx = 0; idx < arr_size; idx++) {
278                 if (dev)
279                         dev_info(dev, "\nPMC%d:LPM_%s_%d:\t0x%x\n", pmc_index, str, idx,
280                                 lpm_regs[idx]);
281                 if (s)
282                         seq_printf(s, "\nPMC%d:LPM_%s_%d:\t0x%x\n", pmc_index, str, idx,
283                                    lpm_regs[idx]);
284                 for (index = 0; maps[idx][index].name && index < len; index++) {
285                         bit_mask = maps[idx][index].bit_mask;
286                         if (dev)
287                                 dev_info(dev, "PMC%d:%-30s %-30d\n", pmc_index,
288                                         maps[idx][index].name,
289                                         lpm_regs[idx] & bit_mask ? 1 : 0);
290                         if (s)
291                                 seq_printf(s, "PMC%d:%-30s %-30d\n", pmc_index,
292                                            maps[idx][index].name,
293                                            lpm_regs[idx] & bit_mask ? 1 : 0);
294                 }
295         }
296
297         kfree(lpm_regs);
298 }
299
/*
 * Arms SLP_S0 event latching; consumed (cleared) by
 * pmc_core_slps0_dbg_latch(). Presumably set via a debugfs write handler
 * not visible in this chunk — confirm against the rest of the file.
 */
static bool slps0_dbg_latch;
301
/* Read a single byte at @offset within the PMC's mapped MMIO window. */
static inline u8 pmc_core_reg_read_byte(struct pmc *pmc, int offset)
{
	return readb(pmc->regbase + offset);
}
306
/*
 * Print one PPFEAR entry: a set bit in @pf_reg means the IP's power is
 * gated ("Off"), a clear bit means it is powered ("On").
 */
static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
				 int pmc_index, u8 pf_reg, const struct pmc_bit_map **pf_map)
{
	seq_printf(s, "PMC%d:PCH IP: %-2d - %-32s\tState: %s\n",
		   pmc_index, ip, pf_map[idx][index].name,
		   pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
}
314
315 static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
316 {
317         struct pmc_dev *pmcdev = s->private;
318         unsigned int i;
319
320         for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
321                 struct pmc *pmc = pmcdev->pmcs[i];
322                 const struct pmc_bit_map **maps;
323                 u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
324                 unsigned int index, iter, idx, ip = 0;
325
326                 if (!pmc)
327                         continue;
328
329                 maps = pmc->map->pfear_sts;
330                 iter = pmc->map->ppfear0_offset;
331
332                 for (index = 0; index < pmc->map->ppfear_buckets &&
333                      index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
334                         pf_regs[index] = pmc_core_reg_read_byte(pmc, iter);
335
336                 for (idx = 0; maps[idx]; idx++) {
337                         for (index = 0; maps[idx][index].name &&
338                              index < pmc->map->ppfear_buckets * 8; ip++, index++)
339                                 pmc_core_display_map(s, index, idx, ip, i,
340                                                      pf_regs[index / 8], maps);
341                 }
342         }
343
344         return 0;
345 }
346 DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear);
347
348 /* This function should return link status, 0 means ready */
349 static int pmc_core_mtpmc_link_status(struct pmc *pmc)
350 {
351         u32 value;
352
353         value = pmc_core_reg_read(pmc, SPT_PMC_PM_STS_OFFSET);
354         return value & BIT(SPT_PMC_MSG_FULL_STS_BIT);
355 }
356
/*
 * Post the XRAM address in *@addr_xram to the MTPMC mailbox.
 *
 * Waits for the mailbox to drain (up to NUM_RETRIES polls, 5 ms apart);
 * returns -EBUSY if it never becomes ready, 0 once the message is posted.
 */
static int pmc_core_send_msg(struct pmc *pmc, u32 *addr_xram)
{
	u32 dest;
	int timeout;

	for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
		if (pmc_core_mtpmc_link_status(pmc) == 0)
			break;
		msleep(5);
	}

	/* Recheck once: the link may have drained during the final sleep. */
	if (timeout <= 0 && pmc_core_mtpmc_link_status(pmc))
		return -EBUSY;

	dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
	pmc_core_reg_write(pmc, SPT_PMC_MTPMC_OFFSET, dest);
	return 0;
}
375
/*
 * debugfs show: MPHY core power-gating state, fetched from PMC XRAM via
 * two MTPMC/MFPMC mailbox transactions (low and high status words).
 */
static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_bit_map *map = pmc->map->mphy_sts;
	u32 mphy_core_reg_low, mphy_core_reg_high;
	u32 val_low, val_high;
	unsigned int index;
	int err = 0;

	/* XRAM reads disabled by BIOS policy — nothing to show. */
	if (pmcdev->pmc_xram_read_bit) {
		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
		return 0;
	}

	mphy_core_reg_low  = (SPT_PMC_MPHY_CORE_STS_0 << 16);
	mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);

	/* Serialize the whole two-message mailbox exchange. */
	guard(mutex)(&pmcdev->lock);

	err = pmc_core_send_msg(pmc, &mphy_core_reg_low);
	if (err)
		return err;

	/* ~10 ms observed hardware latency between MTPMC post and MFPMC result. */
	msleep(10);
	val_low = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);

	err = pmc_core_send_msg(pmc, &mphy_core_reg_high);
	if (err)
		return err;

	msleep(10);
	val_high = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);

	/* First eight map entries decode from the low status word... */
	for (index = 0; index < 8 && map[index].name; index++) {
		seq_printf(s, "%-32s\tState: %s\n",
			   map[index].name,
			   map[index].bit_mask & val_low ? "Not power gated" :
			   "Power gated");
	}

	/* ...the rest decode from the high status word. */
	for (index = 8; map[index].name; index++) {
		seq_printf(s, "%-32s\tState: %s\n",
			   map[index].name,
			   map[index].bit_mask & val_high ? "Not power gated" :
			   "Power gated");
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg);
427
/*
 * debugfs show: Active/Idle state of each PLL, fetched from PMC XRAM via
 * one MTPMC/MFPMC mailbox transaction.
 */
static int pmc_core_pll_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_bit_map *map = pmc->map->pll_sts;
	u32 mphy_common_reg, val;
	unsigned int index;
	int err = 0;

	/* XRAM reads disabled by BIOS policy — nothing to show. */
	if (pmcdev->pmc_xram_read_bit) {
		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
		return 0;
	}

	mphy_common_reg  = (SPT_PMC_MPHY_COM_STS_0 << 16);
	guard(mutex)(&pmcdev->lock);

	err = pmc_core_send_msg(pmc, &mphy_common_reg);
	if (err)
		return err;

	/* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
	msleep(10);
	val = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);

	for (index = 0; map[index].name ; index++) {
		seq_printf(s, "%-32s\tState: %s\n",
			   map[index].name,
			   map[index].bit_mask & val ? "Active" : "Idle");
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
462
/*
 * pmc_core_send_ltr_ignore() - set or clear one LTR ignore bit.
 * @pmcdev: pmc device structure
 * @value:  contiguous index as presented by the ltr_show output
 * @ignore: non-zero to ignore the IP's LTR, zero to honor it again
 *
 * Translates the user-visible contiguous index into a (PMC, bit) pair
 * and updates that PMC's LTR ignore register under the device lock.
 * Returns 0 on success or -EINVAL when the index maps to no PMC.
 */
int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
{
	struct pmc *pmc;
	const struct pmc_reg_map *map;
	u32 reg;
	unsigned int pmc_index;
	int ltr_index;

	ltr_index = value;
	/* For platforms with multiple pmcs, ltr index value given by user
	 * is based on the contiguous indexes from ltr_show output.
	 * pmc index and ltr index needs to be calculated from it.
	 */
	for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs) && ltr_index >= 0; pmc_index++) {
		pmc = pmcdev->pmcs[pmc_index];

		if (!pmc)
			continue;

		map = pmc->map;
		/* Index falls inside this PMC's range — found it. */
		if (ltr_index <= map->ltr_ignore_max)
			break;

		/* Along with IP names, ltr_show map includes CURRENT_PLATFORM
		 * and AGGREGATED_SYSTEM values per PMC. Take these two index
		 * values into account in ltr_index calculation. Also, to start
		 * ltr index from zero for next pmc, subtract it by 1.
		 */
		ltr_index = ltr_index - (map->ltr_ignore_max + 2) - 1;
	}

	if (pmc_index >= ARRAY_SIZE(pmcdev->pmcs) || ltr_index < 0)
		return -EINVAL;

	pr_debug("ltr_ignore for pmc%d: ltr_index:%d\n", pmc_index, ltr_index);

	guard(mutex)(&pmcdev->lock);

	/* Read-modify-write the single ignore bit under the lock. */
	reg = pmc_core_reg_read(pmc, map->ltr_ignore_offset);
	if (ignore)
		reg |= BIT(ltr_index);
	else
		reg &= ~BIT(ltr_index);
	pmc_core_reg_write(pmc, map->ltr_ignore_offset, reg);

	return 0;
}
510
511 static ssize_t pmc_core_ltr_write(struct pmc_dev *pmcdev,
512                                   const char __user *userbuf,
513                                   size_t count, int ignore)
514 {
515         u32 value;
516         int err;
517
518         err = kstrtou32_from_user(userbuf, count, 10, &value);
519         if (err)
520                 return err;
521
522         err = pmc_core_send_ltr_ignore(pmcdev, value, ignore);
523
524         return err ?: count;
525 }
526
/* debugfs write: set the LTR ignore bit for the given contiguous index. */
static ssize_t pmc_core_ltr_ignore_write(struct file *file,
					 const char __user *userbuf,
					 size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct pmc_dev *pmcdev = s->private;

	return pmc_core_ltr_write(pmcdev, userbuf, count, 1);
}

/* The file exists for its write handler; reads show nothing. */
static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
{
	return 0;
}
DEFINE_SHOW_STORE_ATTRIBUTE(pmc_core_ltr_ignore);
542
/* debugfs write: clear the LTR ignore bit for the given contiguous index. */
static ssize_t pmc_core_ltr_restore_write(struct file *file,
					  const char __user *userbuf,
					  size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct pmc_dev *pmcdev = s->private;

	return pmc_core_ltr_write(pmcdev, userbuf, count, 0);
}

/* The file exists for its write handler; reads show nothing. */
static int pmc_core_ltr_restore_show(struct seq_file *s, void *unused)
{
	return 0;
}
DEFINE_SHOW_STORE_ATTRIBUTE(pmc_core_ltr_restore);
558
/*
 * Set or clear the SLP_S0 event latch bit.
 *
 * @reset=true always clears CNP_PMC_LATCH_SLPS0_EVENTS; @reset=false
 * sets it, but only when the module-level slps0_dbg_latch flag has been
 * armed. The flag is consumed on every effective pass.
 */
static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
{
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_reg_map *map = pmc->map;
	u32 fd;

	guard(mutex)(&pmcdev->lock);

	/* Nothing pending and not resetting — leave the register alone. */
	if (!reset && !slps0_dbg_latch)
		return;

	fd = pmc_core_reg_read(pmc, map->slps0_dbg_offset);
	if (reset)
		fd &= ~CNP_PMC_LATCH_SLPS0_EVENTS;
	else
		fd |= CNP_PMC_LATCH_SLPS0_EVENTS;
	pmc_core_reg_write(pmc, map->slps0_dbg_offset, fd);

	slps0_dbg_latch = false;
}
579
/*
 * debugfs show: apply any pending latch request, dump the SLP_S0 debug
 * status, then clear the latch bit (reset=true) for the next capture.
 */
static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;

	pmc_core_slps0_dbg_latch(pmcdev, false);
	pmc_core_slps0_display(pmcdev->pmcs[PMC_IDX_MAIN], NULL, s);
	pmc_core_slps0_dbg_latch(pmcdev, true);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_slps0_dbg);
591
592 static u32 convert_ltr_scale(u32 val)
593 {
594         /*
595          * As per PCIE specification supporting document
596          * ECN_LatencyTolnReporting_14Aug08.pdf the Latency
597          * Tolerance Reporting data payload is encoded in a
598          * 3 bit scale and 10 bit value fields. Values are
599          * multiplied by the indicated scale to yield an absolute time
600          * value, expressible in a range from 1 nanosecond to
601          * 2^25*(2^10-1) = 34,326,183,936 nanoseconds.
602          *
603          * scale encoding is as follows:
604          *
605          * ----------------------------------------------
606          * |scale factor        |       Multiplier (ns) |
607          * ----------------------------------------------
608          * |    0               |       1               |
609          * |    1               |       32              |
610          * |    2               |       1024            |
611          * |    3               |       32768           |
612          * |    4               |       1048576         |
613          * |    5               |       33554432        |
614          * |    6               |       Invalid         |
615          * |    7               |       Invalid         |
616          * ----------------------------------------------
617          */
618         if (val > 5) {
619                 pr_warn("Invalid LTR scale factor.\n");
620                 return 0;
621         }
622
623         return 1U << (5 * val);
624 }
625
/*
 * debugfs show: raw and decoded LTR (Latency Tolerance Reporting) value
 * for every IP of every PMC, plus whether its LTR is currently ignored.
 * The printed index is contiguous across PMCs and matches the values
 * accepted by pmc_core_send_ltr_ignore().
 */
static int pmc_core_ltr_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
	u32 ltr_raw_data, scale, val;
	u16 snoop_ltr, nonsnoop_ltr;
	unsigned int i, index, ltr_index = 0;

	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
		struct pmc *pmc;
		const struct pmc_bit_map *map;
		u32 ltr_ign_reg;

		pmc = pmcdev->pmcs[i];
		if (!pmc)
			continue;

		scoped_guard(mutex, &pmcdev->lock)
			ltr_ign_reg = pmc_core_reg_read(pmc, pmc->map->ltr_ignore_offset);

		map = pmc->map->ltr_show_sts;
		for (index = 0; map[index].name; index++) {
			bool ltr_ign_data;

			/* Entries past ltr_ignore_max have no ignore bit. */
			if (index > pmc->map->ltr_ignore_max)
				ltr_ign_data = false;
			else
				ltr_ign_data = ltr_ign_reg & BIT(index);

			decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
			/* In this map, bit_mask holds the register offset. */
			ltr_raw_data = pmc_core_reg_read(pmc,
							 map[index].bit_mask);
			/* Snoop field in the low half, non-snoop in the high half. */
			snoop_ltr = ltr_raw_data & ~MTPMC_MASK;
			nonsnoop_ltr = (ltr_raw_data >> 0x10) & ~MTPMC_MASK;

			if (FIELD_GET(LTR_REQ_NONSNOOP, ltr_raw_data)) {
				scale = FIELD_GET(LTR_DECODED_SCALE, nonsnoop_ltr);
				val = FIELD_GET(LTR_DECODED_VAL, nonsnoop_ltr);
				decoded_non_snoop_ltr = val * convert_ltr_scale(scale);
			}
			if (FIELD_GET(LTR_REQ_SNOOP, ltr_raw_data)) {
				scale = FIELD_GET(LTR_DECODED_SCALE, snoop_ltr);
				val = FIELD_GET(LTR_DECODED_VAL, snoop_ltr);
				decoded_snoop_ltr = val * convert_ltr_scale(scale);
			}

			seq_printf(s, "%d\tPMC%d:%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\tLTR_IGNORE: %d\n",
				   ltr_index, i, map[index].name, ltr_raw_data,
				   decoded_non_snoop_ltr,
				   decoded_snoop_ltr, ltr_ign_data);
			ltr_index++;
		}
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);
682
/*
 * debugfs show: per-PMC S0ix blocker counters. Only map entries with a
 * non-zero blk field have counters; each consumes blk * S0IX_BLK_SIZE
 * bytes, so the running offset advances only for blocker entries.
 */
static int pmc_core_s0ix_blocker_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	unsigned int pmcidx;

	for (pmcidx = 0; pmcidx < ARRAY_SIZE(pmcdev->pmcs); pmcidx++) {
		const struct pmc_bit_map **maps;
		unsigned int arr_size, r_idx;
		u32 offset, counter;
		struct pmc *pmc;

		pmc = pmcdev->pmcs[pmcidx];
		if (!pmc)
			continue;
		maps = pmc->map->s0ix_blocker_maps;
		offset = pmc->map->s0ix_blocker_offset;
		arr_size = pmc_core_lpm_get_arr_size(maps);

		for (r_idx = 0; r_idx < arr_size; r_idx++) {
			const struct pmc_bit_map *map;

			for (map = maps[r_idx]; map->name; map++) {
				/* Skip entries that have no blocker counter. */
				if (!map->blk)
					continue;
				counter = pmc_core_reg_read(pmc, offset);
				seq_printf(s, "PMC%d:%-30s %-30d\n", pmcidx,
					   map->name, counter);
				offset += map->blk * S0IX_BLK_SIZE;
			}
		}
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_s0ix_blocker);
717
/*
 * Save each PMC's LTR ignore register, then set every valid ignore bit.
 * Presumably called on the suspend path — undone by
 * pmc_core_ltr_restore_all().
 */
static void pmc_core_ltr_ignore_all(struct pmc_dev *pmcdev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) {
		struct pmc *pmc;
		u32 ltr_ign;

		pmc = pmcdev->pmcs[i];
		if (!pmc)
			continue;

		/* Lock scope is one loop iteration. */
		guard(mutex)(&pmcdev->lock);
		pmc->ltr_ign = pmc_core_reg_read(pmc, pmc->map->ltr_ignore_offset);

		/* ltr_ignore_max is the max index value for LTR ignore register */
		ltr_ign = pmc->ltr_ign | GENMASK(pmc->map->ltr_ignore_max, 0);
		pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, ltr_ign);
	}

	/*
	 * Ignoring ME during suspend is blocking platforms with ADL PCH to get to
	 * deeper S0ix substate. Index 6 is presumably the ME entry — confirm
	 * against the ltr_show map.
	 */
	pmc_core_send_ltr_ignore(pmcdev, 6, 0);
}
744
745 static void pmc_core_ltr_restore_all(struct pmc_dev *pmcdev)
746 {
747         unsigned int i;
748
749         for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) {
750                 struct pmc *pmc;
751
752                 pmc = pmcdev->pmcs[i];
753                 if (!pmc)
754                         continue;
755
756                 guard(mutex)(&pmcdev->lock);
757                 pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, pmc->ltr_ign);
758         }
759 }
760
761 static inline u64 adjust_lpm_residency(struct pmc *pmc, u32 offset,
762                                        const int lpm_adj_x2)
763 {
764         u64 lpm_res = pmc_core_reg_read(pmc, offset);
765
766         return GET_X2_COUNTER((u64)lpm_adj_x2 * lpm_res);
767 }
768
/*
 * debugfs show: residency of each enabled low power mode (substate) for
 * the main PMC. Counters sit at 4-byte strides from lpm_residency_offset
 * and are scaled by adjust_lpm_residency().
 */
static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;
	u32 offset = pmc->map->lpm_residency_offset;
	int mode;

	seq_printf(s, "%-10s %-15s\n", "Substate", "Residency");

	pmc_for_each_mode(mode, pmcdev) {
		seq_printf(s, "%-10s %-15llu\n", pmc_lpm_modes[mode],
			   adjust_lpm_residency(pmc, offset + (4 * mode), lpm_adj_x2));
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res);
787
788 static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
789 {
790         struct pmc_dev *pmcdev = s->private;
791         unsigned int i;
792
793         for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
794                 struct pmc *pmc = pmcdev->pmcs[i];
795                 const struct pmc_bit_map **maps;
796                 u32 offset;
797
798                 if (!pmc)
799                         continue;
800                 maps = pmc->map->lpm_sts;
801                 offset = pmc->map->lpm_status_offset;
802                 pmc_core_lpm_display(pmc, NULL, s, offset, i, "STATUS", maps);
803         }
804
805         return 0;
806 }
807 DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs);
808
809 static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
810 {
811         struct pmc_dev *pmcdev = s->private;
812         unsigned int i;
813
814         for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
815                 struct pmc *pmc = pmcdev->pmcs[i];
816                 const struct pmc_bit_map **maps;
817                 u32 offset;
818
819                 if (!pmc)
820                         continue;
821                 maps = pmc->map->lpm_sts;
822                 offset = pmc->map->lpm_live_status_offset;
823                 pmc_core_lpm_display(pmc, NULL, s, offset, i, "LIVE_STATUS", maps);
824         }
825
826         return 0;
827 }
828 DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);
829
830 static void pmc_core_substate_req_header_show(struct seq_file *s, int pmc_index)
831 {
832         struct pmc_dev *pmcdev = s->private;
833         int mode;
834
835         seq_printf(s, "%30s |", "Element");
836         pmc_for_each_mode(mode, pmcdev)
837                 seq_printf(s, " %9s |", pmc_lpm_modes[mode]);
838
839         seq_printf(s, " %9s |", "Status");
840         seq_printf(s, " %11s |\n", "Live Status");
841 }
842
843 static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
844 {
845         struct pmc_dev *pmcdev = s->private;
846         u32 sts_offset;
847         u32 sts_offset_live;
848         u32 *lpm_req_regs;
849         unsigned int mp, pmc_index;
850         int num_maps;
851
852         for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs); ++pmc_index) {
853                 struct pmc *pmc = pmcdev->pmcs[pmc_index];
854                 const struct pmc_bit_map **maps;
855
856                 if (!pmc)
857                         continue;
858
859                 maps = pmc->map->lpm_sts;
860                 num_maps = pmc->map->lpm_num_maps;
861                 sts_offset = pmc->map->lpm_status_offset;
862                 sts_offset_live = pmc->map->lpm_live_status_offset;
863                 lpm_req_regs = pmc->lpm_req_regs;
864
865                 /*
866                  * When there are multiple PMCs, though the PMC may exist, the
867                  * requirement register discovery could have failed so check
868                  * before accessing.
869                  */
870                 if (!lpm_req_regs)
871                         continue;
872
873                 /* Display the header */
874                 pmc_core_substate_req_header_show(s, pmc_index);
875
876                 /* Loop over maps */
877                 for (mp = 0; mp < num_maps; mp++) {
878                         u32 req_mask = 0;
879                         u32 lpm_status;
880                         u32 lpm_status_live;
881                         const struct pmc_bit_map *map;
882                         int mode, i, len = 32;
883
884                         /*
885                          * Capture the requirements and create a mask so that we only
886                          * show an element if it's required for at least one of the
887                          * enabled low power modes
888                          */
889                         pmc_for_each_mode(mode, pmcdev)
890                                 req_mask |= lpm_req_regs[mp + (mode * num_maps)];
891
892                         /* Get the last latched status for this map */
893                         lpm_status = pmc_core_reg_read(pmc, sts_offset + (mp * 4));
894
895                         /* Get the runtime status for this map */
896                         lpm_status_live = pmc_core_reg_read(pmc, sts_offset_live + (mp * 4));
897
898                         /*  Loop over elements in this map */
899                         map = maps[mp];
900                         for (i = 0; map[i].name && i < len; i++) {
901                                 u32 bit_mask = map[i].bit_mask;
902
903                                 if (!(bit_mask & req_mask)) {
904                                         /*
905                                          * Not required for any enabled states
906                                          * so don't display
907                                          */
908                                         continue;
909                                 }
910
911                                 /* Display the element name in the first column */
912                                 seq_printf(s, "pmc%d: %26s |", pmc_index, map[i].name);
913
914                                 /* Loop over the enabled states and display if required */
915                                 pmc_for_each_mode(mode, pmcdev) {
916                                         bool required = lpm_req_regs[mp + (mode * num_maps)] &
917                                                         bit_mask;
918                                         seq_printf(s, " %9s |", required ? "Required" : " ");
919                                 }
920
921                                 /* In Status column, show the last captured state of this agent */
922                                 seq_printf(s, " %9s |", lpm_status & bit_mask ? "Yes" : " ");
923
924                                 /* In Live status column, show the live state of this agent */
925                                 seq_printf(s, " %11s |", lpm_status_live & bit_mask ? "Yes" : " ");
926
927                                 seq_puts(s, "\n");
928                         }
929                 }
930         }
931         return 0;
932 }
933 DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs);
934
935 static unsigned int pmc_core_get_crystal_freq(void)
936 {
937         unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
938
939         if (boot_cpu_data.cpuid_level < 0x15)
940                 return 0;
941
942         eax_denominator = ebx_numerator = ecx_hz = edx = 0;
943
944         /* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
945         cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
946
947         if (ebx_numerator == 0 || eax_denominator == 0)
948                 return 0;
949
950         return ecx_hz;
951 }
952
953 static int pmc_core_die_c6_us_show(struct seq_file *s, void *unused)
954 {
955         struct pmc_dev *pmcdev = s->private;
956         u64 die_c6_res, count;
957         int ret;
958
959         if (!pmcdev->crystal_freq) {
960                 dev_warn_once(&pmcdev->pdev->dev, "Crystal frequency unavailable\n");
961                 return -ENXIO;
962         }
963
964         ret = pmt_telem_read(pmcdev->punit_ep, pmcdev->die_c6_offset,
965                              &count, 1);
966         if (ret)
967                 return ret;
968
969         die_c6_res = div64_u64(count * HZ_PER_MHZ, pmcdev->crystal_freq);
970         seq_printf(s, "%llu\n", die_c6_res);
971
972         return 0;
973 }
974 DEFINE_SHOW_ATTRIBUTE(pmc_core_die_c6_us);
975
976 static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
977 {
978         struct pmc_dev *pmcdev = s->private;
979         struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
980         bool c10;
981         u32 reg;
982         int mode;
983
984         reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
985         if (reg & LPM_STS_LATCH_MODE) {
986                 seq_puts(s, "c10");
987                 c10 = false;
988         } else {
989                 seq_puts(s, "[c10]");
990                 c10 = true;
991         }
992
993         pmc_for_each_mode(mode, pmcdev) {
994                 if ((BIT(mode) & reg) && !c10)
995                         seq_printf(s, " [%s]", pmc_lpm_modes[mode]);
996                 else
997                         seq_printf(s, " %s", pmc_lpm_modes[mode]);
998         }
999
1000         seq_puts(s, " clear\n");
1001
1002         return 0;
1003 }
1004
/*
 * debugfs write handler for lpm_latch_mode. Accepted strings are any
 * enabled substate name (e.g. 'S0i2.0'), 'c10', or 'clear'; anything else
 * returns -EINVAL.
 */
static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
					     const char __user *userbuf,
					     size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	bool clear = false, c10 = false;
	unsigned char buf[8];
	int m, mode;
	u32 reg;

	/* Reserve one byte for the NUL terminator */
	if (count > sizeof(buf) - 1)
		return -EINVAL;
	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;
	buf[count] = '\0';

	/*
	 * Allowed strings are:
	 *	Any enabled substate, e.g. 'S0i2.0'
	 *	'c10'
	 *	'clear'
	 */
	mode = sysfs_match_string(pmc_lpm_modes, buf);

	/* Check string matches enabled mode */
	pmc_for_each_mode(m, pmcdev)
		if (mode == m)
			break;

	/* Not an enabled substate: fall through to 'clear'/'c10' handling */
	if (mode != m || mode < 0) {
		if (sysfs_streq(buf, "clear"))
			clear = true;
		else if (sysfs_streq(buf, "c10"))
			c10 = true;
		else
			return -EINVAL;
	}

	if (clear) {
		guard(mutex)(&pmcdev->lock);

		/* Clear all previously latched LPM events via ETR3 */
		reg = pmc_core_reg_read(pmc, pmc->map->etr3_offset);
		reg |= ETR3_CLEAR_LPM_EVENTS;
		pmc_core_reg_write(pmc, pmc->map->etr3_offset, reg);

		return count;
	}

	if (c10) {
		guard(mutex)(&pmcdev->lock);

		/* Clearing the latch-mode bit selects C10 triggered latching */
		reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
		reg &= ~LPM_STS_LATCH_MODE;
		pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);

		return count;
	}

	/*
	 * For LPM mode latching we set the latch enable bit and selected mode
	 * and clear everything else.
	 */
	reg = LPM_STS_LATCH_MODE | BIT(mode);
	guard(mutex)(&pmcdev->lock);
	pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);

	return count;
}
DEFINE_PMC_CORE_ATTR_WRITE(pmc_core_lpm_latch_mode);
1076
1077 static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
1078 {
1079         struct pmc *pmc = s->private;
1080         const struct pmc_bit_map *map = pmc->map->msr_sts;
1081         u64 pcstate_count;
1082         unsigned int index;
1083
1084         for (index = 0; map[index].name ; index++) {
1085                 if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
1086                         continue;
1087
1088                 pcstate_count *= 1000;
1089                 do_div(pcstate_count, tsc_khz);
1090                 seq_printf(s, "%-8s : %llu\n", map[index].name,
1091                            pcstate_count);
1092         }
1093
1094         return 0;
1095 }
1096 DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);
1097
1098 static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order)
1099 {
1100         unsigned int i, j;
1101
1102         if (!lpm_pri)
1103                 return false;
1104         /*
1105          * Each byte contains the priority level for 2 modes (7:4 and 3:0).
1106          * In a 32 bit register this allows for describing 8 modes. Store the
1107          * levels and look for values out of range.
1108          */
1109         for (i = 0; i < 8; i++) {
1110                 int level = lpm_pri & GENMASK(3, 0);
1111
1112                 if (level >= LPM_MAX_NUM_MODES)
1113                         return false;
1114
1115                 mode_order[i] = level;
1116                 lpm_pri >>= 4;
1117         }
1118
1119         /* Check that we have unique values */
1120         for (i = 0; i < LPM_MAX_NUM_MODES - 1; i++)
1121                 for (j = i + 1; j < LPM_MAX_NUM_MODES; j++)
1122                         if (mode_order[i] == mode_order[j])
1123                                 return false;
1124
1125         return true;
1126 }
1127
/*
 * Discover which low power substates the platform has enabled and record
 * them, in priority order, in pmcdev->lpm_en_modes.
 */
void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev)
{
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	u8 pri_order[LPM_MAX_NUM_MODES] = LPM_DEFAULT_PRI;
	u8 mode_order[LPM_MAX_NUM_MODES];
	u32 lpm_pri;
	u32 lpm_en;
	unsigned int i;
	int mode, p;

	/* Use LPM Maps to indicate support for substates */
	if (!pmc->map->lpm_num_maps)
		return;

	lpm_en = pmc_core_reg_read(pmc, pmc->map->lpm_en_offset);
	/*
	 * For MTL, bit 31 is not an lpm mode but an enable bit. The lower
	 * byte is enough to cover the number of lpm modes for all platforms,
	 * hence mask off the upper 3 bytes.
	 */
	pmcdev->num_lpm_modes = hweight32(lpm_en & 0xFF);

	/* Read 32 bit LPM_PRI register */
	lpm_pri = pmc_core_reg_read(pmc, pmc->map->lpm_priority_offset);


	/*
	 * If lpm_pri value passes verification, then override the default
	 * modes here. Otherwise stick with the default.
	 */
	if (pmc_core_pri_verify(lpm_pri, mode_order))
		/* Get list of modes in priority order */
		for (mode = 0; mode < LPM_MAX_NUM_MODES; mode++)
			pri_order[mode_order[mode]] = mode;
	else
		dev_warn(&pmcdev->pdev->dev,
			 "Assuming a default substate order for this platform\n");

	/*
	 * Loop through all modes from lowest to highest priority,
	 * and capture all enabled modes in order
	 */
	i = 0;
	for (p = LPM_MAX_NUM_MODES - 1; p >= 0; p--) {
		int mode = pri_order[p];

		/* Skip modes the hardware does not report as enabled */
		if (!(BIT(mode) & lpm_en))
			continue;

		pmcdev->lpm_en_modes[i++] = mode;
	}
}
1179
/*
 * Resolve and ioremap the primary PMC MMIO base. Prefer the SLP_S0 residency
 * counter address from the ACPI LPIT; fall back to the default base when no
 * LPIT entry exists, rejecting it if that address is ordinary RAM.
 * Returns 0 on success, -ENODEV or -ENOMEM on failure.
 */
int get_primary_reg_base(struct pmc *pmc)
{
	u64 slp_s0_addr;

	if (lpit_read_residency_count_address(&slp_s0_addr)) {
		/* No LPIT entry: use the default PMC MMIO base address */
		pmc->base_addr = PMC_BASE_ADDR_DEFAULT;

		if (page_is_ram(PHYS_PFN(pmc->base_addr)))
			return -ENODEV;
	} else {
		pmc->base_addr = slp_s0_addr - pmc->map->slp_s0_offset;
	}

	pmc->regbase = ioremap(pmc->base_addr, pmc->map->regmap_length);
	if (!pmc->regbase)
		return -ENOMEM;
	return 0;
}
1198
1199 void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid)
1200 {
1201         struct telem_endpoint *ep;
1202         struct pci_dev *pcidev;
1203
1204         pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(10, 0));
1205         if (!pcidev) {
1206                 dev_err(&pmcdev->pdev->dev, "PUNIT PMT device not found.");
1207                 return;
1208         }
1209
1210         ep = pmt_telem_find_and_register_endpoint(pcidev, guid, 0);
1211         pci_dev_put(pcidev);
1212         if (IS_ERR(ep)) {
1213                 dev_err(&pmcdev->pdev->dev,
1214                         "pmc_core: couldn't get DMU telem endpoint %ld",
1215                         PTR_ERR(ep));
1216                 return;
1217         }
1218
1219         pmcdev->punit_ep = ep;
1220
1221         pmcdev->has_die_c6 = true;
1222         pmcdev->die_c6_offset = MTL_PMT_DMU_DIE_C6_OFFSET;
1223 }
1224
1225 void pmc_core_set_device_d3(unsigned int device)
1226 {
1227         struct pci_dev *pcidev;
1228
1229         pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
1230         if (pcidev) {
1231                 if (!device_trylock(&pcidev->dev)) {
1232                         pci_dev_put(pcidev);
1233                         return;
1234                 }
1235                 if (!pcidev->dev.driver) {
1236                         dev_info(&pcidev->dev, "Setting to D3hot\n");
1237                         pci_set_power_state(pcidev, PCI_D3hot);
1238                 }
1239                 device_unlock(&pcidev->dev);
1240                 pci_dev_put(pcidev);
1241         }
1242 }
1243
1244 static bool pmc_core_is_pson_residency_enabled(struct pmc_dev *pmcdev)
1245 {
1246         struct platform_device *pdev = pmcdev->pdev;
1247         struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
1248         u8 val;
1249
1250         if (!adev)
1251                 return false;
1252
1253         if (fwnode_property_read_u8(acpi_fwnode_handle(adev),
1254                                     "intel-cec-pson-switching-enabled-in-s0",
1255                                     &val))
1256                 return false;
1257
1258         return val == 1;
1259 }
1260
/*
 * Enable or disable ACPI PM Timer
 *
 * This function is intended to be a callback for ACPI PM suspend/resume event.
 * The ACPI PM Timer is enabled on resume only if it was enabled during suspend.
 */
static void pmc_core_acpi_pm_timer_suspend_resume(void *data, bool suspend)
{
	struct pmc_dev *pmcdev = data;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_reg_map *map = pmc->map;
	bool enabled;
	u32 reg;

	/* Nothing to do when this PMC has no PM timer control register */
	if (!map->acpi_pm_tmr_ctl_offset)
		return;

	guard(mutex)(&pmcdev->lock);

	/* On resume, only re-enable if the timer was enabled at suspend time */
	if (!suspend && !pmcdev->enable_acpi_pm_timer_on_resume)
		return;

	reg = pmc_core_reg_read(pmc, map->acpi_pm_tmr_ctl_offset);
	enabled = !(reg & map->acpi_pm_tmr_disable_bit);
	if (suspend)
		reg |= map->acpi_pm_tmr_disable_bit;
	else
		reg &= ~map->acpi_pm_tmr_disable_bit;
	pmc_core_reg_write(pmc, map->acpi_pm_tmr_ctl_offset, reg);

	/* Remember whether to re-enable the timer on the next resume */
	pmcdev->enable_acpi_pm_timer_on_resume = suspend && enabled;
}
1293
/* Remove the entire pmc_core debugfs tree created at probe time. */
static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
	debugfs_remove_recursive(pmcdev->dbgfs_dir);
}
1298
1299 static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
1300 {
1301         struct pmc *primary_pmc = pmcdev->pmcs[PMC_IDX_MAIN];
1302         struct dentry *dir;
1303
1304         dir = debugfs_create_dir("pmc_core", NULL);
1305         pmcdev->dbgfs_dir = dir;
1306
1307         debugfs_create_file("slp_s0_residency_usec", 0444, dir, primary_pmc,
1308                             &pmc_core_dev_state);
1309
1310         if (primary_pmc->map->pfear_sts)
1311                 debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
1312                                     pmcdev, &pmc_core_ppfear_fops);
1313
1314         debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
1315                             &pmc_core_ltr_ignore_fops);
1316
1317         debugfs_create_file("ltr_restore", 0200, dir, pmcdev, &pmc_core_ltr_restore_fops);
1318
1319         debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);
1320
1321         if (primary_pmc->map->s0ix_blocker_maps)
1322                 debugfs_create_file("s0ix_blocker", 0444, dir, pmcdev, &pmc_core_s0ix_blocker_fops);
1323
1324         debugfs_create_file("package_cstate_show", 0444, dir, primary_pmc,
1325                             &pmc_core_pkgc_fops);
1326
1327         if (primary_pmc->map->pll_sts)
1328                 debugfs_create_file("pll_status", 0444, dir, pmcdev,
1329                                     &pmc_core_pll_fops);
1330
1331         if (primary_pmc->map->mphy_sts)
1332                 debugfs_create_file("mphy_core_lanes_power_gating_status",
1333                                     0444, dir, pmcdev,
1334                                     &pmc_core_mphy_pg_fops);
1335
1336         if (primary_pmc->map->slps0_dbg_maps) {
1337                 debugfs_create_file("slp_s0_debug_status", 0444,
1338                                     dir, pmcdev,
1339                                     &pmc_core_slps0_dbg_fops);
1340
1341                 debugfs_create_bool("slp_s0_dbg_latch", 0644,
1342                                     dir, &slps0_dbg_latch);
1343         }
1344
1345         if (primary_pmc->map->lpm_en_offset) {
1346                 debugfs_create_file("substate_residencies", 0444,
1347                                     pmcdev->dbgfs_dir, pmcdev,
1348                                     &pmc_core_substate_res_fops);
1349         }
1350
1351         if (primary_pmc->map->lpm_status_offset) {
1352                 debugfs_create_file("substate_status_registers", 0444,
1353                                     pmcdev->dbgfs_dir, pmcdev,
1354                                     &pmc_core_substate_sts_regs_fops);
1355                 debugfs_create_file("substate_live_status_registers", 0444,
1356                                     pmcdev->dbgfs_dir, pmcdev,
1357                                     &pmc_core_substate_l_sts_regs_fops);
1358                 debugfs_create_file("lpm_latch_mode", 0644,
1359                                     pmcdev->dbgfs_dir, pmcdev,
1360                                     &pmc_core_lpm_latch_mode_fops);
1361         }
1362
1363         if (primary_pmc->lpm_req_regs) {
1364                 debugfs_create_file("substate_requirements", 0444,
1365                                     pmcdev->dbgfs_dir, pmcdev,
1366                                     &pmc_core_substate_req_regs_fops);
1367         }
1368
1369         if (primary_pmc->map->pson_residency_offset && pmc_core_is_pson_residency_enabled(pmcdev)) {
1370                 debugfs_create_file("pson_residency_usec", 0444,
1371                                     pmcdev->dbgfs_dir, primary_pmc, &pmc_core_pson_residency);
1372         }
1373
1374         if (pmcdev->has_die_c6) {
1375                 debugfs_create_file("die_c6_us_show", 0444,
1376                                     pmcdev->dbgfs_dir, pmcdev,
1377                                     &pmc_core_die_c6_us_fops);
1378         }
1379 }
1380
/*
 * Supported CPU models. driver_data is the per-platform init routine that
 * selects the matching register maps for that SoC generation.
 */
static const struct x86_cpu_id intel_pmc_core_ids[] = {
	X86_MATCH_VFM(INTEL_SKYLAKE_L,		spt_core_init),
	X86_MATCH_VFM(INTEL_SKYLAKE,		spt_core_init),
	X86_MATCH_VFM(INTEL_KABYLAKE_L,		spt_core_init),
	X86_MATCH_VFM(INTEL_KABYLAKE,		spt_core_init),
	X86_MATCH_VFM(INTEL_CANNONLAKE_L,	cnp_core_init),
	X86_MATCH_VFM(INTEL_ICELAKE_L,		icl_core_init),
	X86_MATCH_VFM(INTEL_ICELAKE_NNPI,	icl_core_init),
	X86_MATCH_VFM(INTEL_COMETLAKE,		cnp_core_init),
	X86_MATCH_VFM(INTEL_COMETLAKE_L,	cnp_core_init),
	X86_MATCH_VFM(INTEL_TIGERLAKE_L,	tgl_l_core_init),
	X86_MATCH_VFM(INTEL_TIGERLAKE,		tgl_core_init),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT,	tgl_l_core_init),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT_L,	icl_core_init),
	X86_MATCH_VFM(INTEL_ROCKETLAKE,		tgl_core_init),
	X86_MATCH_VFM(INTEL_ALDERLAKE_L,	tgl_l_core_init),
	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT,	tgl_l_core_init),
	X86_MATCH_VFM(INTEL_ALDERLAKE,		adl_core_init),
	X86_MATCH_VFM(INTEL_RAPTORLAKE_P,	tgl_l_core_init),
	X86_MATCH_VFM(INTEL_RAPTORLAKE,		adl_core_init),
	X86_MATCH_VFM(INTEL_RAPTORLAKE_S,	adl_core_init),
	X86_MATCH_VFM(INTEL_METEORLAKE_L,	mtl_core_init),
	X86_MATCH_VFM(INTEL_ARROWLAKE,		arl_core_init),
	X86_MATCH_VFM(INTEL_LUNARLAKE_M,	lnl_core_init),
	{}
};

MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);
1409
/*
 * Sunrisepoint PCH PMC PCI id - used in probe to tell SPT apart from CNP
 * on Coffee Lake parts that share the Kaby Lake CPU id.
 */
static const struct pci_device_id pmc_pci_ids[] = {
	{ PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
	{ }
};
1414
/*
 * This quirk can be used on those platforms where
 * the platform BIOS enforces 24Mhz crystal to shutdown
 * before PMC can assert SLP_S0#.
 */
static bool xtal_ignore;
static int quirk_xtal_ignore(const struct dmi_system_id *id)
{
	/* Only record the DMI match; the register tweak is applied later */
	xtal_ignore = true;
	return 0;
}
1426
1427 static void pmc_core_xtal_ignore(struct pmc *pmc)
1428 {
1429         u32 value;
1430
1431         value = pmc_core_reg_read(pmc, pmc->map->pm_vric1_offset);
1432         /* 24MHz Crystal Shutdown Qualification Disable */
1433         value |= SPT_PMC_VRIC1_XTALSDQDIS;
1434         /* Low Voltage Mode Enable */
1435         value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
1436         pmc_core_reg_write(pmc, pmc->map->pm_vric1_offset, value);
1437 }
1438
1439 static const struct dmi_system_id pmc_core_dmi_table[]  = {
1440         {
1441         .callback = quirk_xtal_ignore,
1442         .ident = "HP Elite x2 1013 G3",
1443         .matches = {
1444                 DMI_MATCH(DMI_SYS_VENDOR, "HP"),
1445                 DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"),
1446                 },
1447         },
1448         {}
1449 };
1450
/* Run the DMI table and apply any matched quirks (currently xtal ignore). */
static void pmc_core_do_dmi_quirks(struct pmc *pmc)
{
	dmi_check_system(pmc_core_dmi_table);

	if (xtal_ignore)
		pmc_core_xtal_ignore(pmc);
}
1458
1459 static void pmc_core_clean_structure(struct platform_device *pdev)
1460 {
1461         struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
1462         unsigned int i;
1463
1464         for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
1465                 struct pmc *pmc = pmcdev->pmcs[i];
1466
1467                 if (pmc)
1468                         iounmap(pmc->regbase);
1469         }
1470
1471         if (pmcdev->ssram_pcidev) {
1472                 pci_dev_put(pmcdev->ssram_pcidev);
1473                 pci_disable_device(pmcdev->ssram_pcidev);
1474         }
1475
1476         if (pmcdev->punit_ep)
1477                 pmt_telem_unregister_endpoint(pmcdev->punit_ep);
1478
1479         platform_set_drvdata(pdev, NULL);
1480         mutex_destroy(&pmcdev->lock);
1481 }
1482
/*
 * Probe: allocate the pmc_dev and primary PMC, pick the per-platform init
 * routine from the CPU id table, run it, and expose the debugfs interface.
 */
static int pmc_core_probe(struct platform_device *pdev)
{
	static bool device_initialized;
	struct pmc_dev *pmcdev;
	const struct x86_cpu_id *cpu_id;
	int (*core_init)(struct pmc_dev *pmcdev);
	const struct pmc_reg_map *map;
	struct pmc *primary_pmc;
	int ret;

	/* Only a single PMC core device instance is supported */
	if (device_initialized)
		return -ENODEV;

	pmcdev = devm_kzalloc(&pdev->dev, sizeof(*pmcdev), GFP_KERNEL);
	if (!pmcdev)
		return -ENOMEM;

	pmcdev->crystal_freq = pmc_core_get_crystal_freq();

	platform_set_drvdata(pdev, pmcdev);
	pmcdev->pdev = pdev;

	/* Select the per-platform init routine from the CPU model */
	cpu_id = x86_match_cpu(intel_pmc_core_ids);
	if (!cpu_id)
		return -ENODEV;

	core_init = (int (*)(struct pmc_dev *))cpu_id->driver_data;

	/* Primary PMC */
	primary_pmc = devm_kzalloc(&pdev->dev, sizeof(*primary_pmc), GFP_KERNEL);
	if (!primary_pmc)
		return -ENOMEM;
	pmcdev->pmcs[PMC_IDX_MAIN] = primary_pmc;

	/* The last element in msr_map is empty */
	pmcdev->num_of_pkgc = ARRAY_SIZE(msr_map) - 1;
	pmcdev->pkgc_res_cnt = devm_kcalloc(&pdev->dev,
					    pmcdev->num_of_pkgc,
					    sizeof(*pmcdev->pkgc_res_cnt),
					    GFP_KERNEL);
	if (!pmcdev->pkgc_res_cnt)
		return -ENOMEM;

	/*
	 * Coffee Lake has CPU ID of Kaby Lake and Cannon Lake PCH. So here
	 * Sunrisepoint PCH regmap can't be used. Use Cannon Lake PCH regmap
	 * in this case.
	 */
	if (core_init == spt_core_init && !pci_dev_present(pmc_pci_ids))
		core_init = cnp_core_init;

	mutex_init(&pmcdev->lock);
	ret = core_init(pmcdev);
	if (ret) {
		pmc_core_clean_structure(pdev);
		return ret;
	}

	pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(primary_pmc);
	pmc_core_do_dmi_quirks(primary_pmc);

	pmc_core_dbgfs_register(pmcdev);
	pm_report_max_hw_sleep(FIELD_MAX(SLP_S0_RES_COUNTER_MASK) *
			       pmc_core_adjust_slp_s0_step(primary_pmc, 1));

	/* Hook ACPI PM timer suspend/resume handling when the PMC supports it */
	map = primary_pmc->map;
	if (map->acpi_pm_tmr_ctl_offset)
		acpi_pmtmr_register_suspend_resume_callback(pmc_core_acpi_pm_timer_suspend_resume,
							 pmcdev);

	device_initialized = true;
	dev_info(&pdev->dev, " initialized\n");

	return 0;
}
1558
/* Remove: undo everything probe set up, in reverse order. */
static void pmc_core_remove(struct platform_device *pdev)
{
	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
	const struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_reg_map *map = pmc->map;

	/* Unhook the ACPI PM timer callback registered in probe */
	if (map->acpi_pm_tmr_ctl_offset)
		acpi_pmtmr_unregister_suspend_resume_callback();

	pmc_core_dbgfs_unregister(pmcdev);
	pmc_core_clean_structure(pdev);
}
1571
/* When set, dump PKGC/S0ix diagnostics after a suspend that failed to reach S0ix */
static bool warn_on_s0ix_failures;
module_param(warn_on_s0ix_failures, bool, 0644);
MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");

/* When set (default), ignore all LTRs during suspend and restore them on resume */
static bool ltr_ignore_all_suspend = true;
module_param(ltr_ignore_all_suspend, bool, 0644);
MODULE_PARM_DESC(ltr_ignore_all_suspend, "Ignore all LTRs during suspend");
1579
/* Late-suspend hook: snapshot residency counters for post-resume S0ix checks. */
static __maybe_unused int pmc_core_suspend(struct device *dev)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	unsigned int i;

	if (pmcdev->suspend)
		pmcdev->suspend(pmcdev);

	if (ltr_ignore_all_suspend)
		pmc_core_ltr_ignore_all(pmcdev);

	/* Check if the suspend will actually use S0ix */
	if (pm_suspend_via_firmware())
		return 0;

	/* Save PKGC residency for checking later */
	for (i = 0; i < pmcdev->num_of_pkgc; i++) {
		if (rdmsrl_safe(msr_map[i].bit_mask, &pmcdev->pkgc_res_cnt[i]))
			return -EIO;
	}

	/* Save S0ix residency for checking later */
	if (pmc_core_dev_state_get(pmc, &pmcdev->s0ix_counter))
		return -EIO;

	return 0;
}
1608
1609 static inline bool pmc_core_is_deepest_pkgc_failed(struct pmc_dev *pmcdev)
1610 {
1611         u32 deepest_pkgc_msr = msr_map[pmcdev->num_of_pkgc - 1].bit_mask;
1612         u64 deepest_pkgc_residency;
1613
1614         if (rdmsrl_safe(deepest_pkgc_msr, &deepest_pkgc_residency))
1615                 return false;
1616
1617         if (deepest_pkgc_residency == pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1])
1618                 return true;
1619
1620         return false;
1621 }
1622
1623 static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
1624 {
1625         u64 s0ix_counter;
1626
1627         if (pmc_core_dev_state_get(pmcdev->pmcs[PMC_IDX_MAIN], &s0ix_counter))
1628                 return false;
1629
1630         pm_report_hw_sleep_time((u32)(s0ix_counter - pmcdev->s0ix_counter));
1631
1632         if (s0ix_counter == pmcdev->s0ix_counter)
1633                 return true;
1634
1635         return false;
1636 }
1637
1638 int pmc_core_resume_common(struct pmc_dev *pmcdev)
1639 {
1640         struct device *dev = &pmcdev->pdev->dev;
1641         struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
1642         const struct pmc_bit_map **maps = pmc->map->lpm_sts;
1643         int offset = pmc->map->lpm_status_offset;
1644         unsigned int i;
1645
1646         /* Check if the syspend used S0ix */
1647         if (pm_suspend_via_firmware())
1648                 return 0;
1649
1650         if (!pmc_core_is_s0ix_failed(pmcdev))
1651                 return 0;
1652
1653         if (!warn_on_s0ix_failures)
1654                 return 0;
1655
1656         if (pmc_core_is_deepest_pkgc_failed(pmcdev)) {
1657                 /* S0ix failed because of deepest PKGC entry failure */
1658                 dev_info(dev, "CPU did not enter %s!!! (%s cnt=0x%llx)\n",
1659                          msr_map[pmcdev->num_of_pkgc - 1].name,
1660                          msr_map[pmcdev->num_of_pkgc - 1].name,
1661                          pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1]);
1662
1663                 for (i = 0; i < pmcdev->num_of_pkgc; i++) {
1664                         u64 pc_cnt;
1665
1666                         if (!rdmsrl_safe(msr_map[i].bit_mask, &pc_cnt)) {
1667                                 dev_info(dev, "Prev %s cnt = 0x%llx, Current %s cnt = 0x%llx\n",
1668                                          msr_map[i].name, pmcdev->pkgc_res_cnt[i],
1669                                          msr_map[i].name, pc_cnt);
1670                         }
1671                 }
1672                 return 0;
1673         }
1674
1675         /* The real interesting case - S0ix failed - lets ask PMC why. */
1676         dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
1677                  pmcdev->s0ix_counter);
1678
1679         if (pmc->map->slps0_dbg_maps)
1680                 pmc_core_slps0_display(pmc, dev, NULL);
1681
1682         for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
1683                 struct pmc *pmc = pmcdev->pmcs[i];
1684
1685                 if (!pmc)
1686                         continue;
1687                 if (pmc->map->lpm_sts)
1688                         pmc_core_lpm_display(pmc, dev, NULL, offset, i, "STATUS", maps);
1689         }
1690
1691         return 0;
1692 }
1693
1694 static __maybe_unused int pmc_core_resume(struct device *dev)
1695 {
1696         struct pmc_dev *pmcdev = dev_get_drvdata(dev);
1697
1698         if (ltr_ignore_all_suspend)
1699                 pmc_core_ltr_restore_all(pmcdev);
1700
1701         if (pmcdev->resume)
1702                 return pmcdev->resume(pmcdev);
1703
1704         return pmc_core_resume_common(pmcdev);
1705 }
1706
/* Late suspend/resume ops so counters are sampled as close to S0ix as possible */
static const struct dev_pm_ops pmc_core_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
};

static const struct acpi_device_id pmc_core_acpi_ids[] = {
	{"INT33A1", 0}, /* _HID for Intel Power Engine, _CID PNP0D80*/
	{ }
};
MODULE_DEVICE_TABLE(acpi, pmc_core_acpi_ids);

static struct platform_driver pmc_core_driver = {
	.driver = {
		.name = "intel_pmc_core",
		.acpi_match_table = ACPI_PTR(pmc_core_acpi_ids),
		.pm = &pmc_core_pm_ops,
		.dev_groups = pmc_dev_groups,
	},
	.probe = pmc_core_probe,
	.remove = pmc_core_remove,
};

module_platform_driver(pmc_core_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel PMC Core Driver");
This page took 0.131229 seconds and 4 git commands to generate.