/* Source: linux.git — drivers/platform/x86/intel/pmc/core.c */
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Intel Core SoC Power Management Controller Driver
4  *
5  * Copyright (c) 2016, Intel Corporation.
6  * All Rights Reserved.
7  *
8  * Authors: Rajneesh Bhardwaj <[email protected]>
9  *          Vishwanath Somayaji <[email protected]>
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/bitfield.h>
15 #include <linux/debugfs.h>
16 #include <linux/delay.h>
17 #include <linux/dmi.h>
18 #include <linux/io.h>
19 #include <linux/module.h>
20 #include <linux/pci.h>
21 #include <linux/slab.h>
22 #include <linux/suspend.h>
23
24 #include <asm/cpu_device_id.h>
25 #include <asm/intel-family.h>
26 #include <asm/msr.h>
27 #include <asm/tsc.h>
28
29 #include "core.h"
30
/* Maximum number of modes supported by platforms that have low power mode capability */
const char *pmc_lpm_modes[] = {
	"S0i2.0",
	"S0i2.1",
	"S0i2.2",
	"S0i3.0",
	"S0i3.1",
	"S0i3.2",
	"S0i3.3",
	"S0i3.4",
	NULL
};
43
/*
 * PKGC MSRs are common across Intel Core SoCs: each entry pairs a
 * package C-state name with the MSR holding its residency counter.
 * The empty entry terminates the table.
 */
const struct pmc_bit_map msr_map[] = {
	{"Package C2",                  MSR_PKG_C2_RESIDENCY},
	{"Package C3",                  MSR_PKG_C3_RESIDENCY},
	{"Package C6",                  MSR_PKG_C6_RESIDENCY},
	{"Package C7",                  MSR_PKG_C7_RESIDENCY},
	{"Package C8",                  MSR_PKG_C8_RESIDENCY},
	{"Package C9",                  MSR_PKG_C9_RESIDENCY},
	{"Package C10",                 MSR_PKG_C10_RESIDENCY},
	{}
};
55
56 static inline u32 pmc_core_reg_read(struct pmc *pmc, int reg_offset)
57 {
58         return readl(pmc->regbase + reg_offset);
59 }
60
61 static inline void pmc_core_reg_write(struct pmc *pmc, int reg_offset,
62                                       u32 val)
63 {
64         writel(val, pmc->regbase + reg_offset);
65 }
66
67 static inline u64 pmc_core_adjust_slp_s0_step(struct pmc *pmc, u32 value)
68 {
69         /*
70          * ADL PCH does not have the SLP_S0 counter and LPM Residency counters are
71          * used as a workaround which uses 30.5 usec tick. All other client
72          * programs have the legacy SLP_S0 residency counter that is using the 122
73          * usec tick.
74          */
75         const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;
76
77         if (pmc->map == &adl_reg_map)
78                 return (u64)value * GET_X2_COUNTER((u64)lpm_adj_x2);
79         else
80                 return (u64)value * pmc->map->slp_s0_res_counter_step;
81 }
82
/*
 * Request a CF9 global reset through the ETR3 register.
 *
 * Returns 0 on success, -EOPNOTSUPP when the platform has no ETR3
 * register, -EACCES when the CF9 interface is locked by firmware, or
 * -EIO when the written bit did not stick.
 */
static int set_etr3(struct pmc_dev *pmcdev)
{
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_reg_map *map = pmc->map;
	u32 reg;
	int err;

	if (!map->etr3_offset)
		return -EOPNOTSUPP;

	mutex_lock(&pmcdev->lock);

	/* check if CF9 is locked */
	reg = pmc_core_reg_read(pmc, map->etr3_offset);
	if (reg & ETR3_CF9LOCK) {
		err = -EACCES;
		goto out_unlock;
	}

	/* write CF9 global reset bit */
	reg |= ETR3_CF9GR;
	pmc_core_reg_write(pmc, map->etr3_offset, reg);

	/* read back to verify the bit latched */
	reg = pmc_core_reg_read(pmc, map->etr3_offset);
	if (!(reg & ETR3_CF9GR)) {
		err = -EIO;
		goto out_unlock;
	}

	err = 0;

out_unlock:
	mutex_unlock(&pmcdev->lock);
	return err;
}
118 static umode_t etr3_is_visible(struct kobject *kobj,
119                                 struct attribute *attr,
120                                 int idx)
121 {
122         struct device *dev = kobj_to_dev(kobj);
123         struct pmc_dev *pmcdev = dev_get_drvdata(dev);
124         struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
125         const struct pmc_reg_map *map = pmc->map;
126         u32 reg;
127
128         mutex_lock(&pmcdev->lock);
129         reg = pmc_core_reg_read(pmc, map->etr3_offset);
130         mutex_unlock(&pmcdev->lock);
131
132         return reg & ETR3_CF9LOCK ? attr->mode & (SYSFS_PREALLOC | 0444) : attr->mode;
133 }
134
135 static ssize_t etr3_show(struct device *dev,
136                                  struct device_attribute *attr, char *buf)
137 {
138         struct pmc_dev *pmcdev = dev_get_drvdata(dev);
139         struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
140         const struct pmc_reg_map *map = pmc->map;
141         u32 reg;
142
143         if (!map->etr3_offset)
144                 return -EOPNOTSUPP;
145
146         mutex_lock(&pmcdev->lock);
147
148         reg = pmc_core_reg_read(pmc, map->etr3_offset);
149         reg &= ETR3_CF9GR | ETR3_CF9LOCK;
150
151         mutex_unlock(&pmcdev->lock);
152
153         return sysfs_emit(buf, "0x%08x", reg);
154 }
155
156 static ssize_t etr3_store(struct device *dev,
157                                   struct device_attribute *attr,
158                                   const char *buf, size_t len)
159 {
160         struct pmc_dev *pmcdev = dev_get_drvdata(dev);
161         int err;
162         u32 reg;
163
164         err = kstrtouint(buf, 16, &reg);
165         if (err)
166                 return err;
167
168         /* allow only CF9 writes */
169         if (reg != ETR3_CF9GR)
170                 return -EINVAL;
171
172         err = set_etr3(pmcdev);
173         if (err)
174                 return err;
175
176         return len;
177 }
/* Read/write "etr3" device attribute backed by etr3_show()/etr3_store(). */
static DEVICE_ATTR_RW(etr3);

static struct attribute *pmc_attrs[] = {
	&dev_attr_etr3.attr,
	NULL
};

/* Attribute visibility (CF9 lock handling) is decided by etr3_is_visible(). */
static const struct attribute_group pmc_attr_group = {
	.attrs = pmc_attrs,
	.is_visible = etr3_is_visible,
};

static const struct attribute_group *pmc_dev_groups[] = {
	&pmc_attr_group,
	NULL
};
194
195 static int pmc_core_dev_state_get(void *data, u64 *val)
196 {
197         struct pmc *pmc = data;
198         const struct pmc_reg_map *map = pmc->map;
199         u32 value;
200
201         value = pmc_core_reg_read(pmc, map->slp_s0_offset);
202         *val = pmc_core_adjust_slp_s0_step(pmc, value);
203
204         return 0;
205 }
206
207 DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");
208
209 static int pmc_core_check_read_lock_bit(struct pmc *pmc)
210 {
211         u32 value;
212
213         value = pmc_core_reg_read(pmc, pmc->map->pm_cfg_offset);
214         return value & BIT(pmc->map->pm_read_disable_bit);
215 }
216
217 static void pmc_core_slps0_display(struct pmc *pmc, struct device *dev,
218                                    struct seq_file *s)
219 {
220         const struct pmc_bit_map **maps = pmc->map->slps0_dbg_maps;
221         const struct pmc_bit_map *map;
222         int offset = pmc->map->slps0_dbg_offset;
223         u32 data;
224
225         while (*maps) {
226                 map = *maps;
227                 data = pmc_core_reg_read(pmc, offset);
228                 offset += 4;
229                 while (map->name) {
230                         if (dev)
231                                 dev_info(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
232                                         map->name,
233                                         data & map->bit_mask ? "Yes" : "No");
234                         if (s)
235                                 seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
236                                            map->name,
237                                            data & map->bit_mask ? "Yes" : "No");
238                         ++map;
239                 }
240                 ++maps;
241         }
242 }
243
/* Count the entries in a NULL-terminated array of bit-map pointers. */
static int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps)
{
	int n = 0;

	while (maps[n])
		n++;

	return n;
}
253
254 static void pmc_core_lpm_display(struct pmc *pmc, struct device *dev,
255                                  struct seq_file *s, u32 offset, int pmc_index,
256                                  const char *str,
257                                  const struct pmc_bit_map **maps)
258 {
259         int index, idx, len = 32, bit_mask, arr_size;
260         u32 *lpm_regs;
261
262         arr_size = pmc_core_lpm_get_arr_size(maps);
263         lpm_regs = kmalloc_array(arr_size, sizeof(*lpm_regs), GFP_KERNEL);
264         if (!lpm_regs)
265                 return;
266
267         for (index = 0; index < arr_size; index++) {
268                 lpm_regs[index] = pmc_core_reg_read(pmc, offset);
269                 offset += 4;
270         }
271
272         for (idx = 0; idx < arr_size; idx++) {
273                 if (dev)
274                         dev_info(dev, "\nPMC%d:LPM_%s_%d:\t0x%x\n", pmc_index, str, idx,
275                                 lpm_regs[idx]);
276                 if (s)
277                         seq_printf(s, "\nPMC%d:LPM_%s_%d:\t0x%x\n", pmc_index, str, idx,
278                                    lpm_regs[idx]);
279                 for (index = 0; maps[idx][index].name && index < len; index++) {
280                         bit_mask = maps[idx][index].bit_mask;
281                         if (dev)
282                                 dev_info(dev, "PMC%d:%-30s %-30d\n", pmc_index,
283                                         maps[idx][index].name,
284                                         lpm_regs[idx] & bit_mask ? 1 : 0);
285                         if (s)
286                                 seq_printf(s, "PMC%d:%-30s %-30d\n", pmc_index,
287                                            maps[idx][index].name,
288                                            lpm_regs[idx] & bit_mask ? 1 : 0);
289                 }
290         }
291
292         kfree(lpm_regs);
293 }
294
/* debugfs-controlled flag: when true, SLP_S0 event latching is (re)armed. */
static bool slps0_dbg_latch;

/* Read a single byte from the PMC MMIO window at @offset. */
static inline u8 pmc_core_reg_read_byte(struct pmc *pmc, int offset)
{
	return readb(pmc->regbase + offset);
}
301
302 static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
303                                  int pmc_index, u8 pf_reg, const struct pmc_bit_map **pf_map)
304 {
305         seq_printf(s, "PMC%d:PCH IP: %-2d - %-32s\tState: %s\n",
306                    pmc_index, ip, pf_map[idx][index].name,
307                    pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
308 }
309
/*
 * debugfs show: PPFEAR per-IP On/Off state for every PMC. Reads
 * ppfear_buckets status bytes and prints one line per named IP
 * (8 IPs per status byte).
 */
static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
		struct pmc *pmc = pmcdev->pmcs[i];
		const struct pmc_bit_map **maps;
		u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
		int index, iter, idx, ip = 0;

		if (!pmc)
			continue;

		maps = pmc->map->pfear_sts;
		iter = pmc->map->ppfear0_offset;

		/* One status byte per bucket, capped at the local buffer size */
		for (index = 0; index < pmc->map->ppfear_buckets &&
		     index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
			pf_regs[index] = pmc_core_reg_read_byte(pmc, iter);

		/* @ip numbers IPs contiguously across all maps of this PMC */
		for (idx = 0; maps[idx]; idx++) {
			for (index = 0; maps[idx][index].name &&
			     index < pmc->map->ppfear_buckets * 8; ip++, index++)
				pmc_core_display_map(s, index, idx, ip, i,
						     pf_regs[index / 8], maps);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear);
342
343 /* This function should return link status, 0 means ready */
344 static int pmc_core_mtpmc_link_status(struct pmc *pmc)
345 {
346         u32 value;
347
348         value = pmc_core_reg_read(pmc, SPT_PMC_PM_STS_OFFSET);
349         return value & BIT(SPT_PMC_MSG_FULL_STS_BIT);
350 }
351
352 static int pmc_core_send_msg(struct pmc *pmc, u32 *addr_xram)
353 {
354         u32 dest;
355         int timeout;
356
357         for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
358                 if (pmc_core_mtpmc_link_status(pmc) == 0)
359                         break;
360                 msleep(5);
361         }
362
363         if (timeout <= 0 && pmc_core_mtpmc_link_status(pmc))
364                 return -EBUSY;
365
366         dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
367         pmc_core_reg_write(pmc, SPT_PMC_MTPMC_OFFSET, dest);
368         return 0;
369 }
370
/*
 * debugfs show: power-gating state of each MPHY core entry.
 *
 * Both status words are fetched through the MTPMC/MFPMC mailbox pair:
 * post the XRAM address via pmc_core_send_msg(), wait ~10 ms for the
 * PMC to respond, then read the result from MFPMC. Entries 0-7 of the
 * map decode against the STS_0 word, the remainder against STS_1.
 */
static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_bit_map *map = pmc->map->mphy_sts;
	u32 mphy_core_reg_low, mphy_core_reg_high;
	u32 val_low, val_high;
	int index, err = 0;

	/* XRAM access is disabled by firmware; nothing can be read */
	if (pmcdev->pmc_xram_read_bit) {
		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
		return 0;
	}

	mphy_core_reg_low  = (SPT_PMC_MPHY_CORE_STS_0 << 16);
	mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);

	mutex_lock(&pmcdev->lock);

	if (pmc_core_send_msg(pmc, &mphy_core_reg_low) != 0) {
		err = -EBUSY;
		goto out_unlock;
	}

	/* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
	msleep(10);
	val_low = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);

	if (pmc_core_send_msg(pmc, &mphy_core_reg_high) != 0) {
		err = -EBUSY;
		goto out_unlock;
	}

	msleep(10);
	val_high = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);

	/* First eight entries decode against the low status word */
	for (index = 0; index < 8 && map[index].name; index++) {
		seq_printf(s, "%-32s\tState: %s\n",
			   map[index].name,
			   map[index].bit_mask & val_low ? "Not power gated" :
			   "Power gated");
	}

	/* Remaining entries decode against the high status word */
	for (index = 8; map[index].name; index++) {
		seq_printf(s, "%-32s\tState: %s\n",
			   map[index].name,
			   map[index].bit_mask & val_high ? "Not power gated" :
			   "Power gated");
	}

out_unlock:
	mutex_unlock(&pmcdev->lock);
	return err;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg);
425
/*
 * debugfs show: Active/Idle state of each PLL, decoded from the MPHY
 * common status word fetched via the MTPMC/MFPMC mailbox.
 */
static int pmc_core_pll_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_bit_map *map = pmc->map->pll_sts;
	u32 mphy_common_reg, val;
	int index, err = 0;

	/* XRAM access is disabled by firmware; nothing can be read */
	if (pmcdev->pmc_xram_read_bit) {
		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
		return 0;
	}

	mphy_common_reg  = (SPT_PMC_MPHY_COM_STS_0 << 16);
	mutex_lock(&pmcdev->lock);

	if (pmc_core_send_msg(pmc, &mphy_common_reg) != 0) {
		err = -EBUSY;
		goto out_unlock;
	}

	/* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
	msleep(10);
	val = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);

	for (index = 0; map[index].name ; index++) {
		seq_printf(s, "%-32s\tState: %s\n",
			   map[index].name,
			   map[index].bit_mask & val ? "Active" : "Idle");
	}

out_unlock:
	mutex_unlock(&pmcdev->lock);
	return err;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
462
463 int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value)
464 {
465         struct pmc *pmc;
466         const struct pmc_reg_map *map;
467         u32 reg;
468         int pmc_index, ltr_index;
469
470         ltr_index = value;
471         /* For platforms with multiple pmcs, ltr index value given by user
472          * is based on the contiguous indexes from ltr_show output.
473          * pmc index and ltr index needs to be calculated from it.
474          */
475         for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs) && ltr_index > 0; pmc_index++) {
476                 pmc = pmcdev->pmcs[pmc_index];
477
478                 if (!pmc)
479                         continue;
480
481                 map = pmc->map;
482                 if (ltr_index <= map->ltr_ignore_max)
483                         break;
484
485                 /* Along with IP names, ltr_show map includes CURRENT_PLATFORM
486                  * and AGGREGATED_SYSTEM values per PMC. Take these two index
487                  * values into account in ltr_index calculation. Also, to start
488                  * ltr index from zero for next pmc, subtract it by 1.
489                  */
490                 ltr_index = ltr_index - (map->ltr_ignore_max + 2) - 1;
491         }
492
493         if (pmc_index >= ARRAY_SIZE(pmcdev->pmcs) || ltr_index < 0)
494                 return -EINVAL;
495
496         pr_debug("ltr_ignore for pmc%d: ltr_index:%d\n", pmc_index, ltr_index);
497
498         mutex_lock(&pmcdev->lock);
499
500         reg = pmc_core_reg_read(pmc, map->ltr_ignore_offset);
501         reg |= BIT(ltr_index);
502         pmc_core_reg_write(pmc, map->ltr_ignore_offset, reg);
503
504         mutex_unlock(&pmcdev->lock);
505
506         return 0;
507 }
508
509 static ssize_t pmc_core_ltr_ignore_write(struct file *file,
510                                          const char __user *userbuf,
511                                          size_t count, loff_t *ppos)
512 {
513         struct seq_file *s = file->private_data;
514         struct pmc_dev *pmcdev = s->private;
515         u32 buf_size, value;
516         int err;
517
518         buf_size = min_t(u32, count, 64);
519
520         err = kstrtou32_from_user(userbuf, buf_size, 10, &value);
521         if (err)
522                 return err;
523
524         err = pmc_core_send_ltr_ignore(pmcdev, value);
525
526         return err == 0 ? count : err;
527 }
528
/* The ltr_ignore file is write-only in practice; reads produce no output. */
static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
{
	return 0;
}

/* Bind the (empty) show routine and the inode's pmc_dev to the open file. */
static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmc_core_ltr_ignore_show, inode->i_private);
}

static const struct file_operations pmc_core_ltr_ignore_ops = {
	.open           = pmc_core_ltr_ignore_open,
	.read           = seq_read,
	.write          = pmc_core_ltr_ignore_write,
	.llseek         = seq_lseek,
	.release        = single_release,
};
546
/*
 * Arm or disarm latching of SLP_S0 debug events.
 *
 * @reset true: unconditionally clear the latch-enable bit.
 * @reset false: set it, but only when userspace requested latching via
 * the slps0_dbg_latch flag; the flag is consumed (set back to false).
 */
static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
{
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_reg_map *map = pmc->map;
	u32 fd;

	mutex_lock(&pmcdev->lock);

	/* No latch requested and nothing to reset: leave the register alone */
	if (!reset && !slps0_dbg_latch)
		goto out_unlock;

	fd = pmc_core_reg_read(pmc, map->slps0_dbg_offset);
	if (reset)
		fd &= ~CNP_PMC_LATCH_SLPS0_EVENTS;
	else
		fd |= CNP_PMC_LATCH_SLPS0_EVENTS;
	pmc_core_reg_write(pmc, map->slps0_dbg_offset, fd);

	slps0_dbg_latch = false;

out_unlock:
	mutex_unlock(&pmcdev->lock);
}
570
571 static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
572 {
573         struct pmc_dev *pmcdev = s->private;
574
575         pmc_core_slps0_dbg_latch(pmcdev, false);
576         pmc_core_slps0_display(pmcdev->pmcs[PMC_IDX_MAIN], NULL, s);
577         pmc_core_slps0_dbg_latch(pmcdev, true);
578
579         return 0;
580 }
581 DEFINE_SHOW_ATTRIBUTE(pmc_core_slps0_dbg);
582
583 static u32 convert_ltr_scale(u32 val)
584 {
585         /*
586          * As per PCIE specification supporting document
587          * ECN_LatencyTolnReporting_14Aug08.pdf the Latency
588          * Tolerance Reporting data payload is encoded in a
589          * 3 bit scale and 10 bit value fields. Values are
590          * multiplied by the indicated scale to yield an absolute time
591          * value, expressible in a range from 1 nanosecond to
592          * 2^25*(2^10-1) = 34,326,183,936 nanoseconds.
593          *
594          * scale encoding is as follows:
595          *
596          * ----------------------------------------------
597          * |scale factor        |       Multiplier (ns) |
598          * ----------------------------------------------
599          * |    0               |       1               |
600          * |    1               |       32              |
601          * |    2               |       1024            |
602          * |    3               |       32768           |
603          * |    4               |       1048576         |
604          * |    5               |       33554432        |
605          * |    6               |       Invalid         |
606          * |    7               |       Invalid         |
607          * ----------------------------------------------
608          */
609         if (val > 5) {
610                 pr_warn("Invalid LTR scale factor.\n");
611                 return 0;
612         }
613
614         return 1U << (5 * val);
615 }
616
/*
 * debugfs show: raw and decoded Latency Tolerance Reporting value for
 * every IP on every PMC. Each register holds the snoop payload in its
 * low 16 bits and the non-snoop payload in the high 16; a payload is
 * decoded to val * scale-multiplier ns only when its requirement bit
 * (LTR_REQ_SNOOP / LTR_REQ_NONSNOOP) is set.
 */
static int pmc_core_ltr_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
	u32 ltr_raw_data, scale, val;
	u16 snoop_ltr, nonsnoop_ltr;
	int i, index, ltr_index = 0;

	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
		struct pmc *pmc = pmcdev->pmcs[i];
		const struct pmc_bit_map *map;

		if (!pmc)
			continue;

		map = pmc->map->ltr_show_sts;
		for (index = 0; map[index].name; index++) {
			decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
			/* For this map, bit_mask holds the register offset */
			ltr_raw_data = pmc_core_reg_read(pmc,
							 map[index].bit_mask);
			snoop_ltr = ltr_raw_data & ~MTPMC_MASK;
			nonsnoop_ltr = (ltr_raw_data >> 0x10) & ~MTPMC_MASK;

			if (FIELD_GET(LTR_REQ_NONSNOOP, ltr_raw_data)) {
				scale = FIELD_GET(LTR_DECODED_SCALE, nonsnoop_ltr);
				val = FIELD_GET(LTR_DECODED_VAL, nonsnoop_ltr);
				decoded_non_snoop_ltr = val * convert_ltr_scale(scale);
			}
			if (FIELD_GET(LTR_REQ_SNOOP, ltr_raw_data)) {
				scale = FIELD_GET(LTR_DECODED_SCALE, snoop_ltr);
				val = FIELD_GET(LTR_DECODED_VAL, snoop_ltr);
				decoded_snoop_ltr = val * convert_ltr_scale(scale);
			}

			seq_printf(s, "%d\tPMC%d:%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\n",
				   ltr_index, i, map[index].name, ltr_raw_data,
				   decoded_non_snoop_ltr,
				   decoded_snoop_ltr);
			/* ltr_index numbers rows contiguously across all PMCs */
			ltr_index++;
		}
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);
661
662 static inline u64 adjust_lpm_residency(struct pmc *pmc, u32 offset,
663                                        const int lpm_adj_x2)
664 {
665         u64 lpm_res = pmc_core_reg_read(pmc, offset);
666
667         return GET_X2_COUNTER((u64)lpm_adj_x2 * lpm_res);
668 }
669
670 static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
671 {
672         struct pmc_dev *pmcdev = s->private;
673         struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
674         const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;
675         u32 offset = pmc->map->lpm_residency_offset;
676         int i, mode;
677
678         seq_printf(s, "%-10s %-15s\n", "Substate", "Residency");
679
680         pmc_for_each_mode(i, mode, pmcdev) {
681                 seq_printf(s, "%-10s %-15llu\n", pmc_lpm_modes[mode],
682                            adjust_lpm_residency(pmc, offset + (4 * mode), lpm_adj_x2));
683         }
684
685         return 0;
686 }
687 DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res);
688
689 static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
690 {
691         struct pmc_dev *pmcdev = s->private;
692         int i;
693
694         for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
695                 struct pmc *pmc = pmcdev->pmcs[i];
696                 const struct pmc_bit_map **maps;
697                 u32 offset;
698
699                 if (!pmc)
700                         continue;
701                 maps = pmc->map->lpm_sts;
702                 offset = pmc->map->lpm_status_offset;
703                 pmc_core_lpm_display(pmc, NULL, s, offset, i, "STATUS", maps);
704         }
705
706         return 0;
707 }
708 DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs);
709
710 static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
711 {
712         struct pmc_dev *pmcdev = s->private;
713         int i;
714
715         for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
716                 struct pmc *pmc = pmcdev->pmcs[i];
717                 const struct pmc_bit_map **maps;
718                 u32 offset;
719
720                 if (!pmc)
721                         continue;
722                 maps = pmc->map->lpm_sts;
723                 offset = pmc->map->lpm_live_status_offset;
724                 pmc_core_lpm_display(pmc, NULL, s, offset, i, "LIVE_STATUS", maps);
725         }
726
727         return 0;
728 }
729 DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);
730
731 static void pmc_core_substate_req_header_show(struct seq_file *s)
732 {
733         struct pmc_dev *pmcdev = s->private;
734         int i, mode;
735
736         seq_printf(s, "%30s |", "Element");
737         pmc_for_each_mode(i, mode, pmcdev)
738                 seq_printf(s, " %9s |", pmc_lpm_modes[mode]);
739
740         seq_printf(s, " %9s |\n", "Status");
741 }
742
/*
 * debugfs show: table of low power mode requirements. One row per
 * element required by at least one enabled mode, one column per enabled
 * mode ("Required" or blank), plus a final Status column showing the
 * element's last latched state.
 */
static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_bit_map **maps = pmc->map->lpm_sts;
	const struct pmc_bit_map *map;
	const int num_maps = pmc->map->lpm_num_maps;
	u32 sts_offset = pmc->map->lpm_status_offset;
	u32 *lpm_req_regs = pmc->lpm_req_regs;
	int mp;

	/* Display the header */
	pmc_core_substate_req_header_show(s);

	/* Loop over maps */
	for (mp = 0; mp < num_maps; mp++) {
		u32 req_mask = 0;
		u32 lpm_status;
		int mode, idx, i, len = 32;

		/*
		 * Capture the requirements and create a mask so that we only
		 * show an element if it's required for at least one of the
		 * enabled low power modes
		 */
		pmc_for_each_mode(idx, mode, pmcdev)
			req_mask |= lpm_req_regs[mp + (mode * num_maps)];

		/* Get the last latched status for this map */
		lpm_status = pmc_core_reg_read(pmc, sts_offset + (mp * 4));

		/*  Loop over elements in this map */
		map = maps[mp];
		for (i = 0; map[i].name && i < len; i++) {
			u32 bit_mask = map[i].bit_mask;

			if (!(bit_mask & req_mask))
				/*
				 * Not required for any enabled states
				 * so don't display
				 */
				continue;

			/* Display the element name in the first column */
			seq_printf(s, "%30s |", map[i].name);

			/* Loop over the enabled states and display if required */
			pmc_for_each_mode(idx, mode, pmcdev) {
				if (lpm_req_regs[mp + (mode * num_maps)] & bit_mask)
					seq_printf(s, " %9s |",
						   "Required");
				else
					seq_printf(s, " %9s |", " ");
			}

			/* In Status column, show the last captured state of this agent */
			if (lpm_status & bit_mask)
				seq_printf(s, " %9s |", "Yes");
			else
				seq_printf(s, " %9s |", " ");

			seq_puts(s, "\n");
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs);
811
812 static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
813 {
814         struct pmc_dev *pmcdev = s->private;
815         struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
816         bool c10;
817         u32 reg;
818         int idx, mode;
819
820         reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
821         if (reg & LPM_STS_LATCH_MODE) {
822                 seq_puts(s, "c10");
823                 c10 = false;
824         } else {
825                 seq_puts(s, "[c10]");
826                 c10 = true;
827         }
828
829         pmc_for_each_mode(idx, mode, pmcdev) {
830                 if ((BIT(mode) & reg) && !c10)
831                         seq_printf(s, " [%s]", pmc_lpm_modes[mode]);
832                 else
833                         seq_printf(s, " %s", pmc_lpm_modes[mode]);
834         }
835
836         seq_puts(s, " clear\n");
837
838         return 0;
839 }
840
/*
 * debugfs write handler selecting when LPM status registers are latched.
 *
 * Accepted strings: any enabled substate name (e.g. "S0i3.0") to latch
 * on that mode, "c10" to latch on C10 entry, or "clear" to reset the
 * previously latched events.
 */
static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
					     const char __user *userbuf,
					     size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	bool clear = false, c10 = false;
	unsigned char buf[8];
	int idx, m, mode;
	u32 reg;

	if (count > sizeof(buf) - 1)
		return -EINVAL;
	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;
	buf[count] = '\0';

	/*
	 * Allowed strings are:
	 *	Any enabled substate, e.g. 'S0i2.0'
	 *	'c10'
	 *	'clear'
	 */
	mode = sysfs_match_string(pmc_lpm_modes, buf);

	/* Check string matches enabled mode */
	pmc_for_each_mode(idx, m, pmcdev)
		if (mode == m)
			break;

	/* Not an enabled substate: fall back to 'clear'/'c10', else reject */
	if (mode != m || mode < 0) {
		if (sysfs_streq(buf, "clear"))
			clear = true;
		else if (sysfs_streq(buf, "c10"))
			c10 = true;
		else
			return -EINVAL;
	}

	if (clear) {
		mutex_lock(&pmcdev->lock);

		/* Clearing latched LPM events goes through ETR3 */
		reg = pmc_core_reg_read(pmc, pmc->map->etr3_offset);
		reg |= ETR3_CLEAR_LPM_EVENTS;
		pmc_core_reg_write(pmc, pmc->map->etr3_offset, reg);

		mutex_unlock(&pmcdev->lock);

		return count;
	}

	if (c10) {
		mutex_lock(&pmcdev->lock);

		/* Dropping LPM_STS_LATCH_MODE selects C10-entry latching */
		reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
		reg &= ~LPM_STS_LATCH_MODE;
		pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);

		mutex_unlock(&pmcdev->lock);

		return count;
	}

	/*
	 * For LPM mode latching we set the latch enable bit and selected mode
	 * and clear everything else.
	 */
	reg = LPM_STS_LATCH_MODE | BIT(mode);
	mutex_lock(&pmcdev->lock);
	pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);
	mutex_unlock(&pmcdev->lock);

	return count;
}
DEFINE_PMC_CORE_ATTR_WRITE(pmc_core_lpm_latch_mode);
917
918 static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
919 {
920         struct pmc *pmc = s->private;
921         const struct pmc_bit_map *map = pmc->map->msr_sts;
922         u64 pcstate_count;
923         int index;
924
925         for (index = 0; map[index].name ; index++) {
926                 if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
927                         continue;
928
929                 pcstate_count *= 1000;
930                 do_div(pcstate_count, tsc_khz);
931                 seq_printf(s, "%-8s : %llu\n", map[index].name,
932                            pcstate_count);
933         }
934
935         return 0;
936 }
937 DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);
938
939 static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order)
940 {
941         int i, j;
942
943         if (!lpm_pri)
944                 return false;
945         /*
946          * Each byte contains the priority level for 2 modes (7:4 and 3:0).
947          * In a 32 bit register this allows for describing 8 modes. Store the
948          * levels and look for values out of range.
949          */
950         for (i = 0; i < 8; i++) {
951                 int level = lpm_pri & GENMASK(3, 0);
952
953                 if (level >= LPM_MAX_NUM_MODES)
954                         return false;
955
956                 mode_order[i] = level;
957                 lpm_pri >>= 4;
958         }
959
960         /* Check that we have unique values */
961         for (i = 0; i < LPM_MAX_NUM_MODES - 1; i++)
962                 for (j = i + 1; j < LPM_MAX_NUM_MODES; j++)
963                         if (mode_order[i] == mode_order[j])
964                                 return false;
965
966         return true;
967 }
968
/*
 * Discover which low power modes (substates) the platform supports and
 * record the enabled ones, ordered from lowest to highest priority, in
 * pmcdev->lpm_en_modes.
 */
static void pmc_core_get_low_power_modes(struct platform_device *pdev)
{
	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	u8 pri_order[LPM_MAX_NUM_MODES] = LPM_DEFAULT_PRI;
	u8 mode_order[LPM_MAX_NUM_MODES];
	u32 lpm_pri;
	u32 lpm_en;
	int mode, i, p;

	/* Use LPM Maps to indicate support for substates */
	if (!pmc->map->lpm_num_maps)
		return;

	lpm_en = pmc_core_reg_read(pmc, pmc->map->lpm_en_offset);
	/*
	 * For MTL, BIT 31 is not an lpm mode but an enable bit.
	 * The lower byte is enough to cover the number of lpm modes for
	 * all platforms, hence mask out the upper 3 bytes.
	 */
	pmcdev->num_lpm_modes = hweight32(lpm_en & 0xFF);

	/* Read 32 bit LPM_PRI register */
	lpm_pri = pmc_core_reg_read(pmc, pmc->map->lpm_priority_offset);


	/*
	 * If lpm_pri value passes verification, then override the default
	 * modes here. Otherwise stick with the default.
	 */
	if (pmc_core_pri_verify(lpm_pri, mode_order))
		/* Get list of modes in priority order */
		for (mode = 0; mode < LPM_MAX_NUM_MODES; mode++)
			pri_order[mode_order[mode]] = mode;
	else
		dev_warn(&pdev->dev, "Assuming a default substate order for this platform\n");

	/*
	 * Loop through all modes from lowest to highest priority,
	 * and capture all enabled modes in order
	 */
	i = 0;
	for (p = LPM_MAX_NUM_MODES - 1; p >= 0; p--) {
		int mode = pri_order[p];

		if (!(BIT(mode) & lpm_en))
			continue;

		pmcdev->lpm_en_modes[i++] = mode;
	}
}
1019
1020 int get_primary_reg_base(struct pmc *pmc)
1021 {
1022         u64 slp_s0_addr;
1023
1024         if (lpit_read_residency_count_address(&slp_s0_addr)) {
1025                 pmc->base_addr = PMC_BASE_ADDR_DEFAULT;
1026
1027                 if (page_is_ram(PHYS_PFN(pmc->base_addr)))
1028                         return -ENODEV;
1029         } else {
1030                 pmc->base_addr = slp_s0_addr - pmc->map->slp_s0_offset;
1031         }
1032
1033         pmc->regbase = ioremap(pmc->base_addr, pmc->map->regmap_length);
1034         if (!pmc->regbase)
1035                 return -ENOMEM;
1036         return 0;
1037 }
1038
/* Remove the driver's entire debugfs directory tree. */
static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
	debugfs_remove_recursive(pmcdev->dbgfs_dir);
}
1043
1044 static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
1045 {
1046         struct pmc *primary_pmc = pmcdev->pmcs[PMC_IDX_MAIN];
1047         struct dentry *dir;
1048
1049         dir = debugfs_create_dir("pmc_core", NULL);
1050         pmcdev->dbgfs_dir = dir;
1051
1052         debugfs_create_file("slp_s0_residency_usec", 0444, dir, primary_pmc,
1053                             &pmc_core_dev_state);
1054
1055         if (primary_pmc->map->pfear_sts)
1056                 debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
1057                                     pmcdev, &pmc_core_ppfear_fops);
1058
1059         debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
1060                             &pmc_core_ltr_ignore_ops);
1061
1062         debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);
1063
1064         debugfs_create_file("package_cstate_show", 0444, dir, primary_pmc,
1065                             &pmc_core_pkgc_fops);
1066
1067         if (primary_pmc->map->pll_sts)
1068                 debugfs_create_file("pll_status", 0444, dir, pmcdev,
1069                                     &pmc_core_pll_fops);
1070
1071         if (primary_pmc->map->mphy_sts)
1072                 debugfs_create_file("mphy_core_lanes_power_gating_status",
1073                                     0444, dir, pmcdev,
1074                                     &pmc_core_mphy_pg_fops);
1075
1076         if (primary_pmc->map->slps0_dbg_maps) {
1077                 debugfs_create_file("slp_s0_debug_status", 0444,
1078                                     dir, pmcdev,
1079                                     &pmc_core_slps0_dbg_fops);
1080
1081                 debugfs_create_bool("slp_s0_dbg_latch", 0644,
1082                                     dir, &slps0_dbg_latch);
1083         }
1084
1085         if (primary_pmc->map->lpm_en_offset) {
1086                 debugfs_create_file("substate_residencies", 0444,
1087                                     pmcdev->dbgfs_dir, pmcdev,
1088                                     &pmc_core_substate_res_fops);
1089         }
1090
1091         if (primary_pmc->map->lpm_status_offset) {
1092                 debugfs_create_file("substate_status_registers", 0444,
1093                                     pmcdev->dbgfs_dir, pmcdev,
1094                                     &pmc_core_substate_sts_regs_fops);
1095                 debugfs_create_file("substate_live_status_registers", 0444,
1096                                     pmcdev->dbgfs_dir, pmcdev,
1097                                     &pmc_core_substate_l_sts_regs_fops);
1098                 debugfs_create_file("lpm_latch_mode", 0644,
1099                                     pmcdev->dbgfs_dir, pmcdev,
1100                                     &pmc_core_lpm_latch_mode_fops);
1101         }
1102
1103         if (primary_pmc->lpm_req_regs) {
1104                 debugfs_create_file("substate_requirements", 0444,
1105                                     pmcdev->dbgfs_dir, pmcdev,
1106                                     &pmc_core_substate_req_regs_fops);
1107         }
1108 }
1109
/*
 * CPU match table. driver_data holds the platform-specific init function
 * that pmc_core_probe() calls to select register maps for the detected SoC.
 */
static const struct x86_cpu_id intel_pmc_core_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,		spt_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,		spt_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,		spt_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,		spt_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L,	cnp_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		icl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI,	icl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,		cnp_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,		cnp_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,	tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,	icl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,		tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,		tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT,	tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,		adl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,	tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,		adl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,	adl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L,	mtl_core_init),
	{}
};

MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);
1136
/*
 * Sunrisepoint PMC PCI device. Its presence is used in pmc_core_probe() to
 * distinguish an SPT PCH from a CNP PCH on CPUs sharing the same model ID.
 */
static const struct pci_device_id pmc_pci_ids[] = {
	{ PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
	{ }
};
1141
/*
 * This quirk can be used on those platforms where
 * the platform BIOS enforces 24Mhz crystal to shutdown
 * before PMC can assert SLP_S0#.
 */
static bool xtal_ignore;
/* DMI callback: only records that the quirk applies; acted on later. */
static int quirk_xtal_ignore(const struct dmi_system_id *id)
{
	xtal_ignore = true;
	return 0;
}
1153
1154 static void pmc_core_xtal_ignore(struct pmc *pmc)
1155 {
1156         u32 value;
1157
1158         value = pmc_core_reg_read(pmc, pmc->map->pm_vric1_offset);
1159         /* 24MHz Crystal Shutdown Qualification Disable */
1160         value |= SPT_PMC_VRIC1_XTALSDQDIS;
1161         /* Low Voltage Mode Enable */
1162         value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
1163         pmc_core_reg_write(pmc, pmc->map->pm_vric1_offset, value);
1164 }
1165
/* Systems that need the 24MHz crystal-shutdown-ignore quirk. */
static const struct dmi_system_id pmc_core_dmi_table[]  = {
	{
	.callback = quirk_xtal_ignore,
	.ident = "HP Elite x2 1013 G3",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "HP"),
		DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"),
		},
	},
	{}
};
1177
/* Match the DMI table and apply any flagged quirks to the given PMC. */
static void pmc_core_do_dmi_quirks(struct pmc *pmc)
{
	dmi_check_system(pmc_core_dmi_table);

	/* Set by quirk_xtal_ignore() when a listed system matched above. */
	if (xtal_ignore)
		pmc_core_xtal_ignore(pmc);
}
1185
1186 static void pmc_core_clean_structure(struct platform_device *pdev)
1187 {
1188         struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
1189         int i;
1190
1191         for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
1192                 struct pmc *pmc = pmcdev->pmcs[i];
1193
1194                 if (pmc)
1195                         iounmap(pmc->regbase);
1196         }
1197
1198         if (pmcdev->ssram_pcidev) {
1199                 pci_dev_put(pmcdev->ssram_pcidev);
1200                 pci_disable_device(pmcdev->ssram_pcidev);
1201         }
1202         platform_set_drvdata(pdev, NULL);
1203         mutex_destroy(&pmcdev->lock);
1204 }
1205
/*
 * Probe the platform device: match the running CPU to its platform init
 * routine, allocate and initialize the primary PMC, apply quirks and
 * register the debugfs interface.
 */
static int pmc_core_probe(struct platform_device *pdev)
{
	static bool device_initialized;
	struct pmc_dev *pmcdev;
	const struct x86_cpu_id *cpu_id;
	int (*core_init)(struct pmc_dev *pmcdev);
	struct pmc *primary_pmc;
	int ret;

	/* Only one PMC core device instance is supported. */
	if (device_initialized)
		return -ENODEV;

	pmcdev = devm_kzalloc(&pdev->dev, sizeof(*pmcdev), GFP_KERNEL);
	if (!pmcdev)
		return -ENOMEM;

	platform_set_drvdata(pdev, pmcdev);
	pmcdev->pdev = pdev;

	cpu_id = x86_match_cpu(intel_pmc_core_ids);
	if (!cpu_id)
		return -ENODEV;

	/* driver_data of the match entry is the platform init function. */
	core_init = (int (*)(struct pmc_dev *))cpu_id->driver_data;

	/* Primary PMC */
	primary_pmc = devm_kzalloc(&pdev->dev, sizeof(*primary_pmc), GFP_KERNEL);
	if (!primary_pmc)
		return -ENOMEM;
	pmcdev->pmcs[PMC_IDX_MAIN] = primary_pmc;

	/*
	 * Coffee Lake has CPU ID of Kaby Lake and Cannon Lake PCH. So here
	 * Sunrisepoint PCH regmap can't be used. Use Cannon Lake PCH regmap
	 * in this case.
	 */
	if (core_init == spt_core_init && !pci_dev_present(pmc_pci_ids))
		core_init = cnp_core_init;

	mutex_init(&pmcdev->lock);
	ret = core_init(pmcdev);
	if (ret) {
		/* Undo everything acquired so far, including the mutex. */
		pmc_core_clean_structure(pdev);
		return ret;
	}

	pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(primary_pmc);
	pmc_core_get_low_power_modes(pdev);
	pmc_core_do_dmi_quirks(primary_pmc);

	pmc_core_dbgfs_register(pmcdev);
	/* Report the maximum representable SLP_S0 residency to the PM core. */
	pm_report_max_hw_sleep(FIELD_MAX(SLP_S0_RES_COUNTER_MASK) *
			       pmc_core_adjust_slp_s0_step(primary_pmc, 1));

	device_initialized = true;
	dev_info(&pdev->dev, " initialized\n");

	return 0;
}
1265
/* Driver removal: tear down debugfs, then release all PMC resources. */
static void pmc_core_remove(struct platform_device *pdev)
{
	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);

	pmc_core_dbgfs_unregister(pmcdev);
	pmc_core_clean_structure(pdev);
}
1272
/* When set, resume emits a diagnostic dump if the system failed to reach S0ix. */
static bool warn_on_s0ix_failures;
module_param(warn_on_s0ix_failures, bool, 0644);
MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");
1276
/*
 * Late suspend callback: snapshot the PC10 and S0ix residency counters so
 * that resume can detect whether the platform actually entered S0ix.
 */
static __maybe_unused int pmc_core_suspend(struct device *dev)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];

	/* Check if the suspend will actually use S0ix */
	if (pm_suspend_via_firmware())
		return 0;

	/* Save PC10 residency for checking later */
	if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter))
		return -EIO;

	/* Save S0ix residency for checking later */
	if (pmc_core_dev_state_get(pmc, &pmcdev->s0ix_counter))
		return -EIO;

	return 0;
}
1296
1297 static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev)
1298 {
1299         u64 pc10_counter;
1300
1301         if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter))
1302                 return false;
1303
1304         if (pc10_counter == pmcdev->pc10_counter)
1305                 return true;
1306
1307         return false;
1308 }
1309
1310 static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
1311 {
1312         u64 s0ix_counter;
1313
1314         if (pmc_core_dev_state_get(pmcdev->pmcs[PMC_IDX_MAIN], &s0ix_counter))
1315                 return false;
1316
1317         pm_report_hw_sleep_time((u32)(s0ix_counter - pmcdev->s0ix_counter));
1318
1319         if (s0ix_counter == pmcdev->s0ix_counter)
1320                 return true;
1321
1322         return false;
1323 }
1324
1325 int pmc_core_resume_common(struct pmc_dev *pmcdev)
1326 {
1327         struct device *dev = &pmcdev->pdev->dev;
1328         struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
1329         const struct pmc_bit_map **maps = pmc->map->lpm_sts;
1330         int offset = pmc->map->lpm_status_offset;
1331         int i;
1332
1333         /* Check if the syspend used S0ix */
1334         if (pm_suspend_via_firmware())
1335                 return 0;
1336
1337         if (!pmc_core_is_s0ix_failed(pmcdev))
1338                 return 0;
1339
1340         if (!warn_on_s0ix_failures)
1341                 return 0;
1342
1343         if (pmc_core_is_pc10_failed(pmcdev)) {
1344                 /* S0ix failed because of PC10 entry failure */
1345                 dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n",
1346                          pmcdev->pc10_counter);
1347                 return 0;
1348         }
1349
1350         /* The real interesting case - S0ix failed - lets ask PMC why. */
1351         dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
1352                  pmcdev->s0ix_counter);
1353
1354         if (pmc->map->slps0_dbg_maps)
1355                 pmc_core_slps0_display(pmc, dev, NULL);
1356
1357         for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
1358                 struct pmc *pmc = pmcdev->pmcs[i];
1359
1360                 if (!pmc)
1361                         continue;
1362                 if (pmc->map->lpm_sts)
1363                         pmc_core_lpm_display(pmc, dev, NULL, offset, i, "STATUS", maps);
1364         }
1365
1366         return 0;
1367 }
1368
1369 static __maybe_unused int pmc_core_resume(struct device *dev)
1370 {
1371         struct pmc_dev *pmcdev = dev_get_drvdata(dev);
1372
1373         if (pmcdev->resume)
1374                 return pmcdev->resume(pmcdev);
1375
1376         return pmc_core_resume_common(pmcdev);
1377 }
1378
/* Use late/early sleep hooks so counters are sampled close to entry/exit. */
static const struct dev_pm_ops pmc_core_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
};

static const struct acpi_device_id pmc_core_acpi_ids[] = {
	{"INT33A1", 0}, /* _HID for Intel Power Engine, _CID PNP0D80*/
	{ }
};
MODULE_DEVICE_TABLE(acpi, pmc_core_acpi_ids);
1388
/* Platform driver bound via the ACPI ID table above. */
static struct platform_driver pmc_core_driver = {
	.driver = {
		.name = "intel_pmc_core",
		.acpi_match_table = ACPI_PTR(pmc_core_acpi_ids),
		.pm = &pmc_core_pm_ops,
		.dev_groups = pmc_dev_groups,
	},
	.probe = pmc_core_probe,
	.remove_new = pmc_core_remove,
};

module_platform_driver(pmc_core_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel PMC Core Driver");
This page took 0.139749 seconds and 4 git commands to generate.