// SPDX-License-Identifier: GPL-2.0-only
/*
 * uncore-frequency-tpmi: Uncore frequency scaling using TPMI
 *
 * Copyright (c) 2023, Intel Corporation.
 *
 * The hardware interface to read/write is basically a substitution of
 * the legacy uncore ratio limit MSRs.
 * There are specific MMIO offsets and bits to get/set the minimum and
 * maximum uncore ratio, similar to the MSRs.
 * The scope of the uncore MSRs was package scope. But TPMI allows
 * new generation CPUs to have multiple uncore controls at the
 * uncore-cluster level. Each package can have multiple power domains,
 * which in turn can have multiple clusters.
 * Here the number of power domains equals the number of resources in
 * this auxiliary device. There are offsets and bits to discover the
 * number of clusters and the offset of each cluster's controls.
 */
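
/*
 * Layout sketch (illustrative, derived from the offsets used in this
 * driver rather than from a hardware specification):
 *
 *   package
 *     power domain (one per TPMI resource / MMIO region)
 *       offset 0: header qword (interface version, cluster ID mask)
 *       offset 8: cluster-offset qword, one byte per cluster giving
 *                 that cluster's qword offset within the region
 *       per cluster: a 4-qword block (status, control, adv_ctl1,
 *                 adv_ctl2) at the discovered offset
 */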

#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/intel_tpmi.h>

#include "uncore-frequency-common.h"

#define UNCORE_MAJOR_VERSION		0
#define UNCORE_MINOR_VERSION		2
#define UNCORE_ELC_SUPPORTED_VERSION	2
#define UNCORE_HEADER_INDEX		0
#define UNCORE_FABRIC_CLUSTER_OFFSET	8

/* status + control + adv_ctl1 + adv_ctl2 */
#define UNCORE_FABRIC_CLUSTER_SIZE	(4 * 8)

#define UNCORE_STATUS_INDEX		0
#define UNCORE_CONTROL_INDEX		8

#define UNCORE_FREQ_KHZ_MULTIPLIER	100000
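
/*
 * Each ratio step represents 100 MHz. For example (illustrative), a ratio
 * field value of 20 corresponds to 20 * UNCORE_FREQ_KHZ_MULTIPLIER =
 * 2,000,000 kHz, i.e. 2 GHz.
 */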

struct tpmi_uncore_struct;

/* Information for each cluster */
struct tpmi_uncore_cluster_info {
	bool root_domain;
	bool elc_supported;
	u8 __iomem *cluster_base;
	struct uncore_data uncore_data;
	struct tpmi_uncore_struct *uncore_root;
};

/* Information for each power domain */
struct tpmi_uncore_power_domain_info {
	u8 __iomem *uncore_base;
	int ufs_header_ver;
	int cluster_count;
	struct tpmi_uncore_cluster_info *cluster_infos;
};

/* Information for all power domains in a package */
struct tpmi_uncore_struct {
	int power_domain_count;
	int max_ratio;
	int min_ratio;
	struct tpmi_uncore_power_domain_info *pd_info;
	struct tpmi_uncore_cluster_info root_cluster;
	bool write_blocked;
};
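
/*
 * Ownership sketch (illustrative): one tpmi_uncore_struct is allocated per
 * package. It owns an array of tpmi_uncore_power_domain_info (one per TPMI
 * resource), each of which owns an array of tpmi_uncore_cluster_info. The
 * extra root_cluster entry is a pseudo cluster used to expose package-wide
 * aggregate controls; it is flagged with root_domain = true.
 */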

/* Bit definitions for STATUS register */
#define UNCORE_CURRENT_RATIO_MASK			GENMASK_ULL(6, 0)

/* Bit definitions for CONTROL register */
#define UNCORE_MAX_RATIO_MASK				GENMASK_ULL(14, 8)
#define UNCORE_MIN_RATIO_MASK				GENMASK_ULL(21, 15)
#define UNCORE_EFF_LAT_CTRL_RATIO_MASK			GENMASK_ULL(28, 22)
#define UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK		GENMASK_ULL(38, 32)
#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE	BIT(39)
#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK		GENMASK_ULL(46, 40)
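
/*
 * Illustrative sketch (not part of the driver logic): extracting and
 * updating a ratio field in the CONTROL register uses the bitfield
 * helpers, where 'base' and 'new_ratio' are placeholders for a cluster's
 * MMIO base and the desired ratio value:
 *
 *	u64 control = readq(base + UNCORE_CONTROL_INDEX);
 *	unsigned int max_khz = FIELD_GET(UNCORE_MAX_RATIO_MASK, control) *
 *			       UNCORE_FREQ_KHZ_MULTIPLIER;
 *
 *	control &= ~UNCORE_MAX_RATIO_MASK;
 *	control |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, new_ratio);
 *	writeq(control, base + UNCORE_CONTROL_INDEX);
 */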

/* Helper function to read MMIO offset for max/min control frequency */
static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info,
			      unsigned int *value, enum uncore_index index)
{
	u64 control;

	control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
	if (index == UNCORE_INDEX_MAX_FREQ)
		*value = FIELD_GET(UNCORE_MAX_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
	else
		*value = FIELD_GET(UNCORE_MIN_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
}

/* Helper function to read efficiency latency control values over MMIO */
static int read_eff_lat_ctrl(struct uncore_data *data, unsigned int *val, enum uncore_index index)
{
	struct tpmi_uncore_cluster_info *cluster_info;
	u64 ctrl;

	cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
	if (cluster_info->root_domain)
		return -ENODATA;

	if (!cluster_info->elc_supported)
		return -EOPNOTSUPP;

	ctrl = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);

	switch (index) {
	case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
		*val = FIELD_GET(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, ctrl);
		/* Convert the hardware fraction to a percentage */
		*val = DIV_ROUND_UP(*val * 100, FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK));
		break;
	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
		*val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, ctrl);
		/* Convert the hardware fraction to a percentage */
		*val = DIV_ROUND_UP(*val * 100, FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK));
		break;
	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
		*val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, ctrl);
		break;
	case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
		*val = FIELD_GET(UNCORE_EFF_LAT_CTRL_RATIO_MASK, ctrl) * UNCORE_FREQ_KHZ_MULTIPLIER;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

#define UNCORE_MAX_RATIO	FIELD_MAX(UNCORE_MAX_RATIO_MASK)

/* Helper for sysfs read for max/min frequencies. Called under mutex locks */
static int uncore_read_control_freq(struct uncore_data *data, unsigned int *value,
				    enum uncore_index index)
{
	struct tpmi_uncore_cluster_info *cluster_info;

	cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
	if (cluster_info->root_domain) {
		struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root;
		unsigned int min, max, v;
		int i, j;

		min = UNCORE_MAX_RATIO * UNCORE_FREQ_KHZ_MULTIPLIER;
		max = 0;

		/* Report the lowest min and the highest max across all clusters */
		for (i = 0; i < uncore_root->power_domain_count; ++i) {
			if (!uncore_root->pd_info[i].uncore_base)
				continue;
			for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j) {
				read_control_freq(&uncore_root->pd_info[i].cluster_infos[j],
						  &v, index);
				if (v < min)
					min = v;
				if (v > max)
					max = v;
			}
		}

		if (index == UNCORE_INDEX_MIN_FREQ)
			*value = min;
		else
			*value = max;

		return 0;
	}

	read_control_freq(cluster_info, value, index);

	return 0;
}

/* Helper function for writing efficiency latency control values over MMIO */
static int write_eff_lat_ctrl(struct uncore_data *data, unsigned int val, enum uncore_index index)
{
	struct tpmi_uncore_cluster_info *cluster_info;
	u64 control;

	cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
	if (cluster_info->root_domain)
		return -ENODATA;

	if (!cluster_info->elc_supported)
		return -EOPNOTSUPP;

	/* Validate the input before touching the hardware */
	switch (index) {
	case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
		if (val > 100)
			return -EINVAL;
		break;
	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
		if (val > 100)
			return -EINVAL;
		break;
	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
		if (val > 1)
			return -EINVAL;
		break;
	case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
		val /= UNCORE_FREQ_KHZ_MULTIPLIER;
		if (val > FIELD_MAX(UNCORE_EFF_LAT_CTRL_RATIO_MASK))
			return -EINVAL;
		break;
	default:
		return -EOPNOTSUPP;
	}

	control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);

	switch (index) {
	case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
		/* Convert the percentage to the hardware fraction */
		val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK);
		val /= 100;
		control &= ~UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK;
		control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, val);
		break;
	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
		/* Convert the percentage to the hardware fraction */
		val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK);
		val /= 100;
		control &= ~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK;
		control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, val);
		break;
	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
		control &= ~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE;
		control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, val);
		break;
	case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
		control &= ~UNCORE_EFF_LAT_CTRL_RATIO_MASK;
		control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_RATIO_MASK, val);
		break;
	default:
		break;
	}

	writeq(control, cluster_info->cluster_base + UNCORE_CONTROL_INDEX);

	return 0;
}

/* Helper function to write MMIO offset for max/min control frequency */
static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, unsigned int input,
			       enum uncore_index index)
{
	u64 control;

	control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);

	if (index == UNCORE_INDEX_MAX_FREQ) {
		control &= ~UNCORE_MAX_RATIO_MASK;
		control |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, input);
	} else {
		control &= ~UNCORE_MIN_RATIO_MASK;
		control |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input);
	}

	writeq(control, cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
}

/* Helper for sysfs write for max/min frequencies. Called under mutex locks */
static int uncore_write_control_freq(struct uncore_data *data, unsigned int input,
				     enum uncore_index index)
{
	struct tpmi_uncore_cluster_info *cluster_info;
	struct tpmi_uncore_struct *uncore_root;

	input /= UNCORE_FREQ_KHZ_MULTIPLIER;
	if (!input || input > UNCORE_MAX_RATIO)
		return -EINVAL;

	cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
	uncore_root = cluster_info->uncore_root;

	if (uncore_root->write_blocked)
		return -EPERM;

	/* Update each cluster in a package */
	if (cluster_info->root_domain) {
		struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root;
		int i, j;

		for (i = 0; i < uncore_root->power_domain_count; ++i) {
			if (!uncore_root->pd_info[i].uncore_base)
				continue;
			for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j)
				write_control_freq(&uncore_root->pd_info[i].cluster_infos[j],
						   input, index);
		}

		if (index == UNCORE_INDEX_MAX_FREQ)
			uncore_root->max_ratio = input;
		else
			uncore_root->min_ratio = input;

		return 0;
	}

	/* Per-cluster writes must stay within the package-level limits */
	if (index == UNCORE_INDEX_MAX_FREQ && uncore_root->max_ratio &&
	    uncore_root->max_ratio < input)
		return -EINVAL;

	if (index == UNCORE_INDEX_MIN_FREQ && uncore_root->min_ratio &&
	    uncore_root->min_ratio > input)
		return -EINVAL;

	write_control_freq(cluster_info, input, index);

	return 0;
}

/* Helper for sysfs read for the current uncore frequency. Called under mutex locks */
static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
{
	struct tpmi_uncore_cluster_info *cluster_info;
	u64 status;

	cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
	if (cluster_info->root_domain)
		return -ENODATA;

	status = readq((u8 __iomem *)cluster_info->cluster_base + UNCORE_STATUS_INDEX);
	*freq = FIELD_GET(UNCORE_CURRENT_RATIO_MASK, status) * UNCORE_FREQ_KHZ_MULTIPLIER;

	return 0;
}

/* Callback for sysfs read for TPMI uncore values. Called under mutex locks. */
static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index)
{
	switch (index) {
	case UNCORE_INDEX_MIN_FREQ:
	case UNCORE_INDEX_MAX_FREQ:
		return uncore_read_control_freq(data, value, index);

	case UNCORE_INDEX_CURRENT_FREQ:
		return uncore_read_freq(data, value);

	case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
	case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
		return read_eff_lat_ctrl(data, value, index);

	default:
		break;
	}

	return -EOPNOTSUPP;
}

/* Callback for sysfs write for TPMI uncore data. Called under mutex locks. */
static int uncore_write(struct uncore_data *data, unsigned int value, enum uncore_index index)
{
	switch (index) {
	case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
	case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
		return write_eff_lat_ctrl(data, value, index);

	case UNCORE_INDEX_MIN_FREQ:
	case UNCORE_INDEX_MAX_FREQ:
		return uncore_write_control_freq(data, value, index);

	default:
		break;
	}

	return -EOPNOTSUPP;
}

static void remove_cluster_entries(struct tpmi_uncore_struct *tpmi_uncore)
{
	int i;

	for (i = 0; i < tpmi_uncore->power_domain_count; ++i) {
		struct tpmi_uncore_power_domain_info *pd_info;
		int j;

		pd_info = &tpmi_uncore->pd_info[i];
		if (!pd_info->uncore_base)
			continue;

		for (j = 0; j < pd_info->cluster_count; ++j) {
			struct tpmi_uncore_cluster_info *cluster_info;

			cluster_info = &pd_info->cluster_infos[j];
			uncore_freq_remove_die_entry(&cluster_info->uncore_data);
		}
	}
}

#define UNCORE_VERSION_MASK			GENMASK_ULL(7, 0)
#define UNCORE_LOCAL_FABRIC_CLUSTER_ID_MASK	GENMASK_ULL(15, 8)
#define UNCORE_CLUSTER_OFF_MASK			GENMASK_ULL(7, 0)
#define UNCORE_MAX_CLUSTER_PER_DOMAIN		8
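
/*
 * Illustrative header decode (example value, not from real hardware):
 * if the header qword reads 0x0302, then
 * FIELD_GET(UNCORE_VERSION_MASK, header) = 2 (interface version, further
 * split by TPMI_MAJOR_VERSION()/TPMI_MINOR_VERSION() below) and
 * FIELD_GET(UNCORE_LOCAL_FABRIC_CLUSTER_ID_MASK, header) = 0x03, i.e.
 * two clusters (IDs 0 and 1) are present in this power domain.
 */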

static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
{
	bool read_blocked = 0, write_blocked = 0;
	struct intel_tpmi_plat_info *plat_info;
	struct tpmi_uncore_struct *tpmi_uncore;
	bool uncore_sysfs_added = false;
	int ret, i, pkg = 0;
	int num_resources;

	ret = tpmi_get_feature_status(auxdev, TPMI_ID_UNCORE, &read_blocked, &write_blocked);
	if (ret)
		dev_info(&auxdev->dev, "Can't read feature status: ignoring blocked status\n");

	if (read_blocked) {
		dev_info(&auxdev->dev, "Firmware has blocked reads, exiting\n");
		return -ENODEV;
	}

	/* Get the number of power domains, which is equal to the number of resources */
	num_resources = tpmi_get_resource_count(auxdev);
	if (!num_resources)
		return -EINVAL;

	/* Register callbacks to the uncore core */
	ret = uncore_freq_common_init(uncore_read, uncore_write);
	if (ret)
		return ret;

	/* Allocate an uncore instance per package */
	tpmi_uncore = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_uncore), GFP_KERNEL);
	if (!tpmi_uncore) {
		ret = -ENOMEM;
		goto err_rem_common;
	}

	/* Allocate memory for all power domains in a package */
	tpmi_uncore->pd_info = devm_kcalloc(&auxdev->dev, num_resources,
					    sizeof(*tpmi_uncore->pd_info),
					    GFP_KERNEL);
	if (!tpmi_uncore->pd_info) {
		ret = -ENOMEM;
		goto err_rem_common;
	}

	tpmi_uncore->power_domain_count = num_resources;
	tpmi_uncore->write_blocked = write_blocked;

	/* Get the package ID from the TPMI core */
	plat_info = tpmi_get_platform_data(auxdev);
	if (plat_info)
		pkg = plat_info->package_id;
	else
		dev_info(&auxdev->dev, "Platform information is NULL\n");

	for (i = 0; i < num_resources; ++i) {
		struct tpmi_uncore_power_domain_info *pd_info;
		struct resource *res;
		u64 cluster_offset;
		u8 cluster_mask;
		u64 header;
		int mask;
		int j;

		res = tpmi_get_resource_at_index(auxdev, i);
		if (!res)
			continue;

		pd_info = &tpmi_uncore->pd_info[i];

		pd_info->uncore_base = devm_ioremap_resource(&auxdev->dev, res);
		if (IS_ERR(pd_info->uncore_base)) {
			ret = PTR_ERR(pd_info->uncore_base);
			/*
			 * Set to NULL so that clean up can still remove other
			 * entries already created, if any, by
			 * remove_cluster_entries()
			 */
			pd_info->uncore_base = NULL;
			goto remove_clusters;
		}

		/* Check the version and skip this resource if there is a mismatch */
		header = readq(pd_info->uncore_base);
		pd_info->ufs_header_ver = header & UNCORE_VERSION_MASK;

		if (pd_info->ufs_header_ver == TPMI_VERSION_INVALID)
			continue;

		if (TPMI_MAJOR_VERSION(pd_info->ufs_header_ver) != UNCORE_MAJOR_VERSION) {
			dev_err(&auxdev->dev, "Uncore: Unsupported major version:%lx\n",
				TPMI_MAJOR_VERSION(pd_info->ufs_header_ver));
			ret = -ENODEV;
			goto remove_clusters;
		}

		if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) > UNCORE_MINOR_VERSION)
			dev_info(&auxdev->dev, "Uncore: Ignore: Unsupported minor version:%lx\n",
				 TPMI_MINOR_VERSION(pd_info->ufs_header_ver));

		/* Get the cluster ID mask */
		cluster_mask = FIELD_GET(UNCORE_LOCAL_FABRIC_CLUSTER_ID_MASK, header);
		if (!cluster_mask) {
			dev_info(&auxdev->dev, "Uncore: Invalid cluster mask:%x\n", cluster_mask);
			continue;
		}

		/* Find out the number of clusters in this resource */
		pd_info->cluster_count = hweight8(cluster_mask);

		pd_info->cluster_infos = devm_kcalloc(&auxdev->dev, pd_info->cluster_count,
						      sizeof(struct tpmi_uncore_cluster_info),
						      GFP_KERNEL);
		if (!pd_info->cluster_infos) {
			ret = -ENOMEM;
			goto remove_clusters;
		}

		/*
		 * Each byte in this register points to the status and control
		 * registers of one cluster, for cluster IDs 0-7.
		 */
		cluster_offset = readq(pd_info->uncore_base +
				       UNCORE_FABRIC_CLUSTER_OFFSET);
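		/*
		 * Illustrative decode (example value): cluster_offset = 0x0804
		 * would place cluster 0 at qword offset 4 (byte offset 32) and
		 * cluster 1 at qword offset 8 (byte offset 64) from uncore_base.
		 */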

		for (j = 0; j < pd_info->cluster_count; ++j) {
			struct tpmi_uncore_cluster_info *cluster_info;

			/* Get the offset for this cluster */
			mask = (cluster_offset & UNCORE_CLUSTER_OFF_MASK);
			/* Offset is in qwords, so convert to bytes */
			mask <<= 3;

			cluster_info = &pd_info->cluster_infos[j];

			cluster_info->cluster_base = pd_info->uncore_base + mask;

			cluster_info->uncore_data.package_id = pkg;
			/* There are no separate dies as on Cascade Lake */
			cluster_info->uncore_data.die_id = 0;
			cluster_info->uncore_data.domain_id = i;
			cluster_info->uncore_data.cluster_id = j;

			cluster_info->uncore_root = tpmi_uncore;

			if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) >= UNCORE_ELC_SUPPORTED_VERSION)
				cluster_info->elc_supported = true;

			ret = uncore_freq_add_entry(&cluster_info->uncore_data, 0);
			if (ret) {
				cluster_info->cluster_base = NULL;
				goto remove_clusters;
			}

			/* Point to the next cluster offset */
			cluster_offset >>= UNCORE_MAX_CLUSTER_PER_DOMAIN;
			uncore_sysfs_added = true;
		}
	}

	if (!uncore_sysfs_added) {
		ret = -ENODEV;
		goto remove_clusters;
	}

	auxiliary_set_drvdata(auxdev, tpmi_uncore);

	if (topology_max_dies_per_package() > 1)
		return 0;

	tpmi_uncore->root_cluster.root_domain = true;
	tpmi_uncore->root_cluster.uncore_root = tpmi_uncore;

	tpmi_uncore->root_cluster.uncore_data.package_id = pkg;
	tpmi_uncore->root_cluster.uncore_data.domain_id = UNCORE_DOMAIN_ID_INVALID;
	ret = uncore_freq_add_entry(&tpmi_uncore->root_cluster.uncore_data, 0);
	if (ret)
		goto remove_clusters;

	return 0;

remove_clusters:
	remove_cluster_entries(tpmi_uncore);
err_rem_common:
	uncore_freq_common_exit();

	return ret;
}

static void uncore_remove(struct auxiliary_device *auxdev)
{
	struct tpmi_uncore_struct *tpmi_uncore = auxiliary_get_drvdata(auxdev);

	if (tpmi_uncore->root_cluster.root_domain)
		uncore_freq_remove_die_entry(&tpmi_uncore->root_cluster.uncore_data);

	remove_cluster_entries(tpmi_uncore);

	uncore_freq_common_exit();
}

static const struct auxiliary_device_id intel_uncore_id_table[] = {
	{ .name = "intel_vsec.tpmi-uncore" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, intel_uncore_id_table);

static struct auxiliary_driver intel_uncore_aux_driver = {
	.id_table	= intel_uncore_id_table,
	.remove		= uncore_remove,
	.probe		= uncore_probe,
};

module_auxiliary_driver(intel_uncore_aux_driver);
632 MODULE_IMPORT_NS("INTEL_TPMI");
633 MODULE_IMPORT_NS("INTEL_UNCORE_FREQUENCY");
634 MODULE_DESCRIPTION("Intel TPMI UFS Driver");
635 MODULE_LICENSE("GPL");