// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Speed Select Interface: Common functions
 * Copyright (c) 2019, Intel Corporation.
 * All rights reserved.
 *
 * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 */

#include <linux/cpufeature.h>
#include <linux/cpuhotplug.h>
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <uapi/linux/isst_if.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

#include "isst_if_common.h"

#define MSR_THREAD_ID_INFO	0x53
#define MSR_PM_LOGICAL_ID	0x54
#define MSR_CPU_BUS_NUMBER	0x128

static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX];

static int punit_msr_white_list[] = {
	MSR_TURBO_RATIO_LIMIT,
	MSR_CONFIG_TDP_CONTROL,
	MSR_TURBO_RATIO_LIMIT1,
	MSR_TURBO_RATIO_LIMIT2,
	MSR_PM_LOGICAL_ID,
};

struct isst_valid_cmd_ranges {
	u16 cmd;
	u16 sub_cmd_beg;
	u16 sub_cmd_end;
};

struct isst_cmd_set_req_type {
	u16 cmd;
	u16 sub_cmd;
	u16 param;
};

static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
	{0x7F, 0x00, 0x0B},
	{0x7F, 0x10, 0x12},
	{0x7F, 0x20, 0x23},
	{0x94, 0x03, 0x03},
	{0x95, 0x03, 0x03},
};

static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
	{0xD0, 0x00, 0x08},
	{0x7F, 0x00, 0x00},
	{0x7F, 0x02, 0x00},
	{0x7F, 0x08, 0x00},
	{0x95, 0x03, 0x03},
};

struct isst_cmd {
	struct hlist_node hnode;
	u64 data;
	u32 cmd;
	int cpu;
	int mbox_cmd_type;
	u32 param;
};

static bool isst_hpm_support;

static DECLARE_HASHTABLE(isst_hash, 8);
static DEFINE_MUTEX(isst_hash_lock);

static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
			      u64 data)
{
	struct isst_cmd *sst_cmd;

	sst_cmd = kmalloc(sizeof(*sst_cmd), GFP_KERNEL);
	if (!sst_cmd)
		return -ENOMEM;

	sst_cmd->cpu = cpu;
	sst_cmd->cmd = cmd;
	sst_cmd->mbox_cmd_type = mbox_cmd_type;
	sst_cmd->param = param;
	sst_cmd->data = data;

	hash_add(isst_hash, &sst_cmd->hnode, sst_cmd->cmd);

	return 0;
}

static void isst_delete_hash(void)
{
	struct isst_cmd *sst_cmd;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(isst_hash, i, tmp, sst_cmd, hnode) {
		hash_del(&sst_cmd->hnode);
		kfree(sst_cmd);
	}
}

/**
 * isst_store_cmd() - Store command to a hash table
 * @cmd: Mailbox command.
 * @sub_cmd: Mailbox sub-command or MSR id.
 * @cpu: Target CPU for the command.
 * @mbox_cmd_type: Mailbox or MSR command.
 * @param: Mailbox parameter.
 * @data: Mailbox request data or MSR data.
 *
 * Stores the command to a hash table if there is no such command already
 * stored. If already stored, update the latest parameter and data for the
 * command.
 *
 * Return: Return result of store to hash table, 0 for success, others for
 * failure.
 */
int isst_store_cmd(int cmd, int sub_cmd, u32 cpu, int mbox_cmd_type,
		   u32 param, u64 data)
{
	struct isst_cmd *sst_cmd;
	int full_cmd, ret;

	full_cmd = (cmd & GENMASK_ULL(15, 0)) << 16;
	full_cmd |= (sub_cmd & GENMASK_ULL(15, 0));
	mutex_lock(&isst_hash_lock);
	hash_for_each_possible(isst_hash, sst_cmd, hnode, full_cmd) {
		if (sst_cmd->cmd == full_cmd && sst_cmd->cpu == cpu &&
		    sst_cmd->mbox_cmd_type == mbox_cmd_type) {
			sst_cmd->param = param;
			sst_cmd->data = data;
			mutex_unlock(&isst_hash_lock);
			return 0;
		}
	}

	ret = isst_store_new_cmd(full_cmd, cpu, mbox_cmd_type, param, data);
	mutex_unlock(&isst_hash_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(isst_store_cmd);

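/*
 * Usage sketch (illustrative, not part of this file): a mailbox client
 * driver typically records a successful "set" request after executing it,
 * so that isst_resume_common() can replay it after suspend/resume. The
 * mbox_cmd fields below follow struct isst_if_mbox_cmd from the uapi
 * header; mbox_cmd_type 1 marks a mailbox (not MSR) command:
 *
 *	if (!ret && !resume && isst_if_mbox_cmd_set_req(mbox_cmd))
 *		ret = isst_store_cmd(mbox_cmd->command,
 *				     mbox_cmd->sub_command,
 *				     mbox_cmd->logical_cpu, 1,
 *				     mbox_cmd->parameter,
 *				     mbox_cmd->req_data);
 */
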
static void isst_mbox_resume_command(struct isst_if_cmd_cb *cb,
				     struct isst_cmd *sst_cmd)
{
	struct isst_if_mbox_cmd mbox_cmd;
	int wr_only;

	mbox_cmd.command = (sst_cmd->cmd & GENMASK_ULL(31, 16)) >> 16;
	mbox_cmd.sub_command = sst_cmd->cmd & GENMASK_ULL(15, 0);
	mbox_cmd.parameter = sst_cmd->param;
	mbox_cmd.req_data = sst_cmd->data;
	mbox_cmd.logical_cpu = sst_cmd->cpu;
	(cb->cmd_callback)((u8 *)&mbox_cmd, &wr_only, 1);
}

/**
 * isst_resume_common() - Process Resume request
 *
 * On resume replay all mailbox commands and MSRs.
 *
 * Return: None.
 */
void isst_resume_common(void)
{
	struct isst_cmd *sst_cmd;
	int i;

	hash_for_each(isst_hash, i, sst_cmd, hnode) {
		struct isst_if_cmd_cb *cb;

		if (sst_cmd->mbox_cmd_type) {
			cb = &punit_callbacks[ISST_IF_DEV_MBOX];
			if (cb->registered)
				isst_mbox_resume_command(cb, sst_cmd);
		} else {
			wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
					   sst_cmd->data);
		}
	}
}
EXPORT_SYMBOL_GPL(isst_resume_common);

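/*
 * Illustrative sketch (hypothetical function names): a client driver
 * would invoke the replay from its PM resume callback, for example:
 *
 *	static int __maybe_unused isst_if_example_resume(struct device *dev)
 *	{
 *		isst_resume_common();
 *		return 0;
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(isst_example_pm_ops, NULL,
 *				 isst_if_example_resume);
 */
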
static void isst_restore_msr_local(int cpu)
{
	struct isst_cmd *sst_cmd;
	int i;

	mutex_lock(&isst_hash_lock);
	for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
		if (!punit_msr_white_list[i])
			break;

		hash_for_each_possible(isst_hash, sst_cmd, hnode,
				       punit_msr_white_list[i]) {
			if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu)
				wrmsrl_safe(sst_cmd->cmd, sst_cmd->data);
		}
	}
	mutex_unlock(&isst_hash_lock);
}

/**
 * isst_if_mbox_cmd_invalid() - Check invalid mailbox commands
 * @cmd: Pointer to the command structure to verify.
 *
 * An invalid command to the PUNIT may result in instability of the platform.
 * This function has a whitelist of commands, which are allowed.
 *
 * Return: Return true if the command is invalid, else false.
 */
bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd)
{
	int i;

	if (cmd->logical_cpu >= nr_cpu_ids)
		return true;

	for (i = 0; i < ARRAY_SIZE(isst_valid_cmds); ++i) {
		if (cmd->command == isst_valid_cmds[i].cmd &&
		    (cmd->sub_command >= isst_valid_cmds[i].sub_cmd_beg &&
		     cmd->sub_command <= isst_valid_cmds[i].sub_cmd_end)) {
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_invalid);

/**
 * isst_if_mbox_cmd_set_req() - Check if a mailbox command is a set request
 * @cmd: Pointer to the command structure to verify.
 *
 * Check if the given mailbox command is a set request and not a get request.
 *
 * Return: Return true if the command is a set request, else false.
 */
bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *cmd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(isst_cmd_set_reqs); ++i) {
		if (cmd->command == isst_cmd_set_reqs[i].cmd &&
		    cmd->sub_command == isst_cmd_set_reqs[i].sub_cmd &&
		    cmd->parameter == isst_cmd_set_reqs[i].param) {
			return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_set_req);

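/*
 * Typical validation flow in a mailbox client (sketch, with hypothetical
 * variable names): reject disallowed commands outright, then require
 * privilege only for "set" requests, which change PUNIT state:
 *
 *	if (isst_if_mbox_cmd_invalid(mbox_cmd))
 *		return -EINVAL;
 *
 *	if (isst_if_mbox_cmd_set_req(mbox_cmd) && !capable(CAP_SYS_ADMIN))
 *		return -EPERM;
 */
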
static int isst_if_api_version;

static int isst_if_get_platform_info(void __user *argp)
{
	struct isst_if_platform_info info;

	info.api_version = isst_if_api_version;
	info.driver_version = ISST_IF_DRIVER_VERSION;
	info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT;
	info.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered;
	info.mmio_supported = punit_callbacks[ISST_IF_DEV_MMIO].registered;

	if (copy_to_user(argp, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

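/*
 * Matching user space call (sketch; error handling elided). The ioctl
 * fills struct isst_if_platform_info as declared in
 * <uapi/linux/isst_if.h>:
 *
 *	struct isst_if_platform_info info;
 *	int fd = open("/dev/isst_interface", O_RDWR);
 *
 *	if (fd >= 0 && !ioctl(fd, ISST_IF_GET_PLATFORM_INFO, &info))
 *		printf("api:%d mbox:%d mmio:%d\n", info.api_version,
 *		       info.mbox_supported, info.mmio_supported);
 */
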
#define ISST_MAX_BUS_NUMBER	2

struct isst_if_cpu_info {
	/* For BUS 0 and BUS 1 only, which we need for PUNIT interface */
	int bus_info[ISST_MAX_BUS_NUMBER];
	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
	int punit_cpu_id;
	int numa_node;
};

struct isst_if_pkg_info {
	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
};

static struct isst_if_cpu_info *isst_cpu_info;
static struct isst_if_pkg_info *isst_pkg_info;

static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
	struct pci_dev *matched_pci_dev = NULL;
	struct pci_dev *pci_dev = NULL;
	struct pci_dev *_pci_dev = NULL;
	int no_matches = 0, pkg_id;
	int bus_number;

	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
		return NULL;

	pkg_id = topology_logical_package_id(cpu);
	if (pkg_id >= topology_max_packages())
		return NULL;

	bus_number = isst_cpu_info[cpu].bus_info[bus_no];
	if (bus_number < 0)
		return NULL;

	for_each_pci_dev(_pci_dev) {
		int node;

		if (_pci_dev->bus->number != bus_number ||
		    _pci_dev->devfn != PCI_DEVFN(dev, fn))
			continue;

		++no_matches;
		if (!matched_pci_dev)
			matched_pci_dev = _pci_dev;

		node = dev_to_node(&_pci_dev->dev);
		if (node == NUMA_NO_NODE) {
			pr_info_once("Failed to get NUMA node for CPU:%d bus:%d dev:%d fn:%d\n",
				     cpu, bus_no, dev, fn);
			continue;
		}

		if (node == isst_cpu_info[cpu].numa_node) {
			isst_pkg_info[pkg_id].pci_dev[bus_no] = _pci_dev;

			pci_dev = _pci_dev;
			break;
		}
	}

	/*
	 * If there is no NUMA matched pci_dev, then there can be following cases:
	 * 1. CONFIG_NUMA is not defined: In this case if there is only single
	 *    device match, then we don't need NUMA information. Simply return
	 *    the last match. Otherwise return NULL.
	 * 2. NUMA information is not exposed via the _SEG method. In this case
	 *    it is similar to case 1.
	 * 3. NUMA information doesn't match the CPU's NUMA node and there is
	 *    more than one match: return NULL.
	 */
	if (!pci_dev && no_matches == 1)
		pci_dev = matched_pci_dev;

	/* Return pci_dev pointer for any matched CPU in the package */
	if (!pci_dev)
		pci_dev = isst_pkg_info[pkg_id].pci_dev[bus_no];

	return pci_dev;
}

/**
 * isst_if_get_pci_dev() - Get the PCI device instance for a CPU
 * @cpu: Logical CPU number.
 * @bus_no: The bus number assigned by the hardware.
 * @dev: The device number assigned by the hardware.
 * @fn: The function number assigned by the hardware.
 *
 * Using cached bus information, find out the PCI device for a bus number,
 * device and function.
 *
 * Return: Return pci_dev pointer or NULL.
 */
struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
	struct pci_dev *pci_dev;

	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
		return NULL;

	pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];

	if (pci_dev && pci_dev->devfn == PCI_DEVFN(dev, fn))
		return pci_dev;

	return _isst_if_get_pci_dev(cpu, bus_no, dev, fn);
}
EXPORT_SYMBOL_GPL(isst_if_get_pci_dev);

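/*
 * Example caller (sketch): the device/function pairs cached in
 * isst_if_cpu_online() below are bus 0, device 0, function 1 and bus 1,
 * device 30, function 1. A client would look one of them up like this:
 *
 *	struct pci_dev *pdev = isst_if_get_pci_dev(cpu, 1, 30, 1);
 *
 *	if (!pdev)
 *		return -ENODEV;
 */
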
static int isst_if_cpu_online(unsigned int cpu)
{
	u64 data;
	int ret;

	isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);

	ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data);
	if (ret) {
		/* This is not a fatal error on MSR mailbox only I/F */
		isst_cpu_info[cpu].bus_info[0] = -1;
		isst_cpu_info[cpu].bus_info[1] = -1;
	} else {
		isst_cpu_info[cpu].bus_info[0] = data & 0xff;
		isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff;
		isst_cpu_info[cpu].pci_dev[0] = _isst_if_get_pci_dev(cpu, 0, 0, 1);
		isst_cpu_info[cpu].pci_dev[1] = _isst_if_get_pci_dev(cpu, 1, 30, 1);
	}

	if (isst_hpm_support) {
		ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data);
		if (!ret)
			goto set_punit_id;
	}

	ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data);
	if (ret) {
		isst_cpu_info[cpu].punit_cpu_id = -1;
		return ret;
	}

set_punit_id:
	isst_cpu_info[cpu].punit_cpu_id = data;

	isst_restore_msr_local(cpu);

	return 0;
}

static int isst_if_online_id;

static int isst_if_cpu_info_init(void)
{
	int ret;

	isst_cpu_info = kcalloc(num_possible_cpus(),
				sizeof(*isst_cpu_info),
				GFP_KERNEL);
	if (!isst_cpu_info)
		return -ENOMEM;

	isst_pkg_info = kcalloc(topology_max_packages(),
				sizeof(*isst_pkg_info),
				GFP_KERNEL);
	if (!isst_pkg_info) {
		kfree(isst_cpu_info);
		return -ENOMEM;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				"platform/x86/isst-if:online",
				isst_if_cpu_online, NULL);
	if (ret < 0) {
		kfree(isst_pkg_info);
		kfree(isst_cpu_info);
		return ret;
	}

	isst_if_online_id = ret;

	return 0;
}

static void isst_if_cpu_info_exit(void)
{
	cpuhp_remove_state(isst_if_online_id);
	kfree(isst_pkg_info);
	kfree(isst_cpu_info);
}

static long isst_if_proc_phyid_req(u8 *cmd_ptr, int *write_only, int resume)
{
	struct isst_if_cpu_map *cpu_map;

	cpu_map = (struct isst_if_cpu_map *)cmd_ptr;
	if (cpu_map->logical_cpu >= nr_cpu_ids ||
	    cpu_map->logical_cpu >= num_possible_cpus())
		return -EINVAL;

	*write_only = 0;
	cpu_map->physical_cpu = isst_cpu_info[cpu_map->logical_cpu].punit_cpu_id;

	return 0;
}

static bool match_punit_msr_white_list(int msr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
		if (punit_msr_white_list[i] == msr)
			return true;
	}

	return false;
}

static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume)
{
	struct isst_if_msr_cmd *msr_cmd;
	int ret;

	msr_cmd = (struct isst_if_msr_cmd *)cmd_ptr;

	if (!match_punit_msr_white_list(msr_cmd->msr))
		return -EINVAL;

	if (msr_cmd->logical_cpu >= nr_cpu_ids)
		return -EINVAL;

	if (msr_cmd->read_write) {
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		ret = wrmsrl_safe_on_cpu(msr_cmd->logical_cpu,
					 msr_cmd->msr,
					 msr_cmd->data);
		*write_only = 1;
		if (!ret && !resume)
			ret = isst_store_cmd(0, msr_cmd->msr,
					     msr_cmd->logical_cpu,
					     0, 0, msr_cmd->data);
	} else {
		u64 data;

		ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu,
					 msr_cmd->msr, &data);
		if (!ret) {
			msr_cmd->data = data;
			*write_only = 0;
		}
	}

	return ret;
}

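/*
 * User space view of the handler above (sketch; error handling elided):
 * reads of whitelisted MSRs only need the device node open, while writes
 * additionally require CAP_SYS_ADMIN:
 *
 *	struct isst_if_msr_cmds cmds = { .cmd_count = 1 };
 *
 *	cmds.msr_cmd[0].logical_cpu = 0;
 *	cmds.msr_cmd[0].msr = 0x1AD;	// MSR_TURBO_RATIO_LIMIT
 *	cmds.msr_cmd[0].read_write = 0;	// read request
 *	ioctl(fd, ISST_IF_MSR_COMMAND, &cmds);
 */
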
static long isst_if_exec_multi_cmd(void __user *argp, struct isst_if_cmd_cb *cb)
{
	unsigned char __user *ptr;
	u32 cmd_count;
	u8 *cmd_ptr;
	long ret;
	int i;

	/* Each multi command has u32 command count as the first field */
	if (copy_from_user(&cmd_count, argp, sizeof(cmd_count)))
		return -EFAULT;

	if (!cmd_count || cmd_count > ISST_IF_CMD_LIMIT)
		return -EINVAL;

	cmd_ptr = kmalloc(cb->cmd_size, GFP_KERNEL);
	if (!cmd_ptr)
		return -ENOMEM;

	/* cb->offset points to start of the command after the command count */
	ptr = argp + cb->offset;

	for (i = 0; i < cmd_count; ++i) {
		int wr_only;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (copy_from_user(cmd_ptr, ptr, cb->cmd_size)) {
			ret = -EFAULT;
			break;
		}

		ret = cb->cmd_callback(cmd_ptr, &wr_only, 0);
		if (ret)
			break;

		if (!wr_only && copy_to_user(ptr, cmd_ptr, cb->cmd_size)) {
			ret = -EFAULT;
			break;
		}

		ptr += cb->cmd_size;
	}

	kfree(cmd_ptr);

	return i ? i : ret;
}

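/*
 * Buffer layout handled above (informational): every multi-command ioctl
 * argument starts with a u32 count, followed by cmd_count fixed-size
 * command records, e.g. for ISST_IF_MSR_COMMAND:
 *
 *	+-----------+-------------------+-------------------+----
 *	| cmd_count | isst_if_msr_cmd 0 | isst_if_msr_cmd 1 | ...
 *	+-----------+-------------------+-------------------+----
 *	            ^-- argp + cb->offset
 *
 * On a mid-batch failure the loop returns the number of commands that
 * completed, so user space can detect how far processing got.
 */
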
static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct isst_if_cmd_cb cmd_cb;
	struct isst_if_cmd_cb *cb;
	long ret = -ENOTTY;
	int i;

	switch (cmd) {
	case ISST_IF_GET_PLATFORM_INFO:
		ret = isst_if_get_platform_info(argp);
		break;
	case ISST_IF_GET_PHY_ID:
		cmd_cb.cmd_size = sizeof(struct isst_if_cpu_map);
		cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map);
		cmd_cb.cmd_callback = isst_if_proc_phyid_req;
		ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
		break;
	case ISST_IF_IO_CMD:
		cb = &punit_callbacks[ISST_IF_DEV_MMIO];
		if (cb->registered)
			ret = isst_if_exec_multi_cmd(argp, cb);
		break;
	case ISST_IF_MBOX_COMMAND:
		cb = &punit_callbacks[ISST_IF_DEV_MBOX];
		if (cb->registered)
			ret = isst_if_exec_multi_cmd(argp, cb);
		break;
	case ISST_IF_MSR_COMMAND:
		cmd_cb.cmd_size = sizeof(struct isst_if_msr_cmd);
		cmd_cb.offset = offsetof(struct isst_if_msr_cmds, msr_cmd);
		cmd_cb.cmd_callback = isst_if_msr_cmd_req;
		ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
		break;
	default:
		for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
			struct isst_if_cmd_cb *cb = &punit_callbacks[i];

			if (cb->def_ioctl) {
				ret = cb->def_ioctl(file, cmd, arg);
				if (!ret)
					break;
			}
		}
		break;
	}

	return ret;
}

/* Lock to prevent module registration when already opened by user space */
static DEFINE_MUTEX(punit_misc_dev_open_lock);
static int misc_device_open;

static int isst_if_open(struct inode *inode, struct file *file)
{
	int i, ret = 0;

	/* Fail open, if a module is going away */
	mutex_lock(&punit_misc_dev_open_lock);
	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
		struct isst_if_cmd_cb *cb = &punit_callbacks[i];

		if (cb->registered && !try_module_get(cb->owner)) {
			ret = -ENODEV;
			break;
		}
	}
	if (ret) {
		int j;

		for (j = 0; j < i; ++j) {
			struct isst_if_cmd_cb *cb;

			cb = &punit_callbacks[j];
			if (cb->registered)
				module_put(cb->owner);
		}
	} else {
		misc_device_open++;
	}
	mutex_unlock(&punit_misc_dev_open_lock);

	return ret;
}

static int isst_if_release(struct inode *inode, struct file *f)
{
	int i;

	mutex_lock(&punit_misc_dev_open_lock);
	misc_device_open--;
	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
		struct isst_if_cmd_cb *cb = &punit_callbacks[i];

		if (cb->registered)
			module_put(cb->owner);
	}
	mutex_unlock(&punit_misc_dev_open_lock);

	return 0;
}

static const struct file_operations isst_if_char_driver_ops = {
	.open = isst_if_open,
	.unlocked_ioctl = isst_if_def_ioctl,
	.release = isst_if_release,
};

static struct miscdevice isst_if_char_driver = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "isst_interface",
	.fops = &isst_if_char_driver_ops,
};

static int isst_misc_reg(void)
{
	int ret;

	ret = isst_if_cpu_info_init();
	if (ret)
		return ret;

	ret = misc_register(&isst_if_char_driver);
	if (ret)
		isst_if_cpu_info_exit();

	return ret;
}

static void isst_misc_unreg(void)
{
	misc_deregister(&isst_if_char_driver);
	isst_if_cpu_info_exit();
}

/**
 * isst_if_cdev_register() - Register callback for IOCTL
 * @device_type: The device type this callback is handling.
 * @cb: Callback structure.
 *
 * This function registers a callback for a device type. On the very first
 * call it will register a misc device, which is used for the user/kernel
 * interface. Subsequent calls simply increment the reference count.
 * Registration will fail if the user has already opened the misc device
 * for operation. Also, if the misc device creation failed, it will not
 * try again and all callers will get failure.
 *
 * Return: Return the result of misc device creation, or -EINVAL for an
 * unsupported device type.
 */
int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
{
	if (device_type >= ISST_IF_DEV_MAX)
		return -EINVAL;

	if (device_type < ISST_IF_DEV_TPMI && isst_hpm_support)
		return -ENODEV;

	mutex_lock(&punit_misc_dev_open_lock);
	/* Device is already open, we don't want to add new callbacks */
	if (misc_device_open) {
		mutex_unlock(&punit_misc_dev_open_lock);
		return -EAGAIN;
	}
	if (!cb->api_version)
		cb->api_version = ISST_IF_API_VERSION;
	if (cb->api_version > isst_if_api_version)
		isst_if_api_version = cb->api_version;
	memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
	punit_callbacks[device_type].registered = 1;
	mutex_unlock(&punit_misc_dev_open_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(isst_if_cdev_register);

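/*
 * Registration sketch from a hypothetical client module (field values are
 * placeholders; the real clients are the mbox/mmio/tpmi drivers in this
 * directory):
 *
 *	static struct isst_if_cmd_cb example_cb = {
 *		.cmd_size = sizeof(struct isst_if_mbox_cmd),
 *		.offset = offsetof(struct isst_if_mbox_cmds, mbox_cmd),
 *		.owner = THIS_MODULE,
 *		.cmd_callback = example_mbox_cmd_cb,
 *	};
 *
 *	ret = isst_if_cdev_register(ISST_IF_DEV_MBOX, &example_cb);
 *	...
 *	isst_if_cdev_unregister(ISST_IF_DEV_MBOX);	// in module exit
 */
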
/**
 * isst_if_cdev_unregister() - Unregister callback for IOCTL
 * @device_type: The device type to unregister.
 *
 * This function unregisters the previously registered callback. If this
 * is the last registered callback, then the misc device is removed.
 *
 * Return: None.
 */
void isst_if_cdev_unregister(int device_type)
{
	mutex_lock(&punit_misc_dev_open_lock);
	punit_callbacks[device_type].def_ioctl = NULL;
	punit_callbacks[device_type].registered = 0;
	if (device_type == ISST_IF_DEV_MBOX)
		isst_delete_hash();
	mutex_unlock(&punit_misc_dev_open_lock);
}
EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);

#define SST_HPM_SUPPORTED	0x01
#define SST_MBOX_SUPPORTED	0x02

static const struct x86_cpu_id isst_cpu_ids[] = {
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT,	SST_HPM_SUPPORTED),
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X,	SST_HPM_SUPPORTED),
	X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X,	SST_HPM_SUPPORTED),
	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X,	0),
	X86_MATCH_VFM(INTEL_GRANITERAPIDS_D,	SST_HPM_SUPPORTED),
	X86_MATCH_VFM(INTEL_GRANITERAPIDS_X,	SST_HPM_SUPPORTED),
	X86_MATCH_VFM(INTEL_ICELAKE_D,		0),
	X86_MATCH_VFM(INTEL_ICELAKE_X,		0),
	X86_MATCH_VFM(INTEL_PANTHERCOVE_X,	SST_HPM_SUPPORTED),
	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X,	0),
	X86_MATCH_VFM(INTEL_SKYLAKE_X,		SST_MBOX_SUPPORTED),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, isst_cpu_ids);

static int __init isst_if_common_init(void)
{
	const struct x86_cpu_id *id;

	id = x86_match_cpu(isst_cpu_ids);
	if (!id)
		return -ENODEV;

	if (id->driver_data == SST_HPM_SUPPORTED) {
		isst_hpm_support = true;
	} else if (id->driver_data == SST_MBOX_SUPPORTED) {
		u64 data;

		/* Can fail only on some Skylake-X generations */
		if (rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data) ||
		    rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data))
			return -ENODEV;
	}

	return isst_misc_reg();
}
module_init(isst_if_common_init)

static void __exit isst_if_common_exit(void)
{
	isst_misc_unreg();
}
module_exit(isst_if_common_exit)

MODULE_DESCRIPTION("ISST common interface module");
MODULE_LICENSE("GPL v2");