// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>

#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_common.h"
#include "adf_cfg_user.h"

#define ADF_CFG_MAX_SECTION 512
#define ADF_CFG_MAX_KEY_VAL 256

#define DEVICE_NAME "qat_adf_ctl"

static DEFINE_MUTEX(adf_ctl_lock);
static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);

static const struct file_operations adf_ctl_ops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = adf_ctl_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
};

static const struct class adf_ctl_class = {
        .name = DEVICE_NAME,
};

struct adf_ctl_drv_info {
        unsigned int major;
        struct cdev drv_cdev;
};

static struct adf_ctl_drv_info adf_ctl_drv;

static void adf_chr_drv_destroy(void)
{
        device_destroy(&adf_ctl_class, MKDEV(adf_ctl_drv.major, 0));
        cdev_del(&adf_ctl_drv.drv_cdev);
        class_unregister(&adf_ctl_class);
        unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1);
}

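/*
 * Register the qat_adf_ctl character device: allocate a chrdev region,
 * register the device class, add the cdev and create the device node.
 * Each step is unwound in reverse order on failure.
 */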
static int adf_chr_drv_create(void)
{
        dev_t dev_id;
        struct device *drv_device;
        int ret;

        if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
                pr_err("QAT: unable to allocate chrdev region\n");
                return -EFAULT;
        }

        ret = class_register(&adf_ctl_class);
        if (ret)
                goto err_chrdev_unreg;

        adf_ctl_drv.major = MAJOR(dev_id);
        cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops);
        if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) {
                pr_err("QAT: cdev add failed\n");
                goto err_class_destr;
        }

        drv_device = device_create(&adf_ctl_class, NULL,
                                   MKDEV(adf_ctl_drv.major, 0),
                                   NULL, DEVICE_NAME);
        if (IS_ERR(drv_device)) {
                pr_err("QAT: failed to create device\n");
                goto err_cdev_del;
        }
        return 0;
err_cdev_del:
        cdev_del(&adf_ctl_drv.drv_cdev);
err_class_destr:
        class_unregister(&adf_ctl_class);
err_chrdev_unreg:
        unregister_chrdev_region(dev_id, 1);
        return -EFAULT;
}

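/*
 * Copy the user-supplied control structure into a kernel buffer.
 * The caller owns the returned buffer and must kfree() it.
 */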
static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
                                   unsigned long arg)
{
        struct adf_user_cfg_ctl_data *cfg_data;

        cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
        if (!cfg_data)
                return -ENOMEM;

        /* Initialize device id to NO DEVICE as 0 is a valid device id */
        cfg_data->device_id = ADF_CFG_NO_DEVICE;

        if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
                pr_err("QAT: failed to copy from user cfg_data.\n");
                kfree(cfg_data);
                return -EIO;
        }

        *ctl_data = cfg_data;
        return 0;
}

static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
                                  const char *section,
                                  const struct adf_user_cfg_key_val *key_val)
{
        if (key_val->type == ADF_HEX) {
                long *ptr = (long *)key_val->val;
                long val = *ptr;

                if (adf_cfg_add_key_value_param(accel_dev, section,
                                                key_val->key, (void *)val,
                                                key_val->type)) {
                        dev_err(&GET_DEV(accel_dev),
                                "failed to add hex keyvalue.\n");
                        return -EFAULT;
                }
        } else {
                if (adf_cfg_add_key_value_param(accel_dev, section,
                                                key_val->key, key_val->val,
                                                key_val->type)) {
                        dev_err(&GET_DEV(accel_dev),
                                "failed to add keyvalue.\n");
                        return -EFAULT;
                }
        }
        return 0;
}

static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
                                   struct adf_user_cfg_ctl_data *ctl_data)
{
        struct adf_user_cfg_key_val key_val;
        struct adf_user_cfg_key_val *params_head;
        struct adf_user_cfg_section section, *section_head;
        int i, j;

        section_head = ctl_data->config_section;

        for (i = 0; section_head && i < ADF_CFG_MAX_SECTION; i++) {
                if (copy_from_user(&section, (void __user *)section_head,
                                   sizeof(*section_head))) {
                        dev_err(&GET_DEV(accel_dev),
                                "failed to copy section info\n");
                        goto out_err;
                }

                if (adf_cfg_section_add(accel_dev, section.name)) {
                        dev_err(&GET_DEV(accel_dev),
                                "failed to add section.\n");
                        goto out_err;
                }

                params_head = section.params;

                for (j = 0; params_head && j < ADF_CFG_MAX_KEY_VAL; j++) {
                        if (copy_from_user(&key_val, (void __user *)params_head,
                                           sizeof(key_val))) {
                                dev_err(&GET_DEV(accel_dev),
                                        "Failed to copy keyvalue.\n");
                                goto out_err;
                        }
                        if (adf_add_key_value_data(accel_dev, section.name,
                                                   &key_val)) {
                                goto out_err;
                        }
                        params_head = key_val.next;
                }
                section_head = section.next;
        }
        return 0;
out_err:
        adf_cfg_del_all(accel_dev);
        return -EFAULT;
}

static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
                                    unsigned long arg)
{
        int ret;
        struct adf_user_cfg_ctl_data *ctl_data;
        struct adf_accel_dev *accel_dev;

        ret = adf_ctl_alloc_resources(&ctl_data, arg);
        if (ret)
                return ret;

        accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
        if (!accel_dev) {
                ret = -EFAULT;
                goto out;
        }

        if (adf_dev_started(accel_dev)) {
                ret = -EFAULT;
                goto out;
        }

        if (adf_copy_key_value_data(accel_dev, ctl_data)) {
                ret = -EFAULT;
                goto out;
        }
        set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
out:
        kfree(ctl_data);
        return ret;
}

static int adf_ctl_is_device_in_use(int id)
{
        struct adf_accel_dev *dev;

        list_for_each_entry(dev, adf_devmgr_get_head(), list) {
                if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
                        if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
                                dev_info(&GET_DEV(dev),
                                         "device qat_dev%d is busy\n",
                                         dev->accel_id);
                                return -EBUSY;
                        }
                }
        }
        return 0;
}

static void adf_ctl_stop_devices(u32 id)
{
        struct adf_accel_dev *accel_dev;

        list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
                if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
                        if (!adf_dev_started(accel_dev))
                                continue;

                        /* First stop all VFs */
                        if (!accel_dev->is_vf)
                                continue;

                        adf_dev_down(accel_dev);
                }
        }

        list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
                if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
                        if (!adf_dev_started(accel_dev))
                                continue;

                        adf_dev_down(accel_dev);
                }
        }
}

static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
                                  unsigned long arg)
{
        int ret;
        struct adf_user_cfg_ctl_data *ctl_data;

        ret = adf_ctl_alloc_resources(&ctl_data, arg);
        if (ret)
                return ret;

        if (adf_devmgr_verify_id(ctl_data->device_id)) {
                pr_err("QAT: Device %d not found\n", ctl_data->device_id);
                ret = -ENODEV;
                goto out;
        }

        ret = adf_ctl_is_device_in_use(ctl_data->device_id);
        if (ret)
                goto out;

        if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
                pr_info("QAT: Stopping all acceleration devices.\n");
        else
                pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
                        ctl_data->device_id);

        adf_ctl_stop_devices(ctl_data->device_id);

out:
        kfree(ctl_data);
        return ret;
}

static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
                                   unsigned long arg)
{
        int ret;
        struct adf_user_cfg_ctl_data *ctl_data;
        struct adf_accel_dev *accel_dev;

        ret = adf_ctl_alloc_resources(&ctl_data, arg);
        if (ret)
                return ret;

        ret = -ENODEV;
        accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
        if (!accel_dev)
                goto out;

        dev_info(&GET_DEV(accel_dev),
                 "Starting acceleration device qat_dev%d.\n",
                 ctl_data->device_id);

        ret = adf_dev_up(accel_dev, false);

        if (ret) {
                dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
                        ctl_data->device_id);
                adf_dev_down(accel_dev);
        }
out:
        kfree(ctl_data);
        return ret;
}

static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
                                         unsigned long arg)
{
        u32 num_devices = 0;

        adf_devmgr_get_num_dev(&num_devices);
        if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
                return -EFAULT;

        return 0;
}

static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
                                    unsigned long arg)
{
        struct adf_hw_device_data *hw_data;
        struct adf_dev_status_info dev_info;
        struct adf_accel_dev *accel_dev;

        if (copy_from_user(&dev_info, (void __user *)arg,
                           sizeof(struct adf_dev_status_info))) {
                pr_err("QAT: failed to copy from user.\n");
                return -EFAULT;
        }

        accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
        if (!accel_dev)
                return -ENODEV;

        hw_data = accel_dev->hw_device;
        dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
        dev_info.num_ae = hw_data->get_num_aes(hw_data);
        dev_info.num_accel = hw_data->get_num_accels(hw_data);
        dev_info.num_logical_accel = hw_data->num_logical_accel;
        dev_info.banks_per_accel = hw_data->num_banks
                                        / hw_data->num_logical_accel;
        strscpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
        dev_info.instance_id = hw_data->instance_id;
        dev_info.type = hw_data->dev_class->type;
        dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
        dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
        dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);

        if (copy_to_user((void __user *)arg, &dev_info,
                         sizeof(struct adf_dev_status_info))) {
                dev_err(&GET_DEV(accel_dev), "failed to copy status.\n");
                return -EFAULT;
        }
        return 0;
}

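/*
 * Entry point for the control device: dispatch to the handler for the
 * requested command while holding adf_ctl_lock, so configuration and
 * start/stop requests are serialized.
 */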
static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
        int ret;

        if (mutex_lock_interruptible(&adf_ctl_lock))
                return -EFAULT;

        switch (cmd) {
        case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
                ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
                break;
        case IOCTL_STOP_ACCEL_DEV:
                ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
                break;
        case IOCTL_START_ACCEL_DEV:
                ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
                break;
        case IOCTL_GET_NUM_DEVICES:
                ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
                break;
        case IOCTL_STATUS_ACCEL_DEV:
                ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
                break;
        default:
                pr_err_ratelimited("QAT: Invalid ioctl %d\n", cmd);
                ret = -EFAULT;
                break;
        }
        mutex_unlock(&adf_ctl_lock);
        return ret;
}

static int __init adf_register_ctl_device_driver(void)
{
        if (adf_chr_drv_create())
                goto err_chr_dev;

        if (adf_init_misc_wq())
                goto err_misc_wq;

        if (adf_init_aer())
                goto err_aer;

        if (adf_init_pf_wq())
                goto err_pf_wq;

        if (adf_init_vf_wq())
                goto err_vf_wq;

        if (qat_crypto_register())
                goto err_crypto_register;

        if (qat_compression_register())
                goto err_compression_register;

        return 0;

err_compression_register:
        qat_crypto_unregister();
err_crypto_register:
        adf_exit_vf_wq();
err_vf_wq:
        adf_exit_pf_wq();
err_pf_wq:
        adf_exit_aer();
err_aer:
        adf_exit_misc_wq();
err_misc_wq:
        adf_chr_drv_destroy();
err_chr_dev:
        mutex_destroy(&adf_ctl_lock);
        return -EFAULT;
}

static void __exit adf_unregister_ctl_device_driver(void)
{
        adf_chr_drv_destroy();
        adf_exit_misc_wq();
        adf_exit_aer();
        adf_exit_vf_wq();
        adf_exit_pf_wq();
        qat_crypto_unregister();
        qat_compression_unregister();
        adf_clean_vf_map(false);
        mutex_destroy(&adf_ctl_lock);
}

module_init(adf_register_ctl_device_driver);
module_exit(adf_unregister_ctl_device_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_ALIAS_CRYPTO("intel_qat");
MODULE_VERSION(ADF_DRV_VERSION);
MODULE_IMPORT_NS("CRYPTO_INTERNAL");