1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2022 Intel Corporation */
3 #include <linux/device.h>
4 #include <linux/errno.h>
6 #include "adf_accel_devices.h"
8 #include "adf_cfg_services.h"
9 #include "adf_common_drv.h"
/* Sentinel for sysfs.ring_num: no ring pair has been selected via rp2srv yet. */
#define UNSET_RING_NUM -1
/*
 * Accepted values for writes to the "state" attribute. The index returned
 * by sysfs_match_string() against this table selects the operation in
 * state_store(): 0 stops the device, 1 starts it.
 */
static const char * const state_operations[] = {
	"down",
	"up",
};
18 static ssize_t state_show(struct device *dev, struct device_attribute *attr,
21 struct adf_accel_dev *accel_dev;
24 accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
28 state = adf_dev_started(accel_dev) ? "up" : "down";
29 return sysfs_emit(buf, "%s\n", state);
32 static ssize_t state_store(struct device *dev, struct device_attribute *attr,
33 const char *buf, size_t count)
35 struct adf_accel_dev *accel_dev;
39 accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
43 accel_id = accel_dev->accel_id;
45 if (adf_devmgr_in_reset(accel_dev) || adf_dev_in_use(accel_dev)) {
46 dev_info(dev, "Device qat_dev%d is busy\n", accel_id);
50 ret = sysfs_match_string(state_operations, buf);
56 dev_info(dev, "Stopping device qat_dev%d\n", accel_id);
58 if (!adf_dev_started(accel_dev)) {
59 dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
65 ret = adf_dev_down(accel_dev);
71 dev_info(dev, "Starting device qat_dev%d\n", accel_id);
73 ret = adf_dev_up(accel_dev, true);
74 if (ret == -EALREADY) {
77 dev_err(dev, "Failed to start device qat_dev%d\n",
79 adf_dev_down(accel_dev);
90 static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr,
93 char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
94 struct adf_accel_dev *accel_dev;
97 accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
101 ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
102 ADF_SERVICES_ENABLED, services);
106 return sysfs_emit(buf, "%s\n", services);
109 static int adf_sysfs_update_dev_config(struct adf_accel_dev *accel_dev,
110 const char *services)
112 return adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
113 ADF_SERVICES_ENABLED, services,
117 static ssize_t cfg_services_store(struct device *dev, struct device_attribute *attr,
118 const char *buf, size_t count)
120 struct adf_hw_device_data *hw_data;
121 struct adf_accel_dev *accel_dev;
124 ret = sysfs_match_string(adf_cfg_services, buf);
128 accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
132 if (adf_dev_started(accel_dev)) {
133 dev_info(dev, "Device qat_dev%d must be down to reconfigure the service.\n",
134 accel_dev->accel_id);
138 ret = adf_sysfs_update_dev_config(accel_dev, adf_cfg_services[ret]);
142 hw_data = GET_HW_DATA(accel_dev);
144 /* Update capabilities mask after change in configuration.
145 * A call to this function is required as capabilities are, at the
146 * moment, tied to configuration
148 hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
149 if (!hw_data->accel_capabilities_mask)
155 static ssize_t pm_idle_enabled_show(struct device *dev, struct device_attribute *attr,
158 char pm_idle_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {};
159 struct adf_accel_dev *accel_dev;
162 accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
166 ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
167 ADF_PM_IDLE_SUPPORT, pm_idle_enabled);
169 return sysfs_emit(buf, "1\n");
171 return sysfs_emit(buf, "%s\n", pm_idle_enabled);
174 static ssize_t pm_idle_enabled_store(struct device *dev, struct device_attribute *attr,
175 const char *buf, size_t count)
177 unsigned long pm_idle_enabled_cfg_val;
178 struct adf_accel_dev *accel_dev;
179 bool pm_idle_enabled;
182 ret = kstrtobool(buf, &pm_idle_enabled);
186 pm_idle_enabled_cfg_val = pm_idle_enabled;
187 accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
191 if (adf_dev_started(accel_dev)) {
192 dev_info(dev, "Device qat_dev%d must be down to set pm_idle_enabled.\n",
193 accel_dev->accel_id);
197 ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
198 ADF_PM_IDLE_SUPPORT, &pm_idle_enabled_cfg_val,
205 static DEVICE_ATTR_RW(pm_idle_enabled);
207 static ssize_t auto_reset_show(struct device *dev, struct device_attribute *attr,
211 struct adf_accel_dev *accel_dev;
213 accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
217 auto_reset = accel_dev->autoreset_on_error ? "on" : "off";
219 return sysfs_emit(buf, "%s\n", auto_reset);
222 static ssize_t auto_reset_store(struct device *dev, struct device_attribute *attr,
223 const char *buf, size_t count)
225 struct adf_accel_dev *accel_dev;
226 bool enabled = false;
229 ret = kstrtobool(buf, &enabled);
233 accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
237 accel_dev->autoreset_on_error = enabled;
241 static DEVICE_ATTR_RW(auto_reset);
/* Read-write attributes backed by the state_* and cfg_services_* handlers. */
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RW(cfg_services);
246 static ssize_t rp2srv_show(struct device *dev, struct device_attribute *attr,
249 struct adf_hw_device_data *hw_data;
250 struct adf_accel_dev *accel_dev;
251 enum adf_cfg_service_type svc;
253 accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
257 hw_data = GET_HW_DATA(accel_dev);
259 if (accel_dev->sysfs.ring_num == UNSET_RING_NUM)
262 down_read(&accel_dev->sysfs.lock);
263 svc = GET_SRV_TYPE(accel_dev, accel_dev->sysfs.ring_num %
264 hw_data->num_banks_per_vf);
265 up_read(&accel_dev->sysfs.lock);
269 return sysfs_emit(buf, "%s\n", ADF_CFG_DC);
271 return sysfs_emit(buf, "%s\n", ADF_CFG_SYM);
273 return sysfs_emit(buf, "%s\n", ADF_CFG_ASYM);
280 static ssize_t rp2srv_store(struct device *dev, struct device_attribute *attr,
281 const char *buf, size_t count)
283 struct adf_accel_dev *accel_dev;
287 accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
291 ret = kstrtouint(buf, 10, &ring);
295 num_rings = GET_MAX_BANKS(accel_dev);
296 if (ring >= num_rings) {
297 dev_err(&GET_DEV(accel_dev),
298 "Device does not support more than %u ring pairs\n",
303 down_write(&accel_dev->sysfs.lock);
304 accel_dev->sysfs.ring_num = ring;
305 up_write(&accel_dev->sysfs.lock);
309 static DEVICE_ATTR_RW(rp2srv);
311 static ssize_t num_rps_show(struct device *dev, struct device_attribute *attr,
314 struct adf_accel_dev *accel_dev;
316 accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
320 return sysfs_emit(buf, "%u\n", GET_MAX_BANKS(accel_dev));
322 static DEVICE_ATTR_RO(num_rps);
324 static struct attribute *qat_attrs[] = {
325 &dev_attr_state.attr,
326 &dev_attr_cfg_services.attr,
327 &dev_attr_pm_idle_enabled.attr,
328 &dev_attr_rp2srv.attr,
329 &dev_attr_num_rps.attr,
330 &dev_attr_auto_reset.attr,
334 static struct attribute_group qat_group = {
339 int adf_sysfs_init(struct adf_accel_dev *accel_dev)
343 ret = devm_device_add_group(&GET_DEV(accel_dev), &qat_group);
345 dev_err(&GET_DEV(accel_dev),
346 "Failed to create qat attribute group: %d\n", ret);
349 accel_dev->sysfs.ring_num = UNSET_RING_NUM;
353 EXPORT_SYMBOL_GPL(adf_sysfs_init);