// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/list.h>
#include "adf_cfg.h"
#include "adf_common_drv.h"

static LIST_HEAD(accel_table);
static LIST_HEAD(vfs_table);
static DEFINE_MUTEX(table_lock);
static u32 num_devices;
static u8 id_map[ADF_MAX_DEVICES];

struct vf_id_map {
	u32 bdf;
	u32 id;
	u32 fake_id;
	bool attached;
	struct list_head list;
};

static int adf_get_vf_id(struct adf_accel_dev *vf)
{
	return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
		PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
		(PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
}

static int adf_get_vf_num(struct adf_accel_dev *vf)
{
	return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
}

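/*
 * Illustrative note (not from the original source): adf_get_vf_id() folds a
 * VF's PCI slot and function into a single per-bus index, since
 * 7 * (slot - 1) + func + (slot - 1) == 8 * (slot - 1) + func.
 * For a hypothetical VF at slot 2, function 3 on bus 0x3d:
 *
 *	id  = 8 * (2 - 1) + 3;		// 11
 *	bdf = (0x3d << 8) | id;		// 0x3d0b, the key used by adf_find_vf()
 *
 * adf_get_vf_num() prepends the bus number so VFs behind different PFs never
 * collide in vfs_table.
 */
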
static struct vf_id_map *adf_find_vf(u32 bdf)
{
	struct list_head *itr;

	list_for_each(itr, &vfs_table) {
		struct vf_id_map *ptr =
			list_entry(itr, struct vf_id_map, list);

		if (ptr->bdf == bdf)
			return ptr;
	}
	return NULL;
}

static int adf_get_vf_real_id(u32 fake)
{
	struct list_head *itr;

	list_for_each(itr, &vfs_table) {
		struct vf_id_map *ptr =
			list_entry(itr, struct vf_id_map, list);
		if (ptr->fake_id == fake)
			return ptr->id;
	}
	return -1;
}

/**
 * adf_clean_vf_map() - Cleans VF id mappings
 * @vf: flag indicating whether mappings are cleaned
 *	for vfs only or for vfs and pfs
 *
 * Function cleans internal ids for virtual functions.
 */
void adf_clean_vf_map(bool vf)
{
	struct vf_id_map *map;
	struct list_head *ptr, *tmp;

	mutex_lock(&table_lock);
	list_for_each_safe(ptr, tmp, &vfs_table) {
		map = list_entry(ptr, struct vf_id_map, list);
		if (map->bdf != -1) {
			id_map[map->id] = 0;
			num_devices--;
		}

		if (vf && map->bdf == -1)
			continue;

		list_del(ptr);
		kfree(map);
	}
	mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_clean_vf_map);

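/*
 * Usage sketch (assumption, not a call site from this file): a hypothetical
 * SR-IOV teardown path could drop the stale VF mappings like this:
 *
 *	static void example_disable_sriov(struct adf_accel_dev *accel_dev)
 *	{
 *		pci_disable_sriov(accel_to_pci_dev(accel_dev));
 *		// true drops VF entries only; false clears PF entries as well
 *		adf_clean_vf_map(true);
 *	}
 */
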
/**
 * adf_devmgr_update_class_index() - Update internal index
 * @hw_data:  Pointer to internal device data.
 *
 * Function updates internal dev index for VFs
 */
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
{
	struct adf_hw_device_class *class = hw_data->dev_class;
	struct list_head *itr;
	int i = 0;

	list_for_each(itr, &accel_table) {
		struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);

		if (ptr->hw_device->dev_class == class)
			ptr->hw_device->instance_id = i++;

		if (i == class->instances)
			break;
	}
}
EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);

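/*
 * Usage sketch (illustrative only): after a device of a given class is added
 * to or removed from accel_table, the per-class instance numbering can be
 * refreshed from its hardware data:
 *
 *	adf_devmgr_update_class_index(accel_dev->hw_device);
 */
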
static unsigned int adf_find_free_id(void)
{
	unsigned int i;

	for (i = 0; i < ADF_MAX_DEVICES; i++) {
		if (!id_map[i]) {
			id_map[i] = 1;
			return i;
		}
	}
	return ADF_MAX_DEVICES + 1;
}

/**
 * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
 * @accel_dev:  Pointer to acceleration device.
 * @pf:		Corresponding PF if the accel_dev is a VF
 *
 * Function adds acceleration device to the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
		       struct adf_accel_dev *pf)
{
	struct list_head *itr;
	int ret = 0;

	if (num_devices == ADF_MAX_DEVICES) {
		dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
			ADF_MAX_DEVICES);
		return -EFAULT;
	}

	mutex_lock(&table_lock);
	atomic_set(&accel_dev->ref_count, 0);

	/* PF on host or VF on guest - optimized to remove redundant is_vf */
	if (!accel_dev->is_vf || !pf) {
		struct vf_id_map *map;

		list_for_each(itr, &accel_table) {
			struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);

			if (ptr == accel_dev) {
				ret = -EEXIST;
				goto unlock;
			}
		}

		list_add_tail(&accel_dev->list, &accel_table);
		accel_dev->accel_id = adf_find_free_id();
		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
			ret = -EFAULT;
			goto unlock;
		}
		num_devices++;
		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			goto unlock;
		}
		map->bdf = ~0;
		map->id = accel_dev->accel_id;
		map->fake_id = map->id;
		map->attached = true;
		list_add_tail(&map->list, &vfs_table);
	} else if (accel_dev->is_vf && pf) {
		/* VF on host */
		struct vf_id_map *map;

		map = adf_find_vf(adf_get_vf_num(accel_dev));
		if (map) {
			struct vf_id_map *next;

			accel_dev->accel_id = map->id;
			list_add_tail(&accel_dev->list, &accel_table);
			map->fake_id++;
			map->attached = true;
			next = list_next_entry(map, list);
			while (next && &next->list != &vfs_table) {
				next->fake_id++;
				next = list_next_entry(next, list);
			}
			ret = 0;
			goto unlock;
		}

		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			goto unlock;
		}
		accel_dev->accel_id = adf_find_free_id();
		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
			kfree(map);
			ret = -EFAULT;
			goto unlock;
		}
		num_devices++;
		list_add_tail(&accel_dev->list, &accel_table);
		map->bdf = adf_get_vf_num(accel_dev);
		map->id = accel_dev->accel_id;
		map->fake_id = map->id;
		map->attached = true;
		list_add_tail(&map->list, &vfs_table);
	}
	mutex_init(&accel_dev->state_lock);
unlock:
	mutex_unlock(&table_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);

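/*
 * Usage sketch (illustrative only; example_probe() is hypothetical): a QAT
 * device driver registers a freshly allocated accel_dev before bringing it up:
 *
 *	static int example_probe(struct adf_accel_dev *accel_dev,
 *				 struct adf_accel_dev *pf)
 *	{
 *		int ret;
 *
 *		// pf is NULL for a PF, or for a VF running inside a guest
 *		ret = adf_devmgr_add_dev(accel_dev, pf);
 *		if (ret)
 *			return ret;
 *		// ... device specific init, undone with adf_devmgr_rm_dev() on error
 *		return 0;
 *	}
 */
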
struct list_head *adf_devmgr_get_head(void)
{
	return &accel_table;
}

/**
 * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
 * @accel_dev:  Pointer to acceleration device.
 * @pf:		Corresponding PF if the accel_dev is a VF
 *
 * Function removes acceleration device from the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
		       struct adf_accel_dev *pf)
{
	mutex_lock(&table_lock);
	/* PF on host or VF on guest - optimized to remove redundant is_vf */
	if (!accel_dev->is_vf || !pf) {
		id_map[accel_dev->accel_id] = 0;
		num_devices--;
	} else if (accel_dev->is_vf && pf) {
		struct vf_id_map *map, *next;

		map = adf_find_vf(adf_get_vf_num(accel_dev));
		if (!map) {
			dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
			goto unlock;
		}
		map->fake_id--;
		map->attached = false;
		next = list_next_entry(map, list);
		while (next && &next->list != &vfs_table) {
			next->fake_id--;
			next = list_next_entry(next, list);
		}
	}
unlock:
	mutex_destroy(&accel_dev->state_lock);
	list_del(&accel_dev->list);
	mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);

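/*
 * Usage sketch (illustrative only): the remove path mirrors the probe sketch
 * above, dropping the device only after it has been shut down:
 *
 *	static void example_remove(struct adf_accel_dev *accel_dev,
 *				   struct adf_accel_dev *pf)
 *	{
 *		// ... device specific shutdown ...
 *		adf_devmgr_rm_dev(accel_dev, pf);
 *	}
 */
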
struct adf_accel_dev *adf_devmgr_get_first(void)
{
	struct adf_accel_dev *dev = NULL;

	if (!list_empty(&accel_table))
		dev = list_first_entry(&accel_table, struct adf_accel_dev,
				       list);
	return dev;
}

/**
 * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
 * @pci_dev:  Pointer to PCI device.
 *
 * Function returns acceleration device associated with the given PCI device.
 * To be used by QAT device specific drivers.
 *
 * Return: pointer to accel_dev or NULL if not found.
 */
struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
{
	struct list_head *itr;

	mutex_lock(&table_lock);
	list_for_each(itr, &accel_table) {
		struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);

		if (ptr->accel_pci_dev.pci_dev == pci_dev) {
			mutex_unlock(&table_lock);
			return ptr;
		}
	}
	mutex_unlock(&table_lock);
	return NULL;
}
EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);

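/*
 * Usage sketch (illustrative only): code that only holds a struct pci_dev,
 * e.g. a hypothetical PCI error handler, can recover the accel_dev:
 *
 *	static pci_ers_result_t example_error_detected(struct pci_dev *pdev,
 *						       pci_channel_state_t state)
 *	{
 *		struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
 *
 *		if (!accel_dev)
 *			return PCI_ERS_RESULT_NONE;
 *		// ... handle the error against accel_dev ...
 *		return PCI_ERS_RESULT_NEED_RESET;
 *	}
 */
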
struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id)
{
	struct list_head *itr;
	int real_id;

	mutex_lock(&table_lock);
	real_id = adf_get_vf_real_id(id);
	if (real_id < 0)
		goto unlock;

	id = real_id;

	list_for_each(itr, &accel_table) {
		struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);
		if (ptr->accel_id == id) {
			mutex_unlock(&table_lock);
			return ptr;
		}
	}
unlock:
	mutex_unlock(&table_lock);
	return NULL;
}

int adf_devmgr_verify_id(u32 id)
{
	if (id == ADF_CFG_ALL_DEVICES)
		return 0;

	if (adf_devmgr_get_dev_by_id(id))
		return 0;

	return -ENODEV;
}

static int adf_get_num_dettached_vfs(void)
{
	struct list_head *itr;
	int vfs = 0;

	mutex_lock(&table_lock);
	list_for_each(itr, &vfs_table) {
		struct vf_id_map *ptr =
			list_entry(itr, struct vf_id_map, list);
		if (ptr->bdf != ~0 && !ptr->attached)
			vfs++;
	}
	mutex_unlock(&table_lock);
	return vfs;
}

void adf_devmgr_get_num_dev(u32 *num)
{
	*num = num_devices - adf_get_num_dettached_vfs();
}

/**
 * adf_dev_in_use() - Check whether accel_dev is currently in use
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when device is in use, 0 otherwise.
 */
int adf_dev_in_use(struct adf_accel_dev *accel_dev)
{
	return atomic_read(&accel_dev->ref_count) != 0;
}
EXPORT_SYMBOL_GPL(adf_dev_in_use);

/**
 * adf_dev_get() - Increment accel_dev reference count
 * @accel_dev: Pointer to acceleration device.
 *
 * Increment the accel_dev refcount and if this is the first time
 * incrementing it during this period the accel_dev is in use,
 * increment the module refcount too.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 when successful, -EFAULT when the module refcount could not
 * be incremented.
 */
int adf_dev_get(struct adf_accel_dev *accel_dev)
{
	if (atomic_add_return(1, &accel_dev->ref_count) == 1)
		if (!try_module_get(accel_dev->owner))
			return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(adf_dev_get);

/**
 * adf_dev_put() - Decrement accel_dev reference count
 * @accel_dev: Pointer to acceleration device.
 *
 * Decrement the accel_dev refcount and if this is the last time
 * decrementing it during this period the accel_dev is in use,
 * decrement the module refcount too.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_dev_put(struct adf_accel_dev *accel_dev)
{
	if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
		module_put(accel_dev->owner);
}
EXPORT_SYMBOL_GPL(adf_dev_put);

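/*
 * Usage sketch (illustrative only): callers that hand work to a device wrap
 * the operation in a get/put pair so the owning module cannot be unloaded
 * while the device is busy:
 *
 *	if (adf_dev_get(accel_dev))
 *		return -EFAULT;
 *	// ... submit requests to accel_dev ...
 *	adf_dev_put(accel_dev);
 */
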
/**
 * adf_devmgr_in_reset() - Check whether device is in reset
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device is being reset, 0 otherwise.
 */
int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
{
	return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
}
EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);

/**
 * adf_dev_started() - Check whether device has started
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device has started, 0 otherwise.
 */
int adf_dev_started(struct adf_accel_dev *accel_dev)
{
	return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
}
EXPORT_SYMBOL_GPL(adf_dev_started);
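
/*
 * Usage sketch (illustrative only): a caller typically checks the device
 * state before queueing new work:
 *
 *	if (!adf_dev_started(accel_dev) || adf_devmgr_in_reset(accel_dev))
 *		return -EBUSY;
 *	// ... safe to use the device here ...
 */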