]> Git Repo - J-linux.git/blob - drivers/infiniband/hw/mlx5/data_direct.c
Merge tag 'vfs-6.13-rc7.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs
[J-linux.git] / drivers / infiniband / hw / mlx5 / data_direct.c
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3  * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
4  */
5
#include <linux/string.h>

#include "mlx5_ib.h"
#include "data_direct.h"
8
/* All data direct PCI devices currently probed, keyed by their VUID */
static LIST_HEAD(mlx5_data_direct_dev_list);
/* All IB devices that registered interest in a data direct device */
static LIST_HEAD(mlx5_data_direct_reg_list);

/*
 * This mutex should be held when accessing either of the above lists
 */
static DEFINE_MUTEX(mlx5_data_direct_mutex);
/*
 * Tracks one IB device's interest in a data direct device, matched by VUID.
 * Entries live on mlx5_data_direct_reg_list under mlx5_data_direct_mutex.
 */
struct mlx5_data_direct_registration {
	struct mlx5_ib_dev *ibdev;
	/* NUL-terminated VUID string; sized to the max VPD keyword length */
	char vuid[MLX5_ST_SZ_BYTES(array1024_auto) + 1];
	struct list_head list;
};
22
/* PCI IDs this driver binds to; zero entry terminates the table */
static const struct pci_device_id mlx5_data_direct_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 0x2100) }, /* ConnectX-8 Data Direct */
	{ 0, }
};
27
28 static int mlx5_data_direct_vpd_get_vuid(struct mlx5_data_direct_dev *dev)
29 {
30         struct pci_dev *pdev = dev->pdev;
31         unsigned int vpd_size, kw_len;
32         u8 *vpd_data;
33         int start;
34         int ret;
35
36         vpd_data = pci_vpd_alloc(pdev, &vpd_size);
37         if (IS_ERR(vpd_data)) {
38                 pci_err(pdev, "Unable to read VPD, err=%ld\n", PTR_ERR(vpd_data));
39                 return PTR_ERR(vpd_data);
40         }
41
42         start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, "VU", &kw_len);
43         if (start < 0) {
44                 ret = start;
45                 pci_err(pdev, "VU keyword not found, err=%d\n", ret);
46                 goto end;
47         }
48
49         dev->vuid = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
50         ret = dev->vuid ? 0 : -ENOMEM;
51
52 end:
53         kfree(vpd_data);
54         return ret;
55 }
56
/* PCI shutdown hook: quiesce the device; no state teardown is needed here */
static void mlx5_data_direct_shutdown(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}
61
62 static int mlx5_data_direct_set_dma_caps(struct pci_dev *pdev)
63 {
64         int err;
65
66         err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
67         if (err) {
68                 dev_warn(&pdev->dev,
69                          "Warning: couldn't set 64-bit PCI DMA mask, err=%d\n", err);
70                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
71                 if (err) {
72                         dev_err(&pdev->dev, "Can't set PCI DMA mask, err=%d\n", err);
73                         return err;
74                 }
75         }
76
77         dma_set_max_seg_size(&pdev->dev, SZ_2G);
78         return 0;
79 }
80
81 int mlx5_data_direct_ib_reg(struct mlx5_ib_dev *ibdev, char *vuid)
82 {
83         struct mlx5_data_direct_registration *reg;
84         struct mlx5_data_direct_dev *dev;
85
86         reg = kzalloc(sizeof(*reg), GFP_KERNEL);
87         if (!reg)
88                 return -ENOMEM;
89
90         reg->ibdev = ibdev;
91         strcpy(reg->vuid, vuid);
92
93         mutex_lock(&mlx5_data_direct_mutex);
94         list_for_each_entry(dev, &mlx5_data_direct_dev_list, list) {
95                 if (strcmp(dev->vuid, vuid) == 0) {
96                         mlx5_ib_data_direct_bind(ibdev, dev);
97                         break;
98                 }
99         }
100
101         /* Add the registration to its global list, to be used upon bind/unbind
102          * of its affiliated data direct device
103          */
104         list_add_tail(&reg->list, &mlx5_data_direct_reg_list);
105         mutex_unlock(&mlx5_data_direct_mutex);
106         return 0;
107 }
108
109 void mlx5_data_direct_ib_unreg(struct mlx5_ib_dev *ibdev)
110 {
111         struct mlx5_data_direct_registration *reg;
112
113         mutex_lock(&mlx5_data_direct_mutex);
114         list_for_each_entry(reg, &mlx5_data_direct_reg_list, list) {
115                 if (reg->ibdev == ibdev) {
116                         list_del(&reg->list);
117                         kfree(reg);
118                         goto end;
119                 }
120         }
121
122         WARN_ON(true);
123 end:
124         mutex_unlock(&mlx5_data_direct_mutex);
125 }
126
127 static void mlx5_data_direct_dev_reg(struct mlx5_data_direct_dev *dev)
128 {
129         struct mlx5_data_direct_registration *reg;
130
131         mutex_lock(&mlx5_data_direct_mutex);
132         list_for_each_entry(reg, &mlx5_data_direct_reg_list, list) {
133                 if (strcmp(dev->vuid, reg->vuid) == 0)
134                         mlx5_ib_data_direct_bind(reg->ibdev, dev);
135         }
136
137         /* Add the data direct device to the global list, further IB devices may
138          * use it later as well
139          */
140         list_add_tail(&dev->list, &mlx5_data_direct_dev_list);
141         mutex_unlock(&mlx5_data_direct_mutex);
142 }
143
144 static void mlx5_data_direct_dev_unreg(struct mlx5_data_direct_dev *dev)
145 {
146         struct mlx5_data_direct_registration *reg;
147
148         mutex_lock(&mlx5_data_direct_mutex);
149         /* Prevent any further affiliations */
150         list_del(&dev->list);
151         list_for_each_entry(reg, &mlx5_data_direct_reg_list, list) {
152                 if (strcmp(dev->vuid, reg->vuid) == 0)
153                         mlx5_ib_data_direct_unbind(reg->ibdev);
154         }
155         mutex_unlock(&mlx5_data_direct_mutex);
156 }
157
158 static int mlx5_data_direct_probe(struct pci_dev *pdev, const struct pci_device_id *id)
159 {
160         struct mlx5_data_direct_dev *dev;
161         int err;
162
163         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
164         if (!dev)
165                 return -ENOMEM;
166
167         dev->device = &pdev->dev;
168         dev->pdev = pdev;
169
170         pci_set_drvdata(dev->pdev, dev);
171         err = pci_enable_device(pdev);
172         if (err) {
173                 dev_err(dev->device, "Cannot enable PCI device, err=%d\n", err);
174                 goto err;
175         }
176
177         pci_set_master(pdev);
178         err = mlx5_data_direct_set_dma_caps(pdev);
179         if (err)
180                 goto err_disable;
181
182         if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
183             pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) &&
184             pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
185                 dev_dbg(dev->device, "Enabling pci atomics failed\n");
186
187         err = mlx5_data_direct_vpd_get_vuid(dev);
188         if (err)
189                 goto err_disable;
190
191         mlx5_data_direct_dev_reg(dev);
192         return 0;
193
194 err_disable:
195         pci_disable_device(pdev);
196 err:
197         kfree(dev);
198         return err;
199 }
200
/*
 * PCI remove: mirror of probe. Unregister (which unbinds affiliated IB
 * devices) before disabling the device, then free the VUID string and the
 * device struct allocated in probe.
 */
static void mlx5_data_direct_remove(struct pci_dev *pdev)
{
	struct mlx5_data_direct_dev *dev = pci_get_drvdata(pdev);

	mlx5_data_direct_dev_unreg(dev);
	pci_disable_device(pdev);
	kfree(dev->vuid);
	kfree(dev);
}
210
/* PCI driver glue; registered/unregistered by the mlx5_ib module below */
static struct pci_driver mlx5_data_direct_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mlx5_data_direct_pci_table,
	.probe = mlx5_data_direct_probe,
	.remove = mlx5_data_direct_remove,
	.shutdown = mlx5_data_direct_shutdown,
};
218
/* Register the data direct PCI driver; returns 0 or a negative errno */
int mlx5_data_direct_driver_register(void)
{
	return pci_register_driver(&mlx5_data_direct_driver);
}
223
/* Unregister the data direct PCI driver; triggers remove() for bound devices */
void mlx5_data_direct_driver_unregister(void)
{
	pci_unregister_driver(&mlx5_data_direct_driver);
}
This page took 0.042312 seconds and 4 git commands to generate.