// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * IB infrastructure:
 * Establish SMC-R as an Infiniband Client to be notified about added and
 * removed IB devices of type RDMA.
 * Determine device and port characteristics for these IB devices.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <[email protected]>
 */

#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc_pnet.h"
#include "smc_ib.h"
#include "smc_core.h"
#include "smc_wr.h"
#include "smc.h"

#define SMC_MAX_CQE 32766	/* max. # of completion queue elements */

#define SMC_QP_MIN_RNR_TIMER		5
#define SMC_QP_TIMEOUT			15 /* 4096 * 2 ** timeout usec */
#define SMC_QP_RETRY_CNT		7 /* 7: infinite */
#define SMC_QP_RNR_RETRY		7 /* 7: infinite */

struct smc_ib_devices smc_ib_devices = {	/* smc-registered ib devices */
	.lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock),
	.list = LIST_HEAD_INIT(smc_ib_devices.list),
};

#define SMC_LOCAL_SYSTEMID_RESET	"%%%%%%%"

u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET; /* unique system
								 * identifier
								 */

static int smc_ib_modify_qp_init(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.pkey_index = 0;
	qp_attr.port_num = lnk->ibport;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE
				| IB_ACCESS_REMOTE_WRITE;
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_ACCESS_FLAGS | IB_QP_PORT);
}

static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
{
	enum ib_qp_attr_mask qp_attr_mask =
		IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
		IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTR;
	qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
	qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
	rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, 1, 0);
	rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
	memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
	       sizeof(lnk->peer_mac));
	qp_attr.dest_qp_num = lnk->peer_qpn;
	qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
	qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming
					 * requests
					 */
	qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER;

	return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
}

int smc_ib_modify_qp_rts(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.timeout = SMC_QP_TIMEOUT;	/* local ack timeout */
	qp_attr.retry_cnt = SMC_QP_RETRY_CNT;	/* retry count */
	qp_attr.rnr_retry = SMC_QP_RNR_RETRY;	/* RNR retries, 7=infinite */
	qp_attr.sq_psn = lnk->psn_initial;	/* starting send packet seq # */
	qp_attr.max_rd_atomic = 1;	/* # of outstanding RDMA reads and
					 * atomic ops allowed
					 */
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_SQ_PSN | IB_QP_RNR_RETRY |
			    IB_QP_MAX_QP_RD_ATOMIC);
}

int smc_ib_modify_qp_reset(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RESET;
	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}

int smc_ib_ready_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	int rc = 0;

	rc = smc_ib_modify_qp_init(lnk);
	if (rc)
		goto out;

	rc = smc_ib_modify_qp_rtr(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
			      IB_CQ_SOLICITED_MASK);
	if (rc)
		goto out;
	rc = smc_wr_rx_post_init(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);

	if (lgr->role == SMC_SERV) {
		rc = smc_ib_modify_qp_rts(lnk);
		if (rc)
			goto out;
		smc_wr_remember_qp_attr(lnk);
	}
out:
	return rc;
}

static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport)
{
	const struct ib_gid_attr *attr;
	int rc;

	attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, 0);
	if (IS_ERR(attr))
		return -ENODEV;

	rc = rdma_read_gid_l2_fields(attr, NULL, smcibdev->mac[ibport - 1]);
	rdma_put_gid_attr(attr);
	return rc;
}

/* Create an identifier unique for this instance of SMC-R.
 * The MAC-address of the first active registered IB device
 * plus a random 2-byte number is used to create this identifier.
 * This name is delivered to the peer during connection initialization.
 */
static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
						u8 ibport)
{
	memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
	       sizeof(smcibdev->mac[ibport - 1]));
	get_random_bytes(&local_systemid[0], 2);
}

bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
{
	return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}

/* determine the gid for an ib-device port and vlan id */
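/* A GID table entry matches if it is a RoCE (v1) entry and its associated
 * net device corresponds to the requested vlan: either both are untagged,
 * or both carry the same vlan id.
 */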
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
			 unsigned short vlan_id, u8 gid[], u8 *sgid_index)
{
	const struct ib_gid_attr *attr;
	const struct net_device *ndev;
	int i;

	for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
		attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i);
		if (IS_ERR(attr))
			continue;

		rcu_read_lock();
		ndev = rdma_read_gid_attr_ndev_rcu(attr);
		if (!IS_ERR(ndev) &&
		    ((!vlan_id && !is_vlan_dev(attr->ndev)) ||
		     (vlan_id && is_vlan_dev(attr->ndev) &&
		      vlan_dev_vlan_id(attr->ndev) == vlan_id)) &&
		    attr->gid_type == IB_GID_TYPE_ROCE) {
			rcu_read_unlock();
			if (gid)
				memcpy(gid, &attr->gid, SMC_GID_SIZE);
			if (sgid_index)
				*sgid_index = attr->index;
			rdma_put_gid_attr(attr);
			return 0;
		}
		rcu_read_unlock();
		rdma_put_gid_attr(attr);
	}
	return -ENODEV;
}

static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
{
	int rc;

	memset(&smcibdev->pattr[ibport - 1], 0,
	       sizeof(smcibdev->pattr[ibport - 1]));
	rc = ib_query_port(smcibdev->ibdev, ibport,
			   &smcibdev->pattr[ibport - 1]);
	if (rc)
		goto out;
	/* the SMC protocol requires specification of the RoCE MAC address */
	rc = smc_ib_fill_mac(smcibdev, ibport);
	if (rc)
		goto out;
	if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET,
		     sizeof(local_systemid)) &&
	    smc_ib_port_active(smcibdev, ibport))
		/* create unique system identifier */
		smc_ib_define_local_systemid(smcibdev, ibport);
out:
	return rc;
}

/* process context wrapper for smc_ib_remember_port_attr(), which might
 * sleep
 */
static void smc_ib_port_event_work(struct work_struct *work)
{
	struct smc_ib_device *smcibdev = container_of(
		work, struct smc_ib_device, port_event_work);
	u8 port_idx;

	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
		clear_bit(port_idx, &smcibdev->port_event_mask);
		if (!smc_ib_port_active(smcibdev, port_idx + 1))
			smc_port_terminate(smcibdev, port_idx + 1);
	}
}

/* can be called in IRQ context; only latches the event in port_event_mask
 * and defers the sleeping attribute re-read to smc_ib_port_event_work()
 */
static void smc_ib_global_event_handler(struct ib_event_handler *handler,
					struct ib_event *ibevent)
{
	struct smc_ib_device *smcibdev;
	u8 port_idx;

	smcibdev = container_of(handler, struct smc_ib_device, event_handler);

	switch (ibevent->event) {
	case IB_EVENT_DEVICE_FATAL:
		/* terminate all ports on device */
		for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++)
			set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_PORT_ACTIVE:
	case IB_EVENT_GID_CHANGE:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx < SMC_MAX_PORTS) {
			set_bit(port_idx, &smcibdev->port_event_mask);
			schedule_work(&smcibdev->port_event_work);
		}
		break;
	default:
		break;
	}
}

void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
{
	if (lnk->roce_pd)
		ib_dealloc_pd(lnk->roce_pd);
	lnk->roce_pd = NULL;
}

int smc_ib_create_protection_domain(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;
	return rc;
}

static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
	struct smc_link *lnk = (struct smc_link *)priv;
	struct smc_ib_device *smcibdev = lnk->smcibdev;
	u8 port_idx;

	switch (ibevent->event) {
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_ACCESS_ERR:
		port_idx = ibevent->element.qp->port - 1;
		if (port_idx < SMC_MAX_PORTS) {
			set_bit(port_idx, &smcibdev->port_event_mask);
			schedule_work(&smcibdev->port_event_work);
		}
		break;
	default:
		break;
	}
}

void smc_ib_destroy_queue_pair(struct smc_link *lnk)
{
	if (lnk->roce_qp)
		ib_destroy_qp(lnk->roce_qp);
	lnk->roce_qp = NULL;
}

/* create a queue pair within the protection domain for a link */
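/* One reliable-connected (RC) queue pair is used per link. With
 * IB_SIGNAL_REQ_WR, send completions are only generated for work requests
 * that explicitly set IB_SEND_SIGNALED, keeping the send CQ load low.
 */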
int smc_ib_create_queue_pair(struct smc_link *lnk)
{
	struct ib_qp_init_attr qp_attr = {
		.event_handler = smc_ib_qp_event_handler,
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.srq = NULL,
		.cap = {
				/* include unsolicited rdma_writes as well,
				 * there are max. 2 RDMA_WRITE per 1 WR_SEND
				 */
			.max_send_wr = SMC_WR_BUF_CNT * 3,
			.max_recv_wr = SMC_WR_BUF_CNT * 3,
			.max_send_sge = SMC_IB_MAX_SEND_SGE,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	else
		smc_wr_remember_qp_attr(lnk);
	return rc;
}

void smc_ib_put_memory_region(struct ib_mr *mr)
{
	ib_dereg_mr(mr);
}

static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot)
{
	unsigned int offset = 0;
	int sg_num;

	/* map the largest prefix of a dma mapped SG list */
	sg_num = ib_map_mr_sg(buf_slot->mr_rx[SMC_SINGLE_LINK],
			      buf_slot->sgt[SMC_SINGLE_LINK].sgl,
			      buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
			      &offset, PAGE_SIZE);

	return sg_num;
}

/* Allocate a memory region and map the dma mapped SG list of buf_slot */
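/* The MR is of type IB_MR_TYPE_MEM_REG and covers up to 2^order pages.
 * The buffer is expected to map as exactly one SG entry; anything else is
 * treated as an error below.
 */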
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
			     struct smc_buf_desc *buf_slot)
{
	if (buf_slot->mr_rx[SMC_SINGLE_LINK])
		return 0; /* already done */

	buf_slot->mr_rx[SMC_SINGLE_LINK] =
		ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
	if (IS_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK])) {
		int rc;

		rc = PTR_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK]);
		buf_slot->mr_rx[SMC_SINGLE_LINK] = NULL;
		return rc;
	}

	if (smc_ib_map_mr_sg(buf_slot) != 1)
		return -EINVAL;

	return 0;
}

/* synchronize buffer usage for cpu access */
void smc_ib_sync_sg_for_cpu(struct smc_ib_device *smcibdev,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_cpu(smcibdev->ibdev,
					   sg_dma_address(sg),
					   sg_dma_len(sg),
					   data_direction);
	}
}

/* synchronize buffer usage for device access */
void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev,
			       struct smc_buf_desc *buf_slot,
			       enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_device(smcibdev->ibdev,
					      sg_dma_address(sg),
					      sg_dma_len(sg),
					      data_direction);
	}
}

/* Map a new TX or RX buffer SG-table to DMA */
int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction)
{
	int mapped_nents;

	mapped_nents = ib_dma_map_sg(smcibdev->ibdev,
				     buf_slot->sgt[SMC_SINGLE_LINK].sgl,
				     buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
				     data_direction);
	if (!mapped_nents)
		return -ENOMEM;

	return mapped_nents;
}

void smc_ib_buf_unmap_sg(struct smc_ib_device *smcibdev,
			 struct smc_buf_desc *buf_slot,
			 enum dma_data_direction data_direction)
{
	if (!buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address)
		return; /* already unmapped */

	ib_dma_unmap_sg(smcibdev->ibdev,
			buf_slot->sgt[SMC_SINGLE_LINK].sgl,
			buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
			data_direction);
	buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address = 0;
}

long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
{
	struct ib_cq_init_attr cqattr = {
		.cqe = SMC_MAX_CQE, .comp_vector = 0 };
	int cqe_size_order, smc_order;
	long rc;

	/* the calculated number of cq entries fits to mlx5 cq allocation */
	cqe_size_order = cache_line_size() == 128 ? 7 : 6;
	smc_order = MAX_ORDER - cqe_size_order - 1;
	if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
		cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
					      smc_wr_tx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
	if (IS_ERR(smcibdev->roce_cq_send)) {
		smcibdev->roce_cq_send = NULL;
		return rc;
	}
	smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
					      smc_wr_rx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
	if (IS_ERR(smcibdev->roce_cq_recv)) {
		smcibdev->roce_cq_recv = NULL;
		goto err;
	}
	smc_wr_add_dev(smcibdev);
	smcibdev->initialized = 1;
	return rc;

err:
	ib_destroy_cq(smcibdev->roce_cq_send);
	return rc;
}

static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
	if (!smcibdev->initialized)
		return;
	smcibdev->initialized = 0;
	smc_wr_remove_dev(smcibdev);
	ib_destroy_cq(smcibdev->roce_cq_recv);
	ib_destroy_cq(smcibdev->roce_cq_send);
}

static struct ib_client smc_ib_client;

/* callback function for ib_register_client() */
static void smc_ib_add_dev(struct ib_device *ibdev)
{
	struct smc_ib_device *smcibdev;
	u8 port_cnt;
	int i;

	if (ibdev->node_type != RDMA_NODE_IB_CA)
		return;

	smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
	if (!smcibdev)
		return;

	smcibdev->ibdev = ibdev;
	INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);

	spin_lock(&smc_ib_devices.lock);
	list_add_tail(&smcibdev->list, &smc_ib_devices.list);
	spin_unlock(&smc_ib_devices.lock);
	ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
	INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
			      smc_ib_global_event_handler);
	ib_register_event_handler(&smcibdev->event_handler);

	/* trigger reading of the port attributes */
	port_cnt = smcibdev->ibdev->phys_port_cnt;
	for (i = 0;
	     i < min_t(size_t, port_cnt, SMC_MAX_PORTS);
	     i++) {
		set_bit(i, &smcibdev->port_event_mask);
		/* determine pnetids of the port */
		smc_pnetid_by_dev_port(ibdev->dev.parent, i,
				       smcibdev->pnetid[i]);
	}
	schedule_work(&smcibdev->port_event_work);
}

/* callback function for ib_register_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
	struct smc_ib_device *smcibdev;

	smcibdev = ib_get_client_data(ibdev, &smc_ib_client);
	ib_set_client_data(ibdev, &smc_ib_client, NULL);
	spin_lock(&smc_ib_devices.lock);
	list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
	spin_unlock(&smc_ib_devices.lock);
	smc_ib_cleanup_per_ibdev(smcibdev);
	ib_unregister_event_handler(&smcibdev->event_handler);
	kfree(smcibdev);
}

static struct ib_client smc_ib_client = {
	.name	= "smc_ib",
	.add	= smc_ib_add_dev,
	.remove	= smc_ib_remove_dev,
};

int __init smc_ib_register_client(void)
{
	return ib_register_client(&smc_ib_client);
}

void smc_ib_unregister_client(void)
{
	ib_unregister_client(&smc_ib_client);
}