/*
 * QEMU paravirtual RDMA - Generic RDMA backend
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <[email protected]>
 *     Marcel Apfelbaum <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/qmp/qlist.h"
#include "qapi/qmp/qnum.h"
#include "qapi/qapi-events-rdma.h"

#include <infiniband/verbs.h>
#include <infiniband/umad_types.h>
#include <infiniband/umad.h>
#include <rdma/rdma_user_cm.h>

#include "contrib/rdmacm-mux/rdmacm-mux.h"
#include "trace.h"
#include "rdma_utils.h"
#include "rdma_rm.h"
#include "rdma_backend.h"

#define THR_NAME_LEN 16
#define THR_POLL_TO 5000

#define MAD_HDR_SIZE sizeof(struct ibv_grh)

typedef struct BackendCtx {
    void *up_ctx;
    struct ibv_sge sge; /* Used to save MAD recv buffer */
    RdmaBackendQP *backend_qp; /* To maintain recv buffers */
    RdmaBackendSRQ *backend_srq;
} BackendCtx;

struct backend_umad {
    struct ib_user_mad hdr;
    char mad[RDMA_MAX_PRIVATE_DATA];
};

static void (*comp_handler)(void *ctx, struct ibv_wc *wc);

static void dummy_comp_handler(void *ctx, struct ibv_wc *wc)
{
    rdma_error_report("No completion handler is registered");
}

static inline void complete_work(enum ibv_wc_status status, uint32_t vendor_err,
                                 void *ctx)
{
    struct ibv_wc wc = {};

    wc.status = status;
    wc.vendor_err = vendor_err;

    comp_handler(ctx, &wc);
}

static void free_cqe_ctx(gpointer data, gpointer user_data)
{
    BackendCtx *bctx;
    RdmaDeviceResources *rdma_dev_res = user_data;
    unsigned long cqe_ctx_id = GPOINTER_TO_INT(data);

    bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, cqe_ctx_id);
    if (bctx) {
        rdma_rm_dealloc_cqe_ctx(rdma_dev_res, cqe_ctx_id);
        atomic_dec(&rdma_dev_res->stats.missing_cqe);
    }
    g_free(bctx);
}

static void clean_recv_mads(RdmaBackendDev *backend_dev)
{
    unsigned long cqe_ctx_id;

    do {
        cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->
                                                    recv_mads_list);
        if (cqe_ctx_id != -ENOENT) {
            atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
            free_cqe_ctx(GINT_TO_POINTER(cqe_ctx_id),
                         backend_dev->rdma_dev_res);
        }
    } while (cqe_ctx_id != -ENOENT);
}

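/*
 * Drain all currently available completions from a host CQ and hand each one
 * to the registered completion handler. Every wr_id carries the id of a
 * BackendCtx allocated at post time; the context is looked up, reported to
 * the device model, removed from its QP's (or SRQ's) tracking list and
 * freed. Returns the number of completions processed.
 */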
static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
{
    int i, ne, total_ne = 0;
    BackendCtx *bctx;
    struct ibv_wc wc[2];
    RdmaProtectedGSList *cqe_ctx_list;

    qemu_mutex_lock(&rdma_dev_res->lock);
    do {
        ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);

        trace_rdma_poll_cq(ne, ibcq);

        for (i = 0; i < ne; i++) {
            bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
            if (unlikely(!bctx)) {
                rdma_error_report("No matching ctx for req %"PRId64,
                                  wc[i].wr_id);
                continue;
            }

            comp_handler(bctx->up_ctx, &wc[i]);

            if (bctx->backend_qp) {
                cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
            } else {
                cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
            }

            rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
            rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
            g_free(bctx);
        }
        total_ne += ne;
    } while (ne > 0);
    atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
    qemu_mutex_unlock(&rdma_dev_res->lock);

    if (ne < 0) {
        rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
    }

    rdma_dev_res->stats.completions += total_ne;

    return total_ne;
}

static void *comp_handler_thread(void *arg)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)arg;
    int rc;
    struct ibv_cq *ev_cq;
    void *ev_ctx;
    int flags;
    GPollFD pfds[1];

    /* Change to non-blocking mode */
    flags = fcntl(backend_dev->channel->fd, F_GETFL);
    rc = fcntl(backend_dev->channel->fd, F_SETFL, flags | O_NONBLOCK);
    if (rc < 0) {
        rdma_error_report("Failed to change backend channel FD to non-blocking");
        return NULL;
    }

    pfds[0].fd = backend_dev->channel->fd;
    pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR;

    backend_dev->comp_thread.is_running = true;

    while (backend_dev->comp_thread.run) {
        do {
            rc = qemu_poll_ns(pfds, 1, THR_POLL_TO * (int64_t)SCALE_MS);
            if (!rc) {
                backend_dev->rdma_dev_res->stats.poll_cq_ppoll_to++;
            }
        } while (!rc && backend_dev->comp_thread.run);

        if (backend_dev->comp_thread.run) {
            rc = ibv_get_cq_event(backend_dev->channel, &ev_cq, &ev_ctx);
            if (unlikely(rc)) {
                rdma_error_report("ibv_get_cq_event fail, rc=%d, errno=%d", rc,
                                  errno);
                continue;
            }

            rc = ibv_req_notify_cq(ev_cq, 0);
            if (unlikely(rc)) {
                rdma_error_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc,
                                  errno);
            }

            backend_dev->rdma_dev_res->stats.poll_cq_from_bk++;
            rdma_poll_cq(backend_dev->rdma_dev_res, ev_cq);

            ibv_ack_cq_events(ev_cq, 1);
        }
    }

    backend_dev->comp_thread.is_running = false;

    qemu_thread_exit(0);

    return NULL;
}

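/*
 * Flow control toward the rdmacm-mux chardev: can_receive drops to zero
 * while a synchronous request/response exchange is in flight, so that the
 * async read handler does not consume the response, and is restored to
 * sizeof(RdmaCmMuxMsg) once the exchange completes.
 */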
static inline void disable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
{
    atomic_set(&backend_dev->rdmacm_mux.can_receive, 0);
}

static inline void enable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
{
    atomic_set(&backend_dev->rdmacm_mux.can_receive, sizeof(RdmaCmMuxMsg));
}

static inline int rdmacm_mux_can_process_async(RdmaBackendDev *backend_dev)
{
    return atomic_read(&backend_dev->rdmacm_mux.can_receive);
}

static int rdmacm_mux_check_op_status(CharBackend *mad_chr_be)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    ret = qemu_chr_fe_read_all(mad_chr_be, (uint8_t *)&msg, sizeof(msg));
    if (ret != sizeof(msg)) {
        rdma_error_report("Got invalid message from mux: size %d, expecting %d",
                          ret, (int)sizeof(msg));
        return -EIO;
    }

    trace_rdmacm_mux_check_op_status(msg.hdr.msg_type, msg.hdr.op_code,
                                     msg.hdr.err_code);

    if (msg.hdr.msg_type != RDMACM_MUX_MSG_TYPE_RESP) {
        rdma_error_report("Got invalid message type %d", msg.hdr.msg_type);
        return -EIO;
    }

    if (msg.hdr.err_code != RDMACM_MUX_ERR_CODE_OK) {
        rdma_error_report("Operation failed in mux, error code %d",
                          msg.hdr.err_code);
        return -EIO;
    }

    return 0;
}

static int rdmacm_mux_send(RdmaBackendDev *backend_dev, RdmaCmMuxMsg *msg)
{
    int rc = 0;

    msg->hdr.msg_type = RDMACM_MUX_MSG_TYPE_REQ;
    trace_rdmacm_mux("send", msg->hdr.msg_type, msg->hdr.op_code);
    disable_rdmacm_mux_async(backend_dev);
    rc = qemu_chr_fe_write(backend_dev->rdmacm_mux.chr_be,
                           (const uint8_t *)msg, sizeof(*msg));
    if (rc != sizeof(*msg)) {
        enable_rdmacm_mux_async(backend_dev);
        rdma_error_report("Failed to send request to rdmacm_mux (rc=%d)", rc);
        return -EIO;
    }

    rc = rdmacm_mux_check_op_status(backend_dev->rdmacm_mux.chr_be);
    if (rc) {
        rdma_error_report("Failed to execute rdmacm_mux request %d (rc=%d)",
                          msg->hdr.op_code, rc);
    }

    enable_rdmacm_mux_async(backend_dev);

    return 0;
}

static void stop_backend_thread(RdmaBackendThread *thread)
{
    thread->run = false;
    while (thread->is_running) {
        sleep(THR_POLL_TO / SCALE_US / 2);
    }
}

static void start_comp_thread(RdmaBackendDev *backend_dev)
{
    char thread_name[THR_NAME_LEN] = {};

    stop_backend_thread(&backend_dev->comp_thread);

    snprintf(thread_name, sizeof(thread_name), "rdma_comp_%s",
             ibv_get_device_name(backend_dev->ib_dev));
    backend_dev->comp_thread.run = true;
    qemu_thread_create(&backend_dev->comp_thread.thread, thread_name,
                       comp_handler_thread, backend_dev, QEMU_THREAD_DETACHED);
}

void rdma_backend_register_comp_handler(void (*handler)(void *ctx,
                                                        struct ibv_wc *wc))
{
    comp_handler = handler;
}

void rdma_backend_unregister_comp_handler(void)
{
    rdma_backend_register_comp_handler(dummy_comp_handler);
}

int rdma_backend_query_port(RdmaBackendDev *backend_dev,
                            struct ibv_port_attr *port_attr)
{
    int rc;

    rc = ibv_query_port(backend_dev->context, backend_dev->port_num, port_attr);
    if (rc) {
        rdma_error_report("ibv_query_port fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

void rdma_backend_poll_cq(RdmaDeviceResources *rdma_dev_res, RdmaBackendCQ *cq)
{
    int polled;

    rdma_dev_res->stats.poll_cq_from_guest++;
    polled = rdma_poll_cq(rdma_dev_res, cq->ibcq);
    if (!polled) {
        rdma_dev_res->stats.poll_cq_from_guest_empty++;
    }
}

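/*
 * Address handles for UD sends are cached in a hash table keyed by the
 * destination GID, so repeated sends to the same destination reuse one
 * ibv_ah instead of creating a new one per work request. Entries live until
 * rdma_backend_fini() destroys the table.
 */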
static GHashTable *ah_hash;

static struct ibv_ah *create_ah(RdmaBackendDev *backend_dev, struct ibv_pd *pd,
                                uint8_t sgid_idx, union ibv_gid *dgid)
{
    GBytes *ah_key = g_bytes_new(dgid, sizeof(*dgid));
    struct ibv_ah *ah = g_hash_table_lookup(ah_hash, ah_key);

    if (ah) {
        trace_rdma_create_ah_cache_hit(be64_to_cpu(dgid->global.subnet_prefix),
                                       be64_to_cpu(dgid->global.interface_id));
        g_bytes_unref(ah_key);
    } else {
        struct ibv_ah_attr ah_attr = {
            .is_global = 1,
            .port_num = backend_dev->port_num,
            .grh.hop_limit = 1,
        };

        ah_attr.grh.dgid = *dgid;
        ah_attr.grh.sgid_index = sgid_idx;

        ah = ibv_create_ah(pd, &ah_attr);
        if (ah) {
            g_hash_table_insert(ah_hash, ah_key, ah);
        } else {
            g_bytes_unref(ah_key);
            rdma_error_report("Failed to create AH for gid <0x%" PRIx64", 0x%"PRIx64">",
                              be64_to_cpu(dgid->global.subnet_prefix),
                              be64_to_cpu(dgid->global.interface_id));
        }

        trace_rdma_create_ah_cache_miss(be64_to_cpu(dgid->global.subnet_prefix),
                                        be64_to_cpu(dgid->global.interface_id));
    }

    return ah;
}

static void destroy_ah_hash_key(gpointer data)
{
    g_bytes_unref(data);
}

static void destroy_ah_hast_data(gpointer data)
{
    struct ibv_ah *ah = data;

    ibv_destroy_ah(ah);
}

static void ah_cache_init(void)
{
    ah_hash = g_hash_table_new_full(g_bytes_hash, g_bytes_equal,
                                    destroy_ah_hash_key, destroy_ah_hast_data);
}

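/*
 * Translate a guest scatter/gather list into a host one: each guest lkey is
 * resolved to its memory region, the guest address is rebased onto the MR's
 * host virtual mapping, and the host lkey is substituted. total_length
 * accumulates the byte count for the transfer statistics.
 */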
static int build_host_sge_array(RdmaDeviceResources *rdma_dev_res,
                                struct ibv_sge *dsge, struct ibv_sge *ssge,
                                uint8_t num_sge, uint64_t *total_length)
{
    RdmaRmMR *mr;
    int ssge_idx;

    for (ssge_idx = 0; ssge_idx < num_sge; ssge_idx++) {
        mr = rdma_rm_get_mr(rdma_dev_res, ssge[ssge_idx].lkey);
        if (unlikely(!mr)) {
            rdma_error_report("Invalid lkey 0x%x", ssge[ssge_idx].lkey);
            return VENDOR_ERR_INVLKEY | ssge[ssge_idx].lkey;
        }

        dsge->addr = (uintptr_t)mr->virt + ssge[ssge_idx].addr - mr->start;
        dsge->length = ssge[ssge_idx].length;
        dsge->lkey = rdma_backend_mr_lkey(&mr->backend_mr);

        *total_length += dsge->length;

        dsge++;
    }

    return 0;
}

static void trace_mad_message(const char *title, char *buf, int len)
{
    int i;
    char *b = g_malloc0(len * 3 + 1);
    char b1[4];

    for (i = 0; i < len; i++) {
        sprintf(b1, "%.2X ", buf[i] & 0x000000FF);
        strcat(b, b1);
    }

    trace_rdma_mad_message(title, len, b);

    g_free(b);
}

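/*
 * MADs are not sent through a real host QP1; instead they are proxied to the
 * external rdmacm-mux service. The two guest SGEs (MAD header plus payload)
 * are copied into a single RdmaCmMuxMsg which is written to the mux chardev.
 */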
static int mad_send(RdmaBackendDev *backend_dev, uint8_t sgid_idx,
                    union ibv_gid *sgid, struct ibv_sge *sge, uint32_t num_sge)
{
    RdmaCmMuxMsg msg = {};
    char *hdr, *data;
    int ret;

    if (num_sge != 2) {
        return -EINVAL;
    }

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_MAD;
    memcpy(msg.hdr.sgid.raw, sgid->raw, sizeof(msg.hdr.sgid));

    msg.umad_len = sge[0].length + sge[1].length;

    if (msg.umad_len > sizeof(msg.umad.mad)) {
        return -ENOMEM;
    }

    msg.umad.hdr.addr.qpn = htobe32(1);
    msg.umad.hdr.addr.grh_present = 1;
    msg.umad.hdr.addr.gid_index = sgid_idx;
    memcpy(msg.umad.hdr.addr.gid, sgid->raw, sizeof(msg.umad.hdr.addr.gid));
    msg.umad.hdr.addr.hop_limit = 0xFF;

    hdr = rdma_pci_dma_map(backend_dev->dev, sge[0].addr, sge[0].length);
    if (!hdr) {
        return -ENOMEM;
    }
    data = rdma_pci_dma_map(backend_dev->dev, sge[1].addr, sge[1].length);
    if (!data) {
        rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);
        return -ENOMEM;
    }

    memcpy(&msg.umad.mad[0], hdr, sge[0].length);
    memcpy(&msg.umad.mad[sge[0].length], data, sge[1].length);

    rdma_pci_dma_unmap(backend_dev->dev, data, sge[1].length);
    rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);

    trace_mad_message("send", msg.umad.mad, msg.umad_len);

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to send MAD to rdma_umadmux (%d)", ret);
        return -EIO;
    }

    return 0;
}

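/*
 * Post-send path. QP0 requests are rejected and QP1 (GSI) traffic is
 * diverted to mad_send(); for real QPs a BackendCtx is allocated so the
 * completion can be matched back to the caller's ctx, the SGE list is
 * translated to host addresses and the WQE is posted signaled.
 */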
void rdma_backend_post_send(RdmaBackendDev *backend_dev,
                            RdmaBackendQP *qp, uint8_t qp_type,
                            struct ibv_sge *sge, uint32_t num_sge,
                            uint8_t sgid_idx, union ibv_gid *sgid,
                            union ibv_gid *dgid, uint32_t dqpn, uint32_t dqkey,
                            void *ctx)
{
    BackendCtx *bctx;
    struct ibv_sge new_sge[MAX_SGE];
    uint32_t bctx_id;
    int rc;
    struct ibv_send_wr wr = {}, *bad_wr;

    if (!qp->ibqp) { /* This field is not initialized for QP0 and QP1 */
        if (qp_type == IBV_QPT_SMI) {
            rdma_error_report("Got QP0 request");
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
        } else if (qp_type == IBV_QPT_GSI) {
            rc = mad_send(backend_dev, sgid_idx, sgid, sge, num_sge);
            if (rc) {
                complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_MAD_SEND, ctx);
                backend_dev->rdma_dev_res->stats.mad_tx_err++;
            } else {
                complete_work(IBV_WC_SUCCESS, 0, ctx);
                backend_dev->rdma_dev_res->stats.mad_tx++;
            }
        }
        return;
    }

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_qp = qp;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.tx_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    if (qp_type == IBV_QPT_UD) {
        wr.wr.ud.ah = create_ah(backend_dev, qp->ibpd, sgid_idx, dgid);
        if (!wr.wr.ud.ah) {
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
            goto err_dealloc_cqe_ctx;
        }
        wr.wr.ud.remote_qpn = dqpn;
        wr.wr.ud.remote_qkey = dqkey;
    }

    wr.num_sge = num_sge;
    wr.opcode = IBV_WR_SEND;
    wr.send_flags = IBV_SEND_SIGNALED;
    wr.sg_list = new_sge;
    wr.wr_id = bctx_id;

    rc = ibv_post_send(qp->ibqp, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_send fail, qpn=0x%x, rc=%d, errno=%d",
                          qp->ibqp->qp_num, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.tx++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.tx_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}

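/*
 * GSI receive buffers are not posted to a host QP; the single SGE is parked
 * on recv_mads_list until a MAD arrives from the mux (see
 * process_incoming_mad_req below).
 */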
static unsigned int save_mad_recv_buffer(RdmaBackendDev *backend_dev,
                                         struct ibv_sge *sge, uint32_t num_sge,
                                         void *ctx)
{
    BackendCtx *bctx;
    int rc;
    uint32_t bctx_id;

    if (num_sge != 1) {
        rdma_error_report("Invalid num_sge (%d), expecting 1", num_sge);
        return VENDOR_ERR_INV_NUM_SGE;
    }

    if (sge[0].length < RDMA_MAX_PRIVATE_DATA + sizeof(struct ibv_grh)) {
        rdma_error_report("Too small buffer for MAD");
        return VENDOR_ERR_INV_MAD_BUFF;
    }

    bctx = g_malloc0(sizeof(*bctx));

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        g_free(bctx);
        return VENDOR_ERR_NOMEM;
    }

    bctx->up_ctx = ctx;
    bctx->sge = *sge;

    rdma_protected_qlist_append_int64(&backend_dev->recv_mads_list, bctx_id);

    return 0;
}

void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
                            RdmaBackendQP *qp, uint8_t qp_type,
                            struct ibv_sge *sge, uint32_t num_sge, void *ctx)
{
    BackendCtx *bctx;
    struct ibv_sge new_sge[MAX_SGE];
    uint32_t bctx_id;
    int rc;
    struct ibv_recv_wr wr = {}, *bad_wr;

    if (!qp->ibqp) { /* This field does not get initialized for QP0 and QP1 */
        if (qp_type == IBV_QPT_SMI) {
            rdma_error_report("Got QP0 request");
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
        }
        if (qp_type == IBV_QPT_GSI) {
            rc = save_mad_recv_buffer(backend_dev, sge, num_sge, ctx);
            if (rc) {
                complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
                backend_dev->rdma_dev_res->stats.mad_rx_bufs_err++;
            } else {
                backend_dev->rdma_dev_res->stats.mad_rx_bufs++;
            }
        }
        return;
    }

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_qp = qp;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.rx_bufs_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    wr.num_sge = num_sge;
    wr.sg_list = new_sge;
    wr.wr_id = bctx_id;
    rc = ibv_post_recv(qp->ibqp, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_recv fail, qpn=0x%x, rc=%d, errno=%d",
                          qp->ibqp->qp_num, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.rx_bufs++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.rx_bufs_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}

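/*
 * SRQ flavor of the receive path: identical to rdma_backend_post_recv()
 * except that the BackendCtx is tracked on the SRQ's cqe_ctx_list and the
 * WQE is posted with ibv_post_srq_recv().
 */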
void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev,
                                RdmaBackendSRQ *srq, struct ibv_sge *sge,
                                uint32_t num_sge, void *ctx)
{
    BackendCtx *bctx;
    struct ibv_sge new_sge[MAX_SGE];
    uint32_t bctx_id;
    int rc;
    struct ibv_recv_wr wr = {}, *bad_wr;

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_srq = srq;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&srq->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.rx_bufs_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    wr.num_sge = num_sge;
    wr.sg_list = new_sge;
    wr.wr_id = bctx_id;
    rc = ibv_post_srq_recv(srq->ibsrq, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_srq_recv fail, srqn=0x%x, rc=%d, errno=%d",
                          srq->ibsrq->handle, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.rx_bufs++;
    backend_dev->rdma_dev_res->stats.rx_srq++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.rx_bufs_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}

int rdma_backend_create_pd(RdmaBackendDev *backend_dev, RdmaBackendPD *pd)
{
    pd->ibpd = ibv_alloc_pd(backend_dev->context);

    if (!pd->ibpd) {
        rdma_error_report("ibv_alloc_pd fail, errno=%d", errno);
        return -EIO;
    }

    return 0;
}

void rdma_backend_destroy_pd(RdmaBackendPD *pd)
{
    if (pd->ibpd) {
        ibv_dealloc_pd(pd->ibpd);
    }
}

int rdma_backend_create_mr(RdmaBackendMR *mr, RdmaBackendPD *pd, void *addr,
                           size_t length, int access)
{
    mr->ibmr = ibv_reg_mr(pd->ibpd, addr, length, access);
    if (!mr->ibmr) {
        rdma_error_report("ibv_reg_mr fail, errno=%d", errno);
        return -EIO;
    }

    mr->ibpd = pd->ibpd;

    return 0;
}

void rdma_backend_destroy_mr(RdmaBackendMR *mr)
{
    if (mr->ibmr) {
        ibv_dereg_mr(mr->ibmr);
    }
}

int rdma_backend_create_cq(RdmaBackendDev *backend_dev, RdmaBackendCQ *cq,
                           int cqe)
{
    int rc;

    cq->ibcq = ibv_create_cq(backend_dev->context, cqe + 1, NULL,
                             backend_dev->channel, 0);
    if (!cq->ibcq) {
        rdma_error_report("ibv_create_cq fail, errno=%d", errno);
        return -EIO;
    }

    rc = ibv_req_notify_cq(cq->ibcq, 0);
    if (rc) {
        rdma_warn_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc, errno);
    }

    cq->backend_dev = backend_dev;

    return 0;
}

void rdma_backend_destroy_cq(RdmaBackendCQ *cq)
{
    if (cq->ibcq) {
        ibv_destroy_cq(cq->ibcq);
    }
}

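/*
 * Create the host QP backing a guest QP. QP1 (GSI) is emulated in software,
 * so no host object is created for it and qp->ibqp stays NULL; only RC and
 * UD QPs are passed through to ibv_create_qp().
 */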
int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type,
                           RdmaBackendPD *pd, RdmaBackendCQ *scq,
                           RdmaBackendCQ *rcq, RdmaBackendSRQ *srq,
                           uint32_t max_send_wr, uint32_t max_recv_wr,
                           uint32_t max_send_sge, uint32_t max_recv_sge)
{
    struct ibv_qp_init_attr attr = {};

    qp->ibqp = 0;

    switch (qp_type) {
    case IBV_QPT_GSI:
        return 0;

    case IBV_QPT_RC:
        /* fall through */
    case IBV_QPT_UD:
        /* do nothing */
        break;

    default:
        rdma_error_report("Unsupported QP type %d", qp_type);
        return -EIO;
    }

    attr.qp_type = qp_type;
    attr.send_cq = scq->ibcq;
    attr.recv_cq = rcq->ibcq;
    attr.cap.max_send_wr = max_send_wr;
    attr.cap.max_recv_wr = max_recv_wr;
    attr.cap.max_send_sge = max_send_sge;
    attr.cap.max_recv_sge = max_recv_sge;
    if (srq) {
        attr.srq = srq->ibsrq;
    }

    qp->ibqp = ibv_create_qp(pd->ibpd, &attr);
    if (!qp->ibqp) {
        rdma_error_report("ibv_create_qp fail, errno=%d", errno);
        return -EIO;
    }

    rdma_protected_gslist_init(&qp->cqe_ctx_list);

    qp->ibpd = pd->ibpd;

    /* TODO: Query QP to get max_inline_data and save it to be used in send */

    return 0;
}

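/*
 * The three helpers below walk the standard verbs state machine
 * (RESET->INIT->RTR->RTS) on behalf of the guest, setting the attribute
 * masks each transition requires for RC and UD QPs.
 */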
int rdma_backend_qp_state_init(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                               uint8_t qp_type, uint32_t qkey)
{
    struct ibv_qp_attr attr = {};
    int rc, attr_mask;

    attr_mask = IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT;
    attr.qp_state = IBV_QPS_INIT;
    attr.pkey_index = 0;
    attr.port_num = backend_dev->port_num;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr_mask |= IBV_QP_ACCESS_FLAGS;
        trace_rdma_backend_rc_qp_state_init(qp->ibqp->qp_num);
        break;

    case IBV_QPT_UD:
        attr.qkey = qkey;
        attr_mask |= IBV_QP_QKEY;
        trace_rdma_backend_ud_qp_state_init(qp->ibqp->qp_num, qkey);
        break;

    default:
        rdma_error_report("Unsupported QP type %d", qp_type);
        return -EIO;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                              uint8_t qp_type, uint8_t sgid_idx,
                              union ibv_gid *dgid, uint32_t dqpn,
                              uint32_t rq_psn, uint32_t qkey, bool use_qkey)
{
    struct ibv_qp_attr attr = {};
    union ibv_gid ibv_gid = {
        .global.interface_id = dgid->global.interface_id,
        .global.subnet_prefix = dgid->global.subnet_prefix
    };
    int rc, attr_mask;

    attr.qp_state = IBV_QPS_RTR;
    attr_mask = IBV_QP_STATE;

    qp->sgid_idx = sgid_idx;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr.path_mtu = IBV_MTU_1024;
        attr.dest_qp_num = dqpn;
        attr.max_dest_rd_atomic = 1;
        attr.min_rnr_timer = 12;
        attr.ah_attr.port_num = backend_dev->port_num;
        attr.ah_attr.is_global = 1;
        attr.ah_attr.grh.hop_limit = 1;
        attr.ah_attr.grh.dgid = ibv_gid;
        attr.ah_attr.grh.sgid_index = qp->sgid_idx;
        attr.rq_psn = rq_psn;

        attr_mask |= IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN |
                     IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC |
                     IBV_QP_MIN_RNR_TIMER;

        trace_rdma_backend_rc_qp_state_rtr(qp->ibqp->qp_num,
                                           be64_to_cpu(ibv_gid.global.
                                                       subnet_prefix),
                                           be64_to_cpu(ibv_gid.global.
                                                       interface_id),
                                           qp->sgid_idx, dqpn, rq_psn);
        break;

    case IBV_QPT_UD:
        if (use_qkey) {
            attr.qkey = qkey;
            attr_mask |= IBV_QP_QKEY;
        }
        trace_rdma_backend_ud_qp_state_rtr(qp->ibqp->qp_num, use_qkey ? qkey :
                                           0);
        break;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

int rdma_backend_qp_state_rts(RdmaBackendQP *qp, uint8_t qp_type,
                              uint32_t sq_psn, uint32_t qkey, bool use_qkey)
{
    struct ibv_qp_attr attr = {};
    int rc, attr_mask;

    attr.qp_state = IBV_QPS_RTS;
    attr.sq_psn = sq_psn;
    attr_mask = IBV_QP_STATE | IBV_QP_SQ_PSN;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr.timeout = 14;
        attr.retry_cnt = 7;
        attr.rnr_retry = 7;
        attr.max_rd_atomic = 1;

        attr_mask |= IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY |
                     IBV_QP_MAX_QP_RD_ATOMIC;
        trace_rdma_backend_rc_qp_state_rts(qp->ibqp->qp_num, sq_psn);
        break;

    case IBV_QPT_UD:
        if (use_qkey) {
            attr.qkey = qkey;
            attr_mask |= IBV_QP_QKEY;
        }
        trace_rdma_backend_ud_qp_state_rts(qp->ibqp->qp_num, sq_psn,
                                           use_qkey ? qkey : 0);
        break;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

int rdma_backend_query_qp(RdmaBackendQP *qp, struct ibv_qp_attr *attr,
                          int attr_mask, struct ibv_qp_init_attr *init_attr)
{
    if (!qp->ibqp) {
        attr->qp_state = IBV_QPS_RTS;
        return 0;
    }

    return ibv_query_qp(qp->ibqp, attr, attr_mask, init_attr);
}

void rdma_backend_destroy_qp(RdmaBackendQP *qp, RdmaDeviceResources *dev_res)
{
    if (qp->ibqp) {
        ibv_destroy_qp(qp->ibqp);
    }
    g_slist_foreach(qp->cqe_ctx_list.list, free_cqe_ctx, dev_res);
    rdma_protected_gslist_destroy(&qp->cqe_ctx_list);
}

int rdma_backend_create_srq(RdmaBackendSRQ *srq, RdmaBackendPD *pd,
                            uint32_t max_wr, uint32_t max_sge,
                            uint32_t srq_limit)
{
    struct ibv_srq_init_attr srq_init_attr = {};

    srq_init_attr.attr.max_wr = max_wr;
    srq_init_attr.attr.max_sge = max_sge;
    srq_init_attr.attr.srq_limit = srq_limit;

    srq->ibsrq = ibv_create_srq(pd->ibpd, &srq_init_attr);
    if (!srq->ibsrq) {
        rdma_error_report("ibv_create_srq failed, errno=%d", errno);
        return -EIO;
    }

    rdma_protected_gslist_init(&srq->cqe_ctx_list);

    return 0;
}

int rdma_backend_query_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr)
{
    if (!srq->ibsrq) {
        return -EINVAL;
    }

    return ibv_query_srq(srq->ibsrq, srq_attr);
}

int rdma_backend_modify_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr,
                            int srq_attr_mask)
{
    if (!srq->ibsrq) {
        return -EINVAL;
    }

    return ibv_modify_srq(srq->ibsrq, srq_attr, srq_attr_mask);
}

void rdma_backend_destroy_srq(RdmaBackendSRQ *srq, RdmaDeviceResources *dev_res)
{
    if (srq->ibsrq) {
        ibv_destroy_srq(srq->ibsrq);
    }
    g_slist_foreach(srq->cqe_ctx_list.list, free_cqe_ctx, dev_res);
    rdma_protected_gslist_destroy(&srq->cqe_ctx_list);
}

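/*
 * Clamp each device capability requested by the device model to what the
 * host HCA actually reports, warning whenever a requested value has to be
 * lowered.
 */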
#define CHK_ATTR(req, dev, member, fmt) ({ \
    trace_rdma_check_dev_attr(#member, dev.member, req->member); \
    if (req->member > dev.member) { \
        rdma_warn_report("%s = "fmt" is higher than host device capability "fmt, \
                         #member, req->member, dev.member); \
        req->member = dev.member; \
    } \
})

static int init_device_caps(RdmaBackendDev *backend_dev,
                            struct ibv_device_attr *dev_attr)
{
    struct ibv_device_attr bk_dev_attr;
    int rc;

    rc = ibv_query_device(backend_dev->context, &bk_dev_attr);
    if (rc) {
        rdma_error_report("ibv_query_device fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    dev_attr->max_sge = MAX_SGE;
    dev_attr->max_srq_sge = MAX_SGE;

    CHK_ATTR(dev_attr, bk_dev_attr, max_mr_size, "%" PRId64);
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_sge, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_cq, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_mr, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_pd, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp_rd_atom, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp_init_rd_atom, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_ah, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_srq, "%d");

    return 0;
}

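/*
 * Incoming MAD path: a message read from the mux is matched with a receive
 * buffer previously parked on recv_mads_list, a GRH is synthesized at the
 * head of the guest buffer (UD receivers expect one), and a completion is
 * reported to the device model.
 */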
static inline void build_mad_hdr(struct ibv_grh *grh, union ibv_gid *sgid,
                                 union ibv_gid *my_gid, int paylen)
{
    grh->paylen = htons(paylen);
    grh->sgid = *sgid;
    grh->dgid = *my_gid;
}

static void process_incoming_mad_req(RdmaBackendDev *backend_dev,
                                     RdmaCmMuxMsg *msg)
{
    unsigned long cqe_ctx_id;
    BackendCtx *bctx;
    char *mad;

    trace_mad_message("recv", msg->umad.mad, msg->umad_len);

    cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->recv_mads_list);
    if (cqe_ctx_id == -ENOENT) {
        rdma_warn_report("No more free MADs buffers, waiting for a while");
        sleep(THR_POLL_TO);
        return;
    }

    bctx = rdma_rm_get_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
    if (unlikely(!bctx)) {
        rdma_error_report("No matching ctx for req %ld", cqe_ctx_id);
        backend_dev->rdma_dev_res->stats.mad_rx_err++;
        return;
    }

    mad = rdma_pci_dma_map(backend_dev->dev, bctx->sge.addr,
                           bctx->sge.length);
    if (!mad || bctx->sge.length < msg->umad_len + MAD_HDR_SIZE) {
        backend_dev->rdma_dev_res->stats.mad_rx_err++;
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_INV_MAD_BUFF,
                      bctx->up_ctx);
    } else {
        struct ibv_wc wc = {};
        memset(mad, 0, bctx->sge.length);
        build_mad_hdr((struct ibv_grh *)mad,
                      (union ibv_gid *)&msg->umad.hdr.addr.gid, &msg->hdr.sgid,
                      msg->umad_len);
        memcpy(&mad[MAD_HDR_SIZE], msg->umad.mad, msg->umad_len);
        rdma_pci_dma_unmap(backend_dev->dev, mad, bctx->sge.length);

        wc.byte_len = msg->umad_len;
        wc.status = IBV_WC_SUCCESS;
        wc.wc_flags = IBV_WC_GRH;
        backend_dev->rdma_dev_res->stats.mad_rx++;
        comp_handler(bctx->up_ctx, &wc);
    }

    g_free(bctx);
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
}

static inline int rdmacm_mux_can_receive(void *opaque)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;

    return rdmacm_mux_can_process_async(backend_dev);
}

static void rdmacm_mux_read(void *opaque, const uint8_t *buf, int size)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;
    RdmaCmMuxMsg *msg = (RdmaCmMuxMsg *)buf;

    trace_rdmacm_mux("read", msg->hdr.msg_type, msg->hdr.op_code);

    if (msg->hdr.msg_type != RDMACM_MUX_MSG_TYPE_REQ &&
        msg->hdr.op_code != RDMACM_MUX_OP_CODE_MAD) {
        rdma_error_report("Error: Not a MAD request, skipping");
        return;
    }
    process_incoming_mad_req(backend_dev, msg);
}

static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be)
{
    int ret;

    backend_dev->rdmacm_mux.chr_be = mad_chr_be;

    ret = qemu_chr_fe_backend_connected(backend_dev->rdmacm_mux.chr_be);
    if (!ret) {
        rdma_error_report("Missing chardev for MAD multiplexer");
        return -EIO;
    }

    rdma_protected_qlist_init(&backend_dev->recv_mads_list);

    enable_rdmacm_mux_async(backend_dev);

    qemu_chr_fe_set_handlers(backend_dev->rdmacm_mux.chr_be,
                             rdmacm_mux_can_receive, rdmacm_mux_read, NULL,
                             NULL, backend_dev, NULL, true);

    return 0;
}

static void mad_stop(RdmaBackendDev *backend_dev)
{
    clean_recv_mads(backend_dev);
}

static void mad_fini(RdmaBackendDev *backend_dev)
{
    disable_rdmacm_mux_async(backend_dev);
    qemu_chr_fe_disconnect(backend_dev->rdmacm_mux.chr_be);
    rdma_protected_qlist_destroy(&backend_dev->recv_mads_list);
}

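/*
 * GID management: rdma_backend_get_gid_index() resolves a GID to its index
 * in the host port's GID table by linear scan over ibv_query_gid(); add/del
 * requests are also forwarded to the rdmacm-mux so it can associate the GID
 * with this device.
 */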
int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev,
                               union ibv_gid *gid)
{
    union ibv_gid sgid;
    int ret;
    int i = 0;

    do {
        ret = ibv_query_gid(backend_dev->context, backend_dev->port_num, i,
                            &sgid);
        i++;
    } while (!ret && (memcmp(&sgid, gid, sizeof(*gid))));

    trace_rdma_backend_get_gid_index(be64_to_cpu(gid->global.subnet_prefix),
                                     be64_to_cpu(gid->global.interface_id),
                                     i - 1);

    return ret ? ret : i - 1;
}

int rdma_backend_add_gid(RdmaBackendDev *backend_dev, const char *ifname,
                         union ibv_gid *gid)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    trace_rdma_backend_gid_change("add", be64_to_cpu(gid->global.subnet_prefix),
                                  be64_to_cpu(gid->global.interface_id));

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_REG;
    memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to register GID to rdma_umadmux (%d)", ret);
        return -EIO;
    }

    qapi_event_send_rdma_gid_status_changed(ifname, true,
                                            gid->global.subnet_prefix,
                                            gid->global.interface_id);

    return ret;
}

int rdma_backend_del_gid(RdmaBackendDev *backend_dev, const char *ifname,
                         union ibv_gid *gid)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    trace_rdma_backend_gid_change("del", be64_to_cpu(gid->global.subnet_prefix),
                                  be64_to_cpu(gid->global.interface_id));

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_UNREG;
    memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to unregister GID from rdma_umadmux (%d)",
                          ret);
        return -EIO;
    }

    qapi_event_send_rdma_gid_status_changed(ifname, false,
                                            gid->global.subnet_prefix,
                                            gid->global.interface_id);

    return 0;
}

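/*
 * Open the backend: pick an IB device (by name when backend_device_name is
 * given, otherwise the first one listed), open its verbs context and
 * completion channel, clamp the advertised capabilities and connect to the
 * MAD multiplexer. On success only the device list is freed; everything
 * else stays live until rdma_backend_fini().
 */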
int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev,
                      RdmaDeviceResources *rdma_dev_res,
                      const char *backend_device_name, uint8_t port_num,
                      struct ibv_device_attr *dev_attr, CharBackend *mad_chr_be)
{
    int i;
    int ret = 0;
    int num_ibv_devices;
    struct ibv_device **dev_list;

    memset(backend_dev, 0, sizeof(*backend_dev));

    backend_dev->dev = pdev;
    backend_dev->port_num = port_num;
    backend_dev->rdma_dev_res = rdma_dev_res;

    rdma_backend_register_comp_handler(dummy_comp_handler);

    dev_list = ibv_get_device_list(&num_ibv_devices);
    if (!dev_list) {
        rdma_error_report("Failed to get IB devices list");
        return -EIO;
    }

    if (num_ibv_devices == 0) {
        rdma_error_report("No IB devices were found");
        ret = -ENXIO;
        goto out_free_dev_list;
    }

    if (backend_device_name) {
        for (i = 0; dev_list[i]; ++i) {
            if (!strcmp(ibv_get_device_name(dev_list[i]),
                        backend_device_name)) {
                break;
            }
        }

        backend_dev->ib_dev = dev_list[i];
        if (!backend_dev->ib_dev) {
            rdma_error_report("Failed to find IB device %s",
                              backend_device_name);
            ret = -EIO;
            goto out_free_dev_list;
        }
    } else {
        backend_dev->ib_dev = *dev_list;
    }

    rdma_info_report("uverb device %s", backend_dev->ib_dev->dev_name);

    backend_dev->context = ibv_open_device(backend_dev->ib_dev);
    if (!backend_dev->context) {
        rdma_error_report("Failed to open IB device %s",
                          ibv_get_device_name(backend_dev->ib_dev));
        ret = -EIO;
        goto out;
    }

    backend_dev->channel = ibv_create_comp_channel(backend_dev->context);
    if (!backend_dev->channel) {
        rdma_error_report("Failed to create IB communication channel");
        ret = -EIO;
        goto out_close_device;
    }

    ret = init_device_caps(backend_dev, dev_attr);
    if (ret) {
        rdma_error_report("Failed to initialize device capabilities");
        ret = -EIO;
        goto out_destroy_comm_channel;
    }

    ret = mad_init(backend_dev, mad_chr_be);
    if (ret) {
        rdma_error_report("Failed to initialize mad");
        ret = -EIO;
        goto out_destroy_comm_channel;
    }

    backend_dev->comp_thread.run = false;
    backend_dev->comp_thread.is_running = false;

    ah_cache_init();

    goto out_free_dev_list;

out_destroy_comm_channel:
    ibv_destroy_comp_channel(backend_dev->channel);

out_close_device:
    ibv_close_device(backend_dev->context);

out_free_dev_list:
    ibv_free_device_list(dev_list);

out:
    return ret;
}

void rdma_backend_start(RdmaBackendDev *backend_dev)
{
    start_comp_thread(backend_dev);
}

void rdma_backend_stop(RdmaBackendDev *backend_dev)
{
    mad_stop(backend_dev);
    stop_backend_thread(&backend_dev->comp_thread);
}

void rdma_backend_fini(RdmaBackendDev *backend_dev)
{
    mad_fini(backend_dev);
    g_hash_table_destroy(ah_hash);
    ibv_destroy_comp_channel(backend_dev->channel);
    ibv_close_device(backend_dev->context);
}