/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Work Requests exploiting the InfiniBand verbs API
 *
 * Work requests (WR) are submitted via ib_post_send() or ib_post_recv()
 * to the reliably connected (RC) send queue (SQ) or receive queue (RQ),
 * respectively, and become work queue entries (WQEs).
 * While an SQ WR/WQE is pending, we track it until transmission completion.
 * Completions are reported through the send or receive completion queue (CQ)
 * as completion queue entries (CQEs) [aka work completions (WCs)].
 * Since the CQ callback runs in IRQ context, the actual work is deferred to
 * bottom halves implemented as tasklets.
 *
 * SMC uses this to exchange LLC (link layer control)
 * and CDC (connection data control) messages.
 *
 * Copyright IBM Corp. 2016
 */
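/*
 * Typical usage at a glance:
 *
 * send path:    smc_wr_tx_get_free_slot() hands out a free send slot and its
 *               buffer; the caller assembles its message there and posts it
 *               with smc_wr_tx_send(). The smc_wr_tx_handler passed in runs
 *               from the send CQ tasklet once the corresponding CQE arrives.
 * receive path: smc_wr_rx_register_handler() registers one handler per
 *               message type before any receive WRs are posted; incoming
 *               messages are dispatched to it from the receive CQ tasklet,
 *               and the receive WR is re-posted afterwards.
 */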
#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <asm/div64.h>

#include "smc.h"
#include "smc_wr.h"
#define SMC_WR_MAX_POLL_CQE 10	/* max. # of compl. queue elements in 1 poll */

#define SMC_WR_RX_HASH_BITS 4
static DEFINE_HASHTABLE(smc_wr_rx_hash, SMC_WR_RX_HASH_BITS);
static DEFINE_SPINLOCK(smc_wr_rx_hash_lock);
struct smc_wr_tx_pend {	/* control data for a pending send request */
	u64			wr_id;		/* work request id sent */
	smc_wr_tx_handler	handler;
	enum ib_wc_status	wc_status;	/* CQE status */
	struct smc_link		*link;
	u32			idx;		/* index within the send ring */
	struct smc_wr_tx_pend_priv priv;	/* opaque area for the caller */
};
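/* Illustrative sketch (not part of the original file): callers such as the
 * LLC and CDC layers keep per-message state in the .priv area handed out with
 * a send slot and recover it again in their completion handler. The struct
 * below is a hypothetical example; a real user must make sure its state fits
 * into the space that struct smc_wr_tx_pend_priv (see smc_wr.h) reserves.
 */
struct example_tx_pend {		/* hypothetical caller-private state */
	void	*conn;			/* e.g. the owning connection */
	u32	msg_seq;		/* e.g. a message sequence number */
};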
/******************************** send queue *********************************/

/*------------------------------- completion --------------------------------*/
/* look up the send slot that belongs to a completed work request id */
static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
{
	u32 i;

	for (i = 0; i < link->wr_tx_cnt; i++) {
		if (link->wr_tx_pends[i].wr_id == wr_id)
			return i;
	}
	return link->wr_tx_cnt;
}
/* process one send CQE and hand the status to the pending request's handler */
static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
{
	struct smc_wr_tx_pend pnd_snd;
	struct smc_link *link;
	u32 pnd_snd_idx;
	int i;

	link = wc->qp->qp_context;

	if (wc->opcode == IB_WC_REG_MR) {
		if (wc->status)
			link->wr_reg_state = FAILED;
		else
			link->wr_reg_state = CONFIRMED;
		wake_up(&link->wr_reg_wait);
		return;
	}

	pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id);
	if (pnd_snd_idx == link->wr_tx_cnt)
		return;
	link->wr_tx_pends[pnd_snd_idx].wc_status = wc->status;
	memcpy(&pnd_snd, &link->wr_tx_pends[pnd_snd_idx], sizeof(pnd_snd));
	/* clear the full struct smc_wr_tx_pend including .priv */
	memset(&link->wr_tx_pends[pnd_snd_idx], 0,
	       sizeof(link->wr_tx_pends[pnd_snd_idx]));
	memset(&link->wr_tx_bufs[pnd_snd_idx], 0,
	       sizeof(link->wr_tx_bufs[pnd_snd_idx]));
	if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask))
		return;
	if (wc->status) {
		struct smc_link_group *lgr;

		for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
			/* clear full struct smc_wr_tx_pend including .priv */
			memset(&link->wr_tx_pends[i], 0,
			       sizeof(link->wr_tx_pends[i]));
			memset(&link->wr_tx_bufs[i], 0,
			       sizeof(link->wr_tx_bufs[i]));
			clear_bit(i, link->wr_tx_mask);
		}
		/* terminate connections of this link group abnormally */
		lgr = container_of(link, struct smc_link_group,
				   lnk[SMC_SINGLE_LINK]);
		smc_lgr_terminate(lgr);
	}
	pnd_snd.handler(&pnd_snd.priv, link, wc->status);
	wake_up(&link->wr_tx_wait);
}
/* send CQ tasklet (bottom half): poll a batch of up to SMC_WR_MAX_POLL_CQE
 * send CQEs, re-arm CQ notification and run completion handlers outside of
 * IRQ context
 */
static void smc_wr_tx_tasklet_fn(unsigned long data)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)data;
	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
	int i, rc;

	rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
	ib_req_notify_cq(dev->roce_cq_send,
			 IB_CQ_NEXT_COMP |
			 IB_CQ_REPORT_MISSED_EVENTS);
	for (i = 0; i < rc; i++)
		smc_wr_tx_process_cqe(&wc[i]);
}
void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

	tasklet_schedule(&dev->send_tasklet);
}
/*---------------------------- request submission ---------------------------*/
static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
{
	*idx = link->wr_tx_cnt;
	for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
		if (!test_and_set_bit(*idx, link->wr_tx_mask))
			return 0;
	}
	*idx = link->wr_tx_cnt;
	return -EBUSY;
}
/**
 * smc_wr_tx_get_free_slot() - returns buffer for message assembly,
 *			and sets info for pending transmit tracking
 * @link:		Pointer to smc_link used to later send the message.
 * @handler:		Send completion handler function pointer.
 * @wr_buf:		Out value returns pointer to message buffer.
 * @wr_pend_priv:	Out value returns pointer serving as handler context.
 *
 * Return: 0 on success, or -errno on error.
 */
int smc_wr_tx_get_free_slot(struct smc_link *link,
			    smc_wr_tx_handler handler,
			    struct smc_wr_buf **wr_buf,
			    struct smc_wr_tx_pend_priv **wr_pend_priv)
{
	struct smc_wr_tx_pend *wr_pend;
	struct ib_send_wr *wr_ib;
	u64 wr_id;
	u32 idx;
	int rc;

	*wr_buf = NULL;
	*wr_pend_priv = NULL;
	if (in_softirq()) {
		rc = smc_wr_tx_get_free_slot_index(link, &idx);
		if (rc)
			return rc;
	} else {
		rc = wait_event_interruptible_timeout(
			link->wr_tx_wait,
			(smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
			SMC_WR_TX_WAIT_FREE_SLOT_TIME);
		if (!rc) {
			/* timeout - terminate connections */
			struct smc_link_group *lgr;

			lgr = container_of(link, struct smc_link_group,
					   lnk[SMC_SINGLE_LINK]);
			smc_lgr_terminate(lgr);
			return -EPIPE;
		}
		if (rc == -ERESTARTSYS)
			return -ERESTARTSYS;
		if (idx == link->wr_tx_cnt)
			return -EPIPE;
	}
	wr_id = smc_wr_tx_get_next_wr_id(link);
	wr_pend = &link->wr_tx_pends[idx];
	wr_pend->wr_id = wr_id;
	wr_pend->handler = handler;
	wr_pend->link = link;
	wr_pend->idx = idx;
	wr_ib = &link->wr_tx_ibs[idx];
	wr_ib->wr_id = wr_id;
	*wr_buf = &link->wr_tx_bufs[idx];
	*wr_pend_priv = &wr_pend->priv;
	return 0;
}
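/* Illustrative sketch (hypothetical handler, not part of the original file):
 * the smc_wr_tx_handler passed to smc_wr_tx_get_free_slot() is invoked from
 * the send CQ tasklet once the CQE for the posted WR arrives; it gets back the
 * same priv pointer that was handed out with the slot (cf. struct
 * example_tx_pend above).
 */
static void example_tx_handler(struct smc_wr_tx_pend_priv *pend,
			       struct smc_link *link,
			       enum ib_wc_status wc_status)
{
	struct example_tx_pend *ep = (struct example_tx_pend *)pend;

	if (wc_status != IB_WC_SUCCESS)
		return;	/* smc_wr_tx_process_cqe() already terminates the lgr */
	pr_debug("smc_wr example: send WR for message %u completed\n",
		 ep->msg_seq);
}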
/* release a send slot again, e.g. because posting the send WR failed */
int smc_wr_tx_put_slot(struct smc_link *link,
		       struct smc_wr_tx_pend_priv *wr_pend_priv)
{
	struct smc_wr_tx_pend *pend;

	pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv);
	if (pend->idx < link->wr_tx_cnt) {
		/* clear the full struct smc_wr_tx_pend including .priv */
		memset(&link->wr_tx_pends[pend->idx], 0,
		       sizeof(link->wr_tx_pends[pend->idx]));
		memset(&link->wr_tx_bufs[pend->idx], 0,
		       sizeof(link->wr_tx_bufs[pend->idx]));
		test_and_clear_bit(pend->idx, link->wr_tx_mask);
		return 1;
	}
	return 0;
}
/* Send prepared WR slot via ib_post_send.
 * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
 */
int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
{
	struct ib_send_wr *failed_wr = NULL;
	struct smc_wr_tx_pend *pend;
	int rc;

	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	pend = container_of(priv, struct smc_wr_tx_pend, priv);
	rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx],
			  &failed_wr);
	if (rc)
		smc_wr_tx_put_slot(link, priv);
	return rc;
}
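/* Illustrative sketch (hypothetical caller, not part of the original file):
 * a complete send using the example_tx_pend/example_tx_handler pieces sketched
 * above; the actual message layout is left out and the buffer simply zeroed.
 */
static int example_send_msg(struct smc_link *link, u32 msg_seq)
{
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_wr_tx_get_free_slot(link, example_tx_handler, &wr_buf, &pend);
	if (rc)
		return rc;
	((struct example_tx_pend *)pend)->msg_seq = msg_seq;
	memset(wr_buf, 0, SMC_WR_TX_SIZE);	/* assemble the message here */
	return smc_wr_tx_send(link, pend);	/* puts the slot back on error */
}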
/* Register a memory region and wait for the result. */
int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
{
	struct ib_send_wr *failed_wr = NULL;
	int rc;

	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	link->wr_reg_state = POSTED;
	link->wr_reg.wr.wr_id = (u64)(uintptr_t)mr;
	link->wr_reg.mr = mr;
	link->wr_reg.key = mr->rkey;
	failed_wr = &link->wr_reg.wr;
	rc = ib_post_send(link->roce_qp, &link->wr_reg.wr, &failed_wr);
	WARN_ON(failed_wr != &link->wr_reg.wr);
	if (rc)
		return rc;

	rc = wait_event_interruptible_timeout(link->wr_reg_wait,
					      (link->wr_reg_state != POSTED),
					      SMC_WR_REG_MR_WAIT_TIME);
	if (!rc) {
		/* timeout - terminate connections */
		struct smc_link_group *lgr;

		lgr = container_of(link, struct smc_link_group,
				   lnk[SMC_SINGLE_LINK]);
		smc_lgr_terminate(lgr);
		return -EPIPE;
	}
	if (rc == -ERESTARTSYS)
		return -ERESTARTSYS;
	switch (link->wr_reg_state) {	/* set by the send CQE */
	case CONFIRMED:
		rc = 0;
		break;
	case FAILED:
		rc = -EIO;
		break;
	case POSTED:
		rc = -EPIPE;
		break;
	}
	return rc;
}
/* dismiss pending send requests of the given message type, using the caller
 * supplied filter and dismisser callbacks
 */
void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_rx_hdr_type,
			     smc_wr_tx_filter filter,
			     smc_wr_tx_dismisser dismisser,
			     unsigned long data)
{
	struct smc_wr_tx_pend_priv *tx_pend;
	struct smc_wr_rx_hdr *wr_rx;
	int i;

	for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
		wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[i];
		if (wr_rx->type != wr_rx_hdr_type)
			continue;
		tx_pend = &link->wr_tx_pends[i].priv;
		if (filter(tx_pend, data))
			dismisser(tx_pend);
	}
}
/* check whether any send of the given message type is still pending */
bool smc_wr_tx_has_pending(struct smc_link *link, u8 wr_rx_hdr_type,
			   smc_wr_tx_filter filter, unsigned long data)
{
	struct smc_wr_tx_pend_priv *tx_pend;
	struct smc_wr_rx_hdr *wr_rx;
	int i;

	for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
		wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[i];
		if (wr_rx->type != wr_rx_hdr_type)
			continue;
		tx_pend = &link->wr_tx_pends[i].priv;
		if (filter(tx_pend, data))
			return true;
	}
	return false;
}
/****************************** receive queue ********************************/
int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
{
	struct smc_wr_rx_handler *h_iter;
	int rc = 0;

	spin_lock(&smc_wr_rx_hash_lock);
	hash_for_each_possible(smc_wr_rx_hash, h_iter, list, handler->type) {
		if (h_iter->type == handler->type) {
			rc = -EEXIST;
			goto out_unlock;
		}
	}
	hash_add(smc_wr_rx_hash, &handler->list, handler->type);
out_unlock:
	spin_unlock(&smc_wr_rx_hash_lock);
	return rc;
}
/* Demultiplex a received message to its registered handler, based on the
 * message type. Relies on smc_wr_rx_hash having been completely filled before
 * any IB receive WRs are posted, and on it not being modified afterwards, so
 * no locking is needed here.
 */
static inline void smc_wr_rx_demultiplex(struct ib_wc *wc)
{
	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
	struct smc_wr_rx_handler *handler;
	struct smc_wr_rx_hdr *wr_rx;
	u64 temp_wr_id;
	u32 index;

	if (wc->byte_len < sizeof(*wr_rx))
		return; /* short message */
	temp_wr_id = wc->wr_id;
	index = do_div(temp_wr_id, link->wr_rx_cnt);	/* wr_id modulo ring size */
	wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[index];
	hash_for_each_possible(smc_wr_rx_hash, handler, list, wr_rx->type) {
		if (handler->type == wr_rx->type)
			handler->handler(wc, wr_rx);
	}
}
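/* Illustrative sketch (hypothetical handler, not part of the original file):
 * a receive handler is keyed by the type byte in the message header and must
 * be registered before the first receive WRs are posted; the handler prototype
 * here is assumed from the call in smc_wr_rx_demultiplex() above.
 */
static void example_rx_handler(struct ib_wc *wc, void *buf)
{
	struct smc_wr_rx_hdr *hdr = buf;

	pr_debug("smc_wr example: received type 0x%x, %u bytes\n",
		 hdr->type, wc->byte_len);
}

static struct smc_wr_rx_handler example_rx_handler_reg = {
	.handler	= example_rx_handler,
	.type		= 0xff,		/* hypothetical message type */
};

/* to be called once at init time, before smc_wr_rx_post_init(): */
static int example_rx_register(void)
{
	return smc_wr_rx_register_handler(&example_rx_handler_reg);
}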
static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
{
	struct smc_link *link;
	int i;

	for (i = 0; i < num; i++) {
		link = wc[i].qp->qp_context;
		if (wc[i].status == IB_WC_SUCCESS) {
			smc_wr_rx_demultiplex(&wc[i]);
			smc_wr_rx_post(link); /* refill WR RX */
		} else {
			struct smc_link_group *lgr;

			/* handle status errors */
			switch (wc[i].status) {
			case IB_WC_RETRY_EXC_ERR:
			case IB_WC_RNR_RETRY_EXC_ERR:
			case IB_WC_WR_FLUSH_ERR:
				/* terminate connections of this link group
				 * abnormally
				 */
				lgr = container_of(link,
						   struct smc_link_group,
						   lnk[SMC_SINGLE_LINK]);
				smc_lgr_terminate(lgr);
				break;
			default:
				smc_wr_rx_post(link); /* refill WR RX */
				break;
			}
		}
	}
}
/* receive CQ tasklet (bottom half): poll a batch of receive CQEs, re-arm CQ
 * notification and dispatch the received messages outside of IRQ context
 */
static void smc_wr_rx_tasklet_fn(unsigned long data)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)data;
	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
	int rc;

	memset(&wc, 0, sizeof(wc));
	rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc);
	ib_req_notify_cq(dev->roce_cq_recv,
			 IB_CQ_SOLICITED_MASK
			 | IB_CQ_REPORT_MISSED_EVENTS);
	smc_wr_rx_process_cqes(&wc[0], rc);
}
void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

	tasklet_schedule(&dev->recv_tasklet);
}
/* post receive WRs to initially fill the whole receive ring */
int smc_wr_rx_post_init(struct smc_link *link)
{
	u32 i;
	int rc = 0;

	for (i = 0; i < link->wr_rx_cnt; i++)
		rc = smc_wr_rx_post(link);
	return rc;
}
/***************************** init, exit, misc ******************************/
/* cache the QP attributes and size the send/receive rings from the QP caps */
void smc_wr_remember_qp_attr(struct smc_link *lnk)
{
	struct ib_qp_attr *attr = &lnk->qp_attr;
	struct ib_qp_init_attr init_attr;

	memset(attr, 0, sizeof(*attr));
	memset(&init_attr, 0, sizeof(init_attr));
	ib_query_qp(lnk->roce_qp, attr,
		    IB_QP_MIN_RNR_TIMER |
		    IB_QP_PATH_MIG_STATE |
		    IB_QP_CAP,	/* plus the remaining ib_qp_attr_mask bits */
		    &init_attr);

	lnk->wr_tx_cnt = min_t(size_t, SMC_WR_BUF_CNT,
			       lnk->qp_attr.cap.max_send_wr);
	lnk->wr_rx_cnt = min_t(size_t, SMC_WR_BUF_CNT * 3,
			       lnk->qp_attr.cap.max_recv_wr);
}
/* set up the send/receive SGEs and WR templates for all ring slots */
static void smc_wr_init_sge(struct smc_link *lnk)
{
	u32 i;

	for (i = 0; i < lnk->wr_tx_cnt; i++) {
		lnk->wr_tx_sges[i].addr =
			lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE;
		lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE;
		lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_ibs[i].next = NULL;
		lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i];
		lnk->wr_tx_ibs[i].num_sge = 1;
		lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
		lnk->wr_tx_ibs[i].send_flags =
			IB_SEND_SIGNALED | IB_SEND_SOLICITED;
	}
	for (i = 0; i < lnk->wr_rx_cnt; i++) {
		lnk->wr_rx_sges[i].addr =
			lnk->wr_rx_dma_addr + i * SMC_WR_BUF_SIZE;
		lnk->wr_rx_sges[i].length = SMC_WR_BUF_SIZE;
		lnk->wr_rx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
		lnk->wr_rx_ibs[i].next = NULL;
		lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[i];
		lnk->wr_rx_ibs[i].num_sge = 1;
	}
	lnk->wr_reg.wr.next = NULL;
	lnk->wr_reg.wr.num_sge = 0;
	lnk->wr_reg.wr.send_flags = IB_SEND_SIGNALED;
	lnk->wr_reg.wr.opcode = IB_WR_REG_MR;
	lnk->wr_reg.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
}
void smc_wr_free_link(struct smc_link *lnk)
{
	struct ib_device *ibdev;

	memset(lnk->wr_tx_mask, 0,
	       BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));

	ibdev = lnk->smcibdev->ibdev;

	if (lnk->wr_rx_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
				    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
				    DMA_FROM_DEVICE);
		lnk->wr_rx_dma_addr = 0;
	}
	if (lnk->wr_tx_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_tx_dma_addr,
				    SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
				    DMA_TO_DEVICE);
		lnk->wr_tx_dma_addr = 0;
	}
}
void smc_wr_free_link_mem(struct smc_link *lnk)
{
	kfree(lnk->wr_tx_pends);
	lnk->wr_tx_pends = NULL;
	kfree(lnk->wr_tx_mask);
	lnk->wr_tx_mask = NULL;
	kfree(lnk->wr_tx_sges);
	lnk->wr_tx_sges = NULL;
	kfree(lnk->wr_rx_sges);
	lnk->wr_rx_sges = NULL;
	kfree(lnk->wr_rx_ibs);
	lnk->wr_rx_ibs = NULL;
	kfree(lnk->wr_tx_ibs);
	lnk->wr_tx_ibs = NULL;
	kfree(lnk->wr_tx_bufs);
	lnk->wr_tx_bufs = NULL;
	kfree(lnk->wr_rx_bufs);
	lnk->wr_rx_bufs = NULL;
}
int smc_wr_alloc_link_mem(struct smc_link *link)
{
	/* allocate link related memory */
	link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL);
	if (!link->wr_tx_bufs)
		return -ENOMEM;
	link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, SMC_WR_BUF_SIZE,
				   GFP_KERNEL);
	if (!link->wr_rx_bufs)
		goto no_mem_wr_tx_bufs;
	link->wr_tx_ibs = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_ibs[0]),
				  GFP_KERNEL);
	if (!link->wr_tx_ibs)
		goto no_mem_wr_rx_bufs;
	link->wr_rx_ibs = kcalloc(SMC_WR_BUF_CNT * 3,
				  sizeof(link->wr_rx_ibs[0]),
				  GFP_KERNEL);
	if (!link->wr_rx_ibs)
		goto no_mem_wr_tx_ibs;
	link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]),
				   GFP_KERNEL);
	if (!link->wr_tx_sges)
		goto no_mem_wr_rx_ibs;
	link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3,
				   sizeof(link->wr_rx_sges[0]),
				   GFP_KERNEL);
	if (!link->wr_rx_sges)
		goto no_mem_wr_tx_sges;
	link->wr_tx_mask = kzalloc(
		BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*link->wr_tx_mask),
		GFP_KERNEL);
	if (!link->wr_tx_mask)
		goto no_mem_wr_rx_sges;
	link->wr_tx_pends = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_pends[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_pends)
		goto no_mem_wr_tx_mask;
	return 0;

no_mem_wr_tx_mask:	/* unwind in reverse order of allocation */
	kfree(link->wr_tx_mask);
no_mem_wr_rx_sges:
	kfree(link->wr_rx_sges);
no_mem_wr_tx_sges:
	kfree(link->wr_tx_sges);
no_mem_wr_rx_ibs:
	kfree(link->wr_rx_ibs);
no_mem_wr_tx_ibs:
	kfree(link->wr_tx_ibs);
no_mem_wr_rx_bufs:
	kfree(link->wr_rx_bufs);
no_mem_wr_tx_bufs:
	kfree(link->wr_tx_bufs);
	return -ENOMEM;
}
void smc_wr_remove_dev(struct smc_ib_device *smcibdev)
{
	tasklet_kill(&smcibdev->recv_tasklet);
	tasklet_kill(&smcibdev->send_tasklet);
}
void smc_wr_add_dev(struct smc_ib_device *smcibdev)
{
	tasklet_init(&smcibdev->recv_tasklet, smc_wr_rx_tasklet_fn,
		     (unsigned long)smcibdev);
	tasklet_init(&smcibdev->send_tasklet, smc_wr_tx_tasklet_fn,
		     (unsigned long)smcibdev);
}
int smc_wr_create_link(struct smc_link *lnk)
{
	struct ib_device *ibdev = lnk->smcibdev->ibdev;
	int rc = 0;

	smc_wr_tx_set_wr_id(&lnk->wr_tx_id, 0);
	lnk->wr_rx_dma_addr = ib_dma_map_single(
		ibdev, lnk->wr_rx_bufs, SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
		DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ibdev, lnk->wr_rx_dma_addr)) {
		lnk->wr_rx_dma_addr = 0;
		rc = -EIO;
		goto out;
	}
	lnk->wr_tx_dma_addr = ib_dma_map_single(
		ibdev, lnk->wr_tx_bufs, SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
		DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, lnk->wr_tx_dma_addr)) {
		rc = -EIO;
		goto dma_unmap;
	}
	smc_wr_init_sge(lnk);
	memset(lnk->wr_tx_mask, 0,
	       BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
	init_waitqueue_head(&lnk->wr_tx_wait);
	init_waitqueue_head(&lnk->wr_reg_wait);
	return rc;

dma_unmap:
	ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
			    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
			    DMA_FROM_DEVICE);
	lnk->wr_rx_dma_addr = 0;
out:
	return rc;
}