// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2015 - 2020 Intel Corporation.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "verbs_txreq.h"

unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");
static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait_work *wait,
	struct sdma_txreq *stx,
	uint seq,
	bool pkts_sent);
static void iowait_wakeup(struct iowait *wait, int reason);
static void iowait_sdma_drained(struct iowait *wait);
static void qp_pio_drain(struct rvt_qp *qp);
const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_REG_MR] = {
	.length = sizeof(struct ib_reg_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_LOCAL_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_SEND_WITH_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_RC),
},

[IB_WR_OPFN] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_USE_RESERVE,
},

[IB_WR_TID_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_IGN_RNR_CNT,
},

};
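
/*
 * Usage sketch (illustration only, not driver code): rdmavt indexes this
 * table by work-request opcode when a send is posted, so the per-opcode
 * check reduces to roughly:
 *
 *	const struct rvt_operation_params *p = &hfi1_post_parms[wr->opcode];
 *
 *	if (!(p->qpt_support & BIT(qp->ibqp.qp_type)))
 *		return -EINVAL;		// opcode not valid on this QP type
 *
 * The variable names here are assumptions for illustration; the actual
 * enforcement lives in rdmavt's post-send path.
 */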
static void flush_list_head(struct list_head *l)
{
	while (!list_empty(l)) {
		struct sdma_txreq *tx;

		tx = list_first_entry(
			l,
			struct sdma_txreq,
			list);
		list_del_init(&tx->list);
		hfi1_put_txreq(
			container_of(tx, struct verbs_txreq, txreq));
	}
}
static void flush_tx_list(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	flush_list_head(&iowait_get_ib_work(&priv->s_iowait)->tx_head);
	flush_list_head(&iowait_get_tid_work(&priv->s_iowait)->tx_head);
}
static void flush_iowait(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	unsigned long flags;
	seqlock_t *lock = priv->s_iowait.lock;

	if (!lock)
		return;
	write_seqlock_irqsave(lock, flags);
	if (!list_empty(&priv->s_iowait.list)) {
		list_del_init(&priv->s_iowait.list);
		priv->s_iowait.lock = NULL;
		rvt_put_qp(qp);
	}
	write_sequnlock_irqrestore(lock, flags);
}
/*
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen". Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
	/* Constraining 10KB packets to 8KB packets */
	if (mtu == (enum ib_mtu)OPA_MTU_10240)
		mtu = (enum ib_mtu)OPA_MTU_8192;
	return opa_mtu_enum_to_int((enum opa_mtu)mtu);
}
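
/*
 * Worked example (illustrative): a PathRecord carrying OPA_MTU_10240 is
 * clamped above, so verbs_mtu_enum_to_int(dev, (enum ib_mtu)OPA_MTU_10240)
 * returns 8192, while a standard value such as IB_MTU_4096 passes through
 * opa_mtu_enum_to_int() untouched and yields 4096.
 */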
int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct hfi1_devdata *dd = dd_from_dev(dev);
	u8 sc;

	if (attr_mask & IB_QP_AV) {
		sc = ah_to_sc(ibqp->device, &attr->ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	return 0;
}
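
/*
 * Caller sketch (assumed rdmavt behavior, shown for context): this hook is
 * invoked from the core modify_qp path before any attribute is committed,
 * e.g.
 *
 *	if (rdi->driver_f.check_modify_qp &&
 *	    rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
 *		goto inval;
 *
 * so a -EINVAL return here rejects the transition with no side effects.
 */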
/*
 * qp_set_16b - Set the hdr_type based on whether the slid or the
 * dlid in the connection is extended. Only applicable for RC and UC
 * QPs. UD QPs determine this on the fly from the ah in the wqe
 */
static inline void qp_set_16b(struct rvt_qp *qp)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct hfi1_qp_priv *priv = qp->priv;

	/* Update ah_attr to account for extended LIDs */
	hfi1_update_ah_attr(qp->ibqp.device, &qp->remote_ah_attr);

	/* Create 32 bit LIDs */
	hfi1_make_opa_lid(&qp->remote_ah_attr);

	if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH))
		return;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	priv->hdr_type = hfi1_get_hdr_type(ppd->lid, &qp->remote_ah_attr);
}
void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_qp_priv *priv = qp->priv;

	if (attr_mask & IB_QP_AV) {
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
		qp_set_16b(qp);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE &&
	    attr->path_mig_state == IB_MIG_MIGRATED &&
	    qp->s_mig_state == IB_MIG_ARMED) {
		qp->s_flags |= HFI1_S_AHG_CLEAR;
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
		qp_set_16b(qp);
	}

	opfn_qp_init(qp, attr, attr_mask);
}
/**
 * hfi1_setup_wqe - set up the wqe
 * @qp: The qp
 * @wqe: The built wqe
 * @call_send: Determine if the send should be posted or scheduled.
 *
 * Perform setup of the wqe. This is called
 * prior to inserting the wqe into the ring but after
 * the wqe has been setup by RDMAVT. This function
 * allows the driver the opportunity to perform
 * validation and additional setup of the wqe.
 *
 * Returns 0 on success, -EINVAL on failure
 */
int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ah *ah;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		hfi1_setup_tid_rdma_wqe(qp, wqe);
		fallthrough;
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		if (wqe->length > qp->pmtu)
			*call_send = false;
		break;
	case IB_QPT_SMI:
		/*
		 * SM packets should exclusively use VL15 and their SL is
		 * ignored (IBTA v1.3, Section 3.5.8.2). Therefore, when ah
		 * is created, SL is 0 in most cases and as a result some
		 * fields (vl and pmtu) in ah may not be set correctly,
		 * depending on the SL2SC and SC2VL tables at the time.
		 */
		ppd = ppd_from_ibp(ibp);
		dd = dd_from_ppd(ppd);
		if (wqe->length > dd->vld[15].mtu)
			return -EINVAL;
		break;
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = rvt_get_swqe_ah(wqe);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		if (ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)] == 0xf)
			return -EINVAL;
		break;
	default:
		break;
	}

	/*
	 * System latency between send and schedule is large enough that
	 * forcing call_send to true for piothreshold packets is necessary.
	 */
	if (wqe->length <= piothreshold)
		*call_send = true;
	return 0;
}
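
/*
 * Example of the call_send decision above (illustrative numbers, assuming
 * the default piothreshold of 256): a 128-byte RC send ends up with
 * *call_send = true and is issued inline, while a send larger than the
 * QP's pMTU has *call_send forced to false and is left to the send engine.
 */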
/**
 * _hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress w/o regard to the s_flags.
 *
 * It is only used in the post send, which doesn't hold
 * the s_lock.
 */
bool _hfi1_schedule_send(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_devdata *dd = ppd->dd;

	if (dd->flags & HFI1_SHUTDOWN)
		return true;

	return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
			       priv->s_sde ?
			       priv->s_sde->cpu :
			       cpumask_first(cpumask_of_node(dd->node)));
}
static void qp_pio_drain(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	if (!priv->s_sendcontext)
		return;

	while (iowait_pio_pending(&priv->s_iowait)) {
		write_seqlock_irq(&priv->s_sendcontext->waitlock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
		write_sequnlock_irq(&priv->s_sendcontext->waitlock);
		iowait_pio_drain(&priv->s_iowait);
		write_seqlock_irq(&priv->s_sendcontext->waitlock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
		write_sequnlock_irq(&priv->s_sendcontext->waitlock);
	}
}
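
/*
 * Drain pattern note: each pass above arms the PIO-buffer-available
 * interrupt, blocks in iowait_pio_drain() until the outstanding PIO sends
 * complete, then disarms the interrupt; the seqlock brackets only the
 * arm/disarm steps, so the sleep itself happens unlocked.
 */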
/**
 * hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress and caller should hold
 * the s_lock.
 * @return true if the first leg is scheduled;
 * false if the first leg is not scheduled.
 */
bool hfi1_schedule_send(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	if (hfi1_send_ok(qp)) {
		_hfi1_schedule_send(qp);
		return true;
	}
	if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
		iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait,
				IOWAIT_PENDING_IB);
	return false;
}
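
/*
 * Typical caller pattern (sketch, using the generic RVT_S_WAIT_RNR flag as
 * an example): wait flags are cleared and progress rescheduled under the
 * same lock the assertion above checks:
 *
 *	spin_lock_irqsave(&qp->s_lock, flags);
 *	qp->s_flags &= ~RVT_S_WAIT_RNR;
 *	hfi1_schedule_send(qp);
 *	spin_unlock_irqrestore(&qp->s_lock, flags);
 */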
static void hfi1_qp_schedule(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	bool ret;

	if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_IB)) {
		ret = hfi1_schedule_send(qp);
		if (ret)
			iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
	}
	if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_TID)) {
		ret = hfi1_schedule_tid_send(qp);
		if (ret)
			iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
	}
}
void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & flag) {
		qp->s_flags &= ~flag;
		trace_hfi1_qpwakeup(qp, flag);
		hfi1_qp_schedule(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	/* Notify hfi1_destroy_qp() if it is waiting. */
	rvt_put_qp(qp);
}
void hfi1_qp_unbusy(struct rvt_qp *qp, struct iowait_work *wait)
{
	struct hfi1_qp_priv *priv = qp->priv;

	if (iowait_set_work_flag(wait) == IOWAIT_IB_SE) {
		qp->s_flags &= ~RVT_S_BUSY;
		/*
		 * If we are sending a first-leg packet from the second leg,
		 * we need to clear the busy flag from priv->s_flags to
		 * avoid a race condition when the qp wakes up before
		 * the call to hfi1_verbs_send() returns to the second
		 * leg. In that case, the second leg will terminate without
		 * being re-scheduled, resulting in failure to send TID RDMA
		 * WRITE DATA and TID RDMA ACK packets.
		 */
		if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
			priv->s_flags &= ~(HFI1_S_TID_BUSY_SET |
					   RVT_S_BUSY);
			iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
		}
	} else {
		priv->s_flags &= ~RVT_S_BUSY;
	}
}
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait_work *wait,
	struct sdma_txreq *stx,
	uint seq,
	bool pkts_sent)
{
	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
	struct rvt_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	int ret = 0;

	qp = tx->qp;
	priv = qp->priv;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		/* Make a common routine? */
		list_add_tail(&stx->list, &wait->tx_head);
		write_seqlock(&sde->waitlock);
		if (sdma_progress(sde, seq, stx))
			goto eagain;
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibport *ibp =
				to_iport(qp->ibqp.device, qp->port_num);

			ibp->rvp.n_dmawait++;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			iowait_get_priority(&priv->s_iowait);
			iowait_queue(pkts_sent, &priv->s_iowait,
				     &sde->dmawait);
			priv->s_iowait.lock = &sde->waitlock;
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
			rvt_get_qp(qp);
		}
		write_sequnlock(&sde->waitlock);
		hfi1_qp_unbusy(qp, wait);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EBUSY;
	} else {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_put_txreq(tx);
	}
	return ret;
eagain:
	write_sequnlock(&sde->waitlock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	list_del_init(&stx->list);
	return -EAGAIN;
}
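
/*
 * Return contract of iowait_sleep() (summary of the paths above):
 *	-EBUSY:  the QP was parked on sde->dmawait and will be restarted by
 *		 iowait_wakeup() once descriptors free up;
 *	-EAGAIN: the engine made progress while we raced, resubmit the tx;
 *	0:	 the QP left a receive-ok state and the txreq was dropped.
 */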
static void iowait_wakeup(struct iowait *wait, int reason)
{
	struct rvt_qp *qp = iowait_to_qp(wait);

	WARN_ON(reason != SDMA_AVAIL_REASON);
	hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}
static void iowait_sdma_drained(struct iowait *wait)
{
	struct rvt_qp *qp = iowait_to_qp(wait);
	unsigned long flags;

	/*
	 * This happens when the send engine notes
	 * a QP in the error state and cannot
	 * do the flush work until that QP's
	 * sdma work has finished.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & RVT_S_WAIT_DMA) {
		qp->s_flags &= ~RVT_S_WAIT_DMA;
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
static void hfi1_init_priority(struct iowait *w)
{
	struct rvt_qp *qp = iowait_to_qp(w);
	struct hfi1_qp_priv *priv = qp->priv;

	if (qp->s_flags & RVT_S_ACK_PENDING)
		w->priority++;
	if (priv->s_flags & RVT_S_ACK_PENDING)
		w->priority++;
}
/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct sdma_engine *sde;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return NULL;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return NULL;
	default:
		break;
	}
	sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
	return sde;
}
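
/*
 * Selection sketch: the engine choice is keyed by the QP number with the
 * QoS bits shifted off, plus the SC. With dd->qos_shift == 1, for example,
 * QPs 0x10 and 0x11 share a selector (0x10 >> 1 == 0x11 >> 1) and so map
 * to the same engine for a given sc5.
 */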
/**
 * qp_to_send_context - map a qp to a send context
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send context for the qp
 */
struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		/* SMA packets to VL15 */
		return dd->vld[15].sc;
	default:
		break;
	}

	return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift,
					  sc5);
}
static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

static int qp_idle(struct rvt_qp *qp)
{
	return
		qp->s_last == qp->s_acked &&
		qp->s_acked == qp->s_cur &&
		qp->s_cur == qp->s_tail &&
		qp->s_tail == qp->s_head;
}
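
/*
 * The five indices compared above chase one another through the send
 * queue (s_head is where new WQEs are posted, s_last is the oldest not
 * yet completed); the QP counts as idle, printed as "I" below, only when
 * all of them coincide.
 */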
/**
 * qp_iter_print - print the qp information to seq_file
 * @s: the seq_file to emit the qp information on
 * @iter: the iterator for the qp hash list
 */
void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	struct sdma_engine *sde;
	struct send_context *send_context;
	struct rvt_ack_entry *e = NULL;
	struct rvt_srq *srq = qp->ibqp.srq ?
		ibsrq_to_rvtsrq(qp->ibqp.srq) : NULL;

	sde = qp_to_sdma_engine(qp, priv->s_sc);
	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	send_context = qp_to_send_context(qp, priv->s_sc);
	if (qp->s_ack_queue)
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
	seq_printf(s,
		   "N %d %s QP %x R %u %s %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x RNR %d %s %d\n",
		   iter->n,
		   qp_idle(qp) ? "I" : "B",
		   qp->ibqp.qp_num,
		   atomic_read(&qp->refcount),
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe ? wqe->wr.opcode : 0,
		   qp->s_flags,
		   iowait_sdma_pending(&priv->s_iowait),
		   iowait_pio_pending(&priv->s_iowait),
		   !list_empty(&priv->s_iowait.list),
		   qp->timeout,
		   wqe ? wqe->ssn : 0,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->r_psn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->s_avail,
		   /* ack_queue ring pointers, size */
		   qp->s_tail_ack_queue, qp->r_head_ack_queue,
		   rvt_max_atomic(&to_idev(qp->ibqp.device)->rdi),
		   /* remote QP info */
		   qp->remote_qpn,
		   rdma_ah_get_dlid(&qp->remote_ah_attr),
		   rdma_ah_get_sl(&qp->remote_ah_attr),
		   qp->pmtu,
		   qp->s_retry,
		   qp->s_retry_cnt,
		   qp->s_rnr_retry_cnt,
		   qp->s_rnr_retry,
		   sde,
		   sde ? sde->this_idx : 0,
		   send_context,
		   send_context ? send_context->sw_index : 0,
		   ib_cq_head(qp->ibqp.send_cq),
		   ib_cq_tail(qp->ibqp.send_cq),
		   qp->pid,
		   qp->s_state,
		   qp->s_ack_state,
		   /* ack queue information */
		   e ? e->opcode : 0,
		   e ? e->psn : 0,
		   e ? e->lpsn : 0,
		   qp->r_min_rnr_timer,
		   srq ? "SRQ" : "RQ",
		   srq ? srq->rq.size : qp->r_rq.size
		);
}
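
/*
 * Consumer note (assuming the standard hfi1 debugfs layout): each call
 * emits one line of the per-device qp_stats file, so a line such as
 * "N 1 I QP 2 R 1 UC ..." can be matched field-for-field against the
 * format string above when decoding a dump.
 */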
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv;

	priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->owner = qp;

	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL,
				   rdi->dparms.node);
	if (!priv->s_ahg) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	iowait_init(&priv->s_iowait, 1, _hfi1_do_send, _hfi1_do_tid_send,
		    iowait_sleep, iowait_wakeup, iowait_sdma_drained,
		    hfi1_init_priority);
	/* Init to a value to start the running average correctly */
	priv->s_running_pkt_size = piothreshold / 2;
	return priv;
}
void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	hfi1_qp_priv_tid_free(rdi, qp);
	kfree(priv->s_ahg);
	kfree(priv);
}
unsigned free_all_qps(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	int n;
	unsigned qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	return qp_inuse;
}
void flush_qp_waiters(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	flush_iowait(qp);
	hfi1_tid_rdma_flush_wait(qp);
}
void stop_send_queue(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	iowait_cancel_work(&priv->s_iowait);
	if (cancel_work_sync(&priv->tid_rdma.trigger_work))
		rvt_put_qp(qp);
}
void quiesce_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	hfi1_del_tid_reap_timer(qp);
	hfi1_del_tid_retry_timer(qp);
	iowait_sdma_drain(&priv->s_iowait);
	qp_pio_drain(qp);
	flush_tx_list(qp);
}
void notify_qp_reset(struct rvt_qp *qp)
{
	hfi1_qp_kern_exp_rcv_clear_all(qp);
	qp->r_adefered = 0;
	clear_ahg(qp);

	/* Clear any OPFN state */
	if (qp->ibqp.qp_type == IB_QPT_RC)
		opfn_conn_error(qp);
}
/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
	qp->s_pkey_index = qp->s_alt_pkey_index;
	qp->s_flags |= HFI1_S_AHG_CLEAR;
	priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	qp_set_16b(qp);

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
int mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu, OPA_MTU_8192);
}
u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	u32 mtu;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	struct hfi1_ibport *ibp;
	u8 sc, vl;

	ibp = &dd->pport[qp->port_num - 1].ibport_data;
	sc = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
	vl = sc_to_vlt(dd, sc);

	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
	if (vl < PER_VL_SEND_CONTEXTS)
		mtu = min_t(u32, mtu, dd->vld[vl].mtu);
	return mtu;
}
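
/*
 * Worked example (illustrative numbers): if the negotiated pmtu enum maps
 * to 8192 but the QP's VL carries dd->vld[vl].mtu == 4096, the min_t()
 * above returns 4096, keeping the QP's packets within the per-VL limit.
 */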
int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		       struct ib_qp_attr *attr)
{
	int mtu, pidx = qp->port_num - 1;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
	if (mtu == -1)
		return -1; /* values less than 0 are error */

	if (mtu > dd->pport[pidx].ibmtu)
		return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
	else
		return attr->path_mtu;
}
void notify_error_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	seqlock_t *lock = priv->s_iowait.lock;

	if (lock) {
		write_seqlock(lock);
		if (!list_empty(&priv->s_iowait.list) &&
		    !(qp->s_flags & RVT_S_BUSY) &&
		    !(priv->s_flags & RVT_S_BUSY)) {
			qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
			iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
			iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
			list_del_init(&priv->s_iowait.list);
			priv->s_iowait.lock = NULL;
			rvt_put_qp(qp);
		}
		write_sequnlock(lock);
	}

	if (!(qp->s_flags & RVT_S_BUSY) && !(priv->s_flags & RVT_S_BUSY)) {
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		flush_tx_list(qp);
	}
}
/**
 * hfi1_qp_iter_cb - callback for iterator
 * @qp: the qp
 * @v: the sl in low bits of v
 *
 * This is called from the iterator callback to work
 * on an individual qp.
 */
static void hfi1_qp_iter_cb(struct rvt_qp *qp, u64 v)
{
	int lastwqe;
	struct ib_event ev;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u8 sl = (u8)v;

	if (qp->port_num != ppd->port ||
	    (qp->ibqp.qp_type != IB_QPT_UC &&
	     qp->ibqp.qp_type != IB_QPT_RC) ||
	    rdma_ah_get_sl(&qp->remote_ah_attr) != sl ||
	    !(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
		return;

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
	lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
/**
 * hfi1_error_port_qps - put a port's RC/UC qps into error state
 * @ibp: the ibport.
 * @sl: the service level.
 *
 * This function places all RC/UC qps with a given service level into error
 * state. It is generally called to force upper layer apps to abandon stale
 * qps after an sl->sc mapping change.
 */
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;

	rvt_qp_iter(&dev->rdi, sl, hfi1_qp_iter_cb);
}