// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 */

#include "hfi.h"
#include "verbs_txreq.h"
#include "qp.h"
#include "trace.h"

#define TXREQ_LEN 24	/* length of the per-unit cache name buffer */

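/*
 * hfi1_put_txreq - release a verbs_txreq back to the per-device cache
 * and wake the first QP waiting on the txwait list, if any.
 */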
void hfi1_put_txreq(struct verbs_txreq *tx)
{
	struct hfi1_ibdev *dev;
	struct rvt_qp *qp;
	unsigned long flags;
	unsigned int seq;
	struct hfi1_qp_priv *priv;

	qp = tx->qp;
	dev = to_idev(qp->ibqp.device);

	if (tx->mr)
		rvt_put_mr(tx->mr);

	sdma_txclean(dd_from_dev(dev), &tx->txreq);

	/* Free verbs_txreq and return to slab cache */
	kmem_cache_free(dev->verbs_txreq_cache, tx);

	do {
		seq = read_seqbegin(&dev->txwait_lock);
		if (!list_empty(&dev->txwait)) {
			struct iowait *wait;

			write_seqlock_irqsave(&dev->txwait_lock, flags);
			wait = list_first_entry(&dev->txwait, struct iowait,
						list);
			qp = iowait_to_qp(wait);
			priv = qp->priv;
			list_del_init(&priv->s_iowait.list);
			/* refcount held until actual wake up */
			write_sequnlock_irqrestore(&dev->txwait_lock, flags);
			hfi1_qp_wakeup(qp, RVT_S_WAIT_TX);
			break;
		}
	} while (read_seqretry(&dev->txwait_lock, seq));
}

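/*
 * __get_txreq - allocate a verbs_txreq under the txwait seqlock
 *
 * If the allocation fails while the QP is still allowed to make
 * progress, queue the QP on the device txwait list so it is woken
 * when a request is freed.
 */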
struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
				struct rvt_qp *qp)
	__must_hold(&qp->s_lock)
{
	struct verbs_txreq *tx = NULL;

	write_seqlock(&dev->txwait_lock);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		struct hfi1_qp_priv *priv;

		tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP);
		if (tx)
			goto out;
		priv = qp->priv;
		if (list_empty(&priv->s_iowait.list)) {
			dev->n_txwait++;
			qp->s_flags |= RVT_S_WAIT_TX;
			list_add_tail(&priv->s_iowait.list, &dev->txwait);
			priv->s_iowait.lock = &dev->txwait_lock;
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_TX);
			rvt_get_qp(qp);
		}
		qp->s_flags &= ~RVT_S_BUSY;
	}
out:
	write_sequnlock(&dev->txwait_lock);
	return tx;
}

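/* Create the per-device slab cache used for verbs_txreq allocations. */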
int verbs_txreq_init(struct hfi1_ibdev *dev)
{
	char buf[TXREQ_LEN];
	struct hfi1_devdata *dd = dd_from_dev(dev);

	snprintf(buf, sizeof(buf), "hfi1_%u_vtxreq_cache", dd->unit);
	dev->verbs_txreq_cache = kmem_cache_create(buf,
						   sizeof(struct verbs_txreq),
						   0, SLAB_HWCACHE_ALIGN,
						   NULL);
	if (!dev->verbs_txreq_cache)
		return -ENOMEM;
	return 0;
}

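/* Tear down the per-device verbs_txreq slab cache. */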
void verbs_txreq_exit(struct hfi1_ibdev *dev)
{
	kmem_cache_destroy(dev->verbs_txreq_cache);
	dev->verbs_txreq_cache = NULL;
}