/* Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

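/* Flush-list machinery: once a QP is moved to the error state the hardware
 * stops completing its outstanding work requests, so the QP is linked onto
 * its CQs' flush lists (sqf_head/rqf_head) and the poll path later emits
 * software FLUSHED_ERR completions for it (see __flush_sq()/__flush_rq()
 * further down in this file).
 */
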
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->rq.flushed) {
		dev_dbg(&rcq->hwq.pdev->dev,
			"FP: Adding to RQ Flush list = %p\n", qp);
		list_add_tail(&qp->rq_flush, &rcq->rqf_head);
		qp->rq.flushed = true;
	}
}

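/* The two flush locks are always taken in SCQ-then-RCQ order. When the
 * same CQ serves both directions only the SCQ lock is really acquired;
 * the __acquire()/__release() annotations exist purely so sparse sees a
 * balanced lock count in that aliased case.
 */
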
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (qp->rq.flushed) {
		qp->rq.flushed = false;
		list_del(&qp->rq_flush);
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s:Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

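/* QP1/GSI header buffers are a single coherent DMA region with one
 * fixed-size slot per WQE (max_wqe * hdr_buf_size). A rough usage sketch,
 * assuming the verbs wrapper picks the sizes before creating the QP:
 *
 *	qp->sq_hdr_buf_size = qp1_tx_hdr_size;	// illustrative values,
 *	qp->rq_hdr_buf_size = qp1_rx_hdr_size;	// not driver symbols
 *	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
 */
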
static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					rq->max_wqe * qp->rq_hdr_buf_size,
					&qp->rq_hdr_buf_map, GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						<< 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}

static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u32 hw_polled = 0;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						<< 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			cq->toggle = (le16_to_cpu(nqe->info10_type) &
				      NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
			cq->dbinfo.toggle = cq->toggle;
			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (nq->cqn_handler(nq, (cq)))
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
						<< 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (nq->srqn_handler(nq,
					     (struct bnxt_qplib_srq *)q_handle,
					     nqsrqe->event))
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	spin_unlock_bh(&hwq->lock);
}

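/* The loop above is the canonical NQE consumption pattern: test the valid
 * bit at the current consumer index, dma_rmb() before touching the rest of
 * the entry, dispatch on NQ_BASE_TYPE_*, advance the consumer, and ring
 * the NQ doorbell once per batch to re-arm the interrupt.
 */
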
/* bnxt_re_synchronize_nq - self polling notification queue.
 * @nq - notification queue pointer
 *
 * This function will start polling entries of a given notification queue
 * for all pending entries.
 * This function is useful to synchronize notification entries while resources
 * are going away.
 */
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
{
	int budget = nq->budget;

	nq->budget = nq->hwq.max_elements;
	bnxt_qplib_service_nq(&nq->nq_tasklet);
	nq->budget = budget;
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	if (!nq->requested)
		return;

	nq->requested = false;
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	irq_set_affinity_hint(nq->msix_vec, NULL);
	free_irq(nq->msix_vec, nq);
	kfree(nq->name);
	nq->name = NULL;

	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	tasklet_disable(&nq->nq_tasklet);
}

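/* Teardown order matters here: the doorbell is written with arming
 * disabled before synchronize_irq() so no new interrupt can fire, the
 * vector is freed, and only then is the tasklet killed or disabled. With
 * kill == false the tasklet is merely disabled so a later
 * bnxt_qplib_nq_start_irq(..., need_init = false) can re-enable it.
 */
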
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	struct bnxt_qplib_res *res = nq->res;
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
			     nq_indx, pci_name(res->pdev));
	if (!nq->name)
		return -ENOMEM;
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc) {
		kfree(nq->name);
		nq->name = NULL;
		tasklet_disable(&nq->nq_tasklet);
		return rc;
	}

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc)
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

	return 0;
}

static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->dbinfo.flags = 0;
	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;

	return 0;
}

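/* NQ bring-up happens in this order: map the consumer doorbell (above),
 * register the CQN/SRQN callbacks, request the MSI-X vector, and finally
 * ring the doorbell with arming enabled. bnxt_qplib_enable_nq() below
 * strings these steps together.
 */
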
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;

	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}

int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_srq req = {};
	struct bnxt_qplib_pbl *pbl;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		return rc;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}
	srq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);
	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	if (srq->threshold)
		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);

	return rc;
}

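/* SRQ arming is deferred: bnxt_qplib_modify_srq() only writes the
 * threshold doorbell while enough buffers remain; otherwise it records
 * arm_req and bnxt_qplib_post_srq_recv() re-arms once the available count
 * climbs back above srq->threshold.
 */
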
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 count;

	count = __bnxt_qplib_get_avail(srq_hwq);
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_srq_resp_sb *sb;
	struct cmdq_query_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_SRQ,
				 sizeof(req));

	/* Configure the request */
	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf.sb;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (!rc)
		srq->threshold = le16_to_cpu(sb->srq_limit);
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);

	return rc;
}

int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	u32 count = 0;
	int i, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		spin_unlock(&srq_hwq->lock);
		return -EINVAL;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size (in 16B units) and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);

	spin_lock(&srq_hwq->lock);
	count = __bnxt_qplib_get_avail(srq_hwq);
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}

	return 0;
}

static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int indx;

	que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq)
		return -ENOMEM;

	que->swq_start = 0;
	que->swq_last = que->max_wqe - 1;
	for (indx = 0; indx < que->max_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;

	return 0;
}

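/* The swq array doubles as a free list: next_idx links the entries into a
 * ring, producers take slots at swq_start and completions retire them at
 * swq_last. With max_wqe = 4, for example, the initial links are
 * 0 -> 1 -> 2 -> 3 -> 0, so the two indices chase each other around the
 * ring without any further allocation.
 */
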
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_qp1_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp1 req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP1,
				 sizeof(req));
	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto fail;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);

	rq->dbinfo.hwq = &rq->hwq;
	rq->dbinfo.xid = qp->id;
	rq->dbinfo.db = qp->dpi->dbr;
	rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
	kfree(rq->swq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	void *psn_pg;
	u64 fpsne = 0;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}

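/* The PSN/MSN search table lives in the "pad" area that
 * bnxt_qplib_alloc_init_hwq() places after the SQ proper (aux_stride and
 * aux_depth in the hwq attributes). Since the first entry need not be
 * page aligned, pad_pgofft records how many size-sized slots into the
 * first pad page the table really starts.
 */
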
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_create_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp req = {};
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	qp->dev_cap_flags = res->dattr->dev_cap_flags;

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP,
				 sizeof(req));

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);

		if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
			psn_sz = sizeof(struct sq_msn_search);
			qp->msn = 0;
		}
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
	/* Update msn tbl size */
	if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
		hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
		qp->msn_tbl_sz = hwq_attr.aux_depth;
		qp->msn = 0;
	}

	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	if (psn_sz)
		bnxt_qplib_init_psn_ptr(qp, psn_sz);

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);

	rq->dbinfo.hwq = &rq->hwq;
	rq->dbinfo.xid = qp->id;
	rq->dbinfo.db = qp->dpi->dbr;
	rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		      CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		      CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification on the RTR to RTS transition.
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

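/* In short, __filter_modify_flags() prunes the caller's attribute mask to
 * what the firmware accepts for the current state: from INIT it forces
 * PATH_MTU/SGID_INDEX defaults for the RTR transition, and from RTR it
 * strips the address-vector attributes that may not change on the move
 * to RTS.
 */
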
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_qp req = {};
	u32 temp32[4];
	u32 bmask;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_QP,
				 sizeof(req));

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
				 CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu_pingpong_push_enable |= qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_qp_resp_sb *sb;
	struct cmdq_query_qp req = {};
	u32 temp32[4];
	int i, rc;

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	u32 peek_flags, peek_cons;
	struct cq_base *hw_cqe;
	int i;

	peek_flags = cq->dbinfo.flags;
	peek_cons = cq_hwq->cons;
	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
		if (!CQE_CMP_VALID(hw_cqe, peek_flags))
			continue;
		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
					 1, &peek_flags);
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_qp req = {};
	u32 tbl_indx;
	int rc;

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

/* Fill the MSN table into the next psn row */
static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_msn_search *msns;
	u32 start_psn, next_psn;
	u16 start_idx;

	msns = (struct sq_msn_search *)swq->psn_search;
	msns->start_idx_next_psn_start_psn = 0;

	start_psn = swq->start_psn;
	next_psn = swq->next_psn;
	start_idx = swq->slot_idx;
	msns->start_idx_next_psn_start_psn |=
		bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
	qp->msn++;
	qp->msn %= qp->msn_tbl_sz;
}

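/* Each MSN entry packs the SQ slot index with the start and next PSN via
 * bnxt_re_update_msn_tbl(); qp->msn is a running index into the table and
 * wraps at msn_tbl_sz, matching how the hardware walks the table during
 * hardware retransmission.
 */
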
static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	/* Handle MSN differently on cap flags */
	if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
		bnxt_qplib_fill_msn_search(qp, wqe, swq);
		return;
	}
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}

static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			return -ENOMEM;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
				      (sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
}

static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
			       struct bnxt_qplib_sge *ssge,
			       u16 nsge, u16 *idx)
{
	struct sq_sge *dsge;
	int indx, len = 0;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}

static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_swqe *wqe,
				     u16 *wqe_sz, u16 *qdf, u8 mode)
{
	u32 ilsize, bytes;
	u16 nsge;
	u16 slot;

	nsge = wqe->num_sge;
	/* Though named sq_send_hdr, the RQ WQE header has the same size. */
	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
		bytes += sizeof(struct sq_send_hdr);
	}

	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
	slot = bytes >> 4;
	*wqe_sz = slot;
	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
		slot = 8;
	return slot;
}

static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
				     struct bnxt_qplib_swq *swq, bool hw_retx)
{
	struct bnxt_qplib_hwq *hwq;
	u32 pg_num, pg_indx;
	void *buff;
	u32 tail;

	hwq = &sq->hwq;
	if (!hwq->pad_pg)
		return;
	tail = swq->slot_idx / sq->dbinfo.max_slot;
	if (hw_retx) {
		/* For HW retx use qp msn index */
		tail = qp->msn;
		tail %= qp->msn_tbl_sz;
	}
	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
	swq->psn_ext = buff;
	swq->psn_search = buff;
}

void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *sq = &qp->sq;

	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
}

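/* A rough posting sketch, assuming a bnxt_qplib_swqe already built by the
 * verbs wrapper from an ib_send_wr:
 *
 *	rc = bnxt_qplib_post_send(qp, &wqe);	// repeat per WQE
 *	if (!rc)
 *		bnxt_qplib_post_send_db(qp);	// one doorbell per batch
 *
 * Splitting the post from the doorbell lets callers batch several WQEs
 * behind a single doorbell write.
 */
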
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	int i, rc = 0, data_len = 0, pkt_num = 0;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, qdf = 0;
	bool msn_update;
	void *base_hdr;
	void *ext_hdr;
	__le32 temp32;
	u32 wqe_idx;
	u32 slots;
	u16 idx;

	hwq = &sq->hwq;
	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
		dev_err(&hwq->pdev->dev,
			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
		rc = -ENOMEM;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
	bnxt_qplib_pull_psn_buff(qp, sq, swq, BNXT_RE_HW_RETX(qp->dev_cap_flags));

	idx = 0;
	swq->slot_idx = hwq->prod;
	swq->slots = slots;
	swq->wr_id = wqe->wr_id;
	swq->type = wqe->type;
	swq->flags = wqe->flags;
	swq->start_psn = sq->psn & BTH_PSN_MASK;
	if (qp->sig_type)
		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));

	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
		/* Copy the inline data */
		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
	else
		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
					       &idx);
	if (data_len < 0)
		goto queue_err;
	/* Make sure we update MSN table only for wired wqes */
	msn_update = true;
	/* Specifics */
	switch (wqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
			/* Assemble info for Raw Ethertype QPs */

			sqe->wqe_type = wqe->type;
			sqe->flags = wqe->flags;
			sqe->wqe_size = wqe_sz;
			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
			   SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
			   SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		fallthrough;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
		struct sq_send_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->length = cpu_to_le32(data_len);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
						      SQ_SEND_DST_QP_MASK);
			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
						    SQ_SEND_AVID_MASK);
			msn_update = false;
		} else {
			sqe->length = cpu_to_le32(data_len);
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
		struct sq_rdma_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
		struct sq_atomic_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
		msn_update = false;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
		struct sq_fr_pmr_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
			  SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
			 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
			 SQ_FR_PMR_NUMLEVELS_MASK);

		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		ext_sqe->va = cpu_to_le64(wqe->frmr.va);
		msn_update = false;

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
		struct sq_bind_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		ext_sqe->va = cpu_to_le64(wqe->bind.va);
		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
		msn_update = false;
		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}
	if (!BNXT_RE_HW_RETX(qp->dev_cap_flags) || msn_update) {
		swq->next_psn = sq->psn & BTH_PSN_MASK;
		bnxt_qplib_fill_psn_search(qp, wqe, swq);
	}
queue_err:
	bnxt_qplib_swq_mod_start(sq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
	qp->wqe_cnt++;
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->scq;
			nq_work->nq = qp->scq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate SQ nq_work!\n");
			rc = -ENOMEM;
		}
	}
	return rc;
}

void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
}

int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct rq_wqe_hdr *base_hdr;
	struct rq_ext_hdr *ext_hdr;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, idx;
	u32 wqe_idx;
	int rc = 0;

	hwq = &rq->hwq;
	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) RQ is full!\n", qp->id);
		rc = -EINVAL;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
	swq->wr_id = wqe->wr_id;
	swq->slots = rq->dbinfo.max_slot;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s: Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	idx = 0;
	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));
	wqe_sz = (sizeof(struct rq_wqe_hdr) +
		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
	if (!wqe->num_sge) {
		struct sq_sge *sge;

		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
		sge->size = 0;
		wqe_sz++;
	}
	base_hdr->wqe_type = wqe->type;
	base_hdr->flags = wqe->flags;
	base_hdr->wqe_size = wqe_sz;
	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
queue_err:
	bnxt_qplib_swq_mod_start(rq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->rcq;
			nq_work->nq = qp->rcq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate RQ nq_work!\n");
			rc = -ENOMEM;
		}
	}

	return rc;
}

int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_cq req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 pg_sz_lvl;
	int rc;

	if (!cq->dpi) {
		dev_err(&rcfw->pdev->dev,
			"FP: CREATE_CQ failed due to NULL DPI\n");
		return -EINVAL;
	}

	cq->dbinfo.flags = 0;
	hwq_attr.res = res;
	hwq_attr.depth = cq->max_wqe;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	hwq_attr.sginfo = &cq->sg_info;
	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
	if (rc)
		return rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_CQ,
				 sizeof(req));

	req.dpi = cpu_to_le32(cq->dpi->dpi);
	req.cq_handle = cpu_to_le64(cq->cq_handle);
	req.cq_size = cpu_to_le32(cq->max_wqe);
	pbl = &cq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.cq_fco_cnq_id = cpu_to_le32(
			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	cq->id = le32_to_cpu(resp.xid);
	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
	init_waitqueue_head(&cq->waitq);
	INIT_LIST_HEAD(&cq->sqf_head);
	INIT_LIST_HEAD(&cq->rqf_head);
	spin_lock_init(&cq->compl_lock);
	spin_lock_init(&cq->flush_lock);

	cq->dbinfo.hwq = &cq->hwq;
	cq->dbinfo.xid = cq->id;
	cq->dbinfo.db = cq->dpi->dbr;
	cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	cq->dbinfo.flags = 0;
	cq->dbinfo.toggle = 0;

	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);

	return 0;

fail:
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return rc;
}

void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_cq *cq)
{
	bnxt_qplib_free_hwq(res, &cq->hwq);
	memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
	/* Reset only the cons bit in the flags */
	cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
}

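/* CQ resize is two-phase: bnxt_qplib_resize_cq() below allocates
 * resize_hwq and issues CMDQ_RESIZE_CQ; once firmware completes the
 * switch-over, bnxt_qplib_resize_cq_complete() above frees the old ring,
 * adopts the new one, and resets only the consumer epoch bit.
 */
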
int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
			 int new_cqes)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_resize_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_resize_cq req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 pg_sz, lvl, new_sz;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_RESIZE_CQ,
				 sizeof(req));
	hwq_attr.sginfo = &cq->sg_info;
	hwq_attr.res = res;
	hwq_attr.depth = new_cqes;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
	if (rc)
		return rc;

	req.cq_cid = cpu_to_le32(cq->id);
	pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
	pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
	lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
	       CMDQ_RESIZE_CQ_LVL_MASK;
	new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
		  CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
	req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
	req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	return rc;
}
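
/* bnxt_qplib_destroy_cq() - issue DESTROY_CQ, then wait until all CNQ
 * events reported by firmware have been consumed before freeing the
 * host ring.
 */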
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_cq req = {};
	u16 total_cnq_events;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_CQ,
				 sizeof(req));

	req.cq_cid = cpu_to_le32(cq->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
	__wait_for_all_nqes(cq, total_cnq_events);
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return 0;
}
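
/* Fabricate FLUSHED_ERR completions for every outstanding SQE of a QP
 * that has been moved to the error state.  Runs under the CQ flush locks.
 */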
static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int rc = 0;

	/* Now complete all outstanding SQEs with FLUSHED_ERR */
	start = sq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = sq->swq_last;
		if (start == last)
			break;
		/* Skip the FENCE WQE completions */
		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
			bnxt_qplib_cancel_phantom_processing(qp);
			goto skip_compl;
		}
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = sq->swq[last].wr_id;
		cqe->src_qp = qp->id;
		cqe->type = sq->swq[last].type;
		cqe++;
		(*budget)--;
skip_compl:
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 sq->swq[last].slots, &sq->dbinfo.flags);
		sq->swq_last = sq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && sq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}
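
/* As __flush_sq(), but for the RQ: the fabricated completion opcode
 * depends on the QP type (RC, UD/GSI, or raw Ethernet QP1).
 */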
static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int opcode = 0;
	int rc = 0;

	switch (qp->type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
		break;
	case CMDQ_CREATE_QP_TYPE_RC:
		opcode = CQ_BASE_CQE_TYPE_RES_RC;
		break;
	case CMDQ_CREATE_QP_TYPE_UD:
	case CMDQ_CREATE_QP_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_UD;
		break;
	}

	/* Flush the rest of the RQ */
	start = rq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = rq->swq_last;
		if (last == start)
			break;
		memset(cqe, 0, sizeof(*cqe));
		cqe->status =
		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = opcode;
		cqe->qp_handle = (unsigned long)qp;
		cqe->wr_id = rq->swq[last].wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 rq->swq[last].slots, &rq->dbinfo.flags);
		rq->swq_last = rq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!*budget && rq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}
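
/* Transition a QP to the error state so that no new work can be posted,
 * and abandon any phantom-WQE tracking that may be in flight.
 */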
void bnxt_qplib_mark_qp_error(void *qp_handle)
{
	struct bnxt_qplib_qp *qp = qp_handle;

	if (!qp)
		return;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
	bnxt_qplib_cancel_phantom_processing(qp);
}
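
/* Hardware workaround 9060: a REQ completion may be reported before the
 * CQE for a preceding fence (phantom) WQE has been seen.  When the WQE's
 * psn_search entry is marked, the completion is deferred (-EAGAIN) and
 * subsequent calls peek ahead in the CQ, without consuming entries, until
 * the phantom CQE shows up; only then does normal processing resume.
 */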
/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
 *       CQE is tracked from sw_cq_cons to max_element but valid only if VALID=1
 */
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
{
	u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct cq_req *peek_req_hwcqe;
	struct bnxt_qplib_qp *peek_qp;
	struct bnxt_qplib_q *peek_sq;
	struct bnxt_qplib_swq *swq;
	struct cq_base *peek_hwcqe;
	int i, rc = 0;

	/* Check for the psn_search marking before completing */
	swq = &sq->swq[swq_last];
	if (swq->psn_search &&
	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
		/* Unmark */
		swq->psn_search->flags_next_psn = cpu_to_le32
			(le32_to_cpu(swq->psn_search->flags_next_psn)
			 & ~0x80000000);
		dev_dbg(&cq->hwq.pdev->dev,
			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		sq->condition = true;
		sq->send_phantom = true;

		/* TODO: Only ARM if the previous SQE is ARMALL */
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
		rc = -EAGAIN;
		goto out;
	}
	if (sq->condition) {
		/* Peek at the completions */
		peek_flags = cq->dbinfo.flags;
		peek_sw_cq_cons = cq_cons;
		i = cq->hwq.max_elements;
		while (i--) {
			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
						       peek_sw_cq_cons, NULL);
			/* If the next hwcqe is VALID */
			if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
				/*
				 * The valid test of the entry must be done first
				 * before reading any further.
				 */
				dma_rmb();
				/* If the next hwcqe is a REQ */
				if ((peek_hwcqe->cqe_type_toggle &
				     CQ_BASE_CQE_TYPE_MASK) ==
				    CQ_BASE_CQE_TYPE_REQ) {
					peek_req_hwcqe = (struct cq_req *)
							 peek_hwcqe;
					peek_qp = (struct bnxt_qplib_qp *)
						((unsigned long)le64_to_cpu
						 (peek_req_hwcqe->qp_handle));
					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx =
						((le16_to_cpu(
						  peek_req_hwcqe->sq_cons_idx)
						  - 1) % sq->max_wqe);
					/* If the hwcqe's sq's wr_id matches */
					if (peek_sq == sq &&
					    sq->swq[peek_sq_cons_idx].wr_id ==
					    BNXT_QPLIB_FENCE_WRID) {
						/*
						 * Unbreak only if the phantom
						 * comes back
						 */
						dev_dbg(&cq->hwq.pdev->dev,
							"FP: Got Phantom CQE\n");
						sq->condition = false;
						sq->single = true;
						rc = 0;
						goto out;
					}
				}
				/* Valid but not the phantom, so keep looping */
			} else {
				/* Not valid yet, just exit and wait */
				rc = -EINVAL;
				goto out;
			}
			bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
						 &peek_sw_cq_cons,
						 1, &peek_flags);
		}
		dev_err(&cq->hwq.pdev->dev,
			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		rc = -EINVAL;
	}
out:
	return rc;
}
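
/* Process a REQ (send) completion.  Because the hardware aggregates
 * successful CQEs, walk the swq from the current consumer up to the
 * index reported in the hwcqe and fabricate one qplib CQE per signaled
 * WQE along the way.
 */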
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget,
				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
	struct bnxt_qplib_swq *swq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	u32 cqe_sq_cons;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: Process Req qp is NULL\n");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	/* We must walk the sq's swq to fabricate CQEs for all previously
	 * signaled SWQEs due to CQE aggregation, from the current sq cons
	 * to the cqe_sq_cons
	 */
	cqe = *pcqe;
	while (*budget) {
		if (sq->swq_last == cqe_sq_cons)
			/* Done */
			break;

		swq = &sq->swq[sq->swq_last];
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = swq->wr_id;
		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
			goto skip;
		cqe->type = swq->type;

		/* For the last CQE, check for status.  For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status
		 */
		if (swq->next_idx == cqe_sq_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
				sq->swq_last, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			bnxt_qplib_mark_qp_error(qp);
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		} else {
			/* Before we complete, do WA 9060 */
			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
				      cqe_sq_cons)) {
				*lib_qp = qp;
				goto out;
			}
			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
skip:
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 swq->slots, &sq->dbinfo.flags);
		sq->swq_last = swq->next_idx;
		if (sq->single)
			break;
	}
out:
	*pcqe = cqe;
	if (sq->swq_last != cqe_sq_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	/*
	 * Back to normal completion mode only after it has completed all of
	 * the WC for this CQE
	 */
	sq->single = false;
done:
	return rc;
}
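
/* Return an SRQ element to the free list and credit the hardware queue;
 * free tags are chained through swq[].next_idx under the SRQ lock.
 */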
static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
{
	spin_lock(&srq->hwq.lock);
	srq->swq[srq->last_idx].next_idx = (int)tag;
	srq->last_idx = (int)tag;
	srq->swq[srq->last_idx].next_idx = -1;
	bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
				 srq->dbinfo.max_slot, &srq->dbinfo.flags);
	spin_unlock(&srq->hwq.lock);
}
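
/* Process an RC responder completion: the wr_id is recovered either from
 * the SRQ (by tag) or from the QP's RQ, and an error status moves the QP
 * onto the CQ flush list.
 */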
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (wr_id_idx != rq->swq_last)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}
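
/* UD responder completions additionally carry the source QP number, CFA
 * metadata and source MAC, which are copied into the qplib CQE for the
 * consumer.
 */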
static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	/* FIXME: Endianness fix needed for smac */
	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
				  ((le32_to_cpu(
				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;

		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}

		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}
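
/* Peek at the current consumer index without advancing it. */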
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe;
	bool rc = true;

	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
	/* Check for Valid bit. If the CQE is valid, return false */
	rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
	return rc;
}
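
/* Raw Ethernet/QP1 responder completions: like the UD path, but also
 * captures the raweth flags and metadata reported by the hardware.
 */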
static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
		& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Add workaround for the length misdetection */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, 6);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);

	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: SRQ used but not defined??\n");
			return -EINVAL;
		}
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}
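
/* A terminal CQE marks the QP as errored.  Completions aggregated before
 * the error are reported with status OK up to the SQ consumer index it
 * carries; all outstanding RQEs are then flushed via the CQ flush list.
 */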
static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 swq_last = 0, cqe_cons;
	int rc = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "FP: CQ Process Terminal Error status = 0x%x\n",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp)
		return -EINVAL;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;
	cqe_cons %= sq->max_wqe;

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto sq_done;
	}

	/* A terminal CQE can be preceded by aggregated successful CQEs, so
	 * we must complete all CQEs from the current sq's cons to the
	 * cq_cons with status OK
	 */
	cqe = *pcqe;
	while (*budget) {
		swq_last = sq->swq_last;
		if (swq_last == cqe_cons)
			break;
		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[swq_last].wr_id;
			cqe->type = sq->swq[swq_last].type;
			cqe++;
			(*budget)--;
		}
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 sq->swq[swq_last].slots,
					 &sq->dbinfo.flags);
		sq->swq_last = sq->swq[swq_last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && swq_last != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->max_wqe - 1) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
			cqe_cons, rq->max_wqe);
		rc = -EINVAL;
		goto done;
	}

	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		rc = 0;
		goto done;
	}

	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
	 * from the current rq->cons to the rq->prod regardless of what the
	 * terminal CQE indicates as rq->cons
	 */

	/* Add qp to flush list of the CQ */
	bnxt_qplib_add_flush_qp(qp);
done:
	return rc;
}
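
/* The cut-off CQE arrives on the old ring during a CQ resize; it clears
 * the resize-in-progress flag and wakes waiters on cq->waitq.
 */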
static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process Cutoff Error status = 0x%x\n",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}
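
/* Drain the CQ's SQ/RQ flush lists, fabricating FLUSHED_ERR completions
 * for every QP parked on them.  Returns the number of CQEs written.
 */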
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	u32 budget = num_cqes;
	unsigned long flags;

	spin_lock_irqsave(&cq->flush_lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->flush_lock, flags);

	return num_cqes - budget;
}
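
/* bnxt_qplib_poll_cq() - harvest up to @num_cqes completions into @cqe
 * and return how many were filled in.  A typical consumer (illustrative
 * sketch only, not code from this driver) drains and then re-arms:
 *
 *	do {
 *		polled = bnxt_qplib_poll_cq(cq, wc, budget, &lib_qp);
 *		// hand the polled work completions to the ULP
 *	} while (polled == budget);
 *	bnxt_qplib_req_notify_cq(cq, DBC_DBC_TYPE_CQ_ARMALL);
 */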
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
	struct cq_base *hw_cqe;
	int budget, rc = 0;
	u32 hw_polled = 0;
	u8 type;

	budget = num_cqes;
	while (budget) {
		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		/* From the device's respective CQE format to qplib_wc */
		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
		switch (type) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget,
						       cq->hwq.cons, lib_qp);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud
					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
					 &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1
					(cq, (struct cq_res_raweth_qp1 *)
					 hw_cqe, &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal
					(cq, (struct cq_terminal *)hw_cqe,
					 &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff
					(cq, (struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"process_cq unknown type 0x%lx\n",
				hw_cqe->cqe_type_toggle &
				CQ_BASE_CQE_TYPE_MASK);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one
			 */
			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
				dev_err(&cq->hwq.pdev->dev,
					"process_cqe error rc = 0x%x\n", rc);
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
					 1, &cq->dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
exit:
	return num_cqes - budget;
}
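
/* Re-arm the CQ for the next notification (solicited or all) and record
 * that the consumer wants the completion handler invoked.
 */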
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	cq->dbinfo.toggle = cq->toggle;
	if (arm_type)
		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
	/* Using cq->arm_state variable to track whether to issue cq handler */
	atomic_set(&cq->arm_state, 1);
}
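
/* Flush any CQ notification work still queued for this QP's CQs so that
 * scheduled handlers finish before the caller proceeds (e.g. at destroy).
 */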
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
{
	flush_workqueue(qp->scq->nq->cqn_wq);
	if (qp->scq != qp->rcq)
		flush_workqueue(qp->rcq->nq->cqn_wq);
}