/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */
#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include <rdma/ib_addr.h>
#include "ib_verbs.h"
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->rq.flushed) {
		dev_dbg(&rcq->hwq.pdev->dev,
			"FP: Adding to RQ Flush list = %p\n", qp);
		list_add_tail(&qp->rq_flush, &rcq->rqf_head);
		qp->rq.flushed = true;
	}
}
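/* Note on lock ordering (a sketch inferred from the two helpers below): the
 * send-CQ flush_lock is always taken first, with IRQs saved, and the recv-CQ
 * lock is nested inside it. When send and recv completions share one CQ the
 * second lock is not taken at all; only a sparse annotation is recorded so
 * the acquire/release counts still balance for static analysis.
 */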
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (qp->rq.flushed) {
		qp->rq.flushed = false;
		list_del(&qp->rq_flush);
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}
static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s:Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}
static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done before reading
		 * any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	spin_unlock_bh(&hwq->lock);
}
/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}
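/* Processing sketch for the service loop below (assumptions noted inline):
 * each NQE is consumed only after its valid bit matches the current epoch,
 * a dma_rmb() orders that valid-bit check before the rest of the entry is
 * read, and the 64-bit q_handle carried in the event is the driver's own
 * CQ/SRQ pointer, split across the low/high 32-bit words by the firmware.
 */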
static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u32 hw_polled = 0;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done before reading
		 * any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;
			struct bnxt_re_cq *cq_p;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			cq->toggle = (le16_to_cpu(nqe->info10_type) &
					NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
			cq->dbinfo.toggle = cq->toggle;
			cq_p = container_of(cq, struct bnxt_re_cq, qplib_cq);
			if (cq_p->uctx_cq_page)
				*((u32 *)cq_p->uctx_cq_page) = cq->toggle;

			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (nq->cqn_handler(nq, cq))
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct bnxt_re_srq *srq_p;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			srq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK)
				      >> NQ_CN_TOGGLE_SFT;
			srq->dbinfo.toggle = srq->toggle;
			srq_p = container_of(srq, struct bnxt_re_srq, qplib_srq);
			if (srq_p->uctx_srq_page)
				*((u32 *)srq_p->uctx_srq_page) = srq->toggle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (nq->srqn_handler(nq,
					     (struct bnxt_qplib_srq *)q_handle,
					     nqsrqe->event))
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	spin_unlock_bh(&hwq->lock);
}
/* bnxt_re_synchronize_nq - self polling notification queue.
 * @nq - notification queue pointer
 *
 * This function polls all pending entries of a given notification queue
 * in the caller's context.
 * This function is useful to synchronize notification entries while resources
 * are going away.
 */
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
{
	int budget = nq->budget;

	nq->budget = nq->hwq.max_elements;
	bnxt_qplib_service_nq(&nq->nq_tasklet);
	nq->budget = budget;
}
static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	if (!nq->requested)
		return;

	nq->requested = false;
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	irq_set_affinity_hint(nq->msix_vec, NULL);
	free_irq(nq->msix_vec, nq);
	kfree(nq->name);
	nq->name = NULL;

	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	tasklet_disable(&nq->nq_tasklet);
}
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	struct bnxt_qplib_res *res = nq->res;
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
			     nq_indx, pci_name(res->pdev));
	if (!nq->name)
		return -ENOMEM;
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc) {
		kfree(nq->name);
		nq->name = NULL;
		tasklet_disable(&nq->nq_tasklet);
		return rc;
	}

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc)
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

	return rc;
}
static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->dbinfo.flags = 0;
	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;

	return 0;
}
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}
/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_srq req = {};
	struct bnxt_qplib_pbl *pbl;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		return rc;
	srq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	if (!srq->hwq.is_user) {
		srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
				   GFP_KERNEL);
		if (!srq->swq) {
			rc = -ENOMEM;
			goto fail;
		}
		for (idx = 0; idx < srq->hwq.max_elements; idx++)
			srq->swq[idx].next_idx = idx + 1;
		srq->swq[srq->last_idx].next_idx = -1;
	}

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	if (srq->threshold)
		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);

	return rc;
}
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 count;

	count = __bnxt_qplib_get_avail(srq_hwq);
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_srq_resp_sb *sb;
	struct cmdq_query_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_SRQ,
				 sizeof(req));

	/* Configure the request */
	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf.sb;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (!rc)
		srq->threshold = le16_to_cpu(sb->srq_limit);
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);

	return rc;
}
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	u32 count = 0;
	int i, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		spin_unlock(&srq_hwq->lock);
		return -EINVAL;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);

	spin_lock(&srq_hwq->lock);
	count = __bnxt_qplib_get_avail(srq_hwq);
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}

	return 0;
}
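/* The software queue allocated below is kept as a linked free list riding on
 * top of the hardware ring: each entry's next_idx points at the next free
 * slot, the last entry wraps back to slot 0 to make the list circular, and
 * swq_start/swq_last chase each other around the ring as WQEs are posted and
 * completed.
 */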
static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int indx;

	que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq)
		return -ENOMEM;

	que->swq_start = 0;
	que->swq_last = que->max_sw_wqe - 1;
	for (indx = 0; indx < que->max_sw_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;

	return 0;
}
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_qp1_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp1 req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP1,
				 sizeof(req));
	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto fail;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
	kfree(rq->swq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}
static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne, psn_pg;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}
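/* Worked example for the pad computation above (illustrative numbers, not
 * taken from the source): with a 4K PAGE_SIZE and size = 8 bytes per PSN
 * entry, a first entry landing at page offset 0x100 gives
 * indx_pad = 0x100 / 8 = 32, i.e. lookups into the pad area skip the 32
 * slots that precede the first usable entry in its page.
 */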
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_create_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp req = {};
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP,
				 sizeof(req));

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);

		if (qp->is_host_msn_tbl) {
			psn_sz = sizeof(struct sq_msn_search);
			qp->msn = 0;
		}
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
				    : 0;
	/* Update msn tbl size */
	if (qp->is_host_msn_tbl && psn_sz) {
		if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
			hwq_attr.aux_depth =
				roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
		else
			hwq_attr.aux_depth =
				roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
		qp->msn_tbl_sz = hwq_attr.aux_depth;
		qp->msn = 0;
	}

	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	if (!sq->hwq.is_user) {
		rc = bnxt_qplib_alloc_init_swq(sq);
		if (rc)
			goto fail_sq;

		if (psn_sz)
			bnxt_qplib_init_psn_ptr(qp, psn_sz);
	}
	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		if (!rq->hwq.is_user) {
			rc = bnxt_qplib_alloc_init_swq(rq);
			if (rc)
				goto fail_rq;
		}

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (!qp->srq) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	spin_lock_bh(&rcfw->tbl_lock);
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
	spin_unlock_bh(&rcfw->tbl_lock);

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}
static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		      CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		      CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}
static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}
static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}
static void bnxt_set_mandatory_attributes(struct bnxt_qplib_res *res,
					  struct bnxt_qplib_qp *qp,
					  struct cmdq_modify_qp *req)
{
	u32 mandatory_flags = 0;

	if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
		mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;

	if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
	    qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) {
		if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC && qp->srq)
			req->flags = cpu_to_le16(CMDQ_MODIFY_QP_FLAGS_SRQ_USED);
		mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
	}

	if (_is_min_rnr_in_rtr_rts_mandatory(res->dattr->dev_cap_flags2) &&
	    (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
	     qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS)) {
		if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
			mandatory_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
	}

	if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_UD ||
	    qp->type == CMDQ_MODIFY_QP_QP_TYPE_GSI)
		mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;

	qp->modify_flags |= mandatory_flags;
	req->qp_type = qp->type;
}
static bool is_optimized_state_transition(struct bnxt_qplib_qp *qp)
{
	if ((qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
	     qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) ||
	    (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
	     qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS))
		return true;

	return false;
}
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_qp req = {};
	u16 vlan_pcp_vlan_dei_vlan_id;
	u32 temp32[4];
	u32 bmask;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_QP,
				 sizeof(req));

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	if (qp->modify_flags & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		/* Set mandatory attributes for INIT -> RTR and RTR -> RTS transition */
		if (_is_optimize_modify_qp_supported(res->dattr->dev_cap_flags2) &&
		    is_optimized_state_transition(qp))
			bnxt_set_mandatory_attributes(res, qp, &req);
	}
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu_pingpong_push_enable |= qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID) {
		vlan_pcp_vlan_dei_vlan_id =
			((res->sgid_tbl.tbl[qp->ah.sgid_index].vlan_id <<
			  CMDQ_MODIFY_QP_VLAN_ID_SFT) &
			 CMDQ_MODIFY_QP_VLAN_ID_MASK);
		vlan_pcp_vlan_dei_vlan_id |=
			((qp->ah.sl << CMDQ_MODIFY_QP_VLAN_PCP_SFT) &
			 CMDQ_MODIFY_QP_VLAN_PCP_MASK);
		req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(vlan_pcp_vlan_dei_vlan_id);
	}

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_qp_resp_sb *sb;
	struct cmdq_query_qp req = {};
	u32 temp32[4];
	int i, rc;

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
	qp->port_id = le16_to_cpu(sb->port_id);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	u32 peek_flags, peek_cons;
	struct cq_base *hw_cqe;
	int i;

	peek_flags = cq->dbinfo.flags;
	peek_cons = cq_hwq->cons;
	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
		if (!CQE_CMP_VALID(hw_cqe, peek_flags))
			continue;
		/*
		 * The valid test of the entry must be done before reading
		 * any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
					 1, &peek_flags);
	}
}
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_qp req = {};
	u32 tbl_indx;
	int rc;

	spin_lock_bh(&rcfw->tbl_lock);
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
	spin_unlock_bh(&rcfw->tbl_lock);

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		spin_lock_bh(&rcfw->tbl_lock);
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		spin_unlock_bh(&rcfw->tbl_lock);
		return rc;
	}

	return 0;
}
void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}
u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}
/* Fill the MSN table into the next psn row */
static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_msn_search *msns;
	u32 start_psn, next_psn;
	u16 start_idx;

	msns = (struct sq_msn_search *)swq->psn_search;
	msns->start_idx_next_psn_start_psn = 0;

	start_psn = swq->start_psn;
	next_psn = swq->next_psn;
	start_idx = swq->slot_idx;
	msns->start_idx_next_psn_start_psn |=
		bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
	qp->msn++;
	qp->msn %= qp->msn_tbl_sz;
}
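/* A note on the helper above (based on its use here, not a spec): the
 * bnxt_re_update_msn_tbl() helper packs the SQ slot index together with the
 * start and next PSNs into the single 64-bit start_idx_next_psn_start_psn
 * word that the hardware walks during HW retransmission, while qp->msn
 * simply advances through the table modulo msn_tbl_sz.
 */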
static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	/* Handle MSN differently on cap flags */
	if (qp->is_host_msn_tbl) {
		bnxt_qplib_fill_msn_search(qp, wqe, swq);
		return;
	}
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}
static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			return -ENOMEM;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
				      (sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
}
static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
			       struct bnxt_qplib_sge *ssge,
			       u16 nsge, u16 *idx)
{
	struct sq_sge *dsge;
	u32 len = 0;
	int indx;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}
static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_swqe *wqe,
				     u16 *wqe_sz, u16 *qdf, u8 mode)
{
	u32 ilsize, bytes;
	u16 nsge;
	u16 slot;

	nsge = wqe->num_sge;
	/* Adding sq_send_hdr is a misnomer, for rq also hdr size is same. */
	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
		bytes += sizeof(struct sq_send_hdr);
	}

	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
	slot = bytes >> 4;
	*wqe_sz = slot;
	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
		slot = 8;
	return slot;
}
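/* Slot math sketch for the helper above (the 32-byte header size is an
 * assumption from struct layout, not quoted from the source): WQE sizes are
 * counted in 16-byte slots (bytes >> 4), so a 32-byte send header plus two
 * 16-byte SGEs needs (32 + 32) >> 4 = 4 slots, while static WQE mode pads
 * every WQE to a fixed 8 slots (128 bytes).
 */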
static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
				     struct bnxt_qplib_swq *swq, bool hw_retx)
{
	struct bnxt_qplib_hwq *hwq;
	u32 pg_num, pg_indx;
	void *buff;
	u32 tail;

	hwq = &sq->hwq;
	if (!hwq->pad_pg)
		return;
	tail = swq->slot_idx / sq->dbinfo.max_slot;
	if (hw_retx) {
		/* For HW retx use qp msn index */
		tail = qp->msn;
		tail %= qp->msn_tbl_sz;
	}
	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
	swq->psn_ext = buff;
	swq->psn_search = buff;
}
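/* Index arithmetic above, illustrated with hypothetical values: with a 4K
 * page and pad_stride = 8 there are 512 entries per page, so logical entry
 * (tail + pad_pgofft) = 600 lands in pad page 600 / 512 = 1 at slot
 * 600 % 512 = 88. For HW retransmission the lookup is keyed by qp->msn
 * rather than by SQ slot, as the hw_retx branch shows.
 */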
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *sq = &qp->sq;

	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
}
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	int i, rc = 0, data_len = 0, pkt_num = 0;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, qdf = 0;
	bool msn_update;
	void *base_hdr;
	void *ext_hdr;
	__le32 temp32;
	u32 wqe_idx;
	u32 slots;
	u16 idx;

	hwq = &sq->hwq;
	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
		dev_err(&hwq->pdev->dev,
			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
		rc = -ENOMEM;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
	bnxt_qplib_pull_psn_buff(qp, sq, swq, qp->is_host_msn_tbl);

	idx = 0;
	swq->slot_idx = hwq->prod;
	swq->slots = slots;
	swq->wr_id = wqe->wr_id;
	swq->type = wqe->type;
	swq->flags = wqe->flags;
	swq->start_psn = sq->psn & BTH_PSN_MASK;
	if (qp->sig_type)
		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));

	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
		/* Copy the inline data */
		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
	else
		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
					       &idx);
	if (data_len < 0)
		goto queue_err;
	/* Make sure we update MSN table only for wired wqes */
	msn_update = true;
	/* Specifics */
	switch (wqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
			/* Assemble info for Raw Ethertype QPs */

			sqe->wqe_type = wqe->type;
			sqe->flags = wqe->flags;
			sqe->wqe_size = wqe_sz;
			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
			    SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
			    SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		fallthrough;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
		struct sq_send_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->length = cpu_to_le32(data_len);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
						      SQ_SEND_DST_QP_MASK);
			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
						    SQ_SEND_AVID_MASK);
			msn_update = false;
		} else {
			sqe->length = cpu_to_le32(data_len);
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
		struct sq_rdma_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
		struct sq_atomic_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
		msn_update = false;

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
		struct sq_fr_pmr_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
			  SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
			 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
			 SQ_FR_PMR_NUMLEVELS_MASK);

		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		ext_sqe->va = cpu_to_le64(wqe->frmr.va);
		msn_update = false;

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
		struct sq_bind_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		ext_sqe->va = cpu_to_le64(wqe->bind.va);
		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
		msn_update = false;

		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}
	if (!qp->is_host_msn_tbl || msn_update) {
		swq->next_psn = sq->psn & BTH_PSN_MASK;
		bnxt_qplib_fill_psn_search(qp, wqe, swq);
	}
queue_err:
	bnxt_qplib_swq_mod_start(sq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->scq;
			nq_work->nq = qp->scq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate SQ nq_work!\n");
			rc = -ENOMEM;
		}
	}
	return rc;
}
2127 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
2129 struct bnxt_qplib_q *rq = &qp->rq;
2131 bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct rq_wqe_hdr *base_hdr;
	struct rq_ext_hdr *ext_hdr;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, idx;
	u32 wqe_idx;
	int rc = 0;

	hwq = &rq->hwq;
	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) RQ is full!\n", qp->id);
		rc = -EINVAL;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
	swq->wr_id = wqe->wr_id;
	swq->slots = rq->dbinfo.max_slot;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s: Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	idx = 0;
	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));
	wqe_sz = (sizeof(struct rq_wqe_hdr) +
		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
	if (!wqe->num_sge) {
		struct sq_sge *sge;

		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
		sge->size = 0;
		wqe_sz++;
	}
	base_hdr->wqe_type = wqe->type;
	base_hdr->flags = wqe->flags;
	base_hdr->wqe_size = wqe_sz;
	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
queue_err:
	bnxt_qplib_swq_mod_start(rq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->rcq;
			nq_work->nq = qp->rcq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate RQ nq_work!\n");
			rc = -ENOMEM;
		}
	}

	return rc;
}
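
/* Create a CQ: allocate the host CQE ring, issue CREATE_CQ over the
 * RCFW command channel, then set up the doorbell context and enable
 * completion notifications (ARMENA). Completion-coalescing parameters
 * are programmed only when the device reports support for them.
 */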
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_cq req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 coalescing = 0;
	u32 pg_sz_lvl;
	int rc;

	if (!cq->dpi) {
		dev_err(&rcfw->pdev->dev,
			"FP: CREATE_CQ failed due to NULL DPI\n");
		return -EINVAL;
	}

	cq->dbinfo.flags = 0;
	hwq_attr.res = res;
	hwq_attr.depth = cq->max_wqe;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	hwq_attr.sginfo = &cq->sg_info;
	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_CQ,
				 sizeof(req));

	req.dpi = cpu_to_le32(cq->dpi->dpi);
	req.cq_handle = cpu_to_le64(cq->cq_handle);
	req.cq_size = cpu_to_le32(cq->max_wqe);

	if (_is_cq_coalescing_supported(res->dattr->dev_cap_flags2)) {
		req.flags |= cpu_to_le16(CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID);
		coalescing |= ((cq->coalescing->buf_maxtime <<
				CMDQ_CREATE_CQ_BUF_MAXTIME_SFT) &
			       CMDQ_CREATE_CQ_BUF_MAXTIME_MASK);
		coalescing |= ((cq->coalescing->normal_maxbuf <<
				CMDQ_CREATE_CQ_NORMAL_MAXBUF_SFT) &
			       CMDQ_CREATE_CQ_NORMAL_MAXBUF_MASK);
		coalescing |= ((cq->coalescing->during_maxbuf <<
				CMDQ_CREATE_CQ_DURING_MAXBUF_SFT) &
			       CMDQ_CREATE_CQ_DURING_MAXBUF_MASK);
		if (cq->coalescing->en_ring_idle_mode)
			coalescing |= CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
		else
			coalescing &= ~CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
		req.coalescing = cpu_to_le32(coalescing);
	}

	pbl = &cq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.cq_fco_cnq_id = cpu_to_le32(
			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	cq->id = le32_to_cpu(resp.xid);
	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
	init_waitqueue_head(&cq->waitq);
	INIT_LIST_HEAD(&cq->sqf_head);
	INIT_LIST_HEAD(&cq->rqf_head);
	spin_lock_init(&cq->compl_lock);
	spin_lock_init(&cq->flush_lock);

	cq->dbinfo.hwq = &cq->hwq;
	cq->dbinfo.xid = cq->id;
	cq->dbinfo.db = cq->dpi->dbr;
	cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	cq->dbinfo.flags = 0;
	cq->dbinfo.toggle = 0;

	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);

	return 0;

fail:
	bnxt_qplib_free_hwq(res, &cq->hwq);
exit:
	return rc;
}

void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_cq *cq)
{
	bnxt_qplib_free_hwq(res, &cq->hwq);
	memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
	/* Reset only the cons bit in the flags */
	cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
}
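
/* Allocate a replacement CQE ring and hand it to firmware via
 * RESIZE_CQ. The switch-over itself happens later: firmware marks the
 * end of the old ring with a cut-off CQE, after which the caller is
 * expected to invoke bnxt_qplib_resize_cq_complete() above to swap in
 * the new ring.
 */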
int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
			 int new_cqes)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_resize_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_resize_cq req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 pg_sz, lvl, new_sz;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_RESIZE_CQ,
				 sizeof(req));
	hwq_attr.sginfo = &cq->sg_info;
	hwq_attr.res = res;
	hwq_attr.depth = new_cqes;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
	if (rc)
		return rc;

	req.cq_cid = cpu_to_le32(cq->id);
	pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
	pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
	lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
	       CMDQ_RESIZE_CQ_LVL_MASK;
	new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
		  CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
	req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
	req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	return rc;
}
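
/* Destroy the CQ in firmware and wait until all outstanding CNQ events
 * for this CQ have been consumed before freeing the host ring, so the
 * NQ handler cannot reference freed memory.
 */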
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_cq req = {};
	u16 total_cnq_events;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_CQ,
				 sizeof(req));

	req.cq_cid = cpu_to_le32(cq->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
	__wait_for_all_nqes(cq, total_cnq_events);
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return 0;
}
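
/* Complete every outstanding SQ WQE with FLUSHED_ERR, bounded by
 * *budget. FENCE pseudo-WQEs are consumed without emitting a CQE.
 * Returns -EAGAIN if the budget is exhausted before the SQ is drained.
 */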
static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int rc = 0;

	/* Now complete all outstanding SQEs with FLUSHED_ERR */
	start = sq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = sq->swq_last;
		if (start == last)
			break;
		/* Skip the FENCE WQE completions */
		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
			bnxt_qplib_cancel_phantom_processing(qp);
			goto skip_compl;
		}
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = sq->swq[last].wr_id;
		cqe->src_qp = qp->id;
		cqe->type = sq->swq[last].type;
		cqe++;
		(*budget)--;
skip_compl:
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 sq->swq[last].slots, &sq->dbinfo.flags);
		sq->swq_last = sq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && sq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}
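
/* RQ counterpart of __flush_sq(): complete all posted RQEs with
 * FLUSHED_ERR, selecting the response CQE opcode from the QP type.
 */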
static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int opcode = 0;
	int rc = 0;

	switch (qp->type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
		break;
	case CMDQ_CREATE_QP_TYPE_RC:
		opcode = CQ_BASE_CQE_TYPE_RES_RC;
		break;
	case CMDQ_CREATE_QP_TYPE_UD:
	case CMDQ_CREATE_QP_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_UD;
		break;
	}

	/* Flush the rest of the RQ */
	start = rq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = rq->swq_last;
		if (last == start)
			break;
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = opcode;
		cqe->qp_handle = (unsigned long)qp;
		cqe->wr_id = rq->swq[last].wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 rq->swq[last].slots, &rq->dbinfo.flags);
		rq->swq_last = rq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!*budget && rq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

void bnxt_qplib_mark_qp_error(void *qp_handle)
{
	struct bnxt_qplib_qp *qp = qp_handle;

	if (!qp)
		return;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
	bnxt_qplib_cancel_phantom_processing(qp);
}
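
/* WA 9060 (pre-Gen-P5 chips only): the completion for a phantom
 * (FENCE) WQE can apparently arrive out of step with the surrounding
 * CQEs. When the WQE's psn_search entry is still marked, completion is
 * deferred with -EAGAIN and the CQ is re-armed; on later polls the
 * already-valid CQEs are peeked until the phantom's REQ CQE shows up,
 * after which processing resumes in single-completion mode.
 */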
/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
 * CQE is tracked from sw_cq_cons to max_element but is valid only if VALID=1
 */
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
{
	u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct cq_req *peek_req_hwcqe;
	struct bnxt_qplib_qp *peek_qp;
	struct bnxt_qplib_q *peek_sq;
	struct bnxt_qplib_swq *swq;
	struct cq_base *peek_hwcqe;
	int i, rc = 0;

	/* Check for the psn_search marking before completing */
	swq = &sq->swq[swq_last];
	if (swq->psn_search &&
	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
		/* Unmark */
		swq->psn_search->flags_next_psn = cpu_to_le32
			(le32_to_cpu(swq->psn_search->flags_next_psn)
				     & ~0x80000000);
		dev_dbg(&cq->hwq.pdev->dev,
			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		sq->condition = true;
		sq->send_phantom = true;

		/* TODO: Only ARM if the previous SQE is ARMALL */
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
		rc = -EAGAIN;
		goto out;
	}
	if (sq->condition) {
		/* Peek at the completions */
		peek_flags = cq->dbinfo.flags;
		peek_sw_cq_cons = cq_cons;
		i = cq->hwq.max_elements;
		while (i--) {
			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
						       peek_sw_cq_cons, NULL);
			/* If the next hwcqe is VALID */
			if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
				/*
				 * The valid test of the entry must be done first before
				 * reading any further.
				 */
				dma_rmb();
				/* If the next hwcqe is a REQ */
				if ((peek_hwcqe->cqe_type_toggle &
				     CQ_BASE_CQE_TYPE_MASK) ==
				     CQ_BASE_CQE_TYPE_REQ) {
					peek_req_hwcqe = (struct cq_req *)
							 peek_hwcqe;
					peek_qp = (struct bnxt_qplib_qp *)
						((unsigned long)
						 le64_to_cpu
						 (peek_req_hwcqe->qp_handle));
					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx =
						((le16_to_cpu(
						  peek_req_hwcqe->sq_cons_idx)
						  - 1) % sq->max_wqe);
					/* If the hwcqe's sq's wr_id matches */
					if (peek_sq == sq &&
					    sq->swq[peek_sq_cons_idx].wr_id ==
					    BNXT_QPLIB_FENCE_WRID) {
						/*
						 * Unbreak only if the phantom
						 * comes back
						 */
						dev_dbg(&cq->hwq.pdev->dev,
							"FP: Got Phantom CQE\n");
						sq->condition = false;
						sq->single = true;
						rc = 0;
						goto out;
					}
				}
				/* Valid but not the phantom, so keep looping */
			} else {
				/* Not valid yet, just exit and wait */
				rc = -EINVAL;
				goto out;
			}
			bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
						 &peek_sw_cq_cons,
						 1, &peek_flags);
		}
		dev_err(&cq->hwq.pdev->dev,
			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		rc = -EINVAL;
	}
out:
	return rc;
}
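
/* In variable-size WQE mode an error CQE reports a slot index rather
 * than a WQE index; walk the software queue to translate the slot into
 * the consumer index of the matching SWQE. Returns -1 if no SWQE owns
 * the reported slot.
 */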
static int bnxt_qplib_get_cqe_sq_cons(struct bnxt_qplib_q *sq, u32 cqe_slot)
{
	struct bnxt_qplib_hwq *sq_hwq;
	struct bnxt_qplib_swq *swq;
	int cqe_sq_cons = -1;
	u32 start, last;

	sq_hwq = &sq->hwq;

	start = sq->swq_start;
	last = sq->swq_last;

	while (last != start) {
		swq = &sq->swq[last];
		if (swq->slot_idx == cqe_slot) {
			cqe_sq_cons = swq->next_idx;
			dev_err(&sq_hwq->pdev->dev, "%s: Found cons wqe = %d slot = %d\n",
				__func__, cqe_sq_cons, cqe_slot);
			break;
		}

		last = swq->next_idx;
	}
	return cqe_sq_cons;
}
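
/* Process a REQ (send) completion. Hardware may aggregate completions,
 * so every SWQE from the current consumer up to the index reported in
 * the CQE is completed here; only signaled WQEs emit a CQE, and an
 * error status is applied to the final WQE and moves the QP onto the
 * CQ's flush list.
 */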
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget,
				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
	struct bnxt_qplib_swq *swq;
	struct bnxt_qplib_cqe *cqe;
	u32 cqe_sq_cons, slot_num;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	int cqe_cons;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: Process Req qp is NULL\n");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}

	if (__is_err_cqe_for_var_wqe(qp, hwcqe->status)) {
		slot_num = le16_to_cpu(hwcqe->sq_cons_idx);
		cqe_cons = bnxt_qplib_get_cqe_sq_cons(sq, slot_num);
		if (cqe_cons < 0) {
			dev_err(&cq->hwq.pdev->dev, "%s: Wrong SQ cons cqe_slot_indx = %d\n",
				__func__, slot_num);
			goto done;
		}
		cqe_sq_cons = cqe_cons;
		dev_err(&cq->hwq.pdev->dev, "%s: cqe_sq_cons = %d swq_last = %d swq_start = %d\n",
			__func__, cqe_sq_cons, sq->swq_last, sq->swq_start);
	}

	/* Need to walk the sq's swq to fabricate CQEs for all previously
	 * signaled SWQEs due to CQE aggregation from the current sq cons
	 * to the cqe_sq_cons
	 */
	cqe = *pcqe;
	while (*budget) {
		if (sq->swq_last == cqe_sq_cons)
			/* Done */
			break;

		swq = &sq->swq[sq->swq_last];
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = swq->wr_id;
		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
			goto skip;
		cqe->type = swq->type;

		/* For the last CQE, check for status. For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status
		 */
		if (swq->next_idx == cqe_sq_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
				sq->swq_last, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			bnxt_qplib_mark_qp_error(qp);
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		} else {
			/* Before we complete, do WA 9060 */
			if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
				if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
					      cqe_sq_cons)) {
					*lib_qp = qp;
					goto out;
				}
			}
			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
skip:
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 swq->slots, &sq->dbinfo.flags);
		sq->swq_last = swq->next_idx;
		if (sq->single)
			break;
	}
out:
	*pcqe = cqe;
	if (sq->swq_last != cqe_sq_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	/*
	 * Back to normal completion mode only after it has completed all of
	 * the WC for this CQE
	 */
	sq->single = false;
done:
	return rc;
}

static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
{
	spin_lock(&srq->hwq.lock);
	srq->swq[srq->last_idx].next_idx = (int)tag;
	srq->last_idx = (int)tag;
	srq->swq[srq->last_idx].next_idx = -1;
	bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
				 srq->dbinfo.max_slot, &srq->dbinfo.flags);
	spin_unlock(&srq->hwq.lock);
}
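
/* Process an RC receive completion. The wr_id comes either from the
 * SRQ's tag array (the SRQE is recycled afterwards) or from the RQ's
 * software queue, which must complete strictly in posting order.
 */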
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (wr_id_idx != rq->swq_last)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

done:
	return 0;
}
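
/* UD flavour of the RC receive handler: additionally recovers the
 * source QP number and source MAC from the CQE for the consumer.
 */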
static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	/* FIXME: Endianness fix needed for smac */
	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
		      ((le32_to_cpu(
		      hwcqe->src_qp_high_srq_or_rq_wr_id) &
		      CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;

		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}

		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

done:
	return 0;
}

bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe;
	bool rc = true;

	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
	/* Check for Valid bit. If the CQE is valid, return false */
	rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);

	return rc;
}
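
/* Receive handler for raw Ethernet and QP1 (GSI) completions; carries
 * the raweth flags and metadata through to the consumer and applies a
 * length workaround for QP1 packets that report a zero length.
 */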
static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
		& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Add workaround for the length misdetection */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, 6);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);

	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: SRQ used but not defined??\n");
			return -EINVAL;
		}
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

done:
	return 0;
}
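
/* Process a terminal CQE: move the QP to ERROR, fabricate status-OK
 * completions for any aggregated signaled SQ WQEs up to the reported
 * consumer index, and queue the QP for RQ flushing.
 */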
static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 swq_last = 0, cqe_cons;
	int rc = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "FP: CQ Process Terminal Error status = 0x%x\n",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp)
		return -EINVAL;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;
	cqe_cons %= sq->max_sw_wqe;

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto sq_done;
	}

	/* Terminal CQE can also include aggregated successful CQEs prior.
	 * So we must complete all CQEs from the current sq's cons to the
	 * cq_cons with status OK
	 */
	cqe = *pcqe;
	while (*budget) {
		swq_last = sq->swq_last;
		if (swq_last == cqe_cons)
			break;
		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[swq_last].wr_id;
			cqe->type = sq->swq[swq_last].type;
			cqe++;
			(*budget)--;
		}
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 sq->swq[swq_last].slots,
					 &sq->dbinfo.flags);
		sq->swq_last = sq->swq[swq_last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && swq_last != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->max_wqe - 1) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
			cqe_cons, rq->max_wqe);
		goto done;
	}

	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		rc = 0;
		goto done;
	}

	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
	 * from the current rq->cons to the rq->prod regardless of what the
	 * rq->cons the terminal CQE indicates
	 */

	/* Add qp to flush list of the CQ */
	bnxt_qplib_add_flush_qp(qp);
done:
	return rc;
}
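
/* A cut-off CQE marks the point where the old ring ends during a CQ
 * resize: clear the resize-in-progress flag and wake whoever is
 * waiting on cq->waitq for the resize to finish.
 */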
static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process Cutoff Error status = 0x%x\n",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}
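
/* Drain this CQ's flush lists: fabricate flush completions for every
 * QP queued for SQ or RQ flushing, bounded by num_cqes. Returns the
 * number of CQEs written to the caller's array.
 */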
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	u32 budget = num_cqes;
	unsigned long flags;

	spin_lock_irqsave(&cq->flush_lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->flush_lock, flags);

	return num_cqes - budget;
}
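
/* Poll up to num_cqes completions, dispatching each valid CQE by type.
 * The consumer index advances only past fully processed CQEs (-EAGAIN
 * leaves the CQE in place for the next poll), and the CQ doorbell is
 * rung once at the end for everything consumed.
 */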
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
	struct cq_base *hw_cqe;
	int budget, rc = 0;
	u32 hw_polled = 0;
	u8 type;

	budget = num_cqes;
	while (budget) {
		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		/* From the device's respective CQE format to qplib_wc */
		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
		switch (type) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget,
						       cq->hwq.cons, lib_qp);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud
					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
					 &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1
					(cq, (struct cq_res_raweth_qp1 *)
					 hw_cqe, &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal
					(cq, (struct cq_terminal *)hw_cqe,
					 &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff
					(cq, (struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"process_cq unknown type 0x%lx\n",
				hw_cqe->cqe_type_toggle &
				CQ_BASE_CQE_TYPE_MASK);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one
			 */
			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
				dev_err(&cq->hwq.pdev->dev,
					"process_cqe error rc = 0x%x\n", rc);
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
					 1, &cq->dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
exit:
	return num_cqes - budget;
}
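
/* Re-arm the CQ with the requested arm type and note, via arm_state,
 * that the consumer expects the completion handler to run.
 */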
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	cq->dbinfo.toggle = cq->toggle;
	if (arm_type)
		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
	/* Using cq->arm_state variable to track whether to issue cq handler */
	atomic_set(&cq->arm_state, 1);
}

void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
{
	flush_workqueue(qp->scq->nq->cqn_wq);
	if (qp->scq != qp->rcq)
		flush_workqueue(qp->rcq->nq->cqn_wq);
}