1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
4 * Copyright (C) 2018 Marvell International Ltd.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
11 #include <linux/module.h>
12 #include <linux/pci.h>
14 #include "rvu_struct.h"
19 #include "lmac_common.h"
21 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
22 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
23 int type, int chan_id);
49 enum nix_mark_fmt_indexes {
50 NIX_MARK_CFG_IP_DSCP_RED,
51 NIX_MARK_CFG_IP_DSCP_YELLOW,
52 NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
53 NIX_MARK_CFG_IP_ECN_RED,
54 NIX_MARK_CFG_IP_ECN_YELLOW,
55 NIX_MARK_CFG_IP_ECN_YELLOW_RED,
56 NIX_MARK_CFG_VLAN_DEI_RED,
57 NIX_MARK_CFG_VLAN_DEI_YELLOW,
58 NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
62 /* For now considering MC resources needed for broadcast
63 * pkt replication only. i.e 256 HWVFs + 12 PFs.
65 #define MC_TBL_SIZE MC_TBL_SZ_512
66 #define MC_BUF_CNT MC_BUF_CNT_128
69 struct hlist_node node;
73 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
77 /* If blkaddr is 0, return the first nix block address */
79 return rvu->nix_blkaddr[blkaddr];
81 while (i + 1 < MAX_NIX_BLKS) {
82 if (rvu->nix_blkaddr[i] == blkaddr)
83 return rvu->nix_blkaddr[i + 1];
90 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
92 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
95 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
96 if (!pfvf->nixlf || blkaddr < 0)
101 int rvu_get_nixlf_count(struct rvu *rvu)
103 int blkaddr = 0, max = 0;
104 struct rvu_block *block;
106 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
108 block = &rvu->hw->block[blkaddr];
109 max += block->lf.max;
110 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
115 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
117 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
118 struct rvu_hwinfo *hw = rvu->hw;
121 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
122 if (!pfvf->nixlf || blkaddr < 0)
123 return NIX_AF_ERR_AF_LF_INVALID;
125 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
127 return NIX_AF_ERR_AF_LF_INVALID;
130 *nix_blkaddr = blkaddr;
135 static void nix_mce_list_init(struct nix_mce_list *list, int max)
137 INIT_HLIST_HEAD(&list->head);
142 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
149 idx = mcast->next_free_mce;
150 mcast->next_free_mce += count;
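/* Simple bump allocator over the shared MCE context table: the running
 * next_free_mce index only ever moves forward.
 */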
154 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
156 int nix_blkaddr = 0, i = 0;
157 struct rvu *rvu = hw->rvu;
159 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
160 while (nix_blkaddr) {
161 if (blkaddr == nix_blkaddr && hw->nix)
163 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
169 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
173 /* Sync all in flight RX packets to LLC/DRAM */
174 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
175 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
177 dev_err(rvu->dev, "NIX RX software sync failed\n");
180 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
181 int lvl, u16 pcifunc, u16 schq)
183 struct rvu_hwinfo *hw = rvu->hw;
184 struct nix_txsch *txsch;
185 struct nix_hw *nix_hw;
188 nix_hw = get_nix_hw(rvu->hw, blkaddr);
192 txsch = &nix_hw->txsch[lvl];
193 /* Check out of bounds */
194 if (schq >= txsch->schq.max)
197 mutex_lock(&rvu->rsrc_lock);
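/* pfvf_map[] packs the owning PF_FUNC together with config flags;
 * TXSCH_MAP_FUNC()/TXSCH_MAP_FLAGS() extract the respective parts.
 */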
198 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
199 mutex_unlock(&rvu->rsrc_lock);
201 /* TLs aggregating traffic are shared across PF and VFs */
202 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
203 if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
209 if (map_func != pcifunc)
215 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
217 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
218 struct mac_ops *mac_ops;
219 int pkind, pf, vf, lbkid;
223 pf = rvu_get_pf(pcifunc);
224 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
228 case NIX_INTF_TYPE_CGX:
229 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
230 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
232 pkind = rvu_npc_get_pkind(rvu, pf);
235 "PF_Func 0x%x: Invalid pkind\n", pcifunc);
238 pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
239 pfvf->tx_chan_base = pfvf->rx_chan_base;
240 pfvf->rx_chan_cnt = 1;
241 pfvf->tx_chan_cnt = 1;
242 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
243 rvu_npc_set_pkind(rvu, pkind, pfvf);
245 mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
246 /* By default we enable pause frames */
247 if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
248 mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
250 lmac_id, true, true);
252 case NIX_INTF_TYPE_LBK:
253 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
255 /* If NIX1 block is present on the silicon then NIXes are
256 * assigned alternately for lbk interfaces. NIX0 should
257 * send packets on lbk link 1 channels and NIX1 should send
258 * on lbk link 0 channels for the communication between
262 if (rvu->hw->lbk_links > 1)
263 lbkid = vf & 0x1 ? 0 : 1;
265 /* Note that AF's VFs work in pairs and talk over consecutive
266 * loopback channels. Therefore, if an odd number of AF VFs is
267 * enabled, the last VF remains without a pair.
269 pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
270 pfvf->tx_chan_base = vf & 0x1 ?
271 rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
272 rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
273 pfvf->rx_chan_cnt = 1;
274 pfvf->tx_chan_cnt = 1;
275 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
276 pfvf->rx_chan_base, false);
280 /* Add a UCAST forwarding rule in MCAM with the MAC address of the
281 * RVU PF/VF this NIXLF is attached to.
283 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
284 pfvf->rx_chan_base, pfvf->mac_addr);
286 /* Add this PF_FUNC to bcast pkt replication list */
287 err = nix_update_bcast_mce_list(rvu, pcifunc, true);
290 "Bcast list, failed to enable PF_FUNC 0x%x\n",
295 rvu_npc_install_bcast_match_entry(rvu, pcifunc,
296 nixlf, pfvf->rx_chan_base);
297 pfvf->maxlen = NIC_HW_MIN_FRS;
298 pfvf->minlen = NIC_HW_MIN_FRS;
303 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
305 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
311 /* Remove this PF_FUNC from bcast pkt replication list */
312 err = nix_update_bcast_mce_list(rvu, pcifunc, false);
315 "Bcast list, failed to disable PF_FUNC 0x%x\n",
319 /* Free and disable any MCAM entries used by this NIX LF */
320 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
323 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
324 struct nix_bp_cfg_req *req,
327 u16 pcifunc = req->hdr.pcifunc;
328 struct rvu_pfvf *pfvf;
329 int blkaddr, pf, type;
333 pf = rvu_get_pf(pcifunc);
334 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
335 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
338 pfvf = rvu_get_pfvf(rvu, pcifunc);
339 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
341 chan_base = pfvf->rx_chan_base + req->chan_base;
342 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
343 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
344 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
350 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
351 int type, int chan_id)
353 int bpid, blkaddr, lmac_chan_cnt;
354 struct rvu_hwinfo *hw = rvu->hw;
355 u16 cgx_bpid_cnt, lbk_bpid_cnt;
356 struct rvu_pfvf *pfvf;
360 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
361 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
362 lmac_chan_cnt = cfg & 0xFF;
364 cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
365 lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
367 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
369 /* Backpressure IDs range division
370 * CGX channels are mapped to (0 - 191) BPIDs
371 * LBK channels are mapped to (192 - 255) BPIDs
372 * SDP channels are mapped to (256 - 511) BPIDs
374 * LMAC channels and BPIDs are mapped as follows
375 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
376 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
377 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
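 * Example (illustrative, with 16 channels per LMAC and 4 LMACs per CGX as
 * implied above): cgx(1)_lmac(2)_chan(3) => bpid = (1 * 4 * 16) + (2 * 16) + 3 = 99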
380 case NIX_INTF_TYPE_CGX:
381 if ((req->chan_base + req->chan_cnt) > 15)
383 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
384 /* Assign bpid based on cgx, lmac and chan id */
385 bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
386 (lmac_id * lmac_chan_cnt) + req->chan_base;
388 if (req->bpid_per_chan)
390 if (bpid > cgx_bpid_cnt)
394 case NIX_INTF_TYPE_LBK:
395 if ((req->chan_base + req->chan_cnt) > 63)
397 bpid = cgx_bpid_cnt + req->chan_base;
398 if (req->bpid_per_chan)
400 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
409 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
410 struct nix_bp_cfg_req *req,
411 struct nix_bp_cfg_rsp *rsp)
413 int blkaddr, pf, type, chan_id = 0;
414 u16 pcifunc = req->hdr.pcifunc;
415 struct rvu_pfvf *pfvf;
420 pf = rvu_get_pf(pcifunc);
421 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
423 /* Enable backpressure only for CGX mapped PFs and LBK interface */
424 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
427 pfvf = rvu_get_pfvf(rvu, pcifunc);
428 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
430 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
431 chan_base = pfvf->rx_chan_base + req->chan_base;
434 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
436 dev_warn(rvu->dev, "Failed to enable backpressure\n");
440 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
441 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
442 cfg | (bpid & 0xFF) | BIT_ULL(16));
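/* Per the write above, the BPID sits in the low byte of
 * NIX_AF_RX_CHANX_CFG and bit 16 enables backpressure on this channel.
 */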
444 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
447 for (chan = 0; chan < req->chan_cnt; chan++) {
448 /* Map channel and the BPID assigned to it */
449 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
451 if (req->bpid_per_chan)
454 rsp->chan_cnt = req->chan_cnt;
459 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
460 u64 format, bool v4, u64 *fidx)
462 struct nix_lso_format field = {0};
464 /* IP's Length field */
465 field.layer = NIX_TXLAYER_OL3;
466 /* In IPv4 the length field is at byte offset 2, for IPv6 it's at 4 */
467 field.offset = v4 ? 2 : 4;
468 field.sizem1 = 1; /* i.e 2 bytes */
469 field.alg = NIX_LSOALG_ADD_PAYLEN;
470 rvu_write64(rvu, blkaddr,
471 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
474 /* No ID field in IPv6 header */
479 field.layer = NIX_TXLAYER_OL3;
481 field.sizem1 = 1; /* i.e 2 bytes */
482 field.alg = NIX_LSOALG_ADD_SEGNUM;
483 rvu_write64(rvu, blkaddr,
484 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
488 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
489 u64 format, u64 *fidx)
491 struct nix_lso_format field = {0};
493 /* TCP's sequence number field */
494 field.layer = NIX_TXLAYER_OL4;
496 field.sizem1 = 3; /* i.e 4 bytes */
497 field.alg = NIX_LSOALG_ADD_OFFSET;
498 rvu_write64(rvu, blkaddr,
499 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
502 /* TCP's flags field */
503 field.layer = NIX_TXLAYER_OL4;
505 field.sizem1 = 1; /* 2 bytes */
506 field.alg = NIX_LSOALG_TCP_FLAGS;
507 rvu_write64(rvu, blkaddr,
508 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
512 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
514 u64 cfg, idx, fidx = 0;
516 /* Get max HW supported format indices */
517 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
518 nix_hw->lso.total = cfg;
521 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
522 /* For TSO, set first and middle segment flags to
523 * mask out PSH, RST & FIN flags in TCP packet
525 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
526 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
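/* 0xFFF2 keeps every TCP flag bit except FIN (0x01), RST (0x04) and
 * PSH (0x08), i.e. exactly the flags named in the comment above.
 */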
527 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
529 /* Setup default static LSO formats
531 * Configure format fields for TCPv4 segmentation offload
533 idx = NIX_LSO_FORMAT_IDX_TSOV4;
534 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
535 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
537 /* Set rest of the fields to NOP */
538 for (; fidx < 8; fidx++) {
539 rvu_write64(rvu, blkaddr,
540 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
542 nix_hw->lso.in_use++;
544 /* Configure format fields for TCPv6 segmentation offload */
545 idx = NIX_LSO_FORMAT_IDX_TSOV6;
547 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
548 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
550 /* Set rest of the fields to NOP */
551 for (; fidx < 8; fidx++) {
552 rvu_write64(rvu, blkaddr,
553 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
555 nix_hw->lso.in_use++;
558 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
560 kfree(pfvf->rq_bmap);
561 kfree(pfvf->sq_bmap);
562 kfree(pfvf->cq_bmap);
564 qmem_free(rvu->dev, pfvf->rq_ctx);
566 qmem_free(rvu->dev, pfvf->sq_ctx);
568 qmem_free(rvu->dev, pfvf->cq_ctx);
570 qmem_free(rvu->dev, pfvf->rss_ctx);
571 if (pfvf->nix_qints_ctx)
572 qmem_free(rvu->dev, pfvf->nix_qints_ctx);
573 if (pfvf->cq_ints_ctx)
574 qmem_free(rvu->dev, pfvf->cq_ints_ctx);
576 pfvf->rq_bmap = NULL;
577 pfvf->cq_bmap = NULL;
578 pfvf->sq_bmap = NULL;
582 pfvf->rss_ctx = NULL;
583 pfvf->nix_qints_ctx = NULL;
584 pfvf->cq_ints_ctx = NULL;
587 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
588 struct rvu_pfvf *pfvf, int nixlf,
589 int rss_sz, int rss_grps, int hwctx_size,
592 int err, grp, num_indices;
594 /* RSS is not requested for this NIXLF */
597 num_indices = rss_sz * rss_grps;
599 /* Alloc NIX RSS HW context memory and config the base */
600 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
604 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
605 (u64)pfvf->rss_ctx->iova);
607 /* Config full RSS table size, enable RSS and caching */
608 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
609 BIT_ULL(36) | BIT_ULL(4) |
610 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
612 /* Config RSS group offset and sizes */
613 for (grp = 0; grp < rss_grps; grp++)
614 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
615 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
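/* e.g. (illustrative) with rss_sz = 64: group 2 gets offset 64 * 2 = 128
 * and a size field of ilog2(64) - 1 = 5.
 */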
619 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
620 struct nix_aq_inst_s *inst)
622 struct admin_queue *aq = block->aq;
623 struct nix_aq_res_s *result;
627 result = (struct nix_aq_res_s *)aq->res->base;
629 /* Get the current head pointer, where this instruction will be appended */
630 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
631 head = (reg >> 4) & AQ_PTR_MASK;
633 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
634 (void *)inst, aq->inst->entry_sz);
635 memset(result, 0, sizeof(*result));
636 /* sync into memory */
639 /* Ring the doorbell and wait for result */
640 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
641 while (result->compcode == NIX_AQ_COMP_NOTDONE) {
649 if (result->compcode != NIX_AQ_COMP_GOOD)
650 /* TODO: Replace this with some error code */
656 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
657 struct nix_aq_enq_req *req,
658 struct nix_aq_enq_rsp *rsp)
660 struct rvu_hwinfo *hw = rvu->hw;
661 u16 pcifunc = req->hdr.pcifunc;
662 int nixlf, blkaddr, rc = 0;
663 struct nix_aq_inst_s inst;
664 struct rvu_block *block;
665 struct admin_queue *aq;
666 struct rvu_pfvf *pfvf;
671 blkaddr = nix_hw->blkaddr;
672 block = &hw->block[blkaddr];
675 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
676 return NIX_AF_ERR_AQ_ENQUEUE;
679 pfvf = rvu_get_pfvf(rvu, pcifunc);
680 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
682 /* Skip NIXLF check for broadcast MCE entry init */
683 if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
684 if (!pfvf->nixlf || nixlf < 0)
685 return NIX_AF_ERR_AF_LF_INVALID;
688 switch (req->ctype) {
689 case NIX_AQ_CTYPE_RQ:
690 /* Check if index exceeds max no of queues */
691 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
692 rc = NIX_AF_ERR_AQ_ENQUEUE;
694 case NIX_AQ_CTYPE_SQ:
695 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
696 rc = NIX_AF_ERR_AQ_ENQUEUE;
698 case NIX_AQ_CTYPE_CQ:
699 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
700 rc = NIX_AF_ERR_AQ_ENQUEUE;
702 case NIX_AQ_CTYPE_RSS:
703 /* Check if RSS is enabled and qidx is within range */
704 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
705 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
706 (req->qidx >= (256UL << (cfg & 0xF))))
707 rc = NIX_AF_ERR_AQ_ENQUEUE;
709 case NIX_AQ_CTYPE_MCE:
710 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
712 /* Check if index exceeds MCE list length */
713 if (!nix_hw->mcast.mce_ctx ||
714 (req->qidx >= (256UL << (cfg & 0xF))))
715 rc = NIX_AF_ERR_AQ_ENQUEUE;
717 /* Adding multicast lists for requests from PF/VFs is not
718 * yet supported, so ignore this.
721 rc = NIX_AF_ERR_AQ_ENQUEUE;
724 rc = NIX_AF_ERR_AQ_ENQUEUE;
730 /* Check if SQ pointed SMQ belongs to this PF/VF or not */
731 if (req->ctype == NIX_AQ_CTYPE_SQ &&
732 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
733 (req->op == NIX_AQ_INSTOP_WRITE &&
734 req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
735 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
736 pcifunc, req->sq.smq))
737 return NIX_AF_ERR_AQ_ENQUEUE;
740 memset(&inst, 0, sizeof(struct nix_aq_inst_s));
742 inst.cindex = req->qidx;
743 inst.ctype = req->ctype;
745 /* Currently we are not supporting enqueuing multiple instructions,
746 * so always choose first entry in result memory.
748 inst.res_addr = (u64)aq->res->iova;
750 /* Hardware uses the same aq->res->base for updating the result of
751 * the previous instruction, hence wait here till it is done.
753 spin_lock(&aq->lock);
755 /* Clean result + context memory */
756 memset(aq->res->base, 0, aq->res->entry_sz);
757 /* Context needs to be written at RES_ADDR + 128 */
758 ctx = aq->res->base + 128;
759 /* Mask needs to be written at RES_ADDR + 256 */
760 mask = aq->res->base + 256;
763 case NIX_AQ_INSTOP_WRITE:
764 if (req->ctype == NIX_AQ_CTYPE_RQ)
765 memcpy(mask, &req->rq_mask,
766 sizeof(struct nix_rq_ctx_s));
767 else if (req->ctype == NIX_AQ_CTYPE_SQ)
768 memcpy(mask, &req->sq_mask,
769 sizeof(struct nix_sq_ctx_s));
770 else if (req->ctype == NIX_AQ_CTYPE_CQ)
771 memcpy(mask, &req->cq_mask,
772 sizeof(struct nix_cq_ctx_s));
773 else if (req->ctype == NIX_AQ_CTYPE_RSS)
774 memcpy(mask, &req->rss_mask,
775 sizeof(struct nix_rsse_s));
776 else if (req->ctype == NIX_AQ_CTYPE_MCE)
777 memcpy(mask, &req->mce_mask,
778 sizeof(struct nix_rx_mce_s));
780 case NIX_AQ_INSTOP_INIT:
781 if (req->ctype == NIX_AQ_CTYPE_RQ)
782 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
783 else if (req->ctype == NIX_AQ_CTYPE_SQ)
784 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
785 else if (req->ctype == NIX_AQ_CTYPE_CQ)
786 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
787 else if (req->ctype == NIX_AQ_CTYPE_RSS)
788 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
789 else if (req->ctype == NIX_AQ_CTYPE_MCE)
790 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
792 case NIX_AQ_INSTOP_NOP:
793 case NIX_AQ_INSTOP_READ:
794 case NIX_AQ_INSTOP_LOCK:
795 case NIX_AQ_INSTOP_UNLOCK:
798 rc = NIX_AF_ERR_AQ_ENQUEUE;
799 spin_unlock(&aq->lock);
803 /* Submit the instruction to AQ */
804 rc = nix_aq_enqueue_wait(rvu, block, &inst);
806 spin_unlock(&aq->lock);
810 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
811 if (req->op == NIX_AQ_INSTOP_INIT) {
812 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
813 __set_bit(req->qidx, pfvf->rq_bmap);
814 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
815 __set_bit(req->qidx, pfvf->sq_bmap);
816 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
817 __set_bit(req->qidx, pfvf->cq_bmap);
820 if (req->op == NIX_AQ_INSTOP_WRITE) {
821 if (req->ctype == NIX_AQ_CTYPE_RQ) {
822 ena = (req->rq.ena & req->rq_mask.ena) |
823 (test_bit(req->qidx, pfvf->rq_bmap) &
826 __set_bit(req->qidx, pfvf->rq_bmap);
828 __clear_bit(req->qidx, pfvf->rq_bmap);
830 if (req->ctype == NIX_AQ_CTYPE_SQ) {
831 ena = (req->sq.ena & req->sq_mask.ena) |
832 (test_bit(req->qidx, pfvf->sq_bmap) &
835 __set_bit(req->qidx, pfvf->sq_bmap);
837 __clear_bit(req->qidx, pfvf->sq_bmap);
839 if (req->ctype == NIX_AQ_CTYPE_CQ) {
840 ena = (req->cq.ena & req->cq_mask.ena) |
841 (test_bit(req->qidx, pfvf->cq_bmap) &
844 __set_bit(req->qidx, pfvf->cq_bmap);
846 __clear_bit(req->qidx, pfvf->cq_bmap);
851 /* Copy read context into mailbox */
852 if (req->op == NIX_AQ_INSTOP_READ) {
853 if (req->ctype == NIX_AQ_CTYPE_RQ)
854 memcpy(&rsp->rq, ctx,
855 sizeof(struct nix_rq_ctx_s));
856 else if (req->ctype == NIX_AQ_CTYPE_SQ)
857 memcpy(&rsp->sq, ctx,
858 sizeof(struct nix_sq_ctx_s));
859 else if (req->ctype == NIX_AQ_CTYPE_CQ)
860 memcpy(&rsp->cq, ctx,
861 sizeof(struct nix_cq_ctx_s));
862 else if (req->ctype == NIX_AQ_CTYPE_RSS)
863 memcpy(&rsp->rss, ctx,
864 sizeof(struct nix_rsse_s));
865 else if (req->ctype == NIX_AQ_CTYPE_MCE)
866 memcpy(&rsp->mce, ctx,
867 sizeof(struct nix_rx_mce_s));
871 spin_unlock(&aq->lock);
875 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
876 struct nix_aq_enq_rsp *rsp)
878 struct nix_hw *nix_hw;
881 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
883 return NIX_AF_ERR_AF_LF_INVALID;
885 nix_hw = get_nix_hw(rvu->hw, blkaddr);
889 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
892 static const char *nix_get_ctx_name(int ctype)
895 case NIX_AQ_CTYPE_CQ:
897 case NIX_AQ_CTYPE_SQ:
899 case NIX_AQ_CTYPE_RQ:
901 case NIX_AQ_CTYPE_RSS:
907 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
909 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
910 struct nix_aq_enq_req aq_req;
915 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
916 return NIX_AF_ERR_AQ_ENQUEUE;
918 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
919 aq_req.hdr.pcifunc = req->hdr.pcifunc;
921 if (req->ctype == NIX_AQ_CTYPE_CQ) {
923 aq_req.cq_mask.ena = 1;
924 aq_req.cq.bp_ena = 0;
925 aq_req.cq_mask.bp_ena = 1;
926 q_cnt = pfvf->cq_ctx->qsize;
927 bmap = pfvf->cq_bmap;
929 if (req->ctype == NIX_AQ_CTYPE_SQ) {
931 aq_req.sq_mask.ena = 1;
932 q_cnt = pfvf->sq_ctx->qsize;
933 bmap = pfvf->sq_bmap;
935 if (req->ctype == NIX_AQ_CTYPE_RQ) {
937 aq_req.rq_mask.ena = 1;
938 q_cnt = pfvf->rq_ctx->qsize;
939 bmap = pfvf->rq_bmap;
942 aq_req.ctype = req->ctype;
943 aq_req.op = NIX_AQ_INSTOP_WRITE;
945 for (qidx = 0; qidx < q_cnt; qidx++) {
946 if (!test_bit(qidx, bmap))
949 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
952 dev_err(rvu->dev, "Failed to disable %s:%d context\n",
953 nix_get_ctx_name(req->ctype), qidx);
960 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
961 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
963 struct nix_aq_enq_req lock_ctx_req;
966 if (req->op != NIX_AQ_INSTOP_INIT)
969 if (req->ctype == NIX_AQ_CTYPE_MCE ||
970 req->ctype == NIX_AQ_CTYPE_DYNO)
973 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
974 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
975 lock_ctx_req.ctype = req->ctype;
976 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
977 lock_ctx_req.qidx = req->qidx;
978 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
981 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
983 nix_get_ctx_name(req->ctype), req->qidx);
987 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
988 struct nix_aq_enq_req *req,
989 struct nix_aq_enq_rsp *rsp)
993 err = rvu_nix_aq_enq_inst(rvu, req, rsp);
995 err = nix_lf_hwctx_lockdown(rvu, req);
1000 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1001 struct nix_aq_enq_req *req,
1002 struct nix_aq_enq_rsp *rsp)
1004 return rvu_nix_aq_enq_inst(rvu, req, rsp);
1007 /* CN10K mbox handler */
1008 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
1009 struct nix_cn10k_aq_enq_req *req,
1010 struct nix_cn10k_aq_enq_rsp *rsp)
1012 return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
1013 (struct nix_aq_enq_rsp *)rsp);
1016 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
1017 struct hwctx_disable_req *req,
1018 struct msg_rsp *rsp)
1020 return nix_lf_hwctx_disable(rvu, req);
1023 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1024 struct nix_lf_alloc_req *req,
1025 struct nix_lf_alloc_rsp *rsp)
1027 int nixlf, qints, hwctx_size, intf, err, rc = 0;
1028 struct rvu_hwinfo *hw = rvu->hw;
1029 u16 pcifunc = req->hdr.pcifunc;
1030 struct rvu_block *block;
1031 struct rvu_pfvf *pfvf;
1035 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1036 return NIX_AF_ERR_PARAM;
1039 req->way_mask &= 0xFFFF;
1041 pfvf = rvu_get_pfvf(rvu, pcifunc);
1042 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1043 if (!pfvf->nixlf || blkaddr < 0)
1044 return NIX_AF_ERR_AF_LF_INVALID;
1046 block = &hw->block[blkaddr];
1047 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1049 return NIX_AF_ERR_AF_LF_INVALID;
1051 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1052 if (req->npa_func) {
1053 /* If default, use 'this' NIXLF's PFFUNC */
1054 if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1055 req->npa_func = pcifunc;
1056 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1057 return NIX_AF_INVAL_NPA_PF_FUNC;
1060 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1061 if (req->sso_func) {
1062 /* If default, use 'this' NIXLF's PFFUNC */
1063 if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1064 req->sso_func = pcifunc;
1065 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1066 return NIX_AF_INVAL_SSO_PF_FUNC;
1069 /* If RSS is being enabled, check if requested config is valid.
1070 * RSS table size should be a power of two, otherwise
1071 * RSS_GRP::OFFSET + adder might go beyond that group or
1072 * the entire table won't be usable.
1074 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1075 !is_power_of_2(req->rss_sz)))
1076 return NIX_AF_ERR_RSS_SIZE_INVALID;
1079 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1080 return NIX_AF_ERR_RSS_GRPS_INVALID;
1082 /* Reset this NIX LF */
1083 err = rvu_lf_reset(rvu, block, nixlf);
1085 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1086 block->addr - BLKADDR_NIX0, nixlf);
1087 return NIX_AF_ERR_LF_RESET;
1090 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
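/* NIX_AF_CONST3 reports log2 of each HW context size in 4-bit fields;
 * the code below picks SQ from bits [3:0], RQ from [7:4], CQ from [11:8],
 * RSS from [15:12], QINT from [23:20] and CINT from [27:24].
 */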
1092 /* Alloc NIX RQ HW context memory and config the base */
1093 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1094 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1098 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1102 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1103 (u64)pfvf->rq_ctx->iova);
1105 /* Set caching and queue count in HW */
1106 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1107 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
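/* Encoding used here and for SQ/CQ below: bit 36 is the caching enable
 * referred to in the comment above, the low bits hold (queue count - 1)
 * and req->way_mask lands at bit 20.
 */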
1109 /* Alloc NIX SQ HW context memory and config the base */
1110 hwctx_size = 1UL << (ctx_cfg & 0xF);
1111 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1115 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1119 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1120 (u64)pfvf->sq_ctx->iova);
1122 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1123 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1125 /* Alloc NIX CQ HW context memory and config the base */
1126 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1127 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1131 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1135 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1136 (u64)pfvf->cq_ctx->iova);
1138 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1139 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1141 /* Initialize receive side scaling (RSS) */
1142 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1143 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1144 req->rss_grps, hwctx_size, req->way_mask);
1148 /* Alloc memory for CQINT's HW contexts */
1149 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1150 qints = (cfg >> 24) & 0xFFF;
1151 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1152 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1156 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1157 (u64)pfvf->cq_ints_ctx->iova);
1159 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1160 BIT_ULL(36) | req->way_mask << 20);
1162 /* Alloc memory for QINT's HW contexts */
1163 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1164 qints = (cfg >> 12) & 0xFFF;
1165 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1166 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1170 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1171 (u64)pfvf->nix_qints_ctx->iova);
1172 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1173 BIT_ULL(36) | req->way_mask << 20);
1175 /* Setup VLANX TPIDs.
1176 * Use VLAN1 for 802.1Q
1177 * and VLAN0 for 802.1AD.
1179 cfg = (0x8100ULL << 16) | 0x88A8ULL;
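/* i.e. TPID 0x8100 (802.1Q) in bits [31:16] and 0x88A8 (802.1AD) in
 * bits [15:0] of NIX_AF_LFX_TX_CFG.
 */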
1180 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1182 /* Enable LMTST for this NIX LF */
1183 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1185 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1187 cfg = req->npa_func;
1189 cfg |= (u64)req->sso_func << 16;
1191 cfg |= (u64)req->xqe_sz << 33;
1192 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1194 /* Config Rx pkt length, csum checks and apad enable / disable */
1195 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1197 /* Configure pkind for TX parse config */
1198 cfg = NPC_TX_DEF_PKIND;
1199 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1201 intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1202 err = nix_interface_init(rvu, pcifunc, intf, nixlf);
1206 /* Disable NPC entries as NIXLF's contexts are not initialized yet */
1207 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1209 /* Configure RX VTAG Type 7 (strip) for vf vlan */
1210 rvu_write64(rvu, blkaddr,
1211 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1212 VTAGSIZE_T4 | VTAG_STRIP);
1217 nix_ctx_free(rvu, pfvf);
1221 /* Set macaddr of this PF/VF */
1222 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1224 /* set SQB size info */
1225 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1226 rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1227 rsp->rx_chan_base = pfvf->rx_chan_base;
1228 rsp->tx_chan_base = pfvf->tx_chan_base;
1229 rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1230 rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1231 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1232 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1233 /* Get HW supported stat count */
1234 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1235 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1236 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1237 /* Get count of CQ IRQs and error IRQs supported per LF */
1238 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1239 rsp->qints = ((cfg >> 12) & 0xFFF);
1240 rsp->cints = ((cfg >> 24) & 0xFFF);
1241 rsp->cgx_links = hw->cgx_links;
1242 rsp->lbk_links = hw->lbk_links;
1243 rsp->sdp_links = hw->sdp_links;
1248 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1249 struct msg_rsp *rsp)
1251 struct rvu_hwinfo *hw = rvu->hw;
1252 u16 pcifunc = req->hdr.pcifunc;
1253 struct rvu_block *block;
1254 int blkaddr, nixlf, err;
1255 struct rvu_pfvf *pfvf;
1257 pfvf = rvu_get_pfvf(rvu, pcifunc);
1258 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1259 if (!pfvf->nixlf || blkaddr < 0)
1260 return NIX_AF_ERR_AF_LF_INVALID;
1262 block = &hw->block[blkaddr];
1263 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1265 return NIX_AF_ERR_AF_LF_INVALID;
1267 if (req->flags & NIX_LF_DISABLE_FLOWS)
1268 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
1270 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
1272 /* Free any tx vtag def entries used by this NIX LF */
1273 if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
1274 nix_free_tx_vtag_entries(rvu, pcifunc);
1276 nix_interface_deinit(rvu, pcifunc, nixlf);
1278 /* Reset this NIX LF */
1279 err = rvu_lf_reset(rvu, block, nixlf);
1281 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1282 block->addr - BLKADDR_NIX0, nixlf);
1283 return NIX_AF_ERR_LF_RESET;
1286 nix_ctx_free(rvu, pfvf);
1291 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1292 struct nix_mark_format_cfg *req,
1293 struct nix_mark_format_cfg_rsp *rsp)
1295 u16 pcifunc = req->hdr.pcifunc;
1296 struct nix_hw *nix_hw;
1297 struct rvu_pfvf *pfvf;
1301 pfvf = rvu_get_pfvf(rvu, pcifunc);
1302 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1303 if (!pfvf->nixlf || blkaddr < 0)
1304 return NIX_AF_ERR_AF_LF_INVALID;
1306 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1310 cfg = (((u32)req->offset & 0x7) << 16) |
1311 (((u32)req->y_mask & 0xF) << 12) |
1312 (((u32)req->y_val & 0xF) << 8) |
1313 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1315 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1317 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1318 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1319 return NIX_AF_ERR_MARK_CFG_FAIL;
1322 rsp->mark_format_idx = rc;
1326 /* Disable shaping of pkts by a scheduler queue
1327 * at a given scheduler level.
1329 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1332 u64 cir_reg = 0, pir_reg = 0;
1336 case NIX_TXSCH_LVL_TL1:
1337 cir_reg = NIX_AF_TL1X_CIR(schq);
1338 pir_reg = 0; /* PIR not available at TL1 */
1340 case NIX_TXSCH_LVL_TL2:
1341 cir_reg = NIX_AF_TL2X_CIR(schq);
1342 pir_reg = NIX_AF_TL2X_PIR(schq);
1344 case NIX_TXSCH_LVL_TL3:
1345 cir_reg = NIX_AF_TL3X_CIR(schq);
1346 pir_reg = NIX_AF_TL3X_PIR(schq);
1348 case NIX_TXSCH_LVL_TL4:
1349 cir_reg = NIX_AF_TL4X_CIR(schq);
1350 pir_reg = NIX_AF_TL4X_PIR(schq);
1356 cfg = rvu_read64(rvu, blkaddr, cir_reg);
1357 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1361 cfg = rvu_read64(rvu, blkaddr, pir_reg);
1362 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1365 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1368 struct rvu_hwinfo *hw = rvu->hw;
1371 if (lvl >= hw->cap.nix_tx_aggr_lvl)
1374 /* Reset TL4's SDP link config */
1375 if (lvl == NIX_TXSCH_LVL_TL4)
1376 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1378 if (lvl != NIX_TXSCH_LVL_TL2)
1381 /* Reset TL2's CGX or LBK link config */
1382 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1383 rvu_write64(rvu, blkaddr,
1384 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1387 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1389 struct rvu_hwinfo *hw = rvu->hw;
1390 int pf = rvu_get_pf(pcifunc);
1391 u8 cgx_id = 0, lmac_id = 0;
1393 if (is_afvf(pcifunc)) {/* LBK links */
1394 return hw->cgx_links;
1395 } else if (is_pf_cgxmapped(rvu, pf)) {
1396 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1397 return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1401 return hw->cgx_links + hw->lbk_links;
1404 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1405 int link, int *start, int *end)
1407 struct rvu_hwinfo *hw = rvu->hw;
1408 int pf = rvu_get_pf(pcifunc);
1410 if (is_afvf(pcifunc)) { /* LBK links */
1411 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1412 *end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1413 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1414 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1415 *end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1416 } else { /* SDP link */
1417 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1418 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1419 *end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1423 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1424 struct nix_hw *nix_hw,
1425 struct nix_txsch_alloc_req *req)
1427 struct rvu_hwinfo *hw = rvu->hw;
1428 int schq, req_schq, free_cnt;
1429 struct nix_txsch *txsch;
1430 int link, start, end;
1432 txsch = &nix_hw->txsch[lvl];
1433 req_schq = req->schq_contig[lvl] + req->schq[lvl];
1438 link = nix_get_tx_link(rvu, pcifunc);
1440 /* For traffic aggregating scheduler level, one queue is enough */
1441 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1443 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1447 /* Get free SCHQ count and check if request can be accommodated */
1448 if (hw->cap.nix_fixed_txschq_mapping) {
1449 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1450 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1451 if (end <= txsch->schq.max && schq < end &&
1452 !test_bit(schq, txsch->schq.bmap))
1457 free_cnt = rvu_rsrc_free_count(&txsch->schq);
1460 if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
1461 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1463 /* If contiguous queues are needed, check for availability */
1464 if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1465 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1466 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1471 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1472 struct nix_txsch_alloc_rsp *rsp,
1473 int lvl, int start, int end)
1475 struct rvu_hwinfo *hw = rvu->hw;
1476 u16 pcifunc = rsp->hdr.pcifunc;
1479 /* For traffic aggregating levels, queue alloc is based
1480 * on the transmit link to which the PF_FUNC is mapped.
1482 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1483 /* A single TL queue is allocated */
1484 if (rsp->schq_contig[lvl]) {
1485 rsp->schq_contig[lvl] = 1;
1486 rsp->schq_contig_list[lvl][0] = start;
1489 /* Both contig and non-contig reqs don't make sense here */
1490 if (rsp->schq_contig[lvl])
1493 if (rsp->schq[lvl]) {
1495 rsp->schq_list[lvl][0] = start;
1500 /* Adjust the queue request count if HW supports
1501 * only one queue per level configuration.
1503 if (hw->cap.nix_fixed_txschq_mapping) {
1504 idx = pcifunc & RVU_PFVF_FUNC_MASK;
1506 if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1507 rsp->schq_contig[lvl] = 0;
1512 if (rsp->schq_contig[lvl]) {
1513 rsp->schq_contig[lvl] = 1;
1514 set_bit(schq, txsch->schq.bmap);
1515 rsp->schq_contig_list[lvl][0] = schq;
1517 } else if (rsp->schq[lvl]) {
1519 set_bit(schq, txsch->schq.bmap);
1520 rsp->schq_list[lvl][0] = schq;
1525 /* Allocate the requested contiguous queue indices first */
1526 if (rsp->schq_contig[lvl]) {
1527 schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1528 txsch->schq.max, start,
1529 rsp->schq_contig[lvl], 0);
1531 rsp->schq_contig[lvl] = 0;
1532 for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
1533 set_bit(schq, txsch->schq.bmap);
1534 rsp->schq_contig_list[lvl][idx] = schq;
1539 /* Allocate non-contiguous queue indices */
1540 if (rsp->schq[lvl]) {
1542 for (schq = start; schq < end; schq++) {
1543 if (!test_bit(schq, txsch->schq.bmap)) {
1544 set_bit(schq, txsch->schq.bmap);
1545 rsp->schq_list[lvl][idx++] = schq;
1547 if (idx == rsp->schq[lvl])
1550 /* Update how many were allocated */
1551 rsp->schq[lvl] = idx;
1555 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
1556 struct nix_txsch_alloc_req *req,
1557 struct nix_txsch_alloc_rsp *rsp)
1559 struct rvu_hwinfo *hw = rvu->hw;
1560 u16 pcifunc = req->hdr.pcifunc;
1561 int link, blkaddr, rc = 0;
1562 int lvl, idx, start, end;
1563 struct nix_txsch *txsch;
1564 struct rvu_pfvf *pfvf;
1565 struct nix_hw *nix_hw;
1569 pfvf = rvu_get_pfvf(rvu, pcifunc);
1570 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1571 if (!pfvf->nixlf || blkaddr < 0)
1572 return NIX_AF_ERR_AF_LF_INVALID;
1574 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1578 mutex_lock(&rvu->rsrc_lock);
1580 /* Check if request is valid as per HW capabilities
1581 * and can be accommodated.
1583 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1584 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
1589 /* Allocate requested Tx scheduler queues */
1590 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1591 txsch = &nix_hw->txsch[lvl];
1592 pfvf_map = txsch->pfvf_map;
1594 if (!req->schq[lvl] && !req->schq_contig[lvl])
1597 rsp->schq[lvl] = req->schq[lvl];
1598 rsp->schq_contig[lvl] = req->schq_contig[lvl];
1600 link = nix_get_tx_link(rvu, pcifunc);
1602 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1605 } else if (hw->cap.nix_fixed_txschq_mapping) {
1606 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1609 end = txsch->schq.max;
1612 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
1614 /* Reset queue config */
1615 for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1616 schq = rsp->schq_contig_list[lvl][idx];
1617 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1618 NIX_TXSCHQ_CFG_DONE))
1619 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1620 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1621 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1624 for (idx = 0; idx < req->schq[lvl]; idx++) {
1625 schq = rsp->schq_list[lvl][idx];
1626 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1627 NIX_TXSCHQ_CFG_DONE))
1628 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1629 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1630 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1634 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
1635 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
1636 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
1637 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1638 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
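/* Tell the PF/VF which TL level carries the link config: TL3 when bit 0
 * of NIX_AF_PSE_CHANNEL_LEVEL is set, TL2 otherwise.
 */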
1641 rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1643 mutex_unlock(&rvu->rsrc_lock);
1647 static void nix_smq_flush(struct rvu *rvu, int blkaddr,
1648 int smq, u16 pcifunc, int nixlf)
1650 int pf = rvu_get_pf(pcifunc);
1651 u8 cgx_id = 0, lmac_id = 0;
1652 int err, restore_tx_en = 0;
1655 /* enable cgx tx if disabled */
1656 if (is_pf_cgxmapped(rvu, pf)) {
1657 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1658 restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
1662 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
1663 /* Do SMQ flush and set enqueue xoff */
1664 cfg |= BIT_ULL(50) | BIT_ULL(49);
1665 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
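/* Bit 49 is the flush bit, polled below until hardware clears it;
 * bit 50 is the enqueue xoff mentioned in the comment above.
 */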
1667 /* Disable backpressure from physical link,
1668 * otherwise SMQ flush may stall.
1670 rvu_cgx_enadis_rx_bp(rvu, pf, false);
1672 /* Wait for flush to complete */
1673 err = rvu_poll_reg(rvu, blkaddr,
1674 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
1677 "NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
1679 rvu_cgx_enadis_rx_bp(rvu, pf, true);
1680 /* restore cgx tx state */
1682 cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
1685 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1687 int blkaddr, nixlf, lvl, schq, err;
1688 struct rvu_hwinfo *hw = rvu->hw;
1689 struct nix_txsch *txsch;
1690 struct nix_hw *nix_hw;
1692 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1694 return NIX_AF_ERR_AF_LF_INVALID;
1696 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1700 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1702 return NIX_AF_ERR_AF_LF_INVALID;
1704 /* Disable TL2/3 queue links before SMQ flush */
1705 mutex_lock(&rvu->rsrc_lock);
1706 for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1707 if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1710 txsch = &nix_hw->txsch[lvl];
1711 for (schq = 0; schq < txsch->schq.max; schq++) {
1712 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1714 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1719 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1720 for (schq = 0; schq < txsch->schq.max; schq++) {
1721 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1723 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1726 /* Now free scheduler queues to free pool */
1727 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1728 /* TLs above aggregation level are shared across a PF
1729 * and its VFs, hence skip freeing them.
1731 if (lvl >= hw->cap.nix_tx_aggr_lvl)
1734 txsch = &nix_hw->txsch[lvl];
1735 for (schq = 0; schq < txsch->schq.max; schq++) {
1736 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1738 rvu_free_rsrc(&txsch->schq, schq);
1739 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1742 mutex_unlock(&rvu->rsrc_lock);
1744 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1745 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1746 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1748 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1753 static int nix_txschq_free_one(struct rvu *rvu,
1754 struct nix_txsch_free_req *req)
1756 struct rvu_hwinfo *hw = rvu->hw;
1757 u16 pcifunc = req->hdr.pcifunc;
1758 int lvl, schq, nixlf, blkaddr;
1759 struct nix_txsch *txsch;
1760 struct nix_hw *nix_hw;
1763 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1765 return NIX_AF_ERR_AF_LF_INVALID;
1767 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1771 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1773 return NIX_AF_ERR_AF_LF_INVALID;
1775 lvl = req->schq_lvl;
1777 txsch = &nix_hw->txsch[lvl];
1779 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
1782 pfvf_map = txsch->pfvf_map;
1783 mutex_lock(&rvu->rsrc_lock);
1785 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
1786 mutex_unlock(&rvu->rsrc_lock);
1790 /* Flush if it is an SMQ. The onus of disabling
1791 * TL2/3 queue links before SMQ flush is on the user
1793 if (lvl == NIX_TXSCH_LVL_SMQ)
1794 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1796 /* Free the resource */
1797 rvu_free_rsrc(&txsch->schq, schq);
1798 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1799 mutex_unlock(&rvu->rsrc_lock);
1802 return NIX_AF_ERR_TLX_INVALID;
1805 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1806 struct nix_txsch_free_req *req,
1807 struct msg_rsp *rsp)
1809 if (req->flags & TXSCHQ_FREE_ALL)
1810 return nix_txschq_free(rvu, req->hdr.pcifunc);
1812 return nix_txschq_free_one(rvu, req);
1815 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1816 int lvl, u64 reg, u64 regval)
1818 u64 regbase = reg & 0xFFFF;
1821 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1824 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1825 /* Check if this schq belongs to this PF/VF or not */
1826 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1829 parent = (regval >> 16) & 0x1FF;
1830 /* Validate MDQ's TL4 parent */
1831 if (regbase == NIX_AF_MDQX_PARENT(0) &&
1832 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1835 /* Validate TL4's TL3 parent */
1836 if (regbase == NIX_AF_TL4X_PARENT(0) &&
1837 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1840 /* Validate TL3's TL2 parent */
1841 if (regbase == NIX_AF_TL3X_PARENT(0) &&
1842 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1845 /* Validate TL2's TL1 parent */
1846 if (regbase == NIX_AF_TL2X_PARENT(0) &&
1847 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1853 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
1857 if (hw->cap.nix_shaping)
1860 /* If shaping and coloring are not supported, then
1861 * *_CIR and *_PIR registers should not be configured.
1863 regbase = reg & 0xFFFF;
1866 case NIX_TXSCH_LVL_TL1:
1867 if (regbase == NIX_AF_TL1X_CIR(0))
1870 case NIX_TXSCH_LVL_TL2:
1871 if (regbase == NIX_AF_TL2X_CIR(0) ||
1872 regbase == NIX_AF_TL2X_PIR(0))
1875 case NIX_TXSCH_LVL_TL3:
1876 if (regbase == NIX_AF_TL3X_CIR(0) ||
1877 regbase == NIX_AF_TL3X_PIR(0))
1880 case NIX_TXSCH_LVL_TL4:
1881 if (regbase == NIX_AF_TL4X_CIR(0) ||
1882 regbase == NIX_AF_TL4X_PIR(0))
1889 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
1890 u16 pcifunc, int blkaddr)
1895 schq = nix_get_tx_link(rvu, pcifunc);
1896 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
1897 /* Skip if PF has already done the config */
1898 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
1900 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
1901 (TXSCH_TL1_DFLT_RR_PRIO << 1));
1902 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
1903 TXSCH_TL1_DFLT_RR_QTM);
1904 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
1905 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
1908 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1909 struct nix_txschq_config *req,
1910 struct msg_rsp *rsp)
1912 struct rvu_hwinfo *hw = rvu->hw;
1913 u16 pcifunc = req->hdr.pcifunc;
1914 u64 reg, regval, schq_regbase;
1915 struct nix_txsch *txsch;
1916 struct nix_hw *nix_hw;
1917 int blkaddr, idx, err;
1921 if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1922 req->num_regs > MAX_REGS_PER_MBOX_MSG)
1923 return NIX_AF_INVAL_TXSCHQ_CFG;
1925 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
1929 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1933 txsch = &nix_hw->txsch[req->lvl];
1934 pfvf_map = txsch->pfvf_map;
1936 if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
1937 pcifunc & RVU_PFVF_FUNC_MASK) {
1938 mutex_lock(&rvu->rsrc_lock);
1939 if (req->lvl == NIX_TXSCH_LVL_TL1)
1940 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
1941 mutex_unlock(&rvu->rsrc_lock);
1945 for (idx = 0; idx < req->num_regs; idx++) {
1946 reg = req->reg[idx];
1947 regval = req->regval[idx];
1948 schq_regbase = reg & 0xFFFF;
1950 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
1951 txsch->lvl, reg, regval))
1952 return NIX_AF_INVAL_TXSCHQ_CFG;
1954 /* Check if shaping and coloring is supported */
1955 if (!is_txschq_shaping_valid(hw, req->lvl, reg))
1958 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
1959 if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
1960 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1962 regval &= ~(0x7FULL << 24);
1963 regval |= ((u64)nixlf << 24);
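/* Bits [30:24] of SMQ_CFG carry the NIXLF; the slot-relative id supplied
 * by the PF/VF is overwritten with the HW NIXLF id here.
 */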
1966 /* Clear 'BP_ENA' config, if it's not allowed */
1967 if (!hw->cap.nix_tx_link_bp) {
1968 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
1969 (schq_regbase & 0xFF00) ==
1970 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
1971 regval &= ~BIT_ULL(13);
1974 /* Mark config as done for TL1 by PF */
1975 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
1976 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
1977 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1978 mutex_lock(&rvu->rsrc_lock);
1979 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
1980 NIX_TXSCHQ_CFG_DONE);
1981 mutex_unlock(&rvu->rsrc_lock);
1984 /* SMQ flush is special, hence split the register write such
1985 * that the flush is done first and the rest of the bits are written later.
1987 if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
1988 (regval & BIT_ULL(49))) {
1989 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1990 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1991 regval &= ~BIT_ULL(49);
1993 rvu_write64(rvu, blkaddr, reg, regval);
1999 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2000 struct nix_vtag_config *req)
2002 u64 regval = req->vtag_size;
2004 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2005 req->vtag_size > VTAGSIZE_T8)
2008 /* RX VTAG Type 7 reserved for vf vlan */
2009 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2010 return NIX_AF_ERR_RX_VTAG_INUSE;
2012 if (req->rx.capture_vtag)
2013 regval |= BIT_ULL(5);
2014 if (req->rx.strip_vtag)
2015 regval |= BIT_ULL(4);
2017 rvu_write64(rvu, blkaddr,
2018 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2022 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2023 u16 pcifunc, int index)
2025 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2026 struct nix_txvlan *vlan = &nix_hw->txvlan;
2028 if (vlan->entry2pfvf_map[index] != pcifunc)
2029 return NIX_AF_ERR_PARAM;
2031 rvu_write64(rvu, blkaddr,
2032 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2033 rvu_write64(rvu, blkaddr,
2034 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2036 vlan->entry2pfvf_map[index] = 0;
2037 rvu_free_rsrc(&vlan->rsrc, index);
2042 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
2044 struct nix_txvlan *vlan;
2045 struct nix_hw *nix_hw;
2048 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2052 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2053 vlan = &nix_hw->txvlan;
2055 mutex_lock(&vlan->rsrc_lock);
2056 /* Scan all the entries and free the ones mapped to 'pcifunc' */
2057 for (index = 0; index < vlan->rsrc.max; index++) {
2058 if (vlan->entry2pfvf_map[index] == pcifunc)
2059 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
2061 mutex_unlock(&vlan->rsrc_lock);
2064 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
2067 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2068 struct nix_txvlan *vlan = &nix_hw->txvlan;
2072 mutex_lock(&vlan->rsrc_lock);
2074 index = rvu_alloc_rsrc(&vlan->rsrc);
2076 mutex_unlock(&vlan->rsrc_lock);
2080 mutex_unlock(&vlan->rsrc_lock);
2082 regval = size ? vtag : vtag << 32;
2084 rvu_write64(rvu, blkaddr,
2085 NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
2086 rvu_write64(rvu, blkaddr,
2087 NIX_AF_TX_VTAG_DEFX_CTL(index), size);
2092 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
2093 struct nix_vtag_config *req)
2095 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2096 struct nix_txvlan *vlan = &nix_hw->txvlan;
2097 u16 pcifunc = req->hdr.pcifunc;
2098 int idx0 = req->tx.vtag0_idx;
2099 int idx1 = req->tx.vtag1_idx;
2102 if (req->tx.free_vtag0 && req->tx.free_vtag1)
2103 if (vlan->entry2pfvf_map[idx0] != pcifunc ||
2104 vlan->entry2pfvf_map[idx1] != pcifunc)
2105 return NIX_AF_ERR_PARAM;
2107 mutex_lock(&vlan->rsrc_lock);
2109 if (req->tx.free_vtag0) {
2110 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
2115 if (req->tx.free_vtag1)
2116 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
2119 mutex_unlock(&vlan->rsrc_lock);
2123 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
2124 struct nix_vtag_config *req,
2125 struct nix_vtag_config_rsp *rsp)
2127 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2128 struct nix_txvlan *vlan = &nix_hw->txvlan;
2129 u16 pcifunc = req->hdr.pcifunc;
2131 if (req->tx.cfg_vtag0) {
2133 nix_tx_vtag_alloc(rvu, blkaddr,
2134 req->tx.vtag0, req->vtag_size);
2136 if (rsp->vtag0_idx < 0)
2137 return NIX_AF_ERR_TX_VTAG_NOSPC;
2139 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
2142 if (req->tx.cfg_vtag1) {
2144 nix_tx_vtag_alloc(rvu, blkaddr,
2145 req->tx.vtag1, req->vtag_size);
2147 if (rsp->vtag1_idx < 0)
2150 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
2156 if (req->tx.cfg_vtag0)
2157 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
2159 return NIX_AF_ERR_TX_VTAG_NOSPC;
2162 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
2163 struct nix_vtag_config *req,
2164 struct nix_vtag_config_rsp *rsp)
2166 u16 pcifunc = req->hdr.pcifunc;
2167 int blkaddr, nixlf, err;
2169 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2173 if (req->cfg_type) {
2174 /* rx vtag configuration */
2175 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
2177 return NIX_AF_ERR_PARAM;
2179 /* tx vtag configuration */
2180 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
2181 (req->tx.free_vtag0 || req->tx.free_vtag1))
2182 return NIX_AF_ERR_PARAM;
2184 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
2185 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
2187 if (req->tx.free_vtag0 || req->tx.free_vtag1)
2188 return nix_tx_vtag_decfg(rvu, blkaddr, req);
2194 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
2195 int mce, u8 op, u16 pcifunc, int next, bool eol)
2197 struct nix_aq_enq_req aq_req;
2200 aq_req.hdr.pcifunc = 0;
2201 aq_req.ctype = NIX_AQ_CTYPE_MCE;
2205 /* Forward bcast pkts to RQ0, RSS not needed */
2207 aq_req.mce.index = 0;
2208 aq_req.mce.eol = eol;
2209 aq_req.mce.pf_func = pcifunc;
2210 aq_req.mce.next = next;
2212 /* All fields valid */
2213 *(u64 *)(&aq_req.mce_mask) = ~0ULL;
2215 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
2217 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
2218 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
2224 static int nix_update_mce_list(struct nix_mce_list *mce_list,
2225 u16 pcifunc, bool add)
2227 struct mce *mce, *tail = NULL;
2228 bool delete = false;
2230 /* Scan through the current list */
2231 hlist_for_each_entry(mce, &mce_list->head, node) {
2232 /* If already exists, then delete */
2233 if (mce->pcifunc == pcifunc && !add) {
2241 hlist_del(&mce->node);
2250 /* Add a new one to the list, at the tail */
2251 mce = kzalloc(sizeof(*mce), GFP_KERNEL);
2254 mce->pcifunc = pcifunc;
2256 hlist_add_head(&mce->node, &mce_list->head);
2258 hlist_add_behind(&mce->node, &tail->node);
2263 int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
2265 int err = 0, idx, next_idx, last_idx;
2266 struct nix_mce_list *mce_list;
2267 struct nix_mcast *mcast;
2268 struct nix_hw *nix_hw;
2269 struct rvu_pfvf *pfvf;
2273 /* Broadcast pkt replication is not needed for AF's VFs, hence skip */
2274 if (is_afvf(pcifunc))
2277 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2281 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2285 mcast = &nix_hw->mcast;
2287 /* Get this PF/VF func's MCE index */
2288 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
2289 idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
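/* Each PF owns a contiguous block of (numvfs + 1) MCEs starting at
 * bcast_mce_idx (see nix_setup_bcast_tables()): func 0 is the PF itself,
 * funcs 1..n are its VFs.
 */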
2291 mce_list = &pfvf->bcast_mce_list;
2292 if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
2294 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
2295 __func__, idx, mce_list->max,
2296 pcifunc >> RVU_PFVF_PF_SHIFT);
2300 mutex_lock(&mcast->mce_lock);
2302 err = nix_update_mce_list(mce_list, pcifunc, add);
2306 /* Disable MCAM entry in NPC */
2307 if (!mce_list->count) {
2308 rvu_npc_enable_bcast_entry(rvu, pcifunc, false);
2312 /* Dump the updated list to HW */
2313 idx = pfvf->bcast_mce_idx;
2314 last_idx = idx + mce_list->count - 1;
2315 hlist_for_each_entry(mce, &mce_list->head, node) {
2320 /* EOL should be set in last MCE */
2321 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
2322 mce->pcifunc, next_idx,
2323 (next_idx > last_idx) ? true : false);
2330 mutex_unlock(&mcast->mce_lock);
2334 static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
2336 struct nix_mcast *mcast = &nix_hw->mcast;
2337 int err, pf, numvfs, idx;
2338 struct rvu_pfvf *pfvf;
2342 /* Skip PF0 (i.e. the AF) */
2343 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
2344 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2345 /* If PF is not enabled, nothing to do */
2346 if (!((cfg >> 20) & 0x01))
2348 /* Get numVFs attached to this PF */
2349 numvfs = (cfg >> 12) & 0xFF;
2351 pfvf = &rvu->pf[pf];
2353 /* Is this NIX0/1 block mapped to this PF? */
2354 if (pfvf->nix_blkaddr != nix_hw->blkaddr)
2357 /* Save the start MCE */
2358 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2360 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
2362 for (idx = 0; idx < (numvfs + 1); idx++) {
2363 /* idx-0 is for PF, followed by VFs */
2364 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2366 /* Add dummy entries now, so that we don't have to check
2367 * for whether AQ_OP should be INIT/WRITE later on.
2368 * Will be updated when a NIXLF is attached/detached to
2371 err = nix_blk_setup_mce(rvu, nix_hw,
2372 pfvf->bcast_mce_idx + idx,
2382 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2384 struct nix_mcast *mcast = &nix_hw->mcast;
2385 struct rvu_hwinfo *hw = rvu->hw;
2388 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
2389 size = (1ULL << size);
2391 /* Alloc memory for multicast/mirror replication entries */
2392 err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
2393 (256UL << MC_TBL_SIZE), size);
2397 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
2398 (u64)mcast->mce_ctx->iova);
2400 /* Set max list length equal to the max number of VFs per PF + the PF itself */
2401 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
2402 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
2404 /* Alloc memory for multicast replication buffers */
2405 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
2406 err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
2407 (8UL << MC_BUF_CNT), size);
2411 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
2412 (u64)mcast->mcast_buf->iova);
2414 /* Alloc pkind for NIX internal RX multicast/mirror replay */
2415 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
2417 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
2418 BIT_ULL(63) | (mcast->replay_pkind << 24) |
2419 BIT_ULL(20) | MC_BUF_CNT);
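	/* Sizing sketch for the allocations and writes above (assuming
	 * MC_TBL_SZ_512 and MC_BUF_CNT_128 encode as 1 and 4, matching their
	 * names):
	 *   MCE entries = 256UL << MC_TBL_SIZE = 512
	 *   replay bufs = 8UL   << MC_BUF_CNT  = 128
	 * NIX_AF_RX_MCAST_CFG, as programmed here, packs what appears to be
	 * an enable bit (36), the max list length (max_vfs_per_pf, starting
	 * at bit 4) and the table size code in the low bits.
	 */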
2421 mutex_init(&mcast->mce_lock);
2423 return nix_setup_bcast_tables(rvu, nix_hw);
2426 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
2428 struct nix_txvlan *vlan = &nix_hw->txvlan;
2431 /* Allocate resource bitmap for TX vtag def registers */
2432 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
2433 err = rvu_alloc_bitmap(&vlan->rsrc);
2437 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
2438 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
2439 sizeof(u16), GFP_KERNEL);
2440 if (!vlan->entry2pfvf_map)
2443 mutex_init(&vlan->rsrc_lock);
2447 kfree(vlan->rsrc.bmap);
2451 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2453 struct nix_txsch *txsch;
2457 /* Get the scheduler queue count of each type and allocate
2458 * a bitmap for each, for alloc/free/attach operations.
2460 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2461 txsch = &nix_hw->txsch[lvl];
2464 case NIX_TXSCH_LVL_SMQ:
2465 reg = NIX_AF_MDQ_CONST;
2467 case NIX_TXSCH_LVL_TL4:
2468 reg = NIX_AF_TL4_CONST;
2470 case NIX_TXSCH_LVL_TL3:
2471 reg = NIX_AF_TL3_CONST;
2473 case NIX_TXSCH_LVL_TL2:
2474 reg = NIX_AF_TL2_CONST;
2476 case NIX_TXSCH_LVL_TL1:
2477 reg = NIX_AF_TL1_CONST;
2480 cfg = rvu_read64(rvu, blkaddr, reg);
2481 txsch->schq.max = cfg & 0xFFFF;
2482 err = rvu_alloc_bitmap(&txsch->schq);
2486 /* Allocate memory for the scheduler queue to
2487 * PF/VF pcifunc mapping info.
2489 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
2490 sizeof(u32), GFP_KERNEL);
2491 if (!txsch->pfvf_map)
2493 for (schq = 0; schq < txsch->schq.max; schq++)
2494 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2499 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
2500 int blkaddr, u32 cfg)
2504 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
2505 if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
2508 if (fmt_idx >= nix_hw->mark_format.total)
2511 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
2512 nix_hw->mark_format.cfg[fmt_idx] = cfg;
2513 nix_hw->mark_format.in_use++;
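	/* Usage sketch: rvu_nix_reserve_mark_format() de-duplicates mark
	 * format configs; reserving the same cfg twice returns the same
	 * index (values illustrative):
	 *
	 *   idx = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, 0x10003);
	 *   // a second call with 0x10003 returns the same idx instead of
	 *   // consuming another NIX_AF_MARK_FORMATX_CTL() slot
	 */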
2517 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
2521 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
2522 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
2523 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
2524 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
2525 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
2526 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
2527 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
2528 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
2529 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
2534 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
2535 nix_hw->mark_format.total = (u8)total;
2536 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
2538 if (!nix_hw->mark_format.cfg)
2540 for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
2541 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
2543 dev_err(rvu->dev, "Err %d in setup mark format %d\n",
2550 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
2552 /* CN10K supports LBK FIFO size 72 KB */
2553 if (rvu->hw->lbk_bufsize == 0x12000)
2554 *max_mtu = CN10K_LBK_LINK_MAX_FRS;
2556 *max_mtu = NIC_HW_MAX_FRS;
2559 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
2561 /* RPM supports FIFO len 128 KB */
2562 if (rvu_cgx_get_fifolen(rvu) == 0x20000)
2563 *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
2565 *max_mtu = NIC_HW_MAX_FRS;
2568 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
2569 struct nix_hw_info *rsp)
2571 u16 pcifunc = req->hdr.pcifunc;
2574 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2576 return NIX_AF_ERR_AF_LF_INVALID;
2578 if (is_afvf(pcifunc))
2579 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
2581 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
2583 rsp->min_mtu = NIC_HW_MIN_FRS;
2587 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
2588 struct msg_rsp *rsp)
2590 u16 pcifunc = req->hdr.pcifunc;
2591 int i, nixlf, blkaddr, err;
2594 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2598 /* Get stats count supported by HW */
2599 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
2601 /* Reset tx stats */
2602 for (i = 0; i < ((stats >> 24) & 0xFF); i++)
2603 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
2605 /* Reset rx stats */
2606 for (i = 0; i < ((stats >> 32) & 0xFF); i++)
2607 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
2612 /* Returns the ALG index to be set into NPC_RX_ACTION */
2613 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2617 /* Scan over existing algo entries to find a match */
2618 for (i = 0; i < nix_hw->flowkey.in_use; i++)
2619 if (nix_hw->flowkey.flowkey[i] == flow_cfg)
2625 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2627 int idx, nr_field, key_off, field_marker, keyoff_marker;
2628 int max_key_off, max_bit_pos, group_member;
2629 struct nix_rx_flowkey_alg *field;
2630 struct nix_rx_flowkey_alg tmp;
2631 u32 key_type, valid_key;
2632 int l4_key_offset = 0;
2637 #define FIELDS_PER_ALG 5
2638 #define MAX_KEY_OFF 40
2639 /* Clear all fields */
2640 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2642 /* Each of the 32 possible flow key algorithm definitions should
2643 * fall into the above incremental config (except ALG0). Otherwise a
2644 * single NPC MCAM entry is not sufficient for supporting RSS.
2646 * If a different definition or combination is needed, then the NPC MCAM
2647 * has to be programmed to filter such pkts and its action should
2648 * point to this definition to calculate the flowtag or hash.
2650 * The `for loop` goes over _all_ protocol fields and the following
2651 * variables depict the state machine's forward progress logic.
2653 * keyoff_marker - Enabled when hash byte length needs to be accounted
2654 * in field->key_offset update.
2655 * field_marker - Enabled when a new field needs to be selected.
2656 * group_member - Enabled when protocol is part of a group.
2659 keyoff_marker = 0; max_key_off = 0; group_member = 0;
2660 nr_field = 0; key_off = 0; field_marker = 1;
2661 field = &tmp; max_bit_pos = fls(flow_cfg);
2663 idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2664 key_off < MAX_KEY_OFF; idx++) {
2665 key_type = BIT(idx);
2666 valid_key = flow_cfg & key_type;
2667 /* Found a field marker, reset the field values */
2669 memset(&tmp, 0, sizeof(tmp));
2671 field_marker = true;
2672 keyoff_marker = true;
2674 case NIX_FLOW_KEY_TYPE_PORT:
2675 field->sel_chan = true;
2676 /* This should be set to 1, when SEL_CHAN is set */
2679 case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
2680 field->lid = NPC_LID_LC;
2681 field->hdr_offset = 9; /* offset */
2682 field->bytesm1 = 0; /* 1 byte */
2683 field->ltype_match = NPC_LT_LC_IP;
2684 field->ltype_mask = 0xF;
2686 case NIX_FLOW_KEY_TYPE_IPV4:
2687 case NIX_FLOW_KEY_TYPE_INNR_IPV4:
2688 field->lid = NPC_LID_LC;
2689 field->ltype_match = NPC_LT_LC_IP;
2690 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
2691 field->lid = NPC_LID_LG;
2692 field->ltype_match = NPC_LT_LG_TU_IP;
2694 field->hdr_offset = 12; /* SIP offset */
2695 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2696 field->ltype_mask = 0xF; /* Match only IPv4 */
2697 keyoff_marker = false;
2699 case NIX_FLOW_KEY_TYPE_IPV6:
2700 case NIX_FLOW_KEY_TYPE_INNR_IPV6:
2701 field->lid = NPC_LID_LC;
2702 field->ltype_match = NPC_LT_LC_IP6;
2703 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
2704 field->lid = NPC_LID_LG;
2705 field->ltype_match = NPC_LT_LG_TU_IP6;
2707 field->hdr_offset = 8; /* SIP offset */
2708 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2709 field->ltype_mask = 0xF; /* Match only IPv6 */
2711 case NIX_FLOW_KEY_TYPE_TCP:
2712 case NIX_FLOW_KEY_TYPE_UDP:
2713 case NIX_FLOW_KEY_TYPE_SCTP:
2714 case NIX_FLOW_KEY_TYPE_INNR_TCP:
2715 case NIX_FLOW_KEY_TYPE_INNR_UDP:
2716 case NIX_FLOW_KEY_TYPE_INNR_SCTP:
2717 field->lid = NPC_LID_LD;
2718 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
2719 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
2720 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
2721 field->lid = NPC_LID_LH;
2722 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2724 /* Ltype enum values under NPC_LID_LD and NPC_LID_LH are the same,
2725 * so no need to change the ltype_match, just change
2726 * the lid for inner protocols
2728 BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
2729 (int)NPC_LT_LH_TU_TCP);
2730 BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
2731 (int)NPC_LT_LH_TU_UDP);
2732 BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
2733 (int)NPC_LT_LH_TU_SCTP);
2735 if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
2736 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
2738 field->ltype_match |= NPC_LT_LD_TCP;
2739 group_member = true;
2740 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
2741 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
2743 field->ltype_match |= NPC_LT_LD_UDP;
2744 group_member = true;
2745 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2746 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
2748 field->ltype_match |= NPC_LT_LD_SCTP;
2749 group_member = true;
2751 field->ltype_mask = ~field->ltype_match;
2752 if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2753 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
2754 /* Handle the case where any of the group items
2755 * is enabled in the group but not the final one
2759 group_member = false;
2762 field_marker = false;
2763 keyoff_marker = false;
2766 /* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
2767 * remember the TCP key offset within the 40-byte hash key.
2769 if (key_type == NIX_FLOW_KEY_TYPE_TCP)
2770 l4_key_offset = key_off;
2772 case NIX_FLOW_KEY_TYPE_NVGRE:
2773 field->lid = NPC_LID_LD;
2774 field->hdr_offset = 4; /* VSID offset */
2776 field->ltype_match = NPC_LT_LD_NVGRE;
2777 field->ltype_mask = 0xF;
2779 case NIX_FLOW_KEY_TYPE_VXLAN:
2780 case NIX_FLOW_KEY_TYPE_GENEVE:
2781 field->lid = NPC_LID_LE;
2783 field->hdr_offset = 4;
2784 field->ltype_mask = 0xF;
2785 field_marker = false;
2786 keyoff_marker = false;
2788 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
2789 field->ltype_match |= NPC_LT_LE_VXLAN;
2790 group_member = true;
2793 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
2794 field->ltype_match |= NPC_LT_LE_GENEVE;
2795 group_member = true;
2798 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
2800 field->ltype_mask = ~field->ltype_match;
2801 field_marker = true;
2802 keyoff_marker = true;
2804 group_member = false;
2808 case NIX_FLOW_KEY_TYPE_ETH_DMAC:
2809 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
2810 field->lid = NPC_LID_LA;
2811 field->ltype_match = NPC_LT_LA_ETHER;
2812 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
2813 field->lid = NPC_LID_LF;
2814 field->ltype_match = NPC_LT_LF_TU_ETHER;
2816 field->hdr_offset = 0;
2817 field->bytesm1 = 5; /* DMAC 6 Byte */
2818 field->ltype_mask = 0xF;
2820 case NIX_FLOW_KEY_TYPE_IPV6_EXT:
2821 field->lid = NPC_LID_LC;
2822 field->hdr_offset = 40; /* IPV6 hdr */
2823 field->bytesm1 = 0; /* 1 byte ext hdr */
2824 field->ltype_match = NPC_LT_LC_IP6_EXT;
2825 field->ltype_mask = 0xF;
2827 case NIX_FLOW_KEY_TYPE_GTPU:
2828 field->lid = NPC_LID_LE;
2829 field->hdr_offset = 4;
2830 field->bytesm1 = 3; /* 4 bytes TID */
2831 field->ltype_match = NPC_LT_LE_GTPU;
2832 field->ltype_mask = 0xF;
2834 case NIX_FLOW_KEY_TYPE_VLAN:
2835 field->lid = NPC_LID_LB;
2836 field->hdr_offset = 2; /* Skip TPID (2-bytes) */
2837 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
2838 field->ltype_match = NPC_LT_LB_CTAG;
2839 field->ltype_mask = 0xF;
2840 field->fn_mask = 1; /* Mask out the first nibble */
2842 case NIX_FLOW_KEY_TYPE_AH:
2843 case NIX_FLOW_KEY_TYPE_ESP:
2844 field->hdr_offset = 0;
2845 field->bytesm1 = 7; /* SPI + sequence number */
2846 field->ltype_mask = 0xF;
2847 field->lid = NPC_LID_LE;
2848 field->ltype_match = NPC_LT_LE_ESP;
2849 if (key_type == NIX_FLOW_KEY_TYPE_AH) {
2850 field->lid = NPC_LID_LD;
2851 field->ltype_match = NPC_LT_LD_AH;
2852 field->hdr_offset = 4;
2853 keyoff_marker = false;
2859 /* Found a valid flow key type */
2861 /* Use the key offset of TCP/UDP/SCTP fields
2862 * for ESP/AH fields.
2864 if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
2865 key_type == NIX_FLOW_KEY_TYPE_AH)
2866 key_off = l4_key_offset;
2867 field->key_offset = key_off;
2868 memcpy(&alg[nr_field], field, sizeof(*field));
2869 max_key_off = max(max_key_off, field->bytesm1 + 1);
2871 /* Found a field marker, get the next field */
2876 /* Found a keyoff marker, update the new key_off */
2877 if (keyoff_marker) {
2878 key_off += max_key_off;
2882 /* Processed all the flow key types */
2883 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
2886 return NIX_AF_ERR_RSS_NOSPC_FIELD;
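/* A rough walk-through of the state machine above for the default config
 * NIX_FLOW_KEY_TYPE_IPV4 | IPV6 | TCP (offsets are per this reading of the
 * code, not from the HRM): the IPv4 SIP+DIP field is placed at key offset 0
 * (8 bytes) with keyoff_marker left false, so the IPv6 SIP+DIP field shares
 * offset 0 (32 bytes; a packet is either v4 or v6), and the TCP sport+dport
 * field then lands at offset 32, staying within the MAX_KEY_OFF (40) byte
 * hash key.
 */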
2889 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
2891 u64 field[FIELDS_PER_ALG];
2895 hw = get_nix_hw(rvu->hw, blkaddr);
2899 /* No room to add a new flow hash algorithm */
2900 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
2901 return NIX_AF_ERR_RSS_NOSPC_ALGO;
2903 /* Generate algo fields for the given flow_cfg */
2904 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
2908 /* Update ALGX_FIELDX register with generated fields */
2909 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2910 rvu_write64(rvu, blkaddr,
2911 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
2914 /* Store the flow_cfg for further lookup */
2915 rc = hw->flowkey.in_use;
2916 hw->flowkey.flowkey[rc] = flow_cfg;
2917 hw->flowkey.in_use++;
2922 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
2923 struct nix_rss_flowkey_cfg *req,
2924 struct nix_rss_flowkey_cfg_rsp *rsp)
2926 u16 pcifunc = req->hdr.pcifunc;
2927 int alg_idx, nixlf, blkaddr;
2928 struct nix_hw *nix_hw;
2931 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2935 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2939 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
2940 /* Failed to get an algo index from the existing list, reserve a new one */
2942 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
2947 rsp->alg_idx = alg_idx;
2948 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
2949 alg_idx, req->mcam_index);
2953 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
2955 u32 flowkey_cfg, minkey_cfg;
2958 /* Disable all flow key ALGX_FIELDX registers */
2959 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
2960 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2961 rvu_write64(rvu, blkaddr,
2962 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
2966 /* IPv4/IPv6 SIP/DIPs */
2967 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
2968 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2972 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2973 minkey_cfg = flowkey_cfg;
2974 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
2975 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2979 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2980 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
2981 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2985 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2986 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
2987 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2991 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
2992 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2993 NIX_FLOW_KEY_TYPE_UDP;
2994 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2998 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2999 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3000 NIX_FLOW_KEY_TYPE_SCTP;
3001 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3005 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3006 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
3007 NIX_FLOW_KEY_TYPE_SCTP;
3008 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3012 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3013 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3014 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
3015 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3022 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
3023 struct nix_set_mac_addr *req,
3024 struct msg_rsp *rsp)
3026 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
3027 u16 pcifunc = req->hdr.pcifunc;
3028 int blkaddr, nixlf, err;
3029 struct rvu_pfvf *pfvf;
3031 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3035 pfvf = rvu_get_pfvf(rvu, pcifunc);
3037 /* VF can't overwrite admin(PF) changes */
3038 if (from_vf && pfvf->pf_set_vf_cfg)
3041 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
3043 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
3044 pfvf->rx_chan_base, req->mac_addr);
3049 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
3050 struct msg_req *req,
3051 struct nix_get_mac_addr_rsp *rsp)
3053 u16 pcifunc = req->hdr.pcifunc;
3054 struct rvu_pfvf *pfvf;
3056 if (!is_nixlf_attached(rvu, pcifunc))
3057 return NIX_AF_ERR_AF_LF_INVALID;
3059 pfvf = rvu_get_pfvf(rvu, pcifunc);
3061 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
3066 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
3067 struct msg_rsp *rsp)
3069 bool allmulti = false, disable_promisc = false;
3070 u16 pcifunc = req->hdr.pcifunc;
3071 int blkaddr, nixlf, err;
3072 struct rvu_pfvf *pfvf;
3074 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3078 pfvf = rvu_get_pfvf(rvu, pcifunc);
3080 if (req->mode & NIX_RX_MODE_PROMISC)
3082 else if (req->mode & NIX_RX_MODE_ALLMULTI)
3085 disable_promisc = true;
3087 if (disable_promisc)
3088 rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
3090 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
3091 pfvf->rx_chan_base, allmulti);
3095 static void nix_find_link_frs(struct rvu *rvu,
3096 struct nix_frs_cfg *req, u16 pcifunc)
3098 int pf = rvu_get_pf(pcifunc);
3099 struct rvu_pfvf *pfvf;
3104 /* Update with requester's min/max lengths */
3105 pfvf = rvu_get_pfvf(rvu, pcifunc);
3106 pfvf->maxlen = req->maxlen;
3107 if (req->update_minlen)
3108 pfvf->minlen = req->minlen;
3110 maxlen = req->maxlen;
3111 minlen = req->update_minlen ? req->minlen : 0;
3113 /* Get this PF's numVFs and starting hwvf */
3114 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
3116 /* For each VF, compare requested max/minlen */
3117 for (vf = 0; vf < numvfs; vf++) {
3118 pfvf = &rvu->hwvf[hwvf + vf];
3119 if (pfvf->maxlen > maxlen)
3120 maxlen = pfvf->maxlen;
3121 if (req->update_minlen &&
3122 pfvf->minlen && pfvf->minlen < minlen)
3123 minlen = pfvf->minlen;
3126 /* Compare requested max/minlen with PF's max/minlen */
3127 pfvf = &rvu->pf[pf];
3128 if (pfvf->maxlen > maxlen)
3129 maxlen = pfvf->maxlen;
3130 if (req->update_minlen &&
3131 pfvf->minlen && pfvf->minlen < minlen)
3132 minlen = pfvf->minlen;
3134 /* Update the request with the max/min of the PF and its VFs */
3135 req->maxlen = maxlen;
3136 if (req->update_minlen)
3137 req->minlen = minlen;
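/* Example of the aggregation above (illustrative numbers): if a PF requests
 * maxlen 1518 but one of its VFs earlier configured 9212, req->maxlen is
 * raised to 9212 so the shared RX link still accepts the largest frame any
 * of them expects; minlen is similarly lowered to the smallest non-zero
 * value when update_minlen is set.
 */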
3140 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
3141 struct msg_rsp *rsp)
3143 struct rvu_hwinfo *hw = rvu->hw;
3144 u16 pcifunc = req->hdr.pcifunc;
3145 int pf = rvu_get_pf(pcifunc);
3146 int blkaddr, schq, link = -1;
3147 struct nix_txsch *txsch;
3148 u64 cfg, lmac_fifo_len;
3149 struct nix_hw *nix_hw;
3150 u8 cgx = 0, lmac = 0;
3153 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3155 return NIX_AF_ERR_AF_LF_INVALID;
3157 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3161 if (is_afvf(pcifunc))
3162 rvu_get_lbk_link_max_frs(rvu, &max_mtu);
3164 rvu_get_lmac_link_max_frs(rvu, &max_mtu);
3166 if (!req->sdp_link && req->maxlen > max_mtu)
3167 return NIX_AF_ERR_FRS_INVALID;
3169 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
3170 return NIX_AF_ERR_FRS_INVALID;
3172 /* Check if the requester wants to update SMQs */
3173 if (!req->update_smq)
3176 /* Update min/maxlen in each of the SMQ attached to this PF/VF */
3177 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
3178 mutex_lock(&rvu->rsrc_lock);
3179 for (schq = 0; schq < txsch->schq.max; schq++) {
3180 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
3182 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
3183 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
3184 if (req->update_minlen)
3185 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
3186 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
3188 mutex_unlock(&rvu->rsrc_lock);
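	/* Per the masks used above, NIX_AF_SMQX_CFG carries maxlen in bits
	 * <23:8> and minlen in bits <6:0>. E.g. with an assumed maxlen of
	 * 9212 and minlen of 40:
	 *   cfg = (cfg & ~(0xFFFFULL << 8)) | (9212ULL << 8);
	 *   cfg = (cfg & ~0x7FULL) | 40;
	 */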
3191 /* Check if config is for SDP link */
3192 if (req->sdp_link) {
3194 return NIX_AF_ERR_RX_LINK_INVALID;
3195 link = hw->cgx_links + hw->lbk_links;
3199 /* Check if the request is from CGX mapped RVU PF */
3200 if (is_pf_cgxmapped(rvu, pf)) {
3201 /* Get CGX and LMAC to which this PF is mapped and find link */
3202 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
3203 link = (cgx * hw->lmac_per_cgx) + lmac;
3204 } else if (pf == 0) {
3205 /* For VFs of PF0, ingress is the LBK port, so configure the LBK link */
3206 link = hw->cgx_links;
3210 return NIX_AF_ERR_RX_LINK_INVALID;
3212 nix_find_link_frs(rvu, req, pcifunc);
3215 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
3216 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
3217 if (req->update_minlen)
3218 cfg = (cfg & ~0xFFFFULL) | req->minlen;
3219 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
3221 if (req->sdp_link || pf == 0)
3224 /* Update transmit credits for CGX links */
3226 rvu_cgx_get_fifolen(rvu) /
3227 cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3228 cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
3229 cfg &= ~(0xFFFFFULL << 12);
3230 cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
3231 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
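	/* Worked example for the credit update above (assumed numbers): with
	 * a 128 KB (0x20000) CGX FIFO shared by 4 LMACs, lmac_fifo_len is
	 * 32768 bytes; for req->maxlen = 9212 the byte credits written to
	 * bits <31:12> of NIX_AF_TX_LINKX_NORM_CREDIT come to
	 * (32768 - 9212) / 16 = 1472.
	 */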
3235 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
3236 struct msg_rsp *rsp)
3238 int nixlf, blkaddr, err;
3241 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
3245 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
3246 /* Set the interface configuration */
3247 if (req->len_verify & BIT(0))
3250 cfg &= ~BIT_ULL(41);
3252 if (req->len_verify & BIT(1))
3255 cfg &= ~BIT_ULL(40);
3257 if (req->csum_verify & BIT(0))
3260 cfg &= ~BIT_ULL(37);
3262 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
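	/* As wired up in this handler, req->len_verify bits 0/1 map to
	 * NIX_AF_LFX_RX_CFG bits 41 and 40, and req->csum_verify bit 0 maps
	 * to bit 37; each request bit sets its CFG bit when present and
	 * clears it otherwise.
	 */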
3267 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
3269 /* CN10K supports a 72 KB FIFO size and a max packet size of 64 KB */
3270 if (rvu->hw->lbk_bufsize == 0x12000)
3271 return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
3273 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
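/* Rough numbers for the CN10K branch above: with lbk_bufsize = 0x12000
 * (72 KB) and an LBK max frame around 64 KB, the credit count comes to
 * roughly (0x12000 - 0x10000) / 16 = 512; other silicon falls back to the
 * fixed 1600 value.
 */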
3276 static void nix_link_config(struct rvu *rvu, int blkaddr)
3278 struct rvu_hwinfo *hw = rvu->hw;
3279 int cgx, lmac_cnt, slink, link;
3280 u16 lbk_max_frs, lmac_max_frs;
3283 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
3284 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
3286 /* Set default min/max packet lengths allowed on NIX Rx links.
3288 * With the HW-reset minlen value of 60 bytes, HW would treat ARP pkts
3289 * as undersized and report them to SW as error pkts, hence
3290 * minlen is set to 40 bytes.
3292 for (link = 0; link < hw->cgx_links; link++) {
3293 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3294 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
3297 for (link = hw->cgx_links; link < hw->cgx_links + hw->lbk_links; link++) {
3298 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3299 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
3301 if (hw->sdp_links) {
3302 link = hw->cgx_links + hw->lbk_links;
3303 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3304 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
3307 /* Set credits for Tx links assuming max packet length allowed.
3308 * This will be reconfigured based on MTU set for PF/VF.
3310 for (cgx = 0; cgx < hw->cgx; cgx++) {
3311 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3312 tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
3314 /* Enable credits and set credit pkt count to max allowed */
3315 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3316 slink = cgx * hw->lmac_per_cgx;
3317 for (link = slink; link < (slink + lmac_cnt); link++) {
3318 rvu_write64(rvu, blkaddr,
3319 NIX_AF_TX_LINKX_NORM_CREDIT(link),
3324 /* Set Tx credits for LBK link */
3325 slink = hw->cgx_links;
3326 for (link = slink; link < (slink + hw->lbk_links); link++) {
3327 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
3328 /* Enable credits and set credit pkt count to max allowed */
3329 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3330 rvu_write64(rvu, blkaddr,
3331 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
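	/* Layout of NIX_AF_TX_LINKX_NORM_CREDIT as programmed here: byte
	 * credits go into the field starting at bit 12 and, per the comment
	 * above, 0x1FF at bits <10:2> is the max packet credit count while
	 * bit 1 enables credits. The per-link value is recomputed later when
	 * a PF/VF changes its MTU via rvu_mbox_handler_nix_set_hw_frs().
	 */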
3335 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
3340 /* Start X2P bus calibration */
3341 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3342 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
3343 /* Wait for calibration to complete */
3344 err = rvu_poll_reg(rvu, blkaddr,
3345 NIX_AF_STATUS, BIT_ULL(10), false);
3347 dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
3351 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
3352 /* Check if CGX devices are ready */
3353 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
3354 /* Skip when cgx port is not available */
3355 if (!rvu_cgx_pdata(idx, rvu) ||
3356 (status & (BIT_ULL(16 + idx))))
3359 "CGX%d didn't respond to NIX X2P calibration\n", idx);
3363 /* Check if LBK is ready */
3364 if (!(status & BIT_ULL(19))) {
3366 "LBK didn't respond to NIX X2P calibration\n");
3370 /* Clear 'calibrate_x2p' bit */
3371 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3372 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
3373 if (err || (status & 0x3FFULL))
3375 "NIX X2P calibration failed, status 0x%llx\n", status);
3381 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
3386 /* Set admin queue endianness */
3387 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
3390 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3393 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3396 /* Do not bypass NDC cache */
3397 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
3399 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
3400 /* Disable caching of SQB aka SQEs */
3403 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
3405 /* Result structure can be followed by RQ/SQ/CQ context at
3406 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
3407 * operation type. Alloc sufficient result memory for all operations.
3409 err = rvu_aq_alloc(rvu, &block->aq,
3410 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
3411 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
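	/* Sizing sketch for the allocation above: each AQ result slot is
	 * ALIGN(sizeof(struct nix_aq_res_s), 128) + 256 bytes, i.e. (assuming
	 * the result struct fits in 128 bytes) 128 bytes of result, room for
	 * the optional RQ/SQ/CQ context copy at RES + 128, and the write mask
	 * at RES + 256.
	 */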
3415 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
3416 rvu_write64(rvu, block->addr,
3417 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
3421 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
3423 const struct npc_lt_def_cfg *ltdefs;
3424 struct rvu_hwinfo *hw = rvu->hw;
3425 int blkaddr = nix_hw->blkaddr;
3426 struct rvu_block *block;
3430 block = &hw->block[blkaddr];
3432 if (is_rvu_96xx_B0(rvu)) {
3433 /* As per a HW erratum in 96xx A0/B0 silicon, NIX may corrupt
3434 * internal state when conditional clocks are turned off.
3435 * Hence enable them.
3437 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3438 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
3440 /* Set chan/link to backpressure TL3 instead of TL2 */
3441 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
3443 /* Disable SQ manager's sticky mode operation (set TM6 = 0).
3444 * This sticky mode is known to cause SQ stalls when multiple
3445 * SQs are mapped to the same SMQ and transmit pkts at the same time.
3447 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
3448 cfg &= ~BIT_ULL(15);
3449 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
3452 ltdefs = rvu->kpu.lt_def;
3453 /* Calibrate X2P bus to check if CGX/LBK links are fine */
3454 err = nix_calibrate_x2p(rvu, blkaddr);
3458 /* Initialize admin queue */
3459 err = nix_aq_init(rvu, block);
3463 /* Restore CINT timer delay to HW reset values */
3464 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
3466 if (is_block_implemented(hw, blkaddr)) {
3467 err = nix_setup_txschq(rvu, nix_hw, blkaddr);
3471 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
3475 err = nix_setup_mcast(rvu, nix_hw, blkaddr);
3479 err = nix_setup_txvlan(rvu, nix_hw);
3483 /* Configure segmentation offload formats */
3484 nix_setup_lso(rvu, nix_hw, blkaddr);
3486 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
3487 * This helps the HW protocol checker identify headers
3488 * and validate lengths and checksums.
3490 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3491 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
3492 ltdefs->rx_ol2.ltype_mask);
3493 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3494 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
3495 ltdefs->rx_oip4.ltype_mask);
3496 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3497 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
3498 ltdefs->rx_iip4.ltype_mask);
3499 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3500 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
3501 ltdefs->rx_oip6.ltype_mask);
3502 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3503 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
3504 ltdefs->rx_iip6.ltype_mask);
3505 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3506 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
3507 ltdefs->rx_otcp.ltype_mask);
3508 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3509 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
3510 ltdefs->rx_itcp.ltype_mask);
3511 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3512 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
3513 ltdefs->rx_oudp.ltype_mask);
3514 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3515 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
3516 ltdefs->rx_iudp.ltype_mask);
3517 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3518 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
3519 ltdefs->rx_osctp.ltype_mask);
3520 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3521 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
3522 ltdefs->rx_isctp.ltype_mask);
3524 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
3528 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
3529 nix_link_config(rvu, blkaddr);
3531 /* Enable Channel backpressure */
3532 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
3537 int rvu_nix_init(struct rvu *rvu)
3539 struct rvu_hwinfo *hw = rvu->hw;
3540 struct nix_hw *nix_hw;
3541 int blkaddr = 0, err;
3544 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
3549 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3551 nix_hw = &hw->nix[i];
3553 nix_hw->blkaddr = blkaddr;
3554 err = rvu_nix_block_init(rvu, nix_hw);
3557 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3564 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
3565 struct rvu_block *block)
3567 struct nix_txsch *txsch;
3568 struct nix_mcast *mcast;
3569 struct nix_txvlan *vlan;
3570 struct nix_hw *nix_hw;
3573 rvu_aq_free(rvu, block->aq);
3575 if (is_block_implemented(rvu->hw, blkaddr)) {
3576 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3580 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3581 txsch = &nix_hw->txsch[lvl];
3582 kfree(txsch->schq.bmap);
3585 vlan = &nix_hw->txvlan;
3586 kfree(vlan->rsrc.bmap);
3587 mutex_destroy(&vlan->rsrc_lock);
3588 devm_kfree(rvu->dev, vlan->entry2pfvf_map);
3590 mcast = &nix_hw->mcast;
3591 qmem_free(rvu->dev, mcast->mce_ctx);
3592 qmem_free(rvu->dev, mcast->mcast_buf);
3593 mutex_destroy(&mcast->mce_lock);
3597 void rvu_nix_freemem(struct rvu *rvu)
3599 struct rvu_hwinfo *hw = rvu->hw;
3600 struct rvu_block *block;
3603 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3605 block = &hw->block[blkaddr];
3606 rvu_nix_block_freemem(rvu, blkaddr, block);
3607 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3611 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
3612 struct msg_rsp *rsp)
3614 u16 pcifunc = req->hdr.pcifunc;
3617 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3621 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
3623 npc_mcam_enable_flows(rvu, pcifunc);
3625 return rvu_cgx_start_stop_io(rvu, pcifunc, true);
3628 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
3629 struct msg_rsp *rsp)
3631 u16 pcifunc = req->hdr.pcifunc;
3634 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3638 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
3640 npc_mcam_disable_flows(rvu, pcifunc);
3642 return rvu_cgx_start_stop_io(rvu, pcifunc, false);
3645 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
3647 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3648 struct hwctx_disable_req ctx_req;
3651 ctx_req.hdr.pcifunc = pcifunc;
3653 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
3654 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3655 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
3656 nix_interface_deinit(rvu, pcifunc, nixlf);
3657 nix_rx_sync(rvu, blkaddr);
3658 nix_txschq_free(rvu, pcifunc);
3660 rvu_cgx_start_stop_io(rvu, pcifunc, false);
3663 ctx_req.ctype = NIX_AQ_CTYPE_SQ;
3664 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3666 dev_err(rvu->dev, "SQ ctx disable failed\n");
3670 ctx_req.ctype = NIX_AQ_CTYPE_RQ;
3671 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3673 dev_err(rvu->dev, "RQ ctx disable failed\n");
3677 ctx_req.ctype = NIX_AQ_CTYPE_CQ;
3678 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3680 dev_err(rvu->dev, "CQ ctx disable failed\n");
3683 nix_ctx_free(rvu, pfvf);
3686 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
3688 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
3690 struct rvu_hwinfo *hw = rvu->hw;
3691 struct rvu_block *block;
3696 pf = rvu_get_pf(pcifunc);
3697 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
3700 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3702 return NIX_AF_ERR_AF_LF_INVALID;
3704 block = &hw->block[blkaddr];
3705 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
3707 return NIX_AF_ERR_AF_LF_INVALID;
3709 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
3712 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
3714 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
3716 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
3721 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
3722 struct msg_rsp *rsp)
3724 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
3727 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
3728 struct msg_rsp *rsp)
3730 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
3733 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
3734 struct nix_lso_format_cfg *req,
3735 struct nix_lso_format_cfg_rsp *rsp)
3737 u16 pcifunc = req->hdr.pcifunc;
3738 struct nix_hw *nix_hw;
3739 struct rvu_pfvf *pfvf;
3740 int blkaddr, idx, f;
3743 pfvf = rvu_get_pfvf(rvu, pcifunc);
3744 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3745 if (!pfvf->nixlf || blkaddr < 0)
3746 return NIX_AF_ERR_AF_LF_INVALID;
3748 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3752 /* Find existing matching LSO format, if any */
3753 for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
3754 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
3755 reg = rvu_read64(rvu, blkaddr,
3756 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
3757 if (req->fields[f] != (reg & req->field_mask))
3761 if (f == NIX_LSO_FIELD_MAX)
3765 if (idx < nix_hw->lso.in_use) {
3767 rsp->lso_format_idx = idx;
3771 if (nix_hw->lso.in_use == nix_hw->lso.total)
3772 return NIX_AF_ERR_LSO_CFG_FAIL;
3774 rsp->lso_format_idx = nix_hw->lso.in_use++;
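	/* Allocation flow sketch: the loop above first tries to reuse an
	 * already-programmed LSO format whose NIX_AF_LSO_FORMATX_FIELDX()
	 * values match req->fields under req->field_mask; only if none match
	 * (and a free slot remains out of nix_hw->lso.total) is a new format
	 * index handed out and programmed below.
	 */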
3776 for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
3777 rvu_write64(rvu, blkaddr,
3778 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
3784 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
3786 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
3788 /* Overwrite the VF MAC address with default_mac */
3790 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);