1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Marvell. */
4 #include <linux/ctype.h>
5 #include <linux/firmware.h>
6 #include "otx2_cptpf_ucode.h"
7 #include "otx2_cpt_common.h"
8 #include "otx2_cptpf.h"
9 #include "otx2_cptlf.h"
10 #include "otx2_cpt_reqmgr.h"
15 #define LOADFVC_RLEN 8
16 #define LOADFVC_MAJOR_OP 0x01
17 #define LOADFVC_MINOR_OP 0x08
20 * Interval to flush dirty data for next CTX entry. The interval is measured
21 * in increments of 10ns(interval time = CTX_FLUSH_TIMER_COUNT * 10ns).
23 #define CTX_FLUSH_TIMER_CNT 0x2FAF0
26 struct list_head ucodes;
/*
 * Build a bitmap of all engines reserved by @eng_grp, OR-ing together the
 * per-engine-type bitmaps of every populated slot.  Logs an error for an
 * out-of-range engine count or when no engines are reserved at all.
 */
29 static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,
30 struct otx2_cpt_eng_grp_info *eng_grp)
32 struct otx2_cpt_bitmap bmap = { {0} };
/* Reject an engine count outside the supported range. */
36 if (eng_grp->g->engs_num < 0 ||
37 eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
38 dev_err(dev, "unsupported number of engines %d on octeontx2\n",
39 eng_grp->g->engs_num);
/* Merge the bitmap of each engine-type slot that is in use. */
43 for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
44 if (eng_grp->engs[i].type) {
45 bitmap_or(bmap.bits, bmap.bits,
46 eng_grp->engs[i].bmap,
47 eng_grp->g->engs_num);
48 bmap.size = eng_grp->g->engs_num;
/* No slot populated: the group holds no engines. */
54 dev_err(dev, "No engines reserved for engine group %d\n",
/* Return non-zero if the bit for @eng_type is set in the type mask @val. */
59 static int is_eng_type(int val, int eng_type)
61 return val & (1 << eng_type);
/* True when the group's second microcode slot (ucode[1]) is in use. */
64 static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
66 if (eng_grp->ucode[1].type)
/*
 * Record @filename in the ucode descriptor, truncating safely to
 * OTX2_CPT_NAME_LENGTH (strscpy always NUL-terminates).
 */
72 static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
75 strscpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
/* Map an engine type (SE/IE/AE) to a printable name; "unknown" otherwise. */
78 static char *get_eng_type_str(int eng_type)
80 char *str = "unknown";
83 case OTX2_CPT_SE_TYPES:
87 case OTX2_CPT_IE_TYPES:
91 case OTX2_CPT_AE_TYPES:
/*
 * Map a microcode type bitmask to a printable name.  Handles the single-type
 * masks and the combined SE+IE mask; anything else reports "unknown".
 */
98 static char *get_ucode_type_str(int ucode_type)
100 char *str = "unknown";
102 switch (ucode_type) {
103 case (1 << OTX2_CPT_SE_TYPES):
107 case (1 << OTX2_CPT_IE_TYPES):
111 case (1 << OTX2_CPT_AE_TYPES):
115 case (1 << OTX2_CPT_SE_TYPES | 1 << OTX2_CPT_IE_TYPES):
/*
 * Parse the microcode header's version string to classify the image.
 * The string must begin with "ocpt-<rid>"; substrings "se-"/"ie-"/"ae"
 * combined with a matching ver_num.nn select the engine-type bit(s)
 * accumulated in *ucode_type.
 */
122 static int get_ucode_type(struct device *dev,
123 struct otx2_cpt_ucode_hdr *ucode_hdr,
124 int *ucode_type, u16 rid)
126 char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
127 char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
/* Lower-case a local copy so matching is case-insensitive. */
131 strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
132 for (i = 0; i < strlen(tmp_ver_str); i++)
133 tmp_ver_str[i] = tolower(tmp_ver_str[i]);
/* The version string must carry the expected "ocpt-<rid>" prefix. */
135 sprintf(ver_str_prefix, "ocpt-%02d", rid);
136 if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
/* nn encodes the microcode flavor; cross-check it against the name. */
139 nn = ucode_hdr->ver_num.nn;
140 if (strnstr(tmp_ver_str, "se-", OTX2_CPT_UCODE_VER_STR_SZ) &&
141 (nn == OTX2_CPT_SE_UC_TYPE1 || nn == OTX2_CPT_SE_UC_TYPE2 ||
142 nn == OTX2_CPT_SE_UC_TYPE3))
143 val |= 1 << OTX2_CPT_SE_TYPES;
144 if (strnstr(tmp_ver_str, "ie-", OTX2_CPT_UCODE_VER_STR_SZ) &&
145 (nn == OTX2_CPT_IE_UC_TYPE1 || nn == OTX2_CPT_IE_UC_TYPE2 ||
146 nn == OTX2_CPT_IE_UC_TYPE3))
147 val |= 1 << OTX2_CPT_IE_TYPES;
148 if (strnstr(tmp_ver_str, "ae", OTX2_CPT_UCODE_VER_STR_SZ) &&
149 nn == OTX2_CPT_AE_UC_TYPE)
150 val |= 1 << OTX2_CPT_AE_TYPES;
/*
 * Program the microcode DMA base address for engine @eng by writing the
 * AF CPT_AF_EXEX_UCODE_BASE register on CPT block @blkaddr over the
 * AF-PF mailbox.
 */
160 static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
161 dma_addr_t dma_addr, int blkaddr)
163 return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
164 CPT_AF_EXEX_UCODE_BASE(eng),
165 (u64)dma_addr, blkaddr);
/*
 * Point every unused engine of @eng_grp at its microcode image on CPT
 * block @blkaddr, and set the PF id used by the hardware for microcode
 * fetches.  Engines already referenced by another group keep their
 * current UCODE_BASE.
 */
168 static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
169 struct otx2_cptpf_dev *cptpf, int blkaddr)
171 struct otx2_cpt_engs_rsvd *engs;
175 /* Set PF number for microcode fetches */
176 ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
178 cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
/* Walk each populated engine-type slot of the group. */
182 for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
183 engs = &eng_grp->engs[i];
187 dma_addr = engs->ucode->dma;
190 * Set UCODE_BASE only for the cores which are not used,
191 * other cores should have already valid UCODE_BASE set
193 for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
194 if (!eng_grp->g->eng_ref_cnt[bit]) {
195 ret = __write_ucode_base(cptpf, bit, dma_addr,
/*
 * Dispatch the UCODE_BASE setup to CPT1 (when present) and then CPT0.
 * @obj is the PF device handle passed through the engine-group callbacks.
 */
204 static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
206 struct otx2_cptpf_dev *cptpf = obj;
209 if (cptpf->has_cpt1) {
210 ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);
214 return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);
/*
 * Detach the engines in @bmap from @eng_grp on CPT block @blkaddr:
 * clear the group's bit in each engine's CPT_AF_EXEX_CTL2, drop the
 * reference count, wait for the cores to go idle, then disable
 * (CPT_AF_EXEX_CTL = 0) any core no longer referenced by any group.
 */
217 static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
218 struct otx2_cptpf_dev *cptpf,
219 struct otx2_cpt_bitmap bmap,
226 /* Detach the cores from group */
227 for_each_set_bit(i, bmap.bits, bmap.size) {
228 ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
229 CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
/* Only touch engines that actually belong to this group. */
233 if (reg & (1ull << eng_grp->idx)) {
234 eng_grp->g->eng_ref_cnt[i]--;
235 reg &= ~(1ull << eng_grp->idx);
237 ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
239 CPT_AF_EXEX_CTL2(i), reg,
246 /* Wait for cores to become idle */
249 usleep_range(10000, 20000);
/* Poll each engine's status register for the idle indication. */
253 for_each_set_bit(i, bmap.bits, bmap.size) {
254 ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
256 CPT_AF_EXEX_STS(i), &reg,
268 /* Disable the cores only if they are not used anymore */
269 for_each_set_bit(i, bmap.bits, bmap.size) {
270 if (!eng_grp->g->eng_ref_cnt[i]) {
271 ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
273 CPT_AF_EXEX_CTL(i), 0x0,
/*
 * Compute the group's engine bitmap and detach/disable those engines on
 * CPT1 (when present) and CPT0.  @obj is the PF device handle.
 */
283 static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
286 struct otx2_cptpf_dev *cptpf = obj;
287 struct otx2_cpt_bitmap bmap;
290 bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
294 if (cptpf->has_cpt1) {
295 ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
300 return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
/*
 * Attach the engines in @bmap to @eng_grp on CPT block @blkaddr by
 * setting the group's bit in each engine's CPT_AF_EXEX_CTL2 (bumping the
 * reference count for engines newly joined), then enable all of them
 * via CPT_AF_EXEX_CTL.  Register writes are batched and flushed by
 * otx2_cpt_send_af_reg_requests().
 */
304 static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
305 struct otx2_cptpf_dev *cptpf,
306 struct otx2_cpt_bitmap bmap,
312 /* Attach the cores to the group */
313 for_each_set_bit(i, bmap.bits, bmap.size) {
314 ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
315 CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
/* Skip engines this group already owns; count only new attachments. */
319 if (!(reg & (1ull << eng_grp->idx))) {
320 eng_grp->g->eng_ref_cnt[i]++;
321 reg |= 1ull << eng_grp->idx;
323 ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
325 CPT_AF_EXEX_CTL2(i), reg,
332 /* Enable the cores */
333 for_each_set_bit(i, bmap.bits, bmap.size) {
334 ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
335 CPT_AF_EXEX_CTL(i), 0x1,
340 return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
/*
 * Compute the group's engine bitmap and attach/enable those engines on
 * CPT1 (when present) and CPT0.  @obj is the PF device handle.
 */
343 static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
346 struct otx2_cptpf_dev *cptpf = obj;
347 struct otx2_cpt_bitmap bmap;
350 bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
354 if (cptpf->has_cpt1) {
355 ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap,
360 return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0);
/*
 * Request the firmware image @filename, classify it via its header
 * (get_ucode_type) and append a new uc_info entry to @fw_info->ucodes.
 * On failure the firmware reference is released and the entry freed.
 */
363 static int load_fw(struct device *dev, struct fw_info_t *fw_info,
364 char *filename, u16 rid)
366 struct otx2_cpt_ucode_hdr *ucode_hdr;
367 struct otx2_cpt_uc_info_t *uc_info;
368 int ucode_type, ucode_size;
371 uc_info = kzalloc(sizeof(*uc_info), GFP_KERNEL);
375 ret = request_firmware(&uc_info->fw, filename, dev);
/* Header sits at the start of the firmware blob. */
379 ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;
380 ret = get_ucode_type(dev, ucode_hdr, &ucode_type, rid);
/* code_length is big-endian; *2 suggests it counts 16-bit words — TODO confirm. */
384 ucode_size = ntohl(ucode_hdr->code_length) * 2;
386 dev_err(dev, "Ucode %s invalid size\n", filename);
391 set_ucode_filename(&uc_info->ucode, filename);
392 memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,
393 OTX2_CPT_UCODE_VER_STR_SZ);
/* Force NUL termination of the copied version string. */
394 uc_info->ucode.ver_str[OTX2_CPT_UCODE_VER_STR_SZ] = 0;
395 uc_info->ucode.ver_num = ucode_hdr->ver_num;
396 uc_info->ucode.type = ucode_type;
397 uc_info->ucode.size = ucode_size;
398 list_add_tail(&uc_info->list, &fw_info->ucodes);
/* Error path: drop the firmware reference taken above. */
403 release_firmware(uc_info->fw);
/*
 * Release every firmware image tracked in @fw_info->ucodes, unlinking
 * each entry from the list as it goes.
 */
409 static void cpt_ucode_release_fw(struct fw_info_t *fw_info)
411 struct otx2_cpt_uc_info_t *curr, *temp;
416 list_for_each_entry_safe(curr, temp, &fw_info->ucodes, list) {
417 list_del(&curr->list);
418 release_firmware(curr->fw);
/*
 * Find the first loaded microcode in @fw_info that provides engine type
 * @ucode_type; entries without that type bit are skipped.
 */
423 static struct otx2_cpt_uc_info_t *get_ucode(struct fw_info_t *fw_info,
426 struct otx2_cpt_uc_info_t *curr;
428 list_for_each_entry(curr, &fw_info->ucodes, list) {
429 if (!is_eng_type(curr->ucode.type, ucode_type))
/* Dump filename, version, type and size of every loaded ucode (debug only). */
437 static void print_uc_info(struct fw_info_t *fw_info)
439 struct otx2_cpt_uc_info_t *curr;
441 list_for_each_entry(curr, &fw_info->ucodes, list) {
442 pr_debug("Ucode filename %s\n", curr->ucode.filename);
443 pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
444 pr_debug("Ucode version %d.%d.%d.%d\n",
445 curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
446 curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
447 pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
448 get_ucode_type_str(curr->ucode.type));
449 pr_debug("Ucode size %d\n", curr->ucode.size);
450 pr_debug("Ucode ptr %p\n", curr->fw->data);
/*
 * Load the firmware image for every engine type (SE/IE/AE) from
 * "mrvl/cpt<rid>/<type>.out" into @fw_info->ucodes.  On any failure all
 * firmware loaded so far is released.
 */
454 static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info,
457 char filename[OTX2_CPT_NAME_LENGTH];
458 char eng_type[8] = {0};
461 INIT_LIST_HEAD(&fw_info->ucodes);
/* Engine type 0 is unused; iterate the real types. */
463 for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
464 strcpy(eng_type, get_eng_type_str(e));
465 for (i = 0; i < strlen(eng_type); i++)
466 eng_type[i] = tolower(eng_type[i]);
468 snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",
470 /* Request firmware for each engine type */
471 ret = load_fw(&pdev->dev, fw_info, filename, rid);
475 print_uc_info(fw_info);
/* Error path: release everything loaded so far. */
479 cpt_ucode_release_fw(fw_info);
/*
 * Return the engine-reservation slot of @eng_grp holding engines of
 * @eng_type, or NULL if the group has none of that type.
 */
483 struct otx2_cpt_engs_rsvd *find_engines_by_type(
484 struct otx2_cpt_eng_grp_info *eng_grp,
489 for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
490 if (!eng_grp->engs[i].type)
493 if (eng_grp->engs[i].type == eng_type)
494 return &eng_grp->engs[i];
/* Return 1 if @eng_grp has a reservation slot of @eng_type, else 0. */
499 static int eng_grp_has_eng_type(struct otx2_cpt_eng_grp_info *eng_grp,
502 struct otx2_cpt_engs_rsvd *engs;
504 engs = find_engines_by_type(eng_grp, eng_type);
506 return (engs != NULL ? 1 : 0);
/*
 * Adjust the free-engine counter for the type of @engs by @val
 * (negative when reserving, positive when releasing).  Unknown engine
 * types are reported as an error.
 */
509 static int update_engines_avail_count(struct device *dev,
510 struct otx2_cpt_engs_available *avail,
511 struct otx2_cpt_engs_rsvd *engs, int val)
513 switch (engs->type) {
514 case OTX2_CPT_SE_TYPES:
515 avail->se_cnt += val;
518 case OTX2_CPT_IE_TYPES:
519 avail->ie_cnt += val;
522 case OTX2_CPT_AE_TYPES:
523 avail->ae_cnt += val;
527 dev_err(dev, "Invalid engine type %d\n", engs->type);
/*
 * Set the base engine index for @engs according to the fixed hardware
 * layout: SE engines first, then IE, then AE.
 */
533 static int update_engines_offset(struct device *dev,
534 struct otx2_cpt_engs_available *avail,
535 struct otx2_cpt_engs_rsvd *engs)
537 switch (engs->type) {
538 case OTX2_CPT_SE_TYPES:
542 case OTX2_CPT_IE_TYPES:
543 engs->offset = avail->max_se_cnt;
546 case OTX2_CPT_AE_TYPES:
547 engs->offset = avail->max_se_cnt + avail->max_ie_cnt;
551 dev_err(dev, "Invalid engine type %d\n", engs->type);
/*
 * Return all engines held by @grp to the global free pool and reset
 * every reservation slot (type/count/offset/ucode/bitmap) to empty.
 */
557 static int release_engines(struct device *dev,
558 struct otx2_cpt_eng_grp_info *grp)
562 for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
563 if (!grp->engs[i].type)
/* Give exclusively-reserved engines back to the available pool. */
566 if (grp->engs[i].count > 0) {
567 ret = update_engines_avail_count(dev, &grp->g->avail,
574 grp->engs[i].type = 0;
575 grp->engs[i].count = 0;
576 grp->engs[i].offset = 0;
577 grp->engs[i].ucode = NULL;
578 bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
/*
 * Claim a free reservation slot in @grp for @req_engs, set its hardware
 * offset, and deduct the requested count from the free pool (only when
 * the request is for exclusively-owned engines, count > 0).
 */
583 static int do_reserve_engines(struct device *dev,
584 struct otx2_cpt_eng_grp_info *grp,
585 struct otx2_cpt_engines *req_engs)
587 struct otx2_cpt_engs_rsvd *engs = NULL;
/* Find the first unused slot in the group. */
590 for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
591 if (!grp->engs[i].type) {
592 engs = &grp->engs[i];
600 engs->type = req_engs->type;
601 engs->count = req_engs->count;
603 ret = update_engines_offset(dev, &grp->g->avail, engs);
/* count may be <= 0 when all engines are shared with a mirrored group. */
607 if (engs->count > 0) {
608 ret = update_engines_avail_count(dev, &grp->g->avail, engs,
/*
 * Verify the free pool holds at least @req_eng->count engines of the
 * requested type; log and fail otherwise.
 */
617 static int check_engines_availability(struct device *dev,
618 struct otx2_cpt_eng_grp_info *grp,
619 struct otx2_cpt_engines *req_eng)
623 switch (req_eng->type) {
624 case OTX2_CPT_SE_TYPES:
625 avail_cnt = grp->g->avail.se_cnt;
628 case OTX2_CPT_IE_TYPES:
629 avail_cnt = grp->g->avail.ie_cnt;
632 case OTX2_CPT_AE_TYPES:
633 avail_cnt = grp->g->avail.ae_cnt;
637 dev_err(dev, "Invalid engine type %d\n", req_eng->type);
641 if (avail_cnt < req_eng->count) {
643 "Error available %s engines %d < than requested %d\n",
644 get_eng_type_str(req_eng->type),
645 avail_cnt, req_eng->count);
/*
 * Reserve all engines requested in @req_engs for @grp: first validate
 * availability for every request, then perform the reservations.
 */
651 static int reserve_engines(struct device *dev,
652 struct otx2_cpt_eng_grp_info *grp,
653 struct otx2_cpt_engines *req_engs, int ucodes_cnt)
657 /* Validate if a number of requested engines are available */
658 for (i = 0; i < ucodes_cnt; i++) {
659 ret = check_engines_availability(dev, grp, &req_engs[i]);
664 /* Reserve requested engines for this engine group */
665 for (i = 0; i < ucodes_cnt; i++) {
666 ret = do_reserve_engines(dev, grp, &req_engs[i]);
/*
 * Free the DMA buffer holding a microcode image and clear its metadata
 * (version string/number, filename) so the slot reads as empty.
 */
673 static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)
676 dma_free_coherent(dev, OTX2_CPT_UCODE_SZ, ucode->va,
683 memset(&ucode->ver_str, 0, OTX2_CPT_UCODE_VER_STR_SZ);
684 memset(&ucode->ver_num, 0, sizeof(struct otx2_cpt_ucode_ver_num));
685 set_ucode_filename(ucode, "");
/*
 * Allocate a coherent DMA buffer and copy the microcode payload (which
 * follows the image header) into it, applying the byte-order transforms
 * the CPT engines expect.
 */
689 static int copy_ucode_to_dma_mem(struct device *dev,
690 struct otx2_cpt_ucode *ucode,
691 const u8 *ucode_data)
695 /* Allocate DMAable space */
696 ucode->va = dma_alloc_coherent(dev, OTX2_CPT_UCODE_SZ, &ucode->dma,
/* Payload starts right after the ucode header in the firmware blob. */
701 memcpy(ucode->va, ucode_data + sizeof(struct otx2_cpt_ucode_hdr),
704 /* Byte swap 64-bit */
705 for (i = 0; i < (ucode->size / 8); i++)
706 cpu_to_be64s(&((u64 *)ucode->va)[i]);
707 /* Ucode needs 16-bit swap */
708 for (i = 0; i < (ucode->size / 2); i++)
709 cpu_to_be16s(&((u16 *)ucode->va)[i]);
/*
 * Bring an engine group online: program UCODE_BASE for its engines,
 * then attach the engines to the group and enable them.
 */
713 static int enable_eng_grp(struct otx2_cpt_eng_grp_info *eng_grp,
718 /* Point microcode to each core of the group */
719 ret = cpt_set_ucode_base(eng_grp, obj);
723 /* Attach the cores to the group and enable them */
724 ret = cpt_attach_and_enable_cores(eng_grp, obj);
/*
 * Take an engine group offline: detach and disable its engines, free
 * both microcode images, repoint the engine slots at the (now empty)
 * first ucode, and clear UCODE_BASE for the group's engines.
 */
729 static int disable_eng_grp(struct device *dev,
730 struct otx2_cpt_eng_grp_info *eng_grp,
735 /* Disable all engines used by this group */
736 ret = cpt_detach_and_disable_cores(eng_grp, obj);
740 /* Unload ucode used by this engine group */
741 ucode_unload(dev, &eng_grp->ucode[0]);
742 ucode_unload(dev, &eng_grp->ucode[1]);
744 for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
745 if (!eng_grp->engs[i].type)
748 eng_grp->engs[i].ucode = &eng_grp->ucode[0];
751 /* Clear UCODE_BASE register for each engine used by this group */
752 ret = cpt_set_ucode_base(eng_grp, obj);
/*
 * Link @dst_grp as a mirror of @src_grp: the source gains a reference,
 * the destination records the source's index and enables mirroring.
 */
757 static void setup_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp,
758 struct otx2_cpt_eng_grp_info *src_grp)
760 /* Setup fields for engine group which is mirrored */
761 src_grp->mirror.is_ena = false;
762 src_grp->mirror.idx = 0;
763 src_grp->mirror.ref_count++;
765 /* Setup fields for mirroring engine group */
766 dst_grp->mirror.is_ena = true;
767 dst_grp->mirror.idx = src_grp->idx;
768 dst_grp->mirror.ref_count = 0;
/*
 * Undo mirroring for @dst_grp: drop the reference on the mirrored
 * source group and reset the destination's mirror state.  No-op when
 * mirroring is not enabled.
 */
771 static void remove_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp)
773 struct otx2_cpt_eng_grp_info *src_grp;
775 if (!dst_grp->mirror.is_ena)
778 src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];
780 src_grp->mirror.ref_count--;
781 dst_grp->mirror.is_ena = false;
782 dst_grp->mirror.idx = 0;
783 dst_grp->mirror.ref_count = 0;
/*
 * Reduce each engine request in @engs by the count already provided by
 * @mirror_eng_grp, so only the remainder is reserved exclusively.  A
 * resulting count <= 0 means the mirrored group fully covers the need.
 */
786 static void update_requested_engs(struct otx2_cpt_eng_grp_info *mirror_eng_grp,
787 struct otx2_cpt_engines *engs, int engs_cnt)
789 struct otx2_cpt_engs_rsvd *mirrored_engs;
792 for (i = 0; i < engs_cnt; i++) {
793 mirrored_engs = find_engines_by_type(mirror_eng_grp,
799 * If mirrored group has this type of engines attached then
800 * there are 3 scenarios possible:
801 * 1) mirrored_engs.count == engs[i].count then all engines
802 * from mirrored engine group will be shared with this engine
804 * 2) mirrored_engs.count > engs[i].count then only a subset of
805 * engines from mirrored engine group will be shared with this
807 * 3) mirrored_engs.count < engs[i].count then all engines
808 * from mirrored engine group will be shared with this group
809 * and additional engines will be reserved for exclusively use
810 * by this engine group
812 engs[i].count -= mirrored_engs->count;
/*
 * Look for an enabled engine group using the same primary microcode
 * (case-insensitive version-string match) that @grp could mirror.
 * Groups running two microcodes are not candidates.  Returns NULL when
 * no match exists.
 */
816 static struct otx2_cpt_eng_grp_info *find_mirrored_eng_grp(
817 struct otx2_cpt_eng_grp_info *grp)
819 struct otx2_cpt_eng_grps *eng_grps = grp->g;
822 for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
823 if (!eng_grps->grp[i].is_enabled)
825 if (eng_grps->grp[i].ucode[0].type &&
826 eng_grps->grp[i].ucode[1].type)
830 if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
831 grp->ucode[0].ver_str,
832 OTX2_CPT_UCODE_VER_STR_SZ))
833 return &eng_grps->grp[i];
/* Return the first disabled (free) engine group slot, or NULL if all used. */
839 static struct otx2_cpt_eng_grp_info *find_unused_eng_grp(
840 struct otx2_cpt_eng_grps *eng_grps)
844 for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
845 if (!eng_grps->grp[i].is_enabled)
846 return &eng_grps->grp[i];
/*
 * Compute the engine bitmap of every reservation slot in @eng_grp.
 * First pass: pick unreferenced engines from each type's hardware range
 * for exclusively-owned engines.  Second pass (mirroring groups only):
 * merge in the mirrored group's bitmaps, trimming engines the mirror
 * provides beyond what this group needs (count < 0).
 */
851 static int eng_grp_update_masks(struct device *dev,
852 struct otx2_cpt_eng_grp_info *eng_grp)
854 struct otx2_cpt_engs_rsvd *engs, *mirrored_engs;
855 struct otx2_cpt_bitmap tmp_bmap = { {0} };
856 int i, j, cnt, max_cnt;
859 for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
860 engs = &eng_grp->engs[i];
/* Nothing to pick when the slot has no exclusive engines. */
863 if (engs->count <= 0)
866 switch (engs->type) {
867 case OTX2_CPT_SE_TYPES:
868 max_cnt = eng_grp->g->avail.max_se_cnt;
871 case OTX2_CPT_IE_TYPES:
872 max_cnt = eng_grp->g->avail.max_ie_cnt;
875 case OTX2_CPT_AE_TYPES:
876 max_cnt = eng_grp->g->avail.max_ae_cnt;
880 dev_err(dev, "Invalid engine type %d\n", engs->type);
/* Scan this type's hardware range for unreferenced engines. */
885 WARN_ON(engs->offset + max_cnt > OTX2_CPT_MAX_ENGINES);
886 bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
887 for (j = engs->offset; j < engs->offset + max_cnt; j++) {
888 if (!eng_grp->g->eng_ref_cnt[j]) {
889 bitmap_set(tmp_bmap.bits, j, 1);
899 bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
902 if (!eng_grp->mirror.is_ena)
905 for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
906 engs = &eng_grp->engs[i];
910 mirrored_engs = find_engines_by_type(
911 &eng_grp->g->grp[eng_grp->mirror.idx],
913 WARN_ON(!mirrored_engs && engs->count <= 0);
/* count < 0: the mirror has more engines than needed; trim the excess. */
917 bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
918 eng_grp->g->engs_num);
919 if (engs->count < 0) {
920 bit = find_first_bit(mirrored_engs->bmap,
921 eng_grp->g->engs_num);
922 bitmap_clear(tmp_bmap.bits, bit, -engs->count);
924 bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
925 eng_grp->g->engs_num);
/*
 * Tear down @eng_grp: refuse while other groups still mirror it, remove
 * its own mirroring link, disable the group in hardware, release its
 * engines, and mark it disabled.
 */
930 static int delete_engine_group(struct device *dev,
931 struct otx2_cpt_eng_grp_info *eng_grp)
935 if (!eng_grp->is_enabled)
/* Cannot delete a group that other groups still mirror. */
938 if (eng_grp->mirror.ref_count)
941 /* Removing engine group mirroring if enabled */
942 remove_eng_grp_mirroring(eng_grp);
944 /* Disable engine group */
945 ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
949 /* Release all engines held by this engine group */
950 ret = release_engines(dev, eng_grp);
954 eng_grp->is_enabled = false;
/*
 * Point each engine slot at the microcode it should run: the mirrored
 * group's primary ucode when mirroring, otherwise this group's own.
 * Slot 1 gets the second ucode only when one is actually loaded.
 */
959 static void update_ucode_ptrs(struct otx2_cpt_eng_grp_info *eng_grp)
961 struct otx2_cpt_ucode *ucode;
963 if (eng_grp->mirror.is_ena)
964 ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
966 ucode = &eng_grp->ucode[0];
967 WARN_ON(!eng_grp->engs[0].type);
968 eng_grp->engs[0].ucode = ucode;
970 if (eng_grp->engs[1].type) {
971 if (is_2nd_ucode_used(eng_grp))
972 eng_grp->engs[1].ucode = &eng_grp->ucode[1];
974 eng_grp->engs[1].ucode = ucode;
/*
 * Create and enable a new engine group from @ucode_data (ucodes_cnt
 * images) with the engine requests in @engs.  Sets up mirroring when an
 * existing group already runs the same microcode, reserves engines,
 * programs masks and ucode pointers, and enables the group in hardware.
 * @is_print controls the informational log lines.  On failure, engines
 * and DMA'd microcode are released.
 */
978 static int create_engine_group(struct device *dev,
979 struct otx2_cpt_eng_grps *eng_grps,
980 struct otx2_cpt_engines *engs, int ucodes_cnt,
981 void *ucode_data[], int is_print)
983 struct otx2_cpt_eng_grp_info *mirrored_eng_grp;
984 struct otx2_cpt_eng_grp_info *eng_grp;
985 struct otx2_cpt_uc_info_t *uc_info;
988 /* Find engine group which is not used */
989 eng_grp = find_unused_eng_grp(eng_grps);
991 dev_err(dev, "Error all engine groups are being used\n");
/* Copy each microcode image into DMA memory owned by the group. */
995 for (i = 0; i < ucodes_cnt; i++) {
996 uc_info = (struct otx2_cpt_uc_info_t *) ucode_data[i];
997 eng_grp->ucode[i] = uc_info->ucode;
998 ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
1004 /* Check if this group mirrors another existing engine group */
1005 mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
1006 if (mirrored_eng_grp) {
1007 /* Setup mirroring */
1008 setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);
1011 * Update count of requested engines because some
1012 * of them might be shared with mirrored group
1014 update_requested_engs(mirrored_eng_grp, engs, ucodes_cnt);
1016 ret = reserve_engines(dev, eng_grp, engs, ucodes_cnt);
1020 /* Update ucode pointers used by engines */
1021 update_ucode_ptrs(eng_grp);
1023 /* Update engine masks used by this group */
1024 ret = eng_grp_update_masks(dev, eng_grp);
1028 /* Enable engine group */
1029 ret = enable_eng_grp(eng_grp, eng_grps->obj);
1034 * If this engine group mirrors another engine group
1035 * then we need to unload ucode as we will use ucode
1036 * from mirrored engine group
1038 if (eng_grp->mirror.is_ena)
1039 ucode_unload(dev, &eng_grp->ucode[0]);
1041 eng_grp->is_enabled = true;
/* Informational logging, gated by is_print. */
1046 if (mirrored_eng_grp)
1048 "Engine_group%d: reuse microcode %s from group %d\n",
1049 eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
1050 mirrored_eng_grp->idx);
1052 dev_info(dev, "Engine_group%d: microcode loaded %s\n",
1053 eng_grp->idx, eng_grp->ucode[0].ver_str);
1054 if (is_2nd_ucode_used(eng_grp))
1055 dev_info(dev, "Engine_group%d: microcode loaded %s\n",
1056 eng_grp->idx, eng_grp->ucode[1].ver_str);
/* Error paths: undo reservations and free DMA'd microcode. */
1061 release_engines(dev, eng_grp);
1063 ucode_unload(dev, &eng_grp->ucode[0]);
1064 ucode_unload(dev, &eng_grp->ucode[1]);
/*
 * Delete every engine group.  Mirroring groups go first so that source
 * groups have no remaining references when their turn comes.
 */
1068 static void delete_engine_grps(struct pci_dev *pdev,
1069 struct otx2_cpt_eng_grps *eng_grps)
1073 /* First delete all mirroring engine groups */
1074 for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
1075 if (eng_grps->grp[i].mirror.is_ena)
1076 delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
1078 /* Delete remaining engine groups */
1079 for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
1080 delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
1083 #define PCI_DEVID_CN10K_RNM 0xA098
1084 #define RNM_ENTROPY_STATUS 0x8
/*
 * HW errata workaround: poll the CN10K RNM block's entropy status
 * (via BAR0 of the RNM PCI device) until NORMAL_CNT reaches 0x40,
 * warning if entropy never becomes available.
 */
1086 static void rnm_to_cpt_errata_fixup(struct device *dev)
1088 struct pci_dev *pdev;
1092 pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RNM, NULL);
1096 base = pci_ioremap_bar(pdev, 0);
1100 while ((readq(base + RNM_ENTROPY_STATUS) & 0x7F) != 0x40) {
1105 dev_warn(dev, "RNM is not producing entropy\n");
/*
 * Return the index of the enabled engine group providing @eng_type.
 * For SE the group must be SE-only (no IE engines), so the symmetric
 * crypto group is not confused with the SE+IE IPsec group.  Returns
 * OTX2_CPT_INVALID_CRYPTO_ENG_GRP when no group matches.
 */
1116 int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
1119 int eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
1120 struct otx2_cpt_eng_grp_info *grp;
1123 for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
1124 grp = &eng_grps->grp[i];
1125 if (!grp->is_enabled)
1128 if (eng_type == OTX2_CPT_SE_TYPES) {
1129 if (eng_grp_has_eng_type(grp, eng_type) &&
1130 !eng_grp_has_eng_type(grp, OTX2_CPT_IE_TYPES)) {
1135 if (eng_grp_has_eng_type(grp, eng_type)) {
/*
 * Create the default engine groups (done once, before VFs are enabled):
 *   group 0: SE engines for kernel symmetric crypto,
 *   group 1: SE+IE engines for IPsec (SE shared with group 0),
 *   group 2: AE engines for asymmetric crypto.
 * On non-OTX2 silicon also applies the RNM errata workaround, enables
 * context prefetching and RNM requests in CPT_AF_CTL, programs the CTX
 * flush timer, and sets CPT_AF_DIAG[FLT_DIS] for errata 38550 parts.
 */
1144 int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
1145 struct otx2_cpt_eng_grps *eng_grps)
1147 struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
1148 struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
1149 struct pci_dev *pdev = cptpf->pdev;
1150 struct fw_info_t fw_info;
1154 mutex_lock(&eng_grps->lock);
1156 * We don't create engine groups if it was already
1157 * made (when user enabled VFs for the first time)
1159 if (eng_grps->is_grps_created)
1162 ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid);
1167 * Create engine group with SE engines for kernel
1168 * crypto functionality (symmetric crypto)
1170 uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
1171 if (uc_info[0] == NULL) {
1172 dev_err(&pdev->dev, "Unable to find firmware for SE\n");
1176 engs[0].type = OTX2_CPT_SE_TYPES;
1177 engs[0].count = eng_grps->avail.max_se_cnt;
1179 ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
1180 (void **) uc_info, 1);
1185 * Create engine group with SE+IE engines for IPSec.
1186 * All SE engines will be shared with engine group 0.
1188 uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
1189 uc_info[1] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
1191 if (uc_info[1] == NULL) {
1192 dev_err(&pdev->dev, "Unable to find firmware for IE");
1194 goto delete_eng_grp;
1196 engs[0].type = OTX2_CPT_SE_TYPES;
1197 engs[0].count = eng_grps->avail.max_se_cnt;
1198 engs[1].type = OTX2_CPT_IE_TYPES;
1199 engs[1].count = eng_grps->avail.max_ie_cnt;
1201 ret = create_engine_group(&pdev->dev, eng_grps, engs, 2,
1202 (void **) uc_info, 1);
1204 goto delete_eng_grp;
1207 * Create engine group with AE engines for asymmetric
1208 * crypto functionality.
1210 uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
1211 if (uc_info[0] == NULL) {
1212 dev_err(&pdev->dev, "Unable to find firmware for AE");
1214 goto delete_eng_grp;
1216 engs[0].type = OTX2_CPT_AE_TYPES;
1217 engs[0].count = eng_grps->avail.max_ae_cnt;
1219 ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
1220 (void **) uc_info, 1);
1222 goto delete_eng_grp;
1224 eng_grps->is_grps_created = true;
1226 cpt_ucode_release_fw(&fw_info);
1228 if (is_dev_otx2(pdev))
1232 * Ensure RNM_ENTROPY_STATUS[NORMAL_CNT] = 0x40 before writing
1233 * CPT_AF_CTL[RNM_REQ_EN] = 1 as a workaround for HW errata.
1235 rnm_to_cpt_errata_fixup(&pdev->dev);
1237 otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, &reg_val,
1240 * Configure engine group mask to allow context prefetching
1241 * for the groups and enable random number request, to enable
1242 * CPT to request random numbers from RNM.
1244 reg_val |= OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16);
1245 otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
1246 reg_val, BLKADDR_CPT0);
1248 * Set interval to periodically flush dirty data for the next
1249 * CTX cache entry. Set the interval count to maximum supported
1252 otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
1253 CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);
1256 * Set CPT_AF_DIAG[FLT_DIS], as a workaround for HW errata, when
1257 * CPT_AF_DIAG[FLT_DIS] = 0 and a CPT engine access to LLC/DRAM
1258 * encounters a fault/poison, a rare case may result in
1259 * unpredictable data being delivered to a CPT engine.
1261 if (cpt_is_errata_38550_exists(pdev)) {
1262 otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
1263 &reg_val, BLKADDR_CPT0);
1264 otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
1265 reg_val | BIT_ULL(24), BLKADDR_CPT0);
1268 mutex_unlock(&eng_grps->lock);
/* Error paths: tear down any groups created, release firmware, unlock. */
1272 delete_engine_grps(pdev, eng_grps);
1274 cpt_ucode_release_fw(&fw_info);
1276 mutex_unlock(&eng_grps->lock);
/*
 * Detach every core from all engine groups on CPT block @blkaddr
 * (CPT_AF_EXEX_CTL2 = 0, zeroing the driver's reference counts), wait
 * for the cores to become idle, then disable them (CPT_AF_EXEX_CTL = 0).
 * Register writes are batched and flushed via
 * otx2_cpt_send_af_reg_requests().
 */
1281 static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores,
1283 int timeout = 10, ret;
1287 /* Disengage the cores from groups */
1288 for (i = 0; i < total_cores; i++) {
1289 ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
1290 CPT_AF_EXEX_CTL2(i), 0x0,
1295 cptpf->eng_grps.eng_ref_cnt[i] = 0;
1297 ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
1301 /* Wait for cores to become idle */
1304 usleep_range(10000, 20000);
/* Poll each core's status register for the idle indication. */
1308 for (i = 0; i < total_cores; i++) {
1309 ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
1311 CPT_AF_EXEX_STS(i), &reg,
1323 /* Disable the cores */
1324 for (i = 0; i < total_cores; i++) {
1325 ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
1326 CPT_AF_EXEX_CTL(i), 0x0,
1331 return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
/*
 * Disable every engine (SE + IE + AE) on CPT1 (when present) and CPT0.
 */
1334 int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
1336 int total_cores, ret;
1338 total_cores = cptpf->eng_grps.avail.max_se_cnt +
1339 cptpf->eng_grps.avail.max_ie_cnt +
1340 cptpf->eng_grps.avail.max_ae_cnt;
1342 if (cptpf->has_cpt1) {
1343 ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1);
1347 return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0);
/*
 * Delete all engine groups and free every per-slot engine bitmap
 * allocated by otx2_cpt_init_eng_grps().
 */
1350 void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
1351 struct otx2_cpt_eng_grps *eng_grps)
1353 struct otx2_cpt_eng_grp_info *grp;
1356 mutex_lock(&eng_grps->lock);
1357 delete_engine_grps(pdev, eng_grps);
1358 /* Release memory */
1359 for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
1360 grp = &eng_grps->grp[i];
1361 for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
1362 kfree(grp->engs[j].bmap);
1363 grp->engs[j].bmap = NULL;
1366 mutex_unlock(&eng_grps->lock);
/*
 * Initialize the engine-groups bookkeeping: set available counts from
 * the hardware maxima, validate the total against OTX2_CPT_MAX_ENGINES,
 * and allocate a per-slot engine bitmap for every group.  Cleans up via
 * otx2_cpt_cleanup_eng_grps() on failure.
 */
1369 int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
1370 struct otx2_cpt_eng_grps *eng_grps)
1372 struct otx2_cpt_eng_grp_info *grp;
1375 mutex_init(&eng_grps->lock);
1376 eng_grps->obj = pci_get_drvdata(pdev);
/* Initially all engines are free. */
1377 eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
1378 eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;
1379 eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;
1381 eng_grps->engs_num = eng_grps->avail.max_se_cnt +
1382 eng_grps->avail.max_ie_cnt +
1383 eng_grps->avail.max_ae_cnt;
1384 if (eng_grps->engs_num > OTX2_CPT_MAX_ENGINES) {
1386 "Number of engines %d > than max supported %d\n",
1387 eng_grps->engs_num, OTX2_CPT_MAX_ENGINES);
1389 goto cleanup_eng_grps;
/* Allocate an engine bitmap per reservation slot of each group. */
1392 for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
1393 grp = &eng_grps->grp[i];
1397 for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
1399 kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
1400 sizeof(long), GFP_KERNEL);
1401 if (!grp->engs[j].bmap) {
1403 goto cleanup_eng_grps;
1410 otx2_cpt_cleanup_eng_grps(pdev, eng_grps);
/*
 * Create temporary single-type engine groups (AE, SE, IE) used only to
 * run the LOAD_FVC capability-discovery operation.  Firmware is
 * released and groups deleted on any failure.
 */
1414 static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
1415 struct otx2_cpt_eng_grps *eng_grps)
1417 struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
1418 struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
1419 struct fw_info_t fw_info;
1422 mutex_lock(&eng_grps->lock);
1423 ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid);
1425 mutex_unlock(&eng_grps->lock);
/* Group with AE engines only. */
1429 uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
1430 if (uc_info[0] == NULL) {
1431 dev_err(&pdev->dev, "Unable to find firmware for AE\n");
1435 engs[0].type = OTX2_CPT_AE_TYPES;
1438 ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
1439 (void **) uc_info, 0);
/* Group with SE engines only. */
1443 uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
1444 if (uc_info[0] == NULL) {
1445 dev_err(&pdev->dev, "Unable to find firmware for SE\n");
1447 goto delete_eng_grp;
1449 engs[0].type = OTX2_CPT_SE_TYPES;
1452 ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
1453 (void **) uc_info, 0);
1455 goto delete_eng_grp;
/* Group with IE engines only. */
1457 uc_info[0] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
1458 if (uc_info[0] == NULL) {
1459 dev_err(&pdev->dev, "Unable to find firmware for IE\n");
1461 goto delete_eng_grp;
1463 engs[0].type = OTX2_CPT_IE_TYPES;
1466 ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
1467 (void **) uc_info, 0);
1469 goto delete_eng_grp;
1471 cpt_ucode_release_fw(&fw_info);
1472 mutex_unlock(&eng_grps->lock);
/* Error paths: delete groups created so far, release firmware, unlock. */
1476 delete_engine_grps(pdev, eng_grps);
1478 cpt_ucode_release_fw(&fw_info);
1479 mutex_unlock(&eng_grps->lock);
1484 * Get CPT HW capabilities using LOAD_FVC operation.
/*
 * Runs once (guarded by is_eng_caps_discovered): creates throwaway
 * per-type engine groups, initializes one CPT LF, submits a LOAD_FVC
 * instruction per engine type, and busy-waits on the completion code
 * before reading each type's capability word into cptpf->eng_caps[].
 */
1486 int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
1488 struct otx2_cptlfs_info *lfs = &cptpf->lfs;
1489 struct otx2_cpt_iq_command iq_cmd;
1490 union otx2_cpt_opcode opcode;
1491 union otx2_cpt_res_s *result;
1492 union otx2_cpt_inst_s inst;
1493 dma_addr_t rptr_baddr;
1494 struct pci_dev *pdev;
1495 u32 len, compl_rlen;
1500 * We don't get capabilities if it was already done
1501 * (when user enabled VFs for the first time)
1503 if (cptpf->is_eng_caps_discovered)
1508 * Create engine groups for each type to submit LOAD_FVC op and
1509 * get engine's capabilities.
1511 ret = create_eng_caps_discovery_grps(pdev, &cptpf->eng_grps);
1515 otx2_cptlf_set_dev_info(lfs, cptpf->pdev, cptpf->reg_base,
1516 &cptpf->afpf_mbox, BLKADDR_CPT0);
1517 ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
1518 OTX2_CPT_QUEUE_HI_PRIO, 1);
/* Result buffer: completion record (aligned) followed by LOAD_FVC data. */
1522 compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
1523 len = compl_rlen + LOADFVC_RLEN;
1525 result = kzalloc(len, GFP_KERNEL);
1530 rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
1532 if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
1533 dev_err(&pdev->dev, "DMA mapping failed\n");
1537 rptr = (u8 *)result + compl_rlen;
1539 /* Fill in the command */
1540 opcode.s.major = LOADFVC_MAJOR_OP;
1541 opcode.s.minor = LOADFVC_MINOR_OP;
1544 iq_cmd.cmd.s.opcode = cpu_to_be16(opcode.flags);
1546 /* 64-bit swap for microcode data reads, not needed for addresses */
1547 cpu_to_be64s(&iq_cmd.cmd.u);
1549 iq_cmd.rptr = rptr_baddr + compl_rlen;
/* Submit one LOAD_FVC per engine type and poll for completion. */
1552 for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
1553 result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
1554 iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
1556 otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
1557 lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
/* Busy-wait until hardware overwrites the INIT completion code. */
1559 while (lfs->ops->cpt_get_compcode(result) ==
1560 OTX2_CPT_COMPLETION_CODE_INIT)
1563 cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
1565 dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
1566 cptpf->is_eng_caps_discovered = true;
/* Teardown: shut down the LF and delete the discovery groups. */
1571 otx2_cptlf_shutdown(lfs);
1573 delete_engine_grps(pdev, &cptpf->eng_grps);
/*
 * otx2_cpt_dl_custom_egrp_create() - devlink handler that creates a custom
 * engine group from a user-supplied string.
 *
 * Parses ctx->val.vstr, a ';'-separated list mixing engine-count tokens
 * ("se:N", "ie:N", "ae:N") and up to two microcode file names, then loads
 * the firmware and calls create_engine_group().  At most two engine types
 * per group; the only two-type combination accepted is SE+IE, with SE
 * forced into slot 0.  Rejected while VFs are enabled or before the
 * default groups exist.
 *
 * Returns 0 on success, negative errno (-EINVAL on parse errors) otherwise.
 */
1578 int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
1579 struct devlink_param_gset_ctx *ctx)
1581 struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { { 0 } };
1582 struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {};
1583 struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
1584 char *ucode_filename[OTX2_CPT_MAX_ETYPES_PER_GRP];
1585 char tmp_buf[OTX2_CPT_NAME_LENGTH] = { 0 };
1586 struct device *dev = &cptpf->pdev->dev;
1587 char *start, *val, *err_msg, *tmp;
1588 int grp_idx = 0, ret = -EINVAL;
1589 bool has_se, has_ie, has_ae;
1590 struct fw_info_t fw_info;
1593 if (!eng_grps->is_grps_created) {
1594 dev_err(dev, "Not allowed before creating the default groups\n");
1597 err_msg = "Invalid engine group format";
/* Work on a local copy because strsep() below mutates the string. */
1598 strscpy(tmp_buf, ctx->val.vstr, strlen(ctx->val.vstr) + 1)
1601 has_se = has_ie = has_ae = false;
/* Tokenize on ';': each token is either "<type>:<count>" or a filename. */
1604 val = strsep(&start, ";");
/*
 * "se:<count>" token.  Engine-count tokens must precede any ucode
 * filename (hence the ucode_idx check), and each type at most once.
 */
1611 if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
1612 if (has_se || ucode_idx)
1614 tmp = strsep(&val, ":");
/* Type keyword must be exactly two chars ("se"), nothing appended. */
1620 if (strlen(tmp) != 2)
1622 if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
1624 engs[grp_idx++].type = OTX2_CPT_SE_TYPES;
/* "ae:<count>" token - same rules as the SE branch above. */
1626 } else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
1627 if (has_ae || ucode_idx)
1629 tmp = strsep(&val, ":");
1635 if (strlen(tmp) != 2)
1637 if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
1639 engs[grp_idx++].type = OTX2_CPT_AE_TYPES;
/* "ie:<count>" token - same rules as the SE branch above. */
1641 } else if (!strncasecmp(val, "ie", 2) && strchr(val, ':')) {
1642 if (has_ie || ucode_idx)
1644 tmp = strsep(&val, ":");
1650 if (strlen(tmp) != 2)
1652 if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
1654 engs[grp_idx++].type = OTX2_CPT_IE_TYPES;
/* Anything else is a ucode filename; spaces are not allowed in it. */
1661 if (strnstr(val, " ", strlen(val)))
1663 ucode_filename[ucode_idx++] = val;
1667 /* Validate input parameters */
/* Need at least one engine-count token and one ucode filename. */
1668 if (!(grp_idx && ucode_idx))
/* A second ucode only makes sense with two engine types. */
1671 if (ucode_idx > 1 && grp_idx < 2)
1674 if (grp_idx > OTX2_CPT_MAX_ETYPES_PER_GRP) {
1675 err_msg = "Error max 2 engine types can be attached";
/*
 * For two-type groups, only SE+IE is valid; the sum-of-types check
 * relies on the OTX2_CPT_*_TYPES enum values being distinct enough
 * that SE+IE is the unique pair with this sum.
 */
1680 if ((engs[0].type + engs[1].type) !=
1681 (OTX2_CPT_SE_TYPES + OTX2_CPT_IE_TYPES)) {
1682 err_msg = "Only combination of SE+IE engines is allowed";
1685 /* Keep SE engines at zero index */
1686 if (engs[1].type == OTX2_CPT_SE_TYPES)
1687 swap(engs[0], engs[1]);
1689 mutex_lock(&eng_grps->lock);
/* Engine groups must not change underneath active VFs. */
1691 if (cptpf->enabled_vfs) {
1692 dev_err(dev, "Disable VFs before modifying engine groups\n");
1696 INIT_LIST_HEAD(&fw_info.ucodes);
/* Load the first (mandatory) microcode image. */
1698 ret = load_fw(dev, &fw_info, ucode_filename[0], eng_grps->rid);
1700 dev_err(dev, "Unable to load firmware %s\n", ucode_filename[0]);
/* Optionally load the second image for two-type (SE+IE) groups. */
1703 if (ucode_idx > 1) {
1704 ret = load_fw(dev, &fw_info, ucode_filename[1], eng_grps->rid);
1706 dev_err(dev, "Unable to load firmware %s\n",
/* Match each requested engine type to one of the loaded images. */
1711 uc_info[0] = get_ucode(&fw_info, engs[0].type);
1712 if (uc_info[0] == NULL) {
1713 dev_err(dev, "Unable to find firmware for %s\n",
1714 get_eng_type_str(engs[0].type));
1718 if (ucode_idx > 1) {
1719 uc_info[1] = get_ucode(&fw_info, engs[1].type);
1720 if (uc_info[1] == NULL) {
1721 dev_err(dev, "Unable to find firmware for %s\n",
1722 get_eng_type_str(engs[1].type));
/* use_uc_from_tar_arch=1: keep ucode images referenced by the group. */
1727 ret = create_engine_group(dev, eng_grps, engs, grp_idx,
1728 (void **)uc_info, 1);
1731 cpt_ucode_release_fw(&fw_info);
1733 mutex_unlock(&eng_grps->lock);
1736 dev_err(dev, "%s\n", err_msg);
/*
 * otx2_cpt_dl_custom_egrp_delete() - devlink handler that deletes an
 * engine group selected by a user string of the form "egrp:<N>".
 *
 * Validates that the string starts with "egrp", that <N> parses as a
 * decimal group index within [0, OTX2_CPT_MAX_ENGINE_GROUPS), and that
 * the group is currently enabled, then deletes it under eng_grps->lock.
 *
 * Returns 0 on success, negative errno on parse/validation failure or
 * from delete_engine_group().
 */
1740 int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
1741 struct devlink_param_gset_ctx *ctx)
1743 struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
1744 struct device *dev = &cptpf->pdev->dev;
1745 char *tmp, *err_msg;
1749 err_msg = "Invalid input string format(ex: egrp:0)";
/* String must begin with the literal keyword "egrp" (case-insensitive). */
1750 if (strncasecmp(ctx->val.vstr, "egrp", 4))
1752 tmp = ctx->val.vstr;
/* Parse the decimal group index following the keyword. */
1756 if (kstrtoint(tmp, 10, &egrp))
1759 if (egrp < 0 || egrp >= OTX2_CPT_MAX_ENGINE_GROUPS) {
1760 dev_err(dev, "Invalid engine group %d", egrp);
/* Only a currently-configured group can be deleted. */
1763 if (!eng_grps->grp[egrp].is_enabled) {
1764 dev_err(dev, "Error engine_group%d is not configured", egrp);
/* Serialize against concurrent group create/delete. */
1767 mutex_lock(&eng_grps->lock);
1768 ret = delete_engine_group(dev, &eng_grps->grp[egrp]);
1769 mutex_unlock(&eng_grps->lock);
1774 dev_err(dev, "%s\n", err_msg);
/*
 * get_engs_info() - format a human-readable summary of the engines
 * reserved by @eng_grp into @buf (of @size bytes) via scnprintf().
 *
 * Iterates the group's engine slots; @idx selects a single slot, or -1
 * for all slots (comma-separated).  When the group mirrors another
 * group, the shared engine counts from the mirrored group are folded in
 * and annotated as "(N shared with engine_groupM)".
 */
1778 static void get_engs_info(struct otx2_cpt_eng_grp_info *eng_grp, char *buf,
1781 struct otx2_cpt_engs_rsvd *mirrored_engs = NULL;
1782 struct otx2_cpt_engs_rsvd *engs;
1786 for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
1787 engs = &eng_grp->engs[i];
/* When a specific slot was requested, skip every other slot. */
1790 if (idx != -1 && idx != i)
/* Look up the same engine type in the mirrored (parent) group. */
1793 if (eng_grp->mirror.is_ena)
1794 mirrored_engs = find_engines_by_type(
1795 &eng_grp->g->grp[eng_grp->mirror.idx],
/* Separate multiple slots with ", " in all-slots (-1) mode. */
1797 if (i > 0 && idx == -1) {
1799 scnprintf(buf + len, size - len, ", ");
/* "<count> <type> " - count includes mirrored engines if present. */
1803 scnprintf(buf + len, size - len, "%d %s ",
1804 mirrored_engs ? engs->count + mirrored_engs->count :
1806 get_eng_type_str(engs->type));
1807 if (mirrored_engs) {
1809 scnprintf(buf + len, size - len,
1810 "(%d shared with engine_group%d) ",
1812 engs->count + mirrored_engs->count :
1813 mirrored_engs->count,
1814 eng_grp->mirror.idx);
1819 void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
1821 struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
1822 struct otx2_cpt_eng_grp_info *mirrored_grp;
1823 char engs_info[2 * OTX2_CPT_NAME_LENGTH];
1824 struct otx2_cpt_eng_grp_info *grp;
1825 struct otx2_cpt_engs_rsvd *engs;
1828 pr_debug("Engine groups global info");
1829 pr_debug("max SE %d, max IE %d, max AE %d", eng_grps->avail.max_se_cnt,
1830 eng_grps->avail.max_ie_cnt, eng_grps->avail.max_ae_cnt);
1831 pr_debug("free SE %d", eng_grps->avail.se_cnt);
1832 pr_debug("free IE %d", eng_grps->avail.ie_cnt);
1833 pr_debug("free AE %d", eng_grps->avail.ae_cnt);
1835 for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
1836 grp = &eng_grps->grp[i];
1837 pr_debug("engine_group%d, state %s", i,
1838 grp->is_enabled ? "enabled" : "disabled");
1839 if (grp->is_enabled) {
1840 mirrored_grp = &eng_grps->grp[grp->mirror.idx];
1841 pr_debug("Ucode0 filename %s, version %s",
1842 grp->mirror.is_ena ?
1843 mirrored_grp->ucode[0].filename :
1844 grp->ucode[0].filename,
1845 grp->mirror.is_ena ?
1846 mirrored_grp->ucode[0].ver_str :
1847 grp->ucode[0].ver_str);
1848 if (is_2nd_ucode_used(grp))
1849 pr_debug("Ucode1 filename %s, version %s",
1850 grp->ucode[1].filename,
1851 grp->ucode[1].ver_str);
1854 for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
1855 engs = &grp->engs[j];
1859 get_engs_info(grp, engs_info,
1860 2 * OTX2_CPT_NAME_LENGTH, j);
1861 pr_debug("Slot%d: %s", j, engs_info);
1862 bitmap_to_arr32(mask, engs->bmap,
1863 eng_grps->engs_num);
1864 if (is_dev_otx2(cptpf->pdev))
1865 pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x",
1866 mask[3], mask[2], mask[1],
1869 pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x %8.8x",
1870 mask[4], mask[3], mask[2], mask[1],