1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Marvell. */
4 #include "otx2_cpt_common.h"
5 #include "otx2_cptvf.h"
6 #include "otx2_cptlf.h"
7 #include "otx2_cptvf_algs.h"
11 #define OTX2_CPTVF_DRV_NAME "rvu_cptvf"
13 static void cptvf_enable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
15 /* Clear interrupt if any */
16 otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
19 /* Enable PF-VF interrupt */
20 otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
21 OTX2_RVU_VF_INT_ENA_W1S, 0x1ULL);
24 static void cptvf_disable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
26 /* Disable PF-VF interrupt */
27 otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
28 OTX2_RVU_VF_INT_ENA_W1C, 0x1ULL);
30 /* Clear interrupt if any */
31 otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
35 static int cptvf_register_interrupts(struct otx2_cptvf_dev *cptvf)
40 num_vec = pci_msix_vec_count(cptvf->pdev);
45 ret = pci_alloc_irq_vectors(cptvf->pdev, num_vec, num_vec,
48 dev_err(&cptvf->pdev->dev,
49 "Request for %d msix vectors failed\n", num_vec);
52 irq = pci_irq_vector(cptvf->pdev, OTX2_CPT_VF_INT_VEC_E_MBOX);
53 /* Register VF<=>PF mailbox interrupt handler */
54 ret = devm_request_irq(&cptvf->pdev->dev, irq,
55 otx2_cptvf_pfvf_mbox_intr, 0,
56 "CPTPFVF Mbox", cptvf);
59 /* Enable PF-VF mailbox interrupts */
60 cptvf_enable_pfvf_mbox_intrs(cptvf);
62 ret = otx2_cpt_send_ready_msg(&cptvf->pfvf_mbox, cptvf->pdev);
64 dev_warn(&cptvf->pdev->dev,
65 "PF not responding to mailbox, deferring probe\n");
66 cptvf_disable_pfvf_mbox_intrs(cptvf);
72 static int cptvf_pfvf_mbox_init(struct otx2_cptvf_dev *cptvf)
74 struct pci_dev *pdev = cptvf->pdev;
75 resource_size_t offset, size;
79 alloc_ordered_workqueue("cpt_pfvf_mailbox",
80 WQ_HIGHPRI | WQ_MEM_RECLAIM);
81 if (!cptvf->pfvf_mbox_wq)
84 if (test_bit(CN10K_MBOX, &cptvf->cap_flag)) {
85 /* For cn10k platform, VF mailbox region is in its BAR2
88 cptvf->pfvf_mbox_base = cptvf->reg_base +
89 CN10K_CPT_VF_MBOX_REGION;
91 offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
92 size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
93 /* Map PF-VF mailbox memory */
94 cptvf->pfvf_mbox_base = devm_ioremap_wc(&pdev->dev, offset,
96 if (!cptvf->pfvf_mbox_base) {
97 dev_err(&pdev->dev, "Unable to map BAR4\n");
103 ret = otx2_mbox_init(&cptvf->pfvf_mbox, cptvf->pfvf_mbox_base,
104 pdev, cptvf->reg_base, MBOX_DIR_VFPF, 1);
108 ret = otx2_cpt_mbox_bbuf_init(cptvf, pdev);
112 INIT_WORK(&cptvf->pfvf_mbox_work, otx2_cptvf_pfvf_mbox_handler);
116 otx2_mbox_destroy(&cptvf->pfvf_mbox);
118 destroy_workqueue(cptvf->pfvf_mbox_wq);
122 static void cptvf_pfvf_mbox_destroy(struct otx2_cptvf_dev *cptvf)
124 destroy_workqueue(cptvf->pfvf_mbox_wq);
125 otx2_mbox_destroy(&cptvf->pfvf_mbox);
/* Tasklet body: post-process completed CPT instructions for one LF wqe. */
static void cptlf_work_handler(unsigned long data)
{
	otx2_cpt_post_process((struct otx2_cptlf_wqe *) data);
}
133 static void cleanup_tasklet_work(struct otx2_cptlfs_info *lfs)
137 for (i = 0; i < lfs->lfs_num; i++) {
141 tasklet_kill(&lfs->lf[i].wqe->work);
142 kfree(lfs->lf[i].wqe);
143 lfs->lf[i].wqe = NULL;
147 static int init_tasklet_work(struct otx2_cptlfs_info *lfs)
149 struct otx2_cptlf_wqe *wqe;
152 for (i = 0; i < lfs->lfs_num; i++) {
153 wqe = kzalloc(sizeof(struct otx2_cptlf_wqe), GFP_KERNEL);
156 goto cleanup_tasklet;
159 tasklet_init(&wqe->work, cptlf_work_handler, (u64) wqe);
162 lfs->lf[i].wqe = wqe;
167 cleanup_tasklet_work(lfs);
171 static void free_pending_queues(struct otx2_cptlfs_info *lfs)
175 for (i = 0; i < lfs->lfs_num; i++) {
176 kfree(lfs->lf[i].pqueue.head);
177 lfs->lf[i].pqueue.head = NULL;
181 static int alloc_pending_queues(struct otx2_cptlfs_info *lfs)
188 for (i = 0; i < lfs->lfs_num; i++) {
189 lfs->lf[i].pqueue.qlen = OTX2_CPT_INST_QLEN_MSGS;
190 size = lfs->lf[i].pqueue.qlen *
191 sizeof(struct otx2_cpt_pending_entry);
193 lfs->lf[i].pqueue.head = kzalloc(size, GFP_KERNEL);
194 if (!lfs->lf[i].pqueue.head) {
199 /* Initialize spin lock */
200 spin_lock_init(&lfs->lf[i].pqueue.lock);
205 free_pending_queues(lfs);
/* Undo lf_sw_init(): release tasklet work entries and pending queues. */
static void lf_sw_cleanup(struct otx2_cptlfs_info *lfs)
{
	cleanup_tasklet_work(lfs);
	free_pending_queues(lfs);
}
215 static int lf_sw_init(struct otx2_cptlfs_info *lfs)
219 ret = alloc_pending_queues(lfs);
221 dev_err(&lfs->pdev->dev,
222 "Allocating pending queues failed\n");
225 ret = init_tasklet_work(lfs);
227 dev_err(&lfs->pdev->dev,
228 "Tasklet work init failed\n");
229 goto pending_queues_free;
234 free_pending_queues(lfs);
238 static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs)
240 atomic_set(&lfs->state, OTX2_CPTLF_IN_RESET);
242 /* Remove interrupts affinity */
243 otx2_cptlf_free_irqs_affinity(lfs);
244 /* Disable instruction queue */
245 otx2_cptlf_disable_iqueues(lfs);
246 /* Unregister crypto algorithms */
247 otx2_cpt_crypto_exit(lfs->pdev, THIS_MODULE);
248 /* Unregister LFs interrupts */
249 otx2_cptlf_unregister_misc_interrupts(lfs);
250 otx2_cptlf_unregister_done_interrupts(lfs);
251 /* Cleanup LFs software side */
253 /* Free instruction queues */
254 otx2_cpt_free_instruction_queues(lfs);
255 /* Send request to detach LFs */
256 otx2_cpt_detach_rsrcs_msg(lfs);
260 static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
262 struct otx2_cptlfs_info *lfs = &cptvf->lfs;
263 struct device *dev = &cptvf->pdev->dev;
267 /* Get engine group number for symmetric crypto */
268 cptvf->lfs.kcrypto_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
269 ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_SE_TYPES);
273 if (cptvf->lfs.kcrypto_eng_grp_num == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
274 dev_err(dev, "Engine group for kernel crypto not available\n");
278 eng_grp_msk = 1 << cptvf->lfs.kcrypto_eng_grp_num;
280 ret = otx2_cptvf_send_kvf_limits_msg(cptvf);
284 lfs_num = cptvf->lfs.kvf_limits;
286 otx2_cptlf_set_dev_info(lfs, cptvf->pdev, cptvf->reg_base,
287 &cptvf->pfvf_mbox, cptvf->blkaddr);
288 ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
293 /* Get msix offsets for attached LFs */
294 ret = otx2_cpt_msix_offset_msg(lfs);
298 /* Initialize LFs software side */
299 ret = lf_sw_init(lfs);
303 /* Register LFs interrupts */
304 ret = otx2_cptlf_register_misc_interrupts(lfs);
308 ret = otx2_cptlf_register_done_interrupts(lfs);
312 /* Set interrupts affinity */
313 ret = otx2_cptlf_set_irqs_affinity(lfs);
315 goto unregister_intr;
317 atomic_set(&lfs->state, OTX2_CPTLF_STARTED);
318 /* Register crypto algorithms */
319 ret = otx2_cpt_crypto_init(lfs->pdev, THIS_MODULE, lfs_num, 1);
321 dev_err(&lfs->pdev->dev, "algorithms registration failed\n");
327 otx2_cptlf_free_irqs_affinity(lfs);
329 otx2_cptlf_unregister_misc_interrupts(lfs);
330 otx2_cptlf_unregister_done_interrupts(lfs);
334 otx2_cptlf_shutdown(lfs);
339 static int otx2_cptvf_probe(struct pci_dev *pdev,
340 const struct pci_device_id *ent)
342 struct device *dev = &pdev->dev;
343 struct otx2_cptvf_dev *cptvf;
346 cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
350 ret = pcim_enable_device(pdev);
352 dev_err(dev, "Failed to enable PCI device\n");
356 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
358 dev_err(dev, "Unable to get usable DMA configuration\n");
362 ret = pcim_request_all_regions(pdev, OTX2_CPTVF_DRV_NAME);
364 dev_err(dev, "Couldn't get PCI resources 0x%x\n", ret);
367 pci_set_master(pdev);
368 pci_set_drvdata(pdev, cptvf);
371 /* Map VF's configuration registers */
372 cptvf->reg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
373 if (!cptvf->reg_base) {
375 dev_err(dev, "Couldn't ioremap PCI resource 0x%x\n", ret);
379 otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag);
381 ret = cn10k_cptvf_lmtst_init(cptvf);
385 /* Initialize PF<=>VF mailbox */
386 ret = cptvf_pfvf_mbox_init(cptvf);
390 /* Register interrupts */
391 ret = cptvf_register_interrupts(cptvf);
393 goto destroy_pfvf_mbox;
395 cptvf->blkaddr = BLKADDR_CPT0;
397 cptvf_hw_ops_get(cptvf);
399 ret = otx2_cptvf_send_caps_msg(cptvf);
401 dev_err(&pdev->dev, "Couldn't get CPT engine capabilities.\n");
402 goto unregister_interrupts;
404 if (cptvf->eng_caps[OTX2_CPT_SE_TYPES] & BIT_ULL(35))
405 cptvf->lfs.ops->cpt_sg_info_create = cn10k_sgv2_info_create;
407 /* Initialize CPT LFs */
408 ret = cptvf_lf_init(cptvf);
410 goto unregister_interrupts;
414 unregister_interrupts:
415 cptvf_disable_pfvf_mbox_intrs(cptvf);
417 cptvf_pfvf_mbox_destroy(cptvf);
419 pci_set_drvdata(pdev, NULL);
424 static void otx2_cptvf_remove(struct pci_dev *pdev)
426 struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
429 dev_err(&pdev->dev, "Invalid CPT VF device.\n");
432 cptvf_lf_shutdown(&cptvf->lfs);
433 /* Disable PF-VF mailbox interrupt */
434 cptvf_disable_pfvf_mbox_intrs(cptvf);
435 /* Destroy PF-VF mbox */
436 cptvf_pfvf_mbox_destroy(cptvf);
437 pci_set_drvdata(pdev, NULL);
440 /* Supported devices */
441 static const struct pci_device_id otx2_cptvf_id_table[] = {
442 {PCI_VDEVICE(CAVIUM, OTX2_CPT_PCI_VF_DEVICE_ID), 0},
443 {PCI_VDEVICE(CAVIUM, CN10K_CPT_PCI_VF_DEVICE_ID), 0},
444 { 0, } /* end of table */
447 static struct pci_driver otx2_cptvf_pci_driver = {
448 .name = OTX2_CPTVF_DRV_NAME,
449 .id_table = otx2_cptvf_id_table,
450 .probe = otx2_cptvf_probe,
451 .remove = otx2_cptvf_remove,
454 module_pci_driver(otx2_cptvf_pci_driver);
456 MODULE_IMPORT_NS("CRYPTO_DEV_OCTEONTX2_CPT");
458 MODULE_AUTHOR("Marvell");
459 MODULE_DESCRIPTION("Marvell RVU CPT Virtual Function Driver");
460 MODULE_LICENSE("GPL v2");
461 MODULE_DEVICE_TABLE(pci, otx2_cptvf_id_table);