// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/module.h>
#include <linux/pci.h>

#include "mana.h"

static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
{
	return readl(g->bar0_va + offset);
}

static u64 mana_gd_r64(struct gdma_context *g, u64 offset)
{
	return readq(g->bar0_va + offset);
}
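
/* Read the register block in BAR0 to locate the doorbell pages and the
 * shared-memory region.
 */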
static void mana_gd_init_registers(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);

	gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF;

	gc->db_page_base = gc->bar0_va +
				mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);

	gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
}
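
/* Ask the device for its resource limits over the HW channel, then clamp
 * the usable MSI-X count and the number of queues accordingly.
 */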
static int mana_gd_query_max_resources(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_query_max_resources_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
			     sizeof(req), sizeof(resp));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to query resource info: %d, 0x%x\n",
			err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	if (gc->num_msix_usable > resp.max_msix)
		gc->num_msix_usable = resp.max_msix;

	if (gc->num_msix_usable <= 1)
		return -ENOSPC;

	gc->max_num_queues = num_online_cpus();
	if (gc->max_num_queues > MANA_MAX_NUM_QUEUES)
		gc->max_num_queues = MANA_MAX_NUM_QUEUES;

	if (gc->max_num_queues > resp.max_eq)
		gc->max_num_queues = resp.max_eq;

	if (gc->max_num_queues > resp.max_cq)
		gc->max_num_queues = resp.max_cq;

	if (gc->max_num_queues > resp.max_sq)
		gc->max_num_queues = resp.max_sq;

	if (gc->max_num_queues > resp.max_rq)
		gc->max_num_queues = resp.max_rq;

	return 0;
}

static int mana_gd_detect_devices(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_list_devices_resp resp = {};
	struct gdma_general_req req = {};
	struct gdma_dev_id dev;
	u32 i, max_num_devs;
	u16 dev_type;
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
			     sizeof(resp));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to detect devices: %d, 0x%x\n", err,
			resp.hdr.status);
		return err ? err : -EPROTO;
	}

	max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);

	for (i = 0; i < max_num_devs; i++) {
		dev = resp.devs[i];
		dev_type = dev.type;

		/* HWC is already detected in mana_hwc_create_channel(). */
		if (dev_type == GDMA_DEVICE_HWC)
			continue;

		if (dev_type == GDMA_DEVICE_MANA) {
			gc->mana.gdma_context = gc;
			gc->mana.dev_id = dev;
		}
	}

	return gc->mana.dev_id.type == 0 ? -ENODEV : 0;
}
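
/* All management commands are sent to the device over the HW channel. */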
int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;

	return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
}
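
/* Allocate a DMA-coherent buffer for a queue; the length must be a
 * power of two and at least PAGE_SIZE.
 */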
int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi)
{
	dma_addr_t dma_handle;
	void *buf;

	if (length < PAGE_SIZE || !is_power_of_2(length))
		return -EINVAL;

	gmi->dev = gc->dev;
	buf = dma_alloc_coherent(gmi->dev, length, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	gmi->dma_handle = dma_handle;
	gmi->virt_addr = buf;
	gmi->length = length;

	return 0;
}

void mana_gd_free_memory(struct gdma_mem_info *gmi)
{
	dma_free_coherent(gmi->dev, gmi->length, gmi->virt_addr,
			  gmi->dma_handle);
}

static int mana_gd_create_hw_eq(struct gdma_context *gc,
				struct gdma_queue *queue)
{
	struct gdma_create_queue_resp resp = {};
	struct gdma_create_queue_req req = {};
	int err;

	if (queue->type != GDMA_EQ)
		return -EINVAL;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_QUEUE,
			     sizeof(req), sizeof(resp));

	req.hdr.dev_id = queue->gdma_dev->dev_id;
	req.type = queue->type;
	req.pdid = queue->gdma_dev->pdid;
	req.doolbell_id = queue->gdma_dev->doorbell; /* sic: field name as declared in the request struct */
	req.gdma_region = queue->mem_info.gdma_region;
	req.queue_size = queue->queue_size;
	req.log2_throttle_limit = queue->eq.log2_throttle_limit;
	req.eq_pci_msix_index = queue->eq.msix_index;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to create queue: %d, 0x%x\n", err,
			resp.hdr.status);
		return err ? err : -EPROTO;
	}

	queue->id = resp.queue_index;
	queue->eq.disable_needed = true;
	queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;

	return 0;
}

static int mana_gd_disable_queue(struct gdma_queue *queue)
{
	struct gdma_context *gc = queue->gdma_dev->gdma_context;
	struct gdma_disable_queue_req req = {};
	struct gdma_general_resp resp = {};
	int err;

	WARN_ON(queue->type != GDMA_EQ);

	mana_gd_init_req_hdr(&req.hdr, GDMA_DISABLE_QUEUE,
			     sizeof(req), sizeof(resp));

	req.hdr.dev_id = queue->gdma_dev->dev_id;
	req.type = queue->type;
	req.queue_index = queue->id;
	req.alloc_res_id_on_creation = 1;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
			resp.hdr.status);
		return err ? err : -EPROTO;
	}

	return 0;
}
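
/* Offsets of the per-queue-type doorbell registers within a doorbell page. */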
#define DOORBELL_OFFSET_SQ	0x0
#define DOORBELL_OFFSET_RQ	0x400
#define DOORBELL_OFFSET_CQ	0x800
#define DOORBELL_OFFSET_EQ	0xFF8

static void mana_gd_ring_doorbell(struct gdma_context *gc, u32 db_index,
				  enum gdma_queue_type q_type, u32 qid,
				  u32 tail_ptr, u8 num_req)
{
	void __iomem *addr = gc->db_page_base + gc->db_page_size * db_index;
	union gdma_doorbell_entry e = {};

	switch (q_type) {
	case GDMA_EQ:
		e.eq.id = qid;
		e.eq.tail_ptr = tail_ptr;
		e.eq.arm = num_req;

		addr += DOORBELL_OFFSET_EQ;
		break;

	case GDMA_CQ:
		e.cq.id = qid;
		e.cq.tail_ptr = tail_ptr;
		e.cq.arm = num_req;

		addr += DOORBELL_OFFSET_CQ;
		break;

	case GDMA_RQ:
		e.rq.id = qid;
		e.rq.tail_ptr = tail_ptr;
		e.rq.wqe_cnt = num_req;

		addr += DOORBELL_OFFSET_RQ;
		break;

	case GDMA_SQ:
		e.sq.id = qid;
		e.sq.tail_ptr = tail_ptr;

		addr += DOORBELL_OFFSET_SQ;
		break;

	default:
		WARN_ON(1);
		return;
	}

	/* Ensure all writes are done before ringing the doorbell */
	wmb();

	writeq(e.as_uint64, addr);
}

void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
{
	mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
			      queue->id, queue->head * GDMA_WQE_BU_SIZE, 1);
}

void mana_gd_arm_cq(struct gdma_queue *cq)
{
	struct gdma_context *gc = cq->gdma_dev->gdma_context;
	u32 num_cqe = cq->queue_size / GDMA_CQE_SIZE;
	u32 head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS);

	mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
			      head, SET_ARM_BIT);
}
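
/* Dispatch one event queue entry: completion events are routed to the
 * registered CQ callback; test and HWC-init events go to the EQ owner.
 */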
static void mana_gd_process_eqe(struct gdma_queue *eq)
{
	u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
	struct gdma_context *gc = eq->gdma_dev->gdma_context;
	struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
	union gdma_eqe_info eqe_info;
	enum gdma_eqe_type type;
	struct gdma_event event;
	struct gdma_queue *cq;
	struct gdma_eqe *eqe;
	u32 cq_id;

	eqe = &eq_eqe_ptr[head];
	eqe_info.as_uint32 = eqe->eqe_info;
	type = eqe_info.type;

	switch (type) {
	case GDMA_EQE_COMPLETION:
		cq_id = eqe->details[0] & 0xFFFFFF;
		if (WARN_ON_ONCE(cq_id >= gc->max_num_cqs))
			break;

		cq = gc->cq_table[cq_id];
		if (WARN_ON_ONCE(!cq || cq->type != GDMA_CQ || cq->id != cq_id))
			break;

		if (cq->cq.callback)
			cq->cq.callback(cq->cq.context, cq);

		break;

	case GDMA_EQE_TEST_EVENT:
		gc->test_event_eq_id = eq->id;
		complete(&gc->eq_test_event);
		break;

	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
	case GDMA_EQE_HWC_INIT_DATA:
	case GDMA_EQE_HWC_INIT_DONE:
		if (!eq->eq.callback)
			break;

		event.type = type;
		memcpy(&event.details, &eqe->details, GDMA_EVENT_DATA_SIZE);
		eq->eq.callback(eq->eq.context, eq, &event);
		break;

	default:
		break;
	}
}
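
/* Poll the EQ ring: consume entries whose owner bits show they belong to
 * software, then write back the new head and, if appropriate, the arm bit.
 */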
static void mana_gd_process_eq_events(void *arg)
{
	u32 owner_bits, new_bits, old_bits;
	union gdma_eqe_info eqe_info;
	struct gdma_eqe *eq_eqe_ptr;
	struct gdma_queue *eq = arg;
	struct gdma_context *gc;
	struct gdma_eqe *eqe;
	unsigned int arm_bit;
	u32 head, num_eqe;
	int i;

	gc = eq->gdma_dev->gdma_context;

	num_eqe = eq->queue_size / GDMA_EQE_SIZE;
	eq_eqe_ptr = eq->queue_mem_ptr;

	/* Process up to 5 EQEs at a time, and update the HW head. */
	for (i = 0; i < 5; i++) {
		eqe = &eq_eqe_ptr[eq->head % num_eqe];
		eqe_info.as_uint32 = eqe->eqe_info;
		owner_bits = eqe_info.owner_bits;

		old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
		/* No more entries */
		if (owner_bits == old_bits)
			break;

		new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
		if (owner_bits != new_bits) {
			dev_err(gc->dev, "EQ %d: overflow detected\n", eq->id);
			break;
		}

		mana_gd_process_eqe(eq);

		eq->head++;
	}

	/* Always rearm the EQ for HWC. For MANA, rearm it when NAPI is done. */
	if (mana_gd_is_hwc(eq->gdma_dev)) {
		arm_bit = SET_ARM_BIT;
	} else if (eq->eq.work_done < eq->eq.budget &&
		   napi_complete_done(&eq->eq.napi, eq->eq.work_done)) {
		arm_bit = SET_ARM_BIT;
	} else {
		arm_bit = 0;
	}

	head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);

	mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id,
			      head, arm_bit);
}

static int mana_poll(struct napi_struct *napi, int budget)
{
	struct gdma_queue *eq = container_of(napi, struct gdma_queue, eq.napi);

	eq->eq.work_done = 0;
	eq->eq.budget = budget;

	mana_gd_process_eq_events(eq);

	return min(eq->eq.work_done, budget);
}

static void mana_gd_schedule_napi(void *arg)
{
	struct gdma_queue *eq = arg;
	struct napi_struct *napi;

	napi = &eq->eq.napi;
	napi_schedule_irqoff(napi);
}
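
/* Claim a free MSI-X slot for this EQ and hook up the per-IRQ handler:
 * NAPI scheduling for MANA EQs, direct event processing for the HWC EQ.
 */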
static int mana_gd_register_irq(struct gdma_queue *queue,
				const struct gdma_queue_spec *spec)
{
	struct gdma_dev *gd = queue->gdma_dev;
	bool is_mana = mana_gd_is_mana(gd);
	struct gdma_irq_context *gic;
	struct gdma_context *gc;
	struct gdma_resource *r;
	unsigned int msi_index;
	unsigned long flags;
	int err = 0;

	gc = gd->gdma_context;
	r = &gc->msix_resource;

	spin_lock_irqsave(&r->lock, flags);

	msi_index = find_first_zero_bit(r->map, r->size);
	if (msi_index >= r->size) {
		err = -ENOSPC;
	} else {
		bitmap_set(r->map, msi_index, 1);
		queue->eq.msix_index = msi_index;
	}

	spin_unlock_irqrestore(&r->lock, flags);

	if (err)
		return err;

	WARN_ON(msi_index >= gc->num_msix_usable);

	gic = &gc->irq_contexts[msi_index];

	if (is_mana) {
		netif_napi_add(spec->eq.ndev, &queue->eq.napi, mana_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&queue->eq.napi);
	}

	WARN_ON(gic->handler || gic->arg);

	gic->arg = queue;

	if (is_mana)
		gic->handler = mana_gd_schedule_napi;
	else
		gic->handler = mana_gd_process_eq_events;

	return 0;
}

static void mana_gd_deregister_irq(struct gdma_queue *queue)
{
	struct gdma_dev *gd = queue->gdma_dev;
	struct gdma_irq_context *gic;
	struct gdma_context *gc;
	struct gdma_resource *r;
	unsigned int msix_index;
	unsigned long flags;

	gc = gd->gdma_context;
	r = &gc->msix_resource;

	/* At most num_online_cpus() + 1 interrupts are used. */
	msix_index = queue->eq.msix_index;
	if (WARN_ON(msix_index >= gc->num_msix_usable))
		return;

	gic = &gc->irq_contexts[msix_index];
	gic->handler = NULL;
	gic->arg = NULL;

	spin_lock_irqsave(&r->lock, flags);
	bitmap_clear(r->map, msix_index, 1);
	spin_unlock_irqrestore(&r->lock, flags);

	queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
}
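
/* Generate a test event on the given EQ and wait for it to arrive; used
 * both as a self-test and to flush pending events before teardown.
 */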
int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
{
	struct gdma_generate_test_event_req req = {};
	struct gdma_general_resp resp = {};
	struct device *dev = gc->dev;
	int err;

	mutex_lock(&gc->eq_test_event_mutex);

	init_completion(&gc->eq_test_event);
	gc->test_event_eq_id = INVALID_QUEUE_ID;

	mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
			     sizeof(req), sizeof(resp));

	req.hdr.dev_id = eq->gdma_dev->dev_id;
	req.queue_index = eq->id;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		dev_err(dev, "test_eq failed: %d\n", err);
		goto out;
	}

	err = -EPROTO;

	if (resp.hdr.status) {
		dev_err(dev, "test_eq failed: 0x%x\n", resp.hdr.status);
		goto out;
	}

	if (!wait_for_completion_timeout(&gc->eq_test_event, 30 * HZ)) {
		dev_err(dev, "test_eq timed out on queue %d\n", eq->id);
		goto out;
	}

	if (eq->id != gc->test_event_eq_id) {
		dev_err(dev, "test_eq got an event on wrong queue %d (%d)\n",
			gc->test_event_eq_id, eq->id);
		goto out;
	}

	err = 0;
out:
	mutex_unlock(&gc->eq_test_event_mutex);
	return err;
}

static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_events,
			       struct gdma_queue *queue)
{
	int err;

	if (flush_events) {
		err = mana_gd_test_eq(gc, queue);
		if (err)
			dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
	}

	mana_gd_deregister_irq(queue);

	if (mana_gd_is_mana(queue->gdma_dev)) {
		napi_disable(&queue->eq.napi);
		netif_napi_del(&queue->eq.napi);
	}

	if (queue->eq.disable_needed)
		mana_gd_disable_queue(queue);
}

static int mana_gd_create_eq(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     bool create_hwq, struct gdma_queue *queue)
{
	struct gdma_context *gc = gd->gdma_context;
	struct device *dev = gc->dev;
	u32 log2_num_entries;
	int err;

	queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;

	log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);

	if (spec->eq.log2_throttle_limit > log2_num_entries) {
		dev_err(dev, "EQ throttling limit (%lu) > maximum EQE (%u)\n",
			spec->eq.log2_throttle_limit, log2_num_entries);
		return -EINVAL;
	}

	err = mana_gd_register_irq(queue, spec);
	if (err) {
		dev_err(dev, "Failed to register irq: %d\n", err);
		return err;
	}

	queue->eq.callback = spec->eq.callback;
	queue->eq.context = spec->eq.context;
	queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
	queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1;

	if (create_hwq) {
		err = mana_gd_create_hw_eq(gc, queue);
		if (err)
			goto out;

		err = mana_gd_test_eq(gc, queue);
		if (err)
			goto out;
	}

	return 0;
out:
	dev_err(dev, "Failed to create EQ: %d\n", err);
	mana_gd_destroy_eq(gc, false, queue);
	return err;
}

static void mana_gd_create_cq(const struct gdma_queue_spec *spec,
			      struct gdma_queue *queue)
{
	u32 log2_num_entries = ilog2(spec->queue_size / GDMA_CQE_SIZE);

	queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
	queue->cq.parent = spec->cq.parent_eq;
	queue->cq.context = spec->cq.context;
	queue->cq.callback = spec->cq.callback;
}

static void mana_gd_destroy_cq(struct gdma_context *gc,
			       struct gdma_queue *queue)
{
	u32 id = queue->id;

	if (id >= gc->max_num_cqs)
		return;

	if (!gc->cq_table[id])
		return;

	gc->cq_table[id] = NULL;
}
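
/* Create a queue on behalf of the HW channel; unlike the MANA queue
 * helpers below, this path does not create a GDMA DMA region.
 */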
int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_mem_info *gmi;
	struct gdma_queue *queue;
	int err;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	gmi = &queue->mem_info;
	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
	if (err)
		goto free_q;

	queue->head = 0;
	queue->tail = 0;
	queue->queue_mem_ptr = gmi->virt_addr;
	queue->queue_size = spec->queue_size;
	queue->monitor_avl_buf = spec->monitor_avl_buf;
	queue->type = spec->type;
	queue->gdma_dev = gd;

	if (spec->type == GDMA_EQ)
		err = mana_gd_create_eq(gd, spec, false, queue);
	else if (spec->type == GDMA_CQ)
		mana_gd_create_cq(spec, queue);

	if (err)
		goto out;

	*queue_ptr = queue;
	return 0;
out:
	mana_gd_free_memory(gmi);
free_q:
	kfree(queue);
	return err;
}

static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region)
{
	struct gdma_destroy_dma_region_req req = {};
	struct gdma_general_resp resp = {};
	int err;

	if (gdma_region == GDMA_INVALID_DMA_REGION)
		return;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
			     sizeof(resp));
	req.gdma_region = gdma_region;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status)
		dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
			err, resp.hdr.status);
}
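
/* Register the buffer's pages with the device: build a variable-length
 * request holding the DMA address of every page and get back a GDMA
 * region handle for use in queue-creation requests.
 */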
static int mana_gd_create_dma_region(struct gdma_dev *gd,
				     struct gdma_mem_info *gmi)
{
	unsigned int num_page = gmi->length / PAGE_SIZE;
	struct gdma_create_dma_region_req *req = NULL;
	struct gdma_create_dma_region_resp resp = {};
	struct gdma_context *gc = gd->gdma_context;
	struct hw_channel_context *hwc;
	u32 length = gmi->length;
	u32 req_msg_size;
	int err;
	int i;

	if (length < PAGE_SIZE || !is_power_of_2(length))
		return -EINVAL;

	if (offset_in_page(gmi->virt_addr) != 0)
		return -EINVAL;

	hwc = gc->hwc.driver_data;
	req_msg_size = sizeof(*req) + num_page * sizeof(u64);
	if (req_msg_size > hwc->max_req_msg_size)
		return -EINVAL;

	req = kzalloc(req_msg_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION,
			     req_msg_size, sizeof(resp));
	req->length = length;
	req->offset_in_page = 0;
	req->gdma_page_type = GDMA_PAGE_TYPE_4K;
	req->page_count = num_page;
	req->page_addr_list_len = num_page;

	for (i = 0; i < num_page; i++)
		req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE;

	err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
	if (err)
		goto out;

	if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
		dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
			resp.hdr.status);
		err = -EPROTO;
		goto out;
	}

	gmi->gdma_region = resp.gdma_region;
out:
	kfree(req);
	return err;
}

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_mem_info *gmi;
	struct gdma_queue *queue;
	int err;

	if (spec->type != GDMA_EQ)
		return -EINVAL;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	gmi = &queue->mem_info;
	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
	if (err)
		goto free_q;

	err = mana_gd_create_dma_region(gd, gmi);
	if (err)
		goto out;

	queue->head = 0;
	queue->tail = 0;
	queue->queue_mem_ptr = gmi->virt_addr;
	queue->queue_size = spec->queue_size;
	queue->monitor_avl_buf = spec->monitor_avl_buf;
	queue->type = spec->type;
	queue->gdma_dev = gd;

	err = mana_gd_create_eq(gd, spec, true, queue);
	if (err)
		goto out;

	*queue_ptr = queue;
	return 0;
out:
	mana_gd_free_memory(gmi);
free_q:
	kfree(queue);
	return err;
}

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_mem_info *gmi;
	struct gdma_queue *queue;
	int err;

	if (spec->type != GDMA_CQ && spec->type != GDMA_SQ &&
	    spec->type != GDMA_RQ)
		return -EINVAL;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	gmi = &queue->mem_info;
	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
	if (err)
		goto free_q;

	err = mana_gd_create_dma_region(gd, gmi);
	if (err)
		goto out;

	queue->head = 0;
	queue->tail = 0;
	queue->queue_mem_ptr = gmi->virt_addr;
	queue->queue_size = spec->queue_size;
	queue->monitor_avl_buf = spec->monitor_avl_buf;
	queue->type = spec->type;
	queue->gdma_dev = gd;

	if (spec->type == GDMA_CQ)
		mana_gd_create_cq(spec, queue);

	*queue_ptr = queue;
	return 0;
out:
	mana_gd_free_memory(gmi);
free_q:
	kfree(queue);
	return err;
}

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
{
	struct gdma_mem_info *gmi = &queue->mem_info;

	switch (queue->type) {
	case GDMA_EQ:
		mana_gd_destroy_eq(gc, queue->eq.disable_needed, queue);
		break;

	case GDMA_CQ:
		mana_gd_destroy_cq(gc, queue);
		break;

	case GDMA_RQ:
		break;

	case GDMA_SQ:
		break;

	default:
		dev_err(gc->dev, "Can't destroy unknown queue: type=%d\n",
			queue->type);
		return;
	}

	mana_gd_destroy_dma_region(gc, gmi->gdma_region);
	mana_gd_free_memory(gmi);
	kfree(queue);
}

int mana_gd_verify_vf_version(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_verify_ver_resp resp = {};
	struct gdma_verify_ver_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
			     sizeof(req), sizeof(resp));

	req.protocol_ver_min = GDMA_PROTOCOL_FIRST;
	req.protocol_ver_max = GDMA_PROTOCOL_LAST;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n",
			err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	return 0;
}

int mana_gd_register_device(struct gdma_dev *gd)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_register_device_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;
	gd->gpa_mkey = INVALID_MEM_KEY;

	mana_gd_init_req_hdr(&req.hdr, GDMA_REGISTER_DEVICE, sizeof(req),
			     sizeof(resp));

	req.hdr.dev_id = gd->dev_id;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "gdma_register_device_resp failed: %d, 0x%x\n",
			err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	gd->pdid = resp.pdid;
	gd->gpa_mkey = resp.gpa_mkey;
	gd->doorbell = resp.db_id;

	return 0;
}

int mana_gd_deregister_device(struct gdma_dev *gd)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_general_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	if (gd->pdid == INVALID_PDID)
		return -EINVAL;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DEREGISTER_DEVICE, sizeof(req),
			     sizeof(resp));

	req.hdr.dev_id = gd->dev_id;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
			err, resp.hdr.status);
		if (!err)
			err = -EPROTO;
	}

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;
	gd->gpa_mkey = INVALID_MEM_KEY;

	return err;
}

u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
{
	u32 used_space = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE;
	u32 wq_size = wq->queue_size;

	WARN_ON_ONCE(used_space > wq_size);

	return wq_size - used_space;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset)
{
	u32 offset = (wqe_offset * GDMA_WQE_BU_SIZE) & (wq->queue_size - 1);

	WARN_ON_ONCE((offset + GDMA_WQE_BU_SIZE) > wq->queue_size);

	return wq->queue_mem_ptr + offset;
}
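
/* Fill in the WQE header and the inline client OOB data; returns the
 * number of bytes written so the caller can locate the SGL that follows.
 */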
static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
				    enum gdma_queue_type q_type,
				    u32 client_oob_size, u32 sgl_data_size,
				    u8 *wqe_ptr)
{
	bool oob_in_sgl = !!(wqe_req->flags & GDMA_WR_OOB_IN_SGL);
	bool pad_data = !!(wqe_req->flags & GDMA_WR_PAD_BY_SGE0);
	struct gdma_wqe *header = (struct gdma_wqe *)wqe_ptr;
	u8 *ptr;

	memset(header, 0, sizeof(struct gdma_wqe));
	header->num_sge = wqe_req->num_sge;
	header->inline_oob_size_div4 = client_oob_size / sizeof(u32);

	if (oob_in_sgl) {
		WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2);

		header->client_oob_in_sgl = 1;

		if (pad_data)
			header->last_vbytes = wqe_req->sgl[0].size;
	}

	if (q_type == GDMA_SQ)
		header->client_data_unit = wqe_req->client_data_unit;

	/* The size of gdma_wqe + client_oob_size must be less than or equal
	 * to one Basic Unit (i.e. 32 bytes), so the pointer can't go beyond
	 * the queue memory buffer boundary.
	 */
	ptr = wqe_ptr + sizeof(*header);

	if (wqe_req->inline_oob_data && wqe_req->inline_oob_size > 0) {
		memcpy(ptr, wqe_req->inline_oob_data, wqe_req->inline_oob_size);

		if (client_oob_size > wqe_req->inline_oob_size)
			memset(ptr + wqe_req->inline_oob_size, 0,
			       client_oob_size - wqe_req->inline_oob_size);
	}

	return sizeof(*header) + client_oob_size;
}

static void mana_gd_write_sgl(struct gdma_queue *wq, u8 *wqe_ptr,
			      const struct gdma_wqe_request *wqe_req)
{
	u32 sgl_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
	const u8 *address = (u8 *)wqe_req->sgl;
	u8 *base_ptr, *end_ptr;
	u32 size_to_end;

	base_ptr = wq->queue_mem_ptr;
	end_ptr = base_ptr + wq->queue_size;
	size_to_end = (u32)(end_ptr - wqe_ptr);

	if (size_to_end < sgl_size) {
		/* The SGL wraps around the end of the ring buffer. */
		memcpy(wqe_ptr, address, size_to_end);

		wqe_ptr = base_ptr;
		address += size_to_end;
		sgl_size -= size_to_end;
	}

	memcpy(wqe_ptr, address, sgl_size);
}
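
/* Validate a work request, reserve space in the work queue, and copy in
 * the WQE header, inline OOB data and SGL; the caller still has to ring
 * the doorbell (see mana_gd_post_and_ring()).
 */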
int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info)
{
	u32 client_oob_size = wqe_req->inline_oob_size;
	struct gdma_context *gc;
	u32 sgl_data_size;
	u32 max_wqe_size;
	u32 wqe_size;
	u8 *wqe_ptr;

	if (wqe_req->num_sge == 0)
		return -EINVAL;

	if (wq->type == GDMA_RQ) {
		if (client_oob_size != 0)
			return -EINVAL;

		client_oob_size = INLINE_OOB_SMALL_SIZE;

		max_wqe_size = GDMA_MAX_RQE_SIZE;
	} else {
		if (client_oob_size != INLINE_OOB_SMALL_SIZE &&
		    client_oob_size != INLINE_OOB_LARGE_SIZE)
			return -EINVAL;

		max_wqe_size = GDMA_MAX_SQE_SIZE;
	}

	sgl_data_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
	wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size +
			 sgl_data_size, GDMA_WQE_BU_SIZE);
	if (wqe_size > max_wqe_size)
		return -EINVAL;

	if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
		gc = wq->gdma_dev->gdma_context;
		dev_err(gc->dev, "unsuccessful flow control!\n");
		return -ENOSPC;
	}

	if (wqe_info)
		wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;

	wqe_ptr = mana_gd_get_wqe_ptr(wq, wq->head);
	wqe_ptr += mana_gd_write_client_oob(wqe_req, wq->type, client_oob_size,
					    sgl_data_size, wqe_ptr);
	if (wqe_ptr >= (u8 *)wq->queue_mem_ptr + wq->queue_size)
		wqe_ptr -= wq->queue_size;

	mana_gd_write_sgl(wq, wqe_ptr, wqe_req);

	wq->head += wqe_size / GDMA_WQE_BU_SIZE;

	return 0;
}

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe_req,
			  struct gdma_posted_wqe_info *wqe_info)
{
	struct gdma_context *gc = queue->gdma_dev->gdma_context;
	int err;

	err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
	if (err)
		return err;

	mana_gd_wq_ring_doorbell(gc, queue);

	return 0;
}

static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
{
	unsigned int num_cqe = cq->queue_size / sizeof(struct gdma_cqe);
	struct gdma_cqe *cq_cqe = cq->queue_mem_ptr;
	u32 owner_bits, new_bits, old_bits;
	struct gdma_cqe *cqe;

	cqe = &cq_cqe[cq->head % num_cqe];
	owner_bits = cqe->cqe_info.owner_bits;

	old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK;
	/* Return 0 if no more entries. */
	if (owner_bits == old_bits)
		return 0;

	new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
	/* Return -1 if overflow detected. */
	if (owner_bits != new_bits)
		return -1;

	comp->wq_num = cqe->cqe_info.wq_num;
	comp->is_sq = cqe->cqe_info.is_sq;
	memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);

	return 1;
}
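
/* Drain up to num_cqe completions into comp[]; returns the number read,
 * or -1 (with the head rolled back) if a CQ overflow is detected.
 */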
int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
{
	int cqe_idx;
	int ret;

	for (cqe_idx = 0; cqe_idx < num_cqe; cqe_idx++) {
		ret = mana_gd_read_cqe(cq, &comp[cqe_idx]);

		if (ret < 0) {
			cq->head -= cqe_idx;
			return ret;
		}

		if (ret == 0)
			break;

		cq->head++;
	}

	return cqe_idx;
}

static irqreturn_t mana_gd_intr(int irq, void *arg)
{
	struct gdma_irq_context *gic = arg;

	if (gic->handler)
		gic->handler(gic->arg);

	return IRQ_HANDLED;
}

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r)
{
	r->map = bitmap_zalloc(res_avail, GFP_KERNEL);
	if (!r->map)
		return -ENOMEM;

	r->size = res_avail;
	spin_lock_init(&r->lock);

	return 0;
}

void mana_gd_free_res_map(struct gdma_resource *r)
{
	bitmap_free(r->map);
	r->map = NULL;
	r->size = 0;
}
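
/* Allocate MSI-X vectors (one per queue per port, plus one for the HWC)
 * and install mana_gd_intr() as the handler for each of them.
 */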
static int mana_gd_setup_irqs(struct pci_dev *pdev)
{
	unsigned int max_queues_per_port = num_online_cpus();
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_irq_context *gic;
	unsigned int max_irqs;
	int nvec, irq;
	int err, i, j;

	if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
		max_queues_per_port = MANA_MAX_NUM_QUEUES;

	max_irqs = max_queues_per_port * MAX_PORTS_IN_MANA_DEV;

	/* Need 1 interrupt for the Hardware Communication Channel (HWC) */
	max_irqs++;

	nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;

	gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
				   GFP_KERNEL);
	if (!gc->irq_contexts) {
		err = -ENOMEM;
		goto free_irq_vector;
	}

	for (i = 0; i < nvec; i++) {
		gic = &gc->irq_contexts[i];
		gic->handler = NULL;
		gic->arg = NULL;

		irq = pci_irq_vector(pdev, i);
		if (irq < 0) {
			err = irq;
			goto free_irq;
		}

		err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic);
		if (err)
			goto free_irq;
	}

	err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
	if (err)
		goto free_irq;

	gc->max_num_msix = nvec;
	gc->num_msix_usable = nvec;

	return 0;

free_irq:
	for (j = i - 1; j >= 0; j--) {
		irq = pci_irq_vector(pdev, j);
		gic = &gc->irq_contexts[j];
		free_irq(irq, gic);
	}

	kfree(gc->irq_contexts);
	gc->irq_contexts = NULL;
free_irq_vector:
	pci_free_irq_vectors(pdev);
	return err;
}

static void mana_gd_remove_irqs(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_irq_context *gic;
	int irq, i;

	if (gc->max_num_msix < 1)
		return;

	mana_gd_free_res_map(&gc->msix_resource);

	for (i = 0; i < gc->max_num_msix; i++) {
		irq = pci_irq_vector(pdev, i);
		if (irq < 0)
			continue;

		gic = &gc->irq_contexts[i];
		free_irq(irq, gic);
	}

	pci_free_irq_vectors(pdev);

	gc->max_num_msix = 0;
	gc->num_msix_usable = 0;
	kfree(gc->irq_contexts);
	gc->irq_contexts = NULL;
}

static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct gdma_context *gc;
	void __iomem *bar0_va;
	int bar = 0;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return -ENXIO;

	pci_set_master(pdev);

	err = pci_request_regions(pdev, "mana");
	if (err)
		goto disable_dev;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		goto release_region;

	err = -ENOMEM;
	gc = vzalloc(sizeof(*gc));
	if (!gc)
		goto release_region;

	bar0_va = pci_iomap(pdev, bar, 0);
	if (!bar0_va)
		goto free_gc;

	gc->bar0_va = bar0_va;
	gc->dev = &pdev->dev;

	pci_set_drvdata(pdev, gc);

	mana_gd_init_registers(pdev);

	mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);

	err = mana_gd_setup_irqs(pdev);
	if (err)
		goto unmap_bar;

	mutex_init(&gc->eq_test_event_mutex);

	err = mana_hwc_create_channel(gc);
	if (err)
		goto remove_irq;

	err = mana_gd_verify_vf_version(pdev);
	if (err)
		goto remove_irq;

	err = mana_gd_query_max_resources(pdev);
	if (err)
		goto remove_irq;

	err = mana_gd_detect_devices(pdev);
	if (err)
		goto remove_irq;

	err = mana_probe(&gc->mana);
	if (err)
		goto clean_up_gdma;

	return 0;

clean_up_gdma:
	mana_hwc_destroy_channel(gc);
	vfree(gc->cq_table);
	gc->cq_table = NULL;
remove_irq:
	mana_gd_remove_irqs(pdev);
unmap_bar:
	pci_iounmap(pdev, bar0_va);
free_gc:
	vfree(gc);
release_region:
	pci_release_regions(pdev);
disable_dev:
	pci_clear_master(pdev);
	pci_disable_device(pdev);
	dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err);
	return err;
}

static void mana_gd_remove(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);

	mana_remove(&gc->mana);

	mana_hwc_destroy_channel(gc);
	vfree(gc->cq_table);
	gc->cq_table = NULL;

	mana_gd_remove_irqs(pdev);

	pci_iounmap(pdev, gc->bar0_va);

	vfree(gc);

	pci_release_regions(pdev);
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}

#ifndef PCI_VENDOR_ID_MICROSOFT
#define PCI_VENDOR_ID_MICROSOFT 0x1414
#endif

static const struct pci_device_id mana_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, 0x00BA) },
	{ }
};

static struct pci_driver mana_driver = {
	.name		= "mana",
	.id_table	= mana_id_table,
	.probe		= mana_gd_probe,
	.remove		= mana_gd_remove,
};

module_pci_driver(mana_driver);

MODULE_DEVICE_TABLE(pci, mana_id_table);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Microsoft Azure Network Adapter driver");