/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */
enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION = 1,
	GDMA_QUERY_MAX_RESOURCES = 2,
	GDMA_LIST_DEVICES = 3,
	GDMA_REGISTER_DEVICE = 4,
	GDMA_DEREGISTER_DEVICE = 5,
	GDMA_GENERATE_TEST_EQE = 10,
	GDMA_CREATE_QUEUE = 12,
	GDMA_DISABLE_QUEUE = 13,
	GDMA_CREATE_DMA_REGION = 25,
	GDMA_DMA_REGION_ADD_PAGES = 26,
	GDMA_DESTROY_DMA_REGION = 27,
enum gdma_queue_type {

enum gdma_work_request_flags {
	GDMA_WR_OOB_IN_SGL = BIT(0),
	GDMA_WR_PAD_BY_SGE0 = BIT(1),

	GDMA_EQE_COMPLETION = 3,
	GDMA_EQE_TEST_EVENT = 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
	GDMA_EQE_HWC_INIT_DATA = 130,
	GDMA_EQE_HWC_INIT_DONE = 131,
struct gdma_resource {
	/* Protect the bitmap */

	/* The bitmap size in bits. */

	/* The bitmap tracks the resources. */

union gdma_doorbell_entry {
struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;

struct gdma_general_req {
	struct gdma_req_hdr hdr;

#define GDMA_MESSAGE_V1 1

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;

#define GDMA_STANDARD_HEADER_TYPE 0
static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}
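/* Illustrative sketch (not a declaration from this header): a typical caller
 * embeds the header at the start of a request/response pair, fills it in with
 * the helper above, and then sends the message over the HWC with
 * mana_gd_send_request(), e.g.:
 *
 *	struct gdma_verify_ver_req req = {};
 *	struct gdma_verify_ver_resp resp = {};
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
 *			     sizeof(req), sizeof(resp));
 *	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 *
 * 'gc' and 'err' are placeholders for the caller's gdma_context pointer and
 * return value.
 */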
/* The 16-byte struct is part of the GDMA work queue entry (WQE). */

struct gdma_wqe_request {
	struct gdma_sge *sgl;

	const void *inline_oob_data;

	u32 client_data_unit;

enum gdma_page_type {

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
	dma_addr_t dma_handle;

	/* Allocated by the PF driver */

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	/* GDMA driver specific pointer */

#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE

#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32
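/* Illustrative sketch: a caller sizing a WQE rounds the total of the fixed
 * header, OOB area and SGL up to the Basic Unit, e.g. with the kernel's
 * ALIGN() helper (the individual size names here are placeholders):
 *
 *	wqe_size = ALIGN(hdr_size + oob_size + sgl_size, GDMA_WQE_BU_SIZE);
 */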
#define INVALID_PDID UINT_MAX
#define INVALID_DOORBELL UINT_MAX
#define INVALID_MEM_KEY UINT_MAX
#define INVALID_QUEUE_ID UINT_MAX
#define INVALID_PCI_MSIX_INDEX UINT_MAX

	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	u32 details[GDMA_EVENT_DATA_SIZE / 4];

	struct gdma_queue *eq;

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);
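/* Illustrative sketch: an EQ or CQ client supplies callbacks matching the
 * typedefs above, e.g.
 *
 *	static void my_eq_handler(void *context, struct gdma_queue *q,
 *				  struct gdma_event *e)
 *	{ ... }
 *
 *	static void my_cq_handler(void *context, struct gdma_queue *q)
 *	{ ... }
 *
 * 'my_eq_handler' and 'my_cq_handler' are placeholder names; the function
 * pointers are passed in via struct gdma_queue_spec when the queue is created.
 */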
/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 *
 * An illustrative sketch of this bookkeeping follows the queue fields below.
 */
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;

	struct gdma_mem_info mem_info;

	bool monitor_avl_buf;

	/* Extra fields specific to EQ/CQ. */
	gdma_eq_callback *callback;

	unsigned int msix_index;

	u32 log2_throttle_limit;

	gdma_cq_callback *callback;

	struct gdma_queue *parent; /* For CQ/EQ relationship */
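/* Illustrative sketch of the head/tail bookkeeping described above (the
 * 'head', 'tail' and 'queue_size' fields are elided in this excerpt, and
 * mana_gd_wq_avail_space(), declared below, is the real accessor):
 *
 *	used_bytes  = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE;
 *	avail_bytes = wq->queue_size - used_bytes;
 *
 * A WQE of 'wqe_size' bytes may be posted only if wqe_size <= avail_bytes.
 */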
struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	gdma_eq_callback *callback;

	unsigned long log2_throttle_limit;

	gdma_cq_callback *callback;

	struct gdma_queue *parent_eq;
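/* Illustrative sketch: a client fills in a spec and asks the GDMA layer to
 * create the queue. The EQ/CQ-specific sub-structure holding 'callback' and
 * 'log2_throttle_limit' is elided in this excerpt, 'eq_size' is a placeholder,
 * and GDMA_EQ is assumed to be a member of enum gdma_queue_type (its values
 * are not shown above):
 *
 *	struct gdma_queue_spec spec = {};
 *	struct gdma_queue *eq;
 *
 *	spec.type = GDMA_EQ;
 *	spec.monitor_avl_buf = false;
 *	spec.queue_size = eq_size;
 *	err = mana_gd_create_mana_eq(gd, &spec, &eq);
 */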
struct gdma_irq_context {
	void (*handler)(void *arg);

struct gdma_context {
	/* Per-vPort max number of queues */
	unsigned int max_num_queues;
	unsigned int max_num_msix;
	unsigned int num_msix_usable;
	struct gdma_resource msix_resource;
	struct gdma_irq_context *irq_contexts;

	/* This maps a CQ index to the queue structure. */
	unsigned int max_num_cqs;
	struct gdma_queue **cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex eq_test_event_mutex;
	struct completion eq_test_event;
	u32 test_event_eq_id;

	void __iomem *bar0_va;
	void __iomem *shm_base;
	void __iomem *db_page_base;
	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel shm_channel;

	/* Hardware communication channel (HWC) */

	/* Azure network adapter */
	struct gdma_dev mana;

#define MAX_NUM_GDMA_DEVICES 4
static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}
u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);
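/* Illustrative sketch of a completion-processing loop built on the two
 * declarations above (the array size and 'handle_completion' are placeholders
 * chosen by the caller; SET_ARM_BIT is defined further below):
 *
 *	struct gdma_comp comps[8];
 *	int i, n;
 *
 *	n = mana_gd_poll_cq(cq, comps, ARRAY_SIZE(comps));
 *	for (i = 0; i < n; i++)
 *		handle_completion(&comps[i]);
 *	mana_gd_ring_cq(cq, SET_ARM_BIT);
 */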
	u32 inline_oob_size_div4:3;
	u32 client_oob_in_sgl:1;

	u32 client_data_unit:14;

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))
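/* Illustrative sketch of the owner-bits check used to detect new EQEs
 * ('num_eqe' is the number of entries in the EQ; the fields of
 * union gdma_eqe_info are elided in this excerpt):
 *
 *	owner_bits = eqe_info.owner_bits;
 *	old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
 *	if (owner_bits == old_bits)
 *		return;			// no new entry
 *	new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
 *	if (owner_bits != new_bits)
 *		...			// the EQ overflowed
 */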
	u32 details[GDMA_EVENT_DATA_SIZE / 4];

#define GDMA_REG_DB_PAGE_OFFSET 8
#define GDMA_REG_DB_PAGE_SIZE 0x10
#define GDMA_REG_SHM_OFFSET 0x18

#define GDMA_PF_REG_DB_PAGE_SIZE 0xD0
#define GDMA_PF_REG_DB_PAGE_OFF 0xC8
#define GDMA_PF_REG_SHM_OFF 0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF 0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;

/* GDMA_VERIFY_VF_DRIVER_VERSION */
	GDMA_PROTOCOL_V1 = 1,
	GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1,

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

#define GDMA_DRV_CAP_FLAGS1 GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0
struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;

	/* Gdma Driver Capability Flags */
	u64 gd_drv_cap_flags1;
	u64 gd_drv_cap_flags2;
	u64 gd_drv_cap_flags3;
	u64 gd_drv_cap_flags4;

	/* Advisory fields */
	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;
/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;

/* GDMA_LIST_DEVICES */
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;

	struct gdma_dev_id devs[64];

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;

	u32 log2_throttle_limit;
	u32 eq_pci_msix_index;

	u8 rq_drop_on_overrun;
	u8 rq_err_on_wqe_overflow;
	u8 rq_chain_rec_wqes;

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;

	u32 alloc_res_id_on_creation;

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */

	/* The offset in the first page */

	/* enum gdma_page_type */

	/* The total number of pages */
	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES. See the illustrative
	 * sketch after the add-pages request below.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];
struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u32 page_addr_list_len;

	u64 page_addr_list[];
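/* Illustrative sketch of the two-step registration described in
 * gdma_create_dma_region_req above (chunk sizes and names are placeholders):
 * if the page list does not fit into a single HWC message, the region is
 * created with the first chunk of addresses and the remainder is appended
 * with GDMA_DMA_REGION_ADD_PAGES requests:
 *
 *	create_req->page_count = num_pages;
 *	create_req->page_addr_list_len = first_chunk;
 *	... send GDMA_CREATE_DMA_REGION, obtain the region handle ...
 *
 *	for (i = first_chunk; i < num_pages; i += chunk)
 *		... send GDMA_DMA_REGION_ADD_PAGES with the next chunk ...
 */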
/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);
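/* Illustrative sketch: to post a work request, a client describes it with
 * struct gdma_wqe_request and hands it to one of the helpers above. The
 * 'num_sge', 'inline_oob_size' and 'flags' fields are elided in this excerpt;
 * 'wq', 'sge' and 'oob' are placeholders:
 *
 *	struct gdma_posted_wqe_info wqe_info = {};
 *	struct gdma_wqe_request wqe_req = {};
 *
 *	wqe_req.sgl = &sge;
 *	wqe_req.num_sge = 1;
 *	wqe_req.inline_oob_data = &oob;
 *	wqe_req.inline_oob_size = sizeof(oob);
 *	err = mana_gd_post_and_ring(wq, &wqe_req, &wqe_info);
 */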
int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);