/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION 0
#define MANA_MINOR_VERSION 1
#define MANA_MICRO_VERSION 1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)
enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1,
};
/* The number of entries in the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_SIZE 64
#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
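
/* Illustrative sketch (not part of the driver): a packet's 32-bit Toeplitz
 * hash selects an indirection-table slot by masking with
 * MANA_INDIRECT_TABLE_MASK; that slot then supplies the target queue index
 * (indir_table) and RX object handle (rxobj_table) declared further below in
 * struct mana_port_context. The helper name is hypothetical.
 */
static inline u32 mana_example_indir_slot(u32 pkt_hash)
{
	return pkt_hash & MANA_INDIRECT_TABLE_MASK;
}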
/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40
#define COMP_ENTRY_SIZE 64

#define ADAPTER_MTU_SIZE 1500
#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14)
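/* The 14 bytes added above presumably account for the Ethernet header
 * (ETH_HLEN is 14 bytes).
 */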
#define RX_BUFFERS_PER_QUEUE 512

#define MAX_SEND_BUFFERS_PER_QUEUE 256

#define EQ_SIZE (8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 16
	struct u64_stats_sync syncp;

	struct gdma_queue *gdma_sq;

	struct net_device *ndev;

	/* The SKBs are sent to the HW and we are waiting for the CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	struct mana_stats stats;
/* DMA mappings of the skb's data and frags */
struct mana_skb_head {
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];

	u32 size[MAX_SKB_FRAGS + 1];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)
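
/* Illustrative sketch (hypothetical helper, not defined by this driver):
 * the per-frag DMA handles and sizes live in the skb headroom, which is why
 * MANA_HEADROOM is sizeof(struct mana_skb_head); a TX completion handler
 * could recover them straight from skb->head. Assumes <linux/skbuff.h> has
 * been included.
 */
static inline struct mana_skb_head *mana_example_skb_head(struct sk_buff *skb)
{
	return (struct mana_skb_head *)skb->head;
}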
enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT = 0,
	MANA_LONG_PKT_FMT = 1,
};
struct mana_tx_short_oob {
	u32 is_outer_ipv4 : 1;
	u32 is_outer_ipv6 : 1;
	u32 comp_iphdr_csum : 1;
	u32 comp_tcp_csum : 1;
	u32 comp_udp_csum : 1;
	u32 supress_txcqe_gen : 1;

	u32 trans_off : 10; /* Transport header offset */

	u32 short_vp_offset : 8;
struct mana_tx_long_oob {
	u32 inner_is_ipv6 : 1;
	u32 inner_tcp_opt : 1;
	u32 inject_vlan_pri_tag : 1;

	u32 pcp : 3;  /* 802.1Q */
	u32 dei : 1;  /* 802.1Q */
	u32 vlan_id : 12; /* 802.1Q */

	u32 inner_frame_offset : 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset : 12;
struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */
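
/* Illustrative sketch (hypothetical helper): filling a short-format TX OOB
 * for an outer-IPv4 packet that wants TCP checksum offload. Only fields
 * visible in struct mana_tx_short_oob above are touched; real OOB setup
 * involves more state than this.
 */
static inline void mana_example_tcp_csum_oob(struct mana_tx_oob *tx_oob,
					     u32 transport_hdr_off)
{
	tx_oob->s_oob.is_outer_ipv4 = 1;
	tx_oob->s_oob.comp_tcp_csum = 1;
	tx_oob->s_oob.trans_off = transport_hdr_off;
}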
	CQE_RX_COALESCED_4 = 2,
	CQE_RX_OBJECT_FENCE = 3,
	CQE_RX_TRUNCATED = 4,

	CQE_TX_MTU_DROP = 34,
	CQE_TX_INVALID_OOB = 35,
	CQE_TX_INVALID_ETH_TYPE = 36,
	CQE_TX_HDR_PROCESSING_ERROR = 37,
	CQE_TX_VF_DISABLED = 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39,
	CQE_TX_VPORT_DISABLED = 40,
	CQE_TX_VLAN_TAGGING_VIOLATION = 41,
#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
/* NDIS HASH Types */
#define NDIS_HASH_IPV4 BIT(0)
#define NDIS_HASH_TCP_IPV4 BIT(1)
#define NDIS_HASH_UDP_IPV4 BIT(2)
#define NDIS_HASH_IPV6 BIT(3)
#define NDIS_HASH_TCP_IPV6 BIT(4)
#define NDIS_HASH_UDP_IPV6 BIT(5)
#define NDIS_HASH_IPV6_EX BIT(6)
#define NDIS_HASH_TCP_IPV6_EX BIT(7)
#define NDIS_HASH_UDP_IPV6_EX BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4                                                         \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
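
/* Illustrative sketch (hypothetical helper): reporting the completion's hash
 * to the stack at the right level, depending on which NDIS_HASH_* bits are
 * set for the packet. Assumes <linux/skbuff.h> for skb_set_hash().
 */
static inline void mana_example_set_rx_hash(struct sk_buff *skb,
					    u32 hash_type, u32 hash_value)
{
	if (hash_type & MANA_HASH_L4)
		skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
	else if (hash_type & MANA_HASH_L3)
		skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
	else
		skb_set_hash(skb, hash_value, PKT_HASH_TYPE_NONE);
}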
struct mana_rxcomp_perpkt_info {

#define MANA_RXCOMP_OOB_NUM_PPI 4
/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlantag_present : 1;
	u32 rx_outer_iphdr_csum_succeed : 1;
	u32 rx_outer_iphdr_csum_fail : 1;

	u32 rx_iphdr_csum_succeed : 1;
	u32 rx_iphdr_csum_fail : 1;
	u32 rx_tcp_csum_succeed : 1;
	u32 rx_tcp_csum_fail : 1;
	u32 rx_udp_csum_succeed : 1;
	u32 rx_udp_csum_fail : 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];
struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_sgl_offset : 5;
	u32 tx_wqe_offset : 27;
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify that each CQE comes to the right CQ) */

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Pointer to a buffer which the CQ handler can copy the CQEs into. */
	struct gdma_comp *gdma_comp_buf;
#define GDMA_MAX_RQE_SGES 15

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	dma_addr_t buf_dma_addr;

	/* SGL describing the buffer, passed to the HW as part of the work
	 * request.
	 */
	struct gdma_sge sgl[GDMA_MAX_RQE_SGES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */

	/* Index of RQ in the vPort, not gdma receive queue id */

	struct mana_cq rx_cq;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */

	struct mana_stats stats;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[];
	struct mana_cq tx_cq;

	mana_handle_t tx_object;
struct mana_ethtool_stats {

struct mana_context {
	struct gdma_dev *gdma_dev;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;

	struct mana_tx_qp *tx_qp;

	/* Indirection Table for RX & TX. The values are queue indexes */
	u32 indir_table[MANA_INDIRECT_TABLE_SIZE];

	/* Indirection table containing RxObject Handles */
	mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues RQ pointers. */
	struct mana_rxq **rxqs;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int max_queues;
	unsigned int num_queues;

	mana_handle_t port_handle;

	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;
};
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);
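
/* Illustrative sketch (hypothetical caller): after rewriting indir_table
 * and/or hashkey in struct mana_port_context, the new RSS settings would be
 * pushed to the hardware with mana_config_rss(); TRI_STATE_TRUE keeps RSS
 * enabled.
 */
static inline int mana_example_apply_rss(struct mana_port_context *apc)
{
	return mana_config_rss(apc, TRI_STATE_TRUE, true, true);
}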
int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd);
void mana_remove(struct gdma_dev *gd);
extern const struct ethtool_ops mana_ethtool_ops;

struct mana_obj_spec {
enum mana_command_code {
	MANA_QUERY_DEV_CONFIG = 0x20001,
	MANA_QUERY_GF_STAT = 0x20002,
	MANA_CONFIG_VPORT_TX = 0x20003,
	MANA_CREATE_WQ_OBJ = 0x20004,
	MANA_DESTROY_WQ_OBJ = 0x20005,
	MANA_FENCE_RQ = 0x20006,
	MANA_CONFIG_VPORT_RX = 0x20007,
	MANA_QUERY_VPORT_CONFIG = 0x20008,
};
/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* Driver Capability flags */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;
/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;

	u32 num_indirection_ent;
/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;

	u8 short_form_allowed;
/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;

	u32 cq_moderation_ctx_id;

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;

	mana_handle_t wq_obj;
/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;

	mana_handle_t wq_obj_handle;

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req {
	struct gdma_req_hdr hdr;

	u16 num_indir_entries;
	u16 indir_tab_offset;

	u8 update_default_rxobj;

	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
#define MANA_MAX_NUM_QUEUES 16

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
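
/* Illustrative sketch (hypothetical helper): a vPort offset that does not
 * fit in the 8-bit short_vp_offset field of the short OOB cannot use the
 * short packet format; this mirrors the tx_shortform_allowed flag in
 * struct mana_port_context above.
 */
static inline enum mana_tx_pkt_format mana_example_pkt_fmt(u32 vp_offset)
{
	return vp_offset > MANA_SHORT_VPORT_OFFSET_MAX ?
	       MANA_LONG_PKT_FMT : MANA_SHORT_PKT_FMT;
}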
struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;