/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_VERBS_H
#define IRDMA_VERBS_H

#define IRDMA_MAX_SAVED_PHY_PGADDR	4
#define IRDMA_FLUSH_DELAY_MS		20

#define IRDMA_PKEY_TBL_SZ		1
#define IRDMA_DEFAULT_PKEY		0xFFFF
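/*
 * Note (added, not in the original header): 0xffff is the standard
 * full-membership default P_Key, so a single-entry P_Key table is
 * sufficient for a device that only exposes the default partition.
 */
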
struct irdma_ucontext {
	struct ib_ucontext ibucontext;
	struct irdma_device *iwdev;
	struct rdma_user_mmap_entry *db_mmap_entry;
	struct list_head cq_reg_mem_list;
	spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
	struct list_head qp_reg_mem_list;
	spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
};
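
/*
 * Illustrative sketch (added; the helper name is an assumption, not part
 * of this header): ib_ucontext is embedded as the first member, so verbs
 * callbacks can recover the driver context with container_of().
 */
static inline struct irdma_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct irdma_ucontext, ibucontext);
}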

struct irdma_pd {
	struct ib_pd ibpd;
	struct irdma_sc_pd sc_pd;
};

union irdma_sockaddr {
	struct sockaddr_in saddr_in;
	struct sockaddr_in6 saddr_in6;
};

struct irdma_av {
	struct rdma_ah_attr attrs;
	union irdma_sockaddr sgid_addr;
	union irdma_sockaddr dgid_addr;
};

struct irdma_ah {
	struct ib_ah ibah;
	struct irdma_sc_ah sc_ah;
	struct hlist_node list;
	struct irdma_ah *parent_ah; /* AH from cached list */
};
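
/*
 * Note (added): the hlist_node and parent_ah pointer support AH caching;
 * an AH that duplicates an existing one can reference the cached parent
 * instead of allocating new hardware state. This reading is inferred from
 * the "AH from cached list" comment above.
 */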

struct irdma_hmc_pble {
	union {
		u32 idx;
		dma_addr_t addr;
	};
};

struct irdma_cq_mr {
	struct irdma_hmc_pble cq_pbl;
};

struct irdma_qp_mr {
	struct irdma_hmc_pble sq_pbl;
	struct irdma_hmc_pble rq_pbl;
};

struct irdma_cq_buf {
	struct irdma_dma_mem kmem_buf;
	struct irdma_cq_uk cq_uk;
	struct list_head list;
	struct work_struct work;
};

struct irdma_pbl {
	struct list_head list;
	union {
		struct irdma_qp_mr qp_mr;
		struct irdma_cq_mr cq_mr;
	};
	struct irdma_pble_alloc pble_alloc;
	struct irdma_mr *iwmr;
};

struct irdma_mr {
	struct ib_umem *region;
	u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];
	struct irdma_pbl iwpbl;
};

struct irdma_cq {
	struct ib_cq ibcq;
	struct irdma_sc_cq sc_cq;
	enum irdma_cmpl_notify last_notify;
	struct irdma_dma_mem kmem;
	struct irdma_dma_mem kmem_shadow;
	spinlock_t lock; /* for poll cq */
	struct irdma_pbl *iwpbl;
	struct irdma_pbl *iwpbl_shadow;
	struct list_head resize_list;
	struct irdma_cq_poll_info cur_cqe;
	struct list_head cmpl_generated;
};
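
/*
 * Illustrative sketch (added; the helper name is an assumption): recover
 * the driver CQ from the embedded ib_cq, the usual container_of() pattern.
 */
static inline struct irdma_cq *to_iwcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct irdma_cq, ibcq);
}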

struct irdma_cmpl_gen {
	struct list_head list;
	struct irdma_cq_poll_info cpi;
};
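
/*
 * Note (added): when a QP is flushed in software, each synthesized
 * completion is carried by one irdma_cmpl_gen node queued on
 * irdma_cq::cmpl_generated and later consumed via irdma_generated_cmpls()
 * during poll; see the prototypes at the end of this header.
 */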

struct disconn_work {
	struct work_struct work;
	struct irdma_qp *iwqp;
};

struct irdma_qp_kmode {
	struct irdma_dma_mem dma_mem;
	struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;
};

struct irdma_qp {
	struct ib_qp ibqp;
	struct irdma_sc_qp sc_qp;
	struct irdma_device *iwdev;
	struct irdma_cq *iwscq;
	struct irdma_cq *iwrcq;
	struct irdma_pd *iwpd;
	struct rdma_user_mmap_entry *push_wqe_mmap_entry;
	struct rdma_user_mmap_entry *push_db_mmap_entry;
	struct irdma_qp_host_ctx_info ctx_info;
	union {
		struct irdma_iwarp_offload_info iwarp_info;
		struct irdma_roce_offload_info roce_info;
	};
	union {
		struct irdma_tcp_offload_info tcp_info;
		struct irdma_udp_offload_info udp_info;
	};
	struct irdma_ah roce_ah;
	struct list_head teardown_entry;
	struct iw_cm_id *cm_id;
	struct irdma_cm_node *cm_node;
	struct delayed_work dwork_flush;
	struct ib_mr *lsmm_mr;
	atomic_t hw_mod_qp_pend;
	enum ib_qp_state ibqp_state;
	atomic_t close_timer_started;
	spinlock_t lock; /* serialize posting WRs to SQ/RQ */
	struct irdma_qp_context *iwqp_context;
	dma_addr_t pbl_pbase;
	u16 term_sq_flush_code;
	u16 term_rq_flush_code;
	struct irdma_qp_kmode kqp;
	struct irdma_dma_mem host_ctx;
	struct timer_list terminate_timer;
	struct irdma_pbl *iwpbl;
	struct irdma_dma_mem q2_ctx_mem;
	struct irdma_dma_mem ietf_mem;
	struct completion free_qp;
	wait_queue_head_t waitq;
	wait_queue_head_t mod_qp_waitq;
};
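
/*
 * Illustrative sketch (added; the helper name is an assumption): the
 * embedded ib_qp is the first member, so verbs callbacks recover the
 * driver QP with container_of().
 */
static inline struct irdma_qp *to_iwqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct irdma_qp, ibqp);
}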

enum irdma_mmap_flag {
	IRDMA_MMAP_IO_NC,
	IRDMA_MMAP_IO_WC,
};

struct irdma_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 bar_offset;
	u8 mmap_flag; /* enum irdma_mmap_flag */
};
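
/*
 * Illustrative sketch (added; the helper name is an assumption): the RDMA
 * core resolves a user mmap() offset to a rdma_user_mmap_entry, and the
 * driver entry is then recovered with container_of().
 */
static inline struct irdma_user_mmap_entry *
to_irdma_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct irdma_user_mmap_entry,
			    rdma_entry);
}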

static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}
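
/*
 * Illustrative usage sketch (added; the function below is an assumption,
 * not part of this header): log the firmware version recovered by the two
 * FIELD_GET() accessors above.
 */
static inline void irdma_print_fw_ver(struct irdma_sc_dev *dev)
{
	pr_info("irdma: firmware version %u.%u\n",
		irdma_fw_major_ver(dev), irdma_fw_minor_ver(dev));
}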

static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry)
{
	switch (cq_poll_info->op_type) {
	case IRDMA_OP_TYPE_RDMA_WRITE:
	case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
		entry->opcode = IB_WC_RDMA_WRITE;
		break;
	case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
	case IRDMA_OP_TYPE_RDMA_READ:
		entry->opcode = IB_WC_RDMA_READ;
		break;
	case IRDMA_OP_TYPE_SEND_SOL:
	case IRDMA_OP_TYPE_SEND_SOL_INV:
	case IRDMA_OP_TYPE_SEND_INV:
	case IRDMA_OP_TYPE_SEND:
		entry->opcode = IB_WC_SEND;
		break;
	case IRDMA_OP_TYPE_FAST_REG_NSMR:
		entry->opcode = IB_WC_REG_MR;
		break;
	case IRDMA_OP_TYPE_INV_STAG:
		entry->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		entry->status = IB_WC_GENERAL_ERR;
	}
}

static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry, bool send_imm_support)
{
	/*
	 * iWARP does not support sendImm, so the presence of Imm data
	 * must be WriteImm.
	 */
	if (!send_imm_support) {
		entry->opcode = cq_poll_info->imm_valid ?
					IB_WC_RECV_RDMA_WITH_IMM :
					IB_WC_RECV;
		return;
	}

	switch (cq_poll_info->op_type) {
	case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
	case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		break;
	default:
		entry->opcode = IB_WC_RECV;
	}
}
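
/*
 * Illustrative caller sketch (added; everything except the two helpers
 * above is an assumption): during poll_cq, one raw CQE is translated into
 * an ib_wc by picking the SQ or RQ mapping.
 */
static inline void irdma_set_ib_wc_op(struct irdma_cq_poll_info *cq_poll_info,
				      struct ib_wc *entry, bool is_sq,
				      bool send_imm_support)
{
	if (is_sq)
		set_ib_wc_op_sq(cq_poll_info, entry);
	else
		set_ib_wc_op_rq(cq_poll_info, entry, send_imm_support);
}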

void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_dealloc_device(struct ib_device *ibdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
void irdma_generate_flush_completions(struct irdma_qp *iwqp);
void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
#endif /* IRDMA_VERBS_H */