/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include <linux/qed/qed_rdma_if.h>
#include "qed_rdma.h"
#include "qed_roce.h"
#include "qed_sp.h"

int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
			struct qed_bmap *bmap, u32 max_count, char *name)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);

	bmap->max_count = max_count;

	bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long),
			       GFP_KERNEL);
	if (!bmap->bitmap)
		return -ENOMEM;

	snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
	return 0;
}

int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
			   struct qed_bmap *bmap, u32 *id_num)
{
	*id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
	if (*id_num >= bmap->max_count)
		return -EINVAL;

	__set_bit(*id_num, bmap->bitmap);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
		   bmap->name, *id_num);

	return 0;
}

void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
		     struct qed_bmap *bmap, u32 id_num)
{
	if (id_num >= bmap->max_count)
		return;

	__set_bit(id_num, bmap->bitmap);
}

void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
			 struct qed_bmap *bmap, u32 id_num)
{
	bool b_acquired;

	if (id_num >= bmap->max_count)
		return;

	b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
	if (!b_acquired) {
		DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
			  bmap->name, id_num);
		return;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
		   bmap->name, id_num);
}

int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
		     struct qed_bmap *bmap, u32 id_num)
{
	if (id_num >= bmap->max_count)
		return -1;

	return test_bit(id_num, bmap->bitmap);
}

static bool qed_bmap_is_empty(struct qed_bmap *bmap)
{
	return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
}

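/* Usage sketch (illustrative only, not part of the driver): consumers
 * allocate a bitmap once, then grab and return IDs under the rdma_info
 * lock, exactly as the TID/PD/DPI helpers further down do:
 *
 *	u32 id;
 *
 *	rc = qed_rdma_bmap_alloc(p_hwfn, &bmap, 64, "EXAMPLE");
 *	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 *	rc = qed_rdma_bmap_alloc_id(p_hwfn, &bmap, &id);
 *	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 *	...
 *	qed_bmap_release_id(p_hwfn, &bmap, id);
 *	qed_rdma_bmap_free(p_hwfn, &bmap, true);
 */
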
static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
	/* First sb id for RoCE is after all the l2 sb */
	return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
}

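/* For example (hypothetical numbers): with 8 L2 status blocks, CNQ 0
 * lands on SB id 8, CNQ 1 on SB id 9, and so on.
 */
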
int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_info *p_rdma_info;

	p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
	if (!p_rdma_info)
		return -ENOMEM;

	spin_lock_init(&p_rdma_info->lock);

	p_hwfn->p_rdma_info = p_rdma_info;
	return 0;
}

void qed_rdma_info_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->p_rdma_info);
	p_hwfn->p_rdma_info = NULL;
}

static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 num_cons, num_tasks;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		p_rdma_info->proto = PROTOCOLID_IWARP;
	else
		p_rdma_info->proto = PROTOCOLID_ROCE;

	num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
					       NULL);

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		p_rdma_info->num_qps = num_cons;
	else
		p_rdma_info->num_qps = num_cons / 2; /* 2 cids per qp */

	num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);

	/* Each MR uses a single task */
	p_rdma_info->num_mrs = num_tasks;

	/* Queue zone lines are shared between RoCE and L2 in such a way that
	 * they can be used by each without obstructing the other.
	 */
	p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
	p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);

	/* Allocate a struct with device params and fill it */
	p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
	if (!p_rdma_info->dev)
		return rc;

	/* Allocate a struct with port params and fill it */
	p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
	if (!p_rdma_info->port)
		goto free_rdma_dev;

	/* Allocate bit map for pd's */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
				 "PD");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate pd_map, rc = %d\n",
			   rc);
		goto free_rdma_port;
	}

	/* Allocate DPI bitmap */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
				 p_hwfn->dpi_count, "DPI");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate DPI bitmap, rc = %d\n", rc);
		goto free_pd_map;
	}

	/* Allocate bitmap for cq's. The maximum number of CQs is bound to
	 * the number of connections we support. (num_qps in iWARP or
	 * num_qps/2 in RoCE).
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, num_cons, "CQ");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate cq bitmap, rc = %d\n", rc);
		goto free_dpi_map;
	}

	/* Allocate bitmap for toggle bit for cq icids
	 * We toggle the bit every time we create or resize cq for a given icid.
	 * Size needs to equal the size of the cq bmap.
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
				 num_cons, "Toggle");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate toggle bits, rc = %d\n", rc);
		goto free_cq_map;
	}

	/* Allocate bitmap for itids */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
				 p_rdma_info->num_mrs, "MR");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate itids bitmaps, rc = %d\n", rc);
		goto free_toggle_map;
	}

	/* Allocate bitmap for cids used for qps. */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
				 "CID");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate cid bitmap, rc = %d\n", rc);
		goto free_tid_map;
	}

	/* Allocate bitmap for cids used for responders/requesters. */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
				 "REAL_CID");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate real cid bitmap, rc = %d\n", rc);
		goto free_cid_map;
	}

	/* Allocate bitmap for srqs */
	p_rdma_info->num_srqs = qed_cxt_get_srq_count(p_hwfn);
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
				 p_rdma_info->num_srqs, "SRQ");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate srq bitmap, rc = %d\n", rc);
		goto free_real_cid_map;
	}

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		rc = qed_iwarp_alloc(p_hwfn);

	if (rc)
		goto free_srq_map;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
	return 0;

free_srq_map:
	kfree(p_rdma_info->srq_map.bitmap);
free_real_cid_map:
	kfree(p_rdma_info->real_cid_map.bitmap);
free_cid_map:
	kfree(p_rdma_info->cid_map.bitmap);
free_tid_map:
	kfree(p_rdma_info->tid_map.bitmap);
free_toggle_map:
	kfree(p_rdma_info->toggle_bits.bitmap);
free_cq_map:
	kfree(p_rdma_info->cq_map.bitmap);
free_dpi_map:
	kfree(p_rdma_info->dpi_map.bitmap);
free_pd_map:
	kfree(p_rdma_info->pd_map.bitmap);
free_rdma_port:
	kfree(p_rdma_info->port);
free_rdma_dev:
	kfree(p_rdma_info->dev);

	return rc;
}

void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
			struct qed_bmap *bmap, bool check)
{
	int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
	int last_line = bmap->max_count / (64 * 8);
	int last_item = last_line * 8 +
	    DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
	u64 *pmap = (u64 *)bmap->bitmap;
	int line, item, offset;
	u8 str_last_line[200] = { 0 };

	if (!weight || !check)
		goto end;

	DP_NOTICE(p_hwfn,
		  "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
		  bmap->name, bmap->max_count, weight);

	/* print aligned non-zero lines, if any */
	for (item = 0, line = 0; line < last_line; line++, item += 8)
		if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
			DP_NOTICE(p_hwfn,
				  "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
				  line,
				  pmap[item],
				  pmap[item + 1],
				  pmap[item + 2],
				  pmap[item + 3],
				  pmap[item + 4],
				  pmap[item + 5],
				  pmap[item + 6], pmap[item + 7]);

	/* print last unaligned non-zero line, if any */
	if ((bmap->max_count % (64 * 8)) &&
	    (bitmap_weight((unsigned long *)&pmap[item],
			   bmap->max_count - item * 64))) {
		offset = sprintf(str_last_line, "line 0x%04x: ", line);
		for (; item < last_item; item++)
			offset += sprintf(str_last_line + offset,
					  "0x%016llx ", pmap[item]);
		DP_NOTICE(p_hwfn, "%s\n", str_last_line);
	}

end:
	kfree(bmap->bitmap);
	bmap->bitmap = NULL;
}

static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_resc_free(p_hwfn);

	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1);

	kfree(p_rdma_info->port);
	kfree(p_rdma_info->dev);
}

static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
{
	qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
}

static void qed_rdma_free(struct qed_hwfn *p_hwfn)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");

	qed_rdma_free_reserved_lkey(p_hwfn);
	qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto);
	qed_rdma_resc_free(p_hwfn);
}

static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
{
	guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
	guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
	guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
	guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
	guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
}

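/* This is the usual EUI-64 style derivation: flip the universal/local
 * bit of the first MAC octet and splice ff:fe into the middle. For
 * example (hypothetical MAC), 02:0c:29:aa:bb:cc yields the GUID
 * 00:0c:29:ff:fe:aa:bb:cc.
 */
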
static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
				 struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_events *events;

	events = &p_hwfn->p_rdma_info->events;

	events->unaffiliated_event = params->events->unaffiliated_event;
	events->affiliated_event = params->events->affiliated_event;
	events->context = params->events->context;
}

static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
				  struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 pci_status_control;
	u32 num_qps;

	/* Vendor specific information */
	dev->vendor_id = cdev->vendor_id;
	dev->vendor_part_id = cdev->device_id;
	dev->hw_ver = 0;
	dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
		      (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);

	qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
	dev->node_guid = dev->sys_image_guid;

	dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
			     RDMA_MAX_SGE_PER_RQ_WQE);

	if (cdev->rdma_max_sge)
		dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);

	dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE;
	if (p_hwfn->cdev->rdma_max_srq_sge) {
		dev->max_srq_sge = min_t(u32,
					 p_hwfn->cdev->rdma_max_srq_sge,
					 dev->max_srq_sge);
	}
	dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;

	dev->max_inline = (cdev->rdma_max_inline) ?
			  min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
			  dev->max_inline;

	dev->max_wqe = QED_RDMA_MAX_WQE;
	dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);

	/* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
	 * it is up-aligned to 16 and then to ILT page size within qed cxt.
	 * This is OK in terms of ILT but we don't want to configure the FW
	 * above its abilities.
	 */
	num_qps = ROCE_MAX_QPS;
	num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
	dev->max_qp = num_qps;

	/* CQs use the same icids that QPs use hence they are limited by the
	 * number of icids. There are two icids per QP.
	 */
	dev->max_cq = num_qps * 2;

	/* The number of mrs is smaller by 1 since the first is reserved */
	dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
	dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;

	/* The maximum CQE capacity per CQ supported.
	 * max number of cqes will be in two layer pbl,
	 * 8 is the pointer size in bytes
	 * 32 is the size of cq element in bytes
	 */
	if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
		dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
	else
		dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;

	dev->max_mw = 0;
	dev->max_fmr = QED_RDMA_MAX_FMR;
	dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
	dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
	dev->max_pkey = QED_RDMA_MAX_P_KEY;

	dev->max_srq = p_hwfn->p_rdma_info->num_srqs;
	dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM;
	dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					  (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
	dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					 RDMA_REQ_RD_ATOMIC_ELM_SIZE;
	dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
					   p_hwfn->p_rdma_info->num_qps;
	dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
	dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
	dev->max_pd = RDMA_MAX_PDS;
	dev->max_ah = p_hwfn->p_rdma_info->num_qps;
	dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);

	/* Set capabilities */
	dev->dev_caps = 0;
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);

	/* Check atomic operations support in PCI configuration space. */
	pci_read_config_dword(cdev->pdev,
			      cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
			      &pci_status_control);

	if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
		SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_init_devinfo(p_hwfn);
}

static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	port->port_state = p_hwfn->mcp_info->link_output.link_up ?
			   QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

	port->max_msg_size = min_t(u64,
				   (dev->max_mr_mw_fmr_size *
				    p_hwfn->cdev->rdma_max_sge),
				   BIT(31));

	port->pkey_bad_counter = 0;
}

static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
	p_hwfn->b_rdma_enabled_in_prs = false;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_init_hw(p_hwfn, p_ptt);
	else
		rc = qed_roce_init_hw(p_hwfn, p_ptt);

	return rc;
}

static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
			     struct qed_rdma_start_in_params *params,
			     struct qed_ptt *p_ptt)
{
	struct rdma_init_func_ramrod_data *p_ramrod;
	struct qed_rdma_cnq_params *p_cnq_pbl_list;
	struct rdma_init_func_hdr *p_params_header;
	struct rdma_cnq_params *p_cnq_params;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 cnq_id, sb_id;
	u16 igu_sb_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");

	/* Save the number of cnqs for the function close ramrod */
	p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		qed_iwarp_init_fw_ramrod(p_hwfn,
					 &p_ent->ramrod.iwarp_init_func);
		p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
	} else {
		p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
	}

	p_params_header = &p_ramrod->params_header;
	p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
							   QED_RDMA_CNQ_RAM);
	p_params_header->num_cnqs = params->desired_cnq;

	if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
		p_params_header->cq_ring_mode = 1;
	else
		p_params_header->cq_ring_mode = 0;

	for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
		sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
		igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
		p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id);
		p_cnq_params = &p_ramrod->cnq_params[cnq_id];
		p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];

		p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
		p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;

		DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
			       p_cnq_pbl_list->pbl_ptr);

		/* we assume here that cnq_id and qz_offset are the same */
		p_cnq_params->queue_zone_num =
			cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
				    cnq_id);
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->tid_map, itid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc)
		goto out;

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
out:
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	/* Tid 0 will be used as the key for "reserved MR".
	 * The driver should allocate memory for it so it can be loaded but no
	 * ramrod should be passed on it.
	 */
	qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
	if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
		DP_NOTICE(p_hwfn,
			  "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
		return -EINVAL;
	}

	return 0;
}

static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_rdma_start_in_params *params)
{
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");

	qed_rdma_init_devinfo(p_hwfn, params);
	qed_rdma_init_port(p_hwfn);
	qed_rdma_init_events(p_hwfn, params);

	rc = qed_rdma_reserve_lkey(p_hwfn);
	if (rc)
		return rc;

	rc = qed_rdma_init_hw(p_hwfn, p_ptt);
	if (rc)
		return rc;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		rc = qed_iwarp_setup(p_hwfn, p_ptt, params);
		if (rc)
			return rc;
	} else {
		rc = qed_roce_setup(p_hwfn);
		if (rc)
			return rc;
	}

	return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}

static int qed_rdma_stop(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_close_func_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_ptt *p_ptt;
	u32 ll2_ethertype_en;
	int rc = -EBUSY;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
		return rc;
	}

	/* Disable RoCE search */
	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
	p_hwfn->b_rdma_enabled_in_prs = false;
	p_hwfn->p_rdma_info->active = 0;
	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);

	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en & 0xFFFE));

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		rc = qed_iwarp_stop(p_hwfn, p_ptt);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}
	} else {
		qed_roce_stop(p_hwfn);
	}

	qed_ptt_release(p_hwfn, p_ptt);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Stop RoCE */
	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		goto out;

	p_ramrod = &p_ent->ramrod.rdma_close_func;

	p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
	p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

out:
	qed_rdma_free(p_hwfn);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_add_user(void *rdma_cxt,
			     struct qed_rdma_add_user_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 dpi_start_offset;
	u32 returned_id = 0;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");

	/* Allocate DPI */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
				    &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	out_params->dpi = (u16)returned_id;

	/* Calculate the corresponding DPI address */
	dpi_start_offset = p_hwfn->dpi_start_offset;

	out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
				     dpi_start_offset +
				     ((out_params->dpi) * p_hwfn->dpi_size));

	out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
				    dpi_start_offset +
				    ((out_params->dpi) * p_hwfn->dpi_size);

	out_params->dpi_size = p_hwfn->dpi_size;
	out_params->wid_count = p_hwfn->wid_count;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
	return rc;
}

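/* Worked example (hypothetical numbers): with dpi_start_offset 0x1000
 * and dpi_size 4096, DPI 3 maps to doorbells + 0x1000 + 3 * 4096; the
 * same offset is applied to db_phys_addr for the physical view.
 */
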
static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");

	/* Link may have changed */
	p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
			     QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

	p_port->link_speed = p_hwfn->mcp_info->link_output.speed;

	p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;

	return p_port;
}

static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");

	/* Return struct with device parameters */
	return p_hwfn->p_rdma_info->dev;
}

static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
	struct qed_hwfn *p_hwfn;
	u16 qz_num;
	u32 addr;

	p_hwfn = (struct qed_hwfn *)rdma_cxt;

	if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
		DP_NOTICE(p_hwfn,
			  "queue zone offset %d is too large (max is %d)\n",
			  qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
		return;
	}

	qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
	addr = GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	wmb();
}

static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
				  struct qed_dev_rdma_info *info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);

	memset(info, 0, sizeof(*info));

	info->rdma_type = QED_IS_ROCE_PERSONALITY(p_hwfn) ?
	    QED_RDMA_TYPE_ROCE : QED_RDMA_TYPE_IWARP;

	info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);

	qed_fill_dev_info(cdev, &info->common);

	return 0;
}

static int qed_rdma_get_sb_start(struct qed_dev *cdev)
{
	int feat_num;

	if (cdev->num_hwfns > 1)
		feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
	else
		feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
			   cdev->num_hwfns;

	return feat_num;
}

static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
{
	int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
	int n_msix = cdev->int_params.rdma_msix_cnt;

	return min_t(int, n_cnq, n_msix);
}

static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
		DP_ERR(cdev,
		       "qed roce supports only MSI-X interrupts (detected %d).\n",
		       cdev->int_params.out.int_mode);
		return -EINVAL;
	} else if (cdev->int_params.fp_msix_cnt) {
		limit = cdev->int_params.rdma_msix_cnt;
	}

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

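/* E.g. (hypothetical numbers): a request for 32 vectors with
 * rdma_msix_cnt == 16 is clamped to 16, while a non-MSI-X configuration
 * fails with -EINVAL.
 */
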
static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(*info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.rdma_msix_base;

		info->msix_cnt = cdev->int_params.rdma_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];

		DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
			   info->msix_cnt, msix_base);
	}

	return 0;
}

static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 returned_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");

	/* Allocates an unused protection domain */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->pd_map, &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	*pd = (u16)returned_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
	return rc;
}

static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);

	/* Returns a previously allocated protection domain for reuse */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static enum qed_rdma_toggle_bit
qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
{
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	enum qed_rdma_toggle_bit toggle_bit;
	u32 bmap_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);

	/* The function toggles the bit that is related to a given icid
	 * and returns the new toggle bit's value.
	 */
	bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);

	spin_lock_bh(&p_info->lock);
	toggle_bit = !test_and_change_bit(bmap_id,
					  p_info->toggle_bits.bitmap);
	spin_unlock_bh(&p_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
		   toggle_bit);

	return toggle_bit;
}

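/* Illustration: the first create_cq on an icid flips its toggle bit
 * 0 -> 1 and reports 1; a later resize flips it back and reports 0. The
 * create CQ ramrod below carries the new value so FW and driver agree.
 */
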
static int qed_rdma_create_cq(void *rdma_cxt,
			      struct qed_rdma_create_cq_in_params *params,
			      u16 *icid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	struct rdma_create_cq_ramrod_data *p_ramrod;
	enum qed_rdma_toggle_bit toggle_bit;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 returned_id, start_cid;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
		   params->cq_handle_hi, params->cq_handle_lo);

	/* Allocate icid */
	spin_lock_bh(&p_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
	spin_unlock_bh(&p_info->lock);

	if (rc) {
		DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
		return rc;
	}

	start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
						p_info->proto);
	*icid = returned_id + start_cid;

	/* Check if icid requires a page allocation */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
	if (rc)
		goto err;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = *icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Send create CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_CREATE_CQ,
				 p_info->proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_create_cq;

	p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
	p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
	p_ramrod->dpi = cpu_to_le16(params->dpi);
	p_ramrod->is_two_level_pbl = params->pbl_two_level;
	p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
	p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
	p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
			   params->cnq_id;
	p_ramrod->int_timeout = params->int_timeout;

	/* toggle the bit for every resize or create cq for a given icid */
	toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);

	p_ramrod->toggle_bit = toggle_bit;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		/* restore toggle bit */
		qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
		goto err;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
	return rc;

err:
	/* release allocated icid */
	spin_lock_bh(&p_info->lock);
	qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
	spin_unlock_bh(&p_info->lock);
	DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);

	return rc;
}

static int
qed_rdma_destroy_cq(void *rdma_cxt,
		    struct qed_rdma_destroy_cq_in_params *in_params,
		    struct qed_rdma_destroy_cq_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_destroy_cq_output_params *p_ramrod_res;
	struct rdma_destroy_cq_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	enum protocol_type proto;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);

	p_ramrod_res =
	    (struct rdma_destroy_cq_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(struct rdma_destroy_cq_output_params),
			       &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy cq failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = in_params->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	proto = p_hwfn->p_rdma_info->proto;
	/* Send destroy CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_DESTROY_CQ,
				 proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	/* Free icid */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	qed_bmap_release_id(p_hwfn,
			    &p_hwfn->p_rdma_info->cq_map,
			    (in_params->icid -
			     qed_cxt_get_proto_cid_start(p_hwfn, proto)));

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
	return rc;

err:	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
{
	p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
	p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
	p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
}

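/* Example (illustrative MAC): 00:11:22:33:44:55 is handed to FW as the
 * three little-endian words 0x0011, 0x2233 and 0x4455.
 */
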
static int qed_rdma_query_qp(void *rdma_cxt,
			     struct qed_rdma_qp *qp,
			     struct qed_rdma_query_qp_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* The following fields are filled in from qp and not FW as they can't
	 * be modified by FW
	 */
	out_params->mtu = qp->mtu;
	out_params->dest_qp = qp->dest_qp;
	out_params->incoming_atomic_en = qp->incoming_atomic_en;
	out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
	out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
	out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
	out_params->dgid = qp->dgid;
	out_params->flow_label = qp->flow_label;
	out_params->hop_limit_ttl = qp->hop_limit_ttl;
	out_params->traffic_class_tos = qp->traffic_class_tos;
	out_params->timeout = qp->ack_timeout;
	out_params->rnr_retry = qp->rnr_retry_cnt;
	out_params->retry_cnt = qp->retry_cnt;
	out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
	out_params->pkey_index = 0;
	out_params->max_rd_atomic = qp->max_rd_atomic_req;
	out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
	out_params->sqd_async = qp->sqd_async;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_query_qp(qp, out_params);
	else
		rc = qed_roce_query_qp(p_hwfn, qp, out_params);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		rc = qed_iwarp_destroy_qp(p_hwfn, qp);
	else
		rc = qed_roce_destroy_qp(p_hwfn, qp);

	/* free qp params struct */
	kfree(qp);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
	return rc;
}

static struct qed_rdma_qp *
qed_rdma_create_qp(void *rdma_cxt,
		   struct qed_rdma_create_qp_in_params *in_params,
		   struct qed_rdma_create_qp_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_qp *qp;
	u8 max_stats_queues;
	int rc;

	if (!rdma_cxt || !in_params || !out_params ||
	    !p_hwfn->p_rdma_info->active) {
		DP_ERR(p_hwfn->cdev,
		       "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
		       rdma_cxt, in_params, out_params);
		return NULL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "qed rdma create qp called with qp_handle = %08x%08x\n",
		   in_params->qp_handle_hi, in_params->qp_handle_lo);

	/* Some sanity checks... */
	max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
	if (in_params->stats_queue >= max_stats_queues) {
		DP_ERR(p_hwfn->cdev,
		       "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
		       in_params->stats_queue, max_stats_queues);
		return NULL;
	}

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		if (in_params->sq_num_pages * sizeof(struct regpair) >
		    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) {
			DP_NOTICE(p_hwfn->cdev,
				  "Sq num pages: %d exceeds maximum\n",
				  in_params->sq_num_pages);
			return NULL;
		}
		if (in_params->rq_num_pages * sizeof(struct regpair) >
		    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) {
			DP_NOTICE(p_hwfn->cdev,
				  "Rq num pages: %d exceeds maximum\n",
				  in_params->rq_num_pages);
			return NULL;
		}
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->cur_state = QED_ROCE_QP_STATE_RESET;
	qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
	qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
	qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
	qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
	qp->use_srq = in_params->use_srq;
	qp->signal_all = in_params->signal_all;
	qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
	qp->pd = in_params->pd;
	qp->dpi = in_params->dpi;
	qp->sq_cq_id = in_params->sq_cq_id;
	qp->sq_num_pages = in_params->sq_num_pages;
	qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
	qp->rq_cq_id = in_params->rq_cq_id;
	qp->rq_num_pages = in_params->rq_num_pages;
	qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
	qp->srq_id = in_params->srq_id;
	qp->req_offloaded = false;
	qp->resp_offloaded = false;
	qp->e2e_flow_control_en = qp->use_srq ? false : true;
	qp->stats_queue = in_params->stats_queue;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		rc = qed_iwarp_create_qp(p_hwfn, qp, out_params);
		qp->qpid = qp->icid;
	} else {
		rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
		qp->qpid = ((0xFF << 16) | qp->icid);
	}

	if (rc) {
		kfree(qp);
		return NULL;
	}

	out_params->icid = qp->icid;
	out_params->qp_id = qp->qpid;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
	return qp;
}

static int qed_rdma_modify_qp(void *rdma_cxt,
			      struct qed_rdma_qp *qp,
			      struct qed_rdma_modify_qp_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	enum qed_roce_qp_state prev_state;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
		   qp->icid, params->new_state);

	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
		qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
		qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
		qp->incoming_atomic_en = params->incoming_atomic_en;
	}

	/* Update QP structure with the updated values */
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
		qp->roce_mode = params->roce_mode;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
		qp->pkey = params->pkey;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
		qp->e2e_flow_control_en = params->e2e_flow_control_en;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
		qp->dest_qp = params->dest_qp;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
		/* Indicates that the following parameters have changed:
		 * Traffic class, flow label, hop limit, source GID,
		 * destination GID, loopback indicator
		 */
		qp->traffic_class_tos = params->traffic_class_tos;
		qp->flow_label = params->flow_label;
		qp->hop_limit_ttl = params->hop_limit_ttl;

		qp->sgid = params->sgid;
		qp->dgid = params->dgid;
		qp->udp_src_port = 0;
		qp->vlan_id = params->vlan_id;
		qp->mtu = params->mtu;
		qp->lb_indication = params->lb_indication;
		memcpy((u8 *)&qp->remote_mac_addr[0],
		       (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
		if (params->use_local_mac) {
			memcpy((u8 *)&qp->local_mac_addr[0],
			       (u8 *)&params->local_mac_addr[0], ETH_ALEN);
		} else {
			memcpy((u8 *)&qp->local_mac_addr[0],
			       (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
		}
	}
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
		qp->rq_psn = params->rq_psn;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
		qp->sq_psn = params->sq_psn;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
		qp->max_rd_atomic_req = params->max_rd_atomic_req;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
		qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
		qp->ack_timeout = params->ack_timeout;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
		qp->retry_cnt = params->retry_cnt;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
		qp->rnr_retry_cnt = params->rnr_retry_cnt;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
		qp->min_rnr_nak_timer = params->min_rnr_nak_timer;

	qp->sqd_async = params->sqd_async;

	prev_state = qp->cur_state;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
		qp->cur_state = params->new_state;
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
			   qp->cur_state);
	}

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		enum qed_iwarp_qp_state new_state =
		    qed_roce2iwarp_state(qp->cur_state);

		rc = qed_iwarp_modify_qp(p_hwfn, qp, new_state, 0);
	} else {
		rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
	return rc;
}

1433 | static int | |
1434 | qed_rdma_register_tid(void *rdma_cxt, | |
1435 | struct qed_rdma_register_tid_in_params *params) | |
1436 | { | |
1437 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
1438 | struct rdma_register_tid_ramrod_data *p_ramrod; | |
1439 | struct qed_sp_init_data init_data; | |
1440 | struct qed_spq_entry *p_ent; | |
1441 | enum rdma_tid_type tid_type; | |
1442 | u8 fw_return_code; | |
1443 | int rc; | |
1444 | ||
1445 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid); | |
1446 | ||
1447 | /* Get SPQ entry */ | |
1448 | memset(&init_data, 0, sizeof(init_data)); | |
1449 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; | |
1450 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | |
1451 | ||
1452 | rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR, | |
1453 | p_hwfn->p_rdma_info->proto, &init_data); | |
1454 | if (rc) { | |
1455 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); | |
1456 | return rc; | |
1457 | } | |
1458 | ||
1459 | if (p_hwfn->p_rdma_info->last_tid < params->itid) | |
1460 | p_hwfn->p_rdma_info->last_tid = params->itid; | |
1461 | ||
1462 | p_ramrod = &p_ent->ramrod.rdma_register_tid; | |
1463 | ||
1464 | p_ramrod->flags = 0; | |
1465 | SET_FIELD(p_ramrod->flags, | |
1466 | RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL, | |
1467 | params->pbl_two_level); | |
1468 | ||
1469 | SET_FIELD(p_ramrod->flags, | |
1470 | RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva); | |
1471 | ||
1472 | SET_FIELD(p_ramrod->flags, | |
1473 | RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr); | |
1474 | ||
1475 | /* Don't initialize D/C field, as it may override other bits. */ | |
1476 | if (params->tid_type != QED_RDMA_TID_FMR && !params->dma_mr) | 
1477 | SET_FIELD(p_ramrod->flags, | |
1478 | RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG, | |
1479 | params->page_size_log - 12); | |
1480 | ||
1481 | SET_FIELD(p_ramrod->flags, | |
1482 | RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ, | |
1483 | params->remote_read); | |
1484 | ||
1485 | SET_FIELD(p_ramrod->flags, | |
1486 | RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE, | |
1487 | params->remote_write); | |
1488 | ||
1489 | SET_FIELD(p_ramrod->flags, | |
1490 | RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC, | |
1491 | params->remote_atomic); | |
1492 | ||
1493 | SET_FIELD(p_ramrod->flags, | |
1494 | RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE, | |
1495 | params->local_write); | |
1496 | ||
1497 | SET_FIELD(p_ramrod->flags, | |
1498 | RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read); | |
1499 | ||
1500 | SET_FIELD(p_ramrod->flags, | |
1501 | RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND, | |
1502 | params->mw_bind); | |
1503 | ||
1504 | SET_FIELD(p_ramrod->flags1, | |
1505 | RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG, | |
1506 | params->pbl_page_size_log - 12); | |
1507 | ||
1508 | SET_FIELD(p_ramrod->flags2, | |
1509 | RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr); | |
1510 | ||
1511 | switch (params->tid_type) { | |
1512 | case QED_RDMA_TID_REGISTERED_MR: | |
1513 | tid_type = RDMA_TID_REGISTERED_MR; | |
1514 | break; | |
1515 | case QED_RDMA_TID_FMR: | |
1516 | tid_type = RDMA_TID_FMR; | |
1517 | break; | |
d52c89f1 MK |
1518 | case QED_RDMA_TID_MW: |
1519 | tid_type = RDMA_TID_MW; | |
f1372ee1 KM |
1520 | break; |
1521 | default: | |
1522 | rc = -EINVAL; | |
1523 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); | |
fb5e7438 | 1524 | qed_sp_destroy_request(p_hwfn, p_ent); |
f1372ee1 KM |
1525 | return rc; |
1526 | } | |
1527 | SET_FIELD(p_ramrod->flags1, | |
1528 | RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type); | |
1529 | ||
1530 | p_ramrod->itid = cpu_to_le32(params->itid); | |
1531 | p_ramrod->key = params->key; | |
1532 | p_ramrod->pd = cpu_to_le16(params->pd); | |
1533 | p_ramrod->length_hi = (u8)(params->length >> 32); | |
1534 | p_ramrod->length_lo = DMA_LO_LE(params->length); | |
1535 | if (params->zbva) { | |
1536 | /* Lower 32 bits of the registered MR address; | 
1537 | * for a zero-based MR this holds the FBO (first byte offset). | 
1538 | */ | |
1539 | p_ramrod->va.hi = 0; | |
1540 | p_ramrod->va.lo = cpu_to_le32(params->fbo); | |
1541 | } else { | |
1542 | DMA_REGPAIR_LE(p_ramrod->va, params->vaddr); | |
1543 | } | |
1544 | DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr); | |
1545 | ||
1546 | /* DIF */ | |
1547 | if (params->dif_enabled) { | |
1548 | SET_FIELD(p_ramrod->flags2, | |
1549 | RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1); | |
1550 | DMA_REGPAIR_LE(p_ramrod->dif_error_addr, | |
1551 | params->dif_error_addr); | |
f1372ee1 KM |
1552 | } |
1553 | ||
1554 | rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); | |
1555 | if (rc) | |
1556 | return rc; | |
1557 | ||
1558 | if (fw_return_code != RDMA_RETURN_OK) { | |
1559 | DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code); | |
1560 | return -EINVAL; | |
1561 | } | |
1562 | ||
1563 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc); | |
1564 | return rc; | |
1565 | } | |
1566 | ||
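| /* Invalidate a TID via a DEREGISTER_MR ramrod. If the FW reports that | 
| * the TID is still in use (RDMA_RETURN_NIG_DRAIN_REQ), drain the NIG | 
| * through the MCP and resend the ramrod once. | 
| */ | 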
1567 | static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid) | |
1568 | { | |
1569 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
1570 | struct rdma_deregister_tid_ramrod_data *p_ramrod; | |
1571 | struct qed_sp_init_data init_data; | |
1572 | struct qed_spq_entry *p_ent; | |
1573 | struct qed_ptt *p_ptt; | |
1574 | u8 fw_return_code; | |
1575 | int rc; | |
1576 | ||
1577 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid); | |
1578 | ||
1579 | /* Get SPQ entry */ | |
1580 | memset(&init_data, 0, sizeof(init_data)); | |
1581 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; | |
1582 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | |
1583 | ||
1584 | rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR, | |
1585 | p_hwfn->p_rdma_info->proto, &init_data); | |
1586 | if (rc) { | |
1587 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); | |
1588 | return rc; | |
1589 | } | |
1590 | ||
1591 | p_ramrod = &p_ent->ramrod.rdma_deregister_tid; | |
1592 | p_ramrod->itid = cpu_to_le32(itid); | |
1593 | ||
1594 | rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); | |
1595 | if (rc) { | |
1596 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); | |
1597 | return rc; | |
1598 | } | |
1599 | ||
1600 | if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) { | |
1601 | DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code); | |
1602 | return -EINVAL; | |
1603 | } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) { | |
1604 | /* Bit indicating that the TID is in use and a NIG drain is | 
1605 | * required before sending the ramrod again | 
1606 | */ | |
1607 | p_ptt = qed_ptt_acquire(p_hwfn); | |
1608 | if (!p_ptt) { | |
1609 | rc = -EBUSY; | |
1610 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
1611 | "Failed to acquire PTT\n"); | |
1612 | return rc; | |
1613 | } | |
1614 | ||
1615 | rc = qed_mcp_drain(p_hwfn, p_ptt); | |
1616 | if (rc) { | |
1617 | qed_ptt_release(p_hwfn, p_ptt); | |
1618 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
1619 | "Drain failed\n"); | |
1620 | return rc; | |
1621 | } | |
1622 | ||
1623 | qed_ptt_release(p_hwfn, p_ptt); | |
1624 | ||
1625 | /* Resend the ramrod */ | |
1626 | rc = qed_sp_init_request(p_hwfn, &p_ent, | |
1627 | RDMA_RAMROD_DEREGISTER_MR, | |
1628 | p_hwfn->p_rdma_info->proto, | |
1629 | &init_data); | |
1630 | if (rc) { | |
1631 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
1632 | "Failed to init sp-element\n"); | |
1633 | return rc; | |
1634 | } | |
1635 | ||
1636 | rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); | |
1637 | if (rc) { | |
1638 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
1639 | "Ramrod failed\n"); | |
1640 | return rc; | |
1641 | } | |
1642 | ||
1643 | if (fw_return_code != RDMA_RETURN_OK) { | |
1644 | DP_NOTICE(p_hwfn, "fw_return_code = %d\n", | |
1645 | fw_return_code); | |
1646 | return -EINVAL; | 
1647 | } | |
1648 | } | |
1649 | ||
1650 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc); | |
1651 | return rc; | |
1652 | } | |
1653 | ||
f1372ee1 KM |
1654 | static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev) |
1655 | { | |
1656 | return QED_LEADING_HWFN(cdev); | |
1657 | } | |
1658 | ||
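| /* Update the WQE limit of an existing SRQ with a MODIFY_SRQ ramrod. */ | 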
39dbc646 YB |
1659 | static int qed_rdma_modify_srq(void *rdma_cxt, |
1660 | struct qed_rdma_modify_srq_in_params *in_params) | |
1661 | { | |
1662 | struct rdma_srq_modify_ramrod_data *p_ramrod; | |
1663 | struct qed_sp_init_data init_data = {}; | |
1664 | struct qed_hwfn *p_hwfn = rdma_cxt; | |
1665 | struct qed_spq_entry *p_ent; | |
1666 | u16 opaque_fid; | |
1667 | int rc; | |
1668 | ||
1669 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; | |
1670 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | |
1671 | ||
1672 | rc = qed_sp_init_request(p_hwfn, &p_ent, | |
1673 | RDMA_RAMROD_MODIFY_SRQ, | |
1674 | p_hwfn->p_rdma_info->proto, &init_data); | |
1675 | if (rc) | |
1676 | return rc; | |
1677 | ||
1678 | p_ramrod = &p_ent->ramrod.rdma_modify_srq; | |
1679 | p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); | |
1680 | opaque_fid = p_hwfn->hw_info.opaque_fid; | |
1681 | p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); | |
1682 | p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit); | |
1683 | ||
1684 | rc = qed_spq_post(p_hwfn, p_ent, NULL); | |
1685 | if (rc) | |
1686 | return rc; | |
1687 | ||
1688 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x\n", | 
1689 | in_params->srq_id); | |
1690 | ||
1691 | return rc; | |
1692 | } | |
1693 | ||
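| /* Tear down an SRQ: post a DESTROY_SRQ ramrod, then return the SRQ id | 
| * to the srq_map bitmap under the rdma_info lock. | 
| */ | 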
1694 | static int | |
1695 | qed_rdma_destroy_srq(void *rdma_cxt, | |
1696 | struct qed_rdma_destroy_srq_in_params *in_params) | |
1697 | { | |
1698 | struct rdma_srq_destroy_ramrod_data *p_ramrod; | |
1699 | struct qed_sp_init_data init_data = {}; | |
1700 | struct qed_hwfn *p_hwfn = rdma_cxt; | |
1701 | struct qed_spq_entry *p_ent; | |
1702 | struct qed_bmap *bmap; | |
1703 | u16 opaque_fid; | |
1704 | int rc; | |
1705 | ||
1706 | opaque_fid = p_hwfn->hw_info.opaque_fid; | |
1707 | ||
1708 | init_data.opaque_fid = opaque_fid; | |
1709 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | |
1710 | ||
1711 | rc = qed_sp_init_request(p_hwfn, &p_ent, | |
1712 | RDMA_RAMROD_DESTROY_SRQ, | |
1713 | p_hwfn->p_rdma_info->proto, &init_data); | |
1714 | if (rc) | |
1715 | return rc; | |
1716 | ||
1717 | p_ramrod = &p_ent->ramrod.rdma_destroy_srq; | |
1718 | p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); | |
1719 | p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); | |
1720 | ||
1721 | rc = qed_spq_post(p_hwfn, p_ent, NULL); | |
1722 | if (rc) | |
1723 | return rc; | |
1724 | ||
1725 | bmap = &p_hwfn->p_rdma_info->srq_map; | |
1726 | ||
1727 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); | |
1728 | qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id); | |
1729 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | |
1730 | ||
1731 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "SRQ destroyed Id = %x\n", | 
1732 | in_params->srq_id); | |
1733 | ||
1734 | return rc; | |
1735 | } | |
1736 | ||
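| /* Create an SRQ: reserve an id from the srq_map bitmap, back it with a | 
| * dynamically allocated ILT line, then post a CREATE_SRQ ramrod that | 
| * describes the PBL and the producers address. The id is released on | 
| * any failure. | 
| */ | 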
1737 | static int | |
1738 | qed_rdma_create_srq(void *rdma_cxt, | |
1739 | struct qed_rdma_create_srq_in_params *in_params, | |
1740 | struct qed_rdma_create_srq_out_params *out_params) | |
1741 | { | |
1742 | struct rdma_srq_create_ramrod_data *p_ramrod; | |
1743 | struct qed_sp_init_data init_data = {}; | |
1744 | struct qed_hwfn *p_hwfn = rdma_cxt; | |
1745 | enum qed_cxt_elem_type elem_type; | |
1746 | struct qed_spq_entry *p_ent; | |
1747 | u16 opaque_fid, srq_id; | |
1748 | struct qed_bmap *bmap; | |
1749 | u32 returned_id; | |
1750 | int rc; | |
1751 | ||
1752 | bmap = &p_hwfn->p_rdma_info->srq_map; | |
1753 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); | |
1754 | rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id); | |
1755 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | |
1756 | ||
1757 | if (rc) { | |
1758 | DP_NOTICE(p_hwfn, "failed to allocate srq id\n"); | |
1759 | return rc; | |
1760 | } | |
1761 | ||
1762 | elem_type = QED_ELEM_SRQ; | |
1763 | rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id); | |
1764 | if (rc) | |
1765 | goto err; | |
1766 | /* The returned id is guaranteed to fit in a u16 */ | 
1767 | srq_id = (u16)returned_id; | 
1768 | opaque_fid = p_hwfn->hw_info.opaque_fid; | 
1769 | ||
1771 | init_data.opaque_fid = opaque_fid; | |
1772 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | |
1773 | ||
1774 | rc = qed_sp_init_request(p_hwfn, &p_ent, | |
1775 | RDMA_RAMROD_CREATE_SRQ, | |
1776 | p_hwfn->p_rdma_info->proto, &init_data); | |
1777 | if (rc) | |
1778 | goto err; | |
1779 | ||
1780 | p_ramrod = &p_ent->ramrod.rdma_create_srq; | |
1781 | DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr); | |
1782 | p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages); | |
1783 | p_ramrod->pd_id = cpu_to_le16(in_params->pd_id); | |
1784 | p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id); | |
1785 | p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); | |
1786 | p_ramrod->page_size = cpu_to_le16(in_params->page_size); | |
1787 | DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr); | |
1788 | ||
1789 | rc = qed_spq_post(p_hwfn, p_ent, NULL); | |
1790 | if (rc) | |
1791 | goto err; | |
1792 | ||
1793 | out_params->srq_id = srq_id; | |
1794 | ||
1795 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
1796 | "SRQ created Id = %x\n", out_params->srq_id); | |
1797 | ||
1798 | return rc; | |
1799 | ||
1800 | err: | |
1801 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); | |
1802 | qed_bmap_release_id(p_hwfn, bmap, returned_id); | |
1803 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | |
1804 | ||
1805 | return rc; | |
1806 | } | |
1807 | ||
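| /* Report whether any QP CIDs are currently allocated in the cid_map. */ | 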
b71b9afd | 1808 | bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn) |
f1372ee1 KM |
1809 | { |
1810 | bool result; | |
1811 | ||
291d57f6 MK |
1812 | /* If RDMA hasn't been activated yet, there are naturally no QPs */ | 
1813 | if (!p_hwfn->p_rdma_info->active) | |
f1372ee1 KM |
1814 | return false; |
1815 | ||
1816 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); | |
1817 | if (!p_hwfn->p_rdma_info->cid_map.bitmap) | |
1818 | result = false; | |
1819 | else | |
1820 | result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map); | |
1821 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | |
1822 | return result; | |
1823 | } | |
1824 | ||
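| /* Enable DPM only if neither the DCBX nor the doorbell-BAR constraint | 
| * (dcbx_no_edpm / db_bar_no_edpm) forbids it; write the resulting | 
| * state to the DORQ block. | 
| */ | 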
b71b9afd | 1825 | void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
f1372ee1 KM |
1826 | { |
1827 | u32 val; | |
1828 | ||
1829 | val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1; | |
1830 | ||
1831 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val); | |
1832 | DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA), | |
1833 | "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n", | |
1834 | val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm); | |
1835 | } | |
1836 | ||
f1372ee1 KM |
1837 | |
1838 | void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |
1839 | { | |
1840 | p_hwfn->db_bar_no_edpm = true; | |
1841 | ||
1842 | qed_rdma_dpm_conf(p_hwfn, p_ptt); | |
1843 | } | |
1844 | ||
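| /* Bring up RDMA on this hwfn: allocate the rdma_info resources, run | 
| * the setup sequence, and mark the instance active on success. | 
| */ | 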
1845 | static int qed_rdma_start(void *rdma_cxt, | |
1846 | struct qed_rdma_start_in_params *params) | |
1847 | { | |
1848 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
1849 | struct qed_ptt *p_ptt; | |
1850 | int rc = -EBUSY; | |
1851 | ||
1852 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | |
1853 | "desired_cnq = %08x\n", params->desired_cnq); | |
1854 | ||
1855 | p_ptt = qed_ptt_acquire(p_hwfn); | |
1856 | if (!p_ptt) | |
1857 | goto err; | |
1858 | ||
291d57f6 | 1859 | rc = qed_rdma_alloc(p_hwfn); |
f1372ee1 KM |
1860 | if (rc) |
1861 | goto err1; | |
1862 | ||
1863 | rc = qed_rdma_setup(p_hwfn, p_ptt, params); | |
1864 | if (rc) | |
1865 | goto err2; | |
1866 | ||
1867 | qed_ptt_release(p_hwfn, p_ptt); | |
291d57f6 | 1868 | p_hwfn->p_rdma_info->active = 1; |
f1372ee1 KM |
1869 | |
1870 | return rc; | |
1871 | ||
1872 | err2: | |
1873 | qed_rdma_free(p_hwfn); | |
1874 | err1: | |
1875 | qed_ptt_release(p_hwfn, p_ptt); | |
1876 | err: | |
1877 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc); | |
1878 | return rc; | |
1879 | } | |
1880 | ||
1881 | static int qed_rdma_init(struct qed_dev *cdev, | |
1882 | struct qed_rdma_start_in_params *params) | |
1883 | { | |
1884 | return qed_rdma_start(QED_LEADING_HWFN(cdev), params); | |
1885 | } | |
1886 | ||
1887 | static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) | |
1888 | { | |
1889 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | |
1890 | ||
1891 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi); | |
1892 | ||
1893 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); | |
1894 | qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi); | |
1895 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | |
1896 | } | |
1897 | ||
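| /* Swap the LLH MAC filter used by the RoCE LL2 connection: remove | 
| * @old_mac_address if given, then install @new_mac_address if given. | 
| */ | 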
1898 | static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev, | |
1899 | u8 *old_mac_address, | |
1900 | u8 *new_mac_address) | |
1901 | { | |
1902 | struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); | |
1903 | struct qed_ptt *p_ptt; | |
1904 | int rc = 0; | |
1905 | ||
1906 | p_ptt = qed_ptt_acquire(p_hwfn); | |
1907 | if (!p_ptt) { | |
1908 | DP_ERR(cdev, | |
1909 | "qed roce ll2 mac filter set: failed to acquire PTT\n"); | |
1910 | return -EINVAL; | |
1911 | } | |
1912 | ||
1913 | if (old_mac_address) | |
1914 | qed_llh_remove_mac_filter(p_hwfn, p_ptt, old_mac_address); | |
1915 | if (new_mac_address) | |
1916 | rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, new_mac_address); | |
1917 | ||
1918 | qed_ptt_release(p_hwfn, p_ptt); | |
1919 | ||
1920 | if (rc) | |
1921 | DP_ERR(cdev, | |
1922 | "qed roce ll2 mac filter set: failed to add MAC filter\n"); | |
1923 | ||
1924 | return rc; | |
1925 | } | |
1926 | ||
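| /* The RDMA API exported to protocol drivers; consumed through | 
| * qed_get_rdma_ops() below. | 
| */ | 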
1927 | static const struct qed_rdma_ops qed_rdma_ops_pass = { | |
1928 | .common = &qed_common_ops_pass, | |
1929 | .fill_dev_info = &qed_fill_rdma_dev_info, | |
1930 | .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx, | |
1931 | .rdma_init = &qed_rdma_init, | |
1932 | .rdma_add_user = &qed_rdma_add_user, | |
1933 | .rdma_remove_user = &qed_rdma_remove_user, | |
1934 | .rdma_stop = &qed_rdma_stop, | |
1935 | .rdma_query_port = &qed_rdma_query_port, | |
1936 | .rdma_query_device = &qed_rdma_query_device, | |
1937 | .rdma_get_start_sb = &qed_rdma_get_sb_start, | |
1938 | .rdma_get_rdma_int = &qed_rdma_get_int, | |
1939 | .rdma_set_rdma_int = &qed_rdma_set_int, | |
1940 | .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix, | |
1941 | .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update, | |
1942 | .rdma_alloc_pd = &qed_rdma_alloc_pd, | |
1943 | .rdma_dealloc_pd = &qed_rdma_free_pd, | |
1944 | .rdma_create_cq = &qed_rdma_create_cq, | |
1945 | .rdma_destroy_cq = &qed_rdma_destroy_cq, | |
1946 | .rdma_create_qp = &qed_rdma_create_qp, | |
1947 | .rdma_modify_qp = &qed_rdma_modify_qp, | |
1948 | .rdma_query_qp = &qed_rdma_query_qp, | |
1949 | .rdma_destroy_qp = &qed_rdma_destroy_qp, | |
1950 | .rdma_alloc_tid = &qed_rdma_alloc_tid, | |
1951 | .rdma_free_tid = &qed_rdma_free_tid, | |
1952 | .rdma_register_tid = &qed_rdma_register_tid, | |
1953 | .rdma_deregister_tid = &qed_rdma_deregister_tid, | |
39dbc646 YB |
1954 | .rdma_create_srq = &qed_rdma_create_srq, |
1955 | .rdma_modify_srq = &qed_rdma_modify_srq, | |
1956 | .rdma_destroy_srq = &qed_rdma_destroy_srq, | |
f1372ee1 KM |
1957 | .ll2_acquire_connection = &qed_ll2_acquire_connection, |
1958 | .ll2_establish_connection = &qed_ll2_establish_connection, | |
1959 | .ll2_terminate_connection = &qed_ll2_terminate_connection, | |
1960 | .ll2_release_connection = &qed_ll2_release_connection, | |
1961 | .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer, | |
1962 | .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet, | |
1963 | .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet, | |
1964 | .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter, | |
1965 | .ll2_get_stats = &qed_ll2_get_stats, | |
4b0fdd7c | 1966 | .iwarp_connect = &qed_iwarp_connect, |
65a91a6c KM |
1967 | .iwarp_create_listen = &qed_iwarp_create_listen, |
1968 | .iwarp_destroy_listen = &qed_iwarp_destroy_listen, | |
4b0fdd7c KM |
1969 | .iwarp_accept = &qed_iwarp_accept, |
1970 | .iwarp_reject = &qed_iwarp_reject, | |
1971 | .iwarp_send_rtr = &qed_iwarp_send_rtr, | |
f1372ee1 KM |
1972 | }; |
1973 | ||
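| /* Entry point for upper-layer RDMA drivers. A minimal usage sketch | 
| * (illustrative only; the parameter variables are assumptions, not | 
| * taken from this file): | 
| * | 
| *	const struct qed_rdma_ops *ops = qed_get_rdma_ops(); | 
| *	void *rdma_cxt = ops->rdma_get_rdma_ctx(cdev); | 
| * | 
| *	rc = ops->rdma_init(cdev, &start_in_params); | 
| *	if (!rc) | 
| *		rc = ops->rdma_alloc_tid(rdma_cxt, &itid); | 
| */ | 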
1974 | const struct qed_rdma_ops *qed_get_rdma_ops(void) | |
1975 | { | |
1976 | return &qed_rdma_ops_pass; | |
1977 | } | |
1978 | EXPORT_SYMBOL(qed_get_rdma_ops); |