/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"

struct qed_pi_info {
	qed_int_comp_cb_t comp_cb;
	void *cookie;
};

struct qed_sb_sp_info {
	struct qed_sb_info sb_info;

	/* per protocol index data */
	struct qed_pi_info pi_info_arr[PIS_PER_SB];
};
#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)

#define ATTN_STATE_BITS (0xfff)
#define ATTN_BITS_MASKABLE (0x3ff)
struct qed_sb_attn_info {
	/* Virtual & Physical address of the SB */
	struct atten_status_block *sb_attn;
	dma_addr_t sb_phys;

	/* Last seen running index */
	u16 index;

	/* Previously asserted attentions, which are still unasserted */
	u16 known_attn;

	/* Cleanup address for the link's general hw attention */
	u32 mfw_attn_addr;
};

static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
				      struct qed_sb_attn_info *p_sb_desc)
{
	u16 rc = 0;
	u16 index;

	/* Make certain the HW write took effect */
	mmiowb();

	index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
	if (p_sb_desc->index != index) {
		p_sb_desc->index = index;
		rc = QED_SB_ATT_IDX;
	}

	/* Make certain we got a consistent view with HW */
	mmiowb();

	return rc;
}
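
/* Editor's note: the function above mirrors the firmware-updated running
 * index of the attention status block. A changed index means new attention
 * events have arrived since the last DPC run, which is reported to the
 * caller via the QED_SB_ATT_IDX flag.
 */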

/**
 * @brief qed_int_assertion - handles asserted attention bits
 *
 * @param p_hwfn
 * @param asserted_bits newly asserted bits
 * @return int
 */
static int qed_int_assertion(struct qed_hwfn *p_hwfn,
			     u16 asserted_bits)
{
	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 igu_mask;

	/* Mask the source of the attention in the IGU */
	igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			  IGU_REG_ATTENTION_ENABLE);
	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
		   sb_attn_sw->known_attn,
		   sb_attn_sw->known_attn | asserted_bits);
	sb_attn_sw->known_attn |= asserted_bits;

	/* Handle MCP events */
	if (asserted_bits & 0x100) {
		qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
		/* Clean the MCP attention */
		qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
		       sb_attn_sw->mfw_attn_addr, 0);
	}

	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      (u32)asserted_bits);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
		   asserted_bits);

	return 0;
}
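
/* Editor's note: the DIRECT_REG_WR above targets the BAR0 GTT window that
 * maps IGU commands; the << 3 converts a command index (relative to
 * IGU_CMD_INT_ACK_BASE) into a byte offset, each command slot presumably
 * occupying 8 bytes. The same arithmetic appears again for the
 * attention-clear command below and the SISR reads near the end of this
 * file.
 */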

/**
 * @brief - handles deassertion of previously asserted attentions.
 *
 * @param p_hwfn
 * @param deasserted_bits - newly deasserted bits
 * @return int
 *
 */
static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
			       u16 deasserted_bits)
{
	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 aeu_mask;

	if (deasserted_bits != 0x100)
		DP_ERR(p_hwfn, "Unexpected - non-link deassertion\n");

	/* Clear IGU indication for the deasserted bits */
	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3),
		      ~((u32)deasserted_bits));

	/* Unmask deasserted attentions in IGU */
	aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			  IGU_REG_ATTENTION_ENABLE);
	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);

	/* Clear deassertion from inner state */
	sb_attn_sw->known_attn &= ~deasserted_bits;

	return 0;
}

static int qed_int_attentions(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
	u32 attn_bits = 0, attn_acks = 0;
	u16 asserted_bits, deasserted_bits;
	__le16 index;
	int rc = 0;

	/* Read current attention bits/acks - safeguard against attentions
	 * by guaranteeing we work on a synchronized timeframe
	 */
	do {
		index = p_sb_attn->sb_index;
		attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
		attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
	} while (index != p_sb_attn->sb_index);
	p_sb_attn->sb_index = index;

	/* Assertion / deassertion are meaningful (and in the correct state)
	 * only when they differ from and are consistent with the known state:
	 * deassertion when there is a previous attention & a current ack, and
	 * assertion when there is a current attention with no previous one.
	 */
	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
		~p_sb_attn_sw->known_attn;
	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
		p_sb_attn_sw->known_attn;
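
	/* Editor's illustration: when the MFW bit (0x100) is newly raised,
	 * attn_bits = 0x100, attn_acks = 0x000 and known_attn = 0x000, so
	 * asserted_bits = 0x100 and deasserted_bits = 0. Once the bit has
	 * been acked and drops, attn_bits = 0x000, attn_acks = 0x100 and
	 * known_attn = 0x100, yielding deasserted_bits = 0x100.
	 */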

	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
		DP_INFO(p_hwfn,
			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
			index, attn_bits, attn_acks, asserted_bits,
			deasserted_bits, p_sb_attn_sw->known_attn);
	} else if (asserted_bits == 0x100) {
		DP_INFO(p_hwfn,
			"MFW indication via attention\n");
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "MFW indication [deassertion]\n");
	}

	if (asserted_bits) {
		rc = qed_int_assertion(p_hwfn, asserted_bits);
		if (rc)
			return rc;
	}

	if (deasserted_bits) {
		rc = qed_int_deassertion(p_hwfn, deasserted_bits);
		if (rc)
			return rc;
	}

	return rc;
}

static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
			    void __iomem *igu_addr,
			    u32 ack_cons)
{
	struct igu_prod_cons_update igu_ack = { 0 };

	igu_ack.sb_id_and_flags =
		((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		 (IGU_SEG_ACCESS_ATTN <<
		  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);

	/* Both segments (interrupts & acks) are written to the same address;
	 * we need to guarantee all commands are received (in order) by HW.
	 */
	mmiowb();
	barrier();
}
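
/* Editor's summary of the slowpath DPC below: it disables further acks on
 * the default status block, snapshots both the interrupt and attention
 * running indices, dispatches attention handling and any registered
 * protocol-index callbacks, and finally acks and re-enables the status
 * block.
 */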
void qed_int_sp_dpc(unsigned long hwfn_cookie)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
	struct qed_pi_info *pi_info = NULL;
	struct qed_sb_attn_info *sb_attn;
	struct qed_sb_info *sb_info;
	int arr_size;
	u16 rc = 0;

	if (!p_hwfn->p_sp_sb) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
		return;
	}

	sb_info = &p_hwfn->p_sp_sb->sb_info;
	arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
	if (!sb_info) {
		DP_ERR(p_hwfn->cdev,
		       "Status block is NULL - cannot ack interrupts\n");
		return;
	}

	if (!p_hwfn->p_sb_attn) {
		DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn\n");
		return;
	}
	sb_attn = p_hwfn->p_sb_attn;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
		   p_hwfn, p_hwfn->my_id);

	/* Disable ack for the default status block. Required both for MSI-X
	 * and for INTA in non-mask mode; in INTA it does no harm.
	 */
	qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);

	/* Gather interrupt/attention information */
	if (!sb_info->sb_virt) {
		DP_ERR(p_hwfn->cdev,
		       "Interrupt Status block is NULL - cannot check for new interrupts!\n");
	} else {
		u32 tmp_index = sb_info->sb_ack;

		rc = qed_sb_update_sb_idx(sb_info);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Interrupt indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_info->sb_ack);
	}

	if (!sb_attn || !sb_attn->sb_attn) {
		DP_ERR(p_hwfn->cdev,
		       "Attentions Status block is NULL - cannot check for new attentions!\n");
	} else {
		u16 tmp_index = sb_attn->index;

		rc |= qed_attn_update_idx(p_hwfn, sb_attn);
		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
			   "Attention indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_attn->index);
	}

	/* Check if we expect interrupts at this time. If not, just ack them */
	if (!(rc & QED_SB_EVENT_MASK)) {
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	/* Check the validity of the DPC ptt. If invalid, ack interrupts and fail */
	if (!p_hwfn->p_dpc_ptt) {
		DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	if (rc & QED_SB_ATT_IDX)
		qed_int_attentions(p_hwfn);

	if (rc & QED_SB_IDX) {
		int pi;

		/* Dispatch the callbacks registered per protocol index */
		for (pi = 0; pi < arr_size; pi++) {
			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
			if (pi_info->comp_cb)
				pi_info->comp_cb(p_hwfn, pi_info->cookie);
		}
	}

	if (sb_attn && (rc & QED_SB_ATT_IDX))
		/* This should be done before the interrupts are enabled,
		 * since otherwise a new attention will be generated.
		 */
		qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);

	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}

static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

	if (p_sb) {
		if (p_sb->sb_attn)
			dma_free_coherent(&cdev->pdev->dev,
					  SB_ATTN_ALIGNED_SIZE(p_hwfn),
					  p_sb->sb_attn,
					  p_sb->sb_phys);
		kfree(p_sb);
	}
}

static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;

	memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));

	sb_info->index = 0;
	sb_info->known_attn = 0;

	/* Configure Attention Status Block in IGU */
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
	       lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
	       upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
}

static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 void *sb_virt_addr,
				 dma_addr_t sb_phy_addr)
{
	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;

	sb_info->sb_attn = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	/* Set the address of cleanup for the mcp attention */
	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
				 MISC_REG_AEU_GENERAL_ATTN_0;

	qed_int_sb_attn_setup(p_hwfn, p_ptt);
}

static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_sb_attn_info *p_sb;
	void *p_virt;
	dma_addr_t p_phys = 0;

	/* SB struct */
	p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC);
	if (!p_sb) {
		DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n");
		return -ENOMEM;
	}

	/* SB ring */
	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    SB_ATTN_ALIGNED_SIZE(p_hwfn),
				    &p_phys, GFP_KERNEL);

	if (!p_virt) {
		DP_NOTICE(cdev, "Failed to allocate status block (attentions)\n");
		kfree(p_sb);
		return -ENOMEM;
	}

	/* Attention setup */
	p_hwfn->p_sb_attn = p_sb;
	qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);

	return 0;
}

/* coalescing timeout = timeset << (timer_res + 1) */
#define QED_CAU_DEF_RX_USECS 24
#define QED_CAU_DEF_TX_USECS 48
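
/* Editor's note: with the fixed timer resolution of 1 used below, the
 * default 24 usec RX value maps to timeset = 24 >> (1 + 1) = 6, and the
 * realized timeout is 6 << (1 + 1) = 24 usec; similarly, the 48 usec TX
 * default maps to timeset = 12.
 */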

void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
			   struct cau_sb_entry *p_sb_entry,
			   u8 pf_id,
			   u16 vf_number,
			   u8 vf_valid)
{
	u32 cau_state;

	memset(p_sb_entry, 0, sizeof(*p_sb_entry));

	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

	/* Set the timer resolution to a fixed value (= 1) */
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
		  QED_CAU_DEF_RX_TIMER_RES);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
		  QED_CAU_DEF_TX_TIMER_RES);

	cau_state = CAU_HC_DISABLE_STATE;

	if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
		cau_state = CAU_HC_ENABLE_STATE;
		if (!p_hwfn->cdev->rx_coalesce_usecs)
			p_hwfn->cdev->rx_coalesce_usecs =
				QED_CAU_DEF_RX_USECS;
		if (!p_hwfn->cdev->tx_coalesce_usecs)
			p_hwfn->cdev->tx_coalesce_usecs =
				QED_CAU_DEF_TX_USECS;
	}

	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}

void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 dma_addr_t sb_phys,
			 u16 igu_sb_id,
			 u16 vf_number,
			 u8 vf_valid)
{
	struct cau_sb_entry sb_entry;
	u32 val;

	qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
			      vf_number, vf_valid);
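
	/* Editor's note: before hw_init_done, the CAU memories cannot be
	 * written directly; the entries are staged in the runtime (RT)
	 * array instead and flushed to the chip by the init engine.
	 */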
	if (p_hwfn->hw_init_done) {
		val = CAU_REG_SB_ADDR_MEMORY + igu_sb_id * sizeof(u64);
		qed_wr(p_hwfn, p_ptt, val, lower_32_bits(sb_phys));
		qed_wr(p_hwfn, p_ptt, val + sizeof(u32),
		       upper_32_bits(sb_phys));

		val = CAU_REG_SB_VAR_MEMORY + igu_sb_id * sizeof(u64);
		qed_wr(p_hwfn, p_ptt, val, sb_entry.data);
		qed_wr(p_hwfn, p_ptt, val + sizeof(u32), sb_entry.params);
	} else {
		/* Initialize Status Block Address */
		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2,
				 sb_phys);

		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2,
				 sb_entry);
	}

	/* Configure pi coalescing if set */
	if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
		u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >>
			     (QED_CAU_DEF_RX_TIMER_RES + 1);
		u8 num_tc = 1, i;

		qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
				    QED_COAL_RX_STATE_MACHINE,
				    timeset);

		timeset = p_hwfn->cdev->tx_coalesce_usecs >>
			  (QED_CAU_DEF_TX_TIMER_RES + 1);

		for (i = 0; i < num_tc; i++) {
			qed_int_cau_conf_pi(p_hwfn, p_ptt,
					    igu_sb_id, TX_PI(i),
					    QED_COAL_TX_STATE_MACHINE,
					    timeset);
		}
	}
}

void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 u16 igu_sb_id,
			 u32 pi_index,
			 enum qed_coalescing_fsm coalescing_fsm,
			 u8 timeset)
{
	struct cau_pi_entry pi_entry;
	u32 sb_offset;
	u32 pi_offset;
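
	/* Editor's note: each status block owns PIS_PER_SB consecutive
	 * entries in the CAU PI memory, so the absolute PI offset is this
	 * SB's base (igu_sb_id * PIS_PER_SB) plus the requested index.
	 */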
	sb_offset = igu_sb_id * PIS_PER_SB;
	memset(&pi_entry, 0, sizeof(struct cau_pi_entry));

	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
	if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
	else
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);

	pi_offset = sb_offset + pi_index;
	if (p_hwfn->hw_init_done) {
		qed_wr(p_hwfn, p_ptt,
		       CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
		       *((u32 *)&(pi_entry)));
	} else {
		STORE_RT_REG(p_hwfn,
			     CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
			     *((u32 *)&(pi_entry)));
	}
}

void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      struct qed_sb_info *sb_info)
{
	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
			    sb_info->igu_sb_id, 0, 0);
}

/**
 * @brief qed_get_igu_sb_id - given a sw sb_id return the
 *        igu_sb_id
 *
 * @param p_hwfn
 * @param sb_id
 *
 * @return u16
 */
static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
			     u16 sb_id)
{
	u16 igu_sb_id;

	/* Assuming continuous set of IGU SBs dedicated for given PF */
	if (sb_id == QED_SP_SB_ID)
		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
	else
		igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
		   (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);

	return igu_sb_id;
}

int qed_int_sb_init(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_sb_info *sb_info,
		    void *sb_virt_addr,
		    dma_addr_t sb_phy_addr,
		    u16 sb_id)
{
	sb_info->sb_virt = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_id != QED_SP_SB_ID) {
		p_hwfn->sbs_info[sb_id] = sb_info;
		p_hwfn->num_sbs++;
	}

	sb_info->cdev = p_hwfn->cdev;

	/* The igu address will hold the absolute address that needs to be
	 * written to for a specific status block
	 */
	sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
			    GTT_BAR0_MAP_REG_IGU_CMD +
			    (sb_info->igu_sb_id << 3);

	sb_info->flags |= QED_SB_INFO_INIT;

	qed_int_sb_setup(p_hwfn, p_ptt, sb_info);

	return 0;
}

int qed_int_sb_release(struct qed_hwfn *p_hwfn,
		       struct qed_sb_info *sb_info,
		       u16 sb_id)
{
	if (sb_id == QED_SP_SB_ID) {
		DP_ERR(p_hwfn, "Do not free the SP SB using this function\n");
		return -EINVAL;
	}

	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	p_hwfn->sbs_info[sb_id] = NULL;
	p_hwfn->num_sbs--;

	return 0;
}

static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;

	if (p_sb) {
		if (p_sb->sb_info.sb_virt)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  SB_ALIGNED_SIZE(p_hwfn),
					  p_sb->sb_info.sb_virt,
					  p_sb->sb_info.sb_phys);
		kfree(p_sb);
	}
}

static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt)
{
	struct qed_sb_sp_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC);
	if (!p_sb) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n");
		return -ENOMEM;
	}

	/* SB ring */
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    SB_ALIGNED_SIZE(p_hwfn),
				    &p_phys, GFP_KERNEL);
	if (!p_virt) {
		DP_NOTICE(p_hwfn, "Failed to allocate status block\n");
		kfree(p_sb);
		return -ENOMEM;
	}

	/* Status Block setup */
	p_hwfn->p_sp_sb = p_sb;
	qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
			p_phys, QED_SP_SB_ID);

	memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));

	return 0;
}

static void qed_int_sp_sb_setup(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt)
{
	if (!p_hwfn)
		return;

	if (p_hwfn->p_sp_sb)
		qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
	else
		DP_NOTICE(p_hwfn->cdev,
			  "Failed to setup Slow path status block - NULL pointer\n");

	if (p_hwfn->p_sb_attn)
		qed_int_sb_attn_setup(p_hwfn, p_ptt);
	else
		DP_NOTICE(p_hwfn->cdev,
			  "Failed to setup attentions status block - NULL pointer\n");
}

int qed_int_register_cb(struct qed_hwfn *p_hwfn,
			qed_int_comp_cb_t comp_cb,
			void *cookie,
			u8 *sb_idx,
			__le16 **p_fw_cons)
{
	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
	int qed_status = -ENOMEM;
	u8 pi;

	/* Look for a free index */
	for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
		if (!p_sp_sb->pi_info_arr[pi].comp_cb) {
			p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
			p_sp_sb->pi_info_arr[pi].cookie = cookie;
			*sb_idx = pi;
			*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
			qed_status = 0;
			break;
		}
	}

	return qed_status;
}
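
/* Editor's sketch of a hypothetical caller (names invented here for
 * illustration): a protocol module would register a completion callback
 * and keep the returned firmware consumer pointer, e.g.:
 *
 *	u8 idx;
 *	__le16 *p_fw_cons;
 *
 *	if (!qed_int_register_cb(p_hwfn, my_comp_cb, my_ctx,
 *				 &idx, &p_fw_cons))
 *		// my_comp_cb(p_hwfn, my_ctx) now runs from the slowpath
 *		// DPC whenever the SB index advances; it would typically
 *		// consume completions up to le16_to_cpu(*p_fw_cons).
 */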

int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
{
	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
	int qed_status = -ENOMEM;

	if (p_sp_sb->pi_info_arr[pi].comp_cb) {
		p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
		p_sp_sb->pi_info_arr[pi].cookie = NULL;
		qed_status = 0;
	}

	return qed_status;
}

u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
{
	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
}

void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    enum qed_int_mode int_mode)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;

	p_hwfn->cdev->int_mode = int_mode;
	switch (p_hwfn->cdev->int_mode) {
	case QED_INT_MODE_INTA:
		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case QED_INT_MODE_MSI:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case QED_INT_MODE_MSIX:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		break;
	case QED_INT_MODE_POLL:
		break;
	}

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}

int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		       enum qed_int_mode int_mode)
{
	/* Initialize rc - it is returned as-is when no IRQ is requested */
	int rc = 0, i;

	/* Mask non-link attentions */
	for (i = 0; i < 9; i++)
		qed_wr(p_hwfn, p_ptt,
		       MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0);

	/* Configure AEU signal change to produce attentions for link */
	qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);

	/* Flush the writes to IGU */
	mmiowb();

	/* Unmask AEU signals toward IGU */
	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
	if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_slowpath_irq_req(p_hwfn);
		if (rc != 0) {
			DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
			return -EINVAL;
		}
		p_hwfn->b_int_requested = true;
	}
	/* Enable interrupt generation */
	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
	p_hwfn->b_int_enabled = 1;

	return rc;
}

void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt)
{
	p_hwfn->b_int_enabled = 0;

	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}

#define IGU_CLEANUP_SLEEP_LENGTH (1000)
void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u32 sb_id,
			    bool cleanup_set,
			    u16 opaque_fid)
{
	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
	u32 data = 0;
	u32 cmd_ctrl = 0;
	u32 val = 0;
	u32 sb_bit = 0;
	u32 sb_bit_addr = 0;

	/* Set the data field */
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

	/* Set the control register */
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

	barrier();

	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

	/* Flush the write to IGU */
	mmiowb();

	/* Calculate where to read the status bit from */
	sb_bit = 1 << (sb_id % 32);
	sb_bit_addr = sb_id / 32 * sizeof(u32);

	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;

	/* Now wait for the command to complete */
	do {
		val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);

		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
			break;

		usleep_range(5000, 10000);
	} while (--sleep_cnt);

	if (!sleep_cnt)
		DP_NOTICE(p_hwfn,
			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
			  val, sb_id);
}

void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u32 sb_id,
				     u16 opaque,
				     bool b_set)
{
	int pi;

	/* Set */
	if (b_set)
		qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);

	/* Clear */
	qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);

	/* Clear the CAU for the SB - one entry per protocol index */
	for (pi = 0; pi < 12; pi++)
		qed_wr(p_hwfn, p_ptt,
		       CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
}

void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      bool b_set,
			      bool b_slowpath)
{
	u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
	u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
	u32 sb_id = 0;
	u32 val = 0;

	val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
	qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "IGU cleaning SBs [%d,...,%d]\n",
		   igu_base_sb, igu_base_sb + igu_sb_cnt - 1);

	for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);

	if (b_slowpath) {
		sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "IGU cleaning slowpath SB [%d]\n", sb_id);
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
						p_hwfn->hw_info.opaque_fid,
						b_set);
	}
}
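
/* Editor's note: the IGU CAM read below maps every IGU status block to an
 * owning function and vector. The scan records which SBs belong to this
 * PF: vector 0 is the default SB (DSB), and the remaining PF vectors are
 * expected to occupy a consecutive range of CAM entries.
 */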
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt)
{
	struct qed_igu_info *p_igu_info;
	struct qed_igu_block *blk;
	u32 val;
	u16 sb_id;
	u16 prev_sb_id = 0xFF;

	p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_ATOMIC);

	if (!p_hwfn->hw_info.p_igu_info)
		return -ENOMEM;

	p_igu_info = p_hwfn->hw_info.p_igu_info;

	/* Initialize base sb / sb cnt for PFs */
	p_igu_info->igu_base_sb = 0xffff;
	p_igu_info->igu_sb_cnt = 0;
	p_igu_info->igu_dsb_id = 0xffff;
	p_igu_info->igu_base_sb_iov = 0xffff;

	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     sb_id++) {
		blk = &p_igu_info->igu_map.igu_blocks[sb_id];

		val = qed_rd(p_hwfn, p_ptt,
			     IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);

		/* Stop scanning when we hit the first invalid PF entry */
		if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
		    GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
			break;

		blk->status = QED_IGU_STATUS_VALID;
		blk->function_id = GET_FIELD(val,
					     IGU_MAPPING_LINE_FUNCTION_NUMBER);
		blk->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
		blk->vector_number = GET_FIELD(val,
					       IGU_MAPPING_LINE_VECTOR_NUMBER);

		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
			   "IGU_BLOCK[sb_id]:%x:func_id = %d is_pf = %d vector_num = 0x%x\n",
			   val, blk->function_id, blk->is_pf,
			   blk->vector_number);

		if (blk->is_pf) {
			if (blk->function_id == p_hwfn->rel_pf_id) {
				blk->status |= QED_IGU_STATUS_PF;

				if (blk->vector_number == 0) {
					if (p_igu_info->igu_dsb_id == 0xffff)
						p_igu_info->igu_dsb_id = sb_id;
				} else {
					if (p_igu_info->igu_base_sb ==
					    0xffff) {
						p_igu_info->igu_base_sb = sb_id;
					} else if (prev_sb_id != sb_id - 1) {
						DP_NOTICE(p_hwfn->cdev,
							  "consecutive igu vectors for HWFN %x broken\n",
							  p_hwfn->rel_pf_id);
						break;
					}
					prev_sb_id = sb_id;
					/* We don't count the default */
					(p_igu_info->igu_sb_cnt)++;
				}
			}
		}
	}

	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
		   "IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
		   p_igu_info->igu_base_sb,
		   p_igu_info->igu_sb_cnt,
		   p_igu_info->igu_dsb_id);

	if (p_igu_info->igu_base_sb == 0xffff ||
	    p_igu_info->igu_dsb_id == 0xffff ||
	    p_igu_info->igu_sb_cnt == 0) {
		DP_NOTICE(p_hwfn,
			  "IGU CAM returned invalid values igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
			  p_igu_info->igu_base_sb,
			  p_igu_info->igu_sb_cnt,
			  p_igu_info->igu_dsb_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * @brief Initialize igu runtime registers
 *
 * @param p_hwfn
 */
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
{
	u32 igu_pf_conf = 0;

	igu_pf_conf |= IGU_PF_CONF_FUNC_EN;

	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}

u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
{
	u64 intr_status = 0;
	u32 intr_status_lo = 0;
	u32 intr_status_hi = 0;
	u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;
	u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
			       IGU_CMD_INT_ACK_BASE;

	intr_status_lo = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				lsb_igu_cmd_addr * 8);
	intr_status_hi = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				msb_igu_cmd_addr * 8);
	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;

	return intr_status;
}

static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
{
	tasklet_init(p_hwfn->sp_dpc,
		     qed_int_sp_dpc, (unsigned long)p_hwfn);
	p_hwfn->b_sp_dpc_enabled = true;
}

static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
{
	p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_ATOMIC);
	if (!p_hwfn->sp_dpc)
		return -ENOMEM;

	return 0;
}

static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->sp_dpc);
}

int qed_int_alloc(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt)
{
	int rc = 0;

	rc = qed_int_sp_dpc_alloc(p_hwfn);
	if (rc) {
		DP_ERR(p_hwfn->cdev, "Failed to allocate sp dpc mem\n");
		return rc;
	}
	rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
	if (rc) {
		DP_ERR(p_hwfn->cdev, "Failed to allocate sp sb mem\n");
		return rc;
	}
	rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
	if (rc) {
		DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n");
		return rc;
	}
	return rc;
}

void qed_int_free(struct qed_hwfn *p_hwfn)
{
	qed_int_sp_sb_free(p_hwfn);
	qed_int_sb_attn_free(p_hwfn);
	qed_int_sp_dpc_free(p_hwfn);
}

void qed_int_setup(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt)
{
	qed_int_sp_sb_setup(p_hwfn, p_ptt);
	qed_int_sp_dpc_setup(p_hwfn);
}

int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
			int *p_iov_blks)
{
	struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;

	if (!info)
		return 0;

	if (p_iov_blks)
		*p_iov_blks = info->free_blks;

	return info->igu_sb_cnt;
}

void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
		cdev->hwfns[i].b_int_requested = false;
}