qed: Add support for HW attentions
drivers/net/ethernet/qlogic/qed/qed_int.c

/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"

struct qed_pi_info {
        qed_int_comp_cb_t comp_cb;
        void *cookie;
};

struct qed_sb_sp_info {
        struct qed_sb_info sb_info;

        /* per protocol index data */
        struct qed_pi_info pi_info_arr[PIS_PER_SB];
};

#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
        ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)

struct aeu_invert_reg_bit {
        char bit_name[30];

#define ATTENTION_PARITY                (1 << 0)

#define ATTENTION_LENGTH_MASK           (0x00000ff0)
#define ATTENTION_LENGTH_SHIFT          (4)
#define ATTENTION_LENGTH(flags)         (((flags) & ATTENTION_LENGTH_MASK) >> \
                                         ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE                (1 << ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR                   (ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT               ((2 << ATTENTION_LENGTH_SHIFT) | \
                                         ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK           (0x000ff000)
#define ATTENTION_OFFSET_SHIFT          (12)
        unsigned int flags;
};
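
/* Worked example of the flags encoding (illustrative, not used by the code):
 * ATTENTION_SINGLE describes a single interrupt bit (length 1), while an
 * entry such as
 *
 *      {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
 *                        (1 << ATTENTION_OFFSET_SHIFT)}
 *
 * in aeu_descs below describes 8 consecutive bits whose printed names count
 * from offset 1, i.e. "SW timers #1" .. "SW timers #8". ATTENTION_LENGTH()
 * recovers the bit count: ((0x80 & ATTENTION_LENGTH_MASK) >> 4) == 8.
 */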

struct aeu_invert_reg {
        struct aeu_invert_reg_bit bits[32];
};

#define MAX_ATTN_GRPS           (8)
#define NUM_ATTN_REGS           (9)

/* Notice aeu_invert_reg must be defined in the same order of bits as HW */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
        {
                {       /* After Invert 1 */
                        {"GPIO0 function%d",
                         (32 << ATTENTION_LENGTH_SHIFT)},
                }
        },

        {
                {       /* After Invert 2 */
                        {"PGLUE config_space", ATTENTION_SINGLE},
                        {"PGLUE misc_flr", ATTENTION_SINGLE},
                        {"PGLUE B RBC", ATTENTION_PAR_INT},
                        {"PGLUE misc_mctp", ATTENTION_SINGLE},
                        {"Flash event", ATTENTION_SINGLE},
                        {"SMB event", ATTENTION_SINGLE},
                        {"Main Power", ATTENTION_SINGLE},
                        {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
                                          (1 << ATTENTION_OFFSET_SHIFT)},
                        {"PCIE glue/PXP VPD %d",
                         (16 << ATTENTION_LENGTH_SHIFT)},
                }
        },

        {
                {       /* After Invert 3 */
                        {"General Attention %d",
                         (32 << ATTENTION_LENGTH_SHIFT)},
                }
        },

        {
                {       /* After Invert 4 */
                        {"General Attention 32", ATTENTION_SINGLE},
                        {"General Attention %d",
                         (2 << ATTENTION_LENGTH_SHIFT) |
                         (33 << ATTENTION_OFFSET_SHIFT)},
                        {"General Attention 35", ATTENTION_SINGLE},
                        {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT)},
                        {"MCP CPU", ATTENTION_SINGLE},
                        {"MCP Watchdog timer", ATTENTION_SINGLE},
                        {"MCP M2P", ATTENTION_SINGLE},
                        {"AVS stop status ready", ATTENTION_SINGLE},
                        {"MSTAT", ATTENTION_PAR_INT},
                        {"MSTAT per-path", ATTENTION_PAR_INT},
                        {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT)},
                        {"NIG", ATTENTION_PAR_INT},
                        {"BMB/OPTE/MCP", ATTENTION_PAR_INT},
                        {"BTB", ATTENTION_PAR_INT},
                        {"BRB", ATTENTION_PAR_INT},
                        {"PRS", ATTENTION_PAR_INT},
                }
        },

        {
                {       /* After Invert 5 */
                        {"SRC", ATTENTION_PAR_INT},
                        {"PB Client1", ATTENTION_PAR_INT},
                        {"PB Client2", ATTENTION_PAR_INT},
                        {"RPB", ATTENTION_PAR_INT},
                        {"PBF", ATTENTION_PAR_INT},
                        {"QM", ATTENTION_PAR_INT},
                        {"TM", ATTENTION_PAR_INT},
                        {"MCM", ATTENTION_PAR_INT},
                        {"MSDM", ATTENTION_PAR_INT},
                        {"MSEM", ATTENTION_PAR_INT},
                        {"PCM", ATTENTION_PAR_INT},
                        {"PSDM", ATTENTION_PAR_INT},
                        {"PSEM", ATTENTION_PAR_INT},
                        {"TCM", ATTENTION_PAR_INT},
                        {"TSDM", ATTENTION_PAR_INT},
                        {"TSEM", ATTENTION_PAR_INT},
                }
        },

        {
                {       /* After Invert 6 */
                        {"UCM", ATTENTION_PAR_INT},
                        {"USDM", ATTENTION_PAR_INT},
                        {"USEM", ATTENTION_PAR_INT},
                        {"XCM", ATTENTION_PAR_INT},
                        {"XSDM", ATTENTION_PAR_INT},
                        {"XSEM", ATTENTION_PAR_INT},
                        {"YCM", ATTENTION_PAR_INT},
                        {"YSDM", ATTENTION_PAR_INT},
                        {"YSEM", ATTENTION_PAR_INT},
                        {"XYLD", ATTENTION_PAR_INT},
                        {"TMLD", ATTENTION_PAR_INT},
                        {"MYLD", ATTENTION_PAR_INT},
                        {"YULD", ATTENTION_PAR_INT},
                        {"DORQ", ATTENTION_PAR_INT},
                        {"DBG", ATTENTION_PAR_INT},
                        {"IPC", ATTENTION_PAR_INT},
                }
        },

        {
                {       /* After Invert 7 */
                        {"CCFC", ATTENTION_PAR_INT},
                        {"CDU", ATTENTION_PAR_INT},
                        {"DMAE", ATTENTION_PAR_INT},
                        {"IGU", ATTENTION_PAR_INT},
                        {"ATC", ATTENTION_PAR_INT},
                        {"CAU", ATTENTION_PAR_INT},
                        {"PTU", ATTENTION_PAR_INT},
                        {"PRM", ATTENTION_PAR_INT},
                        {"TCFC", ATTENTION_PAR_INT},
                        {"RDIF", ATTENTION_PAR_INT},
                        {"TDIF", ATTENTION_PAR_INT},
                        {"RSS", ATTENTION_PAR_INT},
                        {"MISC", ATTENTION_PAR_INT},
                        {"MISCS", ATTENTION_PAR_INT},
                        {"PCIE", ATTENTION_PAR},
                        {"Vaux PCI core", ATTENTION_SINGLE},
                        {"PSWRQ", ATTENTION_PAR_INT},
                }
        },

        {
                {       /* After Invert 8 */
                        {"PSWRQ (pci_clk)", ATTENTION_PAR_INT},
                        {"PSWWR", ATTENTION_PAR_INT},
                        {"PSWWR (pci_clk)", ATTENTION_PAR_INT},
                        {"PSWRD", ATTENTION_PAR_INT},
                        {"PSWRD (pci_clk)", ATTENTION_PAR_INT},
                        {"PSWHST", ATTENTION_PAR_INT},
                        {"PSWHST (pci_clk)", ATTENTION_PAR_INT},
                        {"GRC", ATTENTION_PAR_INT},
                        {"CPMU", ATTENTION_PAR_INT},
                        {"NCSI", ATTENTION_PAR_INT},
                        {"MSEM PRAM", ATTENTION_PAR},
                        {"PSEM PRAM", ATTENTION_PAR},
                        {"TSEM PRAM", ATTENTION_PAR},
                        {"USEM PRAM", ATTENTION_PAR},
                        {"XSEM PRAM", ATTENTION_PAR},
                        {"YSEM PRAM", ATTENTION_PAR},
                        {"pxp_misc_mps", ATTENTION_PAR},
                        {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE},
                        {"PERST_B assertion", ATTENTION_SINGLE},
                        {"PERST_B deassertion", ATTENTION_SINGLE},
                        {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT)},
                }
        },

        {
                {       /* After Invert 9 */
                        {"MCP Latched memory", ATTENTION_PAR},
                        {"MCP Latched scratchpad cache", ATTENTION_SINGLE},
                        {"MCP Latched ump_tx", ATTENTION_PAR},
                        {"MCP Latched scratchpad", ATTENTION_PAR},
                        {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT)},
                }
        },
};

#define ATTN_STATE_BITS         (0xfff)
#define ATTN_BITS_MASKABLE      (0x3ff)
struct qed_sb_attn_info {
        /* Virtual & Physical address of the SB */
        struct atten_status_block *sb_attn;
        dma_addr_t sb_phys;

        /* Last seen running index */
        u16 index;

        /* A mask of the AEU bits resulting in a parity error */
        u32 parity_mask[NUM_ATTN_REGS];

        /* A pointer to the attention description structure */
        struct aeu_invert_reg *p_aeu_desc;

        /* Previously asserted attentions, which are still unasserted */
        u16 known_attn;

        /* Cleanup address for the link's general hw attention */
        u32 mfw_attn_addr;
};

static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
                                      struct qed_sb_attn_info *p_sb_desc)
{
        u16 rc = 0;
        u16 index;

        /* Make certain HW write took effect */
        mmiowb();

        index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
        if (p_sb_desc->index != index) {
                p_sb_desc->index = index;
                rc = QED_SB_ATT_IDX;
        }

        /* Make certain we got a consistent view with HW */
        mmiowb();

        return rc;
}

/**
 * @brief qed_int_assertion - handles asserted attention bits
 *
 * @param p_hwfn
 * @param asserted_bits newly asserted bits
 * @return int
 */
static int qed_int_assertion(struct qed_hwfn *p_hwfn,
                             u16 asserted_bits)
{
        struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
        u32 igu_mask;

        /* Mask the source of the attention in the IGU */
        igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                          IGU_REG_ATTENTION_ENABLE);
        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
                   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
        igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
        qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);

        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
                   "inner known ATTN state: 0x%04x --> 0x%04x\n",
                   sb_attn_sw->known_attn,
                   sb_attn_sw->known_attn | asserted_bits);
        sb_attn_sw->known_attn |= asserted_bits;

        /* Handle MCP events */
        if (asserted_bits & 0x100) {
                qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
                /* Clean the MCP attention */
                qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
                       sb_attn_sw->mfw_attn_addr, 0);
        }

        DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
                      GTT_BAR0_MAP_REG_IGU_CMD +
                      ((IGU_CMD_ATTN_BIT_SET_UPPER -
                        IGU_CMD_INT_ACK_BASE) << 3),
                      (u32)asserted_bits);

        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
                   asserted_bits);

        return 0;
}

/**
 * @brief qed_int_deassertion_aeu_bit - handles the effects of a single
 * cause of the attention
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the attention
 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
 *  this bit to this group.
 * @param bitmask - the set bits within the register which belong to this
 *  AEU entry and caused the attention
 *
 * @return int
 */
static int
qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
                            struct aeu_invert_reg_bit *p_aeu,
                            u32 aeu_en_reg,
                            u32 bitmask)
{
        int rc = -EINVAL;
        u32 val, mask = ~bitmask;

        DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
                p_aeu->bit_name, bitmask);

        /* Prevent this Attention from being asserted in the future */
        val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
        qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
        DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
                p_aeu->bit_name);

        return rc;
}

/**
 * @brief - handles deassertion of previously asserted attentions.
 *
 * @param p_hwfn
 * @param deasserted_bits - newly deasserted bits
 * @return int
 *
 */
static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
                               u16 deasserted_bits)
{
        struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
        u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask;
        u8 i, j, k, bit_idx;
        int rc = 0;

        /* Read the attention registers in the AEU */
        for (i = 0; i < NUM_ATTN_REGS; i++) {
                aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                                        MISC_REG_AEU_AFTER_INVERT_1_IGU +
                                        i * 0x4);
                DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
                           "Deasserted bits [%d]: %08x\n",
                           i, aeu_inv_arr[i]);
        }

        /* Find parity attentions first */
        for (i = 0; i < NUM_ATTN_REGS; i++) {
                struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
                u32 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                                MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
                                i * sizeof(u32));
                u32 parities;

                /* Skip register in which no parity bit is currently set */
                parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
                if (!parities)
                        continue;

                for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
                        struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];

                        if ((p_bit->flags & ATTENTION_PARITY) &&
                            !!(parities & (1 << bit_idx))) {
                                DP_INFO(p_hwfn,
                                        "%s[%d] parity attention is set\n",
                                        p_bit->bit_name, bit_idx);
                        }

                        bit_idx += ATTENTION_LENGTH(p_bit->flags);
                }
        }

        /* Find non-parity cause for attention and act */
        for (k = 0; k < MAX_ATTN_GRPS; k++) {
                struct aeu_invert_reg_bit *p_aeu;

                /* Handle only groups whose attention is currently deasserted */
                if (!(deasserted_bits & (1 << k)))
                        continue;

                for (i = 0; i < NUM_ATTN_REGS; i++) {
                        u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
                                     i * sizeof(u32) +
                                     k * sizeof(u32) * NUM_ATTN_REGS;
                        u32 en, bits;

                        en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
                        bits = aeu_inv_arr[i] & en;

                        /* Skip if no bit from this group is currently set */
                        if (!bits)
                                continue;

                        /* Find all set bits from current register which belong
                         * to current group, making them responsible for the
                         * previous assertion.
                         */
                        for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
                                u8 bit, bit_len;
                                u32 bitmask;

                                p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];

                                /* No need to handle parity-only bits */
                                if (p_aeu->flags == ATTENTION_PAR)
                                        continue;

                                bit = bit_idx;
                                bit_len = ATTENTION_LENGTH(p_aeu->flags);
                                if (p_aeu->flags & ATTENTION_PAR_INT) {
                                        /* Skip Parity */
                                        bit++;
                                        bit_len--;
                                }

                                bitmask = bits & (((1 << bit_len) - 1) << bit);
                                if (bitmask) {
                                        /* Handle source of the attention */
                                        qed_int_deassertion_aeu_bit(p_hwfn,
                                                                    p_aeu,
                                                                    aeu_en,
                                                                    bitmask);
                                }

                                bit_idx += ATTENTION_LENGTH(p_aeu->flags);
                        }
                }
        }

        /* Clear IGU indication for the deasserted bits */
        DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
                      GTT_BAR0_MAP_REG_IGU_CMD +
                      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
                        IGU_CMD_INT_ACK_BASE) << 3),
                      ~((u32)deasserted_bits));

        /* Unmask deasserted attentions in IGU */
        aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                          IGU_REG_ATTENTION_ENABLE);
        aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
        qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);

        /* Clear deassertion from inner state */
        sb_attn_sw->known_attn &= ~deasserted_bits;

        return rc;
}

static int qed_int_attentions(struct qed_hwfn *p_hwfn)
{
        struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
        struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
        u32 attn_bits = 0, attn_acks = 0;
        u16 asserted_bits, deasserted_bits;
        __le16 index;
        int rc = 0;

        /* Read current attention bits/acks - safeguard against attentions
         * by guaranteeing work on a synchronized timeframe
         */
        do {
                index = p_sb_attn->sb_index;
                attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
                attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
        } while (index != p_sb_attn->sb_index);
        p_sb_attn->sb_index = index;

        /* Attention / Deassertion are meaningful (and in correct state)
         * only when they differ and consistent with known state - deassertion
         * when previous attention & current ack, and assertion when current
         * attention with no previous attention
         */
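        /* Worked example (illustrative, values not from HW): with
         * attn_bits = 0x101, attn_acks = 0x001 and known_attn = 0x001,
         * the MCP bit is newly asserted:
         *     asserted   = 0x101 & ~0x001 & 0xfff & ~0x001 = 0x100
         * while nothing is deasserted:
         *     deasserted = ~0x101 & 0x001 & 0xfff & 0x001  = 0x000
         */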
        asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
                        ~p_sb_attn_sw->known_attn;
        deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
                          p_sb_attn_sw->known_attn;

        if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
                DP_INFO(p_hwfn,
                        "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
                        index, attn_bits, attn_acks, asserted_bits,
                        deasserted_bits, p_sb_attn_sw->known_attn);
        } else if (asserted_bits == 0x100) {
                DP_INFO(p_hwfn,
                        "MFW indication via attention\n");
        } else {
                DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
                           "MFW indication [deassertion]\n");
        }

        if (asserted_bits) {
                rc = qed_int_assertion(p_hwfn, asserted_bits);
                if (rc)
                        return rc;
        }

        if (deasserted_bits) {
                rc = qed_int_deassertion(p_hwfn, deasserted_bits);
                if (rc)
                        return rc;
        }

        return rc;
}

static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
                            void __iomem *igu_addr,
                            u32 ack_cons)
{
        struct igu_prod_cons_update igu_ack = { 0 };

        igu_ack.sb_id_and_flags =
                ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
                 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
                 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
                 (IGU_SEG_ACCESS_ATTN <<
                  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

        DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);

        /* Both segments (interrupts & acks) are written to the same address;
         * need to guarantee all commands will be received (in-order) by HW.
         */
        mmiowb();
        barrier();
}

void qed_int_sp_dpc(unsigned long hwfn_cookie)
{
        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
        struct qed_pi_info *pi_info = NULL;
        struct qed_sb_attn_info *sb_attn;
        struct qed_sb_info *sb_info;
        int arr_size;
        u16 rc = 0;

        if (!p_hwfn->p_sp_sb) {
                DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
                return;
        }

        sb_info = &p_hwfn->p_sp_sb->sb_info;
        arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
        if (!sb_info) {
                DP_ERR(p_hwfn->cdev,
                       "Status block is NULL - cannot ack interrupts\n");
                return;
        }

        if (!p_hwfn->p_sb_attn) {
                DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
                return;
        }
        sb_attn = p_hwfn->p_sb_attn;

        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
                   p_hwfn, p_hwfn->my_id);

        /* Disable ack for def status block. Required both for msix +
         * inta in non-mask mode; in inta it does no harm.
         */
        qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);

        /* Gather Interrupts/Attentions information */
        if (!sb_info->sb_virt) {
                DP_ERR(p_hwfn->cdev,
                       "Interrupt Status block is NULL - cannot check for new interrupts!\n");
        } else {
                u32 tmp_index = sb_info->sb_ack;

                rc = qed_sb_update_sb_idx(sb_info);
                DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
                           "Interrupt indices: 0x%08x --> 0x%08x\n",
                           tmp_index, sb_info->sb_ack);
        }

        if (!sb_attn || !sb_attn->sb_attn) {
                DP_ERR(p_hwfn->cdev,
                       "Attentions Status block is NULL - cannot check for new attentions!\n");
        } else {
                u16 tmp_index = sb_attn->index;

                rc |= qed_attn_update_idx(p_hwfn, sb_attn);
                DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
                           "Attention indices: 0x%08x --> 0x%08x\n",
                           tmp_index, sb_attn->index);
        }

        /* Check if we expect interrupts at this time. If not just ack them */
        if (!(rc & QED_SB_EVENT_MASK)) {
                qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
                return;
        }

        /* Check the validity of the DPC ptt. If not ack interrupts and fail */
        if (!p_hwfn->p_dpc_ptt) {
                DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
                qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
                return;
        }

        if (rc & QED_SB_ATT_IDX)
                qed_int_attentions(p_hwfn);

        if (rc & QED_SB_IDX) {
                int pi;

                /* Invoke each registered protocol-index completion callback */
                for (pi = 0; pi < arr_size; pi++) {
                        pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
                        if (pi_info->comp_cb)
                                pi_info->comp_cb(p_hwfn, pi_info->cookie);
                }
        }

        if (sb_attn && (rc & QED_SB_ATT_IDX))
                /* This should be done before the interrupts are enabled,
                 * since otherwise a new attention will be generated.
                 */
                qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);

        qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}
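
/* A minimal sketch of how this DPC is driven (illustrative; assumes the
 * slowpath ISR behavior in qed_main.c, which is outside this file): the
 * interrupt handler itself does no SB processing and merely schedules the
 * tasklet initialized in qed_int_sp_dpc_setup():
 *
 *      tasklet_schedule(p_hwfn->sp_dpc);
 *
 * so the interrupt/attention handling above runs in softirq context.
 */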

static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
{
        struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

        if (!p_sb)
                return;

        if (p_sb->sb_attn)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  SB_ATTN_ALIGNED_SIZE(p_hwfn),
                                  p_sb->sb_attn,
                                  p_sb->sb_phys);
        kfree(p_sb);
}

static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt)
{
        struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;

        memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));

        sb_info->index = 0;
        sb_info->known_attn = 0;

        /* Configure Attention Status Block in IGU */
        qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
               lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
        qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
               upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
}

static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 void *sb_virt_addr,
                                 dma_addr_t sb_phy_addr)
{
        struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
        int i, j, k;

        sb_info->sb_attn = sb_virt_addr;
        sb_info->sb_phys = sb_phy_addr;

        /* Set the pointer to the AEU descriptors */
        sb_info->p_aeu_desc = aeu_descs;

        /* Calculate Parity Masks */
        memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
        for (i = 0; i < NUM_ATTN_REGS; i++) {
                /* j is array index, k is bit index */
                for (j = 0, k = 0; k < 32; j++) {
                        unsigned int flags = aeu_descs[i].bits[j].flags;

                        if (flags & ATTENTION_PARITY)
                                sb_info->parity_mask[i] |= 1 << k;

                        k += ATTENTION_LENGTH(flags);
                }
                DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
                           "Attn Mask [Reg %d]: 0x%08x\n",
                           i, sb_info->parity_mask[i]);
        }

        /* Set the address of cleanup for the mcp attention */
        sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
                                 MISC_REG_AEU_GENERAL_ATTN_0;

        qed_int_sb_attn_setup(p_hwfn, p_ptt);
}
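
/* Worked example of the parity-mask computation above (illustrative), using
 * the "After Invert 9" register (aeu_descs[8]): "MCP Latched memory"
 * (ATTENTION_PAR) sets bit 0, "MCP Latched scratchpad cache"
 * (ATTENTION_SINGLE, no parity) skips bit 1, "MCP Latched ump_tx" and
 * "MCP Latched scratchpad" (both ATTENTION_PAR) set bits 2 and 3, and the
 * 28 reserved bits contribute nothing - so parity_mask[8] == 0x0000000d.
 */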

static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        struct qed_sb_attn_info *p_sb;
        void *p_virt;
        dma_addr_t p_phys = 0;

        /* SB struct */
        p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
        if (!p_sb) {
                DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n");
                return -ENOMEM;
        }

        /* SB ring */
        p_virt = dma_alloc_coherent(&cdev->pdev->dev,
                                    SB_ATTN_ALIGNED_SIZE(p_hwfn),
                                    &p_phys, GFP_KERNEL);

        if (!p_virt) {
                DP_NOTICE(cdev, "Failed to allocate status block (attentions)\n");
                kfree(p_sb);
                return -ENOMEM;
        }

        /* Attention setup */
        p_hwfn->p_sb_attn = p_sb;
        qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);

        return 0;
}

/* coalescing timeout = timeset << (timer_res + 1) */
#define QED_CAU_DEF_RX_USECS 24
#define QED_CAU_DEF_TX_USECS 48
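
/* Worked example of the formula above (illustrative, assuming
 * QED_CAU_DEF_RX_TIMER_RES == 1, the "fixed value ( = 1)" noted in
 * qed_init_cau_sb_entry() below): a default RX timeout of 24 usecs is
 * encoded as timeset = 24 >> (1 + 1) = 6, which HW expands back to
 * 6 << (1 + 1) = 24 usecs; the 48 usec TX default likewise yields
 * timeset = 12.
 */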

void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
                           struct cau_sb_entry *p_sb_entry,
                           u8 pf_id,
                           u16 vf_number,
                           u8 vf_valid)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        u32 cau_state;

        memset(p_sb_entry, 0, sizeof(*p_sb_entry));

        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

        /* setting the time resolution to a fixed value ( = 1) */
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
                  QED_CAU_DEF_RX_TIMER_RES);
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
                  QED_CAU_DEF_TX_TIMER_RES);

        cau_state = CAU_HC_DISABLE_STATE;

        if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
                cau_state = CAU_HC_ENABLE_STATE;
                if (!cdev->rx_coalesce_usecs)
                        cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
                if (!cdev->tx_coalesce_usecs)
                        cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
        }

        SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
        SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}

void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
                         struct qed_ptt *p_ptt,
                         dma_addr_t sb_phys,
                         u16 igu_sb_id,
                         u16 vf_number,
                         u8 vf_valid)
{
        struct cau_sb_entry sb_entry;

        qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
                              vf_number, vf_valid);

        if (p_hwfn->hw_init_done) {
                /* Wide-bus, initialize via DMAE */
                u64 phys_addr = (u64)sb_phys;

                qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
                                  CAU_REG_SB_ADDR_MEMORY +
                                  igu_sb_id * sizeof(u64), 2, 0);
                qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
                                  CAU_REG_SB_VAR_MEMORY +
                                  igu_sb_id * sizeof(u64), 2, 0);
        } else {
                /* Initialize Status Block Address */
                STORE_RT_REG_AGG(p_hwfn,
                                 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
                                 igu_sb_id * 2,
                                 sb_phys);

                STORE_RT_REG_AGG(p_hwfn,
                                 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
                                 igu_sb_id * 2,
                                 sb_entry);
        }

        /* Configure pi coalescing if set */
        if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
                u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >>
                             (QED_CAU_DEF_RX_TIMER_RES + 1);
                u8 num_tc = 1, i;

                qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
                                    QED_COAL_RX_STATE_MACHINE,
                                    timeset);

                timeset = p_hwfn->cdev->tx_coalesce_usecs >>
                          (QED_CAU_DEF_TX_TIMER_RES + 1);

                for (i = 0; i < num_tc; i++) {
                        qed_int_cau_conf_pi(p_hwfn, p_ptt,
                                            igu_sb_id, TX_PI(i),
                                            QED_COAL_TX_STATE_MACHINE,
                                            timeset);
                }
        }
}

void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
                         struct qed_ptt *p_ptt,
                         u16 igu_sb_id,
                         u32 pi_index,
                         enum qed_coalescing_fsm coalescing_fsm,
                         u8 timeset)
{
        struct cau_pi_entry pi_entry;
        u32 sb_offset;
        u32 pi_offset;

        sb_offset = igu_sb_id * PIS_PER_SB;
        memset(&pi_entry, 0, sizeof(struct cau_pi_entry));

        SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
        if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
                SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
        else
                SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);

        pi_offset = sb_offset + pi_index;
        if (p_hwfn->hw_init_done) {
                qed_wr(p_hwfn, p_ptt,
                       CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
                       *((u32 *)&(pi_entry)));
        } else {
                STORE_RT_REG(p_hwfn,
                             CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
                             *((u32 *)&(pi_entry)));
        }
}

void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      struct qed_sb_info *sb_info)
{
        /* zero status block and ack counter */
        sb_info->sb_ack = 0;
        memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

        qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
                            sb_info->igu_sb_id, 0, 0);
}

/**
 * @brief qed_get_igu_sb_id - given a sw sb_id return the
 *        igu_sb_id
 *
 * @param p_hwfn
 * @param sb_id
 *
 * @return u16
 */
static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
                             u16 sb_id)
{
        u16 igu_sb_id;

        /* Assuming continuous set of IGU SBs dedicated for given PF */
        if (sb_id == QED_SP_SB_ID)
                igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
        else
                igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;

        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
                   (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);

        return igu_sb_id;
}

int qed_int_sb_init(struct qed_hwfn *p_hwfn,
                    struct qed_ptt *p_ptt,
                    struct qed_sb_info *sb_info,
                    void *sb_virt_addr,
                    dma_addr_t sb_phy_addr,
                    u16 sb_id)
{
        sb_info->sb_virt = sb_virt_addr;
        sb_info->sb_phys = sb_phy_addr;

        sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);

        if (sb_id != QED_SP_SB_ID) {
                p_hwfn->sbs_info[sb_id] = sb_info;
                p_hwfn->num_sbs++;
        }

        sb_info->cdev = p_hwfn->cdev;

        /* The igu address will hold the absolute address that needs to be
         * written to for a specific status block
         */
        sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
                            GTT_BAR0_MAP_REG_IGU_CMD +
                            (sb_info->igu_sb_id << 3);

        sb_info->flags |= QED_SB_INFO_INIT;

        qed_int_sb_setup(p_hwfn, p_ptt, sb_info);

        return 0;
}

int qed_int_sb_release(struct qed_hwfn *p_hwfn,
                       struct qed_sb_info *sb_info,
                       u16 sb_id)
{
        if (sb_id == QED_SP_SB_ID) {
                DP_ERR(p_hwfn, "Do Not free sp sb using this function");
                return -EINVAL;
        }

        /* zero status block and ack counter */
        sb_info->sb_ack = 0;
        memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

        if (p_hwfn->sbs_info[sb_id] != NULL) {
                p_hwfn->sbs_info[sb_id] = NULL;
                p_hwfn->num_sbs--;
        }

        return 0;
}

static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
{
        struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;

        if (!p_sb)
                return;

        if (p_sb->sb_info.sb_virt)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  SB_ALIGNED_SIZE(p_hwfn),
                                  p_sb->sb_info.sb_virt,
                                  p_sb->sb_info.sb_phys);
        kfree(p_sb);
}

static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
                               struct qed_ptt *p_ptt)
{
        struct qed_sb_sp_info *p_sb;
        dma_addr_t p_phys = 0;
        void *p_virt;

        /* SB struct */
        p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
        if (!p_sb) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n");
                return -ENOMEM;
        }

        /* SB ring */
        p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    SB_ALIGNED_SIZE(p_hwfn),
                                    &p_phys, GFP_KERNEL);
        if (!p_virt) {
                DP_NOTICE(p_hwfn, "Failed to allocate status block\n");
                kfree(p_sb);
                return -ENOMEM;
        }

        /* Status Block setup */
        p_hwfn->p_sp_sb = p_sb;
        qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
                        p_phys, QED_SP_SB_ID);

        memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));

        return 0;
}

int qed_int_register_cb(struct qed_hwfn *p_hwfn,
                        qed_int_comp_cb_t comp_cb,
                        void *cookie,
                        u8 *sb_idx,
                        __le16 **p_fw_cons)
{
        struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
        int rc = -ENOMEM;
        u8 pi;

        /* Look for a free index */
        for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
                if (p_sp_sb->pi_info_arr[pi].comp_cb)
                        continue;

                p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
                p_sp_sb->pi_info_arr[pi].cookie = cookie;
                *sb_idx = pi;
                *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
                rc = 0;
                break;
        }

        return rc;
}
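
/* A minimal usage sketch for the callback registration API
 * (qed_int_register_cb above, qed_int_unregister_cb below). Illustrative
 * only: example_comp_cb and the local variables are hypothetical names,
 * not part of this driver.
 *
 *      static void example_comp_cb(struct qed_hwfn *p_hwfn, void *cookie)
 *      {
 *              (void)cookie;   // consume protocol-index completions here
 *      }
 *
 *      u8 sb_idx;
 *      __le16 *p_fw_cons;
 *      int rc;
 *
 *      rc = qed_int_register_cb(p_hwfn, example_comp_cb, p_hwfn,
 *                               &sb_idx, &p_fw_cons);
 *      ...
 *      rc = qed_int_unregister_cb(p_hwfn, sb_idx);
 */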

int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
{
        struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;

        if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL)
                return -ENOMEM;

        p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
        p_sp_sb->pi_info_arr[pi].cookie = NULL;

        return 0;
}

u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
{
        return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
}

void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
                            struct qed_ptt *p_ptt,
                            enum qed_int_mode int_mode)
{
        u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;

        p_hwfn->cdev->int_mode = int_mode;
        switch (p_hwfn->cdev->int_mode) {
        case QED_INT_MODE_INTA:
                igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
                igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
                break;

        case QED_INT_MODE_MSI:
                igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
                igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
                break;

        case QED_INT_MODE_MSIX:
                igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
                break;
        case QED_INT_MODE_POLL:
                break;
        }

        qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}

int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                       enum qed_int_mode int_mode)
{
        int rc = 0;     /* initialized - rc is returned even if no IRQ is requested */

        /* Configure AEU signal change to produce attentions */
        qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
        qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
        qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
        qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);

        /* Flush the writes to IGU */
        mmiowb();

        /* Unmask AEU signals toward IGU */
        qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
        if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
                rc = qed_slowpath_irq_req(p_hwfn);
                if (rc != 0) {
                        DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
                        return -EINVAL;
                }
                p_hwfn->b_int_requested = true;
        }
        /* Enable interrupt Generation */
        qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
        p_hwfn->b_int_enabled = 1;

        return rc;
}

void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
                             struct qed_ptt *p_ptt)
{
        p_hwfn->b_int_enabled = 0;

        qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}

#define IGU_CLEANUP_SLEEP_LENGTH        (1000)
void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
                            struct qed_ptt *p_ptt,
                            u32 sb_id,
                            bool cleanup_set,
                            u16 opaque_fid)
{
        u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
        u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
        u32 data = 0;
        u32 cmd_ctrl = 0;
        u32 val = 0;
        u32 sb_bit = 0;
        u32 sb_bit_addr = 0;

        /* Set the data field */
        SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
        SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
        SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

        /* Set the control register */
        SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
        SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
        SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

        qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

        barrier();

        qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

        /* Flush the write to IGU */
        mmiowb();

        /* Calculate where to read the status bit from */
        sb_bit = 1 << (sb_id % 32);
        sb_bit_addr = sb_id / 32 * sizeof(u32);

        sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;

        /* Now wait for the command to complete */
        do {
                val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);

                if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
                        break;

                usleep_range(5000, 10000);
        } while (--sleep_cnt);

        if (!sleep_cnt)
                DP_NOTICE(p_hwfn,
                          "Timeout waiting for clear status 0x%08x [for sb %d]\n",
                          val, sb_id);
}

void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt,
                                     u32 sb_id,
                                     u16 opaque,
                                     bool b_set)
{
        int pi;

        /* Set */
        if (b_set)
                qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);

        /* Clear */
        qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);

        /* Clear the CAU for the SB */
        for (pi = 0; pi < 12; pi++)
                qed_wr(p_hwfn, p_ptt,
                       CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
}

void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt,
                              bool b_set,
                              bool b_slowpath)
{
        u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
        u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
        u32 sb_id = 0;
        u32 val = 0;

        val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
        val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
        val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
        qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);

        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
                   "IGU cleaning SBs [%d,...,%d]\n",
                   igu_base_sb, igu_base_sb + igu_sb_cnt - 1);

        for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
                qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
                                                p_hwfn->hw_info.opaque_fid,
                                                b_set);

        if (b_slowpath) {
                sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
                DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
                           "IGU cleaning slowpath SB [%d]\n", sb_id);
                qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
                                                p_hwfn->hw_info.opaque_fid,
                                                b_set);
        }
}

static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      u16 sb_id)
{
        u32 val = qed_rd(p_hwfn, p_ptt,
                         IGU_REG_MAPPING_MEMORY +
                         sizeof(u32) * sb_id);
        struct qed_igu_block *p_block;

        p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];

        /* stop scanning when hit first invalid PF entry */
        if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
            GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
                goto out;

        /* Fill the block information */
        p_block->status = QED_IGU_STATUS_VALID;
        p_block->function_id = GET_FIELD(val,
                                         IGU_MAPPING_LINE_FUNCTION_NUMBER);
        p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
        p_block->vector_number = GET_FIELD(val,
                                           IGU_MAPPING_LINE_VECTOR_NUMBER);

        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
                   "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d is_pf = %d vector_num = 0x%x\n",
                   sb_id, val, p_block->function_id,
                   p_block->is_pf, p_block->vector_number);

out:
        return val;
}

int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
                         struct qed_ptt *p_ptt)
{
        struct qed_igu_info *p_igu_info;
        struct qed_igu_block *blk;
        u32 val;
        u16 sb_id;
        u16 prev_sb_id = 0xFF;

        p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);

        if (!p_hwfn->hw_info.p_igu_info)
                return -ENOMEM;

        p_igu_info = p_hwfn->hw_info.p_igu_info;

        /* Initialize base sb / sb cnt for PFs */
        p_igu_info->igu_base_sb = 0xffff;
        p_igu_info->igu_sb_cnt = 0;
        p_igu_info->igu_dsb_id = 0xffff;
        p_igu_info->igu_base_sb_iov = 0xffff;

        for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
             sb_id++) {
                blk = &p_igu_info->igu_map.igu_blocks[sb_id];

                val = qed_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);

                /* stop scanning when hit first invalid PF entry */
                if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
                    GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
                        break;

                if (blk->is_pf) {
                        if (blk->function_id == p_hwfn->rel_pf_id) {
                                blk->status |= QED_IGU_STATUS_PF;

                                if (blk->vector_number == 0) {
                                        if (p_igu_info->igu_dsb_id == 0xffff)
                                                p_igu_info->igu_dsb_id = sb_id;
                                } else {
                                        if (p_igu_info->igu_base_sb ==
                                            0xffff) {
                                                p_igu_info->igu_base_sb = sb_id;
                                        } else if (prev_sb_id != sb_id - 1) {
                                                DP_NOTICE(p_hwfn->cdev,
                                                          "consecutive igu vectors for HWFN %x broken",
                                                          p_hwfn->rel_pf_id);
                                                break;
                                        }
                                        prev_sb_id = sb_id;
                                        /* we don't count the default */
                                        (p_igu_info->igu_sb_cnt)++;
                                }
                        }
                }
        }

        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
                   "IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
                   p_igu_info->igu_base_sb,
                   p_igu_info->igu_sb_cnt,
                   p_igu_info->igu_dsb_id);

        if (p_igu_info->igu_base_sb == 0xffff ||
            p_igu_info->igu_dsb_id == 0xffff ||
            p_igu_info->igu_sb_cnt == 0) {
                DP_NOTICE(p_hwfn,
                          "IGU CAM returned invalid values igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
                          p_igu_info->igu_base_sb,
                          p_igu_info->igu_sb_cnt,
                          p_igu_info->igu_dsb_id);
                return -EINVAL;
        }

        return 0;
}

/**
 * @brief Initialize igu runtime registers
 *
 * @param p_hwfn
 */
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
{
        u32 igu_pf_conf = 0;

        igu_pf_conf |= IGU_PF_CONF_FUNC_EN;

        STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}

u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
{
        u64 intr_status = 0;
        u32 intr_status_lo = 0;
        u32 intr_status_hi = 0;
        u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
                               IGU_CMD_INT_ACK_BASE;
        u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
                               IGU_CMD_INT_ACK_BASE;

        intr_status_lo = REG_RD(p_hwfn,
                                GTT_BAR0_MAP_REG_IGU_CMD +
                                lsb_igu_cmd_addr * 8);
        intr_status_hi = REG_RD(p_hwfn,
                                GTT_BAR0_MAP_REG_IGU_CMD +
                                msb_igu_cmd_addr * 8);
        intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;

        return intr_status;
}

static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
{
        tasklet_init(p_hwfn->sp_dpc,
                     qed_int_sp_dpc, (unsigned long)p_hwfn);
        p_hwfn->b_sp_dpc_enabled = true;
}

static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
{
        p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL);
        if (!p_hwfn->sp_dpc)
                return -ENOMEM;

        return 0;
}

static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
{
        kfree(p_hwfn->sp_dpc);
}

int qed_int_alloc(struct qed_hwfn *p_hwfn,
                  struct qed_ptt *p_ptt)
{
        int rc = 0;

        rc = qed_int_sp_dpc_alloc(p_hwfn);
        if (rc) {
                DP_ERR(p_hwfn->cdev, "Failed to allocate sp dpc mem\n");
                return rc;
        }
        rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
        if (rc) {
                DP_ERR(p_hwfn->cdev, "Failed to allocate sp sb mem\n");
                return rc;
        }
        rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
        if (rc) {
                DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n");
                return rc;
        }
        return rc;
}

void qed_int_free(struct qed_hwfn *p_hwfn)
{
        qed_int_sp_sb_free(p_hwfn);
        qed_int_sb_attn_free(p_hwfn);
        qed_int_sp_dpc_free(p_hwfn);
}

void qed_int_setup(struct qed_hwfn *p_hwfn,
                   struct qed_ptt *p_ptt)
{
        qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
        qed_int_sb_attn_setup(p_hwfn, p_ptt);
        qed_int_sp_dpc_setup(p_hwfn);
}

void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
                         struct qed_sb_cnt_info *p_sb_cnt_info)
{
        struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;

        if (!info || !p_sb_cnt_info)
                return;

        p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
        p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
        p_sb_cnt_info->sb_free_blk = info->free_blks;
}

void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{
        int i;

        for_each_hwfn(cdev, i)
                cdev->hwfns[i].b_int_requested = false;
}