]> Git Repo - linux.git/blame - drivers/edac/synopsys_edac.c
Merge tag 'sound-6.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai...
[linux.git] / drivers / edac / synopsys_edac.c
CommitLineData
9ae83ec8 1// SPDX-License-Identifier: GPL-2.0-only
ae9b56e3
PCK
2/*
3 * Synopsys DDR ECC Driver
4 * This driver is based on ppc4xx_edac.c drivers
5 *
6 * Copyright (C) 2012 - 2014 Xilinx, Inc.
ae9b56e3
PCK
7 */
8
9#include <linux/edac.h>
10#include <linux/module.h>
11#include <linux/platform_device.h>
591c9466 12#include <linux/spinlock.h>
b500b4a0 13#include <linux/interrupt.h>
3d02a897 14#include <linux/of.h>
ae9b56e3 15
78d88e8a 16#include "edac_module.h"
ae9b56e3
PCK
17
18/* Number of cs_rows needed per memory controller */
1b51adc6 19#define SYNPS_EDAC_NR_CSROWS 1
ae9b56e3
PCK
20
21/* Number of channels per memory controller */
1b51adc6 22#define SYNPS_EDAC_NR_CHANS 1
ae9b56e3
PCK
23
24/* Granularity of reported error in bytes */
1b51adc6 25#define SYNPS_EDAC_ERR_GRAIN 1
ae9b56e3 26
1b51adc6 27#define SYNPS_EDAC_MSG_SIZE 256
ae9b56e3 28
1b51adc6
MN
29#define SYNPS_EDAC_MOD_STRING "synps_edac"
30#define SYNPS_EDAC_MOD_VER "1"
ae9b56e3
PCK
31
32/* Synopsys DDR memory controller registers that are relevant to ECC */
1b51adc6
MN
33#define CTRL_OFST 0x0
34#define T_ZQ_OFST 0xA4
ae9b56e3
PCK
35
36/* ECC control register */
1b51adc6 37#define ECC_CTRL_OFST 0xC4
ae9b56e3 38/* ECC log register */
1b51adc6 39#define CE_LOG_OFST 0xC8
ae9b56e3 40/* ECC address register */
1b51adc6 41#define CE_ADDR_OFST 0xCC
ae9b56e3 42/* ECC data[31:0] register */
1b51adc6 43#define CE_DATA_31_0_OFST 0xD0
ae9b56e3
PCK
44
45/* Uncorrectable error info registers */
1b51adc6
MN
46#define UE_LOG_OFST 0xDC
47#define UE_ADDR_OFST 0xE0
48#define UE_DATA_31_0_OFST 0xE4
ae9b56e3 49
1b51adc6
MN
50#define STAT_OFST 0xF0
51#define SCRUB_OFST 0xF4
ae9b56e3
PCK
52
53/* Control register bit field definitions */
1b51adc6
MN
54#define CTRL_BW_MASK 0xC
55#define CTRL_BW_SHIFT 2
ae9b56e3 56
1b51adc6
MN
57#define DDRCTL_WDTH_16 1
58#define DDRCTL_WDTH_32 0
ae9b56e3
PCK
59
60/* ZQ register bit field definitions */
1b51adc6 61#define T_ZQ_DDRMODE_MASK 0x2
ae9b56e3
PCK
62
63/* ECC control register bit field definitions */
1b51adc6
MN
64#define ECC_CTRL_CLR_CE_ERR 0x2
65#define ECC_CTRL_CLR_UE_ERR 0x1
ae9b56e3
PCK
66
67/* ECC correctable/uncorrectable error log register definitions */
1b51adc6
MN
68#define LOG_VALID 0x1
69#define CE_LOG_BITPOS_MASK 0xFE
70#define CE_LOG_BITPOS_SHIFT 1
ae9b56e3
PCK
71
72/* ECC correctable/uncorrectable error address register definitions */
1b51adc6
MN
73#define ADDR_COL_MASK 0xFFF
74#define ADDR_ROW_MASK 0xFFFF000
75#define ADDR_ROW_SHIFT 12
76#define ADDR_BANK_MASK 0x70000000
77#define ADDR_BANK_SHIFT 28
ae9b56e3
PCK
78
79/* ECC statistic register definitions */
1b51adc6
MN
80#define STAT_UECNT_MASK 0xFF
81#define STAT_CECNT_MASK 0xFF00
82#define STAT_CECNT_SHIFT 8
ae9b56e3
PCK
83
84/* ECC scrub register definitions */
1b51adc6
MN
85#define SCRUB_MODE_MASK 0x7
86#define SCRUB_MODE_SECDED 0x4
ae9b56e3 87
e926ae57
MN
88/* DDR ECC Quirks */
89#define DDR_ECC_INTR_SUPPORT BIT(0)
90#define DDR_ECC_DATA_POISON_SUPPORT BIT(1)
f7824ded 91#define DDR_ECC_INTR_SELF_CLEAR BIT(2)
e926ae57
MN
92
93/* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */
94/* ECC Configuration Registers */
95#define ECC_CFG0_OFST 0x70
96#define ECC_CFG1_OFST 0x74
97
98/* ECC Status Register */
99#define ECC_STAT_OFST 0x78
100
101/* ECC Clear Register */
102#define ECC_CLR_OFST 0x7C
103
104/* ECC Error count Register */
105#define ECC_ERRCNT_OFST 0x80
106
107/* ECC Corrected Error Address Register */
108#define ECC_CEADDR0_OFST 0x84
109#define ECC_CEADDR1_OFST 0x88
110
111/* ECC Syndrome Registers */
112#define ECC_CSYND0_OFST 0x8C
113#define ECC_CSYND1_OFST 0x90
114#define ECC_CSYND2_OFST 0x94
115
116/* ECC Bit Mask0 Address Register */
117#define ECC_BITMASK0_OFST 0x98
118#define ECC_BITMASK1_OFST 0x9C
119#define ECC_BITMASK2_OFST 0xA0
120
121/* ECC UnCorrected Error Address Register */
122#define ECC_UEADDR0_OFST 0xA4
123#define ECC_UEADDR1_OFST 0xA8
124
125/* ECC Syndrome Registers */
126#define ECC_UESYND0_OFST 0xAC
127#define ECC_UESYND1_OFST 0xB0
128#define ECC_UESYND2_OFST 0xB4
129
130/* ECC Poison Address Reg */
131#define ECC_POISON0_OFST 0xB8
132#define ECC_POISON1_OFST 0xBC
133
134#define ECC_ADDRMAP0_OFFSET 0x200
135
136/* Control register bitfield definitions */
137#define ECC_CTRL_BUSWIDTH_MASK 0x3000
138#define ECC_CTRL_BUSWIDTH_SHIFT 12
139#define ECC_CTRL_CLR_CE_ERRCNT BIT(2)
140#define ECC_CTRL_CLR_UE_ERRCNT BIT(3)
141
142/* DDR Control Register width definitions */
143#define DDRCTL_EWDTH_16 2
144#define DDRCTL_EWDTH_32 1
145#define DDRCTL_EWDTH_64 0
146
147/* ECC status register definitions */
148#define ECC_STAT_UECNT_MASK 0xF0000
149#define ECC_STAT_UECNT_SHIFT 16
150#define ECC_STAT_CECNT_MASK 0xF00
151#define ECC_STAT_CECNT_SHIFT 8
152#define ECC_STAT_BITNUM_MASK 0x7F
153
e2932d1f
SD
154/* ECC error count register definitions */
155#define ECC_ERRCNT_UECNT_MASK 0xFFFF0000
156#define ECC_ERRCNT_UECNT_SHIFT 16
157#define ECC_ERRCNT_CECNT_MASK 0xFFFF
158
e926ae57
MN
159/* DDR QOS Interrupt register definitions */
160#define DDR_QOS_IRQ_STAT_OFST 0x20200
161#define DDR_QOSUE_MASK 0x4
162#define DDR_QOSCE_MASK 0x2
163#define ECC_CE_UE_INTR_MASK 0x6
164#define DDR_QOS_IRQ_EN_OFST 0x20208
165#define DDR_QOS_IRQ_DB_OFST 0x2020C
166
f7824ded
DN
167/* DDR QOS Interrupt register definitions */
168#define DDR_UE_MASK BIT(9)
169#define DDR_CE_MASK BIT(8)
170
e926ae57
MN
171/* ECC Corrected Error Register Mask and Shifts*/
172#define ECC_CEADDR0_RW_MASK 0x3FFFF
173#define ECC_CEADDR0_RNK_MASK BIT(24)
174#define ECC_CEADDR1_BNKGRP_MASK 0x3000000
175#define ECC_CEADDR1_BNKNR_MASK 0x70000
176#define ECC_CEADDR1_BLKNR_MASK 0xFFF
177#define ECC_CEADDR1_BNKGRP_SHIFT 24
178#define ECC_CEADDR1_BNKNR_SHIFT 16
179
180/* ECC Poison register shifts */
181#define ECC_POISON0_RANK_SHIFT 24
182#define ECC_POISON0_RANK_MASK BIT(24)
183#define ECC_POISON0_COLUMN_SHIFT 0
184#define ECC_POISON0_COLUMN_MASK 0xFFF
185#define ECC_POISON1_BG_SHIFT 28
186#define ECC_POISON1_BG_MASK 0x30000000
187#define ECC_POISON1_BANKNR_SHIFT 24
188#define ECC_POISON1_BANKNR_MASK 0x7000000
189#define ECC_POISON1_ROW_SHIFT 0
190#define ECC_POISON1_ROW_MASK 0x3FFFF
191
192/* DDR Memory type defines */
193#define MEM_TYPE_DDR3 0x1
194#define MEM_TYPE_LPDDR3 0x8
195#define MEM_TYPE_DDR2 0x4
196#define MEM_TYPE_DDR4 0x10
197#define MEM_TYPE_LPDDR4 0x20
198
199/* DDRC Software control register */
200#define DDRC_SWCTL 0x320
201
202/* DDRC ECC CE & UE poison mask */
203#define ECC_CEPOISON_MASK 0x3
204#define ECC_UEPOISON_MASK 0x1
205
206/* DDRC Device config masks */
207#define DDRC_MSTR_CFG_MASK 0xC0000000
208#define DDRC_MSTR_CFG_SHIFT 30
209#define DDRC_MSTR_CFG_X4_MASK 0x0
210#define DDRC_MSTR_CFG_X8_MASK 0x1
211#define DDRC_MSTR_CFG_X16_MASK 0x2
212#define DDRC_MSTR_CFG_X32_MASK 0x3
213
214#define DDR_MAX_ROW_SHIFT 18
215#define DDR_MAX_COL_SHIFT 14
216#define DDR_MAX_BANK_SHIFT 3
217#define DDR_MAX_BANKGRP_SHIFT 2
218
219#define ROW_MAX_VAL_MASK 0xF
220#define COL_MAX_VAL_MASK 0xF
221#define BANK_MAX_VAL_MASK 0x1F
222#define BANKGRP_MAX_VAL_MASK 0x1F
223#define RANK_MAX_VAL_MASK 0x1F
224
225#define ROW_B0_BASE 6
226#define ROW_B1_BASE 7
227#define ROW_B2_BASE 8
228#define ROW_B3_BASE 9
229#define ROW_B4_BASE 10
230#define ROW_B5_BASE 11
231#define ROW_B6_BASE 12
232#define ROW_B7_BASE 13
233#define ROW_B8_BASE 14
234#define ROW_B9_BASE 15
235#define ROW_B10_BASE 16
236#define ROW_B11_BASE 17
237#define ROW_B12_BASE 18
238#define ROW_B13_BASE 19
239#define ROW_B14_BASE 20
240#define ROW_B15_BASE 21
241#define ROW_B16_BASE 22
242#define ROW_B17_BASE 23
243
244#define COL_B2_BASE 2
245#define COL_B3_BASE 3
246#define COL_B4_BASE 4
247#define COL_B5_BASE 5
248#define COL_B6_BASE 6
249#define COL_B7_BASE 7
250#define COL_B8_BASE 8
251#define COL_B9_BASE 9
252#define COL_B10_BASE 10
253#define COL_B11_BASE 11
254#define COL_B12_BASE 12
255#define COL_B13_BASE 13
256
257#define BANK_B0_BASE 2
258#define BANK_B1_BASE 3
259#define BANK_B2_BASE 4
260
261#define BANKGRP_B0_BASE 2
262#define BANKGRP_B1_BASE 3
263
264#define RANK_B0_BASE 6
265
/**
 * struct ecc_error_info - ECC error log information.
 * @row:	Row number.
 * @col:	Column number.
 * @bank:	Bank number.
 * @bitpos:	Bit position.
 * @data:	Data causing the error.
 * @bankgrpnr:	Bank group number.
 * @blknr:	Block number.
 *
 * One instance describes a single logged CE or UE event, decoded from the
 * controller's address/log registers.  @bankgrpnr and @blknr are only
 * populated by the ZynqMP/Synopsys-v3 decode path; the Zynq path fills
 * @row/@col/@bank/@bitpos/@data only.
 */
struct ecc_error_info {
	u32 row;
	u32 col;
	u32 bank;
	u32 bitpos;
	u32 data;
	u32 bankgrpnr;
	u32 blknr;
};
285
/**
 * struct synps_ecc_status - ECC status information to report.
 * @ce_cnt:	Correctable error count.
 * @ue_cnt:	Uncorrectable error count.
 * @ceinfo:	Correctable error log information.
 * @ueinfo:	Uncorrectable error log information.
 *
 * Snapshot of the controller's error state taken by a get_error_info()
 * callback; consumed (and then zeroed) by handle_error().
 */
struct synps_ecc_status {
	u32 ce_cnt;
	u32 ue_cnt;
	struct ecc_error_info ceinfo;
	struct ecc_error_info ueinfo;
};
299
/**
 * struct synps_edac_priv - DDR memory controller private instance data.
 * @baseaddr:		Base address of the DDR controller.
 * @reglock:		Concurrent CSRs access lock (guards the shared
 *			ECC_CLR register between the error-clear path and
 *			the interrupt enable/disable paths).
 * @message:		Buffer for framing the event specific info.
 * @stat:		ECC status information.
 * @p_data:		Platform data (per-compatible callbacks and quirks).
 * @ce_cnt:		Correctable Error count (cumulative).
 * @ue_cnt:		Uncorrectable Error count (cumulative).
 * @poison_addr:	Data poison address.
 * @row_shift:		Bit shifts for row bits.
 * @col_shift:		Bit shifts for column bits.
 * @bank_shift:		Bit shifts for bank bits.
 * @bankgrp_shift:	Bit shifts for bank group bits.
 * @rank_shift:		Bit shift for the rank bit.
 *
 * The poison/shift fields exist only when error injection debugfs/sysfs
 * support is compiled in (CONFIG_EDAC_DEBUG).
 */
struct synps_edac_priv {
	void __iomem *baseaddr;
	spinlock_t reglock;
	char message[SYNPS_EDAC_MSG_SIZE];
	struct synps_ecc_status stat;
	const struct synps_platform_data *p_data;
	u32 ce_cnt;
	u32 ue_cnt;
#ifdef CONFIG_EDAC_DEBUG
	ulong poison_addr;
	u32 row_shift[18];
	u32 col_shift[14];
	u32 bank_shift[3];
	u32 bankgrp_shift[2];
	u32 rank_shift[1];
#endif
};
333
/**
 * struct synps_platform_data - synps platform data structure.
 * @get_error_info:	Get EDAC error info (returns 1 when no error is
 *			pending, 0 when @priv->stat was filled).
 * @get_mtype:		Get memory type (DDR2/DDR3/...).
 * @get_dtype:		Get device width type.
 * @get_ecc_state:	Get ECC enable state.
 * @quirks:		To differentiate IPs (DDR_ECC_* flag bits).
 */
struct synps_platform_data {
	int (*get_error_info)(struct synps_edac_priv *priv);
	enum mem_type (*get_mtype)(const void __iomem *base);
	enum dev_type (*get_dtype)(const void __iomem *base);
	bool (*get_ecc_state)(void __iomem *base);
	int quirks;
};
349
/**
 * zynq_get_error_info - Get the current ECC error info.
 * @priv:	DDR memory controller private instance data.
 *
 * Read the Zynq DDRC status/log registers into priv->stat and clear the
 * logged errors in hardware.  Register access order matters: the counts
 * are sampled first, then each log register is checked for LOG_VALID
 * before its address/data registers are decoded.
 *
 * Return: one if there is no error, otherwise zero.
 */
static int zynq_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval = 0;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	regval = readl(base + STAT_OFST);
	if (!regval)
		return 1;

	p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT;
	p->ue_cnt = regval & STAT_UECNT_MASK;

	/* Decode the CE log only when a CE is counted AND the log is valid. */
	regval = readl(base + CE_LOG_OFST);
	if (!(p->ce_cnt && (regval & LOG_VALID)))
		goto ue_err;

	p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT;
	regval = readl(base + CE_ADDR_OFST);
	p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ceinfo.col = regval & ADDR_COL_MASK;
	p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ceinfo.data = readl(base + CE_DATA_31_0_OFST);
	edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos,
		 p->ceinfo.data);
	clearval = ECC_CTRL_CLR_CE_ERR;

ue_err:
	regval = readl(base + UE_LOG_OFST);
	if (!(p->ue_cnt && (regval & LOG_VALID)))
		goto out;

	regval = readl(base + UE_ADDR_OFST);
	p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ueinfo.col = regval & ADDR_COL_MASK;
	p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ueinfo.data = readl(base + UE_DATA_31_0_OFST);
	clearval |= ECC_CTRL_CLR_UE_ERR;

out:
	/*
	 * Pulse the clear bits: write the accumulated clear mask, then
	 * write zero so subsequent errors can be logged again.
	 */
	writel(clearval, base + ECC_CTRL_OFST);
	writel(0x0, base + ECC_CTRL_OFST);

	return 0;
}
404
b500b4a0
MN
/**
 * zynqmp_get_error_info - Get the current ECC error info.
 * @priv:	DDR memory controller private instance data.
 *
 * Read the ZynqMP DDRC error-count, address and syndrome registers into
 * priv->stat, then clear the error counters/status under priv->reglock
 * (ECC_CLR is shared with the interrupt enable/disable paths).
 *
 * Return: one if there is no error otherwise returns zero.
 */
static int zynqmp_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval;
	unsigned long flags;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	regval = readl(base + ECC_ERRCNT_OFST);
	p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
	p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
	if (!p->ce_cnt)
		goto ue_err;

	/*
	 * A CE was counted but the status register may already be clear;
	 * in that case report "no error" without touching ECC_CLR.
	 */
	regval = readl(base + ECC_STAT_OFST);
	if (!regval)
		return 1;

	p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);

	regval = readl(base + ECC_CEADDR0_OFST);
	p->ceinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_CEADDR1_OFST);
	p->ceinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ceinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ceinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	p->ceinfo.data = readl(base + ECC_CSYND0_OFST);
	edac_dbg(2, "ECCCSYN0: 0x%08X ECCCSYN1: 0x%08X ECCCSYN2: 0x%08X\n",
		 readl(base + ECC_CSYND0_OFST), readl(base + ECC_CSYND1_OFST),
		 readl(base + ECC_CSYND2_OFST));
ue_err:
	if (!p->ue_cnt)
		goto out;

	regval = readl(base + ECC_UEADDR0_OFST);
	p->ueinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_UEADDR1_OFST);
	p->ueinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ueinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
out:
	/*
	 * Read-modify-write of ECC_CLR must not race the enable/disable
	 * paths that also write this register; take the register lock.
	 */
	spin_lock_irqsave(&priv->reglock, flags);

	clearval = readl(base + ECC_CLR_OFST) |
		   ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT |
		   ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
	writel(clearval, base + ECC_CLR_OFST);

	spin_unlock_irqrestore(&priv->reglock, flags);

	return 0;
}
470
ae9b56e3 471/**
225af74d
MN
472 * handle_error - Handle Correctable and Uncorrectable errors.
473 * @mci: EDAC memory controller instance.
474 * @p: Synopsys ECC status structure.
ae9b56e3 475 *
225af74d 476 * Handles ECC correctable and uncorrectable errors.
ae9b56e3 477 */
bb894bc4 478static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
ae9b56e3
PCK
479{
480 struct synps_edac_priv *priv = mci->pvt_info;
481 struct ecc_error_info *pinf;
482
483 if (p->ce_cnt) {
484 pinf = &p->ceinfo;
2fb3f6e1 485 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
b500b4a0 486 snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
2fb3f6e1
SS
487 "DDR ECC error type:%s Row %d Bank %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
488 "CE", pinf->row, pinf->bank,
489 pinf->bankgrpnr, pinf->blknr,
b500b4a0
MN
490 pinf->bitpos, pinf->data);
491 } else {
492 snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
2fb3f6e1 493 "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
dfc6014e 494 "CE", pinf->row, pinf->bank, pinf->col,
b500b4a0
MN
495 pinf->bitpos, pinf->data);
496 }
497
ae9b56e3
PCK
498 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
499 p->ce_cnt, 0, 0, 0, 0, 0, -1,
500 priv->message, "");
501 }
502
503 if (p->ue_cnt) {
504 pinf = &p->ueinfo;
2fb3f6e1 505 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
b500b4a0 506 snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
2fb3f6e1
SS
507 "DDR ECC error type :%s Row %d Bank %d BankGroup Number %d Block Number %d",
508 "UE", pinf->row, pinf->bank,
509 pinf->bankgrpnr, pinf->blknr);
b500b4a0
MN
510 } else {
511 snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
2fb3f6e1
SS
512 "DDR ECC error type :%s Row %d Bank %d Col %d ",
513 "UE", pinf->row, pinf->bank, pinf->col);
b500b4a0
MN
514 }
515
ae9b56e3
PCK
516 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
517 p->ue_cnt, 0, 0, 0, 0, 0, -1,
518 priv->message, "");
519 }
520
521 memset(p, 0, sizeof(*p));
522}
523
4bcffe94
SS
524static void enable_intr(struct synps_edac_priv *priv)
525{
591c9466
SS
526 unsigned long flags;
527
4bcffe94 528 /* Enable UE/CE Interrupts */
591c9466 529 if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
4bcffe94
SS
530 writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
531 priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
532
591c9466
SS
533 return;
534 }
535
536 spin_lock_irqsave(&priv->reglock, flags);
537
538 writel(DDR_UE_MASK | DDR_CE_MASK,
539 priv->baseaddr + ECC_CLR_OFST);
540
541 spin_unlock_irqrestore(&priv->reglock, flags);
4bcffe94
SS
542}
543
544static void disable_intr(struct synps_edac_priv *priv)
545{
591c9466
SS
546 unsigned long flags;
547
4bcffe94 548 /* Disable UE/CE Interrupts */
591c9466 549 if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
4bcffe94
SS
550 writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
551 priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
591c9466
SS
552
553 return;
554 }
555
556 spin_lock_irqsave(&priv->reglock, flags);
557
558 writel(0, priv->baseaddr + ECC_CLR_OFST);
559
560 spin_unlock_irqrestore(&priv->reglock, flags);
4bcffe94
SS
561}
562
b500b4a0
MN
/**
 * intr_handler - Interrupt Handler for ECC interrupts.
 * @irq:	IRQ number.
 * @dev_id:	Device ID.
 *
 * Return: IRQ_NONE, if interrupt not set or IRQ_HANDLED otherwise.
 */
static irqreturn_t intr_handler(int irq, void *dev_id)
{
	const struct synps_platform_data *p_data;
	struct mem_ctl_info *mci = dev_id;
	struct synps_edac_priv *priv;
	int status, regval;

	priv = mci->pvt_info;
	p_data = priv->p_data;

	/*
	 * v3.0 of the controller has the ce/ue bits cleared automatically,
	 * so this condition does not apply.
	 * Note: regval is only assigned here; the final writel below is
	 * guarded by the same quirk check, so it is never used unset.
	 */
	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
		regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
		regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
		if (!(regval & ECC_CE_UE_INTR_MASK))
			return IRQ_NONE;
	}

	/* Non-zero status means no error was pending - not our interrupt. */
	status = p_data->get_error_info(priv);
	if (status)
		return IRQ_NONE;

	priv->ce_cnt += priv->stat.ce_cnt;
	priv->ue_cnt += priv->stat.ue_cnt;
	handle_error(mci, &priv->stat);

	edac_dbg(3, "Total error count CE %d UE %d\n",
		 priv->ce_cnt, priv->ue_cnt);
	/* v3.0 of the controller does not have this register */
	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
		writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);

	return IRQ_HANDLED;
}
607
ae9b56e3 608/**
225af74d
MN
609 * check_errors - Check controller for ECC errors.
610 * @mci: EDAC memory controller instance.
ae9b56e3 611 *
225af74d 612 * Check and post ECC errors. Called by the polling thread.
ae9b56e3 613 */
bb894bc4 614static void check_errors(struct mem_ctl_info *mci)
ae9b56e3 615{
b500b4a0
MN
616 const struct synps_platform_data *p_data;
617 struct synps_edac_priv *priv;
ae9b56e3
PCK
618 int status;
619
b500b4a0
MN
620 priv = mci->pvt_info;
621 p_data = priv->p_data;
622
3d02a897 623 status = p_data->get_error_info(priv);
ae9b56e3
PCK
624 if (status)
625 return;
626
627 priv->ce_cnt += priv->stat.ce_cnt;
628 priv->ue_cnt += priv->stat.ue_cnt;
bb894bc4 629 handle_error(mci, &priv->stat);
ae9b56e3 630
1b51adc6 631 edac_dbg(3, "Total error count CE %d UE %d\n",
ae9b56e3
PCK
632 priv->ce_cnt, priv->ue_cnt);
633}
634
635/**
3d02a897 636 * zynq_get_dtype - Return the controller memory width.
225af74d 637 * @base: DDR memory controller base address.
ae9b56e3
PCK
638 *
639 * Get the EDAC device type width appropriate for the current controller
640 * configuration.
641 *
642 * Return: a device type width enumeration.
643 */
3d02a897 644static enum dev_type zynq_get_dtype(const void __iomem *base)
ae9b56e3
PCK
645{
646 enum dev_type dt;
647 u32 width;
648
649 width = readl(base + CTRL_OFST);
650 width = (width & CTRL_BW_MASK) >> CTRL_BW_SHIFT;
651
652 switch (width) {
653 case DDRCTL_WDTH_16:
654 dt = DEV_X2;
655 break;
656 case DDRCTL_WDTH_32:
657 dt = DEV_X4;
658 break;
659 default:
660 dt = DEV_UNKNOWN;
661 }
662
663 return dt;
664}
665
b500b4a0
MN
666/**
667 * zynqmp_get_dtype - Return the controller memory width.
668 * @base: DDR memory controller base address.
669 *
670 * Get the EDAC device type width appropriate for the current controller
671 * configuration.
672 *
673 * Return: a device type width enumeration.
674 */
675static enum dev_type zynqmp_get_dtype(const void __iomem *base)
676{
677 enum dev_type dt;
678 u32 width;
679
680 width = readl(base + CTRL_OFST);
681 width = (width & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
682 switch (width) {
683 case DDRCTL_EWDTH_16:
684 dt = DEV_X2;
685 break;
686 case DDRCTL_EWDTH_32:
687 dt = DEV_X4;
688 break;
689 case DDRCTL_EWDTH_64:
690 dt = DEV_X8;
691 break;
692 default:
693 dt = DEV_UNKNOWN;
694 }
695
696 return dt;
697}
698
ae9b56e3 699/**
3d02a897 700 * zynq_get_ecc_state - Return the controller ECC enable/disable status.
225af74d 701 * @base: DDR memory controller base address.
ae9b56e3 702 *
225af74d 703 * Get the ECC enable/disable status of the controller.
ae9b56e3 704 *
225af74d 705 * Return: true if enabled, otherwise false.
ae9b56e3 706 */
3d02a897 707static bool zynq_get_ecc_state(void __iomem *base)
ae9b56e3
PCK
708{
709 enum dev_type dt;
710 u32 ecctype;
ae9b56e3 711
3d02a897 712 dt = zynq_get_dtype(base);
ae9b56e3 713 if (dt == DEV_UNKNOWN)
b500b4a0 714 return false;
ae9b56e3
PCK
715
716 ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK;
717 if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2))
b500b4a0
MN
718 return true;
719
720 return false;
721}
722
723/**
724 * zynqmp_get_ecc_state - Return the controller ECC enable/disable status.
725 * @base: DDR memory controller base address.
726 *
727 * Get the ECC enable/disable status for the controller.
728 *
729 * Return: a ECC status boolean i.e true/false - enabled/disabled.
730 */
731static bool zynqmp_get_ecc_state(void __iomem *base)
732{
733 enum dev_type dt;
734 u32 ecctype;
ae9b56e3 735
b500b4a0
MN
736 dt = zynqmp_get_dtype(base);
737 if (dt == DEV_UNKNOWN)
738 return false;
739
740 ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
741 if ((ecctype == SCRUB_MODE_SECDED) &&
742 ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8)))
743 return true;
744
745 return false;
ae9b56e3
PCK
746}
747
/**
 * get_memsize - Read the size of the attached memory device.
 *
 * Return: the memory size in bytes.
 *
 * NOTE(review): the product is computed in and returned as u32, so the
 * result truncates on systems with more than 4 GiB of RAM - confirm
 * whether callers (csrow page accounting) tolerate that.
 */
static u32 get_memsize(void)
{
	struct sysinfo inf;

	si_meminfo(&inf);

	return inf.totalram * inf.mem_unit;
}
761
762/**
3d02a897 763 * zynq_get_mtype - Return the controller memory type.
225af74d 764 * @base: Synopsys ECC status structure.
ae9b56e3
PCK
765 *
766 * Get the EDAC memory type appropriate for the current controller
767 * configuration.
768 *
769 * Return: a memory type enumeration.
770 */
3d02a897 771static enum mem_type zynq_get_mtype(const void __iomem *base)
ae9b56e3
PCK
772{
773 enum mem_type mt;
774 u32 memtype;
775
776 memtype = readl(base + T_ZQ_OFST);
777
778 if (memtype & T_ZQ_DDRMODE_MASK)
779 mt = MEM_DDR3;
780 else
781 mt = MEM_DDR2;
782
783 return mt;
784}
785
b500b4a0
MN
786/**
787 * zynqmp_get_mtype - Returns controller memory type.
788 * @base: Synopsys ECC status structure.
789 *
790 * Get the EDAC memory type appropriate for the current controller
791 * configuration.
792 *
793 * Return: a memory type enumeration.
794 */
795static enum mem_type zynqmp_get_mtype(const void __iomem *base)
796{
797 enum mem_type mt;
798 u32 memtype;
799
800 memtype = readl(base + CTRL_OFST);
801
802 if ((memtype & MEM_TYPE_DDR3) || (memtype & MEM_TYPE_LPDDR3))
803 mt = MEM_DDR3;
804 else if (memtype & MEM_TYPE_DDR2)
805 mt = MEM_RDDR2;
806 else if ((memtype & MEM_TYPE_LPDDR4) || (memtype & MEM_TYPE_DDR4))
807 mt = MEM_DDR4;
808 else
809 mt = MEM_EMPTY;
810
811 return mt;
812}
813
ae9b56e3 814/**
225af74d
MN
815 * init_csrows - Initialize the csrow data.
816 * @mci: EDAC memory controller instance.
ae9b56e3 817 *
225af74d
MN
818 * Initialize the chip select rows associated with the EDAC memory
819 * controller instance.
ae9b56e3 820 */
fa9f6b9e 821static void init_csrows(struct mem_ctl_info *mci)
ae9b56e3 822{
1b51adc6 823 struct synps_edac_priv *priv = mci->pvt_info;
3d02a897 824 const struct synps_platform_data *p_data;
ae9b56e3
PCK
825 struct csrow_info *csi;
826 struct dimm_info *dimm;
1b51adc6
MN
827 u32 size, row;
828 int j;
ae9b56e3 829
3d02a897
MN
830 p_data = priv->p_data;
831
ae9b56e3
PCK
832 for (row = 0; row < mci->nr_csrows; row++) {
833 csi = mci->csrows[row];
bb894bc4 834 size = get_memsize();
ae9b56e3
PCK
835
836 for (j = 0; j < csi->nr_channels; j++) {
1b51adc6 837 dimm = csi->channels[j]->dimm;
5297cfa6 838 dimm->edac_mode = EDAC_SECDED;
3d02a897 839 dimm->mtype = p_data->get_mtype(priv->baseaddr);
1b51adc6
MN
840 dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
841 dimm->grain = SYNPS_EDAC_ERR_GRAIN;
3d02a897 842 dimm->dtype = p_data->get_dtype(priv->baseaddr);
ae9b56e3
PCK
843 }
844 }
ae9b56e3
PCK
845}
846
847/**
225af74d
MN
848 * mc_init - Initialize one driver instance.
849 * @mci: EDAC memory controller instance.
850 * @pdev: platform device.
ae9b56e3 851 *
225af74d 852 * Perform initialization of the EDAC memory controller instance and
ae9b56e3
PCK
853 * related driver-private data associated with the memory controller the
854 * instance is bound to.
ae9b56e3 855 */
fa9f6b9e 856static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
ae9b56e3 857{
ae9b56e3
PCK
858 struct synps_edac_priv *priv;
859
860 mci->pdev = &pdev->dev;
861 priv = mci->pvt_info;
862 platform_set_drvdata(pdev, mci);
863
864 /* Initialize controller capabilities and configuration */
865 mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
866 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
867 mci->scrub_cap = SCRUB_HW_SRC;
868 mci->scrub_mode = SCRUB_NONE;
869
870 mci->edac_cap = EDAC_FLAG_SECDED;
871 mci->ctl_name = "synps_ddr_controller";
872 mci->dev_name = SYNPS_EDAC_MOD_STRING;
873 mci->mod_name = SYNPS_EDAC_MOD_VER;
ae9b56e3 874
b500b4a0
MN
875 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
876 edac_op_state = EDAC_OPSTATE_INT;
877 } else {
878 edac_op_state = EDAC_OPSTATE_POLL;
879 mci->edac_check = check_errors;
880 }
881
ae9b56e3
PCK
882 mci->ctl_page_to_phys = NULL;
883
fa9f6b9e 884 init_csrows(mci);
ae9b56e3
PCK
885}
886
b500b4a0
MN
887static int setup_irq(struct mem_ctl_info *mci,
888 struct platform_device *pdev)
889{
890 struct synps_edac_priv *priv = mci->pvt_info;
891 int ret, irq;
892
893 irq = platform_get_irq(pdev, 0);
894 if (irq < 0) {
895 edac_printk(KERN_ERR, EDAC_MC,
896 "No IRQ %d in DT\n", irq);
897 return irq;
898 }
899
900 ret = devm_request_irq(&pdev->dev, irq, intr_handler,
901 0, dev_name(&pdev->dev), mci);
902 if (ret < 0) {
903 edac_printk(KERN_ERR, EDAC_MC, "Failed to request IRQ\n");
904 return ret;
905 }
906
907 enable_intr(priv);
908
909 return 0;
910}
911
3d02a897
MN
/* Zynq-7000 DDRC: polled operation, no quirks. */
static const struct synps_platform_data zynq_edac_def = {
	.get_error_info	= zynq_get_error_info,
	.get_mtype	= zynq_get_mtype,
	.get_dtype	= zynq_get_dtype,
	.get_ecc_state	= zynq_get_ecc_state,
	.quirks		= 0,
};
919
b500b4a0
MN
/* ZynqMP DDRC: interrupt driven; poison injection when EDAC debug is on. */
static const struct synps_platform_data zynqmp_edac_def = {
	.get_error_info	= zynqmp_get_error_info,
	.get_mtype	= zynqmp_get_mtype,
	.get_dtype	= zynqmp_get_dtype,
	.get_ecc_state	= zynqmp_get_ecc_state,
	.quirks         = (DDR_ECC_INTR_SUPPORT
#ifdef CONFIG_EDAC_DEBUG
			  | DDR_ECC_DATA_POISON_SUPPORT
#endif
			  ),
};
931
f7824ded
DN
/*
 * Generic Synopsys DDRC v3.x: same register layout as ZynqMP but the
 * CE/UE interrupt bits self-clear (DDR_ECC_INTR_SELF_CLEAR).
 */
static const struct synps_platform_data synopsys_edac_def = {
	.get_error_info	= zynqmp_get_error_info,
	.get_mtype	= zynqmp_get_mtype,
	.get_dtype	= zynqmp_get_dtype,
	.get_ecc_state	= zynqmp_get_ecc_state,
	.quirks         = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
#ifdef CONFIG_EDAC_DEBUG
			  | DDR_ECC_DATA_POISON_SUPPORT
#endif
			  ),
};
943
944
/* Device-tree match table: selects the per-IP platform data above. */
static const struct of_device_id synps_edac_match[] = {
	{
		.compatible = "xlnx,zynq-ddrc-a05",
		.data = (void *)&zynq_edac_def
	},
	{
		.compatible = "xlnx,zynqmp-ddrc-2.40a",
		.data = (void *)&zynqmp_edac_def
	},
	{
		.compatible = "snps,ddrc-3.80a",
		.data = (void *)&synopsys_edac_def
	},
	{
		/* end of table */
	}
};

MODULE_DEVICE_TABLE(of, synps_edac_match);
964
1a81361f
MN
965#ifdef CONFIG_EDAC_DEBUG
966#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
967
/**
 * ddr_poison_setup - Update poison registers.
 * @priv:	DDR memory controller private instance data.
 *
 * Translate priv->poison_addr (a system address) into the controller's
 * rank/bankgroup/bank/row/column coordinates using the address-map bit
 * positions cached in priv->*_shift, and program the ECC_POISON0/1
 * registers accordingly.
 * Return: none.
 */
static void ddr_poison_setup(struct synps_edac_priv *priv)
{
	int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval;
	int index;
	ulong hif_addr = 0;

	/* HIF address = system address / 8 (assumes a 64-bit data path —
	 * TODO confirm for 16/32-bit configurations). */
	hif_addr = priv->poison_addr >> 3;

	/* A zero shift entry terminates the list of mapped row bits. */
	for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) {
		if (priv->row_shift[index])
			row |= (((hif_addr >> priv->row_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	/* Column bits 0-2 are always taken, even when their shift is 0. */
	for (index = 0; index < DDR_MAX_COL_SHIFT; index++) {
		if (priv->col_shift[index] || index < 3)
			col |= (((hif_addr >> priv->col_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	for (index = 0; index < DDR_MAX_BANK_SHIFT; index++) {
		if (priv->bank_shift[index])
			bank |= (((hif_addr >> priv->bank_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	for (index = 0; index < DDR_MAX_BANKGRP_SHIFT; index++) {
		if (priv->bankgrp_shift[index])
			bankgrp |= (((hif_addr >> priv->bankgrp_shift[index])
						& BIT(0)) << index);
		else
			break;
	}

	if (priv->rank_shift[0])
		rank = (hif_addr >> priv->rank_shift[0]) & BIT(0);

	/* POISON0 carries rank + column; POISON1 carries bg + bank + row. */
	regval = (rank << ECC_POISON0_RANK_SHIFT) & ECC_POISON0_RANK_MASK;
	regval |= (col << ECC_POISON0_COLUMN_SHIFT) & ECC_POISON0_COLUMN_MASK;
	writel(regval, priv->baseaddr + ECC_POISON0_OFST);

	regval = (bankgrp << ECC_POISON1_BG_SHIFT) & ECC_POISON1_BG_MASK;
	regval |= (bank << ECC_POISON1_BANKNR_SHIFT) & ECC_POISON1_BANKNR_MASK;
	regval |= (row << ECC_POISON1_ROW_SHIFT) & ECC_POISON1_ROW_MASK;
	writel(regval, priv->baseaddr + ECC_POISON1_OFST);
}
1027
/* sysfs read: report the programmed poison registers and target address. */
static ssize_t inject_data_error_show(struct device *dev,
				      struct device_attribute *mattr,
				      char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct synps_edac_priv *priv = mci->pvt_info;

	return sprintf(data, "Poison0 Addr: 0x%08x\n\rPoison1 Addr: 0x%08x\n\r"
			"Error injection Address: 0x%lx\n\r",
			readl(priv->baseaddr + ECC_POISON0_OFST),
			readl(priv->baseaddr + ECC_POISON1_OFST),
			priv->poison_addr);
}
1041
1042static ssize_t inject_data_error_store(struct device *dev,
1043 struct device_attribute *mattr,
1044 const char *data, size_t count)
1045{
1046 struct mem_ctl_info *mci = to_mci(dev);
1047 struct synps_edac_priv *priv = mci->pvt_info;
1048
1049 if (kstrtoul(data, 0, &priv->poison_addr))
1050 return -EINVAL;
1051
1052 ddr_poison_setup(priv);
1053
1054 return count;
1055}
1056
1057static ssize_t inject_data_poison_show(struct device *dev,
1058 struct device_attribute *mattr,
1059 char *data)
1060{
1061 struct mem_ctl_info *mci = to_mci(dev);
1062 struct synps_edac_priv *priv = mci->pvt_info;
1063
1064 return sprintf(data, "Data Poisoning: %s\n\r",
1065 (((readl(priv->baseaddr + ECC_CFG1_OFST)) & 0x3) == 0x3)
1066 ? ("Correctable Error") : ("UnCorrectable Error"));
1067}
1068
1069static ssize_t inject_data_poison_store(struct device *dev,
1070 struct device_attribute *mattr,
1071 const char *data, size_t count)
1072{
1073 struct mem_ctl_info *mci = to_mci(dev);
1074 struct synps_edac_priv *priv = mci->pvt_info;
1075
1076 writel(0, priv->baseaddr + DDRC_SWCTL);
1077 if (strncmp(data, "CE", 2) == 0)
1078 writel(ECC_CEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
1079 else
1080 writel(ECC_UEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
1081 writel(1, priv->baseaddr + DDRC_SWCTL);
1082
1083 return count;
1084}
1085
/* sysfs knobs for ECC error injection (built only under CONFIG_EDAC_DEBUG) */
static DEVICE_ATTR_RW(inject_data_error);
static DEVICE_ATTR_RW(inject_data_poison);
1088
1089static int edac_create_sysfs_attributes(struct mem_ctl_info *mci)
1090{
1091 int rc;
1092
1093 rc = device_create_file(&mci->dev, &dev_attr_inject_data_error);
1094 if (rc < 0)
1095 return rc;
1096 rc = device_create_file(&mci->dev, &dev_attr_inject_data_poison);
1097 if (rc < 0)
1098 return rc;
1099 return 0;
1100}
1101
1102static void edac_remove_sysfs_attributes(struct mem_ctl_info *mci)
1103{
1104 device_remove_file(&mci->dev, &dev_attr_inject_data_error);
1105 device_remove_file(&mci->dev, &dev_attr_inject_data_poison);
1106}
1107
1108static void setup_row_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1109{
1110 u32 addrmap_row_b2_10;
1111 int index;
1112
1113 priv->row_shift[0] = (addrmap[5] & ROW_MAX_VAL_MASK) + ROW_B0_BASE;
1114 priv->row_shift[1] = ((addrmap[5] >> 8) &
1115 ROW_MAX_VAL_MASK) + ROW_B1_BASE;
1116
1117 addrmap_row_b2_10 = (addrmap[5] >> 16) & ROW_MAX_VAL_MASK;
1118 if (addrmap_row_b2_10 != ROW_MAX_VAL_MASK) {
1119 for (index = 2; index < 11; index++)
1120 priv->row_shift[index] = addrmap_row_b2_10 +
1121 index + ROW_B0_BASE;
1122
1123 } else {
1124 priv->row_shift[2] = (addrmap[9] &
1125 ROW_MAX_VAL_MASK) + ROW_B2_BASE;
1126 priv->row_shift[3] = ((addrmap[9] >> 8) &
1127 ROW_MAX_VAL_MASK) + ROW_B3_BASE;
1128 priv->row_shift[4] = ((addrmap[9] >> 16) &
1129 ROW_MAX_VAL_MASK) + ROW_B4_BASE;
1130 priv->row_shift[5] = ((addrmap[9] >> 24) &
1131 ROW_MAX_VAL_MASK) + ROW_B5_BASE;
1132 priv->row_shift[6] = (addrmap[10] &
1133 ROW_MAX_VAL_MASK) + ROW_B6_BASE;
1134 priv->row_shift[7] = ((addrmap[10] >> 8) &
1135 ROW_MAX_VAL_MASK) + ROW_B7_BASE;
1136 priv->row_shift[8] = ((addrmap[10] >> 16) &
1137 ROW_MAX_VAL_MASK) + ROW_B8_BASE;
1138 priv->row_shift[9] = ((addrmap[10] >> 24) &
1139 ROW_MAX_VAL_MASK) + ROW_B9_BASE;
1140 priv->row_shift[10] = (addrmap[11] &
1141 ROW_MAX_VAL_MASK) + ROW_B10_BASE;
1142 }
1143
1144 priv->row_shift[11] = (((addrmap[5] >> 24) & ROW_MAX_VAL_MASK) ==
1145 ROW_MAX_VAL_MASK) ? 0 : (((addrmap[5] >> 24) &
1146 ROW_MAX_VAL_MASK) + ROW_B11_BASE);
1147 priv->row_shift[12] = ((addrmap[6] & ROW_MAX_VAL_MASK) ==
1148 ROW_MAX_VAL_MASK) ? 0 : ((addrmap[6] &
1149 ROW_MAX_VAL_MASK) + ROW_B12_BASE);
1150 priv->row_shift[13] = (((addrmap[6] >> 8) & ROW_MAX_VAL_MASK) ==
1151 ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 8) &
1152 ROW_MAX_VAL_MASK) + ROW_B13_BASE);
1153 priv->row_shift[14] = (((addrmap[6] >> 16) & ROW_MAX_VAL_MASK) ==
1154 ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 16) &
1155 ROW_MAX_VAL_MASK) + ROW_B14_BASE);
1156 priv->row_shift[15] = (((addrmap[6] >> 24) & ROW_MAX_VAL_MASK) ==
1157 ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 24) &
1158 ROW_MAX_VAL_MASK) + ROW_B15_BASE);
1159 priv->row_shift[16] = ((addrmap[7] & ROW_MAX_VAL_MASK) ==
1160 ROW_MAX_VAL_MASK) ? 0 : ((addrmap[7] &
1161 ROW_MAX_VAL_MASK) + ROW_B16_BASE);
1162 priv->row_shift[17] = (((addrmap[7] >> 8) & ROW_MAX_VAL_MASK) ==
1163 ROW_MAX_VAL_MASK) ? 0 : (((addrmap[7] >> 8) &
1164 ROW_MAX_VAL_MASK) + ROW_B17_BASE);
1165}
1166
/**
 * setup_column_address_map - Derive column-bit HIF positions from ADDRMAP.
 * @priv:    DDR memory controller private instance data.
 * @addrmap: Array of raw ADDRMAP register values.
 *
 * Fills priv->col_shift[] from ADDRMAP2..ADDRMAP4. A field that reads back
 * as COL_MAX_VAL_MASK means "column bit unused" and yields shift 0. Which
 * register fields feed the upper column bits depends on the controller bus
 * width and on whether the memory is LPDDR3.
 *
 * Return: none.
 */
static void setup_column_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 width, memtype;
	int index;

	/* Bus width and memory type select the upper-column field layout. */
	memtype = readl(priv->baseaddr + CTRL_OFST);
	width = (memtype & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;

	/* Column bits 0 and 1 are always the two lowest HIF bits. */
	priv->col_shift[0] = 0;
	priv->col_shift[1] = 1;
	priv->col_shift[2] = (addrmap[2] & COL_MAX_VAL_MASK) + COL_B2_BASE;
	priv->col_shift[3] = ((addrmap[2] >> 8) &
			COL_MAX_VAL_MASK) + COL_B3_BASE;
	priv->col_shift[4] = (((addrmap[2] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 16) &
					COL_MAX_VAL_MASK) + COL_B4_BASE);
	priv->col_shift[5] = (((addrmap[2] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 24) &
					COL_MAX_VAL_MASK) + COL_B5_BASE);
	priv->col_shift[6] = ((addrmap[3] & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : ((addrmap[3] &
					COL_MAX_VAL_MASK) + COL_B6_BASE);
	priv->col_shift[7] = (((addrmap[3] >> 8) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 8) &
					COL_MAX_VAL_MASK) + COL_B7_BASE);
	priv->col_shift[8] = (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 16) &
					COL_MAX_VAL_MASK) + COL_B8_BASE);
	priv->col_shift[9] = (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 24) &
					COL_MAX_VAL_MASK) + COL_B9_BASE);
	/*
	 * NOTE(review): on the non-LPDDR3 paths, col_shift[12] is never
	 * assigned and the code jumps from [11] to [13]; this appears to
	 * mirror a reserved column bit in the DDRC ADDRMAP layout — confirm
	 * against the Synopsys DDRC register specification.
	 */
	if (width == DDRCTL_EWDTH_64) {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[11] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		} else {
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[13] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		}
	} else if (width == DDRCTL_EWDTH_32) {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		}
	} else {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[13] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
		}
	}

	/*
	 * For reduced bus widths, shift the decoded column positions up by
	 * @width entries and zero the vacated low slots.
	 */
	if (width) {
		for (index = 9; index > width; index--) {
			priv->col_shift[index] = priv->col_shift[index - width];
			priv->col_shift[index - width] = 0;
		}
	}

}
1272
1273static void setup_bank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1274{
1275 priv->bank_shift[0] = (addrmap[1] & BANK_MAX_VAL_MASK) + BANK_B0_BASE;
1276 priv->bank_shift[1] = ((addrmap[1] >> 8) &
1277 BANK_MAX_VAL_MASK) + BANK_B1_BASE;
1278 priv->bank_shift[2] = (((addrmap[1] >> 16) &
1279 BANK_MAX_VAL_MASK) == BANK_MAX_VAL_MASK) ? 0 :
1280 (((addrmap[1] >> 16) & BANK_MAX_VAL_MASK) +
1281 BANK_B2_BASE);
1282
1283}
1284
1285static void setup_bg_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1286{
1287 priv->bankgrp_shift[0] = (addrmap[8] &
1288 BANKGRP_MAX_VAL_MASK) + BANKGRP_B0_BASE;
1289 priv->bankgrp_shift[1] = (((addrmap[8] >> 8) & BANKGRP_MAX_VAL_MASK) ==
1290 BANKGRP_MAX_VAL_MASK) ? 0 : (((addrmap[8] >> 8)
1291 & BANKGRP_MAX_VAL_MASK) + BANKGRP_B1_BASE);
1292
1293}
1294
1295static void setup_rank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
1296{
1297 priv->rank_shift[0] = ((addrmap[0] & RANK_MAX_VAL_MASK) ==
1298 RANK_MAX_VAL_MASK) ? 0 : ((addrmap[0] &
1299 RANK_MAX_VAL_MASK) + RANK_B0_BASE);
1300}
1301
1302/**
1303 * setup_address_map - Set Address Map by querying ADDRMAP registers.
1304 * @priv: DDR memory controller private instance data.
1305 *
1306 * Set Address Map by querying ADDRMAP registers.
1307 *
1308 * Return: none.
1309 */
1310static void setup_address_map(struct synps_edac_priv *priv)
1311{
1312 u32 addrmap[12];
1313 int index;
1314
1315 for (index = 0; index < 12; index++) {
1316 u32 addrmap_offset;
1317
1318 addrmap_offset = ECC_ADDRMAP0_OFFSET + (index * 4);
1319 addrmap[index] = readl(priv->baseaddr + addrmap_offset);
1320 }
1321
1322 setup_row_address_map(priv, addrmap);
1323
1324 setup_column_address_map(priv, addrmap);
1325
1326 setup_bank_address_map(priv, addrmap);
1327
1328 setup_bg_address_map(priv, addrmap);
1329
1330 setup_rank_address_map(priv, addrmap);
1331}
1332#endif /* CONFIG_EDAC_DEBUG */
1333
ae9b56e3 1334/**
225af74d
MN
1335 * mc_probe - Check controller and bind driver.
1336 * @pdev: platform device.
ae9b56e3 1337 *
225af74d 1338 * Probe a specific controller instance for binding with the driver.
ae9b56e3
PCK
1339 *
1340 * Return: 0 if the controller instance was successfully bound to the
1341 * driver; otherwise, < 0 on error.
1342 */
bb894bc4 1343static int mc_probe(struct platform_device *pdev)
ae9b56e3 1344{
3d02a897 1345 const struct synps_platform_data *p_data;
ae9b56e3
PCK
1346 struct edac_mc_layer layers[2];
1347 struct synps_edac_priv *priv;
1b51adc6 1348 struct mem_ctl_info *mci;
ae9b56e3 1349 void __iomem *baseaddr;
1b51adc6 1350 int rc;
ae9b56e3 1351
b57c1a1e 1352 baseaddr = devm_platform_ioremap_resource(pdev, 0);
ae9b56e3
PCK
1353 if (IS_ERR(baseaddr))
1354 return PTR_ERR(baseaddr);
1355
3d02a897 1356 p_data = of_device_get_match_data(&pdev->dev);
84de0b49
MN
1357 if (!p_data)
1358 return -ENODEV;
1359
3d02a897 1360 if (!p_data->get_ecc_state(baseaddr)) {
ae9b56e3
PCK
1361 edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
1362 return -ENXIO;
1363 }
1364
1365 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
1366 layers[0].size = SYNPS_EDAC_NR_CSROWS;
1367 layers[0].is_virt_csrow = true;
1368 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1369 layers[1].size = SYNPS_EDAC_NR_CHANS;
1370 layers[1].is_virt_csrow = false;
1371
1372 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
1373 sizeof(struct synps_edac_priv));
1374 if (!mci) {
1375 edac_printk(KERN_ERR, EDAC_MC,
1376 "Failed memory allocation for mc instance\n");
1377 return -ENOMEM;
1378 }
1379
1380 priv = mci->pvt_info;
1381 priv->baseaddr = baseaddr;
3d02a897 1382 priv->p_data = p_data;
591c9466 1383 spin_lock_init(&priv->reglock);
3d02a897 1384
fa9f6b9e 1385 mc_init(mci, pdev);
ae9b56e3 1386
b500b4a0
MN
1387 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
1388 rc = setup_irq(mci, pdev);
1389 if (rc)
1390 goto free_edac_mc;
1391 }
1392
ae9b56e3
PCK
1393 rc = edac_mc_add_mc(mci);
1394 if (rc) {
1395 edac_printk(KERN_ERR, EDAC_MC,
1396 "Failed to register with EDAC core\n");
1397 goto free_edac_mc;
1398 }
1399
1a81361f
MN
1400#ifdef CONFIG_EDAC_DEBUG
1401 if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT) {
61d35648
ZX
1402 rc = edac_create_sysfs_attributes(mci);
1403 if (rc) {
1a81361f
MN
1404 edac_printk(KERN_ERR, EDAC_MC,
1405 "Failed to create sysfs entries\n");
1406 goto free_edac_mc;
1407 }
1408 }
1409
bd1d6da1 1410 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
1a81361f
MN
1411 setup_address_map(priv);
1412#endif
1413
ae9b56e3
PCK
1414 /*
1415 * Start capturing the correctable and uncorrectable errors. A write of
1416 * 0 starts the counters.
1417 */
b500b4a0
MN
1418 if (!(priv->p_data->quirks & DDR_ECC_INTR_SUPPORT))
1419 writel(0x0, baseaddr + ECC_CTRL_OFST);
1420
ae9b56e3
PCK
1421 return rc;
1422
1423free_edac_mc:
1424 edac_mc_free(mci);
1425
1426 return rc;
1427}
1428
1429/**
225af74d
MN
1430 * mc_remove - Unbind driver from controller.
1431 * @pdev: Platform device.
ae9b56e3
PCK
1432 *
1433 * Return: Unconditionally 0
1434 */
f30e2fac 1435static void mc_remove(struct platform_device *pdev)
ae9b56e3
PCK
1436{
1437 struct mem_ctl_info *mci = platform_get_drvdata(pdev);
b500b4a0
MN
1438 struct synps_edac_priv *priv = mci->pvt_info;
1439
1440 if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
1441 disable_intr(priv);
ae9b56e3 1442
1a81361f
MN
1443#ifdef CONFIG_EDAC_DEBUG
1444 if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT)
1445 edac_remove_sysfs_attributes(mci);
1446#endif
1447
ae9b56e3
PCK
1448 edac_mc_del_mc(&pdev->dev);
1449 edac_mc_free(mci);
ae9b56e3
PCK
1450}
1451
ae9b56e3
PCK
/* Platform driver glue: matched via the OF table declared earlier in the file. */
static struct platform_driver synps_edac_mc_driver = {
	.driver = {
		   .name = "synopsys-edac",
		   .of_match_table = synps_edac_match,
		   },
	.probe = mc_probe,
	.remove_new = mc_remove,
};

module_platform_driver(synps_edac_mc_driver);

MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Synopsys DDR ECC driver");
MODULE_LICENSE("GPL v2");
This page took 0.763872 seconds and 4 git commands to generate.