// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright Altera Corporation (C) 2012-2015
 */

#include <log.h>
#include <linux/string.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include <errno.h>
#include <hang.h>
#include "sequencer.h"

static const struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
	(struct socfpga_sdr_rw_load_manager *)
	(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);
static const struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs
	= (struct socfpga_sdr_rw_load_jump_manager *)
	(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);
static const struct socfpga_sdr_reg_file *sdr_reg_file =
	(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;
static const struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
	(struct socfpga_sdr_scc_mgr *)
	(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);
static const struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
	(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;
static const struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
	(struct socfpga_phy_mgr_cfg *)
	(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);
static const struct socfpga_data_mgr *data_mgr =
	(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;
static const struct socfpga_sdr_ctrl *sdr_ctrl =
	(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;

#define DELTA_D		1

/*
 * In order to reduce ROM size, most of the selectable calibration steps are
 * decided at compile time based on the user's calibration mode selection,
 * as captured by the STATIC_CALIB_STEPS selection below.
 *
 * However, to support simulation-time selection of fast simulation mode, where
 * we skip everything except the bare minimum, we need a few of the steps to
 * be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the
 * check, which is based on the RTL-supplied value, or we dynamically compute
 * the value to use based on the dynamically-chosen calibration mode.
 */

#define DLEVEL 0
#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
	STATIC_SKIP_DELAY_LOOPS)

#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
	((non_skip_value) & seq->skip_delay_mask)

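/*
 * A note on the macro above: seq->skip_delay_mask simply gates the
 * requested counter value, so an all-ones mask (e.g. 0xff) keeps the
 * value and a zero mask forces it to zero; this is how the fast
 * simulation "skip delay loops" step is presumably implemented:
 * SKIP_DELAY_LOOP_VALUE_OR_ZERO(0x6a) yields 0x6a or 0 depending on
 * the mask.
 */
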
bool dram_is_ddr(const u8 ddr)
{
	const struct socfpga_sdram_config *cfg = socfpga_get_sdram_config();
	const u8 type = (cfg->ctrl_cfg >> SDR_CTRLGRP_CTRLCFG_MEMTYPE_LSB) &
			SDR_CTRLGRP_CTRLCFG_MEMTYPE_MASK;

	if (ddr == 2 && type == 1)	/* DDR2 */
		return true;

	if (ddr == 3 && type == 2)	/* DDR3 */
		return true;

	return false;
}

static void set_failing_group_stage(struct socfpga_sdrseq *seq,
				    u32 group, u32 stage, u32 substage)
{
	/*
	 * Only set the global stage if there has not been any other
	 * failing group.
	 */
	if (seq->gbl.error_stage == CAL_STAGE_NIL) {
		seq->gbl.error_substage = substage;
		seq->gbl.error_stage = stage;
		seq->gbl.error_group = group;
	}
}

static void reg_file_set_group(u16 set_group)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
	set_sub_stage &= 0xff;
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}

/**
 * phy_mgr_initialize() - Initialize PHY Manager
 * @seq:	Sequencer state
 *
 * Initialize PHY Manager.
 */
static void phy_mgr_initialize(struct socfpga_sdrseq *seq)
{
	u32 ratio;

	debug("%s:%d\n", __func__, __LINE__);
	/* Calibration has control over path to memory */
	/*
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x3, &phy_mgr_cfg->mux_sel);

	/* USER: memory clock is not stable, so we begin initialization. */
	writel(0, &phy_mgr_cfg->reset_mem_stbl);

	/* USER: calibration status is all cleared to zero. */
	writel(0, &phy_mgr_cfg->cal_status);

	writel(0, &phy_mgr_cfg->cal_debug_info);

	/* Init params only if we do NOT skip calibration. */
	if ((seq->dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
		return;

	ratio = seq->rwcfg->mem_dq_per_read_dqs /
		seq->rwcfg->mem_virtual_groups_per_read_dqs;
	seq->param.read_correct_mask_vg = (1 << ratio) - 1;
	seq->param.write_correct_mask_vg = (1 << ratio) - 1;
	seq->param.read_correct_mask = (1 << seq->rwcfg->mem_dq_per_read_dqs)
		- 1;
	seq->param.write_correct_mask = (1 << seq->rwcfg->mem_dq_per_write_dqs)
		- 1;
}

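/*
 * For illustration, with an assumed geometry of 8 DQ pins per read DQS
 * and 2 virtual groups per read DQS: ratio = 8 / 2 = 4, so
 * read_correct_mask_vg = (1 << 4) - 1 = 0xf and
 * read_correct_mask = (1 << 8) - 1 = 0xff; one bit per DQ line that
 * must pass.
 */
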
/**
 * set_rank_and_odt_mask() - Set Rank and ODT mask
 * @seq:	Sequencer state
 * @rank:	Rank mask
 * @odt_mode:	ODT mode, OFF or READ_WRITE
 *
 * Set Rank and ODT mask (On-Die Termination).
 */
static void set_rank_and_odt_mask(struct socfpga_sdrseq *seq,
				  const u32 rank, const u32 odt_mode)
{
	u32 odt_mask_0 = 0;
	u32 odt_mask_1 = 0;
	u32 cs_and_odt_mask;

	if (odt_mode == RW_MGR_ODT_MODE_OFF) {
		odt_mask_0 = 0x0;
		odt_mask_1 = 0x0;
	} else {	/* RW_MGR_ODT_MODE_READ_WRITE */
		switch (seq->rwcfg->mem_number_of_ranks) {
		case 1:	/* 1 Rank */
			/* Read: ODT = 0 ; Write: ODT = 1 */
			odt_mask_0 = 0x0;
			odt_mask_1 = 0x1;
			break;
		case 2:	/* 2 Ranks */
			if (seq->rwcfg->mem_number_of_cs_per_dimm == 1) {
				/*
				 * - Dual-Slot, Single-Rank (1 CS per DIMM)
				 *   OR
				 * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM)
				 *
				 * Since MEM_NUMBER_OF_RANKS is 2, they
				 * are both single rank with 2 CS each
				 * (special for RDIMM).
				 *
				 * Read: Turn on ODT on the opposite rank
				 * Write: Turn on ODT on all ranks
				 */
				odt_mask_0 = 0x3 & ~(1 << rank);
				odt_mask_1 = 0x3;
				if (dram_is_ddr(2))
					odt_mask_1 &= ~(1 << rank);
			} else {
				/*
				 * - Single-Slot, Dual-Rank (2 CS per DIMM)
				 *
				 * Read: Turn ODT off on all ranks
				 * Write: Turn on ODT on active rank
				 */
				odt_mask_0 = 0x0;
				odt_mask_1 = 0x3 & (1 << rank);
			}
			break;
		case 4:	/* 4 Ranks */
			/*
			 * DDR3 Read, DDR2 Read/Write:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 *           +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  0  |
			 *     1     |  1  |  0  |  0  |  0  |
			 *     2     |  0  |  0  |  0  |  1  |
			 *     3     |  0  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *
			 * DDR3 Write:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Write To  +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  1  |
			 *     1     |  1  |  0  |  1  |  0  |
			 *     2     |  0  |  1  |  0  |  1  |
			 *     3     |  1  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 */
			switch (rank) {
			case 0:
				odt_mask_0 = 0x4;
				if (dram_is_ddr(2))
					odt_mask_1 = 0x4;
				else if (dram_is_ddr(3))
					odt_mask_1 = 0x5;
				break;
			case 1:
				odt_mask_0 = 0x8;
				if (dram_is_ddr(2))
					odt_mask_1 = 0x8;
				else if (dram_is_ddr(3))
					odt_mask_1 = 0xA;
				break;
			case 2:
				odt_mask_0 = 0x1;
				if (dram_is_ddr(2))
					odt_mask_1 = 0x1;
				else if (dram_is_ddr(3))
					odt_mask_1 = 0x5;
				break;
			case 3:
				odt_mask_0 = 0x2;
				if (dram_is_ddr(2))
					odt_mask_1 = 0x2;
				else if (dram_is_ddr(3))
					odt_mask_1 = 0xA;
				break;
			}
			break;
		}
	}

	cs_and_odt_mask = (0xFF & ~(1 << rank)) |
			  ((0xFF & odt_mask_0) << 8) |
			  ((0xFF & odt_mask_1) << 16);
	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}

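/*
 * A worked example of the register layout above (illustrative only):
 * for a 4-rank DDR3 write to rank 1, the tables give odt_mask_0 = 0x8
 * (read ODT) and odt_mask_1 = 0xA (write ODT), so
 *	cs_and_odt_mask = (0xFF & ~(1 << 1)) | (0x8 << 8) | (0xA << 16)
 *			= 0xFD | 0x800 | 0xA0000 = 0x000A08FD,
 * i.e. the CS field in bits [7:0] with the selected rank's bit cleared,
 * read ODT in bits [15:8] and write ODT in bits [23:16].
 */
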
/**
 * scc_mgr_set() - Set SCC Manager register
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register.
 */
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
	writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}

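/*
 * The registers are word-addressed per group. For example,
 * scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, 3, delay) writes delay to
 * SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET | (3 << 2),
 * i.e. the base of the DQS input-delay bank plus 0xC.
 */
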
/**
 * scc_mgr_initialize() - Initialize SCC Manager registers
 *
 * Initialize SCC Manager registers.
 */
static void scc_mgr_initialize(void)
{
	/*
	 * Clear register file for HPS. 16 (2^4) is the size of the
	 * full register file in the scc mgr:
	 *	RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
	 *			       MEM_IF_READ_DQS_WIDTH - 1);
	 */
	int i;

	for (i = 0; i < 16; i++) {
		debug_cond(DLEVEL >= 1, "%s:%d: Clearing SCC RFILE index %u\n",
			   __func__, __LINE__, i);
		scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, i, 0);
	}
}

static void scc_mgr_set_dqdqs_output_phase(u32 write_group, u32 phase)
{
	scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
}

static void scc_mgr_set_dqs_bus_in_delay(u32 read_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_en_phase(u32 read_group, u32 phase)
{
	scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
}

static void scc_mgr_set_dqs_en_delay(u32 read_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dq_in_delay(u32 dq_in_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_io_in_delay(struct socfpga_sdrseq *seq,
					u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET,
		    seq->rwcfg->mem_dq_per_write_dqs, delay);
}

static void scc_mgr_set_dm_in_delay(struct socfpga_sdrseq *seq, u32 dm,
				    u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET,
		    seq->rwcfg->mem_dq_per_write_dqs + 1 + dm,
		    delay);
}

static void scc_mgr_set_dq_out1_delay(u32 dq_in_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_out1_delay(struct socfpga_sdrseq *seq,
				       u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    seq->rwcfg->mem_dq_per_write_dqs, delay);
}

static void scc_mgr_set_dm_out1_delay(struct socfpga_sdrseq *seq, u32 dm,
				      u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    seq->rwcfg->mem_dq_per_write_dqs + 1 + dm,
		    delay);
}

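/*
 * A note on the indexing in the IO delay banks above: entries
 * 0 .. mem_dq_per_write_dqs - 1 address the DQ pins, entry
 * mem_dq_per_write_dqs addresses the DQS I/O, and entries from
 * mem_dq_per_write_dqs + 1 upward address the DM pins.
 */
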
/* load up dqs config settings */
static void scc_mgr_load_dqs(u32 dqs)
{
	writel(dqs, &sdr_scc_mgr->dqs_ena);
}

/* load up dqs io config settings */
static void scc_mgr_load_dqs_io(void)
{
	writel(0, &sdr_scc_mgr->dqs_io_ena);
}

/* load up dq config settings */
static void scc_mgr_load_dq(u32 dq_in_group)
{
	writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}

/* load up dm config settings */
static void scc_mgr_load_dm(u32 dm)
{
	writel(dm, &sdr_scc_mgr->dm_ena);
}

/**
 * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
 * @seq:	Sequencer state
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 * @update:	If non-zero, trigger SCC Manager update for all ranks
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register
 * and optionally triggers the SCC update for all ranks.
 */
static void scc_mgr_set_all_ranks(struct socfpga_sdrseq *seq,
				  const u32 off, const u32 grp, const u32 val,
				  const int update)
{
	u32 r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set(off, grp, val);

		if (update || (r == 0)) {
			writel(grp, &sdr_scc_mgr->dqs_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}
}

static void scc_mgr_set_dqs_en_phase_all_ranks(struct socfpga_sdrseq *seq,
					       u32 read_group, u32 phase)
{
	/*
	 * USER: although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * For efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(seq, SCC_MGR_DQS_EN_PHASE_OFFSET,
			      read_group, phase, 0);
}

static void scc_mgr_set_dqdqs_output_phase_all_ranks(struct socfpga_sdrseq *seq,
						     u32 write_group, u32 phase)
{
	/*
	 * USER: although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * For efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(seq, SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
			      write_group, phase, 0);
}

static void scc_mgr_set_dqs_en_delay_all_ranks(struct socfpga_sdrseq *seq,
					       u32 read_group, u32 delay)
{
	/*
	 * In shadow register mode, the T11 settings are stored in
	 * registers in the core, which are updated by the DQS_ENA
	 * signals. Not issuing the SCC_MGR_UPD command allows us to
	 * save lots of rank switching overhead, by calling
	 * select_shadow_regs_for_update with update_scan_chains
	 * set to 0.
	 */
	scc_mgr_set_all_ranks(seq, SCC_MGR_DQS_EN_DELAY_OFFSET,
			      read_group, delay, 1);
}

/**
 * scc_mgr_set_oct_out1_delay() - Set OCT output delay
 * @seq:	Sequencer state
 * @write_group:	Write group
 * @delay:	Delay value
 *
 * This function sets the OCT output delay in the SCC manager.
 */
static void scc_mgr_set_oct_out1_delay(struct socfpga_sdrseq *seq,
				       const u32 write_group, const u32 delay)
{
	const int ratio = seq->rwcfg->mem_if_read_dqs_width /
			  seq->rwcfg->mem_if_write_dqs_width;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager.
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
}

/**
 * scc_mgr_set_hhp_extras() - Set HHP extras.
 *
 * Load the fixed setting in the SCC manager HHP extras.
 */
static void scc_mgr_set_hhp_extras(void)
{
	/*
	 * Load the fixed setting in the SCC manager
	 * bits: 0:0 = 1'b1	- DQS bypass
	 * bits: 1:1 = 1'b1	- DQ bypass
	 * bits: 4:2 = 3'b001	- rfifo_mode
	 * bits: 6:5 = 2'b01	- rfifo clock_select
	 * bits: 7:7 = 1'b0	- separate gating from ungating setting
	 * bits: 8:8 = 1'b0	- separate OE from Output delay setting
	 */
	const u32 value = (0 << 8) | (0 << 7) | (1 << 5) |
			  (1 << 2) | (1 << 1) | (1 << 0);
	const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS |
			 SCC_MGR_HHP_GLOBALS_OFFSET |
			 SCC_MGR_HHP_EXTRAS_OFFSET;

	debug_cond(DLEVEL >= 1, "%s:%d Setting HHP Extras\n",
		   __func__, __LINE__);
	writel(value, addr);
	debug_cond(DLEVEL >= 1, "%s:%d Done Setting HHP Extras\n",
		   __func__, __LINE__);
}

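/*
 * Sanity check of the constant above:
 * (1 << 5) | (1 << 2) | (1 << 1) | (1 << 0) = 0x27, which matches the
 * bit-field breakdown in the comment (rfifo clock_select = 2'b01,
 * rfifo_mode = 3'b001, DQ and DQS bypass enabled, bits 7 and 8 clear).
 */
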
/**
 * scc_mgr_zero_all() - Zero all DQS config
 *
 * Zero all DQS config.
 */
static void scc_mgr_zero_all(struct socfpga_sdrseq *seq)
{
	int i, r;

	/*
	 * USER: zero all DQS config settings, across all groups and all
	 * shadow registers.
	 */
	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) {
			/*
			 * The phases actually don't exist on a per-rank basis,
			 * but there's no harm updating them several times, so
			 * let's keep the code simple.
			 */
			scc_mgr_set_dqs_bus_in_delay(i,
						     seq->iocfg->dqs_in_reserve);
			scc_mgr_set_dqs_en_phase(i, 0);
			scc_mgr_set_dqs_en_delay(i, 0);
		}

		for (i = 0; i < seq->rwcfg->mem_if_write_dqs_width; i++) {
			scc_mgr_set_dqdqs_output_phase(i, 0);
			/* Arria V/Cyclone V don't have out2. */
			scc_mgr_set_oct_out1_delay(seq, i,
						   seq->iocfg->dqs_out_reserve);
		}
	}

	/* Multicast to all DQS group enables. */
	writel(0xff, &sdr_scc_mgr->dqs_ena);
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_set_bypass_mode() - Set bypass mode and trigger SCC update
 * @write_group:	Write group
 *
 * Set bypass mode and trigger SCC update.
 */
static void scc_set_bypass_mode(const u32 write_group)
{
	/* Multicast to all DQ enables. */
	writel(0xff, &sdr_scc_mgr->dq_ena);
	writel(0xff, &sdr_scc_mgr->dm_ena);

	/* Update current DQS IO enable. */
	writel(0, &sdr_scc_mgr->dqs_io_ena);

	/* Update the DQS logic. */
	writel(write_group, &sdr_scc_mgr->dqs_ena);

	/* Hit update. */
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
 * @seq:	Sequencer state
 * @write_group:	Write group
 *
 * Load DQS settings for Write Group, do not trigger SCC update.
 */
static void scc_mgr_load_dqs_for_write_group(struct socfpga_sdrseq *seq,
					     const u32 write_group)
{
	const int ratio = seq->rwcfg->mem_if_read_dqs_width /
			  seq->rwcfg->mem_if_write_dqs_width;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager.
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		writel(base + i, &sdr_scc_mgr->dqs_ena);
}

/**
 * scc_mgr_zero_group() - Zero all configs for a group
 *
 * Zero DQ, DM, DQS and OCT configs for a group.
 */
static void scc_mgr_zero_group(struct socfpga_sdrseq *seq,
			       const u32 write_group, const int out_only)
{
	int i, r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/* Zero all DQ config settings. */
		for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) {
			scc_mgr_set_dq_out1_delay(i, 0);
			if (!out_only)
				scc_mgr_set_dq_in_delay(i, 0);
		}

		/* Multicast to all DQ enables. */
		writel(0xff, &sdr_scc_mgr->dq_ena);

		/* Zero all DM config settings. */
		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
			if (!out_only)
				scc_mgr_set_dm_in_delay(seq, i, 0);
			scc_mgr_set_dm_out1_delay(seq, i, 0);
		}

		/* Multicast to all DM enables. */
		writel(0xff, &sdr_scc_mgr->dm_ena);

		/* Zero all DQS IO settings. */
		if (!out_only)
			scc_mgr_set_dqs_io_in_delay(seq, 0);

		/* Arria V/Cyclone V don't have out2. */
		scc_mgr_set_dqs_out1_delay(seq, seq->iocfg->dqs_out_reserve);
		scc_mgr_set_oct_out1_delay(seq, write_group,
					   seq->iocfg->dqs_out_reserve);
		scc_mgr_load_dqs_for_write_group(seq, write_group);

		/* Multicast to all DQS IO enables (only 1 in total). */
		writel(0, &sdr_scc_mgr->dqs_io_ena);

		/* Hit update to zero everything. */
		writel(0, &sdr_scc_mgr->update);
	}
}

/*
 * Apply and load a particular input delay for the DQ pins in a group;
 * group_bgn is the index of the first dq pin (in the write group).
 */
static void scc_mgr_apply_group_dq_in_delay(struct socfpga_sdrseq *seq,
					    u32 group_bgn, u32 delay)
{
	u32 i, p;

	for (i = 0, p = group_bgn; i < seq->rwcfg->mem_dq_per_read_dqs;
	     i++, p++) {
		scc_mgr_set_dq_in_delay(p, delay);
		scc_mgr_load_dq(p);
	}
}

/**
 * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the
 * DQ pins in a group
 * @delay:	Delay value
 *
 * Apply and load a particular output delay for the DQ pins in a group.
 */
static void scc_mgr_apply_group_dq_out1_delay(struct socfpga_sdrseq *seq,
					      const u32 delay)
{
	int i;

	for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) {
		scc_mgr_set_dq_out1_delay(i, delay);
		scc_mgr_load_dq(i);
	}
}

/* apply and load a particular output delay for the DM pins in a group */
static void scc_mgr_apply_group_dm_out1_delay(struct socfpga_sdrseq *seq,
					      u32 delay1)
{
	u32 i;

	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		scc_mgr_set_dm_out1_delay(seq, i, delay1);
		scc_mgr_load_dm(i);
	}
}

/* apply and load delay on both DQS and OCT out1 */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(struct socfpga_sdrseq *seq,
						    u32 write_group, u32 delay)
{
	scc_mgr_set_dqs_out1_delay(seq, delay);
	scc_mgr_load_dqs_io();

	scc_mgr_set_oct_out1_delay(seq, write_group, delay);
	scc_mgr_load_dqs_for_write_group(seq, write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output
 * side: DQ, DM, DQS, OCT
 * @seq:	Sequencer state
 * @write_group:	Write group
 * @delay:	Delay value
 *
 * Apply a delay to the entire output side: DQ, DM, DQS, OCT.
 */
static void scc_mgr_apply_group_all_out_delay_add(struct socfpga_sdrseq *seq,
						  const u32 write_group,
						  const u32 delay)
{
	u32 i, new_delay;

	/* DQ shift */
	for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++)
		scc_mgr_load_dq(i);

	/* DM shift */
	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
		scc_mgr_load_dm(i);

	/* DQS shift */
	new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
	if (new_delay > seq->iocfg->io_out2_delay_max) {
		debug_cond(DLEVEL >= 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay, new_delay,
			   seq->iocfg->io_out2_delay_max,
			   new_delay - seq->iocfg->io_out2_delay_max);
		new_delay -= seq->iocfg->io_out2_delay_max;
		scc_mgr_set_dqs_out1_delay(seq, new_delay);
	}

	scc_mgr_load_dqs_io();

	/* OCT shift */
	new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
	if (new_delay > seq->iocfg->io_out2_delay_max) {
		debug_cond(DLEVEL >= 1,
			   "%s:%d (%u, %u) OCT: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay,
			   new_delay, seq->iocfg->io_out2_delay_max,
			   new_delay - seq->iocfg->io_out2_delay_max);
		new_delay -= seq->iocfg->io_out2_delay_max;
		scc_mgr_set_oct_out1_delay(seq, write_group, new_delay);
	}

	scc_mgr_load_dqs_for_write_group(seq, write_group);
}

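/*
 * A worked example of the OUT2-to-OUT1 spill above, with hypothetical
 * values: if io_out2_delay_max = 31, the current OUT2 delay is 20 and
 * the requested extra delay is 15, then new_delay = 35 > 31, so the
 * excess (35 - 31 = 4) is moved into the coarser OUT1 delay stage.
 */
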
/**
 * scc_mgr_apply_group_all_out_delay_add_all_ranks() - Apply a delay to the
 * entire output side to all ranks
 * @seq:	Sequencer state
 * @write_group:	Write group
 * @delay:	Delay value
 *
 * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks.
 */
static void
scc_mgr_apply_group_all_out_delay_add_all_ranks(struct socfpga_sdrseq *seq,
						const u32 write_group,
						const u32 delay)
{
	int r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_all_out_delay_add(seq, write_group, delay);
		writel(0, &sdr_scc_mgr->update);
	}
}

/**
 * set_jump_as_return() - Return instruction optimization
 *
 * This optimization, used to recover some slots in the DDR3 inst_rom,
 * could be applied to other protocols if we wanted to.
 */
static void set_jump_as_return(struct socfpga_sdrseq *seq)
{
	/*
	 * To save space, we replace return with a jump to a special shared
	 * RETURN instruction, and set the counter to a large value so that
	 * the jump is always taken.
	 */
	writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
	writel(seq->rwcfg->rreturn, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}

/**
 * delay_for_n_mem_clocks() - Delay for N memory clocks
 * @seq:	Sequencer state
 * @clocks:	Length of the delay
 *
 * Delay for N memory clocks.
 */
static void delay_for_n_mem_clocks(struct socfpga_sdrseq *seq,
				   const u32 clocks)
{
	u32 afi_clocks;
	u16 c_loop;
	u8 inner;
	u8 outer;

	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);

	/* Scale (rounding up) to get afi clocks. */
	afi_clocks = DIV_ROUND_UP(clocks, seq->misccfg->afi_rate_ratio);
	if (afi_clocks)	/* Temporary underflow protection */
		afi_clocks--;

	/*
	 * Note, we don't bother accounting for being off a little
	 * bit because of a few extra instructions in outer loops.
	 * Note, the loops have a test at the end, and do the test
	 * before the decrement, and so always perform the loop
	 * 1 time more than the counter value.
	 */
	c_loop = afi_clocks >> 16;
	outer = c_loop ? 0xff : (afi_clocks >> 8);
	inner = outer ? 0xff : afi_clocks;

	/*
	 * rom instructions are structured as follows:
	 *
	 * IDLE_LOOP2: jnz cntr0, TARGET_A
	 * IDLE_LOOP1: jnz cntr1, TARGET_B
	 *             return
	 *
	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
	 * TARGET_B is set to IDLE_LOOP2 as well.
	 *
	 * If we have no outer loop, though, then we can use IDLE_LOOP1 only,
	 * set TARGET_B to IDLE_LOOP1, and skip IDLE_LOOP2 entirely.
	 *
	 * A little confusing, but it helps save precious space in the
	 * inst_rom and sequencer rom, keeps the delays more accurate and
	 * reduces overhead.
	 */
	if (afi_clocks < 0x100) {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(seq->rwcfg->idle_loop1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(seq->rwcfg->idle_loop1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	} else {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr0);

		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(seq->rwcfg->idle_loop2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(seq->rwcfg->idle_loop2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		do {
			writel(seq->rwcfg->idle_loop2,
			       SDR_PHYGRP_RWMGRGRP_ADDRESS |
			       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
		} while (c_loop-- != 0);
	}
	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}

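/*
 * A worked example of the counter split above (illustrative numbers):
 * for clocks = 1000 with afi_rate_ratio = 2, afi_clocks becomes
 * DIV_ROUND_UP(1000, 2) - 1 = 499 = 0x1f3. Then c_loop = 0x1f3 >> 16 = 0,
 * outer = 0x1f3 >> 8 = 1 and inner = 0xff, so the nested IDLE_LOOP2 form
 * is used with cntr0 = 0xff and cntr1 = 1.
 */
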
static void delay_for_n_ns(struct socfpga_sdrseq *seq, const u32 ns)
{
	delay_for_n_mem_clocks(seq, (ns * seq->misccfg->afi_clk_freq *
				     seq->misccfg->afi_rate_ratio) / 1000);
}

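/*
 * For example, assuming afi_clk_freq is given in MHz: with
 * afi_clk_freq = 266 and afi_rate_ratio = 2, delay_for_n_ns(seq, 200)
 * requests (200 * 266 * 2) / 1000 = 106 memory-clock cycles, which
 * delay_for_n_mem_clocks() then scales back down by the AFI rate ratio.
 */
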
/**
 * rw_mgr_mem_init_load_regs() - Load instruction registers
 * @seq:	Sequencer state
 * @cntr0:	Counter 0 value
 * @cntr1:	Counter 1 value
 * @cntr2:	Counter 2 value
 * @jump:	Jump instruction value
 *
 * Load instruction registers.
 */
static void rw_mgr_mem_init_load_regs(struct socfpga_sdrseq *seq,
				      u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	/* Load counters */
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(jump, grpaddr);
}

/**
 * rw_mgr_mem_load_user_ddr2() - Load user calibration values for DDR2
 * @seq:	Sequencer state
 * @handoff:	Indicate whether this is initialization or handoff phase
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user_ddr2(struct socfpga_sdrseq *seq,
				      const int handoff)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	u32 r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r++) {
		/* set rank */
		set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		writel(seq->rwcfg->precharge_all, grpaddr);

		writel(seq->rwcfg->emr2, grpaddr);
		writel(seq->rwcfg->emr3, grpaddr);
		writel(seq->rwcfg->emr, grpaddr);

		if (handoff) {
			writel(seq->rwcfg->mr_user, grpaddr);
			continue;
		}

		writel(seq->rwcfg->mr_dll_reset, grpaddr);

		writel(seq->rwcfg->precharge_all, grpaddr);

		writel(seq->rwcfg->refresh, grpaddr);
		delay_for_n_ns(seq, 200);
		writel(seq->rwcfg->refresh, grpaddr);
		delay_for_n_ns(seq, 200);

		writel(seq->rwcfg->mr_calib, grpaddr);
		writel(/*seq->rwcfg->*/0x0b, grpaddr); // EMR_OCD_ENABLE
		writel(seq->rwcfg->emr, grpaddr);
		delay_for_n_mem_clocks(seq, 200);
	}
}

/**
 * rw_mgr_mem_load_user_ddr3() - Load user calibration values
 * @seq:	Sequencer state
 * @fin1:	Final instruction 1
 * @fin2:	Final instruction 2
 * @precharge:	If 1, precharge the banks at the end
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user_ddr3(struct socfpga_sdrseq *seq,
				      const u32 fin1, const u32 fin2,
				      const int precharge)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	u32 r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r++) {
		/* set rank */
		set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		if (precharge)
			writel(seq->rwcfg->precharge_all, grpaddr);

		/*
		 * USER: use mirrored commands for odd ranks if address
		 * mirroring is on.
		 */
		if ((seq->rwcfg->mem_address_mirroring >> r) & 0x1) {
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs2_mirr, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs3_mirr, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs1_mirr, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(fin1, grpaddr);
		} else {
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs2, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs3, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs1, grpaddr);
			set_jump_as_return(seq);
			writel(fin2, grpaddr);
		}

		if (precharge)
			continue;

		set_jump_as_return(seq);
		writel(seq->rwcfg->zqcl, grpaddr);

		/* tZQinit = tDLLK = 512 ck cycles */
		delay_for_n_mem_clocks(seq, 512);
	}
}

/**
 * rw_mgr_mem_load_user() - Load user calibration values
 * @seq:	Sequencer state
 * @fin1:	Final instruction 1
 * @fin2:	Final instruction 2
 * @precharge:	If 1, precharge the banks at the end
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user(struct socfpga_sdrseq *seq,
				 const u32 fin1, const u32 fin2,
				 const int precharge)
{
	if (dram_is_ddr(2))
		rw_mgr_mem_load_user_ddr2(seq, precharge);
	else if (dram_is_ddr(3))
		rw_mgr_mem_load_user_ddr3(seq, fin1, fin2, precharge);
	else
		hang();
}

/**
 * rw_mgr_mem_initialize() - Initialize RW Manager
 * @seq:	Sequencer state
 *
 * Initialize RW Manager.
 */
static void rw_mgr_mem_initialize(struct socfpga_sdrseq *seq)
{
	debug("%s:%d\n", __func__, __LINE__);

	/* The reset / cke part of initialization is broadcast to all ranks */
	if (dram_is_ddr(3)) {
		writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
	}

	/*
	 * Here's how you load a register for a loop:
	 * counters are located @ 0x800,
	 * jump addresses are located @ 0xC00.
	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C.
	 * I know this ain't pretty, but Avalon bus throws away the 2 least
	 * significant bits.
	 */

	/* Start with memory RESET activated */

	/* tINIT = 200us */

	/*
	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
	 * If a and b are the numbers of iterations in 2 nested loops,
	 * it takes the following number of cycles to complete the operation:
	 *	number_of_cycles = ((2 + n) * a + 2) * b,
	 * where n is the number of instructions in the inner loop.
	 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
	 * b = 6A.
	 */
	rw_mgr_mem_init_load_regs(seq, seq->misccfg->tinit_cntr0_val,
				  seq->misccfg->tinit_cntr1_val,
				  seq->misccfg->tinit_cntr2_val,
				  seq->rwcfg->init_reset_0_cke_0);

	/* Indicate that memory is stable. */
	writel(1, &phy_mgr_cfg->reset_mem_stbl);

	if (dram_is_ddr(2)) {
		writel(seq->rwcfg->nop, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					RW_MGR_RUN_SINGLE_GROUP_OFFSET);

		/* Bring up clock enable. */

		/* tXRP < 400 ck cycles */
		delay_for_n_ns(seq, 400);
	} else if (dram_is_ddr(3)) {
		/*
		 * Transition the RESET to high.
		 * Wait for 500us.
		 */

		/*
		 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
		 * If a and b are the numbers of iterations in 2 nested loops,
		 * it takes the following number of cycles to complete the
		 * operation: number_of_cycles = ((2 + n) * a + 2) * b,
		 * where n is the number of instructions in the inner loop.
		 * One possible solution is
		 * n = 2 , a = 131 , b = 256 => a = 83, b = FF.
		 */
		rw_mgr_mem_init_load_regs(seq, seq->misccfg->treset_cntr0_val,
					  seq->misccfg->treset_cntr1_val,
					  seq->misccfg->treset_cntr2_val,
					  seq->rwcfg->init_reset_1_cke_0);
		/* Bring up clock enable. */

		/* tXRP < 250 ck cycles */
		delay_for_n_mem_clocks(seq, 250);
	}

	rw_mgr_mem_load_user(seq, seq->rwcfg->mrs0_dll_reset_mirr,
			     seq->rwcfg->mrs0_dll_reset, 0);
}

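/*
 * A quick check of the loop arithmetic used above: for tINIT, n = 0,
 * a = 256, b = 106 gives ((2 + 0) * 256 + 2) * 106 = 54,484 cycles,
 * covering the ~54,000 cycles needed for 200us at 3.75ns per cycle.
 * For the DDR3 reset wait, n = 2, a = 131, b = 256 gives
 * ((2 + 2) * 131 + 2) * 256 = 134,656 cycles, covering the ~134,000
 * needed for 500us.
 */
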
/**
 * rw_mgr_mem_handoff() - Hand off the memory to user
 * @seq:	Sequencer state
 *
 * At the end of calibration we have to program the user settings in
 * and hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(struct socfpga_sdrseq *seq)
{
	rw_mgr_mem_load_user(seq, seq->rwcfg->mrs0_user_mirr,
			     seq->rwcfg->mrs0_user, 1);
	/*
	 * Need to wait tMOD (12CK or 15ns) time before issuing other
	 * commands, but we will have plenty of NIOS cycles before actual
	 * handoff, so it's okay.
	 */
}

/**
 * rw_mgr_mem_calibrate_write_test_issue() - Issue write test command
 * @seq:	Sequencer state
 * @group:	Write Group
 * @test_dm:	If non-zero, test the data mask as well
 *
 * Issue write test command. Two variants are provided, one that just tests
 * a write pattern and another that tests datamask functionality.
 */
static void rw_mgr_mem_calibrate_write_test_issue(struct socfpga_sdrseq *seq,
						  u32 group, u32 test_dm)
{
	const u32 quick_write_mode =
		(STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) &&
		seq->misccfg->enable_super_quick_calibration;
	u32 mcc_instruction;
	u32 rw_wl_nop_cycles;

	/*
	 * Set counter and jump addresses for the right
	 * number of NOP cycles.
	 * The number of supported NOP cycles can range from -1 to infinity.
	 * Three different cases are handled:
	 *
	 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
	 *    mechanism will be used to insert the right number of NOPs.
	 *
	 * 2. For a number of NOP cycles equal to 0, the micro-instruction
	 *    issuing the write command will jump straight to the
	 *    micro-instruction that turns on DQS (for DDRx), or outputs write
	 *    data (for RLD), skipping the NOP micro-instruction altogether.
	 *
	 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
	 *    turned on in the same micro-instruction that issues the write
	 *    command. Then we need to directly jump to the micro-instruction
	 *    that sends out the data.
	 *
	 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
	 *       (2 and 3). One jump-counter (0) is used to perform multiple
	 *       write-read operations.
	 *       One counter is left to issue this command in "multiple-group"
	 *       mode.
	 */
	rw_wl_nop_cycles = seq->gbl.rw_wl_nop_cycles;

	if (rw_wl_nop_cycles == -1) {
		/*
		 * CNTR 2 - We want to execute the special write operation that
		 * turns on DQS right away and then skip directly to the
		 * instruction that sends out the data. We set the counter to a
		 * large number so that the jump is always taken.
		 */
		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);

		/* CNTR 3 - Not used */
		if (test_dm) {
			mcc_instruction = seq->rwcfg->lfsr_wr_rd_dm_bank_0_wl_1;
			writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_data,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
			writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_nop,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		} else {
			mcc_instruction = seq->rwcfg->lfsr_wr_rd_bank_0_wl_1;
			writel(seq->rwcfg->lfsr_wr_rd_bank_0_data,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
			writel(seq->rwcfg->lfsr_wr_rd_bank_0_nop,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		}
	} else if (rw_wl_nop_cycles == 0) {
		/*
		 * CNTR 2 - We want to skip the NOP operation and go straight
		 * to the DQS enable instruction. We set the counter to a large
		 * number so that the jump is always taken.
		 */
		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);

		/* CNTR 3 - Not used */
		if (test_dm) {
			mcc_instruction = seq->rwcfg->lfsr_wr_rd_dm_bank_0;
			writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_dqs,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
		} else {
			mcc_instruction = seq->rwcfg->lfsr_wr_rd_bank_0;
			writel(seq->rwcfg->lfsr_wr_rd_bank_0_dqs,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
		}
	} else {
		/*
		 * CNTR 2 - In this case we want to execute the next instruction
		 * and NOT take the jump. So we set the counter to 0. The jump
		 * address doesn't count.
		 */
		writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		/*
		 * CNTR 3 - Set the nop counter to the number of cycles we
		 * need to loop for, minus 1.
		 */
		writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
		if (test_dm) {
			mcc_instruction = seq->rwcfg->lfsr_wr_rd_dm_bank_0;
			writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_nop,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		} else {
			mcc_instruction = seq->rwcfg->lfsr_wr_rd_bank_0;
			writel(seq->rwcfg->lfsr_wr_rd_bank_0_nop,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		}
	}

	writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
		  RW_MGR_RESET_READ_DATAPATH_OFFSET);

	if (quick_write_mode)
		writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
	else
		writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);

	writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);

	/*
	 * CNTR 1 - This is used to ensure enough time elapses
	 * for read data to come back.
	 */
	writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);

	if (test_dm) {
		writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_wait,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	} else {
		writel(seq->rwcfg->lfsr_wr_rd_bank_0_wait,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	}

	writel(mcc_instruction, (SDR_PHYGRP_RWMGRGRP_ADDRESS |
				 RW_MGR_RUN_SINGLE_GROUP_OFFSET) +
				(group << 2));
}

/**
 * rw_mgr_mem_calibrate_write_test() - Test writes, check for single/multiple
 * pass
 * @seq:	Sequencer state
 * @rank_bgn:	Rank number
 * @write_group:	Write Group
 * @use_dm:	Use DM
 * @all_correct:	All bits must be correct in the mask
 * @bit_chk:	Resulting bit mask after the test
 * @all_ranks:	Test all ranks
 *
 * Test writes, can check for a single bit pass or multiple bit pass.
 */
static int
rw_mgr_mem_calibrate_write_test(struct socfpga_sdrseq *seq,
				const u32 rank_bgn, const u32 write_group,
				const u32 use_dm, const u32 all_correct,
				u32 *bit_chk, const u32 all_ranks)
{
	const u32 rank_end = all_ranks ?
				seq->rwcfg->mem_number_of_ranks :
				(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	const u32 shift_ratio = seq->rwcfg->mem_dq_per_write_dqs /
				seq->rwcfg->mem_virtual_groups_per_write_dqs;
	const u32 correct_mask_vg = seq->param.write_correct_mask_vg;

	u32 tmp_bit_chk, base_rw_mgr, group;
	int vg, r;

	*bit_chk = seq->param.write_correct_mask;

	for (r = rank_bgn; r < rank_end; r++) {
		/* Set rank */
		set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE);

		tmp_bit_chk = 0;
		for (vg = seq->rwcfg->mem_virtual_groups_per_write_dqs - 1;
		     vg >= 0; vg--) {
			/* Reset the FIFOs to get pointers to known state. */
			writel(0, &phy_mgr_cmd->fifo_reset);

			group = write_group *
				seq->rwcfg->mem_virtual_groups_per_write_dqs
				+ vg;
			rw_mgr_mem_calibrate_write_test_issue(seq, group,
							      use_dm);

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk <<= shift_ratio;
			tmp_bit_chk |= (correct_mask_vg & ~(base_rw_mgr));
		}

		*bit_chk &= tmp_bit_chk;
	}

	set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF);
	if (all_correct) {
		debug_cond(DLEVEL >= 2,
			   "write_test(%u,%u,ALL) : %u == %u => %i\n",
			   write_group, use_dm, *bit_chk,
			   seq->param.write_correct_mask,
			   *bit_chk == seq->param.write_correct_mask);
		return *bit_chk == seq->param.write_correct_mask;
	} else {
		debug_cond(DLEVEL >= 2,
			   "write_test(%u,%u,ONE) : %u != %i => %i\n",
			   write_group, use_dm, *bit_chk, 0, *bit_chk != 0);
		return *bit_chk != 0x00;
	}
}

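/*
 * An illustration of the bit_chk accumulation used in the tests above,
 * with an assumed geometry of 8 DQ per write DQS and 2 virtual groups
 * (shift_ratio = 4, correct_mask_vg = 0xf): if virtual group 1 reports
 * error bits 0x2 and virtual group 0 reports none, then
 * tmp_bit_chk = ((0xf & ~0x2) << 4) | (0xf & ~0x0) = 0xdf; a set bit
 * marks a DQ line that passed on every rank tested.
 */
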
/**
 * rw_mgr_mem_calibrate_read_test_patterns() - Read back test patterns
 * @seq:	Sequencer state
 * @rank_bgn:	Rank number
 * @group:	Read/Write Group
 * @all_ranks:	Test all ranks
 *
 * Performs a guaranteed read on the patterns we are going to use during a
 * read test to ensure memory works.
 */
static int
rw_mgr_mem_calibrate_read_test_patterns(struct socfpga_sdrseq *seq,
					const u32 rank_bgn, const u32 group,
					const u32 all_ranks)
{
	const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	const u32 addr_offset =
		(group * seq->rwcfg->mem_virtual_groups_per_read_dqs)
		<< 2;
	const u32 rank_end = all_ranks ?
				seq->rwcfg->mem_number_of_ranks :
				(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	const u32 shift_ratio = seq->rwcfg->mem_dq_per_read_dqs /
				seq->rwcfg->mem_virtual_groups_per_read_dqs;
	const u32 correct_mask_vg = seq->param.read_correct_mask_vg;

	u32 tmp_bit_chk, base_rw_mgr, bit_chk;
	int vg, r;
	int ret = 0;

	bit_chk = seq->param.read_correct_mask;

	for (r = rank_bgn; r < rank_end; r++) {
		/* Set rank */
		set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst of read commands */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(seq->rwcfg->guaranteed_read,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(seq->rwcfg->guaranteed_read_cont,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		tmp_bit_chk = 0;
		for (vg = seq->rwcfg->mem_virtual_groups_per_read_dqs - 1;
		     vg >= 0; vg--) {
			/* Reset the FIFOs to get pointers to known state. */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);
			writel(seq->rwcfg->guaranteed_read,
			       addr + addr_offset + (vg << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk <<= shift_ratio;
			tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;
		}

		bit_chk &= tmp_bit_chk;
	}

	writel(seq->rwcfg->clear_dqs_enable, addr + (group << 2));

	set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF);

	if (bit_chk != seq->param.read_correct_mask)
		ret = -EIO;

	debug_cond(DLEVEL >= 1,
		   "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n",
		   __func__, __LINE__, group, bit_chk,
		   seq->param.read_correct_mask, ret);

	return ret;
}

/**
 * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read
 * test
 * @seq:	Sequencer state
 * @rank_bgn:	Rank number
 * @all_ranks:	Test all ranks
 *
 * Load up the patterns we are going to use during a read test.
 */
static void rw_mgr_mem_calibrate_read_load_patterns(struct socfpga_sdrseq *seq,
						    const u32 rank_bgn,
						    const int all_ranks)
{
	const u32 rank_end = all_ranks ?
				seq->rwcfg->mem_number_of_ranks :
				(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	u32 r;

	debug("%s:%d\n", __func__, __LINE__);

	for (r = rank_bgn; r < rank_end; r++) {
		/* set rank */
		set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(seq->rwcfg->guaranteed_write_wait0,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(seq->rwcfg->guaranteed_write_wait1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);

		writel(seq->rwcfg->guaranteed_write_wait2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(seq->rwcfg->guaranteed_write_wait3,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);

		writel(seq->rwcfg->guaranteed_write,
		       SDR_PHYGRP_RWMGRGRP_ADDRESS |
		       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}

	set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF);
}

783fcf59
MV
1479/**
1480 * rw_mgr_mem_calibrate_read_test() - Perform READ test on single rank
1481 * @rank_bgn: Rank number
1482 * @group: Read/Write group
1483 * @num_tries: Number of retries of the test
1484 * @all_correct: All bits must be correct in the mask
1485 * @bit_chk: Resulting bit mask after the test
1486 * @all_groups: Test all R/W groups
1487 * @all_ranks: Test all ranks
1488 *
1489 * Try a read and see if it returns correct data back. Test has dummy reads
1490 * inserted into the mix used to align DQS enable. Test has more thorough
1491 * checks than the regular read test.
3da42859 1492 */
3cb8bf3f 1493static int
285b3cb9
SG
1494rw_mgr_mem_calibrate_read_test(struct socfpga_sdrseq *seq,
1495 const u32 rank_bgn, const u32 group,
3cb8bf3f
MV
1496 const u32 num_tries, const u32 all_correct,
1497 u32 *bit_chk,
1498 const u32 all_groups, const u32 all_ranks)
3da42859 1499{
285b3cb9 1500 const u32 rank_end = all_ranks ? seq->rwcfg->mem_number_of_ranks :
3da42859 1501 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
3cb8bf3f
MV
1502 const u32 quick_read_mode =
1503 ((STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS) &&
285b3cb9
SG
1504 seq->misccfg->enable_super_quick_calibration);
1505 u32 correct_mask_vg = seq->param.read_correct_mask_vg;
3cb8bf3f
MV
1506 u32 tmp_bit_chk;
1507 u32 base_rw_mgr;
1508 u32 addr;
3da42859 1509
3cb8bf3f 1510 int r, vg, ret;
3853d65e 1511
285b3cb9 1512 *bit_chk = seq->param.read_correct_mask;
3da42859
DN
1513
1514 for (r = rank_bgn; r < rank_end; r++) {
3da42859 1515 /* set rank */
285b3cb9 1516 set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE);
3da42859 1517
1273dd9e 1518 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);
3da42859 1519
285b3cb9 1520 writel(seq->rwcfg->read_b2b_wait1,
139823ec 1521 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
3da42859 1522
1273dd9e 1523 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
285b3cb9 1524 writel(seq->rwcfg->read_b2b_wait2,
139823ec 1525 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
3da42859 1526
3da42859 1527 if (quick_read_mode)
1273dd9e 1528 writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
3da42859
DN
1529 /* need at least two (1+1) reads to capture failures */
1530 else if (all_groups)
1273dd9e 1531 writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
3da42859 1532 else
1273dd9e 1533 writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(seq->rwcfg->read_b2b,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
		if (all_groups)
			writel(seq->rwcfg->mem_if_read_dqs_width *
			       seq->rwcfg->mem_virtual_groups_per_read_dqs - 1,
			       &sdr_rw_load_mgr_regs->load_cntr3);
		else
			writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(seq->rwcfg->read_b2b,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);

		tmp_bit_chk = 0;
		for (vg = seq->rwcfg->mem_virtual_groups_per_read_dqs - 1;
		     vg >= 0; vg--) {
			/* Reset the FIFOs to get pointers to known state. */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);

			if (all_groups) {
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_ALL_GROUPS_OFFSET;
			} else {
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET;
			}

			writel(seq->rwcfg->read_b2b, addr +
			       ((group *
				 seq->rwcfg->mem_virtual_groups_per_read_dqs +
				 vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk <<=
				seq->rwcfg->mem_dq_per_read_dqs /
				seq->rwcfg->mem_virtual_groups_per_read_dqs;
			tmp_bit_chk |= correct_mask_vg & ~(base_rw_mgr);
		}

		*bit_chk &= tmp_bit_chk;
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(seq->rwcfg->clear_dqs_enable, addr + (group << 2));

	set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF);

	if (all_correct) {
		ret = (*bit_chk == seq->param.read_correct_mask);
		debug_cond(DLEVEL >= 2,
			   "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %i\n",
			   __func__, __LINE__, group, all_groups, *bit_chk,
			   seq->param.read_correct_mask, ret);
	} else {
		ret = (*bit_chk != 0x00);
		debug_cond(DLEVEL >= 2,
			   "%s:%d read_test(%u,ONE,%u) => (%u != %u) => %i\n",
			   __func__, __LINE__, group, all_groups, *bit_chk,
			   0, ret);
	}

	return ret;
}

/**
 * rw_mgr_mem_calibrate_read_test_all_ranks() - Perform READ test on all ranks
 * @grp:		Read/Write group
 * @num_tries:		Number of retries of the test
 * @all_correct:	All bits must be correct in the mask
 * @all_groups:		Test all R/W groups
 *
 * Perform a READ test across all memory ranks.
 */
static int
rw_mgr_mem_calibrate_read_test_all_ranks(struct socfpga_sdrseq *seq,
					 const u32 grp, const u32 num_tries,
					 const u32 all_correct,
					 const u32 all_groups)
{
	u32 bit_chk;

	return rw_mgr_mem_calibrate_read_test(seq, 0, grp, num_tries,
					      all_correct, &bit_chk, all_groups,
					      1);
}

/**
 * rw_mgr_incr_vfifo() - Increase VFIFO value
 * @grp:	Read/Write group
 *
 * Increase VFIFO value.
 */
static void rw_mgr_incr_vfifo(const u32 grp)
{
	writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
}

/**
 * rw_mgr_decr_vfifo() - Decrease VFIFO value
 * @grp:	Read/Write group
 *
 * Decrease VFIFO value.
 */
static void rw_mgr_decr_vfifo(struct socfpga_sdrseq *seq, const u32 grp)
{
	u32 i;

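	/*
	 * The VFIFO pointer only moves forward, so stepping back by one is
	 * implemented as (read_valid_fifo_size - 1) increments, wrapping
	 * the pointer around to the previous position.
	 */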
	for (i = 0; i < seq->misccfg->read_valid_fifo_size - 1; i++)
		rw_mgr_incr_vfifo(grp);
}

/**
 * find_vfifo_failing_read() - Push VFIFO to get a failing read
 * @grp:	Read/Write group
 *
 * Push VFIFO until a failing read happens.
 */
static int find_vfifo_failing_read(struct socfpga_sdrseq *seq,
				   const u32 grp)
{
	u32 v, ret, fail_cnt = 0;

	for (v = 0; v < seq->misccfg->read_valid_fifo_size; v++) {
		debug_cond(DLEVEL >= 2, "%s:%d: vfifo %u\n",
			   __func__, __LINE__, v);
		ret = rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1,
							       PASS_ONE_BIT, 0);
		if (!ret) {
			fail_cnt++;

			if (fail_cnt == 2)
				return v;
		}

		/* Fiddle with FIFO. */
		rw_mgr_incr_vfifo(grp);
	}

	/* No failing read found! Something must have gone wrong. */
	debug_cond(DLEVEL >= 2, "%s:%d: vfifo failed\n", __func__, __LINE__);
	return 0;
}

/**
 * sdr_find_phase_delay() - Find DQS enable phase or delay
 * @working:	If 1, look for working phase/delay, if 0, look for non-working
 * @delay:	If 1, look for delay, if 0, look for phase
 * @grp:	Read/Write group
 * @work:	Working window position
 * @work_inc:	Working window increment
 * @pd:		DQS Phase/Delay Iterator
 *
 * Find working or non-working DQS enable phase setting.
 */
static int sdr_find_phase_delay(struct socfpga_sdrseq *seq, int working,
				int delay, const u32 grp, u32 *work,
				const u32 work_inc, u32 *pd)
{
	const u32 max = delay ? seq->iocfg->dqs_en_delay_max :
				seq->iocfg->dqs_en_phase_max;
	u32 ret;

	for (; *pd <= max; (*pd)++) {
		if (delay)
			scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, *pd);
		else
			scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, *pd);

		ret = rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1,
							       PASS_ONE_BIT, 0);
		if (!working)
			ret = !ret;

		if (ret)
			return 0;

		if (work)
			*work += work_inc;
	}

	return -EINVAL;
}

/**
 * sdr_find_phase() - Find DQS enable phase
 * @working:	If 1, look for working phase, if 0, look for non-working phase
 * @grp:	Read/Write group
 * @work:	Working window position
 * @i:		Iterator
 * @p:		DQS Phase Iterator
 *
 * Find working or non-working DQS enable phase setting.
 */
static int sdr_find_phase(struct socfpga_sdrseq *seq, int working,
			  const u32 grp, u32 *work, u32 *i, u32 *p)
{
	const u32 end = seq->misccfg->read_valid_fifo_size + (working ? 0 : 1);
	int ret;

	for (; *i < end; (*i)++) {
		if (working)
			*p = 0;

		ret = sdr_find_phase_delay(seq, working, 0, grp, work,
					   seq->iocfg->delay_per_opa_tap, p);
		if (!ret)
			return 0;

		if (*p > seq->iocfg->dqs_en_phase_max) {
			/* Fiddle with FIFO. */
			rw_mgr_incr_vfifo(grp);
			if (!working)
				*p = 0;
		}
	}

	return -EINVAL;
}

/**
 * sdr_working_phase() - Find working DQS enable phase
 * @grp:	Read/Write group
 * @work_bgn:	Working window start position
 * @d:		dtaps output value
 * @p:		DQS Phase Iterator
 * @i:		Iterator
 *
 * Find working DQS enable phase setting.
 */
static int sdr_working_phase(struct socfpga_sdrseq *seq, const u32 grp,
			     u32 *work_bgn, u32 *d, u32 *p, u32 *i)
{
	const u32 dtaps_per_ptap = seq->iocfg->delay_per_opa_tap /
				   seq->iocfg->delay_per_dqs_en_dchain_tap;
	int ret;

	*work_bgn = 0;

	for (*d = 0; *d <= dtaps_per_ptap; (*d)++) {
		*i = 0;
		scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, *d);
		ret = sdr_find_phase(seq, 1, grp, work_bgn, i, p);
		if (!ret)
			return 0;
		*work_bgn += seq->iocfg->delay_per_dqs_en_dchain_tap;
	}

	/* Cannot find working solution */
	debug_cond(DLEVEL >= 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
		   __func__, __LINE__);
	return -EINVAL;
}

/**
 * sdr_backup_phase() - Find DQS enable backup phase
 * @grp:	Read/Write group
 * @work_bgn:	Working window start position
 * @p:		DQS Phase Iterator
 *
 * Find DQS enable backup phase setting.
 */
static void sdr_backup_phase(struct socfpga_sdrseq *seq, const u32 grp,
			     u32 *work_bgn, u32 *p)
{
	u32 tmp_delay, d;
	int ret;

	/* Special case code for backing up a phase */
	if (*p == 0) {
		*p = seq->iocfg->dqs_en_phase_max;
		rw_mgr_decr_vfifo(seq, grp);
	} else {
		(*p)--;
	}
	tmp_delay = *work_bgn - seq->iocfg->delay_per_opa_tap;
	scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, *p);

	for (d = 0; d <= seq->iocfg->dqs_en_delay_max && tmp_delay < *work_bgn;
	     d++) {
		scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, d);

		ret = rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1,
							       PASS_ONE_BIT, 0);
		if (ret) {
			*work_bgn = tmp_delay;
			break;
		}

		tmp_delay += seq->iocfg->delay_per_dqs_en_dchain_tap;
	}

	/* Restore VFIFO to old state before we decremented it (if needed). */
	(*p)++;
	if (*p > seq->iocfg->dqs_en_phase_max) {
		*p = 0;
		rw_mgr_incr_vfifo(grp);
	}

	scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, 0);
}

/**
 * sdr_nonworking_phase() - Find non-working DQS enable phase
 * @grp:	Read/Write group
 * @work_end:	Working window end position
 * @p:		DQS Phase Iterator
 * @i:		Iterator
 *
 * Find non-working DQS enable phase setting.
 */
static int sdr_nonworking_phase(struct socfpga_sdrseq *seq,
				const u32 grp, u32 *work_end, u32 *p, u32 *i)
{
	int ret;

	(*p)++;
	*work_end += seq->iocfg->delay_per_opa_tap;
	if (*p > seq->iocfg->dqs_en_phase_max) {
		/* Fiddle with FIFO. */
		*p = 0;
		rw_mgr_incr_vfifo(grp);
	}

	ret = sdr_find_phase(seq, 0, grp, work_end, i, p);
	if (ret) {
		/* Cannot see edge of failing read. */
		debug_cond(DLEVEL >= 2, "%s:%d: end: failed\n",
			   __func__, __LINE__);
	}

	return ret;
}

/**
 * sdr_find_window_center() - Find center of the working DQS window.
 * @grp:	Read/Write group
 * @work_bgn:	First working settings
 * @work_end:	Last working settings
 *
 * Find center of the working DQS enable window.
 */
static int sdr_find_window_center(struct socfpga_sdrseq *seq,
				  const u32 grp, const u32 work_bgn,
				  const u32 work_end)
{
	u32 work_mid;
	int tmp_delay = 0;
	int i, p, d;

	work_mid = (work_bgn + work_end) / 2;

	debug_cond(DLEVEL >= 2, "work_bgn=%d work_end=%d work_mid=%d\n",
		   work_bgn, work_end, work_mid);
	/* Get the middle delay to be less than a VFIFO delay */
	tmp_delay = (seq->iocfg->dqs_en_phase_max + 1)
		    * seq->iocfg->delay_per_opa_tap;

	debug_cond(DLEVEL >= 2, "vfifo ptap delay %d\n", tmp_delay);
	work_mid %= tmp_delay;
	debug_cond(DLEVEL >= 2, "new work_mid %d\n", work_mid);

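	/*
	 * Worked example with illustrative tap values (not taken from any
	 * particular device): with dqs_en_phase_max = 7 and
	 * delay_per_opa_tap = 400 ps, one VFIFO cycle spans 8 * 400 =
	 * 3200 ps. A window middle of 4000 ps then reduces to 800 ps,
	 * which the code below splits into p = 800 / 400 = 2 phase taps
	 * plus a delay-chain remainder of d = 0 taps.
	 */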
	tmp_delay = rounddown(work_mid, seq->iocfg->delay_per_opa_tap);
	if (tmp_delay > seq->iocfg->dqs_en_phase_max
	    * seq->iocfg->delay_per_opa_tap) {
		tmp_delay = seq->iocfg->dqs_en_phase_max
			    * seq->iocfg->delay_per_opa_tap;
	}
	p = tmp_delay / seq->iocfg->delay_per_opa_tap;

	debug_cond(DLEVEL >= 2, "new p %d, tmp_delay=%d\n", p, tmp_delay);

	d = DIV_ROUND_UP(work_mid - tmp_delay,
			 seq->iocfg->delay_per_dqs_en_dchain_tap);
	if (d > seq->iocfg->dqs_en_delay_max)
		d = seq->iocfg->dqs_en_delay_max;
	tmp_delay += d * seq->iocfg->delay_per_dqs_en_dchain_tap;

	debug_cond(DLEVEL >= 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);

	scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, p);
	scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, d);

	/*
	 * Push vfifo until we can successfully calibrate. We can do this
	 * because the largest possible margin is 1 VFIFO cycle.
	 */
	for (i = 0; i < seq->misccfg->read_valid_fifo_size; i++) {
		debug_cond(DLEVEL >= 2, "find_dqs_en_phase: center\n");
		if (rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1,
							     PASS_ONE_BIT,
							     0)) {
			debug_cond(DLEVEL >= 2,
				   "%s:%d center: found: ptap=%u dtap=%u\n",
				   __func__, __LINE__, p, d);
			return 0;
		}

		/* Fiddle with FIFO. */
		rw_mgr_incr_vfifo(grp);
	}

	debug_cond(DLEVEL >= 2, "%s:%d center: failed.\n",
		   __func__, __LINE__);
	return -EINVAL;
}

/**
 * rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase() - Find a good DQS enable to
 * use
 * @grp:	Read/Write Group
 *
 * Find a good DQS enable to use.
 */
static int
rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(struct socfpga_sdrseq *seq,
					     const u32 grp)
{
	u32 d, p, i;
	u32 dtaps_per_ptap;
	u32 work_bgn, work_end;
	u32 found_passing_read, found_failing_read = 0, initial_failing_dtap;
	int ret;

	debug("%s:%d %u\n", __func__, __LINE__, grp);

	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);

	scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, 0);
	scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, 0);

	/* Step 0: Determine number of delay taps for each phase tap. */
	dtaps_per_ptap = seq->iocfg->delay_per_opa_tap /
			 seq->iocfg->delay_per_dqs_en_dchain_tap;

	/* Step 1: First push vfifo until we get a failing read. */
	find_vfifo_failing_read(seq, grp);

	/* Step 2: Find first working phase, increment in ptaps. */
	work_bgn = 0;
	ret = sdr_working_phase(seq, grp, &work_bgn, &d, &p, &i);
	if (ret)
		return ret;

	work_end = work_bgn;

	/*
	 * If d is 0 then the working window covers a phase tap and we can
	 * follow the old procedure. Otherwise, we've found the beginning
	 * and we need to increment the dtaps until we find the end.
	 */
	if (d == 0) {
		/*
		 * Step 3a: If we have room, back off by one and
		 * increment in dtaps.
		 */
		sdr_backup_phase(seq, grp, &work_bgn, &p);

		/*
		 * Step 4a: go forward from working phase to non working
		 * phase, increment in ptaps.
		 */
		ret = sdr_nonworking_phase(seq, grp, &work_end, &p, &i);
		if (ret)
			return ret;

		/* Step 5a: Back off one from last, increment in dtaps. */

		/* Special case code for backing up a phase */
		if (p == 0) {
			p = seq->iocfg->dqs_en_phase_max;
			rw_mgr_decr_vfifo(seq, grp);
		} else {
			p = p - 1;
		}

		work_end -= seq->iocfg->delay_per_opa_tap;
		scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, p);

		d = 0;

		debug_cond(DLEVEL >= 2, "%s:%d p: ptap=%u\n",
			   __func__, __LINE__, p);
	}

	/* The dtap increment to find the failing edge is done here. */
	sdr_find_phase_delay(seq, 0, 1, grp, &work_end,
			     seq->iocfg->delay_per_dqs_en_dchain_tap, &d);

	/* Go back to working dtap */
	if (d != 0)
		work_end -= seq->iocfg->delay_per_dqs_en_dchain_tap;

	debug_cond(DLEVEL >= 2,
		   "%s:%d p/d: ptap=%u dtap=%u end=%u\n",
		   __func__, __LINE__, p, d - 1, work_end);

	if (work_end < work_bgn) {
		/* nil range */
		debug_cond(DLEVEL >= 2, "%s:%d end-2: failed\n",
			   __func__, __LINE__);
		return -EINVAL;
	}

	debug_cond(DLEVEL >= 2, "%s:%d found range [%u,%u]\n",
		   __func__, __LINE__, work_bgn, work_end);

	/*
	 * We need to calculate the number of dtaps that equal a ptap.
	 * To do that we'll back up a ptap and re-find the edge of the
	 * window using dtaps
	 */
	debug_cond(DLEVEL >= 2, "%s:%d calculate dtaps_per_ptap for tracking\n",
		   __func__, __LINE__);

	/* Special case code for backing up a phase */
	if (p == 0) {
		p = seq->iocfg->dqs_en_phase_max;
		rw_mgr_decr_vfifo(seq, grp);
		debug_cond(DLEVEL >= 2, "%s:%d backedup cycle/phase: p=%u\n",
			   __func__, __LINE__, p);
	} else {
		p = p - 1;
		debug_cond(DLEVEL >= 2, "%s:%d backedup phase only: p=%u",
			   __func__, __LINE__, p);
	}

	scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, p);

	/*
	 * Increase dtap until we first see a passing read (in case the
	 * window is smaller than a ptap), and then a failing read to
	 * mark the edge of the window again.
	 */

	/* Find a passing read. */
	debug_cond(DLEVEL >= 2, "%s:%d find passing read\n",
		   __func__, __LINE__);

	initial_failing_dtap = d;

	found_passing_read = !sdr_find_phase_delay(seq, 1, 1, grp, NULL, 0, &d);
	if (found_passing_read) {
		/* Find a failing read. */
		debug_cond(DLEVEL >= 2, "%s:%d find failing read\n",
			   __func__, __LINE__);
		d++;
		found_failing_read = !sdr_find_phase_delay(seq, 0, 1, grp, NULL,
							   0, &d);
	} else {
		debug_cond(DLEVEL >= 1,
			   "%s:%d failed to calculate dtaps per ptap. Fall back on static value\n",
			   __func__, __LINE__);
	}

	/*
	 * The dynamically calculated dtaps_per_ptap is only valid if we
	 * found a passing/failing read. If we didn't, it means d hit the max
	 * (seq->iocfg->dqs_en_delay_max). Otherwise, dtaps_per_ptap retains
	 * its statically calculated value.
	 */
	if (found_passing_read && found_failing_read)
		dtaps_per_ptap = d - initial_failing_dtap;

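	/*
	 * Illustrative numbers (not from any particular device): if the
	 * failing edge was first seen at dtap 5 and, after backing up one
	 * ptap, the edge is re-found at dtap 21, then one phase tap
	 * corresponds to 21 - 5 = 16 delay-chain taps.
	 */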
	writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
	debug_cond(DLEVEL >= 2, "%s:%d dtaps_per_ptap=%u - %u = %u",
		   __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);

	/* Step 6: Find the centre of the window. */
	ret = sdr_find_window_center(seq, grp, work_bgn, work_end);

	return ret;
}

/**
 * search_stop_check() - Check if the detected edge is valid
 * @write:		Perform read (Stage 2) or write (Stage 3) calibration
 * @d:			DQS delay
 * @rank_bgn:		Rank number
 * @write_group:	Write Group
 * @read_group:		Read Group
 * @bit_chk:		Resulting bit mask after the test
 * @sticky_bit_chk:	Resulting sticky bit mask after the test
 * @use_read_test:	Perform read test
 *
 * Test if the found edge is valid.
 */
static u32 search_stop_check(struct socfpga_sdrseq *seq, const int write,
			     const int d, const int rank_bgn,
			     const u32 write_group, const u32 read_group,
			     u32 *bit_chk, u32 *sticky_bit_chk,
			     const u32 use_read_test)
{
	const u32 ratio = seq->rwcfg->mem_if_read_dqs_width /
			  seq->rwcfg->mem_if_write_dqs_width;
	const u32 correct_mask = write ? seq->param.write_correct_mask :
					 seq->param.read_correct_mask;
	const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
				    seq->rwcfg->mem_dq_per_read_dqs;
	u32 ret;

	/*
	 * Stop searching when the read test doesn't pass AND when
	 * we've seen a passing read on every bit.
	 */
	if (write) {			/* WRITE-ONLY */
		ret = !rw_mgr_mem_calibrate_write_test(seq, rank_bgn,
						       write_group, 0,
						       PASS_ONE_BIT, bit_chk,
						       0);
	} else if (use_read_test) {	/* READ-ONLY */
		ret = !rw_mgr_mem_calibrate_read_test(seq, rank_bgn, read_group,
						      NUM_READ_PB_TESTS,
						      PASS_ONE_BIT, bit_chk,
						      0, 0);
	} else {			/* READ-ONLY */
		rw_mgr_mem_calibrate_write_test(seq, rank_bgn, write_group, 0,
						PASS_ONE_BIT, bit_chk, 0);
		*bit_chk = *bit_chk >> (per_dqs *
			(read_group - (write_group * ratio)));
		ret = (*bit_chk == 0);
	}
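	/*
	 * Illustrative example (hypothetical interface widths): with
	 * ratio = 4 read DQS groups per write DQS group and per_dqs = 8
	 * DQ bits, read group 6 inside write group 1 selects bits
	 * [16..23] of the write-test result, i.e. a shift by
	 * 8 * (6 - 1 * 4) = 16.
	 */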
	*sticky_bit_chk = *sticky_bit_chk | *bit_chk;
	ret = ret && (*sticky_bit_chk == correct_mask);
	debug_cond(DLEVEL >= 2,
		   "%s:%d center(left): dtap=%u => %u == %u && %u",
		   __func__, __LINE__, d,
		   *sticky_bit_chk, correct_mask, ret);
	return ret;
}

/**
 * search_left_edge() - Find left edge of DQ/DQS working phase
 * @write:		Perform read (Stage 2) or write (Stage 3) calibration
 * @rank_bgn:		Rank number
 * @write_group:	Write Group
 * @read_group:		Read Group
 * @test_bgn:		Rank number to begin the test
 * @sticky_bit_chk:	Resulting sticky bit mask after the test
 * @left_edge:		Left edge of the DQ/DQS phase
 * @right_edge:		Right edge of the DQ/DQS phase
 * @use_read_test:	Perform read test
 *
 * Find left edge of DQ/DQS working phase.
 */
static void search_left_edge(struct socfpga_sdrseq *seq, const int write,
			     const int rank_bgn, const u32 write_group,
			     const u32 read_group, const u32 test_bgn,
			     u32 *sticky_bit_chk, int *left_edge,
			     int *right_edge, const u32 use_read_test)
{
	const u32 delay_max = write ? seq->iocfg->io_out1_delay_max :
				      seq->iocfg->io_in_delay_max;
	const u32 dqs_max = write ? seq->iocfg->io_out1_delay_max :
				    seq->iocfg->dqs_in_delay_max;
	const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
				    seq->rwcfg->mem_dq_per_read_dqs;
	u32 stop, bit_chk;
	int i, d;

	for (d = 0; d <= dqs_max; d++) {
		if (write)
			scc_mgr_apply_group_dq_out1_delay(seq, d);
		else
			scc_mgr_apply_group_dq_in_delay(seq, test_bgn, d);

		writel(0, &sdr_scc_mgr->update);

		stop = search_stop_check(seq, write, d, rank_bgn, write_group,
					 read_group, &bit_chk, sticky_bit_chk,
					 use_read_test);
		if (stop == 1)
			break;

		/* stop != 1 */
		for (i = 0; i < per_dqs; i++) {
			if (bit_chk & 1) {
				/*
				 * Remember a passing test as
				 * the left_edge.
				 */
				left_edge[i] = d;
			} else {
				/*
				 * If a left edge has not been seen
				 * yet, then a future passing test
				 * will mark this edge as the right
				 * edge.
				 */
				if (left_edge[i] == delay_max + 1)
					right_edge[i] = -(d + 1);
			}
			bit_chk >>= 1;
		}
	}

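	/*
	 * Example of the edge encoding above (illustrative delays): if
	 * bit i fails at d = 0, 1, 2 and first passes at d = 3, the
	 * failing steps latch right_edge[i] = -(2 + 1) = -3 and the pass
	 * sets left_edge[i] = 3. A negative right_edge thus records a
	 * failing region that precedes the working window.
	 */
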
	/* Reset DQ delay chains to 0 */
	if (write)
		scc_mgr_apply_group_dq_out1_delay(seq, 0);
	else
		scc_mgr_apply_group_dq_in_delay(seq, test_bgn, 0);

	*sticky_bit_chk = 0;
	for (i = per_dqs - 1; i >= 0; i--) {
		debug_cond(DLEVEL >= 2,
			   "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
			   __func__, __LINE__, i, left_edge[i],
			   i, right_edge[i]);

		/*
		 * Check for cases where we haven't found the left edge,
		 * which makes our assignment of the right edge invalid.
		 * Reset it to the illegal value.
		 */
		if ((left_edge[i] == delay_max + 1) &&
		    (right_edge[i] != delay_max + 1)) {
			right_edge[i] = delay_max + 1;
			debug_cond(DLEVEL >= 2,
				   "%s:%d vfifo_center: reset right_edge[%u]: %d\n",
				   __func__, __LINE__, i, right_edge[i]);
		}

		/*
		 * Reset sticky bit
		 * READ: except for bits where we have seen both
		 *       the left and right edge.
		 * WRITE: except for bits where we have seen the
		 *        left edge.
		 */
		*sticky_bit_chk <<= 1;
		if (write) {
			if (left_edge[i] != delay_max + 1)
				*sticky_bit_chk |= 1;
		} else {
			if ((left_edge[i] != delay_max + 1) &&
			    (right_edge[i] != delay_max + 1))
				*sticky_bit_chk |= 1;
		}
	}
}

/**
 * search_right_edge() - Find right edge of DQ/DQS working phase
 * @write:		Perform read (Stage 2) or write (Stage 3) calibration
 * @rank_bgn:		Rank number
 * @write_group:	Write Group
 * @read_group:		Read Group
 * @start_dqs:		DQS start phase
 * @start_dqs_en:	DQS enable start phase
 * @sticky_bit_chk:	Resulting sticky bit mask after the test
 * @left_edge:		Left edge of the DQ/DQS phase
 * @right_edge:		Right edge of the DQ/DQS phase
 * @use_read_test:	Perform read test
 *
 * Find right edge of DQ/DQS working phase.
 */
static int search_right_edge(struct socfpga_sdrseq *seq, const int write,
			     const int rank_bgn, const u32 write_group,
			     const u32 read_group, const int start_dqs,
			     const int start_dqs_en, u32 *sticky_bit_chk,
			     int *left_edge, int *right_edge,
			     const u32 use_read_test)
{
	const u32 delay_max = write ? seq->iocfg->io_out1_delay_max :
				      seq->iocfg->io_in_delay_max;
	const u32 dqs_max = write ? seq->iocfg->io_out1_delay_max :
				    seq->iocfg->dqs_in_delay_max;
	const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
				    seq->rwcfg->mem_dq_per_read_dqs;
	u32 stop, bit_chk;
	int i, d;

	for (d = 0; d <= dqs_max - start_dqs; d++) {
		if (write) {	/* WRITE-ONLY */
			scc_mgr_apply_group_dqs_io_and_oct_out1(seq,
								write_group,
								d + start_dqs);
		} else {	/* READ-ONLY */
			scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
			if (seq->iocfg->shift_dqs_en_when_shift_dqs) {
				u32 delay = d + start_dqs_en;

				if (delay > seq->iocfg->dqs_en_delay_max)
					delay = seq->iocfg->dqs_en_delay_max;
				scc_mgr_set_dqs_en_delay(read_group, delay);
			}
			scc_mgr_load_dqs(read_group);
		}

		writel(0, &sdr_scc_mgr->update);

		stop = search_stop_check(seq, write, d, rank_bgn, write_group,
					 read_group, &bit_chk, sticky_bit_chk,
					 use_read_test);
		if (stop == 1) {
			if (write && (d == 0)) {	/* WRITE-ONLY */
				for (i = 0;
				     i < seq->rwcfg->mem_dq_per_write_dqs;
				     i++) {
					/*
					 * d = 0 failed, but it passed when
					 * testing the left edge, so it must be
					 * marginal, set it to -1
					 */
					if (right_edge[i] == delay_max + 1 &&
					    left_edge[i] != delay_max + 1)
						right_edge[i] = -1;
				}
			}
			break;
		}

		/* stop != 1 */
		for (i = 0; i < per_dqs; i++) {
			if (bit_chk & 1) {
				/*
				 * Remember a passing test as
				 * the right_edge.
				 */
				right_edge[i] = d;
			} else {
				if (d != 0) {
					/*
					 * If a right edge has not
					 * been seen yet, then a future
					 * passing test will mark this
					 * edge as the left edge.
					 */
					if (right_edge[i] == delay_max + 1)
						left_edge[i] = -(d + 1);
				} else {
					/*
					 * d = 0 failed, but it passed
					 * when testing the left edge,
					 * so it must be marginal, set
					 * it to -1
					 */
					if (right_edge[i] == delay_max + 1 &&
					    left_edge[i] != delay_max + 1)
						right_edge[i] = -1;
					/*
					 * If a right edge has not been
					 * seen yet, then a future
					 * passing test will mark this
					 * edge as the left edge.
					 */
					else if (right_edge[i] == delay_max + 1)
						left_edge[i] = -(d + 1);
				}
			}

			debug_cond(DLEVEL >= 2, "%s:%d center[r,d=%u]: ",
				   __func__, __LINE__, d);
			debug_cond(DLEVEL >= 2,
				   "bit_chk_test=%i left_edge[%u]: %d ",
				   bit_chk & 1, i, left_edge[i]);
			debug_cond(DLEVEL >= 2, "right_edge[%u]: %d\n", i,
				   right_edge[i]);
			bit_chk >>= 1;
		}
	}

	/* Check that all bits have a window */
	for (i = 0; i < per_dqs; i++) {
		debug_cond(DLEVEL >= 2,
			   "%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d",
			   __func__, __LINE__, i, left_edge[i],
			   i, right_edge[i]);
		if ((left_edge[i] == dqs_max + 1) ||
		    (right_edge[i] == dqs_max + 1))
			return i + 1;	/* FIXME: If we fail, retval > 0 */
	}

	return 0;
}

/**
 * get_window_mid_index() - Find the best middle setting of DQ/DQS phase
 * @write:	Perform read (Stage 2) or write (Stage 3) calibration
 * @left_edge:	Left edge of the DQ/DQS phase
 * @right_edge:	Right edge of the DQ/DQS phase
 * @mid_min:	Best DQ/DQS phase middle setting
 *
 * Find index and value of the middle of the DQ/DQS working phase.
 */
static int get_window_mid_index(struct socfpga_sdrseq *seq,
				const int write, int *left_edge,
				int *right_edge, int *mid_min)
{
	const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
				    seq->rwcfg->mem_dq_per_read_dqs;
	int i, mid, min_index;

	/* Find middle of window for each DQ bit */
	*mid_min = left_edge[0] - right_edge[0];
	min_index = 0;
	for (i = 1; i < per_dqs; i++) {
		mid = left_edge[i] - right_edge[i];
		if (mid < *mid_min) {
			*mid_min = mid;
			min_index = i;
		}
	}

	/*
	 * -mid_min/2 represents the amount that we need to move DQS.
	 * If mid_min is odd and positive we'll need to add one to make
	 * sure the rounding in further calculations is correct (always
	 * bias to the right), so just add 1 for all positive values.
	 */
	if (*mid_min > 0)
		(*mid_min)++;
	*mid_min = *mid_min / 2;
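	/*
	 * Rounding example: an asymmetry of +5 becomes (5 + 1) / 2 = 3,
	 * while -5 truncates toward zero to -2, so the computed middle
	 * is always biased to the right as required above.
	 */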

	debug_cond(DLEVEL >= 1, "%s:%d vfifo_center: *mid_min=%d (index=%u)\n",
		   __func__, __LINE__, *mid_min, min_index);
	return min_index;
}

/**
 * center_dq_windows() - Center the DQ/DQS windows
 * @write:		Perform read (Stage 2) or write (Stage 3) calibration
 * @left_edge:		Left edge of the DQ/DQS phase
 * @right_edge:		Right edge of the DQ/DQS phase
 * @mid_min:		Adjusted DQ/DQS phase middle setting
 * @orig_mid_min:	Original DQ/DQS phase middle setting
 * @min_index:		DQ/DQS phase middle setting index
 * @test_bgn:		Rank number to begin the test
 * @dq_margin:		Amount of shift for the DQ
 * @dqs_margin:		Amount of shift for the DQS
 *
 * Align the DQ/DQS windows in each group.
 */
static void center_dq_windows(struct socfpga_sdrseq *seq,
			      const int write, int *left_edge, int *right_edge,
			      const int mid_min, const int orig_mid_min,
			      const int min_index, const int test_bgn,
			      int *dq_margin, int *dqs_margin)
{
	const s32 delay_max = write ? seq->iocfg->io_out1_delay_max :
				      seq->iocfg->io_in_delay_max;
	const s32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
				    seq->rwcfg->mem_dq_per_read_dqs;
	const s32 delay_off = write ? SCC_MGR_IO_OUT1_DELAY_OFFSET :
				      SCC_MGR_IO_IN_DELAY_OFFSET;
	const s32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | delay_off;

	s32 temp_dq_io_delay1;
	int shift_dq, i, p;

	/* Initialize data for export structures */
	*dqs_margin = delay_max + 1;
	*dq_margin = delay_max + 1;

	/* add delay to bring centre of all DQ windows to the same "level" */
	for (i = 0, p = test_bgn; i < per_dqs; i++, p++) {
		/* Use values before divide by 2 to reduce round off error */
		shift_dq = (left_edge[i] - right_edge[i] -
			(left_edge[min_index] - right_edge[min_index]))/2 +
			(orig_mid_min - mid_min);

		debug_cond(DLEVEL >= 2,
			   "vfifo_center: before: shift_dq[%u]=%d\n",
			   i, shift_dq);

		temp_dq_io_delay1 = readl(addr + (i << 2));

		if (shift_dq + temp_dq_io_delay1 > delay_max)
			shift_dq = delay_max - temp_dq_io_delay1;
		else if (shift_dq + temp_dq_io_delay1 < 0)
			shift_dq = -temp_dq_io_delay1;

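		/*
		 * Clamping example (illustrative values): with
		 * delay_max = 31 and a current tap setting of 28, a
		 * requested shift of +6 is limited to +3 so the
		 * resulting delay stays within the delay chain.
		 */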
		debug_cond(DLEVEL >= 2,
			   "vfifo_center: after: shift_dq[%u]=%d\n",
			   i, shift_dq);

		if (write)
			scc_mgr_set_dq_out1_delay(i,
						  temp_dq_io_delay1 + shift_dq);
		else
			scc_mgr_set_dq_in_delay(p,
						temp_dq_io_delay1 + shift_dq);

		scc_mgr_load_dq(p);

		debug_cond(DLEVEL >= 2,
			   "vfifo_center: margin[%u]=[%d,%d]\n", i,
			   left_edge[i] - shift_dq + (-mid_min),
			   right_edge[i] + shift_dq - (-mid_min));

		/* To determine values for export structures */
		if (left_edge[i] - shift_dq + (-mid_min) < *dq_margin)
			*dq_margin = left_edge[i] - shift_dq + (-mid_min);

		if (right_edge[i] + shift_dq - (-mid_min) < *dqs_margin)
			*dqs_margin = right_edge[i] + shift_dq - (-mid_min);
	}
}

/**
 * rw_mgr_mem_calibrate_vfifo_center() - Per-bit deskew DQ and centering
 * @rank_bgn:		Rank number
 * @rw_group:		Read/Write Group
 * @test_bgn:		Rank at which the test begins
 * @use_read_test:	Perform a read test
 * @update_fom:		Update FOM
 *
 * Per-bit deskew DQ and centering.
 */
static int rw_mgr_mem_calibrate_vfifo_center(struct socfpga_sdrseq *seq,
					     const u32 rank_bgn,
					     const u32 rw_group,
					     const u32 test_bgn,
					     const int use_read_test,
					     const int update_fom)
{
	const u32 addr =
		SDR_PHYGRP_SCCGRP_ADDRESS + SCC_MGR_DQS_IN_DELAY_OFFSET +
		(rw_group << 2);
	/*
	 * Store these as signed since there are comparisons with
	 * signed numbers.
	 */
	u32 sticky_bit_chk;
	s32 left_edge[seq->rwcfg->mem_dq_per_read_dqs];
	s32 right_edge[seq->rwcfg->mem_dq_per_read_dqs];
	s32 orig_mid_min, mid_min;
	s32 new_dqs, start_dqs, start_dqs_en = 0, final_dqs_en;
	s32 dq_margin, dqs_margin;
	int i, min_index;
	int ret;

	debug("%s:%d: %u %u", __func__, __LINE__, rw_group, test_bgn);

	start_dqs = readl(addr);
	if (seq->iocfg->shift_dqs_en_when_shift_dqs)
		start_dqs_en = readl(addr - seq->iocfg->dqs_en_delay_offset);

	/* set the left and right edge of each bit to an illegal value */
	/* use (seq->iocfg->io_in_delay_max + 1) as an illegal value */
	sticky_bit_chk = 0;
	for (i = 0; i < seq->rwcfg->mem_dq_per_read_dqs; i++) {
		left_edge[i] = seq->iocfg->io_in_delay_max + 1;
		right_edge[i] = seq->iocfg->io_in_delay_max + 1;
	}

	/* Search for the left edge of the window for each bit */
	search_left_edge(seq, 0, rank_bgn, rw_group, rw_group, test_bgn,
			 &sticky_bit_chk,
			 left_edge, right_edge, use_read_test);

	/* Search for the right edge of the window for each bit */
	ret = search_right_edge(seq, 0, rank_bgn, rw_group, rw_group,
				start_dqs, start_dqs_en,
				&sticky_bit_chk,
				left_edge, right_edge, use_read_test);
	if (ret) {
		/*
		 * Restore delay chain settings before letting the loop
		 * in rw_mgr_mem_calibrate_vfifo retry different
		 * dqs/ck relationships.
		 */
		scc_mgr_set_dqs_bus_in_delay(rw_group, start_dqs);
		if (seq->iocfg->shift_dqs_en_when_shift_dqs)
			scc_mgr_set_dqs_en_delay(rw_group, start_dqs_en);

		scc_mgr_load_dqs(rw_group);
		writel(0, &sdr_scc_mgr->update);

		debug_cond(DLEVEL >= 1,
			   "%s:%d vfifo_center: failed to find edge [%u]: %d %d",
			   __func__, __LINE__, i, left_edge[i], right_edge[i]);
		if (use_read_test) {
			set_failing_group_stage(seq, rw_group *
				seq->rwcfg->mem_dq_per_read_dqs + i,
				CAL_STAGE_VFIFO,
				CAL_SUBSTAGE_VFIFO_CENTER);
		} else {
			set_failing_group_stage(seq, rw_group *
				seq->rwcfg->mem_dq_per_read_dqs + i,
				CAL_STAGE_VFIFO_AFTER_WRITES,
				CAL_SUBSTAGE_VFIFO_CENTER);
		}
		return -EIO;
	}

	min_index = get_window_mid_index(seq, 0, left_edge, right_edge,
					 &mid_min);

	/* Determine the amount we can change DQS (which is -mid_min) */
	orig_mid_min = mid_min;
	new_dqs = start_dqs - mid_min;
	if (new_dqs > seq->iocfg->dqs_in_delay_max)
		new_dqs = seq->iocfg->dqs_in_delay_max;
	else if (new_dqs < 0)
		new_dqs = 0;

	mid_min = start_dqs - new_dqs;
	debug_cond(DLEVEL >= 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
		   mid_min, new_dqs);

	if (seq->iocfg->shift_dqs_en_when_shift_dqs) {
		if (start_dqs_en - mid_min > seq->iocfg->dqs_en_delay_max)
			mid_min += start_dqs_en - mid_min -
				   seq->iocfg->dqs_en_delay_max;
		else if (start_dqs_en - mid_min < 0)
			mid_min += start_dqs_en - mid_min;
	}
	new_dqs = start_dqs - mid_min;

	debug_cond(DLEVEL >= 1,
		   "vfifo_center: start_dqs=%d start_dqs_en=%d new_dqs=%d mid_min=%d\n",
		   start_dqs,
		   seq->iocfg->shift_dqs_en_when_shift_dqs ? start_dqs_en : -1,
		   new_dqs, mid_min);

	/* Add delay to bring centre of all DQ windows to the same "level". */
	center_dq_windows(seq, 0, left_edge, right_edge, mid_min, orig_mid_min,
			  min_index, test_bgn, &dq_margin, &dqs_margin);

	/* Move DQS-en */
	if (seq->iocfg->shift_dqs_en_when_shift_dqs) {
		final_dqs_en = start_dqs_en - mid_min;
		scc_mgr_set_dqs_en_delay(rw_group, final_dqs_en);
		scc_mgr_load_dqs(rw_group);
	}

	/* Move DQS */
	scc_mgr_set_dqs_bus_in_delay(rw_group, new_dqs);
	scc_mgr_load_dqs(rw_group);
	debug_cond(DLEVEL >= 2,
		   "%s:%d vfifo_center: dq_margin=%d dqs_margin=%d",
		   __func__, __LINE__, dq_margin, dqs_margin);

	/*
	 * Do not remove this line as it makes sure all of our decisions
	 * have been applied. Apply the update bit.
	 */
	writel(0, &sdr_scc_mgr->update);

	if ((dq_margin < 0) || (dqs_margin < 0))
		return -EINVAL;

	return 0;
}

/**
 * rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the
 * device
 * @rw_group:	Read/Write Group
 * @phase:	DQ/DQS phase
 *
 * Because initially no communication can be reliably performed with the
 * memory device, the sequencer uses a guaranteed write mechanism to write
 * data into the memory device.
 */
static int rw_mgr_mem_calibrate_guaranteed_write(struct socfpga_sdrseq *seq,
						 const u32 rw_group,
						 const u32 phase)
{
	int ret;

	/* Set a particular DQ/DQS phase. */
	scc_mgr_set_dqdqs_output_phase_all_ranks(seq, rw_group, phase);

	debug_cond(DLEVEL >= 1, "%s:%d guaranteed write: g=%u p=%u\n",
		   __func__, __LINE__, rw_group, phase);

	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-25
	 * Load up the patterns used by read calibration using the
	 * current DQDQS phase.
	 */
	rw_mgr_mem_calibrate_read_load_patterns(seq, 0, 1);

	if (seq->gbl.phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)
		return 0;

	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-26
	 * Back-to-Back reads of the patterns used for calibration.
	 */
	ret = rw_mgr_mem_calibrate_read_test_patterns(seq, 0, rw_group, 1);
	if (ret)
		debug_cond(DLEVEL >= 1,
			   "%s:%d Guaranteed read test failed: g=%u p=%u\n",
			   __func__, __LINE__, rw_group, phase);

	return ret;
}

/**
 * rw_mgr_mem_calibrate_dqs_enable_calibration() - DQS Enable Calibration
 * @rw_group:	Read/Write Group
 * @test_bgn:	Rank at which the test begins
 *
 * DQS enable calibration ensures reliable capture of the DQ signal without
 * glitches on the DQS line.
 */
static int
rw_mgr_mem_calibrate_dqs_enable_calibration(struct socfpga_sdrseq *seq,
					    const u32 rw_group,
					    const u32 test_bgn)
{
	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-27
	 * DQS and DQS Enable Signal Relationships.
	 */

	/* We start at zero, so we have one less dq to divide among */
	const u32 delay_step = seq->iocfg->io_in_delay_max /
			       (seq->rwcfg->mem_dq_per_read_dqs - 1);
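	/*
	 * Illustrative widths (not from any particular configuration):
	 * with io_in_delay_max = 31 and 8 DQ per DQS, delay_step = 31 / 7
	 * = 4, so the loop below staggers the DQ input delays as
	 * 0, 4, 8, ..., 28 across the group.
	 */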
	int ret;
	u32 i, p, d, r;

	debug("%s:%d (%u,%u)\n", __func__, __LINE__, rw_group, test_bgn);

	/* Try different dq_in_delays since the DQ path is shorter than DQS. */
	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0, p = test_bgn, d = 0;
		     i < seq->rwcfg->mem_dq_per_read_dqs;
		     i++, p++, d += delay_step) {
			debug_cond(DLEVEL >= 1,
				   "%s:%d: g=%u r=%u i=%u p=%u d=%u\n",
				   __func__, __LINE__, rw_group, r, i, p, d);

			scc_mgr_set_dq_in_delay(p, d);
			scc_mgr_load_dq(p);
		}

		writel(0, &sdr_scc_mgr->update);
	}

	/*
	 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
	 * dq_in_delay values
	 */
	ret = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(seq, rw_group);

	debug_cond(DLEVEL >= 1,
		   "%s:%d: g=%u found=%u; Resetting delay chain to zero\n",
		   __func__, __LINE__, rw_group, !ret);

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_dq_in_delay(seq, test_bgn, 0);
		writel(0, &sdr_scc_mgr->update);
	}

	return ret;
}

/**
 * rw_mgr_mem_calibrate_dq_dqs_centering() - Centering DQ/DQS
 * @rw_group:		Read/Write Group
 * @test_bgn:		Rank at which the test begins
 * @use_read_test:	Perform a read test
 * @update_fom:		Update FOM
 *
 * The centering DQ/DQS stage attempts to align DQ and DQS signals on reads
 * within a group.
 */
static int
rw_mgr_mem_calibrate_dq_dqs_centering(struct socfpga_sdrseq *seq,
				      const u32 rw_group, const u32 test_bgn,
				      const int use_read_test,
				      const int update_fom)
{
	int ret, grp_calibrated;
	u32 rank_bgn, sr;

	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-28
	 * Read per-bit deskew can be done on a per shadow register basis.
	 */
	grp_calibrated = 1;
	for (rank_bgn = 0, sr = 0;
	     rank_bgn < seq->rwcfg->mem_number_of_ranks;
	     rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
		ret = rw_mgr_mem_calibrate_vfifo_center(seq, rank_bgn, rw_group,
							test_bgn,
							use_read_test,
							update_fom);
		if (!ret)
			continue;

		grp_calibrated = 0;
	}

	if (!grp_calibrated)
		return -EIO;

	return 0;
}

/**
 * rw_mgr_mem_calibrate_vfifo() - Calibrate the read valid prediction FIFO
 * @rw_group:	Read/Write Group
 * @test_bgn:	Rank at which the test begins
 *
 * Stage 1: Calibrate the read valid prediction FIFO.
 *
 * This function implements UniPHY calibration Stage 1, as explained in
 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
 *
 * - read valid prediction will consist of finding:
 *   - DQS enable phase and DQS enable delay (DQS Enable Calibration)
 *   - DQS input phase and DQS input delay (DQ/DQS Centering)
 * - we also do a per-bit deskew on the DQ lines.
 */
static int rw_mgr_mem_calibrate_vfifo(struct socfpga_sdrseq *seq,
				      const u32 rw_group, const u32 test_bgn)
{
	u32 p, d;
	u32 dtaps_per_ptap;
	u32 failed_substage;

	int ret;

	debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);

	/* Update info for sims */
	reg_file_set_group(rw_group);
	reg_file_set_stage(CAL_STAGE_VFIFO);
	reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);

	failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;

	/* USER Determine number of delay taps for each phase tap. */
	dtaps_per_ptap = DIV_ROUND_UP(seq->iocfg->delay_per_opa_tap,
				      seq->iocfg->delay_per_dqs_en_dchain_tap)
			 - 1;

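	/*
	 * Illustrative tap values (not from any particular device): with
	 * delay_per_opa_tap = 400 ps and delay_per_dqs_en_dchain_tap =
	 * 25 ps, dtaps_per_ptap = DIV_ROUND_UP(400, 25) - 1 = 15, so the
	 * outer loop below sweeps d across one phase tap's worth of
	 * delay-chain taps in steps of two.
	 */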
	for (d = 0; d <= dtaps_per_ptap; d += 2) {
		/*
		 * In RLDRAMX we may be messing the delay of pins in
		 * the same write rw_group but outside of the current read
		 * rw_group, but that's ok because we haven't calibrated the
		 * output side yet.
		 */
		if (d > 0) {
			scc_mgr_apply_group_all_out_delay_add_all_ranks(seq,
									rw_group,
									d);
		}

		for (p = 0; p <= seq->iocfg->dqdqs_out_phase_max; p++) {
			/* 1) Guaranteed Write */
			ret = rw_mgr_mem_calibrate_guaranteed_write(seq,
								    rw_group,
								    p);
			if (ret)
				break;

			/* 2) DQS Enable Calibration */
			ret = rw_mgr_mem_calibrate_dqs_enable_calibration(seq,
								    rw_group,
								    test_bgn);
			if (ret) {
				failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
				continue;
			}

			/* 3) Centering DQ/DQS */
			/*
			 * If doing read after write calibration, do not update
			 * FOM now. Do it then.
			 */
			ret = rw_mgr_mem_calibrate_dq_dqs_centering(seq,
								    rw_group,
								    test_bgn,
								    1, 0);
			if (ret) {
				failed_substage = CAL_SUBSTAGE_VFIFO_CENTER;
				continue;
			}

			/* All done. */
			goto cal_done_ok;
		}
	}

	/* Calibration Stage 1 failed. */
	set_failing_group_stage(seq, rw_group, CAL_STAGE_VFIFO,
				failed_substage);
	return 0;

	/* Calibration Stage 1 completed OK. */
cal_done_ok:
	/*
	 * Reset the delay chains back to zero if they have moved > 1
	 * (check for > 1 because loop will increase d even when pass in
	 * first case).
	 */
	if (d > 2)
		scc_mgr_zero_group(seq, rw_group, 1);

	return 1;
}

/**
 * rw_mgr_mem_calibrate_vfifo_end() - DQ/DQS Centering.
 * @rw_group:	Read/Write Group
 * @test_bgn:	Rank at which the test begins
 *
 * Stage 3: DQ/DQS Centering.
 *
 * This function implements UniPHY calibration Stage 3, as explained in
 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
 */
static int rw_mgr_mem_calibrate_vfifo_end(struct socfpga_sdrseq *seq,
					  const u32 rw_group,
					  const u32 test_bgn)
{
	int ret;

	debug("%s:%d %u %u", __func__, __LINE__, rw_group, test_bgn);

	/* Update info for sims. */
	reg_file_set_group(rw_group);
	reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);

	ret = rw_mgr_mem_calibrate_dq_dqs_centering(seq, rw_group, test_bgn, 0,
						    1);
	if (ret)
		set_failing_group_stage(seq, rw_group,
					CAL_STAGE_VFIFO_AFTER_WRITES,
					CAL_SUBSTAGE_VFIFO_CENTER);

	return ret;
}

/**
 * rw_mgr_mem_calibrate_lfifo() - Minimize latency
 *
 * Stage 4: Minimize latency.
 *
 * This function implements UniPHY calibration Stage 4, as explained in
 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
 * Calibrate LFIFO to find smallest read latency.
 */
static u32 rw_mgr_mem_calibrate_lfifo(struct socfpga_sdrseq *seq)
{
	int found_one = 0;

	debug("%s:%d\n", __func__, __LINE__);

	/* Update info for sims. */
	reg_file_set_stage(CAL_STAGE_LFIFO);
	reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);

	/* Load up the patterns used by read calibration for all ranks */
	rw_mgr_mem_calibrate_read_load_patterns(seq, 0, 1);

	do {
		writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat);
		debug_cond(DLEVEL >= 2, "%s:%d lfifo: read_lat=%u",
			   __func__, __LINE__, seq->gbl.curr_read_lat);

		if (!rw_mgr_mem_calibrate_read_test_all_ranks(seq, 0,
							      NUM_READ_TESTS,
							      PASS_ALL_BITS, 1))
			break;

		found_one = 1;
		/*
		 * Reduce read latency and see if things are
		 * working correctly.
		 */
		seq->gbl.curr_read_lat--;
	} while (seq->gbl.curr_read_lat > 0);

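	/*
	 * On exit, curr_read_lat is either the first latency at which the
	 * read test failed, or 0 if every latency down to 1 kept passing;
	 * the +2 fudge factor below therefore lands one cycle above the
	 * last known-good value.
	 */
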
	/* Reset the fifos to get pointers to known state. */
	writel(0, &phy_mgr_cmd->fifo_reset);

	if (found_one) {
		/* Add a fudge factor to the read latency that was determined */
		seq->gbl.curr_read_lat += 2;
		writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat);
		debug_cond(DLEVEL >= 2,
			   "%s:%d lfifo: success: using read_lat=%u\n",
			   __func__, __LINE__, seq->gbl.curr_read_lat);
	} else {
		set_failing_group_stage(seq, 0xff, CAL_STAGE_LFIFO,
					CAL_SUBSTAGE_READ_LATENCY);

		debug_cond(DLEVEL >= 2,
			   "%s:%d lfifo: failed at initial read_lat=%u\n",
			   __func__, __LINE__, seq->gbl.curr_read_lat);
	}

	return found_one;
}

/**
 * search_window() - Search for the/part of the window with DM/DQS shift
 * @search_dm:		If 1, search for the DM shift, if 0, search for DQS
 *			shift
 * @rank_bgn:		Rank number
 * @write_group:	Write Group
 * @bgn_curr:		Current window begin
 * @end_curr:		Current window end
 * @bgn_best:		Current best window begin
 * @end_best:		Current best window end
 * @win_best:		Size of the best window
 * @new_dqs:		New DQS value (only applicable if search_dm = 0).
 *
 * Search for the/part of the window with DM/DQS shift.
 */
static void search_window(struct socfpga_sdrseq *seq,
			  const int search_dm, const u32 rank_bgn,
			  const u32 write_group, int *bgn_curr, int *end_curr,
			  int *bgn_best, int *end_best, int *win_best,
			  int new_dqs)
{
	u32 bit_chk;
	const int max = seq->iocfg->io_out1_delay_max - new_dqs;
	int d, di;

	/* Search for the/part of the window with DM/DQS shift. */
	for (di = max; di >= 0; di -= DELTA_D) {
		if (search_dm) {
			d = di;
			scc_mgr_apply_group_dm_out1_delay(seq, d);
		} else {
			/* For DQS, we go from 0...max */
			d = max - di;
			/*
			 * Note: This only shifts DQS, so we may be limiting
			 * ourselves to the width of DQ unnecessarily.
			 */
			scc_mgr_apply_group_dqs_io_and_oct_out1(seq,
								write_group,
								d + new_dqs);
		}

		writel(0, &sdr_scc_mgr->update);

		if (rw_mgr_mem_calibrate_write_test(seq, rank_bgn, write_group,
						    1, PASS_ALL_BITS, &bit_chk,
						    0)) {
			/* Set current end of the window. */
			*end_curr = search_dm ? -d : d;
3073 /* Set current end of the window. */
3074 *end_curr = search_dm ? -d : d;
3075
3076 /*
3077 * If a starting edge of our window has not been seen
3078 * this is our current start of the DM window.
3079 */
285b3cb9 3080 if (*bgn_curr == seq->iocfg->io_out1_delay_max + 1)
c8570afa
MV
3081 *bgn_curr = search_dm ? -d : d;
3082
3083 /*
3084 * If current window is bigger than best seen.
3085 * Set best seen to be current window.
3086 */
3087 if ((*end_curr - *bgn_curr + 1) > *win_best) {
3088 *win_best = *end_curr - *bgn_curr + 1;
3089 *bgn_best = *bgn_curr;
3090 *end_best = *end_curr;
3091 }
3092 } else {
3093 /* We just saw a failing test. Reset temp edge. */
285b3cb9
SG
3094 *bgn_curr = seq->iocfg->io_out1_delay_max + 1;
3095 *end_curr = seq->iocfg->io_out1_delay_max + 1;
c8570afa
MV
3096
3097 /* Early exit is only applicable to DQS. */
3098 if (search_dm)
3099 continue;
3100
3101 /*
3102 * Early exit optimization: if the remaining delay
3103 * chain space is less than already seen largest
3104 * window we can exit.
3105 */
285b3cb9
SG
3106 if (*win_best - 1 > seq->iocfg->io_out1_delay_max
3107 - new_dqs - d)
c8570afa
MV
3108 break;
3109 }
3110 }
3111}
3112
3da42859 3113/*
a386a50e
MV
3114 * rw_mgr_mem_calibrate_writes_center() - Center all windows
3115 * @rank_bgn: Rank number
3116 * @write_group: Write group
3117 * @test_bgn: Rank at which the test begins
3118 *
3119 * Center all windows. Do per-bit-deskew to possibly increase size of
3da42859
DN
3120 * certain windows.
3121 */
3b44f55c 3122static int
285b3cb9
SG
3123rw_mgr_mem_calibrate_writes_center(struct socfpga_sdrseq *seq,
3124 const u32 rank_bgn, const u32 write_group,
3b44f55c 3125 const u32 test_bgn)
3da42859 3126{
c8570afa 3127 int i;
3b44f55c
MV
3128 u32 sticky_bit_chk;
3129 u32 min_index;
285b3cb9
SG
3130 int left_edge[seq->rwcfg->mem_dq_per_write_dqs];
3131 int right_edge[seq->rwcfg->mem_dq_per_write_dqs];
3b44f55c
MV
3132 int mid;
3133 int mid_min, orig_mid_min;
3134 int new_dqs, start_dqs;
3135 int dq_margin, dqs_margin, dm_margin;
285b3cb9
SG
3136 int bgn_curr = seq->iocfg->io_out1_delay_max + 1;
3137 int end_curr = seq->iocfg->io_out1_delay_max + 1;
3138 int bgn_best = seq->iocfg->io_out1_delay_max + 1;
3139 int end_best = seq->iocfg->io_out1_delay_max + 1;
3b44f55c 3140 int win_best = 0;
3da42859 3141
c4907898
MV
3142 int ret;
3143
3da42859
DN
3144 debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);
3145
3146 dm_margin = 0;
3147
c6540872
MV
3148 start_dqs = readl((SDR_PHYGRP_SCCGRP_ADDRESS |
3149 SCC_MGR_IO_OUT1_DELAY_OFFSET) +
285b3cb9 3150 (seq->rwcfg->mem_dq_per_write_dqs << 2));
3da42859 3151
3b44f55c 3152 /* Per-bit deskew. */
3da42859
DN
3153
3154 /*
3b44f55c 3155 * Set the left and right edge of each bit to an illegal value.
285b3cb9 3156 * Use (seq->iocfg->io_out1_delay_max + 1) as an illegal value.
3da42859
DN
3157 */
3158 sticky_bit_chk = 0;
285b3cb9
SG
3159 for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) {
3160 left_edge[i] = seq->iocfg->io_out1_delay_max + 1;
3161 right_edge[i] = seq->iocfg->io_out1_delay_max + 1;
3da42859
DN
3162 }
3163
3b44f55c 3164 /* Search for the left edge of the window for each bit. */
285b3cb9 3165 search_left_edge(seq, 1, rank_bgn, write_group, 0, test_bgn,
0c4be198 3166 &sticky_bit_chk,
71120773 3167 left_edge, right_edge, 0);
3da42859 3168
3b44f55c 3169 /* Search for the right edge of the window for each bit. */
285b3cb9 3170 ret = search_right_edge(seq, 1, rank_bgn, write_group, 0,
c4907898 3171 start_dqs, 0,
0c4be198 3172 &sticky_bit_chk,
c4907898
MV
3173 left_edge, right_edge, 0);
3174 if (ret) {
285b3cb9
SG
3175 set_failing_group_stage(seq, test_bgn + ret - 1,
3176 CAL_STAGE_WRITES,
c4907898 3177 CAL_SUBSTAGE_WRITES_CENTER);
d043ee5b 3178 return -EINVAL;
3da42859
DN
3179 }
3180
285b3cb9
SG
3181 min_index = get_window_mid_index(seq, 1, left_edge, right_edge,
3182 &mid_min);
3da42859 3183
3b44f55c 3184 /* Determine the amount we can change DQS (which is -mid_min). */
3da42859
DN
3185 orig_mid_min = mid_min;
3186 new_dqs = start_dqs;
3187 mid_min = 0;
ea9aa241 3188 debug_cond(DLEVEL >= 1,
3b44f55c
MV
3189 "%s:%d write_center: start_dqs=%d new_dqs=%d mid_min=%d\n",
3190 __func__, __LINE__, start_dqs, new_dqs, mid_min);
3da42859 3191
ffb8b66e 3192 /* Add delay to bring centre of all DQ windows to the same "level". */
285b3cb9 3193 center_dq_windows(seq, 1, left_edge, right_edge, mid_min, orig_mid_min,
ffb8b66e 3194 min_index, 0, &dq_margin, &dqs_margin);
3da42859
DN
3195
3196 /* Move DQS */
285b3cb9 3197 scc_mgr_apply_group_dqs_io_and_oct_out1(seq, write_group, new_dqs);
1273dd9e 3198 writel(0, &sdr_scc_mgr->update);
3da42859
DN
3199
3200 /* Centre DM */
ea9aa241 3201 debug_cond(DLEVEL >= 2, "%s:%d write_center: DM\n", __func__, __LINE__);
3da42859 3202
3b44f55c 3203 /* Search for the/part of the window with DM shift. */
285b3cb9 3204 search_window(seq, 1, rank_bgn, write_group, &bgn_curr, &end_curr,
c8570afa 3205 &bgn_best, &end_best, &win_best, 0);
3da42859 3206
3b44f55c 3207 /* Reset DM delay chains to 0. */
285b3cb9 3208 scc_mgr_apply_group_dm_out1_delay(seq, 0);
3da42859
DN
3209
3210 /*
 3211	 * Check to see if the current window nudges up against 0 delay.
 3212	 * If so, we need to continue the search by shifting DQS; otherwise
3b44f55c
MV
 3213	 * the DQS search begins as a new search.
3214 */
3da42859 3215 if (end_curr != 0) {
285b3cb9
SG
3216 bgn_curr = seq->iocfg->io_out1_delay_max + 1;
3217 end_curr = seq->iocfg->io_out1_delay_max + 1;
3da42859
DN
3218 }
3219
3b44f55c 3220 /* Search for the/part of the window with DQS shifts. */
285b3cb9 3221 search_window(seq, 0, rank_bgn, write_group, &bgn_curr, &end_curr,
c8570afa 3222 &bgn_best, &end_best, &win_best, new_dqs);
3da42859 3223
3b44f55c
MV
3224 /* Assign left and right edge for cal and reporting. */
3225 left_edge[0] = -1 * bgn_best;
3da42859
DN
3226 right_edge[0] = end_best;
3227
ea9aa241 3228 debug_cond(DLEVEL >= 2, "%s:%d dm_calib: left=%d right=%d\n",
3b44f55c 3229 __func__, __LINE__, left_edge[0], right_edge[0]);
3da42859 3230
3b44f55c 3231 /* Move DQS (back to orig). */
285b3cb9 3232 scc_mgr_apply_group_dqs_io_and_oct_out1(seq, write_group, new_dqs);
3da42859
DN
3233
3234 /* Move DM */
3235
3b44f55c 3236 /* Find middle of window for the DM bit. */
3da42859
DN
3237 mid = (left_edge[0] - right_edge[0]) / 2;
3238
3b44f55c 3239 /* Only move right, since we are not moving DQS/DQ. */
3da42859
DN
3240 if (mid < 0)
3241 mid = 0;
3242
3b44f55c 3243 /* dm_marign should fail if we never find a window. */
3da42859
DN
3244 if (win_best == 0)
3245 dm_margin = -1;
3246 else
3247 dm_margin = left_edge[0] - mid;
3248
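	/*
	 * Worked example (hypothetical values): bgn_best = -3 and
	 * end_best = 7 give left_edge[0] = 3 and right_edge[0] = 7,
	 * so mid = (3 - 7) / 2 = -2, which is clamped to 0, and
	 * dm_margin = 3 - 0 = 3.
	 */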
285b3cb9 3249 scc_mgr_apply_group_dm_out1_delay(seq, mid);
1273dd9e 3250 writel(0, &sdr_scc_mgr->update);
3da42859 3251
ea9aa241 3252 debug_cond(DLEVEL >= 2,
3b44f55c
MV
3253 "%s:%d dm_calib: left=%d right=%d mid=%d dm_margin=%d\n",
3254 __func__, __LINE__, left_edge[0], right_edge[0],
3255 mid, dm_margin);
3256 /* Export values. */
285b3cb9 3257 seq->gbl.fom_out += dq_margin + dqs_margin;
3da42859 3258
ea9aa241 3259 debug_cond(DLEVEL >= 2,
3b44f55c
MV
3260 "%s:%d write_center: dq_margin=%d dqs_margin=%d dm_margin=%d\n",
3261 __func__, __LINE__, dq_margin, dqs_margin, dm_margin);
3da42859
DN
3262
3263 /*
3264 * Do not remove this line as it makes sure all of our
3265 * decisions have been applied.
3266 */
1273dd9e 3267 writel(0, &sdr_scc_mgr->update);
3b44f55c 3268
d043ee5b
MV
3269 if ((dq_margin < 0) || (dqs_margin < 0) || (dm_margin < 0))
3270 return -EINVAL;
3271
3272 return 0;
3da42859
DN
3273}
3274
db3a6061
MV
3275/**
3276 * rw_mgr_mem_calibrate_writes() - Write Calibration Part One
3277 * @rank_bgn: Rank number
3278 * @group: Read/Write Group
3279 * @test_bgn: Rank at which the test begins
3280 *
3281 * Stage 2: Write Calibration Part One.
3282 *
3283 * This function implements UniPHY calibration Stage 2, as explained in
 3284 * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
3285 */
285b3cb9
SG
3286static int rw_mgr_mem_calibrate_writes(struct socfpga_sdrseq *seq,
3287 const u32 rank_bgn, const u32 group,
db3a6061 3288 const u32 test_bgn)
3da42859 3289{
db3a6061
MV
3290 int ret;
3291
3292 /* Update info for sims */
3293 debug("%s:%d %u %u\n", __func__, __LINE__, group, test_bgn);
3da42859 3294
db3a6061 3295 reg_file_set_group(group);
3da42859
DN
3296 reg_file_set_stage(CAL_STAGE_WRITES);
3297 reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);
3298
285b3cb9
SG
3299 ret = rw_mgr_mem_calibrate_writes_center(seq, rank_bgn, group,
3300 test_bgn);
d043ee5b 3301 if (ret)
285b3cb9 3302 set_failing_group_stage(seq, group, CAL_STAGE_WRITES,
3da42859 3303 CAL_SUBSTAGE_WRITES_CENTER);
3da42859 3304
d043ee5b 3305 return ret;
3da42859
DN
3306}
3307
4b0ac26a
MV
3308/**
3309 * mem_precharge_and_activate() - Precharge all banks and activate
3310 *
3311 * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
3312 */
285b3cb9 3313static void mem_precharge_and_activate(struct socfpga_sdrseq *seq)
3da42859 3314{
4b0ac26a 3315 int r;
3da42859 3316
285b3cb9 3317 for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r++) {
4b0ac26a 3318 /* Set rank. */
285b3cb9 3319 set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_OFF);
3da42859 3320
4b0ac26a 3321 /* Precharge all banks. */
285b3cb9 3322 writel(seq->rwcfg->precharge_all, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1273dd9e 3323 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
3da42859 3324
1273dd9e 3325 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
285b3cb9 3326 writel(seq->rwcfg->activate_0_and_1_wait1,
139823ec 3327 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
3da42859 3328
1273dd9e 3329 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
285b3cb9 3330 writel(seq->rwcfg->activate_0_and_1_wait2,
139823ec 3331 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
3da42859 3332
4b0ac26a 3333 /* Activate rows. */
285b3cb9
SG
3334 writel(seq->rwcfg->activate_0_and_1,
3335 SDR_PHYGRP_RWMGRGRP_ADDRESS |
3336 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
3da42859
DN
3337 }
3338}
3339
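/*
 * Note on the sequence above: the 0x0F values loaded into load_cntr0/1
 * appear to program the RW manager loop counters for the wait states
 * referenced by load_jump_add0/1, so the generated activate_0_and_1
 * sequence idles long enough between the precharge-all and the row
 * activates. The exact counts come from the generated sequencer
 * configuration, not from this file.
 */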
16502a0b
MV
3340/**
3341 * mem_init_latency() - Configure memory RLAT and WLAT settings
3342 *
3343 * Configure memory RLAT and WLAT parameters.
3344 */
285b3cb9 3345static void mem_init_latency(struct socfpga_sdrseq *seq)
3da42859 3346{
3da42859 3347 /*
16502a0b
MV
3348 * For AV/CV, LFIFO is hardened and always runs at full rate
3349 * so max latency in AFI clocks, used here, is correspondingly
3350 * smaller.
3da42859 3351 */
285b3cb9
SG
3352 const u32 max_latency = (1 << seq->misccfg->max_latency_count_width)
3353 - 1;
16502a0b 3354 u32 rlat, wlat;
3da42859 3355
16502a0b 3356 debug("%s:%d\n", __func__, __LINE__);
3da42859
DN
3357
3358 /*
16502a0b
MV
3359 * Read in write latency.
3360 * WL for Hard PHY does not include additive latency.
3da42859 3361 */
16502a0b
MV
3362 wlat = readl(&data_mgr->t_wl_add);
3363 wlat += readl(&data_mgr->mem_t_add);
3da42859 3364
285b3cb9 3365 seq->gbl.rw_wl_nop_cycles = wlat - 1;
3da42859 3366
16502a0b
MV
 3367	/* Read in read latency. */
3368 rlat = readl(&data_mgr->t_rl_add);
3da42859 3369
16502a0b 3370 /* Set a pretty high read latency initially. */
285b3cb9
SG
3371 seq->gbl.curr_read_lat = rlat + 16;
3372 if (seq->gbl.curr_read_lat > max_latency)
3373 seq->gbl.curr_read_lat = max_latency;
3da42859 3374
285b3cb9 3375 writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat);
3da42859 3376
16502a0b
MV
3377 /* Advertise write latency. */
3378 writel(wlat, &phy_mgr_cfg->afi_wlat);
3da42859
DN
3379}
3380
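/*
 * Worked example for mem_init_latency() (hypothetical register values):
 * t_wl_add = 5 and mem_t_add = 2 give wlat = 7, so rw_wl_nop_cycles = 6.
 * With t_rl_add = 7 the initial read latency is 7 + 16 = 23, which is
 * then capped at max_latency (31 for a 5-bit latency counter).
 */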
51cea0b6
MV
3381/**
 3382 * mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings
3383 *
3384 * Set VFIFO and LFIFO to instant-on settings in skip calibration mode.
3385 */
285b3cb9 3386static void mem_skip_calibrate(struct socfpga_sdrseq *seq)
3da42859 3387{
5ded7320
MV
3388 u32 vfifo_offset;
3389 u32 i, j, r;
3da42859
DN
3390
3391 debug("%s:%d\n", __func__, __LINE__);
3392 /* Need to update every shadow register set used by the interface */
285b3cb9 3393 for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
51cea0b6 3394 r += NUM_RANKS_PER_SHADOW_REG) {
3da42859
DN
3395 /*
3396 * Set output phase alignment settings appropriate for
3397 * skip calibration.
3398 */
285b3cb9 3399 for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) {
3da42859 3400 scc_mgr_set_dqs_en_phase(i, 0);
285b3cb9 3401 if (seq->iocfg->dll_chain_length == 6)
160695d8
MV
3402 scc_mgr_set_dqdqs_output_phase(i, 6);
3403 else
3404 scc_mgr_set_dqdqs_output_phase(i, 7);
3da42859
DN
3405 /*
3406 * Case:33398
3407 *
3408 * Write data arrives to the I/O two cycles before write
3409 * latency is reached (720 deg).
3410 * -> due to bit-slip in a/c bus
3411 * -> to allow board skew where dqs is longer than ck
3412 * -> how often can this happen!?
3413 * -> can claim back some ptaps for high freq
3414 * support if we can relax this, but i digress...
3415 *
3416 * The write_clk leads mem_ck by 90 deg
3417 * The minimum ptap of the OPA is 180 deg
3418 * Each ptap has (360 / IO_DLL_CHAIN_LENGH) deg of delay
3419 * The write_clk is always delayed by 2 ptaps
3420 *
3421 * Hence, to make DQS aligned to CK, we need to delay
3422 * DQS by:
139823ec 3423 * (720 - 90 - 180 - 2) *
285b3cb9 3424 * (360 / seq->iocfg->dll_chain_length)
3da42859 3425 *
285b3cb9
SG
3426 * Dividing the above by
 3427	 * (360 / seq->iocfg->dll_chain_length)
3da42859
DN
 3428	 * gives us the number of ptaps, which simplifies to:
3429 *
285b3cb9 3430 * (1.25 * seq->iocfg->dll_chain_length - 2)
3da42859 3431 */
51cea0b6 3432 scc_mgr_set_dqdqs_output_phase(i,
285b3cb9
SG
3433 ((125 * seq->iocfg->dll_chain_length)
3434 / 100) - 2);
3da42859 3435 }
1273dd9e
MV
3436 writel(0xff, &sdr_scc_mgr->dqs_ena);
3437 writel(0xff, &sdr_scc_mgr->dqs_io_ena);
3da42859 3438
285b3cb9 3439 for (i = 0; i < seq->rwcfg->mem_if_write_dqs_width; i++) {
1273dd9e
MV
3440 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3441 SCC_MGR_GROUP_COUNTER_OFFSET);
3da42859 3442 }
1273dd9e
MV
3443 writel(0xff, &sdr_scc_mgr->dq_ena);
3444 writel(0xff, &sdr_scc_mgr->dm_ena);
3445 writel(0, &sdr_scc_mgr->update);
3da42859
DN
3446 }
3447
3448 /* Compensate for simulation model behaviour */
285b3cb9 3449 for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) {
3da42859
DN
3450 scc_mgr_set_dqs_bus_in_delay(i, 10);
3451 scc_mgr_load_dqs(i);
3452 }
1273dd9e 3453 writel(0, &sdr_scc_mgr->update);
3da42859
DN
3454
3455 /*
3456 * ArriaV has hard FIFOs that can only be initialized by incrementing
3457 * in sequencer.
3458 */
285b3cb9 3459 vfifo_offset = seq->misccfg->calib_vfifo_offset;
51cea0b6 3460 for (j = 0; j < vfifo_offset; j++)
1273dd9e 3461 writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
1273dd9e 3462 writel(0, &phy_mgr_cmd->fifo_reset);
3da42859
DN
3463
3464 /*
51cea0b6
MV
3465 * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal
3466 * setting from generation-time constant.
3da42859 3467 */
285b3cb9
SG
3468 seq->gbl.curr_read_lat = seq->misccfg->calib_lfifo_offset;
3469 writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat);
3da42859
DN
3470}
3471
3589fbfb
MV
3472/**
3473 * mem_calibrate() - Memory calibration entry point.
3474 *
3475 * Perform memory calibration.
3476 */
285b3cb9 3477static u32 mem_calibrate(struct socfpga_sdrseq *seq)
3da42859 3478{
5ded7320
MV
3479 u32 i;
3480 u32 rank_bgn, sr;
3481 u32 write_group, write_test_bgn;
3482 u32 read_group, read_test_bgn;
3483 u32 run_groups, current_run;
3484 u32 failing_groups = 0;
3485 u32 group_failed = 0;
3da42859 3486
285b3cb9
SG
3487 const u32 rwdqs_ratio = seq->rwcfg->mem_if_read_dqs_width /
3488 seq->rwcfg->mem_if_write_dqs_width;
33c42bb8 3489
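	/*
	 * Example (hypothetical widths): with mem_if_read_dqs_width = 8
	 * and mem_if_write_dqs_width = 4, rwdqs_ratio = 2, so read
	 * groups 2w and 2w + 1 are calibrated together with write
	 * group w in the loops below.
	 */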
3da42859 3490 debug("%s:%d\n", __func__, __LINE__);
3da42859 3491
16502a0b 3492 /* Initialize the data settings */
285b3cb9
SG
3493 seq->gbl.error_substage = CAL_SUBSTAGE_NIL;
3494 seq->gbl.error_stage = CAL_STAGE_NIL;
3495 seq->gbl.error_group = 0xff;
3496 seq->gbl.fom_in = 0;
3497 seq->gbl.fom_out = 0;
3da42859 3498
16502a0b 3499 /* Initialize WLAT and RLAT. */
285b3cb9 3500 mem_init_latency(seq);
16502a0b
MV
3501
3502 /* Initialize bit slips. */
285b3cb9 3503 mem_precharge_and_activate(seq);
3da42859 3504
285b3cb9 3505 for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) {
1273dd9e
MV
3506 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3507 SCC_MGR_GROUP_COUNTER_OFFSET);
fa5d821b
MV
3508 /* Only needed once to set all groups, pins, DQ, DQS, DM. */
3509 if (i == 0)
3510 scc_mgr_set_hhp_extras();
3511
c5c5f537 3512 scc_set_bypass_mode(i);
3da42859
DN
3513 }
3514
722c9685 3515 /* Calibration is skipped. */
285b3cb9 3516 if ((seq->dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
3da42859
DN
3517 /*
3518 * Set VFIFO and LFIFO to instant-on settings in skip
3519 * calibration mode.
3520 */
285b3cb9 3521 mem_skip_calibrate(seq);
3da42859 3522
722c9685
MV
3523 /*
3524 * Do not remove this line as it makes sure all of our
3525 * decisions have been applied.
3526 */
3527 writel(0, &sdr_scc_mgr->update);
3528 return 1;
3529 }
3da42859 3530
722c9685
MV
3531 /* Calibration is not skipped. */
3532 for (i = 0; i < NUM_CALIB_REPEAT; i++) {
3533 /*
3534 * Zero all delay chain/phase settings for all
3535 * groups and all shadow register sets.
3536 */
285b3cb9 3537 scc_mgr_zero_all(seq);
722c9685 3538
f085ac3b 3539 run_groups = ~0;
722c9685
MV
3540
3541 for (write_group = 0, write_test_bgn = 0; write_group
285b3cb9
SG
3542 < seq->rwcfg->mem_if_write_dqs_width; write_group++,
3543 write_test_bgn += seq->rwcfg->mem_dq_per_write_dqs) {
c452dcd0 3544 /* Initialize the group failure */
722c9685
MV
3545 group_failed = 0;
3546
3547 current_run = run_groups & ((1 <<
3548 RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
3549 run_groups = run_groups >>
3550 RW_MGR_NUM_DQS_PER_WRITE_GROUP;
3551
3552 if (current_run == 0)
3553 continue;
3554
3555 writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
3556 SCC_MGR_GROUP_COUNTER_OFFSET);
285b3cb9 3557 scc_mgr_zero_group(seq, write_group, 0);
722c9685 3558
33c42bb8
MV
3559 for (read_group = write_group * rwdqs_ratio,
3560 read_test_bgn = 0;
c452dcd0 3561 read_group < (write_group + 1) * rwdqs_ratio;
33c42bb8 3562 read_group++,
285b3cb9 3563 read_test_bgn += seq->rwcfg->mem_dq_per_read_dqs) {
33c42bb8
MV
3564 if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO)
3565 continue;
3566
722c9685 3567 /* Calibrate the VFIFO */
285b3cb9 3568 if (rw_mgr_mem_calibrate_vfifo(seq, read_group,
33c42bb8
MV
3569 read_test_bgn))
3570 continue;
3571
285b3cb9 3572 if (!(seq->gbl.phy_debug_mode_flags &
139823ec 3573 PHY_DEBUG_SWEEP_ALL_GROUPS))
33c42bb8 3574 return 0;
c452dcd0
MV
3575
3576 /* The group failed, we're done. */
3577 goto grp_failed;
722c9685 3578 }
3da42859 3579
722c9685 3580 /* Calibrate the output side */
c452dcd0 3581 for (rank_bgn = 0, sr = 0;
285b3cb9 3582 rank_bgn < seq->rwcfg->mem_number_of_ranks;
c452dcd0
MV
3583 rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
3584 if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
3585 continue;
4ac21610 3586
c452dcd0 3587 /* Not needed in quick mode! */
139823ec
MV
3588 if (STATIC_CALIB_STEPS &
3589 CALIB_SKIP_DELAY_SWEEPS)
c452dcd0 3590 continue;
4ac21610 3591
c452dcd0 3592 /* Calibrate WRITEs */
285b3cb9 3593 if (!rw_mgr_mem_calibrate_writes(seq, rank_bgn,
139823ec
MV
3594 write_group,
3595 write_test_bgn))
c452dcd0 3596 continue;
4ac21610 3597
c452dcd0 3598 group_failed = 1;
285b3cb9 3599 if (!(seq->gbl.phy_debug_mode_flags &
139823ec 3600 PHY_DEBUG_SWEEP_ALL_GROUPS))
c452dcd0 3601 return 0;
722c9685 3602 }
3da42859 3603
c452dcd0
MV
3604 /* Some group failed, we're done. */
3605 if (group_failed)
3606 goto grp_failed;
3607
3608 for (read_group = write_group * rwdqs_ratio,
3609 read_test_bgn = 0;
3610 read_group < (write_group + 1) * rwdqs_ratio;
3611 read_group++,
285b3cb9 3612 read_test_bgn += seq->rwcfg->mem_dq_per_read_dqs) {
c452dcd0
MV
3613 if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
3614 continue;
3615
285b3cb9
SG
3616 if (!rw_mgr_mem_calibrate_vfifo_end(seq,
3617 read_group,
139823ec 3618 read_test_bgn))
c452dcd0
MV
3619 continue;
3620
285b3cb9 3621 if (!(seq->gbl.phy_debug_mode_flags &
139823ec 3622 PHY_DEBUG_SWEEP_ALL_GROUPS))
c452dcd0
MV
3623 return 0;
3624
3625 /* The group failed, we're done. */
3626 goto grp_failed;
3da42859
DN
3627 }
3628
c452dcd0
MV
3629 /* No group failed, continue as usual. */
3630 continue;
3631
3632grp_failed: /* A group failed, increment the counter. */
3633 failing_groups++;
722c9685
MV
3634 }
3635
3636 /*
 3637		 * If there are any failing groups then report
3638 * the failure.
3639 */
3640 if (failing_groups != 0)
3641 return 0;
3642
c50ae303
MV
3643 if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO)
3644 continue;
3645
722c9685 3646 /* Calibrate the LFIFO */
285b3cb9 3647 if (!rw_mgr_mem_calibrate_lfifo(seq))
c50ae303 3648 return 0;
3da42859
DN
3649 }
3650
3651 /*
3652 * Do not remove this line as it makes sure all of our decisions
3653 * have been applied.
3654 */
1273dd9e 3655 writel(0, &sdr_scc_mgr->update);
3da42859
DN
3656 return 1;
3657}
3658
23a040c0
MV
3659/**
3660 * run_mem_calibrate() - Perform memory calibration
3661 *
3662 * This function triggers the entire memory calibration procedure.
3663 */
285b3cb9 3664static int run_mem_calibrate(struct socfpga_sdrseq *seq)
3da42859 3665{
23a040c0 3666 int pass;
bba77110 3667 u32 ctrl_cfg;
3da42859
DN
3668
3669 debug("%s:%d\n", __func__, __LINE__);
3670
3671 /* Reset pass/fail status shown on afi_cal_success/fail */
1273dd9e 3672 writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);
3da42859 3673
23a040c0 3674 /* Stop tracking manager. */
bba77110
MV
3675 ctrl_cfg = readl(&sdr_ctrl->ctrl_cfg);
3676 writel(ctrl_cfg & ~SDR_CTRLGRP_CTRLCFG_DQSTRKEN_MASK,
3677 &sdr_ctrl->ctrl_cfg);
3da42859 3678
285b3cb9
SG
3679 phy_mgr_initialize(seq);
3680 rw_mgr_mem_initialize(seq);
3da42859 3681
23a040c0 3682 /* Perform the actual memory calibration. */
285b3cb9 3683 pass = mem_calibrate(seq);
3da42859 3684
285b3cb9 3685 mem_precharge_and_activate(seq);
1273dd9e 3686 writel(0, &phy_mgr_cmd->fifo_reset);
3da42859 3687
23a040c0 3688 /* Handoff. */
285b3cb9 3689 rw_mgr_mem_handoff(seq);
3da42859 3690 /*
23a040c0
MV
3691 * In Hard PHY this is a 2-bit control:
3692 * 0: AFI Mux Select
3693 * 1: DDIO Mux Select
3da42859 3694 */
23a040c0 3695 writel(0x2, &phy_mgr_cfg->mux_sel);
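	/*
	 * Writing 0x2 sets bit 1 (DDIO mux select) and leaves bit 0
	 * (AFI mux select) clear, which appears to hand the interface
	 * from the calibration sequencer back to the controller.
	 */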
3da42859 3696
23a040c0 3697 /* Start tracking manager. */
bba77110 3698 writel(ctrl_cfg, &sdr_ctrl->ctrl_cfg);
23a040c0
MV
3699
3700 return pass;
3701}
3702
3703/**
3704 * debug_mem_calibrate() - Report result of memory calibration
3705 * @pass: Value indicating whether calibration passed or failed
3706 *
3707 * This function reports the results of the memory calibration
3708 * and writes debug information into the register file.
3709 */
285b3cb9 3710static void debug_mem_calibrate(struct socfpga_sdrseq *seq, int pass)
23a040c0 3711{
5ded7320 3712 u32 debug_info;
3da42859
DN
3713
3714 if (pass) {
532010da 3715 debug(KBUILD_BASENAME ": CALIBRATION PASSED\n");
3da42859 3716
285b3cb9
SG
3717 seq->gbl.fom_in /= 2;
3718 seq->gbl.fom_out /= 2;
3da42859 3719
285b3cb9
SG
3720 if (seq->gbl.fom_in > 0xff)
3721 seq->gbl.fom_in = 0xff;
3da42859 3722
285b3cb9
SG
3723 if (seq->gbl.fom_out > 0xff)
3724 seq->gbl.fom_out = 0xff;
3da42859
DN
3725
3726 /* Update the FOM in the register file */
285b3cb9
SG
3727 debug_info = seq->gbl.fom_in;
3728 debug_info |= seq->gbl.fom_out << 8;
1273dd9e 3729 writel(debug_info, &sdr_reg_file->fom);
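		/*
		 * Example of the packing above (hypothetical margins):
		 * fom_in = 0x30 and fom_out = 0x44 yield
		 * debug_info = 0x4430, i.e. fom_in in bits [7:0] and
		 * fom_out in bits [15:8].
		 */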
3da42859 3730
1273dd9e
MV
3731 writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3732 writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
3da42859 3733 } else {
532010da 3734 debug(KBUILD_BASENAME ": CALIBRATION FAILED\n");
3da42859 3735
285b3cb9
SG
3736 debug_info = seq->gbl.error_stage;
3737 debug_info |= seq->gbl.error_substage << 8;
3738 debug_info |= seq->gbl.error_group << 16;
3da42859 3739
1273dd9e
MV
3740 writel(debug_info, &sdr_reg_file->failing_stage);
3741 writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3742 writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);
3da42859
DN
3743
3744 /* Update the failing group/stage in the register file */
285b3cb9
SG
3745 debug_info = seq->gbl.error_stage;
3746 debug_info |= seq->gbl.error_substage << 8;
3747 debug_info |= seq->gbl.error_group << 16;
1273dd9e 3748 writel(debug_info, &sdr_reg_file->failing_stage);
3da42859
DN
3749 }
3750
532010da 3751 debug(KBUILD_BASENAME ": Calibration complete\n");
3da42859
DN
3752}
3753
bb06434b
MV
3754/**
3755 * hc_initialize_rom_data() - Initialize ROM data
3756 *
3757 * Initialize ROM data.
3758 */
3da42859
DN
3759static void hc_initialize_rom_data(void)
3760{
04955cf2
MV
3761 unsigned int nelem = 0;
3762 const u32 *rom_init;
bb06434b 3763 u32 i, addr;
3da42859 3764
04955cf2 3765 socfpga_get_seq_inst_init(&rom_init, &nelem);
c4815f76 3766 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
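	/*
	 * Each ROM entry is a 32-bit word, so i << 2 converts the word
	 * index to a byte offset from the write port base address.
	 */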
04955cf2
MV
3767 for (i = 0; i < nelem; i++)
3768 writel(rom_init[i], addr + (i << 2));
3da42859 3769
04955cf2 3770 socfpga_get_seq_ac_init(&rom_init, &nelem);
c4815f76 3771 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
04955cf2
MV
3772 for (i = 0; i < nelem; i++)
3773 writel(rom_init[i], addr + (i << 2));
3da42859
DN
3774}
3775
9c1ab2ca
MV
3776/**
3777 * initialize_reg_file() - Initialize SDR register file
3778 *
3779 * Initialize SDR register file.
3780 */
285b3cb9 3781static void initialize_reg_file(struct socfpga_sdrseq *seq)
3da42859 3782{
3da42859 3783 /* Initialize the register file with the correct data */
285b3cb9
SG
3784 writel(seq->misccfg->reg_file_init_seq_signature,
3785 &sdr_reg_file->signature);
1273dd9e
MV
3786 writel(0, &sdr_reg_file->debug_data_addr);
3787 writel(0, &sdr_reg_file->cur_stage);
3788 writel(0, &sdr_reg_file->fom);
3789 writel(0, &sdr_reg_file->failing_stage);
3790 writel(0, &sdr_reg_file->debug1);
3791 writel(0, &sdr_reg_file->debug2);
3da42859
DN
3792}
3793
2ca151f8
MV
3794/**
3795 * initialize_hps_phy() - Initialize HPS PHY
3796 *
3797 * Initialize HPS PHY.
3798 */
3da42859
DN
3799static void initialize_hps_phy(void)
3800{
5ded7320 3801 u32 reg;
3da42859
DN
3802 /*
3803 * Tracking also gets configured here because it's in the
3804 * same register.
3805 */
5ded7320
MV
3806 u32 trk_sample_count = 7500;
3807 u32 trk_long_idle_sample_count = (10 << 16) | 100;
3da42859
DN
3808 /*
3809 * Format is number of outer loops in the 16 MSB, sample
3810 * count in 16 LSB.
3811 */
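	/*
	 * Example: (10 << 16) | 100 = 0x000a0064 encodes 10 outer
	 * loops of 100 samples each for the long-idle case.
	 */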
3812
3813 reg = 0;
3814 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
3815 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
3816 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
3817 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
3818 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
3819 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
3820 /*
3821 * This field selects the intrinsic latency to RDATA_EN/FULL path.
3822 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles.
3823 */
3824 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
3825 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
3826 trk_sample_count);
6cb9f167 3827 writel(reg, &sdr_ctrl->phy_ctrl0);
3da42859
DN
3828
3829 reg = 0;
3830 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
3831 trk_sample_count >>
3832 SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
3833 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
3834 trk_long_idle_sample_count);
6cb9f167 3835 writel(reg, &sdr_ctrl->phy_ctrl1);
3da42859
DN
3836
3837 reg = 0;
3838 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
3839 trk_long_idle_sample_count >>
3840 SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
6cb9f167 3841 writel(reg, &sdr_ctrl->phy_ctrl2);
3da42859
DN
3842}
3843
880e46f2
MV
3844/**
3845 * initialize_tracking() - Initialize tracking
3846 *
3847 * Initialize the register file with usable initial data.
3848 */
285b3cb9 3849static void initialize_tracking(struct socfpga_sdrseq *seq)
3da42859 3850{
880e46f2
MV
3851 /*
3852 * Initialize the register file with the correct data.
3853 * Compute usable version of value in case we skip full
3854 * computation later.
3855 */
285b3cb9
SG
3856 writel(DIV_ROUND_UP(seq->iocfg->delay_per_opa_tap,
3857 seq->iocfg->delay_per_dchain_tap) - 1,
880e46f2
MV
3858 &sdr_reg_file->dtaps_per_ptap);
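	/*
	 * Worked example (hypothetical tap delays): with
	 * delay_per_opa_tap = 416 ps and delay_per_dchain_tap = 25 ps,
	 * DIV_ROUND_UP(416, 25) - 1 = 17 - 1 = 16 dtaps per ptap is
	 * written to the register file.
	 */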
3859
3860 /* trk_sample_count */
3861 writel(7500, &sdr_reg_file->trk_sample_count);
3862
3863 /* longidle outer loop [15:0] */
3864 writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle);
3da42859
DN
3865
3866 /*
880e46f2
MV
3867 * longidle sample count [31:24]
 3868	 * trfc, worst case of 933MHz 4Gb [23:16]
3869 * trcd, worst case [15:8]
3870 * vfifo wait [7:0]
3da42859 3871 */
880e46f2
MV
3872 writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0),
3873 &sdr_reg_file->delays);
3da42859 3874
880e46f2 3875 /* mux delay */
9a5a90ad
MV
3876 if (dram_is_ddr(2)) {
3877 writel(0, &sdr_reg_file->trk_rw_mgr_addr);
3878 } else if (dram_is_ddr(3)) {
3879 writel((seq->rwcfg->idle << 24) |
3880 (seq->rwcfg->activate_1 << 16) |
3881 (seq->rwcfg->sgle_read << 8) |
3882 (seq->rwcfg->precharge_all << 0),
3883 &sdr_reg_file->trk_rw_mgr_addr);
3884 }
880e46f2 3885
285b3cb9 3886 writel(seq->rwcfg->mem_if_read_dqs_width,
880e46f2
MV
3887 &sdr_reg_file->trk_read_dqs_width);
3888
3889 /* trefi [7:0] */
9a5a90ad
MV
3890 if (dram_is_ddr(2)) {
3891 writel(1000 << 0, &sdr_reg_file->trk_rfsh);
3892 } else if (dram_is_ddr(3)) {
3893 writel((seq->rwcfg->refresh_all << 24) | (1000 << 0),
3894 &sdr_reg_file->trk_rfsh);
3895 }
3da42859
DN
3896}
3897
29873c74 3898int sdram_calibration_full(struct socfpga_sdr *sdr)
3da42859 3899{
5ded7320 3900 u32 pass;
285b3cb9 3901 struct socfpga_sdrseq seq;
84e0b0cf 3902
29873c74
SG
3903 /*
3904 * For size reasons, this file uses hard coded addresses.
3905 * Check if we are called with the correct address.
3906 */
3907 if (sdr != (struct socfpga_sdr *)SOCFPGA_SDR_ADDRESS)
3908 return -ENODEV;
3909
285b3cb9 3910 memset(&seq, 0, sizeof(seq));
3da42859 3911
285b3cb9
SG
3912 seq.rwcfg = socfpga_get_sdram_rwmgr_config();
3913 seq.iocfg = socfpga_get_sdram_io_config();
3914 seq.misccfg = socfpga_get_sdram_misc_config();
d718a26b 3915
3da42859 3916 /* Set the calibration enabled by default */
285b3cb9 3917 seq.gbl.phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
3da42859
DN
3918 /*
3919 * Only sweep all groups (regardless of fail state) by default
3920 * Set enabled read test by default.
3921 */
3922#if DISABLE_GUARANTEED_READ
285b3cb9 3923 seq.gbl.phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
3da42859
DN
3924#endif
3925 /* Initialize the register file */
285b3cb9 3926 initialize_reg_file(&seq);
3da42859
DN
3927
3928 /* Initialize any PHY CSR */
3929 initialize_hps_phy();
3930
3931 scc_mgr_initialize();
3932
285b3cb9 3933 initialize_tracking(&seq);
3da42859 3934
532010da 3935 debug(KBUILD_BASENAME ": Preparing to start memory calibration\n");
3da42859
DN
3936
3937 debug("%s:%d\n", __func__, __LINE__);
ea9aa241 3938 debug_cond(DLEVEL >= 1,
23f62b36 3939 "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
285b3cb9
SG
3940 seq.rwcfg->mem_number_of_ranks,
3941 seq.rwcfg->mem_number_of_cs_per_dimm,
3942 seq.rwcfg->mem_dq_per_read_dqs,
3943 seq.rwcfg->mem_dq_per_write_dqs,
3944 seq.rwcfg->mem_virtual_groups_per_read_dqs,
3945 seq.rwcfg->mem_virtual_groups_per_write_dqs);
ea9aa241 3946 debug_cond(DLEVEL >= 1,
23f62b36 3947 "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
285b3cb9
SG
3948 seq.rwcfg->mem_if_read_dqs_width,
3949 seq.rwcfg->mem_if_write_dqs_width,
3950 seq.rwcfg->mem_data_width, seq.rwcfg->mem_data_mask_width,
3951 seq.iocfg->delay_per_opa_tap,
3952 seq.iocfg->delay_per_dchain_tap);
ea9aa241 3953 debug_cond(DLEVEL >= 1, "dtap_dqsen_delay=%u, dll=%u",
285b3cb9
SG
3954 seq.iocfg->delay_per_dqs_en_dchain_tap,
3955 seq.iocfg->dll_chain_length);
ea9aa241 3956 debug_cond(DLEVEL >= 1,
139823ec 3957 "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
285b3cb9
SG
3958 seq.iocfg->dqs_en_phase_max, seq.iocfg->dqdqs_out_phase_max,
3959 seq.iocfg->dqs_en_delay_max, seq.iocfg->dqs_in_delay_max);
ea9aa241 3960 debug_cond(DLEVEL >= 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
285b3cb9
SG
3961 seq.iocfg->io_in_delay_max, seq.iocfg->io_out1_delay_max,
3962 seq.iocfg->io_out2_delay_max);
ea9aa241 3963 debug_cond(DLEVEL >= 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
285b3cb9 3964 seq.iocfg->dqs_in_reserve, seq.iocfg->dqs_out_reserve);
3da42859
DN
3965
3966 hc_initialize_rom_data();
3967
3968 /* update info for sims */
3969 reg_file_set_stage(CAL_STAGE_NIL);
3970 reg_file_set_group(0);
3971
3972 /*
3973 * Load global needed for those actions that require
3974 * some dynamic calibration support.
3975 */
285b3cb9 3976 seq.dyn_calib_steps = STATIC_CALIB_STEPS;
3da42859
DN
3977 /*
3978 * Load global to allow dynamic selection of delay loop settings
3979 * based on calibration mode.
3980 */
285b3cb9
SG
3981 if (!(seq.dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
3982 seq.skip_delay_mask = 0xff;
3da42859 3983 else
285b3cb9 3984 seq.skip_delay_mask = 0x0;
3da42859 3985
285b3cb9
SG
3986 pass = run_mem_calibrate(&seq);
3987 debug_mem_calibrate(&seq, pass);
3da42859
DN
3988 return pass;
3989}