// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2008-2016 Freescale Semiconductor, Inc.
 * Copyright 2017-2021 NXP Semiconductor
 */
8 #include <fsl_ddr_sdram.h>
10 #include <asm/bitops.h>
14 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
16 compute_cas_latency(const unsigned int ctrl_num,
17 const dimm_params_t *dimm_params,
18 common_timing_params_t *outpdimm,
19 unsigned int number_of_dimms)
22 unsigned int common_caslat;
23 unsigned int caslat_actual;
24 unsigned int retry = 16;
25 unsigned int tmp = ~0;
26 unsigned int mclk_ps = get_memory_clk_period_ps(ctrl_num);
27 #ifdef CONFIG_SYS_FSL_DDR3
28 const unsigned int taamax = 20000;
30 const unsigned int taamax = 18000;
33 /* compute the common CAS latency supported between slots */
34 for (i = 0; i < number_of_dimms; i++) {
35 if (dimm_params[i].n_ranks)
36 tmp &= dimm_params[i].caslat_x;
41 printf("DDR clock (MCLK cycle was 0 ps), So setting it to slowest DIMM(s) (tCKmin %u ps).\n",
42 outpdimm->tckmin_x_ps);
43 mclk_ps = outpdimm->tckmin_x_ps;
46 /* validate if the memory clk is in the range of dimms */
47 if (mclk_ps < outpdimm->tckmin_x_ps) {
48 printf("DDR clock (MCLK cycle %u ps) is faster than "
49 "the slowest DIMM(s) (tCKmin %u ps) can support.\n",
50 mclk_ps, outpdimm->tckmin_x_ps);
52 #ifdef CONFIG_SYS_FSL_DDR4
53 if (mclk_ps > outpdimm->tckmax_ps) {
54 printf("DDR clock (MCLK cycle %u ps) is slower than DIMM(s) (tCKmax %u ps) can support.\n",
55 mclk_ps, outpdimm->tckmax_ps);
58 /* determine the acutal cas latency */
59 caslat_actual = (outpdimm->taamin_ps + mclk_ps - 1) / mclk_ps;
60 /* check if the dimms support the CAS latency */
61 while (!(common_caslat & (1 << caslat_actual)) && retry > 0) {
65 /* once the caculation of caslat_actual is completed
66 * we must verify that this CAS latency value does not
67 * exceed tAAmax, which is 20 ns for all DDR3 speed grades,
68 * 18ns for all DDR4 speed grades.
70 if (caslat_actual * mclk_ps > taamax) {
71 printf("The chosen cas latency %d is too large\n",
74 outpdimm->lowest_common_spd_caslat = caslat_actual;
75 debug("lowest_common_spd_caslat is 0x%x\n", caslat_actual);
79 #else /* for DDR1 and DDR2 */
81 compute_cas_latency(const unsigned int ctrl_num,
82 const dimm_params_t *dimm_params,
83 common_timing_params_t *outpdimm,
84 unsigned int number_of_dimms)
87 const unsigned int mclk_ps = get_memory_clk_period_ps(ctrl_num);
88 unsigned int lowest_good_caslat;
90 unsigned int temp1, temp2;
92 debug("using mclk_ps = %u\n", mclk_ps);
93 if (mclk_ps > outpdimm->tckmax_ps) {
94 printf("Warning: DDR clock (%u ps) is slower than DIMM(s) (tCKmax %u ps)\n",
95 mclk_ps, outpdimm->tckmax_ps);
99 * Compute a CAS latency suitable for all DIMMs
101 * Strategy for SPD-defined latencies: compute only
102 * CAS latency defined by all DIMMs.
106 * Step 1: find CAS latency common to all DIMMs using bitwise
110 for (i = 0; i < number_of_dimms; i++) {
111 if (dimm_params[i].n_ranks) {
113 temp2 |= 1 << dimm_params[i].caslat_x;
114 temp2 |= 1 << dimm_params[i].caslat_x_minus_1;
115 temp2 |= 1 << dimm_params[i].caslat_x_minus_2;
117 * If there was no entry for X-2 (X-1) in
118 * the SPD, then caslat_x_minus_2
119 * (caslat_x_minus_1) contains either 255 or
120 * 0xFFFFFFFF because that's what the glorious
121 * __ilog2 function returns for an input of 0.
122 * On 32-bit PowerPC, left shift counts with bit
123 * 26 set (that the value of 255 or 0xFFFFFFFF
124 * will have), cause the destination register to
125 * be 0. That is why this works.
132 * Step 2: check each common CAS latency against tCK of each
135 lowest_good_caslat = 0;
139 temp2 = __ilog2(temp1);
140 debug("checking common caslat = %u\n", temp2);
142 /* Check if this CAS latency will work on all DIMMs at tCK. */
143 for (i = 0; i < number_of_dimms; i++) {
144 if (!dimm_params[i].n_ranks)
147 if (dimm_params[i].caslat_x == temp2) {
148 if (mclk_ps >= dimm_params[i].tckmin_x_ps) {
149 debug("CL = %u ok on DIMM %u at tCK=%u ps with tCKmin_X_ps of %u\n",
151 dimm_params[i].tckmin_x_ps);
158 if (dimm_params[i].caslat_x_minus_1 == temp2) {
159 unsigned int tckmin_x_minus_1_ps
160 = dimm_params[i].tckmin_x_minus_1_ps;
161 if (mclk_ps >= tckmin_x_minus_1_ps) {
162 debug("CL = %u ok on DIMM %u at tCK=%u ps with tckmin_x_minus_1_ps of %u\n",
164 tckmin_x_minus_1_ps);
171 if (dimm_params[i].caslat_x_minus_2 == temp2) {
172 unsigned int tckmin_x_minus_2_ps
173 = dimm_params[i].tckmin_x_minus_2_ps;
174 if (mclk_ps >= tckmin_x_minus_2_ps) {
175 debug("CL = %u ok on DIMM %u at tCK=%u ps with tckmin_x_minus_2_ps of %u\n",
177 tckmin_x_minus_2_ps);
186 lowest_good_caslat = temp2;
188 temp1 &= ~(1 << temp2);
191 debug("lowest common SPD-defined CAS latency = %u\n",
193 outpdimm->lowest_common_spd_caslat = lowest_good_caslat;
197 * Compute a common 'de-rated' CAS latency.
199 * The strategy here is to find the *highest* dereated cas latency
200 * with the assumption that all of the DIMMs will support a dereated
201 * CAS latency higher than or equal to their lowest dereated value.
204 for (i = 0; i < number_of_dimms; i++)
205 temp1 = max(temp1, dimm_params[i].caslat_lowest_derated);
207 outpdimm->highest_common_derated_caslat = temp1;
208 debug("highest common dereated CAS latency = %u\n", temp1);
/*
 * compute_lowest_common_dimm_parameters()
 *
 * Determine the worst-case DIMM timing parameters from the set of DIMMs
 * whose parameters have been computed into the array pointed to
 * by dimm_params.
 */
222 compute_lowest_common_dimm_parameters(const unsigned int ctrl_num,
223 const dimm_params_t *dimm_params,
224 common_timing_params_t *outpdimm,
225 const unsigned int number_of_dimms)
229 unsigned int tckmin_x_ps = 0;
230 unsigned int tckmax_ps = 0xFFFFFFFF;
231 unsigned int trcd_ps = 0;
232 unsigned int trp_ps = 0;
233 unsigned int tras_ps = 0;
234 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
235 unsigned int taamin_ps = 0;
237 #ifdef CONFIG_SYS_FSL_DDR4
238 unsigned int twr_ps = 15000;
239 unsigned int trfc1_ps = 0;
240 unsigned int trfc2_ps = 0;
241 unsigned int trfc4_ps = 0;
242 unsigned int trrds_ps = 0;
243 unsigned int trrdl_ps = 0;
244 unsigned int tccdl_ps = 0;
245 unsigned int trfc_slr_ps = 0;
247 unsigned int twr_ps = 0;
248 unsigned int twtr_ps = 0;
249 unsigned int trfc_ps = 0;
250 unsigned int trrd_ps = 0;
251 unsigned int trtp_ps = 0;
253 unsigned int trc_ps = 0;
254 unsigned int refresh_rate_ps = 0;
255 unsigned int extended_op_srt = 1;
256 #if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
257 unsigned int tis_ps = 0;
258 unsigned int tih_ps = 0;
259 unsigned int tds_ps = 0;
260 unsigned int tdh_ps = 0;
261 unsigned int tdqsq_max_ps = 0;
262 unsigned int tqhs_ps = 0;
264 unsigned int temp1, temp2;
265 unsigned int additive_latency = 0;
268 for (i = 0; i < number_of_dimms; i++) {
270 * If there are no ranks on this DIMM,
271 * it probably doesn't exist, so skip it.
273 if (dimm_params[i].n_ranks == 0) {
277 if (dimm_params[i].n_ranks == 4 && i != 0) {
278 printf("Found Quad-rank DIMM in wrong bank, ignored."
279 " Software may not run as expected.\n");
285 * check if quad-rank DIMM is plugged if
286 * CONFIG_CHIP_SELECT_QUAD_CAPABLE is not defined
287 * Only the board with proper design is capable
289 #ifndef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
290 if (dimm_params[i].n_ranks == 4 && \
291 CONFIG_CHIP_SELECTS_PER_CTRL/CONFIG_DIMM_SLOTS_PER_CTLR < 4) {
292 printf("Found Quad-rank DIMM, not able to support.");
298 * Find minimum tckmax_ps to find fastest slow speed,
299 * i.e., this is the slowest the whole system can go.
301 tckmax_ps = min(tckmax_ps,
302 (unsigned int)dimm_params[i].tckmax_ps);
303 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
304 taamin_ps = max(taamin_ps,
305 (unsigned int)dimm_params[i].taa_ps);
307 tckmin_x_ps = max(tckmin_x_ps,
308 (unsigned int)dimm_params[i].tckmin_x_ps);
309 trcd_ps = max(trcd_ps, (unsigned int)dimm_params[i].trcd_ps);
310 trp_ps = max(trp_ps, (unsigned int)dimm_params[i].trp_ps);
311 tras_ps = max(tras_ps, (unsigned int)dimm_params[i].tras_ps);
312 #ifdef CONFIG_SYS_FSL_DDR4
313 trfc1_ps = max(trfc1_ps,
314 (unsigned int)dimm_params[i].trfc1_ps);
315 trfc2_ps = max(trfc2_ps,
316 (unsigned int)dimm_params[i].trfc2_ps);
317 trfc4_ps = max(trfc4_ps,
318 (unsigned int)dimm_params[i].trfc4_ps);
319 trrds_ps = max(trrds_ps,
320 (unsigned int)dimm_params[i].trrds_ps);
321 trrdl_ps = max(trrdl_ps,
322 (unsigned int)dimm_params[i].trrdl_ps);
323 tccdl_ps = max(tccdl_ps,
324 (unsigned int)dimm_params[i].tccdl_ps);
325 trfc_slr_ps = max(trfc_slr_ps,
326 (unsigned int)dimm_params[i].trfc_slr_ps);
328 twr_ps = max(twr_ps, (unsigned int)dimm_params[i].twr_ps);
329 twtr_ps = max(twtr_ps, (unsigned int)dimm_params[i].twtr_ps);
330 trfc_ps = max(trfc_ps, (unsigned int)dimm_params[i].trfc_ps);
331 trrd_ps = max(trrd_ps, (unsigned int)dimm_params[i].trrd_ps);
332 trtp_ps = max(trtp_ps, (unsigned int)dimm_params[i].trtp_ps);
334 trc_ps = max(trc_ps, (unsigned int)dimm_params[i].trc_ps);
335 #if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
336 tis_ps = max(tis_ps, (unsigned int)dimm_params[i].tis_ps);
337 tih_ps = max(tih_ps, (unsigned int)dimm_params[i].tih_ps);
338 tds_ps = max(tds_ps, (unsigned int)dimm_params[i].tds_ps);
339 tdh_ps = max(tdh_ps, (unsigned int)dimm_params[i].tdh_ps);
340 tqhs_ps = max(tqhs_ps, (unsigned int)dimm_params[i].tqhs_ps);
342 * Find maximum tdqsq_max_ps to find slowest.
344 * FIXME: is finding the slowest value the correct
345 * strategy for this parameter?
347 tdqsq_max_ps = max(tdqsq_max_ps,
348 (unsigned int)dimm_params[i].tdqsq_max_ps);
350 refresh_rate_ps = max(refresh_rate_ps,
351 (unsigned int)dimm_params[i].refresh_rate_ps);
352 /* extended_op_srt is either 0 or 1, 0 having priority */
353 extended_op_srt = min(extended_op_srt,
354 (unsigned int)dimm_params[i].extended_op_srt);
357 outpdimm->ndimms_present = number_of_dimms - temp1;
359 if (temp1 == number_of_dimms) {
360 debug("no dimms this memory controller\n");
364 outpdimm->tckmin_x_ps = tckmin_x_ps;
365 outpdimm->tckmax_ps = tckmax_ps;
366 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
367 outpdimm->taamin_ps = taamin_ps;
369 outpdimm->trcd_ps = trcd_ps;
370 outpdimm->trp_ps = trp_ps;
371 outpdimm->tras_ps = tras_ps;
372 #ifdef CONFIG_SYS_FSL_DDR4
373 outpdimm->trfc1_ps = trfc1_ps;
374 outpdimm->trfc2_ps = trfc2_ps;
375 outpdimm->trfc4_ps = trfc4_ps;
376 outpdimm->trrds_ps = trrds_ps;
377 outpdimm->trrdl_ps = trrdl_ps;
378 outpdimm->tccdl_ps = tccdl_ps;
379 outpdimm->trfc_slr_ps = trfc_slr_ps;
381 outpdimm->twtr_ps = twtr_ps;
382 outpdimm->trfc_ps = trfc_ps;
383 outpdimm->trrd_ps = trrd_ps;
384 outpdimm->trtp_ps = trtp_ps;
386 outpdimm->twr_ps = twr_ps;
387 outpdimm->trc_ps = trc_ps;
388 outpdimm->refresh_rate_ps = refresh_rate_ps;
389 outpdimm->extended_op_srt = extended_op_srt;
390 #if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
391 outpdimm->tis_ps = tis_ps;
392 outpdimm->tih_ps = tih_ps;
393 outpdimm->tds_ps = tds_ps;
394 outpdimm->tdh_ps = tdh_ps;
395 outpdimm->tdqsq_max_ps = tdqsq_max_ps;
396 outpdimm->tqhs_ps = tqhs_ps;
399 /* Determine common burst length for all DIMMs. */
401 for (i = 0; i < number_of_dimms; i++) {
402 if (dimm_params[i].n_ranks) {
403 temp1 &= dimm_params[i].burst_lengths_bitmask;
406 outpdimm->all_dimms_burst_lengths_bitmask = temp1;
408 /* Determine if all DIMMs registered buffered. */
410 for (i = 0; i < number_of_dimms; i++) {
411 if (dimm_params[i].n_ranks) {
412 if (dimm_params[i].registered_dimm) {
414 #ifndef CONFIG_SPL_BUILD
415 printf("Detected RDIMM %s\n",
416 dimm_params[i].mpart);
420 #ifndef CONFIG_SPL_BUILD
421 printf("Detected UDIMM %s\n",
422 dimm_params[i].mpart);
428 outpdimm->all_dimms_registered = 0;
429 outpdimm->all_dimms_unbuffered = 0;
430 if (temp1 && !temp2) {
431 outpdimm->all_dimms_registered = 1;
432 } else if (!temp1 && temp2) {
433 outpdimm->all_dimms_unbuffered = 1;
435 printf("ERROR: Mix of registered buffered and unbuffered "
436 "DIMMs detected!\n");
440 if (outpdimm->all_dimms_registered)
441 for (j = 0; j < 16; j++) {
442 outpdimm->rcw[j] = dimm_params[0].rcw[j];
443 for (i = 1; i < number_of_dimms; i++) {
444 if (!dimm_params[i].n_ranks)
446 if (dimm_params[i].rcw[j] != dimm_params[0].rcw[j]) {
454 printf("ERROR: Mix different RDIMM detected!\n");
456 /* calculate cas latency for all DDR types */
457 if (compute_cas_latency(ctrl_num, dimm_params,
458 outpdimm, number_of_dimms))
461 /* Determine if all DIMMs ECC capable. */
463 for (i = 0; i < number_of_dimms; i++) {
464 if (dimm_params[i].n_ranks &&
465 !(dimm_params[i].edc_config & EDC_ECC)) {
471 debug("all DIMMs ECC capable\n");
473 debug("Warning: not all DIMMs ECC capable, cant enable ECC\n");
475 outpdimm->all_dimms_ecc_capable = temp1;
478 * Compute additive latency.
480 * For DDR1, additive latency should be 0.
482 * For DDR2, with ODT enabled, use "a value" less than ACTTORW,
483 * which comes from Trcd, and also note that:
484 * add_lat + caslat must be >= 4
486 * For DDR3, we use the AL=0
488 * When to use additive latency for DDR2:
490 * I. Because you are using CL=3 and need to do ODT on writes and
491 * want functionality.
492 * 1. Are you going to use ODT? (Does your board not have
493 * additional termination circuitry for DQ, DQS, DQS_,
494 * DM, RDQS, RDQS_ for x4/x8 configs?)
495 * 2. If so, is your lowest supported CL going to be 3?
496 * 3. If so, then you must set AL=1 because
498 * WL >= 3 for ODT on writes
507 * RL >= 3 for ODT on reads
510 * Since CL aren't usually less than 2, AL=0 is a minimum,
511 * so the WL-derived AL should be the -- FIXME?
513 * II. Because you are using auto-precharge globally and want to
514 * use additive latency (posted CAS) to get more bandwidth.
515 * 1. Are you going to use auto-precharge mode globally?
517 * Use addtivie latency and compute AL to be 1 cycle less than
518 * tRCD, i.e. the READ or WRITE command is in the cycle
519 * immediately following the ACTIVATE command..
521 * III. Because you feel like it or want to do some sort of
522 * degraded-performance experiment.
523 * 1. Do you just want to use additive latency because you feel
526 * Validation: AL is less than tRCD, and within the other
527 * read-to-precharge constraints.
530 additive_latency = 0;
532 #if defined(CONFIG_SYS_FSL_DDR2)
533 if ((outpdimm->lowest_common_spd_caslat < 4) &&
534 (picos_to_mclk(ctrl_num, trcd_ps) >
535 outpdimm->lowest_common_spd_caslat)) {
536 additive_latency = picos_to_mclk(ctrl_num, trcd_ps) -
537 outpdimm->lowest_common_spd_caslat;
538 if (mclk_to_picos(ctrl_num, additive_latency) > trcd_ps) {
539 additive_latency = picos_to_mclk(ctrl_num, trcd_ps);
540 debug("setting additive_latency to %u because it was "
541 " greater than tRCD_ps\n", additive_latency);
547 * Validate additive latency
551 if (mclk_to_picos(ctrl_num, additive_latency) > trcd_ps) {
552 printf("Error: invalid additive latency exceeds tRCD(min).\n");
557 * RL = CL + AL; RL >= 3 for ODT_RD_CFG to be enabled
558 * WL = RL - 1; WL >= 3 for ODT_WL_CFG to be enabled
559 * ADD_LAT (the register) must be set to a value less
560 * than ACTTORW if WL = 1, then AL must be set to 1
561 * RD_TO_PRE (the register) must be set to a minimum
562 * tRTP + AL if AL is nonzero
566 * Additive latency will be applied only if the memctl option to
569 outpdimm->additive_latency = additive_latency;
571 debug("tCKmin_ps = %u\n", outpdimm->tckmin_x_ps);
572 debug("trcd_ps = %u\n", outpdimm->trcd_ps);
573 debug("trp_ps = %u\n", outpdimm->trp_ps);
574 debug("tras_ps = %u\n", outpdimm->tras_ps);
575 #ifdef CONFIG_SYS_FSL_DDR4
576 debug("trfc1_ps = %u\n", trfc1_ps);
577 debug("trfc2_ps = %u\n", trfc2_ps);
578 debug("trfc4_ps = %u\n", trfc4_ps);
579 debug("trrds_ps = %u\n", trrds_ps);
580 debug("trrdl_ps = %u\n", trrdl_ps);
581 debug("tccdl_ps = %u\n", tccdl_ps);
582 debug("trfc_slr_ps = %u\n", trfc_slr_ps);
584 debug("twtr_ps = %u\n", outpdimm->twtr_ps);
585 debug("trfc_ps = %u\n", outpdimm->trfc_ps);
586 debug("trrd_ps = %u\n", outpdimm->trrd_ps);
588 debug("twr_ps = %u\n", outpdimm->twr_ps);
589 debug("trc_ps = %u\n", outpdimm->trc_ps);