// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale eSDHC controller driver.
 *
 * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
 * Copyright (c) 2009 MontaVista Software, Inc.
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/sys_soc.h>
#include <linux/clk.h>
#include <linux/ktime.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"

#define VENDOR_V_22	0x12
#define VENDOR_V_23	0x13

#define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)

struct esdhc_clk_fixup {
	const unsigned int sd_dflt_max_clk;
	const unsigned int max_clk[MMC_TIMING_NUM];
};

static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_MMC_HS] = 46500000,
	.max_clk[MMC_TIMING_SD_HS] = 46500000,
};

static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
	.max_clk[MMC_TIMING_MMC_HS200] = 167000000,
};

static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
	.max_clk[MMC_TIMING_MMC_HS200] = 125000000,
};

static const struct esdhc_clk_fixup p1010_esdhc_clk = {
	.sd_dflt_max_clk = 20000000,
	.max_clk[MMC_TIMING_LEGACY] = 20000000,
	.max_clk[MMC_TIMING_MMC_HS] = 42000000,
	.max_clk[MMC_TIMING_SD_HS] = 40000000,
};
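
/*
 * Worked example of how these tables are used (see esdhc_of_set_clock()):
 * on p1010 a 52 MHz request in MMC high-speed timing is capped to the
 * max_clk[MMC_TIMING_MMC_HS] entry of 42 MHz, and an SD card still running
 * legacy timing is capped to sd_dflt_max_clk, i.e. 20 MHz.
 */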

static const struct of_device_id sdhci_esdhc_of_match[] = {
	{ .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
	{ .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
	{ .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
	{ .compatible = "fsl,p1010-esdhc", .data = &p1010_esdhc_clk},
	{ .compatible = "fsl,mpc8379-esdhc" },
	{ .compatible = "fsl,mpc8536-esdhc" },
	{ .compatible = "fsl,esdhc" },
	{ }
};
MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
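
/*
 * Minimal, illustrative device tree node for this driver (register, clock
 * and interrupt values below are hypothetical, not taken from a real board
 * file):
 *
 *	esdhc@1560000 {
 *		compatible = "fsl,ls1046a-esdhc", "fsl,esdhc";
 *		reg = <0x0 0x1560000 0x0 0x10000>;
 *		interrupts = <0 62 4>;
 *		clocks = <&clockgen 2 1>;
 *		little-endian;
 *		bus-width = <4>;
 *	};
 *
 * The matched .data entry above selects the per-timing clock fixup table,
 * "little-endian" selects the little-endian register accessors in probe(),
 * and properties such as "bus-width" are handled by mmc_of_parse().
 */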

struct sdhci_esdhc {
	u8 vendor_ver;
	u8 spec_ver;
	bool quirk_incorrect_hostver;
	bool quirk_limited_clk_division;
	bool quirk_unreliable_pulse_detection;
	bool quirk_fixup_tuning;
	bool quirk_ignore_data_inhibit;
	unsigned int peripheral_clock;
	const struct esdhc_clk_fixup *clk_fixup;
	u32 div_ratio;
};

/**
 * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register
 *		       to make it compatible with SD spec.
 *
 * @host: pointer to sdhci_host
 * @spec_reg: SD spec register address
 * @value: 32bit eSDHC register value on spec_reg address
 *
 * The SD spec defines 8/16/32/64-bit registers, while all eSDHC registers
 * are 32 bits wide. Register size, address, function and bit positions
 * differ between the eSDHC and the SD spec.
 *
 * Return a fixed up register value
 */
static u32 esdhc_readl_fixup(struct sdhci_host *host,
				     int spec_reg, u32 value)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 ret;

	/*
	 * The ADMA flag bit in the eSDHC is not compatible with the standard
	 * SDHC register, so set the fake flag SDHCI_CAN_DO_ADMA2 when ADMA
	 * is supported by the eSDHC.
	 * On many FSL eSDHC controllers the reset value of SDHCI_CAN_DO_ADMA1
	 * is 1, but some of them cannot actually do ADMA; only controllers
	 * with a vendor version greater than 2.2 (0x12) support ADMA.
	 */
	if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
		if (esdhc->vendor_ver > VENDOR_V_22) {
			ret = value | SDHCI_CAN_DO_ADMA2;
			return ret;
		}
	}
	/*
	 * The DAT[3:0] line signal levels and the CMD line signal level are
	 * not compatible with the standard SDHC register. The line signal
	 * levels DAT[7:0] are at bits 31:24 and the command line signal
	 * level is at bit 23. All other bits are the same as in the
	 * standard SDHC register.
	 */
	if (spec_reg == SDHCI_PRESENT_STATE) {
		ret = value & 0x000fffff;
		ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
		ret |= (value << 1) & SDHCI_CMD_LVL;
		return ret;
	}
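
	/*
	 * Worked example of the remapping above (the input value is made
	 * up): an eSDHC PRSSTAT word with DAT[3:0] high at bits 27:24 and
	 * the CMD level high at bit 23, i.e. 0x0f800000, yields
	 *   (value >> 4) & SDHCI_DATA_LVL_MASK = 0x00f00000 and
	 *   (value << 1) & SDHCI_CMD_LVL       = 0x01000000,
	 * which places the levels at the standard SDHCI positions
	 * (DAT level at bits 23:20, CMD level at bit 24).
	 */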

	/*
	 * Device tree properties of the mmc host are used to enable each
	 * speed mode according to SoC and board capability, so clear the
	 * SDR50/SDR104/DDR50 support bits here.
	 */
	if (spec_reg == SDHCI_CAPABILITIES_1) {
		ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
				SDHCI_SUPPORT_DDR50);
		return ret;
	}

	/*
	 * Some controllers have an unreliable Data Line Active
	 * bit for commands with busy signal. This affects the
	 * Command Inhibit (data) bit. Just ignore it since the
	 * MMC core driver has already polled the card status
	 * with CMD13 after any command with busy signal.
	 */
	if ((spec_reg == SDHCI_PRESENT_STATE) &&
	    (esdhc->quirk_ignore_data_inhibit)) {
		ret = value & ~SDHCI_DATA_INHIBIT;
		return ret;
	}

	ret = value;
	return ret;
}

static u16 esdhc_readw_fixup(struct sdhci_host *host,
				     int spec_reg, u32 value)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u16 ret;
	int shift = (spec_reg & 0x2) * 8;

	if (spec_reg == SDHCI_HOST_VERSION)
		ret = value & 0xffff;
	else
		ret = (value >> shift) & 0xffff;
	/*
	 * Workaround for T4240-R1.0-R2.0 eSDHC which reports incorrect
	 * vendor version and spec version information.
	 */
	if ((spec_reg == SDHCI_HOST_VERSION) &&
	    (esdhc->quirk_incorrect_hostver))
		ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
	return ret;
}

static u8 esdhc_readb_fixup(struct sdhci_host *host,
				     int spec_reg, u32 value)
{
	u8 ret;
	u8 dma_bits;
	int shift = (spec_reg & 0x3) * 8;

	ret = (value >> shift) & 0xff;

	/*
	 * "DMA select" is located at offset 0x28 in the SD specification,
	 * but on P5020 or P3041 it is located at 0x29.
	 */
	if (spec_reg == SDHCI_HOST_CONTROL) {
		/* DMA select is bits 22,23 in the Protocol Control Register */
		dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
		/* fixup the result */
		ret &= ~SDHCI_CTRL_DMA_MASK;
		ret |= dma_bits;
	}
	return ret;
}
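
/*
 * Byte-lane illustration for the readb/readw fixups above (offsets are just
 * examples): the eSDHC only supports 32-bit accesses, so an 8-bit SD spec
 * register at offset reg is fetched from the aligned word at (reg & ~0x3)
 * and extracted with shift = (reg & 0x3) * 8. Reading offset 0x29 therefore
 * uses the word at 0x28 with shift 8, which is exactly where P5020/P3041
 * keep their "DMA select" bits.
 */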

/**
 * esdhc_write*_fixup - Fixup the SD spec register value so that it could be
 *			written into eSDHC register.
 *
 * @host: pointer to sdhci_host
 * @spec_reg: SD spec register address
 * @value: 8/16/32bit SD spec register value that would be written
 * @old_value: 32bit eSDHC register value on spec_reg address
 *
 * The SD spec defines 8/16/32/64-bit registers, while all eSDHC registers
 * are 32 bits wide. Register size, address, function and bit positions
 * differ between the eSDHC and the SD spec.
 *
 * Return a fixed up register value
 */
static u32 esdhc_writel_fixup(struct sdhci_host *host,
				     int spec_reg, u32 value, u32 old_value)
{
	u32 ret;

	/*
	 * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
	 * when SYSCTL[RSTD] is set for some special operations.
	 * It has no impact on other operations.
	 */
	if (spec_reg == SDHCI_INT_ENABLE)
		ret = value | SDHCI_INT_BLK_GAP;
	else
		ret = value;

	return ret;
}

static u32 esdhc_writew_fixup(struct sdhci_host *host,
				     int spec_reg, u16 value, u32 old_value)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	int shift = (spec_reg & 0x2) * 8;
	u32 ret;

	switch (spec_reg) {
	case SDHCI_TRANSFER_MODE:
		/*
		 * Postpone this write, we must do it together with a
		 * command write that is down below. Return old value.
		 */
		pltfm_host->xfer_mode_shadow = value;
		return old_value;
	case SDHCI_COMMAND:
		ret = (value << 16) | pltfm_host->xfer_mode_shadow;
		return ret;
	}

	ret = old_value & (~(0xffff << shift));
	ret |= (value << shift);

	if (spec_reg == SDHCI_BLOCK_SIZE) {
		/*
		 * The last two DMA boundary bits are reserved, and the first
		 * one is used for a non-standard blksz of 4096 bytes that we
		 * don't support yet. So clear the DMA boundary bits.
		 */
		ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
	}
	return ret;
}
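
/*
 * Note on the SDHCI_TRANSFER_MODE/SDHCI_COMMAND handling in
 * esdhc_writew_fixup() above (a sketch of the assumed call order, mirroring
 * how the SDHCI core issues a command): the core writes the transfer mode
 * first and the command register second. The first write is only latched
 * into pltfm_host->xfer_mode_shadow; the second write then combines
 * (command << 16) | transfer_mode into a single 32-bit store, because both
 * fields live in one 32-bit eSDHC register.
 */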

static u32 esdhc_writeb_fixup(struct sdhci_host *host,
				     int spec_reg, u8 value, u32 old_value)
{
	u32 ret;
	u32 dma_bits;
	u8 tmp;
	int shift = (spec_reg & 0x3) * 8;

	/*
	 * eSDHC doesn't have a standard power control register, so we do
	 * nothing here to avoid incorrect operation.
	 */
	if (spec_reg == SDHCI_POWER_CONTROL)
		return old_value;
	/*
	 * "DMA select" is located at offset 0x28 in the SD specification,
	 * but on P5020 or P3041 it is located at 0x29.
	 */
	if (spec_reg == SDHCI_HOST_CONTROL) {
		/*
		 * If the host control register is not standard, exit
		 * this function.
		 */
		if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
			return old_value;

		/* DMA select is bits 22,23 in the Protocol Control Register */
		dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
		ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
		tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
		      (old_value & SDHCI_CTRL_DMA_MASK);
		ret = (ret & (~0xff)) | tmp;

		/* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
		ret &= ~ESDHC_HOST_CONTROL_RES;
		return ret;
	}
	ret = (old_value & (~(0xff << shift))) | (value << shift);
	return ret;
}

static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
{
	u32 ret;
	u32 value;

	if (reg == SDHCI_CAPABILITIES_1)
		value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
	else
		value = ioread32be(host->ioaddr + reg);

	ret = esdhc_readl_fixup(host, reg, value);

	return ret;
}

static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
{
	u32 ret;
	u32 value;

	if (reg == SDHCI_CAPABILITIES_1)
		value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
	else
		value = ioread32(host->ioaddr + reg);

	ret = esdhc_readl_fixup(host, reg, value);

	return ret;
}

static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
{
	u16 ret;
	u32 value;
	int base = reg & ~0x3;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_readw_fixup(host, reg, value);
	return ret;
}

static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
{
	u16 ret;
	u32 value;
	int base = reg & ~0x3;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_readw_fixup(host, reg, value);
	return ret;
}

static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
{
	u8 ret;
	u32 value;
	int base = reg & ~0x3;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_readb_fixup(host, reg, value);
	return ret;
}

static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
{
	u8 ret;
	u32 value;
	int base = reg & ~0x3;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_readb_fixup(host, reg, value);
	return ret;
}

static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
{
	u32 value;

	value = esdhc_writel_fixup(host, reg, val, 0);
	iowrite32be(value, host->ioaddr + reg);
}

static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
{
	u32 value;

	value = esdhc_writel_fixup(host, reg, val, 0);
	iowrite32(value, host->ioaddr + reg);
}

static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
{
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_writew_fixup(host, reg, val, value);
	if (reg != SDHCI_TRANSFER_MODE)
		iowrite32be(ret, host->ioaddr + base);
}

static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
{
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_writew_fixup(host, reg, val, value);
	if (reg != SDHCI_TRANSFER_MODE)
		iowrite32(ret, host->ioaddr + base);
}

static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
{
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_writeb_fixup(host, reg, val, value);
	iowrite32be(ret, host->ioaddr + base);
}

static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
{
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_writeb_fixup(host, reg, val, value);
	iowrite32(ret, host->ioaddr + base);
}

/*
 * For Abort or Suspend after Stop at Block Gap, ignore the ADMA
 * error (IRQSTAT[ADMAE]) if both Transfer Complete (IRQSTAT[TC])
 * and Block Gap Event (IRQSTAT[BGE]) are also set.
 * For Continue, apply a soft reset for data (SYSCTL[RSTD])
 * and re-issue the entire read transaction from the beginning.
 */
static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	bool applicable;
	dma_addr_t dmastart;
	dma_addr_t dmanow;

	applicable = (intmask & SDHCI_INT_DATA_END) &&
		     (intmask & SDHCI_INT_BLK_GAP) &&
		     (esdhc->vendor_ver == VENDOR_V_23);
	if (!applicable)
		return;

	host->data->error = 0;
	dmastart = sg_dma_address(host->data->sg);
	dmanow = dmastart + host->data->bytes_xfered;
	/*
	 * Force update to the next DMA block boundary.
	 */
	dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
		SDHCI_DEFAULT_BOUNDARY_SIZE;
	host->data->bytes_xfered = dmanow - dmastart;
	sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
}
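
/*
 * Note on the address math above: SDHCI_DEFAULT_BOUNDARY_SIZE is 512 KiB,
 * so dmanow is advanced to the next 512 KiB DMA boundary beyond the bytes
 * already transferred, bytes_xfered is updated to match, and the controller
 * is pointed at that address so the read can continue.
 */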

static int esdhc_of_enable_dma(struct sdhci_host *host)
{
	u32 value;
	struct device *dev = mmc_dev(host->mmc);

	if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
	    of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc"))
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));

	value = sdhci_readl(host, ESDHC_DMA_SYSCTL);

	if (of_dma_is_coherent(dev->of_node))
		value |= ESDHC_DMA_SNOOP;
	else
		value &= ~ESDHC_DMA_SNOOP;

	sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
	return 0;
}

static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);

	if (esdhc->peripheral_clock)
		return esdhc->peripheral_clock;
	else
		return pltfm_host->clock;
}

static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	unsigned int clock;

	if (esdhc->peripheral_clock)
		clock = esdhc->peripheral_clock;
	else
		clock = pltfm_host->clock;
	return clock / 256 / 16;
}
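
/*
 * The divider chain behind this calculation is a prescaler of up to 256
 * followed by a divisor of up to 16 (see the loops in esdhc_of_set_clock()),
 * so the minimum SDCLK is base_clock / (256 * 16). With an illustrative
 * 400 MHz base clock that is roughly 98 kHz.
 */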

static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
{
	ktime_t timeout;
	u32 val;

	val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);

	if (enable)
		val |= ESDHC_CLOCK_SDCLKEN;
	else
		val &= ~ESDHC_CLOCK_SDCLKEN;

	sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	val = ESDHC_CLOCK_STABLE;
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (sdhci_readl(host, ESDHC_PRSSTAT) & val)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
				mmc_hostname(host->mmc));
			break;
		}
		udelay(10);
	}
}

static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	unsigned int pre_div = 1, div = 1;
	unsigned int division;
	ktime_t timeout;
	long fixup = 0;
	u32 temp;

	host->mmc->actual_clock = 0;

	if (clock == 0) {
		esdhc_clock_enable(host, false);
		return;
	}

	/* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
	if (esdhc->vendor_ver < VENDOR_V_23)
		pre_div = 2;

	if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
	    esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
		fixup = esdhc->clk_fixup->sd_dflt_max_clk;
	else if (esdhc->clk_fixup)
		fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];

	if (fixup && clock > fixup)
		clock = fixup;

	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
	temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
		  ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);

	while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
		pre_div *= 2;

	while (host->max_clk / pre_div / div > clock && div < 16)
		div++;

	if (esdhc->quirk_limited_clk_division &&
	    clock == MMC_HS200_MAX_DTR &&
	    (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
	     host->flags & SDHCI_HS400_TUNING)) {
		division = pre_div * div;
		if (division <= 4) {
			pre_div = 4;
			div = 1;
		} else if (division <= 8) {
			pre_div = 4;
			div = 2;
		} else if (division <= 12) {
			pre_div = 4;
			div = 3;
		} else {
			pr_warn("%s: using unsupported clock division.\n",
				mmc_hostname(host->mmc));
		}
	}

	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
		clock, host->max_clk / pre_div / div);
	host->mmc->actual_clock = host->max_clk / pre_div / div;
	esdhc->div_ratio = pre_div * div;

	pre_div >>= 1;
	div--;

	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
	temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
		| (div << ESDHC_DIVIDER_SHIFT)
		| (pre_div << ESDHC_PREDIV_SHIFT));
	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);

	if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
	    clock == MMC_HS200_MAX_DTR) {
		temp = sdhci_readl(host, ESDHC_TBCTL);
		sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
		temp = sdhci_readl(host, ESDHC_SDCLKCTL);
		sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
		esdhc_clock_enable(host, true);

		temp = sdhci_readl(host, ESDHC_DLLCFG0);
		temp |= ESDHC_DLL_ENABLE;
		if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
			temp |= ESDHC_DLL_FREQ_SEL;
		sdhci_writel(host, temp, ESDHC_DLLCFG0);
		temp = sdhci_readl(host, ESDHC_TBCTL);
		sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);

		esdhc_clock_enable(host, false);
		temp = sdhci_readl(host, ESDHC_DMA_SYSCTL);
		temp |= ESDHC_FLUSH_ASYNC_FIFO;
		sdhci_writel(host, temp, ESDHC_DMA_SYSCTL);
	}

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
				mmc_hostname(host->mmc));
			break;
		}
		udelay(10);
	}

	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
	temp |= ESDHC_CLOCK_SDCLKEN;
	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
}
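
/*
 * Worked divider example for esdhc_of_set_clock() above (clock figures are
 * illustrative): with host->max_clk = 400 MHz, a request for 25 MHz and a
 * vendor version >= 2.3 (pre_div starts at 1), the first loop leaves
 * pre_div = 1 (400 / 1 / 16 = 25 MHz, not above the target) and the second
 * loop stops at div = 16, giving an actual clock of 25 MHz and
 * div_ratio = 16; the register fields are then written as pre_div >> 1 = 0
 * and div - 1 = 15.
 */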

static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
{
	u32 ctrl;

	ctrl = sdhci_readl(host, ESDHC_PROCTL);
	ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
	switch (width) {
	case MMC_BUS_WIDTH_8:
		ctrl |= ESDHC_CTRL_8BITBUS;
		break;

	case MMC_BUS_WIDTH_4:
		ctrl |= ESDHC_CTRL_4BITBUS;
		break;

	default:
		break;
	}

	sdhci_writel(host, ctrl, ESDHC_PROCTL);
}
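
/*
 * Note (based on the ESDHC_CTRL_* definitions in sdhci-esdhc.h): the bus
 * width is a 2-bit field in PROCTL, so the default (1-bit) case simply
 * leaves the field cleared by the mask above.
 */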

static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 val;

	sdhci_reset(host, mask);

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc"))
		mdelay(1);

	if (mask & SDHCI_RESET_ALL) {
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_TB_EN;
		sdhci_writel(host, val, ESDHC_TBCTL);

		if (esdhc->quirk_unreliable_pulse_detection) {
			val = sdhci_readl(host, ESDHC_DLLCFG1);
			val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
			sdhci_writel(host, val, ESDHC_DLLCFG1);
		}
	}
}

/*
 * The SCFG, Supplemental Configuration Unit, provides SoC specific
 * configuration and status registers for the device. On some platforms
 * there is an SDHC IO VSEL control register on the SCFG which is used to
 * support SDHC IO voltage switching.
 */
static const struct of_device_id scfg_device_ids[] = {
	{ .compatible = "fsl,t1040-scfg", },
	{ .compatible = "fsl,ls1012a-scfg", },
	{ .compatible = "fsl,ls1046a-scfg", },
	{}
};

/* SDHC IO VSEL control register definition */
#define SCFG_SDHCIOVSELCR	0x408
#define SDHCIOVSELCR_TGLEN	0x80000000
#define SDHCIOVSELCR_VSELVAL	0x60000000
#define SDHCIOVSELCR_SDHC_VS	0x00000001

static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct device_node *scfg_node;
	void __iomem *scfg_base = NULL;
	u32 sdhciovselcr;
	u32 val;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	val = sdhci_readl(host, ESDHC_PROCTL);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		val &= ~ESDHC_VOLT_SEL;
		sdhci_writel(host, val, ESDHC_PROCTL);
		return 0;
	case MMC_SIGNAL_VOLTAGE_180:
		scfg_node = of_find_matching_node(NULL, scfg_device_ids);
		if (scfg_node)
			scfg_base = of_iomap(scfg_node, 0);
		if (scfg_base) {
			sdhciovselcr = SDHCIOVSELCR_TGLEN |
				       SDHCIOVSELCR_VSELVAL;
			iowrite32be(sdhciovselcr,
				scfg_base + SCFG_SDHCIOVSELCR);

			val |= ESDHC_VOLT_SEL;
			sdhci_writel(host, val, ESDHC_PROCTL);
			mdelay(5);

			sdhciovselcr = SDHCIOVSELCR_TGLEN |
				       SDHCIOVSELCR_SDHC_VS;
			iowrite32be(sdhciovselcr,
				scfg_base + SCFG_SDHCIOVSELCR);
			iounmap(scfg_base);
		} else {
			val |= ESDHC_VOLT_SEL;
			sdhci_writel(host, val, ESDHC_PROCTL);
		}
		return 0;
	default:
		return 0;
	}
}

static struct soc_device_attribute soc_fixup_tuning[] = {
	{ .family = "QorIQ T1040", .revision = "1.0", },
	{ .family = "QorIQ T2080", .revision = "1.0", },
	{ .family = "QorIQ T1023", .revision = "1.0", },
	{ .family = "QorIQ LS1021A", .revision = "1.0", },
	{ .family = "QorIQ LS1080A", .revision = "1.0", },
	{ .family = "QorIQ LS2080A", .revision = "1.0", },
	{ .family = "QorIQ LS1012A", .revision = "1.0", },
	{ .family = "QorIQ LS1043A", .revision = "1.*", },
	{ .family = "QorIQ LS1046A", .revision = "1.0", },
	{ },
};
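
/*
 * soc_device_match() compares these family/revision strings against the
 * soc bus attributes using glob matching, so the "1.*" entry covers every
 * LS1043A rev 1.x while the plain "1.0" entries match only that stepping.
 */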

static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
{
	u32 val;

	esdhc_clock_enable(host, false);

	val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
	val |= ESDHC_FLUSH_ASYNC_FIFO;
	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);

	val = sdhci_readl(host, ESDHC_TBCTL);
	if (enable)
		val |= ESDHC_TB_EN;
	else
		val &= ~ESDHC_TB_EN;
	sdhci_writel(host, val, ESDHC_TBCTL);

	esdhc_clock_enable(host, true);
}

static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	bool hs400_tuning;
	unsigned int clk;
	u32 val;
	int ret;

	/* For tuning mode, the SD clock divisor value
	 * must be larger than 3 according to the reference manual.
	 */
	clk = esdhc->peripheral_clock / 3;
	if (host->clock > clk)
		esdhc_of_set_clock(host, clk);

	if (esdhc->quirk_limited_clk_division &&
	    host->flags & SDHCI_HS400_TUNING)
		esdhc_of_set_clock(host, host->clock);

	esdhc_tuning_block_enable(host, true);

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
	ret = sdhci_execute_tuning(mmc, opcode);

	if (hs400_tuning) {
		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
		val |= ESDHC_FLW_CTL_BG;
		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
	}

	if (host->tuning_err == -EAGAIN && esdhc->quirk_fixup_tuning) {

		/* program TBPTR[TB_WNDW_END_PTR] = 3*DIV_RATIO and
		 * program TBPTR[TB_WNDW_START_PTR] = 5*DIV_RATIO
		 */
		val = sdhci_readl(host, ESDHC_TBPTR);
		val = (val & ~((0x7f << 8) | 0x7f)) |
		(3 * esdhc->div_ratio) | ((5 * esdhc->div_ratio) << 8);
		sdhci_writel(host, val, ESDHC_TBPTR);

		/* program the software tuning mode by setting
		 * TBCTL[TB_MODE]=2'h3
		 */
		val = sdhci_readl(host, ESDHC_TBCTL);
		val |= 0x3;
		sdhci_writel(host, val, ESDHC_TBCTL);
		sdhci_execute_tuning(mmc, opcode);
	}
	return ret;
}
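
/*
 * Worked example of the software tuning fallback above (div_ratio value is
 * illustrative): with div_ratio = 4 the code programs
 * TBPTR[TB_WNDW_END_PTR] = 3 * 4 = 12 and TBPTR[TB_WNDW_START_PTR] =
 * 5 * 4 = 20, switches TBCTL[TB_MODE] to software tuning and re-runs the
 * tuning procedure.
 */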

static void esdhc_set_uhs_signaling(struct sdhci_host *host,
				   unsigned int timing)
{
	if (timing == MMC_TIMING_MMC_HS400)
		esdhc_tuning_block_enable(host, true);
	else
		sdhci_set_uhs_signaling(host, timing);
}

static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;

	/*
	 * On P2020, a Transfer Complete interrupt that arrives while a
	 * multi-block write still has blocks outstanding is acknowledged
	 * and masked out here so the SDHCI core does not treat the request
	 * as finished early.
	 */
	if (of_find_compatible_node(NULL, NULL,
				"fsl,p2020-esdhc")) {
		command = SDHCI_GET_CMD(sdhci_readw(host,
					SDHCI_COMMAND));
		if (command == MMC_WRITE_MULTIPLE_BLOCK &&
				sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
				intmask & SDHCI_INT_DATA_END) {
			intmask &= ~SDHCI_INT_DATA_END;
			sdhci_writel(host, SDHCI_INT_DATA_END,
					SDHCI_INT_STATUS);
		}
	}
	return intmask;
}

#ifdef CONFIG_PM_SLEEP
static u32 esdhc_proctl;
static int esdhc_of_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);

	esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);

	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);

	return sdhci_suspend_host(host);
}

static int esdhc_of_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	int ret = sdhci_resume_host(host);

	if (ret == 0) {
		/* Isn't this already done by sdhci_resume_host() ? --rmk */
		esdhc_of_enable_dma(host);
		sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
	}
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
			esdhc_of_suspend,
			esdhc_of_resume);

static const struct sdhci_ops sdhci_esdhc_be_ops = {
	.read_l = esdhc_be_readl,
	.read_w = esdhc_be_readw,
	.read_b = esdhc_be_readb,
	.write_l = esdhc_be_writel,
	.write_w = esdhc_be_writew,
	.write_b = esdhc_be_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};

static const struct sdhci_ops sdhci_esdhc_le_ops = {
	.read_l = esdhc_le_readl,
	.read_w = esdhc_le_readw,
	.read_b = esdhc_le_readb,
	.write_l = esdhc_le_writel,
	.write_w = esdhc_le_writew,
	.write_b = esdhc_le_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};

static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
#ifdef CONFIG_PPC
		  SDHCI_QUIRK_BROKEN_CARD_DETECTION |
#endif
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_be_ops,
};

static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_le_ops,
};

static struct soc_device_attribute soc_incorrect_hostver[] = {
	{ .family = "QorIQ T4240", .revision = "1.0", },
	{ .family = "QorIQ T4240", .revision = "2.0", },
	{ },
};

static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ .family = "QorIQ LX2160A", .revision = "2.0", },
	{ .family = "QorIQ LS1028A", .revision = "1.0", },
	{ },
};

static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ },
};

static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
{
	const struct of_device_id *match;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_esdhc *esdhc;
	struct device_node *np;
	struct clk *clk;
	u32 val;
	u16 host_ver;

	pltfm_host = sdhci_priv(host);
	esdhc = sdhci_pltfm_priv(pltfm_host);

	host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
	esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
			     SDHCI_VENDOR_VER_SHIFT;
	esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
	if (soc_device_match(soc_incorrect_hostver))
		esdhc->quirk_incorrect_hostver = true;
	else
		esdhc->quirk_incorrect_hostver = false;

	if (soc_device_match(soc_fixup_sdhc_clkdivs))
		esdhc->quirk_limited_clk_division = true;
	else
		esdhc->quirk_limited_clk_division = false;

	if (soc_device_match(soc_unreliable_pulse_detection))
		esdhc->quirk_unreliable_pulse_detection = true;
	else
		esdhc->quirk_unreliable_pulse_detection = false;

	match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
	if (match)
		esdhc->clk_fixup = match->data;
	np = pdev->dev.of_node;
	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		/*
		 * esdhc->peripheral_clock is assigned the eSDHC base clock
		 * when the peripheral clock is used. On some platforms the
		 * rate reported by the common clock API is the peripheral
		 * clock, while the eSDHC base clock is half the peripheral
		 * clock.
		 */
		if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
		    of_device_is_compatible(np, "fsl,ls1028a-esdhc"))
			esdhc->peripheral_clock = clk_get_rate(clk) / 2;
		else
			esdhc->peripheral_clock = clk_get_rate(clk);

		clk_put(clk);
	}

	if (esdhc->peripheral_clock) {
		esdhc_clock_enable(host, false);
		val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
		val |= ESDHC_PERIPHERAL_CLK_SEL;
		sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
		esdhc_clock_enable(host, true);
	}
}
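
/*
 * Example of the halving above (the rate is illustrative): if the common
 * clock framework reports 600 MHz for an LS1046A, the eSDHC base clock is
 * half of that, so peripheral_clock is stored as 300 MHz and later serves
 * both as the maximum clock and as the input to the divider calculation.
 */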

static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
{
	esdhc_tuning_block_enable(mmc_priv(mmc), false);
	return 0;
}

static int sdhci_esdhc_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct device_node *np;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_esdhc *esdhc;
	int ret;

	np = pdev->dev.of_node;

	if (of_property_read_bool(np, "little-endian"))
		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
					sizeof(struct sdhci_esdhc));
	else
		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
					sizeof(struct sdhci_esdhc));

	if (IS_ERR(host))
		return PTR_ERR(host);

	host->mmc_host_ops.start_signal_voltage_switch =
		esdhc_signal_voltage_switch;
	host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
	host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr;
	host->tuning_delay = 1;

	esdhc_init(pdev, host);

	sdhci_get_of_property(pdev);

	pltfm_host = sdhci_priv(host);
	esdhc = sdhci_pltfm_priv(pltfm_host);
	if (soc_device_match(soc_fixup_tuning))
		esdhc->quirk_fixup_tuning = true;
	else
		esdhc->quirk_fixup_tuning = false;

	if (esdhc->vendor_ver == VENDOR_V_22)
		host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;

	if (esdhc->vendor_ver > VENDOR_V_22)
		host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;

	if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
		host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
	}

	if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
	    of_device_is_compatible(np, "fsl,p5020-esdhc") ||
	    of_device_is_compatible(np, "fsl,p4080-esdhc") ||
	    of_device_is_compatible(np, "fsl,p1020-esdhc") ||
	    of_device_is_compatible(np, "fsl,t1040-esdhc"))
		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;

	if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;

	esdhc->quirk_ignore_data_inhibit = false;
	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
		/*
		 * Freescale messed up with P2020 as it has a non-standard
		 * host control register.
		 */
		host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
		esdhc->quirk_ignore_data_inhibit = true;
	}

	/* call to generic mmc_of_parse to support additional capabilities */
	ret = mmc_of_parse(host->mmc);
	if (ret)
		goto err;

	mmc_of_parse_voltage(np, &host->ocr_mask);

	ret = sdhci_add_host(host);
	if (ret)
		goto err;

	return 0;
 err:
	sdhci_pltfm_free(pdev);
	return ret;
}

static struct platform_driver sdhci_esdhc_driver = {
	.driver = {
		.name = "sdhci-esdhc",
		.of_match_table = sdhci_esdhc_of_match,
		.pm = &esdhc_of_dev_pm_ops,
	},
	.probe = sdhci_esdhc_probe,
	.remove = sdhci_pltfm_unregister,
};

module_platform_driver(sdhci_esdhc_driver);

MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
MODULE_LICENSE("GPL v2");