1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2011, Marvell Semiconductor Inc.
6 * Back ported to the 8xx platform (from the 8260 platform) by
18 #include <asm/cache.h>
19 #include <linux/bitops.h>
20 #include <linux/delay.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/printk.h>
24 #include <power/regulator.h>
/*
 * sdhci_reset() - issue a software reset and wait for it to complete.
 * @mask: SDHCI_RESET_* bits written to the SOFTWARE_RESET register.
 *
 * The controller clears the written bits when the reset has finished;
 * we poll until they are clear and warn if that never happens.
 * NOTE(review): interior lines (timeout init/decrement, delay) are
 * missing from this listing — comments describe only what is visible.
 */
26 static void sdhci_reset(struct sdhci_host *host, u8 mask)
28 unsigned long timeout;
32 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
33 while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
35 log_warning("Reset %#x never completed\n", mask);
/*
 * sdhci_cmd_done() - copy the command response out of the controller.
 *
 * For a 136-bit (R2) response the controller strips the CRC byte, so the
 * four response words are re-assembled with an 8-bit shift, pulling the
 * low byte of each word from the preceding response register.  For all
 * other response types a single 32-bit read suffices.
 */
43 static void sdhci_cmd_done(struct sdhci_host *host, struct mmc_cmd *cmd)
46 if (cmd->resp_type & MMC_RSP_136) {
47 /* CRC is stripped so we need to do some shifting. */
48 for (i = 0; i < 4; i++) {
49 cmd->response[i] = sdhci_readl(host,
50 SDHCI_RESPONSE + (3-i)*4) << 8;
52 cmd->response[i] |= sdhci_readb(host,
53 SDHCI_RESPONSE + (3-i)*4-1);
/* Short response: one 32-bit register holds the whole response. */
56 cmd->response[0] = sdhci_readl(host, SDHCI_RESPONSE);
/*
 * sdhci_transfer_pio() - move one block through the PIO data port.
 *
 * Transfers data->blocksize bytes, one 32-bit word at a time, via the
 * SDHCI_BUFFER register: reads into data->dest for MMC_DATA_READ,
 * otherwise writes from the same buffer pointer.
 * NOTE(review): assumes blocksize is a multiple of 4 and the buffer is
 * 32-bit accessible — confirm against callers.
 */
60 static void sdhci_transfer_pio(struct sdhci_host *host, struct mmc_data *data)
64 for (i = 0; i < data->blocksize; i += 4) {
65 offs = data->dest + i;
66 if (data->flags == MMC_DATA_READ)
67 *(u32 *)offs = sdhci_readl(host, SDHCI_BUFFER);
69 sdhci_writel(host, *(u32 *)offs, SDHCI_BUFFER);
/*
 * sdhci_prepare_dma() - program the controller for an SDMA/ADMA transfer.
 *
 * Selects the DMA mode bits in HOST_CONTROL, falls back to the host's
 * bounce buffer when SDMA is in use and the caller's buffer is not
 * suitably aligned (or alignment is forced), maps the buffer for DMA
 * and writes the (bus) address to the appropriate address register.
 * A no-argument stub variant exists when neither SDMA nor ADMA is
 * enabled (its empty body is outside this listing).
 * NOTE(review): interior lines (buf selection for reads, *is_aligned
 * update, ADMA table length argument, #else/#endif) are missing from
 * this listing.
 */
73 #if (CONFIG_IS_ENABLED(MMC_SDHCI_SDMA) || CONFIG_IS_ENABLED(MMC_SDHCI_ADMA))
74 static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
75 int *is_aligned, int trans_bytes)
81 if (data->flags == MMC_DATA_READ)
84 buf = (void *)data->src;
/* Select the DMA engine (ADMA64 > ADMA32 > default SDMA) in HOST_CONTROL. */
86 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
87 ctrl &= ~SDHCI_CTRL_DMA_MASK;
88 if (host->flags & USE_ADMA64)
89 ctrl |= SDHCI_CTRL_ADMA64;
90 else if (host->flags & USE_ADMA)
91 ctrl |= SDHCI_CTRL_ADMA32;
92 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
/*
 * SDMA with a 32-bit-DMA quirked controller needs an 8-byte aligned
 * buffer; copy outgoing data into the bounce buffer when required.
 */
94 if (host->flags & USE_SDMA &&
95 (host->force_align_buffer ||
96 (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR &&
97 ((unsigned long)buf & 0x7) != 0x0))) {
99 if (data->flags != MMC_DATA_READ)
100 memcpy(host->align_buffer, buf, trans_bytes);
101 buf = host->align_buffer;
104 host->start_addr = dma_map_single(buf, trans_bytes,
105 mmc_get_dma_dir(data));
107 if (host->flags & USE_SDMA) {
/* SDMA takes a single bus address in SDHCI_DMA_ADDRESS. */
108 dma_addr = dev_phys_to_bus(mmc_to_dev(host->mmc), host->start_addr);
109 sdhci_writel(host, dma_addr, SDHCI_DMA_ADDRESS);
111 #if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
112 else if (host->flags & (USE_ADMA | USE_ADMA64)) {
/* ADMA uses a descriptor table; write its address (hi word for ADMA64). */
113 sdhci_prepare_adma_table(host, host->adma_desc_table, data,
116 sdhci_writel(host, lower_32_bits(host->adma_addr),
118 if (host->flags & USE_ADMA64)
119 sdhci_writel(host, upper_32_bits(host->adma_addr),
120 SDHCI_ADMA_ADDRESS_HI);
125 static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
126 int *is_aligned, int trans_bytes)
/*
 * sdhci_transfer_data() - run a data transfer to completion.
 *
 * Polls SDHCI_INT_STATUS until SDHCI_INT_DATA_END is observed, handling
 * three event classes along the way:
 *  - error bits: logged and the loop aborted (error path partly outside
 *    this listing);
 *  - PIO ready bits: one block is moved via sdhci_transfer_pio() and the
 *    destination pointer advanced;
 *  - SDMA boundary interrupts: the DMA address is realigned to the next
 *    boundary window and rewritten.
 * Finally the DMA mapping is released when DMA support is compiled in.
 * NOTE(review): timeout handling and return statements are missing from
 * this listing — comments describe only visible behaviour.
 */
129 static int sdhci_transfer_data(struct sdhci_host *host, struct mmc_data *data)
131 dma_addr_t start_addr = host->start_addr;
132 unsigned int stat, rdy, mask, timeout, block = 0;
133 bool transfer_done = false;
136 rdy = SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_AVAIL;
137 mask = SDHCI_DATA_AVAILABLE | SDHCI_SPACE_AVAILABLE;
139 stat = sdhci_readl(host, SDHCI_INT_STATUS);
140 if (stat & SDHCI_INT_ERROR) {
141 log_debug("Error detected in status(%#x)!\n", stat);
/* PIO path: move one block whenever the controller signals readiness. */
144 if (!transfer_done && (stat & rdy)) {
145 if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & mask))
147 sdhci_writel(host, rdy, SDHCI_INT_STATUS);
148 sdhci_transfer_pio(host, data);
149 data->dest += data->blocksize;
150 if (++block >= data->blocks) {
151 /* Keep looping until the SDHCI_INT_DATA_END is
152 * cleared, even if we finished sending all the
155 transfer_done = true;
/* SDMA boundary crossed: restart DMA at the next boundary window. */
159 if ((host->flags & USE_DMA) && !transfer_done &&
160 (stat & SDHCI_INT_DMA_END)) {
161 sdhci_writel(host, SDHCI_INT_DMA_END, SDHCI_INT_STATUS);
162 if (host->flags & USE_SDMA) {
164 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1);
165 start_addr += SDHCI_DEFAULT_BOUNDARY_SIZE;
166 start_addr = dev_phys_to_bus(mmc_to_dev(host->mmc),
168 sdhci_writel(host, start_addr, SDHCI_DMA_ADDRESS);
174 log_err("Transfer data timeout\n");
177 } while (!(stat & SDHCI_INT_DATA_END));
179 #if (CONFIG_IS_ENABLED(MMC_SDHCI_SDMA) || CONFIG_IS_ENABLED(MMC_SDHCI_ADMA))
180 dma_unmap_single(host->start_addr, data->blocks * data->blocksize,
181 mmc_get_dma_dir(data));
188 * No command will be sent by driver if card is busy, so driver must wait
189 * for card ready state.
190 * Each time the card is still busy after the timeout, the (last) timeout
191 * value is doubled, but only if it does not exceed the defined global maximum.
192 * Each function call will use last timeout value.
194 #define SDHCI_CMD_MAX_TIMEOUT 3200
195 #define SDHCI_CMD_DEFAULT_TIMEOUT 100
196 #define SDHCI_READ_STATUS_TIMEOUT 1000
/*
 * sdhci_send_command() - issue an MMC command (and optional data transfer).
 *
 * Two alternate signatures exist (DM_MMC takes a udevice, legacy takes a
 * struct mmc directly); the stripped #if/#else between them is outside
 * this listing.  The sequence is:
 *  1. wait (with an escalating, persistent timeout) for CMD/DAT inhibit
 *     bits to clear;
 *  2. build the interrupt-wait mask and the SDHCI command flags from the
 *     response type;
 *  3. program transfer mode / block size / block count and prepare DMA
 *     when data is attached;
 *  4. write argument + command, then poll INT_STATUS for completion,
 *     with a workaround for controllers with broken R1b busy signalling;
 *  5. read back the response, run the data phase, and on error reset the
 *     CMD and DATA lines.
 * NOTE(review): several interior lines (delays, returns, error paths)
 * are missing from this listing.
 */
199 static int sdhci_send_command(struct udevice *dev, struct mmc_cmd *cmd,
200 struct mmc_data *data)
202 struct mmc *mmc = mmc_get_mmc_dev(dev);
205 static int sdhci_send_command(struct mmc *mmc, struct mmc_cmd *cmd,
206 struct mmc_data *data)
209 struct sdhci_host *host = mmc->priv;
210 unsigned int stat = 0;
212 int trans_bytes = 0, is_aligned = 1;
213 u32 mask, flags, mode = 0;
214 unsigned int time = 0;
215 int mmc_dev = mmc_get_blk_desc(mmc)->devnum;
216 ulong start = get_timer(0);
218 host->start_addr = 0;
219 /* Timeout unit - ms */
220 static unsigned int cmd_timeout = SDHCI_CMD_DEFAULT_TIMEOUT;
222 mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;
224 /* We shouldn't wait for data inhibit for stop commands, even
225 though they might use busy signaling */
226 if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION ||
227 ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
228 cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data))
229 mask &= ~SDHCI_DATA_INHIBIT;
/* Busy-wait loop; cmd_timeout is static, so escalation persists across calls. */
231 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
232 if (time >= cmd_timeout) {
233 log_warning("mmc%d busy ", mmc_dev);
234 if (2 * cmd_timeout <= SDHCI_CMD_MAX_TIMEOUT) {
235 cmd_timeout += cmd_timeout;
236 log_warning("timeout increasing to: %u ms\n",
239 log_warning("timeout\n");
/* Clear any stale interrupt status before issuing the command. */
247 sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
249 mask = SDHCI_INT_RESPONSE;
/* Tuning commands complete via a data-available interrupt, not a response. */
250 if ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
251 cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data)
252 mask = SDHCI_INT_DATA_AVAIL;
/* Translate the MMC response type into SDHCI command flags. */
254 if (!(cmd->resp_type & MMC_RSP_PRESENT))
255 flags = SDHCI_CMD_RESP_NONE;
256 else if (cmd->resp_type & MMC_RSP_136)
257 flags = SDHCI_CMD_RESP_LONG;
258 else if (cmd->resp_type & MMC_RSP_BUSY) {
259 flags = SDHCI_CMD_RESP_SHORT_BUSY;
260 mask |= SDHCI_INT_DATA_END;
262 flags = SDHCI_CMD_RESP_SHORT;
264 if (cmd->resp_type & MMC_RSP_CRC)
265 flags |= SDHCI_CMD_CRC;
266 if (cmd->resp_type & MMC_RSP_OPCODE)
267 flags |= SDHCI_CMD_INDEX;
268 if (data || cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
269 cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200)
270 flags |= SDHCI_CMD_DATA;
272 /* Set the transfer mode according to the data flags */
274 sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
276 if (!(host->quirks & SDHCI_QUIRK_SUPPORT_SINGLE))
277 mode = SDHCI_TRNS_BLK_CNT_EN;
278 trans_bytes = data->blocks * data->blocksize;
279 if (data->blocks > 1)
280 mode |= SDHCI_TRNS_MULTI | SDHCI_TRNS_BLK_CNT_EN;
282 if (data->flags == MMC_DATA_READ)
283 mode |= SDHCI_TRNS_READ;
285 if (host->flags & USE_DMA) {
286 mode |= SDHCI_TRNS_DMA;
287 sdhci_prepare_dma(host, data, &is_aligned, trans_bytes);
290 sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
293 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
294 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
295 } else if (cmd->resp_type & MMC_RSP_BUSY) {
296 sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
/* Fire the command: argument first, then index + flags. */
299 sdhci_writel(host, cmd->cmdarg, SDHCI_ARGUMENT);
300 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->cmdidx, flags), SDHCI_COMMAND);
301 start = get_timer(0);
303 stat = sdhci_readl(host, SDHCI_INT_STATUS);
304 if (stat & SDHCI_INT_ERROR)
/*
 * Workaround: on SDHCI_QUIRK_BROKEN_R1B hosts the DATA_END interrupt
 * is unreliable for busy (R1b) commands, so watch DAT line activity in
 * PRESENT_STATE instead.
 */
307 if (host->quirks & SDHCI_QUIRK_BROKEN_R1B &&
308 cmd->resp_type & MMC_RSP_BUSY && !data) {
310 sdhci_readl(host, SDHCI_PRESENT_STATE);
312 if (!(state & SDHCI_DAT_ACTIVE))
316 if (get_timer(start) >= SDHCI_READ_STATUS_TIMEOUT) {
317 log_warning("Timeout for status update: %08x %08x\n",
321 } while ((stat & mask) != mask);
/* Success only if the expected bits are set and no error bit is. */
323 if ((stat & (SDHCI_INT_ERROR | mask)) == mask) {
324 sdhci_cmd_done(host, cmd);
325 sdhci_writel(host, mask, SDHCI_INT_STATUS);
330 ret = sdhci_transfer_data(host, data);
332 if (host->quirks & SDHCI_QUIRK_WAIT_SEND_CMD)
335 stat = sdhci_readl(host, SDHCI_INT_STATUS);
336 sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
/* Unaligned SDMA read used the bounce buffer: copy data back to caller. */
338 if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
339 !is_aligned && (data->flags == MMC_DATA_READ))
340 memcpy(data->dest, host->align_buffer, trans_bytes);
/* Error path: reset CMD and DATA circuits before reporting the failure. */
344 sdhci_reset(host, SDHCI_RESET_CMD);
345 sdhci_reset(host, SDHCI_RESET_DATA);
346 if (stat & SDHCI_INT_TIMEOUT)
/*
 * sdhci_execute_tuning() - run platform-specific bus tuning.
 *
 * DM_MMC + tuning builds only.  Delegates entirely to the host ops'
 * platform_execute_tuning hook when one is provided.
 * NOTE(review): the return path when no hook exists is outside this
 * listing.
 */
352 #if defined(CONFIG_DM_MMC) && CONFIG_IS_ENABLED(MMC_SUPPORTS_TUNING)
353 static int sdhci_execute_tuning(struct udevice *dev, uint opcode)
356 struct mmc *mmc = mmc_get_mmc_dev(dev);
357 struct sdhci_host *host = mmc->priv;
359 log_debug("sdhci tuning\n");
361 if (host->ops && host->ops->platform_execute_tuning) {
362 err = host->ops->platform_execute_tuning(mmc, opcode);
/*
 * sdhci_set_clock() - set the SD bus clock to at most @clock Hz.
 *
 * Waits for the bus to go idle, gates the clock, gives the platform
 * hooks (set_delay / config_dll / set_clock) a chance to adjust timing,
 * then computes a divider:
 *  - spec 3.00+: programmable clock mode if the multiplier is supported,
 *    otherwise even divisors up to SDHCI_MAX_DIV_SPEC_300;
 *  - spec 2.00: power-of-two divisors up to SDHCI_MAX_DIV_SPEC_200.
 * Finally the internal clock is enabled, polled for stability, and the
 * card clock is switched on.
 * NOTE(review): interior lines (timeout init/decrement, clock==0 early
 * exit, return statements) are missing from this listing.
 */
370 int sdhci_set_clock(struct mmc *mmc, unsigned int clock)
372 struct sdhci_host *host = mmc->priv;
373 unsigned int div, clk = 0, timeout;
378 while (sdhci_readl(host, SDHCI_PRESENT_STATE) &
379 (SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT)) {
381 log_err("Timeout waiting for cmd & data inhibit\n");
/* Gate the clock before reprogramming the divider. */
389 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
394 if (host->ops && host->ops->set_delay) {
395 ret = host->ops->set_delay(host);
397 log_err("Error while setting tap delay\n");
/* Pre-clock-change DLL configuration (enable=false). */
402 if (host->ops && host->ops->config_dll) {
403 ret = host->ops->config_dll(host, clock, false);
405 log_err("Error configuring dll\n");
410 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
412 * Check if the Host Controller supports Programmable Clock
416 for (div = 1; div <= 1024; div++) {
417 if ((host->max_clk / div) <= clock)
422 * Set Programmable Clock Mode in the Clock
425 clk = SDHCI_PROG_CLOCK_MODE;
428 /* Version 3.00 divisors must be a multiple of 2. */
429 if (host->max_clk <= clock) {
433 div < SDHCI_MAX_DIV_SPEC_300;
435 if ((host->max_clk / div) <= clock)
442 /* Version 2.00 divisors must be a power of 2. */
443 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
444 if ((host->max_clk / div) <= clock)
450 if (host->ops && host->ops->set_clock)
451 host->ops->set_clock(host, div);
/* Post-divider DLL configuration (enable=true). */
453 if (host->ops && host->ops->config_dll) {
454 ret = host->ops->config_dll(host, clock, true);
456 log_err("Error while configuring dll\n");
/* Split the 10-bit divider across the low and high divider fields. */
461 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
462 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
463 << SDHCI_DIVIDER_HI_SHIFT;
464 clk |= SDHCI_CLOCK_INT_EN;
465 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
/* Poll until the internal clock reports stable. */
469 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
470 & SDHCI_CLOCK_INT_STABLE)) {
472 log_err("Internal clock never stabilised.\n");
479 clk |= SDHCI_CLOCK_CARD_EN;
480 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
/*
 * sdhci_set_power() - program the POWER_CONTROL register.
 * @power: bit index of the selected MMC_VDD_* voltage, or (unsigned
 *         short)-1 to indicate "no voltage selected".
 *
 * Maps the voltage to an SDHCI_POWER_* value; an unselected/unknown
 * voltage results in the power register being cleared, otherwise the
 * voltage plus SDHCI_POWER_ON is written.
 */
484 static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
488 if (power != (unsigned short)-1) {
489 switch (1 << power) {
490 case MMC_VDD_165_195:
491 pwr = SDHCI_POWER_180;
495 pwr = SDHCI_POWER_300;
499 pwr = SDHCI_POWER_330;
/* No valid voltage: power the bus off and return. */
505 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
509 pwr |= SDHCI_POWER_ON;
511 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
/*
 * sdhci_set_uhs_timing() - select the UHS mode field in HOST_CONTROL2.
 *
 * Translates mmc->selected_mode into the corresponding
 * SDHCI_CTRL_UHS_*/SDHCI_CTRL_HS400 value; unlisted modes fall back to
 * SDR12.  (The case labels between the assignments are outside this
 * listing.)
 */
514 void sdhci_set_uhs_timing(struct sdhci_host *host)
516 struct mmc *mmc = host->mmc;
519 reg = sdhci_readw(host, SDHCI_HOST_CONTROL2);
520 reg &= ~SDHCI_CTRL_UHS_MASK;
522 switch (mmc->selected_mode) {
525 reg |= SDHCI_CTRL_UHS_SDR25;
529 reg |= SDHCI_CTRL_UHS_SDR50;
533 reg |= SDHCI_CTRL_UHS_DDR50;
537 reg |= SDHCI_CTRL_UHS_SDR104;
541 reg |= SDHCI_CTRL_HS400;
/* Default: SDR12 timing. */
544 reg |= SDHCI_CTRL_UHS_SDR12;
547 sdhci_writew(host, reg, SDHCI_HOST_CONTROL2);
/*
 * sdhci_set_voltage() - switch the I/O signalling voltage (3.3V / 1.8V).
 *
 * Only active with CONFIG_MMC_IO_VOLTAGE.  For each target voltage:
 * with DM_REGULATOR, the vqmmc supply is disabled, reprogrammed to the
 * target voltage and re-enabled; then the SDHCI_CTRL_VDD_180 bit in
 * HOST_CONTROL2 is cleared (3.3V) or set (1.8V), and the register is
 * read back after the regulator settling time to confirm the switch
 * took effect.  Other voltages require no switch.
 * NOTE(review): delays, read-backs and break statements between the
 * visible lines are missing from this listing.
 */
550 static void sdhci_set_voltage(struct sdhci_host *host)
552 if (IS_ENABLED(CONFIG_MMC_IO_VOLTAGE)) {
553 struct mmc *mmc = (struct mmc *)host->mmc;
556 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
558 switch (mmc->signal_voltage) {
559 case MMC_SIGNAL_VOLTAGE_330:
560 #if CONFIG_IS_ENABLED(DM_REGULATOR)
561 if (mmc->vqmmc_supply) {
562 if (regulator_set_enable_if_allowed(mmc->vqmmc_supply, false)) {
563 pr_err("failed to disable vqmmc-supply\n");
567 if (regulator_set_value(mmc->vqmmc_supply, 3300000)) {
568 pr_err("failed to set vqmmc-voltage to 3.3V\n");
572 if (regulator_set_enable_if_allowed(mmc->vqmmc_supply, true)) {
573 pr_err("failed to enable vqmmc-supply\n");
/* Clear the 1.8V enable bit to select 3.3V signalling. */
579 ctrl &= ~SDHCI_CTRL_VDD_180;
580 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
586 /* 3.3V regulator output should be stable within 5 ms */
588 if (ctrl & SDHCI_CTRL_VDD_180) {
589 pr_err("3.3V regulator output did not become stable\n");
595 case MMC_SIGNAL_VOLTAGE_180:
596 #if CONFIG_IS_ENABLED(DM_REGULATOR)
597 if (mmc->vqmmc_supply) {
598 if (regulator_set_enable_if_allowed(mmc->vqmmc_supply, false)) {
599 pr_err("failed to disable vqmmc-supply\n");
603 if (regulator_set_value(mmc->vqmmc_supply, 1800000)) {
604 pr_err("failed to set vqmmc-voltage to 1.8V\n");
608 if (regulator_set_enable_if_allowed(mmc->vqmmc_supply, true)) {
609 pr_err("failed to enable vqmmc-supply\n");
/* Set the 1.8V enable bit to select 1.8V signalling. */
615 ctrl |= SDHCI_CTRL_VDD_180;
616 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
622 /* 1.8V regulator output has to be stable within 5 ms */
624 if (!(ctrl & SDHCI_CTRL_VDD_180)) {
625 pr_err("1.8V regulator output did not become stable\n");
632 /* No signal voltage switch required */
/*
 * sdhci_set_control_reg() - default set_control_reg implementation:
 * apply the signalling voltage, then the UHS timing mode.
 */
638 void sdhci_set_control_reg(struct sdhci_host *host)
640 sdhci_set_voltage(host);
641 sdhci_set_uhs_timing(host);
/*
 * sdhci_set_ios() - apply the current mmc settings to the controller.
 *
 * Two alternate signatures (DM udevice vs. legacy struct mmc); the
 * stripped #if/#else between them is outside this listing.  Applies the
 * platform set_control_reg hook, (re)programs the clock when it changed
 * or must be gated, sets the 4/8-bit bus-width bits, and manages the
 * high-speed enable bit (honouring the no-HISPD / broken-HISPD quirks).
 * Finally the optional set_ios_post hook gets the last word.
 */
645 static int sdhci_set_ios(struct udevice *dev)
647 struct mmc *mmc = mmc_get_mmc_dev(dev);
649 static int sdhci_set_ios(struct mmc *mmc)
653 struct sdhci_host *host = mmc->priv;
654 bool no_hispd_bit = false;
656 if (host->ops && host->ops->set_control_reg)
657 host->ops->set_control_reg(host);
659 if (mmc->clock != host->clock)
660 sdhci_set_clock(mmc, mmc->clock);
662 if (mmc->clk_disable)
663 sdhci_set_clock(mmc, 0);
/* Bus width: 8-bit requires spec 3.00+ or the WIDE8 quirk. */
666 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
667 if (mmc->bus_width == 8) {
668 ctrl &= ~SDHCI_CTRL_4BITBUS;
669 if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
670 (host->quirks & SDHCI_QUIRK_USE_WIDE8))
671 ctrl |= SDHCI_CTRL_8BITBUS;
673 if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
674 (host->quirks & SDHCI_QUIRK_USE_WIDE8))
675 ctrl &= ~SDHCI_CTRL_8BITBUS;
676 if (mmc->bus_width == 4)
677 ctrl |= SDHCI_CTRL_4BITBUS;
679 ctrl &= ~SDHCI_CTRL_4BITBUS;
/* Quirked controllers must not have the HISPD bit set at all. */
682 if ((host->quirks & SDHCI_QUIRK_NO_HISPD_BIT) ||
683 (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE)) {
684 ctrl &= ~SDHCI_CTRL_HISPD;
/* Enable HISPD for every high-speed/UHS/HS200/HS400 mode. */
689 if (mmc->selected_mode == MMC_HS ||
690 mmc->selected_mode == SD_HS ||
691 mmc->selected_mode == MMC_HS_52 ||
692 mmc->selected_mode == MMC_DDR_52 ||
693 mmc->selected_mode == MMC_HS_200 ||
694 mmc->selected_mode == MMC_HS_400 ||
695 mmc->selected_mode == MMC_HS_400_ES ||
696 mmc->selected_mode == UHS_SDR25 ||
697 mmc->selected_mode == UHS_SDR50 ||
698 mmc->selected_mode == UHS_SDR104 ||
699 mmc->selected_mode == UHS_DDR50)
700 ctrl |= SDHCI_CTRL_HISPD;
702 ctrl &= ~SDHCI_CTRL_HISPD;
705 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
707 /* If available, call the driver specific "post" set_ios() function */
708 if (host->ops && host->ops->set_ios_post)
709 return host->ops->set_ios_post(host);
/*
 * sdhci_init() - one-time controller initialisation.
 *
 * Requests the card-detect GPIO (DM+GPIO builds), performs a full
 * software reset, sets up the bounce buffer (either the fixed
 * CONFIG_FIXED_SDHCI_ALIGNED_BUFFER address, forced on, or a 512 KiB
 * memalign'd buffer when the 32-bit-DMA quirk needs one), applies the
 * highest supported VDD, polls card detect via the get_cd hook, and
 * configures which interrupt statuses are latched while masking all
 * interrupt signals.
 */
714 static int sdhci_init(struct mmc *mmc)
716 struct sdhci_host *host = mmc->priv;
717 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_GPIO)
718 struct udevice *dev = mmc->dev;
720 gpio_request_by_name(dev, "cd-gpios", 0,
721 &host->cd_gpio, GPIOD_IS_IN);
724 sdhci_reset(host, SDHCI_RESET_ALL);
726 #if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
727 host->align_buffer = (void *)CONFIG_FIXED_SDHCI_ALIGNED_BUFFER;
729 * Always use this bounce-buffer when CONFIG_FIXED_SDHCI_ALIGNED_BUFFER
732 host->force_align_buffer = true;
734 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) {
735 host->align_buffer = memalign(8, 512 * 1024);
736 if (!host->align_buffer) {
737 log_err("Aligned buffer alloc failed\n");
/* Select the highest voltage advertised in cfg->voltages. */
743 sdhci_set_power(host, fls(mmc->cfg->voltages) - 1);
745 if (host->ops && host->ops->get_cd)
746 host->ops->get_cd(host);
748 /* Enable only interrupts served by the SD controller */
749 sdhci_writel(host, SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK,
751 /* Mask all sdhci interrupt sources */
752 sdhci_writel(host, 0x0, SDHCI_SIGNAL_ENABLE);
/*
 * sdhci_probe() - DM probe entry point: initialise the controller for
 * the mmc instance attached to @dev.
 */
758 int sdhci_probe(struct udevice *dev)
760 struct mmc *mmc = mmc_get_mmc_dev(dev);
762 return sdhci_init(mmc);
/*
 * sdhci_deferred_probe() - forward the DM deferred-probe callback to the
 * host ops' deferred_probe hook when one is provided.
 */
765 static int sdhci_deferred_probe(struct udevice *dev)
768 struct mmc *mmc = mmc_get_mmc_dev(dev);
769 struct sdhci_host *host = mmc->priv;
771 if (host->ops && host->ops->deferred_probe) {
772 err = host->ops->deferred_probe(host);
/*
 * sdhci_get_cd() - report card presence.
 *
 * Non-removable and polled hosts are always "present".  Otherwise the
 * card-detect GPIO is read (DM_GPIO builds) or the CARD_PRESENT bit of
 * PRESENT_STATE is used; MMC_CAP_CD_ACTIVE_HIGH inverts the polarity.
 * NOTE(review): the return statements and inversion lines sit between
 * the visible lines of this listing.
 */
779 static int sdhci_get_cd(struct udevice *dev)
781 struct mmc *mmc = mmc_get_mmc_dev(dev);
782 struct sdhci_host *host = mmc->priv;
785 /* If nonremovable, assume that the card is always present. */
786 if (mmc->cfg->host_caps & MMC_CAP_NONREMOVABLE)
788 /* If polling, assume that the card is always present. */
789 if (mmc->cfg->host_caps & MMC_CAP_NEEDS_POLL)
792 #if CONFIG_IS_ENABLED(DM_GPIO)
793 value = dm_gpio_get_value(&host->cd_gpio);
795 if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
801 value = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
803 if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
/*
 * sdhci_wait_dat0() - wait for the DAT0 line to reach @state.
 * @state:      desired logic level (0 or non-zero).
 * @timeout_us: maximum wait in microseconds; 0 means wait forever.
 *
 * Polls SDHCI_PRESENT_STATE's DAT0 level bit until it matches @state or
 * the deadline passes.
 */
809 static int sdhci_wait_dat0(struct udevice *dev, int state,
813 struct mmc *mmc = mmc_get_mmc_dev(dev);
814 struct sdhci_host *host = mmc->priv;
815 unsigned long timeout = timer_get_us() + timeout_us;
817 // readx_poll_timeout is unsuitable because sdhci_readl accepts
820 tmp = sdhci_readl(host, SDHCI_PRESENT_STATE);
821 if (!!(tmp & SDHCI_DATA_0_LVL_MASK) == !!state)
823 } while (!timeout_us || !time_after(timer_get_us(), timeout));
/*
 * sdhci_set_enhanced_strobe() - HS400 Enhanced Strobe setup, delegated
 * to the host ops' set_enhanced_strobe hook (HS400_ES builds only).
 */
828 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
829 static int sdhci_set_enhanced_strobe(struct udevice *dev)
831 struct mmc *mmc = mmc_get_mmc_dev(dev);
832 struct sdhci_host *host = mmc->priv;
834 if (host->ops && host->ops->set_enhanced_strobe)
835 return host->ops->set_enhanced_strobe(host);
/*
 * Driver operation tables.  The DM variant (struct dm_mmc_ops) wires up
 * the full set of callbacks, with tuning and enhanced-strobe entries
 * compiled in conditionally; the legacy non-DM variant (struct mmc_ops,
 * behind a stripped #else in this listing) exposes only send_cmd and
 * set_ios.
 */
841 const struct dm_mmc_ops sdhci_ops = {
842 .send_cmd = sdhci_send_command,
843 .set_ios = sdhci_set_ios,
844 .get_cd = sdhci_get_cd,
845 .deferred_probe = sdhci_deferred_probe,
846 #if CONFIG_IS_ENABLED(MMC_SUPPORTS_TUNING)
847 .execute_tuning = sdhci_execute_tuning,
849 .wait_dat0 = sdhci_wait_dat0,
850 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
851 .set_enhanced_strobe = sdhci_set_enhanced_strobe,
855 static const struct mmc_ops sdhci_ops = {
856 .send_cmd = sdhci_send_command,
857 .set_ios = sdhci_set_ios,
/*
 * sdhci_setup_cfg() - populate the mmc_config from controller capabilities.
 * @cfg:   configuration to fill in.
 * @host:  SDHCI host whose capability registers are read.
 * @f_max: upper clamp for the bus frequency (0 = no clamp).
 * @f_min: lower bound for the bus frequency (0 = derive from max divisor).
 *
 * Reads CAPABILITIES (optionally masked/extended by the device-tree
 * "sdhci-caps-mask"/"sdhci-caps" properties in DM builds), selects the
 * DMA engine flags, reads the host version, derives max/min clock,
 * voltages, bus-width and speed-mode capabilities, and applies the
 * relevant quirks.  NOTE(review): many interior lines (returns, f_min
 * handling, 8-bit default) are missing from this listing.
 */
862 int sdhci_setup_cfg(struct mmc_config *cfg, struct sdhci_host *host,
863 u32 f_max, u32 f_min)
865 u32 caps, caps_1 = 0;
866 #if CONFIG_IS_ENABLED(DM_MMC)
867 u64 dt_caps, dt_caps_mask;
/* DT may mask off broken capability bits and force additional ones. */
869 dt_caps_mask = dev_read_u64_default(host->mmc->dev,
870 "sdhci-caps-mask", 0);
871 dt_caps = dev_read_u64_default(host->mmc->dev,
873 caps = ~lower_32_bits(dt_caps_mask) &
874 sdhci_readl(host, SDHCI_CAPABILITIES);
875 caps |= lower_32_bits(dt_caps);
877 caps = sdhci_readl(host, SDHCI_CAPABILITIES);
879 log_debug("caps: %#x\n", caps);
881 #if CONFIG_IS_ENABLED(MMC_SDHCI_SDMA)
882 if ((caps & SDHCI_CAN_DO_SDMA)) {
883 host->flags |= USE_SDMA;
885 log_debug("Controller doesn't support SDMA\n");
888 #if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
889 if (!(caps & SDHCI_CAN_DO_ADMA2)) {
890 log_err("Controller doesn't support ADMA\n");
/* Lazily allocate the ADMA descriptor table on first setup. */
893 if (!host->adma_desc_table) {
894 host->adma_desc_table = sdhci_adma_init();
895 host->adma_addr = virt_to_phys(host->adma_desc_table);
898 if (IS_ENABLED(CONFIG_MMC_SDHCI_ADMA_64BIT))
899 host->flags |= USE_ADMA64;
901 host->flags |= USE_ADMA;
/* REG32_RW quirk: version register must be read via a 32-bit access. */
903 if (host->quirks & SDHCI_QUIRK_REG32_RW)
905 sdhci_readl(host, SDHCI_HOST_VERSION - 2) >> 16;
907 host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
909 cfg->name = host->name;
910 #ifndef CONFIG_DM_MMC
911 cfg->ops = &sdhci_ops;
914 /* Check whether the clock multiplier is supported or not */
915 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
916 #if CONFIG_IS_ENABLED(DM_MMC)
917 caps_1 = ~upper_32_bits(dt_caps_mask) &
918 sdhci_readl(host, SDHCI_CAPABILITIES_1);
919 caps_1 |= upper_32_bits(dt_caps);
921 caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
923 log_debug("caps_1: %#x\n", caps_1);
924 host->clk_mul = (caps_1 & SDHCI_CLOCK_MUL_MASK) >>
925 SDHCI_CLOCK_MUL_SHIFT;
928 * In case the value in Clock Multiplier is 0, then programmable
929 * clock mode is not supported, otherwise the actual clock
930 * multiplier is one more than the value of Clock Multiplier
931 * in the Capabilities Register.
/* Derive max_clk from the capability field unless the driver set it. */
937 if (host->max_clk == 0) {
938 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
939 host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) >>
940 SDHCI_CLOCK_BASE_SHIFT;
942 host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >>
943 SDHCI_CLOCK_BASE_SHIFT;
944 host->max_clk *= 1000000;
946 host->max_clk *= host->clk_mul;
948 if (host->max_clk == 0) {
949 log_err("Hardware doesn't specify base clock frequency\n");
952 if (f_max && (f_max < host->max_clk))
955 cfg->f_max = host->max_clk;
/* Without an explicit f_min, use the largest divisor the spec allows. */
959 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
960 cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_300;
962 cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_200;
965 if (caps & SDHCI_CAN_VDD_330)
966 cfg->voltages |= MMC_VDD_32_33 | MMC_VDD_33_34;
967 if (caps & SDHCI_CAN_VDD_300)
968 cfg->voltages |= MMC_VDD_29_30 | MMC_VDD_30_31;
969 if (caps & SDHCI_CAN_VDD_180)
970 cfg->voltages |= MMC_VDD_165_195;
972 if (host->quirks & SDHCI_QUIRK_BROKEN_VOLTAGE)
973 cfg->voltages |= host->voltages;
975 if (caps & SDHCI_CAN_DO_HISPD)
976 cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;
978 cfg->host_caps |= MMC_MODE_4BIT;
980 /* Since Host Controller Version3.0 */
981 if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
982 if (!(caps & SDHCI_CAN_DO_8BIT))
983 cfg->host_caps &= ~MMC_MODE_8BIT;
986 if (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE) {
987 cfg->host_caps &= ~MMC_MODE_HS;
988 cfg->host_caps &= ~MMC_MODE_HS_52MHz;
/* UHS modes require 1.8V signalling and an un-quirked controller. */
991 if (!(cfg->voltages & MMC_VDD_165_195) ||
992 (host->quirks & SDHCI_QUIRK_NO_1_8_V))
993 caps_1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
994 SDHCI_SUPPORT_DDR50);
996 if (caps_1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
997 SDHCI_SUPPORT_DDR50))
998 cfg->host_caps |= MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25);
1000 if (caps_1 & SDHCI_SUPPORT_SDR104) {
1001 cfg->host_caps |= MMC_CAP(UHS_SDR104) | MMC_CAP(UHS_SDR50);
1003 * SD3.0: SDR104 is supported so (for eMMC) the caps2
1004 * field can be promoted to support HS200.
1006 cfg->host_caps |= MMC_CAP(MMC_HS_200);
1007 } else if (caps_1 & SDHCI_SUPPORT_SDR50) {
1008 cfg->host_caps |= MMC_CAP(UHS_SDR50);
1011 if ((host->quirks & SDHCI_QUIRK_CAPS_BIT63_FOR_HS400) &&
1012 (caps_1 & SDHCI_SUPPORT_HS400))
1013 cfg->host_caps |= MMC_CAP(MMC_HS_400);
1015 if (caps_1 & SDHCI_SUPPORT_DDR50)
1016 cfg->host_caps |= MMC_CAP(UHS_DDR50);
/* Let the platform driver add/override caps it knows about. */
1018 if (host->host_caps)
1019 cfg->host_caps |= host->host_caps;
1021 cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
/*
 * sdhci_bind() - DM bind helper: attach the mmc instance and its config
 * to @dev via the generic mmc_bind().
 */
1027 int sdhci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
1029 return mmc_bind(dev, mmc, cfg);
1032 int add_sdhci(struct sdhci_host *host, u32 f_max, u32 f_min)
1036 ret = sdhci_setup_cfg(&host->cfg, host, f_max, f_min);
1040 host->mmc = mmc_create(&host->cfg, host);
1041 if (host->mmc == NULL) {
1042 log_err("mmc create fail\n");