// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2012 SAMSUNG Electronics
 */

#include <common.h>
#include <bouncebuf.h>
#include <errno.h>
#include <memalign.h>
#include <mmc.h>
#include <dwmmc.h>
#include <wait_bit.h>
#include <power/regulator.h>

#define PAGE_SIZE 4096
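
/*
 * Write the requested reset bits into the CTRL register and poll until the
 * reset bits clear. Returns 1 on success and 0 on timeout; dwmci_init()
 * treats a zero return as a failed controller reset.
 */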
static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
{
	unsigned long timeout = 1000;
	u32 ctrl;

	dwmci_writel(host, DWMCI_CTRL, value);

	while (timeout--) {
		ctrl = dwmci_readl(host, DWMCI_CTRL);
		if (!(ctrl & DWMCI_RESET_ALL))
			return 1;
	}
	return 0;
}
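
/*
 * Fill one internal DMA controller (IDMAC) descriptor with its control
 * flags, buffer byte count and buffer address. Descriptors are assumed to
 * sit back to back in memory, so the next-descriptor pointer is simply the
 * address immediately following this descriptor.
 */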
static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
				u32 desc0, u32 desc1, u32 desc2)
{
	struct dwmci_idmac *desc = idmac;

	desc->flags = desc0;
	desc->cnt = desc1;
	desc->addr = desc2;
	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
}
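
/*
 * Build the IDMAC descriptor chain for a transfer. Each descriptor covers at
 * most eight blocks (one PAGE_SIZE chunk of the bounce buffer); the first
 * descriptor carries the "first segment" flag and the last one the "last
 * segment" flag. The chain is flushed from the data cache, the controller is
 * pointed at it through DBADDR, and DMA is enabled in CTRL and BMOD.
 */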
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;
	unsigned int i = 0, flags, cnt, blk_cnt;
	ulong data_start, data_end;

	blk_cnt = data->blocks;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	/* Clear IDMAC interrupt */
	dwmci_writel(host, DWMCI_IDSTS, 0xFFFFFFFF);

	data_start = (ulong)cur_idmac;
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	do {
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
		if (blk_cnt <= 8) {
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else
			cnt = data->blocksize * 8;

		dwmci_set_idma_desc(cur_idmac, flags, cnt,
				    (ulong)bounce_buffer + (i * PAGE_SIZE));

		cur_idmac++;
		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
		i++;
	} while (1);

	data_end = (ulong)cur_idmac;
	flush_dcache_range(data_start, roundup(data_end, ARCH_DMA_MINALIGN));

	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}
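
/*
 * Poll the STATUS register until the given FIFO status bit clears or the
 * retry budget runs out. On return *len holds the last STATUS value, which
 * the caller decodes into a FIFO fill level.
 */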
static int dwmci_fifo_ready(struct dwmci_host *host, u32 bit, u32 *len)
{
	u32 timeout = 20000;

	*len = dwmci_readl(host, DWMCI_STATUS);
	while (--timeout && (*len & bit)) {
		udelay(200);
		*len = dwmci_readl(host, DWMCI_STATUS);
	}

	if (!timeout) {
		debug("%s: FIFO underflow timeout\n", __func__);
		return -ETIMEDOUT;
	}

	return 0;
}
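
/*
 * Derive a data-phase timeout in milliseconds from the transfer size, bus
 * clock, bus width and DDR mode, with a 1000 ms floor. For example, one
 * 512-byte block at 52 MHz on a 4-bit bus gives
 * 512 * 8 * 1000 * 2 / 52000000 / 4 = 0 ms, so the 1000 ms floor applies.
 */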
static unsigned int dwmci_get_timeout(struct mmc *mmc, const unsigned int size)
{
	unsigned int timeout;

	timeout = size * 8 * 1000;	/* counting in bits and msec */
	timeout *= 2;			/* wait twice as long */
	timeout /= mmc->clock;
	timeout /= mmc->bus_width;
	timeout /= mmc->ddr_mode ? 2 : 1;
	timeout = (timeout < 1000) ? 1000 : timeout;

	return timeout;
}
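
/*
 * Wait for the data phase to complete. In FIFO (PIO) mode the RX/TX data
 * request interrupts are serviced here by draining or filling the data FIFO
 * one 32-bit word at a time; in DMA mode the IDMAC moves the data and this
 * loop only watches for errors, the data-transfer-over bit and the overall
 * timeout.
 */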
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	struct mmc *mmc = host->mmc;
	int ret = 0;
	u32 timeout, mask, size, i, len = 0;
	u32 *buf = NULL;
	ulong start = get_timer(0);
	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
			    RX_WMARK_SHIFT) + 1) * 2;

	size = data->blocksize * data->blocks;
	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	timeout = dwmci_get_timeout(mmc, size);

	size /= 4;	/* FIFO accesses are 32-bit words */

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			ret = -EINVAL;
			break;
		}

		if (host->fifo_mode && size) {
			len = 0;
			if (data->flags == MMC_DATA_READ &&
			    (mask & DWMCI_INTMSK_RXDR)) {
				while (size) {
					ret = dwmci_fifo_ready(host,
							DWMCI_FIFO_EMPTY, &len);
					if (ret < 0)
						break;

					len = (len >> DWMCI_FIFO_SHIFT) &
						DWMCI_FIFO_MASK;
					len = min(size, len);
					for (i = 0; i < len; i++)
						*buf++ = dwmci_readl(host,
								     DWMCI_DATA);
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_RXDR);
			} else if (data->flags == MMC_DATA_WRITE &&
				   (mask & DWMCI_INTMSK_TXDR)) {
				while (size) {
					ret = dwmci_fifo_ready(host,
							DWMCI_FIFO_FULL, &len);
					if (ret < 0)
						break;

					len = fifo_depth - ((len >>
						DWMCI_FIFO_SHIFT) &
						DWMCI_FIFO_MASK);
					len = min(size, len);
					for (i = 0; i < len; i++)
						dwmci_writel(host, DWMCI_DATA,
							     *buf++);
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_TXDR);
			}
		}

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n", __func__);
			ret = -ETIMEDOUT;
			break;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}
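
/*
 * Translate the direction of an mmc_data transfer into CMD register flags:
 * data is always expected, and writes additionally set the read/write bit.
 */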
static int dwmci_set_transfer_mode(struct dwmci_host *host,
				   struct mmc_data *data)
{
	unsigned long mode;

	mode = DWMCI_CMD_DATA_EXP;
	if (data->flags & MMC_DATA_WRITE)
		mode |= DWMCI_CMD_RW;

	return mode;
}
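
/*
 * Send a single command, optionally with data: wait for the controller to go
 * idle, prepare the transfer (FIFO registers in PIO mode, bounce buffer and
 * IDMAC descriptors in DMA mode), program CMDARG/CMD, poll RINTSTS for
 * command completion and response errors, read back the response, then run
 * the data phase and tear down DMA.
 */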
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0, i;
	unsigned int timeout = 500;
	u32 retry = 100000;
	u32 mask, ctrl;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				ret = bounce_buffer_start(&bbstate,
						(void *)data->dest,
						data->blocksize *
						data->blocks, GEN_BB_WRITE);
			} else {
				ret = bounce_buffer_start(&bbstate,
						(void *)data->src,
						data->blocksize *
						data->blocks, GEN_BB_READ);
			}

			if (ret)
				return ret;

			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -EBUSY;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	} else if ((cmd->resp_type & MMC_RSP_CRC) &&
		   (mask & DWMCI_INTMSK_RCRC)) {
		debug("%s: Response CRC Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* Only DMA mode needs the IDMAC completion handshake. */
		if (!host->fifo_mode) {
			if (data->flags == MMC_DATA_READ)
				mask = DWMCI_IDINTEN_RI;
			else
				mask = DWMCI_IDINTEN_TI;

			ret = wait_for_bit_le32(host->ioaddr + DWMCI_IDSTS,
						mask, true, 1000, false);
			if (ret)
				debug("%s: DWMCI_IDINTEN mask 0x%x timeout.\n",
				      __func__, mask);
			/* Clear the IDMAC interrupts. */
			dwmci_writel(host, DWMCI_IDSTS, DWMCI_IDINTEN_MASK);

			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);
			bounce_buffer_stop(&bbstate);
		}
	}

	udelay(100);

	return ret;
}
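
/*
 * Program the card clock. The source clock comes from host->get_mmc_clk() if
 * provided, otherwise from host->bus_hz. With the usual CLKDIV encoding
 * (card clock = source clock / (2 * CLKDIV), 0 meaning bypass), a 50 MHz
 * source and a 400 kHz target give DIV_ROUND_UP(50000000, 2 * 400000) = 63,
 * i.e. roughly 397 kHz on the card clock.
 */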
static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
{
	u32 div, status;
	int timeout = 10000;
	unsigned long sclk;

	if ((freq == host->clock) || (freq == 0))
		return 0;
	/*
	 * If host->get_mmc_clk isn't defined,
	 * then assume that host->bus_hz is the source clock value.
	 * host->bus_hz should be set by the user.
	 */
	if (host->get_mmc_clk)
		sclk = host->get_mmc_clk(host, freq);
	else if (host->bus_hz)
		sclk = host->bus_hz;
	else {
		debug("%s: Didn't get source clock value.\n", __func__);
		return -EINVAL;
	}

	if (freq == sclk)
		div = 0;	/* bypass mode */
	else
		div = DIV_ROUND_UP(sclk, 2 * freq);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	dwmci_writel(host, DWMCI_CLKDIV, div);
	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
		     DWMCI_CLKEN_LOW_PWR);

	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	timeout = 10000;
	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	host->clock = freq;

	return 0;
}
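
/*
 * Apply the settings requested by the MMC core: reprogram the bus clock,
 * select the 1/4/8-bit card type, set or clear DDR mode in UHS_REG and,
 * when a vqmmc regulator is available, switch the I/O voltage between
 * 3.3 V and 1.8 V.
 */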
#ifdef CONFIG_DM_MMC
static int dwmci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_set_ios(struct mmc *mmc)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	dwmci_setup_bus(host, mmc->clock);
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc->ddr_mode)
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

#if CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vqmmc_supply) {
		int ret;

		if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			regulator_set_value(mmc->vqmmc_supply, 1800000);
		else
			regulator_set_value(mmc->vqmmc_supply, 3300000);

		ret = regulator_set_enable_if_allowed(mmc->vqmmc_supply, true);
		if (ret)
			return ret;
	}
#endif

	return 0;
}
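
/*
 * One-time controller setup: optional board hook, power enable, full reset,
 * enumeration clock, interrupt masking, maximum data timeout and FIFO
 * watermarks (derived from the FIFO depth reported in FIFOTH unless the
 * platform already supplied host->fifoth_val).
 */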
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;

	if (host->board_init)
		host->board_init(host);

	dwmci_writel(host, DWMCI_PWREN, 1);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);

	if (!host->fifoth_val) {
		u32 fifo_size;

		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
				   TX_WMARK(fifo_size / 2);
	}
	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	if (!host->fifo_mode)
		dwmci_writel(host, DWMCI_IDINTEN, DWMCI_IDINTEN_MASK);

	return 0;
}

#ifdef CONFIG_DM_MMC
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

const struct dm_mmc_ops dm_dwmci_ops = {
	.send_cmd	= dwmci_send_cmd,
	.set_ios	= dwmci_set_ios,
};
#else
static const struct mmc_ops dwmci_ops = {
	.send_cmd	= dwmci_send_cmd,
	.set_ios	= dwmci_set_ios,
	.init		= dwmci_init,
};
#endif
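
/*
 * Fill in an mmc_config for this controller: clock limits, supported
 * voltages, host capabilities (bus width from host->buswidth plus the
 * high-speed modes) and the maximum block count per transfer.
 */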
void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
		     u32 max_clk, u32 min_clk)
{
	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &dwmci_ops;
#endif
	cfg->f_min = min_clk;
	cfg->f_max = max_clk;

	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;

	cfg->host_caps = host->caps;

	if (host->buswidth == 8) {
		cfg->host_caps |= MMC_MODE_8BIT;
		cfg->host_caps &= ~MMC_MODE_4BIT;
	} else {
		cfg->host_caps |= MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
	}
	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
}

#ifdef CONFIG_DM_MMC
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif
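
/*
 * Usage sketch for the non-DM path, assuming a board-specific register base;
 * MY_DWMMC_BASE and the clock rates below are illustrative placeholders, not
 * part of this driver:
 *
 *	static struct dwmci_host board_host;
 *
 *	int board_mmc_init(struct bd_info *bis)
 *	{
 *		board_host.name = "dwmmc";
 *		board_host.ioaddr = (void *)MY_DWMMC_BASE;
 *		board_host.buswidth = 4;
 *		board_host.bus_hz = 50000000;
 *
 *		return add_dwmci(&board_host, 50000000, 400000);
 *	}
 *
 * DM-based platforms instead bind through dwmci_bind() and call
 * dwmci_probe() from their own probe routine.
 */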