/*
 * (C) Copyright 2007-2011
 * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
 *
 * MMC driver for allwinner sunxi platform.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <malloc.h>
#include <mmc.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/cpu.h>
#include <asm/arch/mmc.h>

struct sunxi_mmc_des {
	u32 reserved1_1:1;
	u32 dic:1;		/* disable interrupt on completion */
	u32 last_des:1;		/* 1-this data buffer is the last buffer */
	u32 first_des:1;	/* 1-data buffer is the first buffer,
				   0-data buffer contained in the next
				   descriptor is 1st buffer */
	u32 des_chain:1;	/* 1-the 2nd address in the descriptor is the
				   next descriptor address */
	u32 end_of_ring:1;	/* 1-last descriptor flag when using dual
				   data buffer in descriptor */
	u32 reserved1_2:24;
	u32 card_err_sum:1;	/* transfer error flag */
	u32 own:1;		/* des owner:1-idma owns it, 0-host owns it */
#define SDXC_DES_NUM_SHIFT 16
#define SDXC_DES_BUFFER_MAX_LEN	(1 << SDXC_DES_NUM_SHIFT)
	u32 data_buf1_sz:16;	/* buffer 1 size, 0 means max (64 KiB) */
	u32 data_buf2_sz:16;
	u32 buf_addr_ptr1;	/* address of data buffer 1 */
	u32 buf_addr_ptr2;	/* next descriptor when chained */
};

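/*
 * Note on the layout above (summarising how mmc_trans_data_by_dma() below
 * uses it): each descriptor covers up to SDXC_DES_BUFFER_MAX_LEN (64 KiB) of
 * the transfer buffer through buf_addr_ptr1/data_buf1_sz, and with des_chain
 * set, buf_addr_ptr2 points at the next descriptor instead of a second data
 * buffer. The last descriptor carries last_des/end_of_ring and a zero
 * buf_addr_ptr2 to terminate the chain.
 */
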
struct sunxi_mmc_host {
	unsigned mmc_no;
	uint32_t *mclkreg;
	unsigned database;
	unsigned fatal_err;
	unsigned mod_clk;
	struct sunxi_mmc *reg;
	struct mmc_config cfg;
};

/* support 4 mmc hosts */
struct sunxi_mmc_host mmc_host[4];

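/*
 * One entry per controller: mmc_resource_init() below maps index 0-3 to
 * SUNXI_MMC0_BASE..SUNXI_MMC3_BASE and the matching CCM clock register.
 */
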
static int mmc_resource_init(int sdc_no)
{
	struct sunxi_mmc_host *mmchost = &mmc_host[sdc_no];
	struct sunxi_ccm_reg *ccm = (struct sunxi_ccm_reg *)SUNXI_CCM_BASE;

	debug("init mmc %d resource\n", sdc_no);

	switch (sdc_no) {
	case 0:
		mmchost->reg = (struct sunxi_mmc *)SUNXI_MMC0_BASE;
		mmchost->mclkreg = &ccm->sd0_clk_cfg;
		break;
	case 1:
		mmchost->reg = (struct sunxi_mmc *)SUNXI_MMC1_BASE;
		mmchost->mclkreg = &ccm->sd1_clk_cfg;
		break;
	case 2:
		mmchost->reg = (struct sunxi_mmc *)SUNXI_MMC2_BASE;
		mmchost->mclkreg = &ccm->sd2_clk_cfg;
		break;
	case 3:
		mmchost->reg = (struct sunxi_mmc *)SUNXI_MMC3_BASE;
		mmchost->mclkreg = &ccm->sd3_clk_cfg;
		break;
	default:
		printf("Wrong mmc number %d\n", sdc_no);
		return -1;
	}
	mmchost->database = (unsigned int)mmchost->reg + 0x100;
	mmchost->mmc_no = sdc_no;

	return 0;
}

static int mmc_clk_io_on(int sdc_no)
{
	unsigned int pll_clk;
	unsigned int divider;
	struct sunxi_mmc_host *mmchost = &mmc_host[sdc_no];
	struct sunxi_ccm_reg *ccm = (struct sunxi_ccm_reg *)SUNXI_CCM_BASE;

	debug("init mmc %d clock and io\n", sdc_no);

	/* config ahb clock */
	setbits_le32(&ccm->ahb_gate0, 1 << AHB_GATE_OFFSET_MMC(sdc_no));

	/* config mod clock */
	pll_clk = clock_get_pll6();
	/* should be close to 100 MHz but no more, so round up */
	divider = ((pll_clk + 99999999) / 100000000) - 1;
	writel(CCM_MMC_CTRL_ENABLE | CCM_MMC_CTRL_PLL6 | divider,
	       mmchost->mclkreg);
	mmchost->mod_clk = pll_clk / (divider + 1);

	return 0;
}

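/*
 * Worked example for the divider above (not from the original source; assumes
 * PLL6 runs at 600 MHz, a common setting on these SoCs):
 * divider = ((600000000 + 99999999) / 100000000) - 1 = 5, so
 * mod_clk = 600 MHz / (5 + 1) = 100 MHz, the highest rate not above the
 * 100 MHz target.
 */
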
static int mmc_update_clk(struct mmc *mmc)
{
	struct sunxi_mmc_host *mmchost = mmc->priv;
	unsigned int cmd;
	unsigned timeout_msecs = 2000;

	cmd = SUNXI_MMC_CMD_START |
	      SUNXI_MMC_CMD_UPCLK_ONLY |
	      SUNXI_MMC_CMD_WAIT_PRE_OVER;
	writel(cmd, &mmchost->reg->cmd);
	while (readl(&mmchost->reg->cmd) & SUNXI_MMC_CMD_START) {
		if (!timeout_msecs--)
			return -1;
		udelay(1000);
	}

	/* clock update sets various irq status bits, clear these */
	writel(readl(&mmchost->reg->rint), &mmchost->reg->rint);

	return 0;
}

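/*
 * Note on mmc_update_clk() above: with SUNXI_MMC_CMD_UPCLK_ONLY set, the
 * command register is used only to latch new card-clock settings into the
 * controller, no command is sent to the card; the START bit is polled until
 * the controller clears it to signal that the update has completed.
 */
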
static int mmc_config_clock(struct mmc *mmc, unsigned div)
{
	struct sunxi_mmc_host *mmchost = mmc->priv;
	unsigned rval = readl(&mmchost->reg->clkcr);

	/* Disable Clock */
	rval &= ~SUNXI_MMC_CLK_ENABLE;
	writel(rval, &mmchost->reg->clkcr);
	if (mmc_update_clk(mmc))
		return -1;

	/* Change Divider Factor */
	rval &= ~SUNXI_MMC_CLK_DIVIDER_MASK;
	rval |= div;
	writel(rval, &mmchost->reg->clkcr);
	if (mmc_update_clk(mmc))
		return -1;

	/* Re-enable Clock */
	rval |= SUNXI_MMC_CLK_ENABLE;
	writel(rval, &mmchost->reg->clkcr);
	if (mmc_update_clk(mmc))
		return -1;

	return 0;
}

static void mmc_set_ios(struct mmc *mmc)
{
	struct sunxi_mmc_host *mmchost = mmc->priv;
	unsigned int clkdiv = 0;

	debug("set ios: bus_width: %x, clock: %d, mod_clk: %d\n",
	      mmc->bus_width, mmc->clock, mmchost->mod_clk);

	/* Change clock first */
	clkdiv = (mmchost->mod_clk + (mmc->clock >> 1)) / mmc->clock / 2;
	if (mmc_config_clock(mmc, clkdiv)) {
		mmchost->fatal_err = 1;
		return;
	}

	/* Change bus width */
	if (mmc->bus_width == 8)
		writel(0x2, &mmchost->reg->width);
	else if (mmc->bus_width == 4)
		writel(0x1, &mmchost->reg->width);
	else
		writel(0x0, &mmchost->reg->width);
}

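/*
 * Worked example for the clkdiv computation above (not from the original
 * source; assumes mod_clk = 100 MHz and that a clkcr divider of N yields a
 * card clock of mod_clk / (2 * N)): for a requested 52 MHz clock,
 * clkdiv = (100000000 + 26000000) / 52000000 / 2 = 1, i.e. ~50 MHz on the
 * card; for 400 kHz, clkdiv = 125, i.e. exactly 400 kHz.
 */
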
static int mmc_core_init(struct mmc *mmc)
{
	struct sunxi_mmc_host *mmchost = mmc->priv;

	/* Reset controller */
	writel(SUNXI_MMC_GCTRL_RESET, &mmchost->reg->gctrl);
	udelay(1000);

	return 0;
}

static int mmc_trans_data_by_cpu(struct mmc *mmc, struct mmc_data *data)
{
	struct sunxi_mmc_host *mmchost = mmc->priv;
	const int reading = !!(data->flags & MMC_DATA_READ);
	const uint32_t status_bit = reading ? SUNXI_MMC_STATUS_FIFO_EMPTY :
					      SUNXI_MMC_STATUS_FIFO_FULL;
	unsigned i;
	unsigned byte_cnt = data->blocksize * data->blocks;
	unsigned timeout_msecs = 2000;
	unsigned *buff = (unsigned int *)(reading ? data->dest : data->src);

	for (i = 0; i < (byte_cnt >> 2); i++) {
		while (readl(&mmchost->reg->status) & status_bit) {
			if (!timeout_msecs--)
				return -1;
			udelay(1000);
		}

		if (reading)
			buff[i] = readl(mmchost->database);
		else
			writel(buff[i], mmchost->database);
	}

	return 0;
}

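/*
 * Note on the PIO path above: data is moved one 32-bit word at a time through
 * the FIFO window at the controller base + 0x100 (mmchost->database), so
 * byte_cnt is expected to be a multiple of 4; any trailing bytes beyond
 * byte_cnt >> 2 words would not be transferred.
 */
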
static int mmc_trans_data_by_dma(struct mmc *mmc, struct mmc_data *data)
{
	struct sunxi_mmc_host *mmchost = mmc->priv;
	unsigned byte_cnt = data->blocksize * data->blocks;
	unsigned char *buff;
	unsigned des_idx = 0;
	unsigned buff_frag_num =
		(byte_cnt + SDXC_DES_BUFFER_MAX_LEN - 1) >> SDXC_DES_NUM_SHIFT;
	unsigned remain;
	unsigned i, rval;
	ALLOC_CACHE_ALIGN_BUFFER(struct sunxi_mmc_des, pdes, buff_frag_num);

	buff = data->flags & MMC_DATA_READ ?
	    (unsigned char *)data->dest : (unsigned char *)data->src;
	remain = byte_cnt & (SDXC_DES_BUFFER_MAX_LEN - 1);

	flush_cache((unsigned long)buff, (unsigned long)byte_cnt);
	for (i = 0; i < buff_frag_num; i++, des_idx++) {
		memset((void *)&pdes[des_idx], 0, sizeof(struct sunxi_mmc_des));
		pdes[des_idx].des_chain = 1;
		pdes[des_idx].own = 1;
		pdes[des_idx].dic = 1;
		if (buff_frag_num > 1 && i != buff_frag_num - 1)
			pdes[des_idx].data_buf1_sz = 0; /* 0 == max_len */
		else
			pdes[des_idx].data_buf1_sz = remain;

		pdes[des_idx].buf_addr_ptr1 =
			(u32) buff + i * SDXC_DES_BUFFER_MAX_LEN;
		if (i == 0)
			pdes[des_idx].first_des = 1;

		if (i == buff_frag_num - 1) {
			pdes[des_idx].dic = 0;
			pdes[des_idx].last_des = 1;
			pdes[des_idx].end_of_ring = 1;
			pdes[des_idx].buf_addr_ptr2 = 0;
		} else {
			pdes[des_idx].buf_addr_ptr2 = (u32)&pdes[des_idx + 1];
		}
	}
	flush_cache((unsigned long)pdes,
		    sizeof(struct sunxi_mmc_des) * (des_idx + 1));

	rval = readl(&mmchost->reg->gctrl);
	/* Enable DMA */
	writel(rval | SUNXI_MMC_GCTRL_DMA_RESET | SUNXI_MMC_GCTRL_DMA_ENABLE,
	       &mmchost->reg->gctrl);
	/* Reset iDMA */
	writel(SUNXI_MMC_IDMAC_RESET, &mmchost->reg->dmac);
	/* Enable iDMA */
	writel(SUNXI_MMC_IDMAC_FIXBURST | SUNXI_MMC_IDMAC_ENABLE,
	       &mmchost->reg->dmac);
	rval = readl(&mmchost->reg->idie) &
		~(SUNXI_MMC_IDIE_TXIRQ|SUNXI_MMC_IDIE_RXIRQ);
	if (data->flags & MMC_DATA_WRITE)
		rval |= SUNXI_MMC_IDIE_TXIRQ;
	else
		rval |= SUNXI_MMC_IDIE_RXIRQ;
	writel(rval, &mmchost->reg->idie);
	writel((u32) pdes, &mmchost->reg->dlba);
	/* set DMA burst size and FIFO trigger levels */
	writel((0x2 << 28) | (0x7 << 16) | (0x01 << 3),
	       &mmchost->reg->ftrglevel);

	return 0;
}

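/*
 * Sizing example for mmc_trans_data_by_dma() above (not from the original
 * source): a single 512-byte block needs one descriptor (buff_frag_num = 1,
 * remain = 512), while a 96 KiB transfer needs two, the first covering a full
 * 64 KiB fragment (data_buf1_sz = 0, i.e. max length) and the second the
 * remaining 32 KiB.
 */
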
static void mmc_enable_dma_accesses(struct mmc *mmc, int dma)
{
	struct sunxi_mmc_host *mmchost = mmc->priv;

	unsigned int gctrl = readl(&mmchost->reg->gctrl);
	if (dma)
		gctrl &= ~SUNXI_MMC_GCTRL_ACCESS_BY_AHB;
	else
		gctrl |= SUNXI_MMC_GCTRL_ACCESS_BY_AHB;
	writel(gctrl, &mmchost->reg->gctrl);
}

static int mmc_rint_wait(struct mmc *mmc, unsigned int timeout_msecs,
			 unsigned int done_bit, const char *what)
{
	struct sunxi_mmc_host *mmchost = mmc->priv;
	unsigned int status;

	do {
		status = readl(&mmchost->reg->rint);
		if (!timeout_msecs-- ||
		    (status & SUNXI_MMC_RINT_INTERRUPT_ERROR_BIT)) {
			debug("%s timeout %x\n", what,
			      status & SUNXI_MMC_RINT_INTERRUPT_ERROR_BIT);
			return TIMEOUT;
		}
		udelay(1000);
	} while (!(status & done_bit));

	return 0;
}

static int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			struct mmc_data *data)
{
	struct sunxi_mmc_host *mmchost = mmc->priv;
	unsigned int cmdval = SUNXI_MMC_CMD_START;
	unsigned int timeout_msecs;
	int error = 0;
	unsigned int status = 0;
	unsigned int usedma = 0;
	unsigned int bytecnt = 0;

	if (mmchost->fatal_err)
		return -1;
	if (cmd->resp_type & MMC_RSP_BUSY)
		debug("mmc cmd %d check rsp busy\n", cmd->cmdidx);
	if (cmd->cmdidx == 12)
		return 0;

	if (!cmd->cmdidx)
		cmdval |= SUNXI_MMC_CMD_SEND_INIT_SEQ;
	if (cmd->resp_type & MMC_RSP_PRESENT)
		cmdval |= SUNXI_MMC_CMD_RESP_EXPIRE;
	if (cmd->resp_type & MMC_RSP_136)
		cmdval |= SUNXI_MMC_CMD_LONG_RESPONSE;
	if (cmd->resp_type & MMC_RSP_CRC)
		cmdval |= SUNXI_MMC_CMD_CHK_RESPONSE_CRC;

	if (data) {
		if ((u32) data->dest & 0x3) {
			error = -1;
			goto out;
		}

		cmdval |= SUNXI_MMC_CMD_DATA_EXPIRE|SUNXI_MMC_CMD_WAIT_PRE_OVER;
		if (data->flags & MMC_DATA_WRITE)
			cmdval |= SUNXI_MMC_CMD_WRITE;
		if (data->blocks > 1)
			cmdval |= SUNXI_MMC_CMD_AUTO_STOP;
		writel(data->blocksize, &mmchost->reg->blksz);
		writel(data->blocks * data->blocksize, &mmchost->reg->bytecnt);
	}

	debug("mmc %d, cmd %d(0x%08x), arg 0x%08x\n", mmchost->mmc_no,
	      cmd->cmdidx, cmdval | cmd->cmdidx, cmd->cmdarg);
	writel(cmd->cmdarg, &mmchost->reg->arg);

	if (!data)
		writel(cmdval | cmd->cmdidx, &mmchost->reg->cmd);

	/*
	 * transfer data and check status
	 * STATREG[2] : FIFO empty
	 * STATREG[3] : FIFO full
	 */
	if (data) {
		int ret = 0;

		bytecnt = data->blocksize * data->blocks;
		debug("trans data %d bytes\n", bytecnt);
#if defined(CONFIG_MMC_SUNXI_USE_DMA) && !defined(CONFIG_SPL_BUILD)
		if (bytecnt > 64) {
#else
		if (0) {
#endif
			usedma = 1;
			mmc_enable_dma_accesses(mmc, 1);
			ret = mmc_trans_data_by_dma(mmc, data);
			writel(cmdval | cmd->cmdidx, &mmchost->reg->cmd);
		} else {
			mmc_enable_dma_accesses(mmc, 0);
			writel(cmdval | cmd->cmdidx, &mmchost->reg->cmd);
			ret = mmc_trans_data_by_cpu(mmc, data);
		}
		if (ret) {
			error = readl(&mmchost->reg->rint) &
				SUNXI_MMC_RINT_INTERRUPT_ERROR_BIT;
			error = TIMEOUT;
			goto out;
		}
	}

	error = mmc_rint_wait(mmc, 0xfffff, SUNXI_MMC_RINT_COMMAND_DONE, "cmd");
	if (error)
		goto out;

	if (data) {
		timeout_msecs = usedma ? 120 * bytecnt : 120;
		debug("calc timeout %x msec\n", timeout_msecs);
		error = mmc_rint_wait(mmc, timeout_msecs,
				      data->blocks > 1 ?
				      SUNXI_MMC_RINT_AUTO_COMMAND_DONE :
				      SUNXI_MMC_RINT_DATA_OVER,
				      "data");
		if (error)
			goto out;
	}

	if (cmd->resp_type & MMC_RSP_BUSY) {
		timeout_msecs = 2000;
		do {
			status = readl(&mmchost->reg->status);
			if (!timeout_msecs--) {
				debug("busy timeout\n");
				error = TIMEOUT;
				goto out;
			}
			udelay(1000);
		} while (status & SUNXI_MMC_STATUS_CARD_DATA_BUSY);
	}

	if (cmd->resp_type & MMC_RSP_136) {
		cmd->response[0] = readl(&mmchost->reg->resp3);
		cmd->response[1] = readl(&mmchost->reg->resp2);
		cmd->response[2] = readl(&mmchost->reg->resp1);
		cmd->response[3] = readl(&mmchost->reg->resp0);
		debug("mmc resp 0x%08x 0x%08x 0x%08x 0x%08x\n",
		      cmd->response[3], cmd->response[2],
		      cmd->response[1], cmd->response[0]);
	} else {
		cmd->response[0] = readl(&mmchost->reg->resp0);
		debug("mmc resp 0x%08x\n", cmd->response[0]);
	}
out:
	if (data && usedma) {
		/*
		 * IDST[0] : idma tx int
		 * IDST[1] : idma rx int
		 * IDST[2] : idma fatal bus error
		 * IDST[4] : idma descriptor invalid
		 * IDST[5] : idma error summary
		 * IDST[8] : idma normal interrupt summary
		 * IDST[9] : idma abnormal interrupt summary
		 */
		status = readl(&mmchost->reg->idst);
		writel(status, &mmchost->reg->idst);
		writel(0, &mmchost->reg->idie);
		writel(0, &mmchost->reg->dmac);
		writel(readl(&mmchost->reg->gctrl) & ~SUNXI_MMC_GCTRL_DMA_ENABLE,
		       &mmchost->reg->gctrl);
	}
	if (error < 0) {
		writel(SUNXI_MMC_GCTRL_RESET, &mmchost->reg->gctrl);
		mmc_update_clk(mmc);
	}
	writel(0xffffffff, &mmchost->reg->rint);
	writel(readl(&mmchost->reg->gctrl) | SUNXI_MMC_GCTRL_FIFO_RESET,
	       &mmchost->reg->gctrl);

	return error;
}

static const struct mmc_ops sunxi_mmc_ops = {
	.send_cmd	= mmc_send_cmd,
	.set_ios	= mmc_set_ios,
	.init		= mmc_core_init,
};

int sunxi_mmc_init(int sdc_no)
{
	struct mmc_config *cfg = &mmc_host[sdc_no].cfg;

	memset(&mmc_host[sdc_no], 0, sizeof(struct sunxi_mmc_host));

	cfg->name = "SUNXI SD/MMC";
	cfg->ops = &sunxi_mmc_ops;

	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34;
	cfg->host_caps = MMC_MODE_4BIT;
	cfg->host_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;
	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;

	cfg->f_min = 400000;
	cfg->f_max = 52000000;

	mmc_resource_init(sdc_no);
	mmc_clk_io_on(sdc_no);

	if (mmc_create(cfg, &mmc_host[sdc_no]) == NULL)
		return -1;

	return 0;
}

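/*
 * Usage note (a sketch, not part of the original file): sunxi_mmc_init() is
 * expected to be called once per used controller from board code, for example
 * from a board_mmc_init() implementation along the lines of:
 *
 *	int board_mmc_init(bd_t *bis)
 *	{
 *		return sunxi_mmc_init(CONFIG_MMC_SUNXI_SLOT);
 *	}
 *
 * where CONFIG_MMC_SUNXI_SLOT selects which of the four controllers (0-3) the
 * board wires to its SD slot; pin muxing for that controller is assumed to be
 * handled by the board code as well.
 */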