1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
15 #include <dm/device-internal.h>
19 #include <linux/bitops.h>
20 #include <linux/delay.h>
21 #include <power/regulator.h>
24 #include <linux/list.h>
26 #include "mmc_private.h"
28 #define DEFAULT_CMD6_TIMEOUT_MS 500
30 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
32 #if !CONFIG_IS_ENABLED(DM_MMC)
34 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
39 __weak int board_mmc_getwp(struct mmc *mmc)
44 int mmc_getwp(struct mmc *mmc)
48 wp = board_mmc_getwp(mmc);
51 if (mmc->cfg->ops->getwp)
52 wp = mmc->cfg->ops->getwp(mmc);
60 __weak int board_mmc_getcd(struct mmc *mmc)
66 #ifdef CONFIG_MMC_TRACE
67 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
69 printf("CMD_SEND:%d\n", cmd->cmdidx);
70 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
73 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
79 printf("\t\tRET\t\t\t %d\n", ret);
81 switch (cmd->resp_type) {
83 printf("\t\tMMC_RSP_NONE\n");
86 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
90 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
94 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
96 printf("\t\t \t\t 0x%08x \n",
98 printf("\t\t \t\t 0x%08x \n",
100 printf("\t\t \t\t 0x%08x \n",
103 printf("\t\t\t\t\tDUMPING DATA\n");
104 for (i = 0; i < 4; i++) {
106 printf("\t\t\t\t\t%03d - ", i*4);
107 ptr = (u8 *)&cmd->response[i];
109 for (j = 0; j < 4; j++)
110 printf("%02x ", *ptr--);
115 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
119 printf("\t\tERROR MMC rsp not supported\n");
125 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
129 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
130 printf("CURR STATE:%d\n", status);
134 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * mmc_mode_name() - map a bus_mode enum value to a printable name.
 * NOTE(review): listing is elided here; the in-range lookup/return and
 * the closing brace fall in the missing lines.
 */
135 const char *mmc_mode_name(enum bus_mode mode)
137 static const char *const names[] = {
138 [MMC_LEGACY] = "MMC legacy",
139 [MMC_HS] = "MMC High Speed (26MHz)",
140 [SD_HS] = "SD High Speed (50MHz)",
141 [UHS_SDR12] = "UHS SDR12 (25MHz)",
142 [UHS_SDR25] = "UHS SDR25 (50MHz)",
143 [UHS_SDR50] = "UHS SDR50 (100MHz)",
144 [UHS_SDR104] = "UHS SDR104 (208MHz)",
145 [UHS_DDR50] = "UHS DDR50 (50MHz)",
146 [MMC_HS_52] = "MMC High Speed (52MHz)",
147 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
148 [MMC_HS_200] = "HS200 (200MHz)",
149 [MMC_HS_400] = "HS400 (200MHz)",
150 [MMC_HS_400_ES] = "HS400ES (200MHz)",
/* Out-of-range modes get a fixed fallback string */
153 if (mode >= MMC_MODES_END)
154 return "Unknown mode";
/*
 * mmc_mode2freq() - nominal bus clock (Hz) for a given bus mode.
 * MMC_LEGACY is special-cased to the card's probed legacy speed; other
 * modes use the static table. NOTE(review): the table-indexed return
 * path and closing brace are in lines elided from this listing.
 */
160 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
162 static const int freqs[] = {
163 [MMC_LEGACY] = 25000000,
166 [MMC_HS_52] = 52000000,
167 [MMC_DDR_52] = 52000000,
168 [UHS_SDR12] = 25000000,
169 [UHS_SDR25] = 50000000,
170 [UHS_SDR50] = 100000000,
171 [UHS_DDR50] = 50000000,
172 [UHS_SDR104] = 208000000,
173 [MMC_HS_200] = 200000000,
174 [MMC_HS_400] = 200000000,
175 [MMC_HS_400_ES] = 200000000,
/* Legacy speed is per-card, not a fixed table entry */
178 if (mode == MMC_LEGACY)
179 return mmc->legacy_speed;
180 else if (mode >= MMC_MODES_END)
/*
 * mmc_select_mode() - record the chosen bus mode on the mmc struct and
 * derive tran_speed and the DDR flag from it. Does not touch the host
 * controller itself (callers follow up with mmc_set_clock() etc.).
 */
186 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
188 mmc->selected_mode = mode;
189 mmc->tran_speed = mmc_mode2freq(mmc, mode);
190 mmc->ddr_mode = mmc_is_mode_ddr(mode);
191 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
192 mmc->tran_speed / 1000000);
196 #if !CONFIG_IS_ENABLED(DM_MMC)
197 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
201 mmmc_trace_before_send(mmc, cmd);
202 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
203 mmmc_trace_after_send(mmc, cmd, ret);
/*
 * mmc_send_status() - issue CMD13 (SEND_STATUS) and return the card
 * status register in *status. In SPI mode the RCA argument is skipped.
 * NOTE(review): the retry loop implied by 'retries = 5' and the error
 * paths are in lines elided from this listing.
 */
209 int mmc_send_status(struct mmc *mmc, unsigned int *status)
212 int err, retries = 5;
214 cmd.cmdidx = MMC_CMD_SEND_STATUS;
215 cmd.resp_type = MMC_RSP_R1;
216 if (!mmc_host_is_spi(mmc))
217 cmd.cmdarg = mmc->rca << 16;
220 err = mmc_send_cmd(mmc, &cmd, NULL);
222 mmc_trace_state(mmc, &cmd);
223 *status = cmd.response[0];
227 mmc_trace_state(mmc, &cmd);
/*
 * mmc_poll_for_busy() - wait until the card leaves busy state, either by
 * watching DAT0 (preferred, via mmc_wait_dat0) or by polling CMD13 status
 * once per millisecond until timeout_ms expires. Reports card status
 * errors and timeout; exact control flow is partly elided in this listing.
 */
231 int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
236 err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
241 err = mmc_send_status(mmc, &status);
/* Ready when the card reports RDY_FOR_DATA and is not in PRG state */
245 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
246 (status & MMC_STATUS_CURR_STATE) !=
250 if (status & MMC_STATUS_MASK) {
251 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
252 pr_err("Status Error: 0x%08x\n", status);
257 if (timeout_ms-- <= 0)
263 if (timeout_ms <= 0) {
264 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
265 pr_err("Timeout waiting card ready\n");
/*
 * mmc_set_blocklen() - issue CMD16 (SET_BLOCKLEN) to set the card's
 * block length. With CONFIG_MMC_QUIRKS, retries the command for cards
 * known to fail it on the first attempt (MMC_QUIRK_RETRY_SET_BLOCKLEN).
 * NOTE(review): early-out conditions (e.g. DDR/SPI) are elided here.
 */
273 int mmc_set_blocklen(struct mmc *mmc, int len)
281 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
282 cmd.resp_type = MMC_RSP_R1;
285 err = mmc_send_cmd(mmc, &cmd, NULL);
287 #ifdef CONFIG_MMC_QUIRKS
288 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
291 * It has been seen that SET_BLOCKLEN may fail on the first
292 * attempt, let's try a few more time
295 err = mmc_send_cmd(mmc, &cmd, NULL);
305 #ifdef MMC_SUPPORTS_TUNING
306 static const u8 tuning_blk_pattern_4bit[] = {
307 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
308 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
309 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
310 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
311 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
312 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
313 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
314 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
317 static const u8 tuning_blk_pattern_8bit[] = {
318 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
319 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
320 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
321 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
322 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
323 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
324 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
325 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
326 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
327 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
328 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
329 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
330 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
331 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
332 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
333 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/*
 * mmc_send_tuning() - read one tuning block (CMD19/CMD21 style opcode
 * passed by the caller) and compare it against the reference pattern
 * for the current bus width (8-bit vs 4-bit). Returns non-zero via the
 * memcmp path when the received data does not match the pattern.
 */
336 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
339 struct mmc_data data;
340 const u8 *tuning_block_pattern;
/* Pattern and transfer size depend on the configured bus width */
343 if (mmc->bus_width == 8) {
344 tuning_block_pattern = tuning_blk_pattern_8bit;
345 size = sizeof(tuning_blk_pattern_8bit);
346 } else if (mmc->bus_width == 4) {
347 tuning_block_pattern = tuning_blk_pattern_4bit;
348 size = sizeof(tuning_blk_pattern_4bit);
353 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
357 cmd.resp_type = MMC_RSP_R1;
359 data.dest = (void *)data_buf;
361 data.blocksize = size;
362 data.flags = MMC_DATA_READ;
364 err = mmc_send_cmd(mmc, &cmd, &data);
368 if (memcmp(data_buf, tuning_block_pattern, size))
/*
 * mmc_read_blocks() - low-level block read. Picks CMD18 (multiple) or
 * CMD17 (single), uses a block address for high-capacity cards and a
 * byte address (start * read_bl_len) otherwise, and sends CMD12
 * (STOP_TRANSMISSION) after a multi-block read.
 * NOTE(review): the blkcnt>1 condition and return values are elided.
 */
375 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
379 struct mmc_data data;
382 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
384 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
/* High-capacity cards are block-addressed, others byte-addressed */
386 if (mmc->high_capacity)
389 cmd.cmdarg = start * mmc->read_bl_len;
391 cmd.resp_type = MMC_RSP_R1;
394 data.blocks = blkcnt;
395 data.blocksize = mmc->read_bl_len;
396 data.flags = MMC_DATA_READ;
398 if (mmc_send_cmd(mmc, &cmd, &data))
402 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
404 cmd.resp_type = MMC_RSP_R1b;
405 if (mmc_send_cmd(mmc, &cmd, NULL)) {
406 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
407 pr_err("mmc fail to send stop cmd\n")ERRFIX;
416 #if !CONFIG_IS_ENABLED(DM_MMC)
417 static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
419 if (mmc->cfg->ops->get_b_max)
420 return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
422 return mmc->cfg->b_max;
426 #if CONFIG_IS_ENABLED(BLK)
/*
 * mmc_bread() - block-device read entry point. Two signatures exist
 * depending on CONFIG_BLK (udevice vs blk_desc). Validates the hwpart
 * selection and range against block_dev->lba, sets the block length,
 * then reads in chunks of at most b_max blocks.
 */
427 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
429 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
433 #if CONFIG_IS_ENABLED(BLK)
434 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
436 int dev_num = block_dev->devnum;
438 lbaint_t cur, blocks_todo = blkcnt;
444 struct mmc *mmc = find_mmc_device(dev_num);
448 if (CONFIG_IS_ENABLED(MMC_TINY))
449 err = mmc_switch_part(mmc, block_dev->hwpart);
451 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
/* Reject reads past the end of the device */
456 if ((start + blkcnt) > block_dev->lba) {
457 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
458 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
459 start + blkcnt, block_dev->lba);
464 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
465 pr_debug("%s: Failed to set blocklen\n", __func__);
469 b_max = mmc_get_b_max(mmc, dst, blkcnt);
/* Chunked transfer loop: at most b_max blocks per mmc_read_blocks() */
472 cur = (blocks_todo > b_max) ? b_max : blocks_todo;
473 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
474 pr_debug("%s: Failed to read blocks\n", __func__);
479 dst += cur * mmc->read_bl_len;
480 } while (blocks_todo > 0);
485 static int mmc_go_idle(struct mmc *mmc)
492 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
494 cmd.resp_type = MMC_RSP_NONE;
496 err = mmc_send_cmd(mmc, &cmd, NULL);
506 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * mmc_switch_voltage() - perform the SD voltage switch sequence.
 * For 3.3V the host is reconfigured directly; for 1.8V it sends CMD11,
 * waits for the card to pull DAT0 low, gates the clock while switching
 * the host signal voltage, then waits for DAT0 high again to confirm.
 */
507 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
513 * Send CMD11 only if the request is to switch the card to
516 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
517 return mmc_set_signal_voltage(mmc, signal_voltage);
519 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
521 cmd.resp_type = MMC_RSP_R1;
523 err = mmc_send_cmd(mmc, &cmd, NULL);
527 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
531 * The card should drive cmd and dat[0:3] low immediately
532 * after the response of cmd11, but wait 100 us to be sure
534 err = mmc_wait_dat0(mmc, 0, 100);
541 * During a signal voltage level switch, the clock must be gated
542 * for 5 ms according to the SD spec
544 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
546 err = mmc_set_signal_voltage(mmc, signal_voltage);
550 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
552 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
555 * Failure to switch is indicated by the card holding
556 * dat[0:3] low. Wait for at least 1 ms according to spec
558 err = mmc_wait_dat0(mmc, 1, 1000);
/*
 * sd_send_op_cond() - SD card init: loop sending CMD55+ACMD41 until the
 * card clears its busy bit (OCR_BUSY set in the response). Advertises
 * HCS for SD 2.0 cards and S18R when UHS is requested; afterwards reads
 * the OCR (via CMD58 in SPI mode), optionally performs the 1.8V switch,
 * and records high-capacity status.
 * NOTE(review): the retry/timeout loop structure is elided here.
 */
568 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
575 cmd.cmdidx = MMC_CMD_APP_CMD;
576 cmd.resp_type = MMC_RSP_R1;
579 err = mmc_send_cmd(mmc, &cmd, NULL);
584 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
585 cmd.resp_type = MMC_RSP_R3;
588 * Most cards do not answer if some reserved bits
589 * in the ocr are set. However, Some controller
590 * can set bit 7 (reserved for low voltages), but
591 * how to manage low voltages SD card is not yet
594 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
595 (mmc->cfg->voltages & 0xff8000);
597 if (mmc->version == SD_VERSION_2)
598 cmd.cmdarg |= OCR_HCS;
601 cmd.cmdarg |= OCR_S18R;
603 err = mmc_send_cmd(mmc, &cmd, NULL);
/* Card is done initializing once the busy bit reads back set */
608 if (cmd.response[0] & OCR_BUSY)
617 if (mmc->version != SD_VERSION_2)
618 mmc->version = SD_VERSION_1_0;
620 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
621 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
622 cmd.resp_type = MMC_RSP_R3;
625 err = mmc_send_cmd(mmc, &cmd, NULL);
631 mmc->ocr = cmd.response[0];
633 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* 0x41000000: card signalled ready + 1.8V accepted — do the UHS switch */
634 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
636 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
642 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * mmc_send_op_cond_iter() - single CMD1 (SEND_OP_COND) iteration for
 * eMMC init. When use_arg is set (and not SPI), the argument advertises
 * HCS plus the intersection of host voltages and the card's previously
 * reported OCR. The response OCR is stored back into mmc->ocr.
 */
648 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
653 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
654 cmd.resp_type = MMC_RSP_R3;
656 if (use_arg && !mmc_host_is_spi(mmc))
657 cmd.cmdarg = OCR_HCS |
658 (mmc->cfg->voltages &
659 (mmc->ocr & OCR_VOLTAGE_MASK)) |
660 (mmc->ocr & OCR_ACCESS_MODE);
662 err = mmc_send_cmd(mmc, &cmd, NULL);
665 mmc->ocr = cmd.response[0];
/*
 * mmc_send_op_cond() - start eMMC initialization: loop CMD1 iterations
 * (the first with no argument to just query capabilities) until the
 * card reports ready or the timer expires, then mark op_cond_pending so
 * mmc_complete_op_cond() finishes the sequence later.
 */
669 static int mmc_send_op_cond(struct mmc *mmc)
675 /* Some cards seem to need this */
678 start = get_timer(0);
679 /* Asking to the card its capabilities */
681 err = mmc_send_op_cond_iter(mmc, i != 0);
685 /* exit if not busy (flag seems to be inverted) */
686 if (mmc->ocr & OCR_BUSY)
689 if (get_timer(start) > timeout)
693 mmc->op_cond_pending = 1;
/*
 * mmc_complete_op_cond() - finish the deferred eMMC CMD1 sequence: if
 * the card was still busy, keep polling until OCR_BUSY or timeout; in
 * SPI mode read the OCR via CMD58. Finally resets the version to
 * unknown (re-probed later from CSD) and records high-capacity status.
 */
697 static int mmc_complete_op_cond(struct mmc *mmc)
704 mmc->op_cond_pending = 0;
705 if (!(mmc->ocr & OCR_BUSY)) {
706 /* Some cards seem to need this */
709 start = get_timer(0);
711 err = mmc_send_op_cond_iter(mmc, 1);
714 if (mmc->ocr & OCR_BUSY)
716 if (get_timer(start) > timeout)
722 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
723 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
724 cmd.resp_type = MMC_RSP_R3;
727 err = mmc_send_cmd(mmc, &cmd, NULL);
732 mmc->ocr = cmd.response[0];
735 mmc->version = MMC_VERSION_UNKNOWN;
737 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
/*
 * mmc_send_ext_csd() - issue CMD8 (SEND_EXT_CSD) and read the 512-byte
 * Extended CSD register into ext_csd (caller provides a buffer of at
 * least MMC_MAX_BLOCK_LEN bytes, cache-aligned for DMA-capable hosts).
 */
744 int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
747 struct mmc_data data;
750 /* Get the Card Status Register */
751 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
752 cmd.resp_type = MMC_RSP_R1;
755 data.dest = (char *)ext_csd;
757 data.blocksize = MMC_MAX_BLOCK_LEN;
758 data.flags = MMC_DATA_READ;
760 err = mmc_send_cmd(mmc, &cmd, &data);
/*
 * __mmc_switch() - core CMD6 (SWITCH) implementation: write one EXT_CSD
 * byte, then wait for the card to finish. The CMD6 timeout comes from
 * GENERIC_CMD6_TIME (or PARTITION_SWITCH_TIME for partition switches),
 * both EXT_CSD values in 10ms units. Completion is detected via DAT0
 * polling when the host supports it, otherwise by CMD13 status polling
 * (guarded by send_status / the -ENOSYS fallback documented below).
 */
765 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
768 unsigned int status, start;
770 int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
771 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
772 (index == EXT_CSD_PART_CONF);
776 if (mmc->gen_cmd6_time)
777 timeout_ms = mmc->gen_cmd6_time * 10;
779 if (is_part_switch && mmc->part_switch_time)
780 timeout_ms = mmc->part_switch_time * 10;
782 cmd.cmdidx = MMC_CMD_SWITCH;
783 cmd.resp_type = MMC_RSP_R1b;
784 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
789 ret = mmc_send_cmd(mmc, &cmd, NULL);
790 } while (ret && retries-- > 0);
795 start = get_timer(0);
797 /* poll dat0 for rdy/busy status */
798 ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
799 if (ret && ret != -ENOSYS)
803 * In cases when not allowed to poll by using CMD13 or because we aren't
804 * capable of polling by using mmc_wait_dat0, then rely on waiting the
805 * stated timeout to be sufficient.
807 if (ret == -ENOSYS && !send_status)
810 /* Finally wait until the card is ready or indicates a failure
811 * to switch. It doesn't hurt to use CMD13 here even if send_status
812 * is false, because by now (after 'timeout_ms' ms) the bus should be
816 ret = mmc_send_status(mmc, &status);
818 if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
819 pr_debug("switch failed %d/%d/0x%x !\n", set, index,
823 if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
826 } while (get_timer(start) < timeout_ms);
/* mmc_switch() - public CMD6 wrapper; always polls status afterwards. */
831 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
833 return __mmc_switch(mmc, set, index, value, true);
/* mmc_boot_wp() - set EXT_CSD_BOOT_WP to power-on write-protect boot area. */
836 int mmc_boot_wp(struct mmc *mmc)
838 return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
841 #if !CONFIG_IS_ENABLED(MMC_TINY)
842 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
848 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
854 speed_bits = EXT_CSD_TIMING_HS;
856 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
858 speed_bits = EXT_CSD_TIMING_HS200;
861 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
863 speed_bits = EXT_CSD_TIMING_HS400;
866 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
868 speed_bits = EXT_CSD_TIMING_HS400;
872 speed_bits = EXT_CSD_TIMING_LEGACY;
878 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
879 speed_bits, !hsdowngrade);
883 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
884 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
886 * In case the eMMC is in HS200/HS400 mode and we are downgrading
887 * to HS mode, the card clock are still running much faster than
888 * the supported HS mode clock, so we can not reliably read out
889 * Extended CSD. Reconfigure the controller to run at HS mode.
892 mmc_select_mode(mmc, MMC_HS);
893 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
897 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
898 /* Now check to see that it worked */
899 err = mmc_send_ext_csd(mmc, test_csd);
903 /* No high-speed support */
904 if (!test_csd[EXT_CSD_HS_TIMING])
/*
 * mmc_get_capabilities() - derive mmc->card_caps from the cached
 * EXT_CSD CARD_TYPE byte: bus widths, HS/HS-52/DDR-52, and (when the
 * corresponding CONFIG options are enabled) HS200/HS400/HS400ES.
 * SPI hosts and pre-v4 cards keep the legacy-only default set first.
 */
911 static int mmc_get_capabilities(struct mmc *mmc)
913 u8 *ext_csd = mmc->ext_csd;
916 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
918 if (mmc_host_is_spi(mmc))
921 /* Only version 4 supports high-speed */
922 if (mmc->version < MMC_VERSION_4)
926 pr_err("No ext_csd found!\n"); /* this should never happen */
930 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
932 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
933 mmc->cardtype = cardtype;
935 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
936 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
937 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
938 mmc->card_caps |= MMC_MODE_HS200;
941 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
942 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
943 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
944 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
945 mmc->card_caps |= MMC_MODE_HS400;
948 if (cardtype & EXT_CSD_CARD_TYPE_52) {
949 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
950 mmc->card_caps |= MMC_MODE_DDR_52MHz;
951 mmc->card_caps |= MMC_MODE_HS_52MHz;
953 if (cardtype & EXT_CSD_CARD_TYPE_26)
954 mmc->card_caps |= MMC_MODE_HS;
956 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
/* HS400ES additionally requires the card to support enhanced strobe */
957 if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
958 (mmc->card_caps & MMC_MODE_HS400)) {
959 mmc->card_caps |= MMC_MODE_HS400_ES;
967 static int mmc_set_capacity(struct mmc *mmc, int part_num)
971 mmc->capacity = mmc->capacity_user;
975 mmc->capacity = mmc->capacity_boot;
978 mmc->capacity = mmc->capacity_rpmb;
984 mmc->capacity = mmc->capacity_gp[part_num - 4];
990 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
/*
 * mmc_switch_part() - select a hardware partition by rewriting the
 * PART_ACCESS bits of EXT_CSD_PART_CONF (retried on failure). The
 * capacity/hwpart bookkeeping is refreshed when the switch succeeds,
 * or when it failed with -ENODEV while returning to the raw device
 * (part_num == 0), so the descriptor still represents something sane.
 */
995 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1001 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1003 (mmc->part_config & ~PART_ACCESS_MASK)
1004 | (part_num & PART_ACCESS_MASK));
1005 } while (ret && retry--);
1008 * Set the capacity if the switch succeeded or was intended
1009 * to return to representing the raw device.
1011 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1012 ret = mmc_set_capacity(mmc, part_num);
1013 mmc_get_blk_desc(mmc)->hwpart = part_num;
1019 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
1020 int mmc_hwpart_config(struct mmc *mmc,
1021 const struct mmc_hwpart_conf *conf,
1022 enum mmc_hwpart_conf_mode mode)
1027 u32 gp_size_mult[4];
1028 u32 max_enh_size_mult;
1029 u32 tot_enh_size_mult = 0;
1032 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1034 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1037 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1038 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1039 return -EMEDIUMTYPE;
1042 if (!(mmc->part_support & PART_SUPPORT)) {
1043 pr_err("Card does not support partitioning\n");
1044 return -EMEDIUMTYPE;
1047 if (!mmc->hc_wp_grp_size) {
1048 pr_err("Card does not define HC WP group size\n");
1049 return -EMEDIUMTYPE;
1052 /* check partition alignment and total enhanced size */
1053 if (conf->user.enh_size) {
1054 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1055 conf->user.enh_start % mmc->hc_wp_grp_size) {
1056 pr_err("User data enhanced area not HC WP group "
1060 part_attrs |= EXT_CSD_ENH_USR;
1061 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1062 if (mmc->high_capacity) {
1063 enh_start_addr = conf->user.enh_start;
1065 enh_start_addr = (conf->user.enh_start << 9);
1071 tot_enh_size_mult += enh_size_mult;
1073 for (pidx = 0; pidx < 4; pidx++) {
1074 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1075 pr_err("GP%i partition not HC WP group size "
1076 "aligned\n", pidx+1);
1079 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1080 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1081 part_attrs |= EXT_CSD_ENH_GP(pidx);
1082 tot_enh_size_mult += gp_size_mult[pidx];
1086 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1087 pr_err("Card does not support enhanced attribute\n");
1088 return -EMEDIUMTYPE;
1091 err = mmc_send_ext_csd(mmc, ext_csd);
1096 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1097 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1098 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1099 if (tot_enh_size_mult > max_enh_size_mult) {
1100 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1101 tot_enh_size_mult, max_enh_size_mult);
1102 return -EMEDIUMTYPE;
1105 /* The default value of EXT_CSD_WR_REL_SET is device
1106 * dependent, the values can only be changed if the
1107 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1108 * changed only once and before partitioning is completed. */
1109 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1110 if (conf->user.wr_rel_change) {
1111 if (conf->user.wr_rel_set)
1112 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1114 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1116 for (pidx = 0; pidx < 4; pidx++) {
1117 if (conf->gp_part[pidx].wr_rel_change) {
1118 if (conf->gp_part[pidx].wr_rel_set)
1119 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1121 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1125 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1126 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1127 puts("Card does not support host controlled partition write "
1128 "reliability settings\n");
1129 return -EMEDIUMTYPE;
1132 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1133 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1134 pr_err("Card already partitioned\n");
1138 if (mode == MMC_HWPART_CONF_CHECK)
1141 /* Partitioning requires high-capacity size definitions */
1142 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1143 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1144 EXT_CSD_ERASE_GROUP_DEF, 1);
1149 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1151 #if CONFIG_IS_ENABLED(MMC_WRITE)
1152 /* update erase group size to be high-capacity */
1153 mmc->erase_grp_size =
1154 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1159 /* all OK, write the configuration */
1160 for (i = 0; i < 4; i++) {
1161 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1162 EXT_CSD_ENH_START_ADDR+i,
1163 (enh_start_addr >> (i*8)) & 0xFF);
1167 for (i = 0; i < 3; i++) {
1168 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1169 EXT_CSD_ENH_SIZE_MULT+i,
1170 (enh_size_mult >> (i*8)) & 0xFF);
1174 for (pidx = 0; pidx < 4; pidx++) {
1175 for (i = 0; i < 3; i++) {
1176 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1177 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1178 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1183 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1184 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1188 if (mode == MMC_HWPART_CONF_SET)
1191 /* The WR_REL_SET is a write-once register but shall be
1192 * written before setting PART_SETTING_COMPLETED. As it is
1193 * write-once we can only write it when completing the
1195 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1196 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1197 EXT_CSD_WR_REL_SET, wr_rel_set);
1202 /* Setting PART_SETTING_COMPLETED confirms the partition
1203 * configuration but it only becomes effective after power
1204 * cycle, so we do not adjust the partition related settings
1205 * in the mmc struct. */
1207 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1208 EXT_CSD_PARTITION_SETTING,
1209 EXT_CSD_PARTITION_SETTING_COMPLETED);
1217 #if !CONFIG_IS_ENABLED(DM_MMC)
1218 int mmc_getcd(struct mmc *mmc)
1222 cd = board_mmc_getcd(mmc);
1225 if (mmc->cfg->ops->getcd)
1226 cd = mmc->cfg->ops->getcd(mmc);
1235 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * sd_switch() - issue SD CMD6 (SWITCH_FUNC). 'mode' selects check (0)
 * vs set (1) in bit 31; the 4-bit field for 'group' is cleared from the
 * all-ones default and replaced with 'value'. The 64-byte switch status
 * block is read back into resp.
 */
1236 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1239 struct mmc_data data;
1241 /* Switch the frequency */
1242 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1243 cmd.resp_type = MMC_RSP_R1;
1244 cmd.cmdarg = (mode << 31) | 0xffffff;
1245 cmd.cmdarg &= ~(0xf << (group * 4));
1246 cmd.cmdarg |= value << (group * 4);
1248 data.dest = (char *)resp;
1249 data.blocksize = 64;
1251 data.flags = MMC_DATA_READ;
1253 return mmc_send_cmd(mmc, &cmd, &data);
1256 static int sd_get_capabilities(struct mmc *mmc)
1260 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1261 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1262 struct mmc_data data;
1264 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1268 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
1270 if (mmc_host_is_spi(mmc))
1273 /* Read the SCR to find out if this card supports higher speeds */
1274 cmd.cmdidx = MMC_CMD_APP_CMD;
1275 cmd.resp_type = MMC_RSP_R1;
1276 cmd.cmdarg = mmc->rca << 16;
1278 err = mmc_send_cmd(mmc, &cmd, NULL);
1283 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1284 cmd.resp_type = MMC_RSP_R1;
1290 data.dest = (char *)scr;
1293 data.flags = MMC_DATA_READ;
1295 err = mmc_send_cmd(mmc, &cmd, &data);
1304 mmc->scr[0] = __be32_to_cpu(scr[0]);
1305 mmc->scr[1] = __be32_to_cpu(scr[1]);
1307 switch ((mmc->scr[0] >> 24) & 0xf) {
1309 mmc->version = SD_VERSION_1_0;
1312 mmc->version = SD_VERSION_1_10;
1315 mmc->version = SD_VERSION_2;
1316 if ((mmc->scr[0] >> 15) & 0x1)
1317 mmc->version = SD_VERSION_3;
1320 mmc->version = SD_VERSION_1_0;
1324 if (mmc->scr[0] & SD_DATA_4BIT)
1325 mmc->card_caps |= MMC_MODE_4BIT;
1327 /* Version 1.0 doesn't support switching */
1328 if (mmc->version == SD_VERSION_1_0)
1333 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1334 (u8 *)switch_status);
1339 /* The high-speed function is busy. Try again */
1340 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1344 /* If high-speed isn't supported, we return */
1345 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1346 mmc->card_caps |= MMC_CAP(SD_HS);
1348 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1349 /* Version before 3.0 don't support UHS modes */
1350 if (mmc->version < SD_VERSION_3)
1353 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1354 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1355 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1356 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1357 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1358 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1359 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1360 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1361 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1362 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1363 mmc->card_caps |= MMC_CAP(UHS_DDR50);
/*
 * sd_set_card_speed() - program the SD card's access mode (group 1 of
 * CMD6) for the requested bus_mode, then verify the card actually
 * switched by checking the function-selection nibble in the returned
 * switch status. SD 1.0x cards don't implement CMD6 and are skipped.
 */
1369 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1373 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1376 /* SD version 1.00 and 1.01 does not support CMD 6 */
1377 if (mmc->version == SD_VERSION_1_0)
1382 speed = UHS_SDR12_BUS_SPEED;
1385 speed = HIGH_SPEED_BUS_SPEED;
1387 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1389 speed = UHS_SDR12_BUS_SPEED;
1392 speed = UHS_SDR25_BUS_SPEED;
1395 speed = UHS_SDR50_BUS_SPEED;
1398 speed = UHS_DDR50_BUS_SPEED;
1401 speed = UHS_SDR104_BUS_SPEED;
1408 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
/* The selected-function nibble must echo the requested speed */
1412 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
/*
 * sd_select_bus_width() - set the SD card bus width via CMD55 + ACMD6
 * (SET_BUS_WIDTH). Only 1-bit and 4-bit are valid for SD.
 */
1418 static int sd_select_bus_width(struct mmc *mmc, int w)
1423 if ((w != 4) && (w != 1))
1426 cmd.cmdidx = MMC_CMD_APP_CMD;
1427 cmd.resp_type = MMC_RSP_R1;
1428 cmd.cmdarg = mmc->rca << 16;
1430 err = mmc_send_cmd(mmc, &cmd, NULL);
1434 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1435 cmd.resp_type = MMC_RSP_R1;
1440 err = mmc_send_cmd(mmc, &cmd, NULL);
1448 #if CONFIG_IS_ENABLED(MMC_WRITE)
1449 static int sd_read_ssr(struct mmc *mmc)
1451 static const unsigned int sd_au_size[] = {
1452 0, SZ_16K / 512, SZ_32K / 512,
1453 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1454 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1455 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1456 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1461 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1462 struct mmc_data data;
1464 unsigned int au, eo, et, es;
1466 cmd.cmdidx = MMC_CMD_APP_CMD;
1467 cmd.resp_type = MMC_RSP_R1;
1468 cmd.cmdarg = mmc->rca << 16;
1470 err = mmc_send_cmd(mmc, &cmd, NULL);
1471 #ifdef CONFIG_MMC_QUIRKS
1472 if (err && (mmc->quirks & MMC_QUIRK_RETRY_APP_CMD)) {
1475 * It has been seen that APP_CMD may fail on the first
1476 * attempt, let's try a few more times
1479 err = mmc_send_cmd(mmc, &cmd, NULL);
1482 } while (retries--);
1488 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1489 cmd.resp_type = MMC_RSP_R1;
1493 data.dest = (char *)ssr;
1494 data.blocksize = 64;
1496 data.flags = MMC_DATA_READ;
1498 err = mmc_send_cmd(mmc, &cmd, &data);
1506 for (i = 0; i < 16; i++)
1507 ssr[i] = be32_to_cpu(ssr[i]);
1509 au = (ssr[2] >> 12) & 0xF;
1510 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1511 mmc->ssr.au = sd_au_size[au];
1512 es = (ssr[3] >> 24) & 0xFF;
1513 es |= (ssr[2] & 0xFF) << 8;
1514 et = (ssr[3] >> 18) & 0x3F;
1516 eo = (ssr[3] >> 16) & 0x3;
1517 mmc->ssr.erase_timeout = (et * 1000) / es;
1518 mmc->ssr.erase_offset = eo * 1000;
1521 pr_debug("Invalid Allocation Unit Size.\n");
1527 /* frequency bases */
1528 /* divided by 10 to be nice to platforms without floating point */
1529 static const int fbase[] = {
1536 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1537 * to platforms without floating point.
1539 static const u8 multipliers[] = {
1558 static inline int bus_width(uint cap)
1560 if (cap == MMC_MODE_8BIT)
1562 if (cap == MMC_MODE_4BIT)
1564 if (cap == MMC_MODE_1BIT)
1566 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1570 #if !CONFIG_IS_ENABLED(DM_MMC)
1571 #ifdef MMC_SUPPORTS_TUNING
1572 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1578 static int mmc_set_ios(struct mmc *mmc)
1582 if (mmc->cfg->ops->set_ios)
1583 ret = mmc->cfg->ops->set_ios(mmc);
1588 static int mmc_host_power_cycle(struct mmc *mmc)
1592 if (mmc->cfg->ops->host_power_cycle)
1593 ret = mmc->cfg->ops->host_power_cycle(mmc);
/*
 * mmc_set_clock() - clamp the requested clock to the host's [f_min,
 * f_max] range, record it and the disable flag on the mmc struct, and
 * push the new settings to the controller via mmc_set_ios().
 */
1599 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1602 if (clock > mmc->cfg->f_max)
1603 clock = mmc->cfg->f_max;
1605 if (clock < mmc->cfg->f_min)
1606 clock = mmc->cfg->f_min;
1610 mmc->clk_disable = disable;
1612 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1614 return mmc_set_ios(mmc);
1617 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1619 mmc->bus_width = width;
1621 return mmc_set_ios(mmc);
1624 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1626 * helper function to display the capabilities in a human
1627 * friendly manner. The capabilities include bus width and
1630 void mmc_dump_capabilities(const char *text, uint caps)
1634 pr_debug("%s: widths [", text);
1635 if (caps & MMC_MODE_8BIT)
1637 if (caps & MMC_MODE_4BIT)
1639 if (caps & MMC_MODE_1BIT)
1641 pr_debug("\b\b] modes [");
1642 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1643 if (MMC_CAP(mode) & caps)
1644 pr_debug("%s, ", mmc_mode_name(mode));
1645 pr_debug("\b\b]\n");
1649 struct mode_width_tuning {
1652 #ifdef MMC_SUPPORTS_TUNING
1657 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1658 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1661 case MMC_SIGNAL_VOLTAGE_000: return 0;
1662 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1663 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1664 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1669 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1673 if (mmc->signal_voltage == signal_voltage)
1676 mmc->signal_voltage = signal_voltage;
1677 err = mmc_set_ios(mmc);
1679 pr_debug("unable to set voltage (err %d)\n", err);
1684 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1690 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Table of SD bus modes ordered by preference (fastest first). Each entry
 * carries the mode, the widths it supports, and an optional tuning opcode.
 * NOTE(review): the .mode initializers are elided in this extraction —
 * only the .widths/.tuning lines survived.
 */
1691 static const struct mode_width_tuning sd_modes_by_pref[] = {
1692 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1693 #ifdef MMC_SUPPORTS_TUNING
1696 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1697 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1702 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1706 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1710 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1715 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1717 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1720 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1725 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
/* iterate the table, visiting only entries whose mode cap is in @caps */
1729 #define for_each_sd_mode_by_pref(caps, mwt) \
1730 for (mwt = sd_modes_by_pref;\
1731 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1733 if (caps & MMC_CAP(mwt->mode))
/*
 * sd_select_mode_and_width() - negotiate the fastest usable SD bus
 * mode/width. Walks sd_modes_by_pref (fastest first) restricted to
 * @card_caps & host caps, trying 4-bit before 1-bit, programming the card
 * first and then the host, running tuning where the mode requires it.
 * Falls back to MMC_LEGACY speed on a per-mode failure; returns an error
 * only when no mode works at all.
 */
1735 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1738 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1739 const struct mode_width_tuning *mwt;
1740 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* UHS usable only if the card advertised 1.8V switching (OCR S18R) */
1741 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1743 bool uhs_en = false;
1748 mmc_dump_capabilities("sd card", card_caps);
1749 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts use a fixed 1-bit legacy bus; nothing to negotiate */
1752 if (mmc_host_is_spi(mmc)) {
1753 mmc_set_bus_width(mmc, 1);
1754 mmc_select_mode(mmc, MMC_LEGACY);
1755 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1759 /* Restrict card's capabilities by what the host can do */
1760 caps = card_caps & mmc->host_caps;
1765 for_each_sd_mode_by_pref(caps, mwt) {
1768 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1769 if (*w & caps & mwt->widths) {
1770 pr_debug("trying mode %s width %d (at %d MHz)\n",
1771 mmc_mode_name(mwt->mode),
1773 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1775 /* configure the bus width (card + host) */
1776 err = sd_select_bus_width(mmc, bus_width(*w));
1779 mmc_set_bus_width(mmc, bus_width(*w));
1781 /* configure the bus mode (card) */
1782 err = sd_set_card_speed(mmc, mwt->mode);
1786 /* configure the bus mode (host) */
1787 mmc_select_mode(mmc, mwt->mode);
1788 mmc_set_clock(mmc, mmc->tran_speed,
1791 #ifdef MMC_SUPPORTS_TUNING
1792 /* execute tuning if needed */
1793 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1794 err = mmc_execute_tuning(mmc,
1797 pr_debug("tuning failed\n");
1803 #if CONFIG_IS_ENABLED(MMC_WRITE)
/* SSR carries erase-unit info needed by the write/erase path */
1804 err = sd_read_ssr(mmc);
1806 pr_warn("unable to read ssr\n");
1812 /* revert to a safer bus speed */
1813 mmc_select_mode(mmc, MMC_LEGACY);
1814 mmc_set_clock(mmc, mmc->tran_speed,
1820 pr_err("unable to select a mode\n");
1825 * read and compare the part of ext csd that is constant.
1826 * This can be used to check that the transfer is working
/*
 * mmc_read_and_compare_ext_csd() - re-read EXT_CSD and compare the
 * read-only fields (partitioning support, WP/erase group sizes, revision,
 * SEC_CNT) against the cached copy in mmc->ext_csd. Used to validate that
 * a newly configured bus mode transfers data correctly. Only meaningful
 * for MMC v4+ devices (earlier versions have no EXT_CSD).
 */
1829 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1832 const u8 *ext_csd = mmc->ext_csd;
1833 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1835 if (mmc->version < MMC_VERSION_4)
1838 err = mmc_send_ext_csd(mmc, test_csd);
1842 /* Only compare read only fields */
1843 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1844 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1845 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1846 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1847 ext_csd[EXT_CSD_REV]
1848 == test_csd[EXT_CSD_REV] &&
1849 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1850 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1851 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1852 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1858 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_set_lowest_voltage() - select the lowest signal voltage that both
 * the card (per its EXT_CSD card type bits for the target @mode) and the
 * caller's @allowed_mask permit. Builds a card-side voltage mask from
 * mmc->cardtype, then tries candidates lowest-first: ffs() picks the
 * least-significant (lowest-voltage) set bit each iteration, and a failed
 * switch removes that bit from @allowed_mask before retrying.
 */
1859 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1860 uint32_t allowed_mask)
/* HS200/HS400 run at 1.8V or 1.2V only */
1868 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1869 EXT_CSD_CARD_TYPE_HS400_1_8V))
1870 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1871 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1872 EXT_CSD_CARD_TYPE_HS400_1_2V))
1873 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1876 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1877 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1878 MMC_SIGNAL_VOLTAGE_180;
1879 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1880 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1883 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1887 while (card_mask & allowed_mask) {
1888 enum mmc_voltage best_match;
1890 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1891 if (!mmc_set_signal_voltage(mmc, best_match))
1894 allowed_mask &= ~best_match;
/* !MMC_IO_VOLTAGE build: stub keeps the call sites compiling */
1900 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1901 uint32_t allowed_mask)
/*
 * eMMC bus modes ordered by preference (fastest first): HS400-ES, HS400,
 * HS200, then the DDR/HS/legacy modes. NOTE(review): several .mode
 * initializers are elided in this extraction.
 */
1907 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1908 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1910 .mode = MMC_HS_400_ES,
1911 .widths = MMC_MODE_8BIT,
1914 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1917 .widths = MMC_MODE_8BIT,
1918 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1921 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1924 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1925 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1930 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1934 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1938 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1942 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
/* iterate the table, visiting only entries whose mode cap is in @caps */
1946 #define for_each_mmc_mode_by_pref(caps, mwt) \
1947 for (mwt = mmc_modes_by_pref;\
1948 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1950 if (caps & MMC_CAP(mwt->mode))
/*
 * Map each (host capability bit, DDR flag) pair to the EXT_CSD BUS_WIDTH
 * value to program, widest first.
 */
1952 static const struct ext_csd_bus_width {
1956 } ext_csd_bus_width[] = {
1957 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1958 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1959 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1960 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1961 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1964 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_select_hs400() - switch an eMMC device into HS400 mode.
 * Per the eMMC spec, HS400 cannot be entered directly: the card is first
 * put in HS200 for tuning, then dropped back to HS before enabling the
 * 8-bit DDR bus width and finally switching card and host to HS400.
 */
1965 static int mmc_select_hs400(struct mmc *mmc)
1969 /* Set timing to HS200 for tuning */
1970 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1974 /* configure the bus mode (host) */
1975 mmc_select_mode(mmc, MMC_HS_200);
1976 mmc_set_clock(mmc, mmc->tran_speed, false);
1978 /* execute tuning if needed */
1979 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1981 debug("tuning failed\n");
1985 /* Set back to HS */
1986 mmc_set_card_speed(mmc, MMC_HS, true);
1988 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1989 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1993 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1997 mmc_select_mode(mmc, MMC_HS_400);
1998 err = mmc_set_clock(mmc, mmc->tran_speed, false);
/* !MMC_HS400_SUPPORT build: stub (body elided in this extraction) */
2005 static int mmc_select_hs400(struct mmc *mmc)
2011 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2012 #if !CONFIG_IS_ENABLED(DM_MMC)
/* non-DM stub; DM hosts provide their own enhanced-strobe hook */
2013 static int mmc_set_enhanced_strobe(struct mmc *mmc)
/*
 * mmc_select_hs400es() - switch an eMMC device into HS400 Enhanced Strobe
 * mode: HS first, then 8-bit DDR bus width with the strobe bit set, then
 * the HS400-ES timing/clock, and finally the host-side enhanced strobe.
 */
2018 static int mmc_select_hs400es(struct mmc *mmc)
2022 err = mmc_set_card_speed(mmc, MMC_HS, true);
2026 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2027 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2028 EXT_CSD_BUS_WIDTH_STROBE);
2030 printf("switch to bus width for hs400 failed\n");
2033 /* TODO: driver strength */
2034 err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2038 mmc_select_mode(mmc, MMC_HS_400_ES);
2039 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2043 return mmc_set_enhanced_strobe(mmc);
/* !MMC_HS400_ES_SUPPORT build: stub (body elided in this extraction) */
2046 static int mmc_select_hs400es(struct mmc *mmc)
/* iterate the width table, visiting entries matching @caps and @ddr */
2052 #define for_each_supported_width(caps, ddr, ecbv) \
2053 for (ecbv = ext_csd_bus_width;\
2054 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2056 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * mmc_select_mode_and_width() - negotiate the fastest usable eMMC bus
 * mode and width. Tries mmc_modes_by_pref (fastest first) against
 * @card_caps & host caps, for each mode trying the widest supported bus.
 * For each candidate: set the lowest workable signal voltage, program the
 * card's bus width, then hand off to the HS400/HS400-ES helpers or do the
 * speed + DDR-width switch inline, then configure the host, tune if
 * needed, and verify with a read-back of EXT_CSD. On failure the voltage
 * and bus are reverted to a safe 1-bit legacy configuration and the next
 * candidate is tried.
 */
2058 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2061 const struct mode_width_tuning *mwt;
2062 const struct ext_csd_bus_width *ecbw;
2065 mmc_dump_capabilities("mmc", card_caps);
2066 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts use a fixed 1-bit legacy bus; nothing to negotiate */
2069 if (mmc_host_is_spi(mmc)) {
2070 mmc_set_bus_width(mmc, 1);
2071 mmc_select_mode(mmc, MMC_LEGACY);
2072 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2076 /* Restrict card's capabilities by what the host can do */
2077 card_caps &= mmc->host_caps;
2079 /* Only version 4 of MMC supports wider bus widths */
2080 if (mmc->version < MMC_VERSION_4)
2083 if (!mmc->ext_csd) {
2084 pr_debug("No ext_csd found!\n"); /* this should never happen */
2088 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2089 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2091 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2092 * before doing anything else, since a transition from either of
2093 * the HS200/HS400 mode directly to legacy mode is not supported.
2095 if (mmc->selected_mode == MMC_HS_200 ||
2096 mmc->selected_mode == MMC_HS_400)
2097 mmc_set_card_speed(mmc, MMC_HS, true);
2100 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2102 for_each_mmc_mode_by_pref(card_caps, mwt) {
2103 for_each_supported_width(card_caps & mwt->widths,
2104 mmc_is_mode_ddr(mwt->mode), ecbw) {
2105 enum mmc_voltage old_voltage;
2106 pr_debug("trying mode %s width %d (at %d MHz)\n",
2107 mmc_mode_name(mwt->mode),
2108 bus_width(ecbw->cap),
2109 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* remember the voltage so it can be restored if this mode fails */
2110 old_voltage = mmc->signal_voltage;
2111 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2112 MMC_ALL_SIGNAL_VOLTAGE);
2116 /* configure the bus width (card + host) */
2117 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2119 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2122 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
/* HS400 / HS400-ES need their own multi-step switch sequences */
2124 if (mwt->mode == MMC_HS_400) {
2125 err = mmc_select_hs400(mmc);
2127 printf("Select HS400 failed %d\n", err);
2130 } else if (mwt->mode == MMC_HS_400_ES) {
2131 err = mmc_select_hs400es(mmc);
2133 printf("Select HS400ES failed %d\n",
2138 /* configure the bus speed (card) */
2139 err = mmc_set_card_speed(mmc, mwt->mode, false);
2144 * configure the bus width AND the ddr mode
2145 * (card). The host side will be taken care
2146 * of in the next step
2148 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2149 err = mmc_switch(mmc,
2150 EXT_CSD_CMD_SET_NORMAL,
2152 ecbw->ext_csd_bits);
2157 /* configure the bus mode (host) */
2158 mmc_select_mode(mmc, mwt->mode);
2159 mmc_set_clock(mmc, mmc->tran_speed,
2161 #ifdef MMC_SUPPORTS_TUNING
2163 /* execute tuning if needed */
2165 err = mmc_execute_tuning(mmc,
2168 pr_debug("tuning failed\n");
2175 /* do a transfer to check the configuration */
2176 err = mmc_read_and_compare_ext_csd(mmc);
2180 mmc_set_signal_voltage(mmc, old_voltage);
2181 /* if an error occurred, revert to a safer bus mode */
2182 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2183 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2184 mmc_select_mode(mmc, MMC_LEGACY);
2185 mmc_set_bus_width(mmc, 1);
2189 pr_err("unable to select a mode\n");
2195 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: single static EXT_CSD buffer instead of a heap allocation */
2196 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * mmc_startup_v4() - MMC v4+ specific startup: read EXT_CSD and decode
 * version, capacity, partition configuration, erase/WP group sizes and
 * the CMD6 timing fields. A no-op for SD cards and pre-v4 MMC.
 */
2199 static int mmc_startup_v4(struct mmc *mmc)
2203 bool has_parts = false;
2204 bool part_completed;
2205 static const u32 mmc_versions[] = {
2217 #if CONFIG_IS_ENABLED(MMC_TINY)
2218 u8 *ext_csd = ext_csd_bkup;
2220 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2224 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2226 err = mmc_send_ext_csd(mmc, ext_csd);
2230 /* store the ext csd for future reference */
2232 mmc->ext_csd = ext_csd;
2234 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2236 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2239 /* check ext_csd version and capacity */
2240 err = mmc_send_ext_csd(mmc, ext_csd);
2244 /* store the ext csd for future reference */
2246 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2249 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
/* reject EXT_CSD revisions newer than this table knows about */
2251 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2254 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2256 if (mmc->version >= MMC_VERSION_4_2) {
2258 * According to the JEDEC Standard, the value of
2259 * ext_csd's capacity is valid if the value is more
/* assemble the little-endian 32-bit SEC_CNT sector count */
2262 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2263 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2264 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2265 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2266 capacity *= MMC_MAX_BLOCK_LEN;
/* SEC_CNT capacity is only authoritative above 2 GiB */
2267 if ((capacity >> 20) > 2 * 1024)
2268 mmc->capacity_user = capacity;
2271 if (mmc->version >= MMC_VERSION_4_5)
2272 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2274 /* The partition data may be non-zero but it is only
2275 * effective if PARTITION_SETTING_COMPLETED is set in
2276 * EXT_CSD, so ignore any data if this bit is not set,
2277 * except for enabling the high-capacity group size
2278 * definition (see below).
2280 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2281 EXT_CSD_PARTITION_SETTING_COMPLETED);
2283 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2284 /* Some eMMC set the value too low so set a minimum */
2285 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2286 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2288 /* store the partition info of emmc */
2289 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2290 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2291 ext_csd[EXT_CSD_BOOT_MULT])
2292 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2293 if (part_completed &&
2294 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2295 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* BOOT/RPMB sizes are in 128 KiB units, hence << 17 */
2297 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2299 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* decode the four general-purpose partition sizes */
2301 for (i = 0; i < 4; i++) {
2302 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2303 uint mult = (ext_csd[idx + 2] << 16) +
2304 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2307 if (!part_completed)
2309 mmc->capacity_gp[i] = mult;
2310 mmc->capacity_gp[i] *=
2311 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2312 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2313 mmc->capacity_gp[i] <<= 19;
2316 #ifndef CONFIG_SPL_BUILD
/* enhanced user-area size/start, in HC erase/WP group units */
2317 if (part_completed) {
2318 mmc->enh_user_size =
2319 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2320 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2321 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2322 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2323 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2324 mmc->enh_user_size <<= 19;
2325 mmc->enh_user_start =
2326 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2327 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2328 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2329 ext_csd[EXT_CSD_ENH_START_ADDR];
2330 if (mmc->high_capacity)
2331 mmc->enh_user_start <<= 9;
2336 * Host needs to enable ERASE_GRP_DEF bit if device is
2337 * partitioned. This bit will be lost every time after a reset
2338 * or power off. This will affect erase size.
2342 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2343 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2346 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2347 EXT_CSD_ERASE_GROUP_DEF, 1);
/* keep the cached copy in sync with what was just switched */
2352 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2355 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2356 #if CONFIG_IS_ENABLED(MMC_WRITE)
2357 /* Read out group size from ext_csd */
2358 mmc->erase_grp_size =
2359 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2362 * if high capacity and partition setting completed
2363 * SEC_COUNT is valid even if it is smaller than 2 GiB
2364 * JEDEC Standard JESD84-B45, 6.2.4
2366 if (mmc->high_capacity && part_completed) {
2367 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2368 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2369 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2370 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2371 capacity *= MMC_MAX_BLOCK_LEN;
2372 mmc->capacity_user = capacity;
2375 #if CONFIG_IS_ENABLED(MMC_WRITE)
2377 /* Calculate the group size from the csd value. */
2378 int erase_gsz, erase_gmul;
2380 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2381 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2382 mmc->erase_grp_size = (erase_gsz + 1)
2386 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2387 mmc->hc_wp_grp_size = 1024
2388 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2389 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2392 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
/* error path: drop the (heap-allocated) EXT_CSD copy */
2397 #if !CONFIG_IS_ENABLED(MMC_TINY)
2400 mmc->ext_csd = NULL;
/*
 * mmc_startup() - bring an identified card to the Transfer state and fill
 * in the mmc/blk descriptors. Sequence: (optional SPI CRC enable), CID,
 * relative address, CSD decode (version, legacy speed, block lengths,
 * capacity), optional DSR, card select, v4 EXT_CSD handling, capability
 * probing and mode/width selection, and finally the block descriptor
 * (blksz, lba, vendor/product/revision strings from the CID).
 */
2405 static int mmc_startup(struct mmc *mmc)
2411 struct blk_desc *bdesc;
2413 #ifdef CONFIG_MMC_SPI_CRC_ON
2414 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2415 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2416 cmd.resp_type = MMC_RSP_R1;
2418 err = mmc_send_cmd(mmc, &cmd, NULL);
2424 /* Put the Card in Identify Mode */
2425 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2426 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2427 cmd.resp_type = MMC_RSP_R2;
2430 err = mmc_send_cmd(mmc, &cmd, NULL);
2432 #ifdef CONFIG_MMC_QUIRKS
2433 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2436 * It has been seen that SEND_CID may fail on the first
2437 * attempt, let's try a few more times
2440 err = mmc_send_cmd(mmc, &cmd, NULL);
2443 } while (retries--);
2450 memcpy(mmc->cid, cmd.response, 16);
2453 * For MMC cards, set the Relative Address.
2454 * For SD cards, get the Relative Address.
2455 * This also puts the cards into Standby State
2457 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2458 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2459 cmd.cmdarg = mmc->rca << 16;
2460 cmd.resp_type = MMC_RSP_R6;
2462 err = mmc_send_cmd(mmc, &cmd, NULL);
/* SD returns its own RCA in the R6 response; record it */
2468 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2471 /* Get the Card-Specific Data */
2472 cmd.cmdidx = MMC_CMD_SEND_CSD;
2473 cmd.resp_type = MMC_RSP_R2;
2474 cmd.cmdarg = mmc->rca << 16;
2476 err = mmc_send_cmd(mmc, &cmd, NULL);
2481 mmc->csd[0] = cmd.response[0];
2482 mmc->csd[1] = cmd.response[1];
2483 mmc->csd[2] = cmd.response[2];
2484 mmc->csd[3] = cmd.response[3];
/* derive the MMC spec version from the CSD SPEC_VERS field */
2486 if (mmc->version == MMC_VERSION_UNKNOWN) {
2487 int version = (cmd.response[0] >> 26) & 0xf;
2491 mmc->version = MMC_VERSION_1_2;
2494 mmc->version = MMC_VERSION_1_4;
2497 mmc->version = MMC_VERSION_2_2;
2500 mmc->version = MMC_VERSION_3;
2503 mmc->version = MMC_VERSION_4;
2506 mmc->version = MMC_VERSION_1_2;
2511 /* divide frequency by 10, since the mults are 10x bigger */
2512 freq = fbase[(cmd.response[0] & 0x7)];
2513 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2515 mmc->legacy_speed = freq * mult;
2516 mmc_select_mode(mmc, MMC_LEGACY);
2518 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2519 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2520 #if CONFIG_IS_ENABLED(MMC_WRITE)
2523 mmc->write_bl_len = mmc->read_bl_len;
2525 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* capacity: CSD v2 (high capacity) vs CSD v1 C_SIZE/C_SIZE_MULT */
2528 if (mmc->high_capacity) {
2529 csize = (mmc->csd[1] & 0x3f) << 16
2530 | (mmc->csd[2] & 0xffff0000) >> 16;
2533 csize = (mmc->csd[1] & 0x3ff) << 2
2534 | (mmc->csd[2] & 0xc0000000) >> 30;
2535 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2538 mmc->capacity_user = (csize + 1) << (cmult + 2);
2539 mmc->capacity_user *= mmc->read_bl_len;
2540 mmc->capacity_boot = 0;
2541 mmc->capacity_rpmb = 0;
2542 for (i = 0; i < 4; i++)
2543 mmc->capacity_gp[i] = 0;
/* clamp block lengths to what the stack supports */
2545 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2546 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2548 #if CONFIG_IS_ENABLED(MMC_WRITE)
2549 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2550 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* program the DSR only when implemented and a value was configured */
2553 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2554 cmd.cmdidx = MMC_CMD_SET_DSR;
2555 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2556 cmd.resp_type = MMC_RSP_NONE;
2557 if (mmc_send_cmd(mmc, &cmd, NULL))
2558 pr_warn("MMC: SET_DSR failed\n");
2561 /* Select the card, and put it into Transfer Mode */
2562 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2563 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2564 cmd.resp_type = MMC_RSP_R1;
2565 cmd.cmdarg = mmc->rca << 16;
2566 err = mmc_send_cmd(mmc, &cmd, NULL);
2573 * For SD, its erase group is always one sector
2575 #if CONFIG_IS_ENABLED(MMC_WRITE)
2576 mmc->erase_grp_size = 1;
2578 mmc->part_config = MMCPART_NOAVAILABLE;
2580 err = mmc_startup_v4(mmc);
2584 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2588 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: skip mode negotiation, stay on 1-bit legacy */
2589 mmc_set_clock(mmc, mmc->legacy_speed, false);
2590 mmc_select_mode(mmc, MMC_LEGACY);
2591 mmc_set_bus_width(mmc, 1);
2594 err = sd_get_capabilities(mmc);
2597 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2599 err = mmc_get_capabilities(mmc);
2602 err = mmc_select_mode_and_width(mmc, mmc->card_caps);
2608 mmc->best_mode = mmc->selected_mode;
2610 /* Fix the block length for DDR mode */
2611 if (mmc->ddr_mode) {
2612 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2613 #if CONFIG_IS_ENABLED(MMC_WRITE)
2614 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2618 /* fill in device description */
2619 bdesc = mmc_get_blk_desc(mmc);
2623 bdesc->blksz = mmc->read_bl_len;
2624 bdesc->log2blksz = LOG2(bdesc->blksz);
2625 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2626 #if !defined(CONFIG_SPL_BUILD) || \
2627 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2628 !CONFIG_IS_ENABLED(USE_TINY_PRINTF))
/* identification strings decoded from the CID register */
2629 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2630 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2631 (mmc->cid[3] >> 16) & 0xffff);
2632 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2633 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2634 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2635 (mmc->cid[2] >> 24) & 0xff);
2636 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2637 (mmc->cid[2] >> 16) & 0xf);
2639 bdesc->vendor[0] = 0;
2640 bdesc->product[0] = 0;
2641 bdesc->revision[0] = 0;
2644 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/*
 * mmc_send_if_cond() - issue SD CMD8 (SEND_IF_COND) with check pattern
 * 0xaa. A matching echo identifies an SD v2.00+ card; the version is
 * recorded in mmc->version. Non-v2 cards simply fail this command.
 */
2651 static int mmc_send_if_cond(struct mmc *mmc)
2656 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2657 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2658 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2659 cmd.resp_type = MMC_RSP_R7;
2661 err = mmc_send_cmd(mmc, &cmd, NULL);
/* the card must echo back the 0xaa check pattern */
2666 if ((cmd.response[0] & 0xff) != 0xaa)
2669 mmc->version = SD_VERSION_2;
2674 #if !CONFIG_IS_ENABLED(DM_MMC)
2675 /* board-specific MMC power initializations. */
/* weak default: boards without special power sequencing do nothing */
2676 __weak void board_mmc_power_init(void)
/*
 * mmc_power_init() - look up the vmmc/vqmmc supply regulators (DM builds)
 * or call the board power hook (non-DM builds). Missing regulators are
 * logged at debug level and treated as non-fatal.
 */
2681 static int mmc_power_init(struct mmc *mmc)
2683 #if CONFIG_IS_ENABLED(DM_MMC)
2684 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2687 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2690 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2692 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2693 &mmc->vqmmc_supply);
2695 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2697 #else /* !CONFIG_DM_MMC */
2699 * Driver model should use a regulator, as above, rather than calling
2700 * out to board code.
2702 board_mmc_power_init();
2708 * put the host in the initial state:
2709 * - turn on Vdd (card power supply)
2710 * - configure the bus width and clock to minimal values
/*
 * mmc_set_initial_state() - reset the host to a safe baseline: 3.3V
 * signalling (falling back to 1.8V), legacy mode, 1-bit bus, minimum
 * clock. A voltage failure is only warned about, not fatal.
 */
2712 static void mmc_set_initial_state(struct mmc *mmc)
2716 /* First try to set 3.3V. If it fails set to 1.8V */
2717 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2719 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2721 pr_warn("mmc: failed to set signal voltage\n");
2723 mmc_select_mode(mmc, MMC_LEGACY);
2724 mmc_set_bus_width(mmc, 1);
/* clock value 0 lets the host pick its minimum while enabling it */
2725 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/*
 * mmc_power_on() - enable the vmmc supply regulator when driver model
 * regulators are in use; otherwise a no-op (remainder of body elided in
 * this extraction).
 */
2728 static int mmc_power_on(struct mmc *mmc)
2730 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2731 if (mmc->vmmc_supply) {
2732 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2735 puts("Error enabling VMMC supply\n");
/*
 * mmc_power_off() - gate the card clock, then disable the vmmc supply
 * regulator when driver model regulators are in use. A regulator failure
 * is logged at debug level only.
 */
2743 static int mmc_power_off(struct mmc *mmc)
2745 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2746 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2747 if (mmc->vmmc_supply) {
2748 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2751 pr_debug("Error disabling VMMC supply\n");
/*
 * mmc_power_cycle() - full power cycle of the card: power off, let the
 * host do its own cycle hook, delay (per the SD spec recommendation noted
 * below), then power back on. Used to recover cards stuck in UHS modes.
 */
2759 static int mmc_power_cycle(struct mmc *mmc)
2763 ret = mmc_power_off(mmc);
2767 ret = mmc_host_power_cycle(mmc);
2772 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2773 * to be on the safer side.
2776 return mmc_power_on(mmc);
/*
 * mmc_get_op_cond() - power the card up and read its operating
 * conditions: power/regulator init, optional power cycle (UHS disabled if
 * a full cycle is impossible), host init, CMD0 reset, CMD8, then
 * ACMD41 for SD — falling back to CMD1 (MMC) on timeout.
 */
2779 int mmc_get_op_cond(struct mmc *mmc)
2781 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2787 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2788 mmc_adapter_card_type_ident();
2790 err = mmc_power_init(mmc);
2794 #ifdef CONFIG_MMC_QUIRKS
2795 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2796 MMC_QUIRK_RETRY_SEND_CID |
2797 MMC_QUIRK_RETRY_APP_CMD;
2800 err = mmc_power_cycle(mmc);
2803 * if power cycling is not supported, we should not try
2804 * to use the UHS modes, because we wouldn't be able to
2805 * recover from an error during the UHS initialization.
2807 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2809 mmc->host_caps &= ~UHS_CAPS;
2810 err = mmc_power_on(mmc);
2815 #if CONFIG_IS_ENABLED(DM_MMC)
2816 /* The device has already been probed ready for use */
2818 /* made sure it's not NULL earlier */
2819 err = mmc->cfg->ops->init(mmc);
2826 mmc_set_initial_state(mmc);
2828 /* Reset the Card */
2829 err = mmc_go_idle(mmc);
2834 /* The internal partition reset to user partition(0) at every CMD0*/
2835 mmc_get_blk_desc(mmc)->hwpart = 0;
2837 /* Test for SD version 2 */
2838 err = mmc_send_if_cond(mmc);
2840 /* Now try to get the SD card's operating condition */
2841 err = sd_send_op_cond(mmc, uhs_en);
/* a failed UHS attempt needs a power cycle before retrying */
2842 if (err && uhs_en) {
2844 mmc_power_cycle(mmc);
2848 /* If the command timed out, we check for an MMC card */
2849 if (err == -ETIMEDOUT) {
2850 err = mmc_send_op_cond(mmc);
2853 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2854 pr_err("Card did not respond to voltage select!\n");
/*
 * mmc_start_init() - begin (possibly asynchronous) card initialization:
 * seed the host caps with the always-available 1-bit legacy mode, check
 * card detect, then run mmc_get_op_cond(). Sets init_in_progress so
 * mmc_init()/mmc_complete_init() can finish the job later.
 */
2863 int mmc_start_init(struct mmc *mmc)
2869 * all hosts are capable of 1 bit bus-width and able to use the legacy
/* NOTE(review): MMC_CAP(MMC_LEGACY) is OR'd twice — harmless, but
 * the duplicate could be dropped; confirm against the full source. */
2872 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
2873 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2874 #if CONFIG_IS_ENABLED(DM_MMC)
2875 mmc_deferred_probe(mmc);
2877 #if !defined(CONFIG_MMC_BROKEN_CD)
2878 no_card = mmc_getcd(mmc) == 0;
2882 #if !CONFIG_IS_ENABLED(DM_MMC)
2883 /* we pretend there's no card when init is NULL */
2884 no_card = no_card || (mmc->cfg->ops->init == NULL);
2888 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2889 pr_err("MMC: no card present\n");
2894 err = mmc_get_op_cond(mmc);
2897 mmc->init_in_progress = 1;
/*
 * mmc_complete_init() - finish an initialization begun by
 * mmc_start_init(): complete the pending OCR negotiation if any, then run
 * the full mmc_startup() sequence. Clears init_in_progress.
 */
2902 static int mmc_complete_init(struct mmc *mmc)
2906 mmc->init_in_progress = 0;
2907 if (mmc->op_cond_pending)
2908 err = mmc_complete_op_cond(mmc);
2911 err = mmc_startup(mmc);
/*
 * mmc_init() - public entry point: run mmc_start_init() (unless an async
 * init is already in progress) followed by mmc_complete_init(), and log
 * the elapsed time of the whole sequence.
 */
2919 int mmc_init(struct mmc *mmc)
2922 __maybe_unused ulong start;
2923 #if CONFIG_IS_ENABLED(DM_MMC)
2924 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2931 start = get_timer(0);
2933 if (!mmc->init_in_progress)
2934 err = mmc_start_init(mmc);
2937 err = mmc_complete_init(mmc);
2939 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2944 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2945 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2946 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_deinit() - step the card back down from high-speed modes before
 * handing off (e.g. to an OS): re-select a mode with the UHS caps masked
 * out for SD, or with HS200/HS400 masked out for eMMC.
 */
2947 int mmc_deinit(struct mmc *mmc)
2955 caps_filtered = mmc->card_caps &
2956 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2957 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2958 MMC_CAP(UHS_SDR104));
2960 return sd_select_mode_and_width(mmc, caps_filtered);
2962 caps_filtered = mmc->card_caps &
2963 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2965 return mmc_select_mode_and_width(mmc, caps_filtered);
2970 int mmc_set_dsr(struct mmc *mmc, u16 val)
2976 /* CPU-specific MMC initializations */
/* weak default hook; SoC code may override (body elided here) */
2977 __weak int cpu_mmc_init(bd_t *bis)
2982 /* board-specific MMC initializations. */
/* weak default hook; board code may override (body elided here) */
2983 __weak int board_mmc_init(bd_t *bis)
/* mmc_set_preinit() - mark a device for early (pre-relocation) init */
2988 void mmc_set_preinit(struct mmc *mmc, int preinit)
2990 mmc->preinit = preinit;
2993 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_probe() - DM variant: bind all MMC devices in sequence order, then
 * probe each one; probe failures are logged but do not abort the loop.
 */
2994 static int mmc_probe(bd_t *bis)
2998 struct udevice *dev;
3000 ret = uclass_get(UCLASS_MMC, &uc);
3005 * Try to add them in sequence order. Really with driver model we
3006 * should allow holes, but the current MMC list does not allow that.
3007 * So if we request 0, 1, 3 we will get 0, 1, 2.
3009 for (i = 0; ; i++) {
3010 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
3014 uclass_foreach_dev(dev, uc) {
3015 ret = device_probe(dev);
3017 pr_err("%s - probe failed: %d\n", dev->name, ret);
/* non-DM variant: defer entirely to the board hook */
3023 static int mmc_probe(bd_t *bis)
3025 if (board_mmc_init(bis) < 0)
/*
 * mmc_initialize() - one-time subsystem init: guards against repeated
 * calls with a static flag, probes the devices, and (outside SPL) prints
 * the detected device list.
 */
3032 int mmc_initialize(bd_t *bis)
3034 static int initialized = 0;
3036 if (initialized) /* Avoid initializing mmc multiple times */
3040 #if !CONFIG_IS_ENABLED(BLK)
3041 #if !CONFIG_IS_ENABLED(MMC_TINY)
3045 ret = mmc_probe(bis);
3049 #ifndef CONFIG_SPL_BUILD
3050 print_mmc_devices(',');
3057 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_init_device() - probe MMC device number @num through the uclass and
 * fetch its struct mmc. With the FSL eSDHC adapter-ident option the
 * device is additionally flagged for pre-init.
 */
3058 int mmc_init_device(int num)
3060 struct udevice *dev;
3064 ret = uclass_get_device(UCLASS_MMC, num, &dev);
3068 m = mmc_get_mmc_dev(dev);
3071 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
3072 mmc_set_preinit(m, 1);
3081 #ifdef CONFIG_CMD_BKOPS_ENABLE
3082 int mmc_set_bkops_enable(struct mmc *mmc)
3085 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3087 err = mmc_send_ext_csd(mmc, ext_csd);
3089 puts("Could not get ext_csd register values\n");
3093 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3094 puts("Background operations not supported on device\n");
3095 return -EMEDIUMTYPE;
3098 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
3099 puts("Background operations already enabled\n");
3103 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
3105 puts("Failed to enable manual background operations\n");
3109 puts("Enabled manual background operations\n");