1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
6 * Based vaguely on the Linux code
15 #include <dm/device-internal.h>
19 #include <linux/delay.h>
20 #include <power/regulator.h>
23 #include <linux/list.h>
25 #include "mmc_private.h"
27 #define DEFAULT_CMD6_TIMEOUT_MS 500
29 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
31 #if !CONFIG_IS_ENABLED(DM_MMC)
33 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
38 __weak int board_mmc_getwp(struct mmc *mmc)
43 int mmc_getwp(struct mmc *mmc)
47 wp = board_mmc_getwp(mmc);
50 if (mmc->cfg->ops->getwp)
51 wp = mmc->cfg->ops->getwp(mmc);
59 __weak int board_mmc_getcd(struct mmc *mmc)
65 #ifdef CONFIG_MMC_TRACE
66 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
68 printf("CMD_SEND:%d\n", cmd->cmdidx);
69 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
72 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
78 printf("\t\tRET\t\t\t %d\n", ret);
80 switch (cmd->resp_type) {
82 printf("\t\tMMC_RSP_NONE\n");
85 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
89 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
93 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
95 printf("\t\t \t\t 0x%08x \n",
97 printf("\t\t \t\t 0x%08x \n",
99 printf("\t\t \t\t 0x%08x \n",
102 printf("\t\t\t\t\tDUMPING DATA\n");
103 for (i = 0; i < 4; i++) {
105 printf("\t\t\t\t\t%03d - ", i*4);
106 ptr = (u8 *)&cmd->response[i];
108 for (j = 0; j < 4; j++)
109 printf("%02x ", *ptr--);
114 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
118 printf("\t\tERROR MMC rsp not supported\n");
124 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
128 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
129 printf("CURR STATE:%d\n", status);
133 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
134 const char *mmc_mode_name(enum bus_mode mode)
136 static const char *const names[] = {
137 [MMC_LEGACY] = "MMC legacy",
138 [MMC_HS] = "MMC High Speed (26MHz)",
139 [SD_HS] = "SD High Speed (50MHz)",
140 [UHS_SDR12] = "UHS SDR12 (25MHz)",
141 [UHS_SDR25] = "UHS SDR25 (50MHz)",
142 [UHS_SDR50] = "UHS SDR50 (100MHz)",
143 [UHS_SDR104] = "UHS SDR104 (208MHz)",
144 [UHS_DDR50] = "UHS DDR50 (50MHz)",
145 [MMC_HS_52] = "MMC High Speed (52MHz)",
146 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
147 [MMC_HS_200] = "HS200 (200MHz)",
148 [MMC_HS_400] = "HS400 (200MHz)",
149 [MMC_HS_400_ES] = "HS400ES (200MHz)",
152 if (mode >= MMC_MODES_END)
153 return "Unknown mode";
159 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
161 static const int freqs[] = {
162 [MMC_LEGACY] = 25000000,
165 [MMC_HS_52] = 52000000,
166 [MMC_DDR_52] = 52000000,
167 [UHS_SDR12] = 25000000,
168 [UHS_SDR25] = 50000000,
169 [UHS_SDR50] = 100000000,
170 [UHS_DDR50] = 50000000,
171 [UHS_SDR104] = 208000000,
172 [MMC_HS_200] = 200000000,
173 [MMC_HS_400] = 200000000,
174 [MMC_HS_400_ES] = 200000000,
177 if (mode == MMC_LEGACY)
178 return mmc->legacy_speed;
179 else if (mode >= MMC_MODES_END)
185 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
187 mmc->selected_mode = mode;
188 mmc->tran_speed = mmc_mode2freq(mmc, mode);
189 mmc->ddr_mode = mmc_is_mode_ddr(mode);
190 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
191 mmc->tran_speed / 1000000);
195 #if !CONFIG_IS_ENABLED(DM_MMC)
196 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
200 mmmc_trace_before_send(mmc, cmd);
201 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
202 mmmc_trace_after_send(mmc, cmd, ret);
208 int mmc_send_status(struct mmc *mmc, unsigned int *status)
211 int err, retries = 5;
213 cmd.cmdidx = MMC_CMD_SEND_STATUS;
214 cmd.resp_type = MMC_RSP_R1;
215 if (!mmc_host_is_spi(mmc))
216 cmd.cmdarg = mmc->rca << 16;
219 err = mmc_send_cmd(mmc, &cmd, NULL);
221 mmc_trace_state(mmc, &cmd);
222 *status = cmd.response[0];
226 mmc_trace_state(mmc, &cmd);
230 int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
235 err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
240 err = mmc_send_status(mmc, &status);
244 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
245 (status & MMC_STATUS_CURR_STATE) !=
249 if (status & MMC_STATUS_MASK) {
250 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
251 pr_err("Status Error: 0x%08x\n", status);
256 if (timeout_ms-- <= 0)
262 if (timeout_ms <= 0) {
263 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
264 pr_err("Timeout waiting card ready\n");
272 int mmc_set_blocklen(struct mmc *mmc, int len)
280 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
281 cmd.resp_type = MMC_RSP_R1;
284 err = mmc_send_cmd(mmc, &cmd, NULL);
286 #ifdef CONFIG_MMC_QUIRKS
287 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
290 * It has been seen that SET_BLOCKLEN may fail on the first
291 * attempt, let's try a few more times
294 err = mmc_send_cmd(mmc, &cmd, NULL);
304 #ifdef MMC_SUPPORTS_TUNING
305 static const u8 tuning_blk_pattern_4bit[] = {
306 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
307 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
308 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
309 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
310 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
311 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
312 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
313 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
316 static const u8 tuning_blk_pattern_8bit[] = {
317 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
318 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
319 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
320 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
321 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
322 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
323 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
324 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
325 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
326 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
327 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
328 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
329 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
330 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
331 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
332 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
335 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
338 struct mmc_data data;
339 const u8 *tuning_block_pattern;
342 if (mmc->bus_width == 8) {
343 tuning_block_pattern = tuning_blk_pattern_8bit;
344 size = sizeof(tuning_blk_pattern_8bit);
345 } else if (mmc->bus_width == 4) {
346 tuning_block_pattern = tuning_blk_pattern_4bit;
347 size = sizeof(tuning_blk_pattern_4bit);
352 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
356 cmd.resp_type = MMC_RSP_R1;
358 data.dest = (void *)data_buf;
360 data.blocksize = size;
361 data.flags = MMC_DATA_READ;
363 err = mmc_send_cmd(mmc, &cmd, &data);
367 if (memcmp(data_buf, tuning_block_pattern, size))
374 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
378 struct mmc_data data;
381 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
383 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
385 if (mmc->high_capacity)
388 cmd.cmdarg = start * mmc->read_bl_len;
390 cmd.resp_type = MMC_RSP_R1;
393 data.blocks = blkcnt;
394 data.blocksize = mmc->read_bl_len;
395 data.flags = MMC_DATA_READ;
397 if (mmc_send_cmd(mmc, &cmd, &data))
401 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
403 cmd.resp_type = MMC_RSP_R1b;
404 if (mmc_send_cmd(mmc, &cmd, NULL)) {
405 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
406 pr_err("mmc fail to send stop cmd\n");
415 #if !CONFIG_IS_ENABLED(DM_MMC)
416 static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
418 if (mmc->cfg->ops->get_b_max)
419 return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
421 return mmc->cfg->b_max;
425 #if CONFIG_IS_ENABLED(BLK)
426 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
428 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
432 #if CONFIG_IS_ENABLED(BLK)
433 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
435 int dev_num = block_dev->devnum;
437 lbaint_t cur, blocks_todo = blkcnt;
443 struct mmc *mmc = find_mmc_device(dev_num);
447 if (CONFIG_IS_ENABLED(MMC_TINY))
448 err = mmc_switch_part(mmc, block_dev->hwpart);
450 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
455 if ((start + blkcnt) > block_dev->lba) {
456 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
457 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
458 start + blkcnt, block_dev->lba);
463 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
464 pr_debug("%s: Failed to set blocklen\n", __func__);
468 b_max = mmc_get_b_max(mmc, dst, blkcnt);
471 cur = (blocks_todo > b_max) ? b_max : blocks_todo;
472 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
473 pr_debug("%s: Failed to read blocks\n", __func__);
478 dst += cur * mmc->read_bl_len;
479 } while (blocks_todo > 0);
484 static int mmc_go_idle(struct mmc *mmc)
491 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
493 cmd.resp_type = MMC_RSP_NONE;
495 err = mmc_send_cmd(mmc, &cmd, NULL);
505 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
506 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
512 * Send CMD11 only if the request is to switch the card to
515 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
516 return mmc_set_signal_voltage(mmc, signal_voltage);
518 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
520 cmd.resp_type = MMC_RSP_R1;
522 err = mmc_send_cmd(mmc, &cmd, NULL);
526 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
530 * The card should drive cmd and dat[0:3] low immediately
531 * after the response of cmd11, but wait 100 us to be sure
533 err = mmc_wait_dat0(mmc, 0, 100);
540 * During a signal voltage level switch, the clock must be gated
541 * for 5 ms according to the SD spec
543 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
545 err = mmc_set_signal_voltage(mmc, signal_voltage);
549 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
551 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
554 * Failure to switch is indicated by the card holding
555 * dat[0:3] low. Wait for at least 1 ms according to spec
557 err = mmc_wait_dat0(mmc, 1, 1000);
567 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
574 cmd.cmdidx = MMC_CMD_APP_CMD;
575 cmd.resp_type = MMC_RSP_R1;
578 err = mmc_send_cmd(mmc, &cmd, NULL);
583 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
584 cmd.resp_type = MMC_RSP_R3;
587 * Most cards do not answer if some reserved bits
588 * in the ocr are set. However, some controllers
589 * can set bit 7 (reserved for low voltages), but
590 * how to manage low-voltage SD cards is not yet
593 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
594 (mmc->cfg->voltages & 0xff8000);
596 if (mmc->version == SD_VERSION_2)
597 cmd.cmdarg |= OCR_HCS;
600 cmd.cmdarg |= OCR_S18R;
602 err = mmc_send_cmd(mmc, &cmd, NULL);
607 if (cmd.response[0] & OCR_BUSY)
616 if (mmc->version != SD_VERSION_2)
617 mmc->version = SD_VERSION_1_0;
619 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
620 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
621 cmd.resp_type = MMC_RSP_R3;
624 err = mmc_send_cmd(mmc, &cmd, NULL);
630 mmc->ocr = cmd.response[0];
632 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
633 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
635 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
641 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
647 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
652 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
653 cmd.resp_type = MMC_RSP_R3;
655 if (use_arg && !mmc_host_is_spi(mmc))
656 cmd.cmdarg = OCR_HCS |
657 (mmc->cfg->voltages &
658 (mmc->ocr & OCR_VOLTAGE_MASK)) |
659 (mmc->ocr & OCR_ACCESS_MODE);
661 err = mmc_send_cmd(mmc, &cmd, NULL);
664 mmc->ocr = cmd.response[0];
668 static int mmc_send_op_cond(struct mmc *mmc)
672 /* Some cards seem to need this */
675 /* Ask the card for its capabilities */
676 for (i = 0; i < 2; i++) {
677 err = mmc_send_op_cond_iter(mmc, i != 0);
681 /* exit if not busy (flag seems to be inverted) */
682 if (mmc->ocr & OCR_BUSY)
685 mmc->op_cond_pending = 1;
689 static int mmc_complete_op_cond(struct mmc *mmc)
696 mmc->op_cond_pending = 0;
697 if (!(mmc->ocr & OCR_BUSY)) {
698 /* Some cards seem to need this */
701 start = get_timer(0);
703 err = mmc_send_op_cond_iter(mmc, 1);
706 if (mmc->ocr & OCR_BUSY)
708 if (get_timer(start) > timeout)
714 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
715 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
716 cmd.resp_type = MMC_RSP_R3;
719 err = mmc_send_cmd(mmc, &cmd, NULL);
724 mmc->ocr = cmd.response[0];
727 mmc->version = MMC_VERSION_UNKNOWN;
729 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
736 int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
739 struct mmc_data data;
742 /* Get the Card Status Register */
743 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
744 cmd.resp_type = MMC_RSP_R1;
747 data.dest = (char *)ext_csd;
749 data.blocksize = MMC_MAX_BLOCK_LEN;
750 data.flags = MMC_DATA_READ;
752 err = mmc_send_cmd(mmc, &cmd, &data);
757 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
760 unsigned int status, start;
762 int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
763 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
764 (index == EXT_CSD_PART_CONF);
768 if (mmc->gen_cmd6_time)
769 timeout_ms = mmc->gen_cmd6_time * 10;
771 if (is_part_switch && mmc->part_switch_time)
772 timeout_ms = mmc->part_switch_time * 10;
774 cmd.cmdidx = MMC_CMD_SWITCH;
775 cmd.resp_type = MMC_RSP_R1b;
776 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
781 ret = mmc_send_cmd(mmc, &cmd, NULL);
782 } while (ret && retries-- > 0);
787 start = get_timer(0);
789 /* poll dat0 for rdy/busy status */
790 ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
791 if (ret && ret != -ENOSYS)
795 * In cases when not allowed to poll by using CMD13 or because we aren't
796 * capable of polling by using mmc_wait_dat0, then rely on waiting the
797 * stated timeout to be sufficient.
799 if (ret == -ENOSYS && !send_status)
802 /* Finally wait until the card is ready or indicates a failure
803 * to switch. It doesn't hurt to use CMD13 here even if send_status
804 * is false, because by now (after 'timeout_ms' ms) the bus should be
808 ret = mmc_send_status(mmc, &status);
810 if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
811 pr_debug("switch failed %d/%d/0x%x !\n", set, index,
815 if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
818 } while (get_timer(start) < timeout_ms);
823 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
825 return __mmc_switch(mmc, set, index, value, true);
828 int mmc_boot_wp(struct mmc *mmc)
830 return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
833 #if !CONFIG_IS_ENABLED(MMC_TINY)
834 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
840 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
846 speed_bits = EXT_CSD_TIMING_HS;
848 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
850 speed_bits = EXT_CSD_TIMING_HS200;
853 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
855 speed_bits = EXT_CSD_TIMING_HS400;
858 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
860 speed_bits = EXT_CSD_TIMING_HS400;
864 speed_bits = EXT_CSD_TIMING_LEGACY;
870 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
871 speed_bits, !hsdowngrade);
875 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
876 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
878 * In case the eMMC is in HS200/HS400 mode and we are downgrading
879 * to HS mode, the card clock is still running much faster than
880 * the supported HS mode clock, so we can not reliably read out
881 * Extended CSD. Reconfigure the controller to run at HS mode.
884 mmc_select_mode(mmc, MMC_HS);
885 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
889 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
890 /* Now check to see that it worked */
891 err = mmc_send_ext_csd(mmc, test_csd);
895 /* No high-speed support */
896 if (!test_csd[EXT_CSD_HS_TIMING])
903 static int mmc_get_capabilities(struct mmc *mmc)
905 u8 *ext_csd = mmc->ext_csd;
908 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
910 if (mmc_host_is_spi(mmc))
913 /* Only version 4 supports high-speed */
914 if (mmc->version < MMC_VERSION_4)
918 pr_err("No ext_csd found!\n"); /* this should never happen */
922 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
924 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
925 mmc->cardtype = cardtype;
927 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
928 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
929 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
930 mmc->card_caps |= MMC_MODE_HS200;
933 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
934 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
935 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
936 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
937 mmc->card_caps |= MMC_MODE_HS400;
940 if (cardtype & EXT_CSD_CARD_TYPE_52) {
941 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
942 mmc->card_caps |= MMC_MODE_DDR_52MHz;
943 mmc->card_caps |= MMC_MODE_HS_52MHz;
945 if (cardtype & EXT_CSD_CARD_TYPE_26)
946 mmc->card_caps |= MMC_MODE_HS;
948 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
949 if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
950 (mmc->card_caps & MMC_MODE_HS400)) {
951 mmc->card_caps |= MMC_MODE_HS400_ES;
959 static int mmc_set_capacity(struct mmc *mmc, int part_num)
963 mmc->capacity = mmc->capacity_user;
967 mmc->capacity = mmc->capacity_boot;
970 mmc->capacity = mmc->capacity_rpmb;
976 mmc->capacity = mmc->capacity_gp[part_num - 4];
982 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
987 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
993 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
995 (mmc->part_config & ~PART_ACCESS_MASK)
996 | (part_num & PART_ACCESS_MASK));
997 } while (ret && retry--);
1000 * Set the capacity if the switch succeeded or was intended
1001 * to return to representing the raw device.
1003 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1004 ret = mmc_set_capacity(mmc, part_num);
1005 mmc_get_blk_desc(mmc)->hwpart = part_num;
1011 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
1012 int mmc_hwpart_config(struct mmc *mmc,
1013 const struct mmc_hwpart_conf *conf,
1014 enum mmc_hwpart_conf_mode mode)
1019 u32 gp_size_mult[4];
1020 u32 max_enh_size_mult;
1021 u32 tot_enh_size_mult = 0;
1024 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1026 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1029 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1030 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1031 return -EMEDIUMTYPE;
1034 if (!(mmc->part_support & PART_SUPPORT)) {
1035 pr_err("Card does not support partitioning\n");
1036 return -EMEDIUMTYPE;
1039 if (!mmc->hc_wp_grp_size) {
1040 pr_err("Card does not define HC WP group size\n");
1041 return -EMEDIUMTYPE;
1044 /* check partition alignment and total enhanced size */
1045 if (conf->user.enh_size) {
1046 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1047 conf->user.enh_start % mmc->hc_wp_grp_size) {
1048 pr_err("User data enhanced area not HC WP group "
1052 part_attrs |= EXT_CSD_ENH_USR;
1053 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1054 if (mmc->high_capacity) {
1055 enh_start_addr = conf->user.enh_start;
1057 enh_start_addr = (conf->user.enh_start << 9);
1063 tot_enh_size_mult += enh_size_mult;
1065 for (pidx = 0; pidx < 4; pidx++) {
1066 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1067 pr_err("GP%i partition not HC WP group size "
1068 "aligned\n", pidx+1);
1071 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1072 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1073 part_attrs |= EXT_CSD_ENH_GP(pidx);
1074 tot_enh_size_mult += gp_size_mult[pidx];
1078 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1079 pr_err("Card does not support enhanced attribute\n");
1080 return -EMEDIUMTYPE;
1083 err = mmc_send_ext_csd(mmc, ext_csd);
1088 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1089 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1090 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1091 if (tot_enh_size_mult > max_enh_size_mult) {
1092 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1093 tot_enh_size_mult, max_enh_size_mult);
1094 return -EMEDIUMTYPE;
1097 /* The default value of EXT_CSD_WR_REL_SET is device
1098 * dependent, the values can only be changed if the
1099 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1100 * changed only once and before partitioning is completed. */
1101 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1102 if (conf->user.wr_rel_change) {
1103 if (conf->user.wr_rel_set)
1104 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1106 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1108 for (pidx = 0; pidx < 4; pidx++) {
1109 if (conf->gp_part[pidx].wr_rel_change) {
1110 if (conf->gp_part[pidx].wr_rel_set)
1111 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1113 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1117 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1118 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1119 puts("Card does not support host controlled partition write "
1120 "reliability settings\n");
1121 return -EMEDIUMTYPE;
1124 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1125 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1126 pr_err("Card already partitioned\n");
1130 if (mode == MMC_HWPART_CONF_CHECK)
1133 /* Partitioning requires high-capacity size definitions */
1134 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1135 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1136 EXT_CSD_ERASE_GROUP_DEF, 1);
1141 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1143 #if CONFIG_IS_ENABLED(MMC_WRITE)
1144 /* update erase group size to be high-capacity */
1145 mmc->erase_grp_size =
1146 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1151 /* all OK, write the configuration */
1152 for (i = 0; i < 4; i++) {
1153 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1154 EXT_CSD_ENH_START_ADDR+i,
1155 (enh_start_addr >> (i*8)) & 0xFF);
1159 for (i = 0; i < 3; i++) {
1160 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1161 EXT_CSD_ENH_SIZE_MULT+i,
1162 (enh_size_mult >> (i*8)) & 0xFF);
1166 for (pidx = 0; pidx < 4; pidx++) {
1167 for (i = 0; i < 3; i++) {
1168 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1169 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1170 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1175 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1176 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1180 if (mode == MMC_HWPART_CONF_SET)
1183 /* The WR_REL_SET is a write-once register but shall be
1184 * written before setting PART_SETTING_COMPLETED. As it is
1185 * write-once we can only write it when completing the
1187 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1188 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1189 EXT_CSD_WR_REL_SET, wr_rel_set);
1194 /* Setting PART_SETTING_COMPLETED confirms the partition
1195 * configuration but it only becomes effective after power
1196 * cycle, so we do not adjust the partition related settings
1197 * in the mmc struct. */
1199 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1200 EXT_CSD_PARTITION_SETTING,
1201 EXT_CSD_PARTITION_SETTING_COMPLETED);
1209 #if !CONFIG_IS_ENABLED(DM_MMC)
1210 int mmc_getcd(struct mmc *mmc)
1214 cd = board_mmc_getcd(mmc);
1217 if (mmc->cfg->ops->getcd)
1218 cd = mmc->cfg->ops->getcd(mmc);
1227 #if !CONFIG_IS_ENABLED(MMC_TINY)
1228 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1231 struct mmc_data data;
1233 /* Switch the frequency */
1234 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1235 cmd.resp_type = MMC_RSP_R1;
1236 cmd.cmdarg = (mode << 31) | 0xffffff;
1237 cmd.cmdarg &= ~(0xf << (group * 4));
1238 cmd.cmdarg |= value << (group * 4);
1240 data.dest = (char *)resp;
1241 data.blocksize = 64;
1243 data.flags = MMC_DATA_READ;
1245 return mmc_send_cmd(mmc, &cmd, &data);
1248 static int sd_get_capabilities(struct mmc *mmc)
1252 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1253 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1254 struct mmc_data data;
1256 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1260 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
1262 if (mmc_host_is_spi(mmc))
1265 /* Read the SCR to find out if this card supports higher speeds */
1266 cmd.cmdidx = MMC_CMD_APP_CMD;
1267 cmd.resp_type = MMC_RSP_R1;
1268 cmd.cmdarg = mmc->rca << 16;
1270 err = mmc_send_cmd(mmc, &cmd, NULL);
1275 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1276 cmd.resp_type = MMC_RSP_R1;
1282 data.dest = (char *)scr;
1285 data.flags = MMC_DATA_READ;
1287 err = mmc_send_cmd(mmc, &cmd, &data);
1296 mmc->scr[0] = __be32_to_cpu(scr[0]);
1297 mmc->scr[1] = __be32_to_cpu(scr[1]);
1299 switch ((mmc->scr[0] >> 24) & 0xf) {
1301 mmc->version = SD_VERSION_1_0;
1304 mmc->version = SD_VERSION_1_10;
1307 mmc->version = SD_VERSION_2;
1308 if ((mmc->scr[0] >> 15) & 0x1)
1309 mmc->version = SD_VERSION_3;
1312 mmc->version = SD_VERSION_1_0;
1316 if (mmc->scr[0] & SD_DATA_4BIT)
1317 mmc->card_caps |= MMC_MODE_4BIT;
1319 /* Version 1.0 doesn't support switching */
1320 if (mmc->version == SD_VERSION_1_0)
1325 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1326 (u8 *)switch_status);
1331 /* The high-speed function is busy. Try again */
1332 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1336 /* If high-speed isn't supported, we return */
1337 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1338 mmc->card_caps |= MMC_CAP(SD_HS);
1340 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1341 /* Versions before 3.0 don't support UHS modes */
1342 if (mmc->version < SD_VERSION_3)
1345 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1346 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1347 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1348 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1349 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1350 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1351 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1352 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1353 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1354 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1355 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1361 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1365 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1368 /* SD versions 1.00 and 1.01 do not support CMD 6 */
1369 if (mmc->version == SD_VERSION_1_0)
1374 speed = UHS_SDR12_BUS_SPEED;
1377 speed = HIGH_SPEED_BUS_SPEED;
1379 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1381 speed = UHS_SDR12_BUS_SPEED;
1384 speed = UHS_SDR25_BUS_SPEED;
1387 speed = UHS_SDR50_BUS_SPEED;
1390 speed = UHS_DDR50_BUS_SPEED;
1393 speed = UHS_SDR104_BUS_SPEED;
1400 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1404 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1410 static int sd_select_bus_width(struct mmc *mmc, int w)
1415 if ((w != 4) && (w != 1))
1418 cmd.cmdidx = MMC_CMD_APP_CMD;
1419 cmd.resp_type = MMC_RSP_R1;
1420 cmd.cmdarg = mmc->rca << 16;
1422 err = mmc_send_cmd(mmc, &cmd, NULL);
1426 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1427 cmd.resp_type = MMC_RSP_R1;
1432 err = mmc_send_cmd(mmc, &cmd, NULL);
1440 #if CONFIG_IS_ENABLED(MMC_WRITE)
1441 static int sd_read_ssr(struct mmc *mmc)
1443 static const unsigned int sd_au_size[] = {
1444 0, SZ_16K / 512, SZ_32K / 512,
1445 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1446 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1447 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1448 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1453 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1454 struct mmc_data data;
1456 unsigned int au, eo, et, es;
1458 cmd.cmdidx = MMC_CMD_APP_CMD;
1459 cmd.resp_type = MMC_RSP_R1;
1460 cmd.cmdarg = mmc->rca << 16;
1462 err = mmc_send_cmd(mmc, &cmd, NULL);
1463 #ifdef CONFIG_MMC_QUIRKS
1464 if (err && (mmc->quirks & MMC_QUIRK_RETRY_APP_CMD)) {
1467 * It has been seen that APP_CMD may fail on the first
1468 * attempt, let's try a few more times
1471 err = mmc_send_cmd(mmc, &cmd, NULL);
1474 } while (retries--);
1480 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1481 cmd.resp_type = MMC_RSP_R1;
1485 data.dest = (char *)ssr;
1486 data.blocksize = 64;
1488 data.flags = MMC_DATA_READ;
1490 err = mmc_send_cmd(mmc, &cmd, &data);
1498 for (i = 0; i < 16; i++)
1499 ssr[i] = be32_to_cpu(ssr[i]);
1501 au = (ssr[2] >> 12) & 0xF;
1502 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1503 mmc->ssr.au = sd_au_size[au];
1504 es = (ssr[3] >> 24) & 0xFF;
1505 es |= (ssr[2] & 0xFF) << 8;
1506 et = (ssr[3] >> 18) & 0x3F;
1508 eo = (ssr[3] >> 16) & 0x3;
1509 mmc->ssr.erase_timeout = (et * 1000) / es;
1510 mmc->ssr.erase_offset = eo * 1000;
1513 pr_debug("Invalid Allocation Unit Size.\n");
1519 /* frequency bases */
1520 /* divided by 10 to be nice to platforms without floating point */
1521 static const int fbase[] = {
1528 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1529 * to platforms without floating point.
1531 static const u8 multipliers[] = {
1550 static inline int bus_width(uint cap)
1552 if (cap == MMC_MODE_8BIT)
1554 if (cap == MMC_MODE_4BIT)
1556 if (cap == MMC_MODE_1BIT)
1558 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1562 #if !CONFIG_IS_ENABLED(DM_MMC)
1563 #ifdef MMC_SUPPORTS_TUNING
1564 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1570 static int mmc_set_ios(struct mmc *mmc)
1574 if (mmc->cfg->ops->set_ios)
1575 ret = mmc->cfg->ops->set_ios(mmc);
1580 static int mmc_host_power_cycle(struct mmc *mmc)
1584 if (mmc->cfg->ops->host_power_cycle)
1585 ret = mmc->cfg->ops->host_power_cycle(mmc);
1591 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1594 if (clock > mmc->cfg->f_max)
1595 clock = mmc->cfg->f_max;
1597 if (clock < mmc->cfg->f_min)
1598 clock = mmc->cfg->f_min;
1602 mmc->clk_disable = disable;
1604 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1606 return mmc_set_ios(mmc);
1609 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1611 mmc->bus_width = width;
1613 return mmc_set_ios(mmc);
1616 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1618 * helper function to display the capabilities in a human
1619 * friendly manner. The capabilities include bus width and
1622 void mmc_dump_capabilities(const char *text, uint caps)
1626 pr_debug("%s: widths [", text);
1627 if (caps & MMC_MODE_8BIT)
1629 if (caps & MMC_MODE_4BIT)
1631 if (caps & MMC_MODE_1BIT)
1633 pr_debug("\b\b] modes [");
1634 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1635 if (MMC_CAP(mode) & caps)
1636 pr_debug("%s, ", mmc_mode_name(mode));
1637 pr_debug("\b\b]\n");
1641 struct mode_width_tuning {
1644 #ifdef MMC_SUPPORTS_TUNING
1649 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1650 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1653 case MMC_SIGNAL_VOLTAGE_000: return 0;
1654 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1655 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1656 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1661 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1665 if (mmc->signal_voltage == signal_voltage)
1668 mmc->signal_voltage = signal_voltage;
1669 err = mmc_set_ios(mmc);
1671 pr_debug("unable to set voltage (err %d)\n", err);
1676 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1682 #if !CONFIG_IS_ENABLED(MMC_TINY)
1683 static const struct mode_width_tuning sd_modes_by_pref[] = {
1684 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1685 #ifdef MMC_SUPPORTS_TUNING
1688 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1689 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1694 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1698 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1702 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1707 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1709 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1712 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1717 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1721 #define for_each_sd_mode_by_pref(caps, mwt) \
1722 for (mwt = sd_modes_by_pref;\
1723 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1725 if (caps & MMC_CAP(mwt->mode))
/*
 * sd_select_mode_and_width() - pick the fastest bus mode/width an SD card
 * and the host both support, then program card and host to use it.
 * @card_caps: capability bits reported by the card
 *
 * Iterates sd_modes_by_pref in preference order, trying 4-bit before
 * 1-bit width for each mode. On any per-mode failure it falls back to
 * MMC_LEGACY as a safe speed before trying the next candidate.
 */
1727 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1730 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1731 const struct mode_width_tuning *mwt;
1732 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* UHS only makes sense if the card acknowledged the 1.8V request (S18R) */
1733 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1735 bool uhs_en = false;
1740 mmc_dump_capabilities("sd card", card_caps);
1741 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts are fixed at 1-bit legacy mode; nothing to negotiate */
1744 if (mmc_host_is_spi(mmc)) {
1745 mmc_set_bus_width(mmc, 1);
1746 mmc_select_mode(mmc, MMC_LEGACY);
1747 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1751 /* Restrict card's capabilities by what the host can do */
1752 caps = card_caps & mmc->host_caps;
1757 for_each_sd_mode_by_pref(caps, mwt) {
1760 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1761 if (*w & caps & mwt->widths) {
1762 pr_debug("trying mode %s width %d (at %d MHz)\n",
1763 mmc_mode_name(mwt->mode),
1765 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1767 /* configure the bus width (card + host) */
1768 err = sd_select_bus_width(mmc, bus_width(*w));
1771 mmc_set_bus_width(mmc, bus_width(*w));
1773 /* configure the bus mode (card) */
1774 err = sd_set_card_speed(mmc, mwt->mode);
1778 /* configure the bus mode (host) */
1779 mmc_select_mode(mmc, mwt->mode);
1780 mmc_set_clock(mmc, mmc->tran_speed,
1783 #ifdef MMC_SUPPORTS_TUNING
1784 /* execute tuning if needed */
1785 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1786 err = mmc_execute_tuning(mmc,
1789 pr_debug("tuning failed\n");
1795 #if CONFIG_IS_ENABLED(MMC_WRITE)
/* SSR is needed for erase/write info; treated as non-fatal */
1796 err = sd_read_ssr(mmc);
1798 pr_warn("unable to read ssr\n");
1804 /* revert to a safer bus speed */
1805 mmc_select_mode(mmc, MMC_LEGACY);
1806 mmc_set_clock(mmc, mmc->tran_speed,
1812 pr_err("unable to select a mode\n");
1817  * Read and compare the part of the ext_csd that is constant.
1818  * This can be used to check that the transfer is working
/*
 * mmc_read_and_compare_ext_csd() - sanity-check the current bus setup by
 * re-reading EXT_CSD and comparing read-only fields against the copy
 * cached in mmc->ext_csd. Only meaningful for MMC v4+ devices (older
 * ones have no EXT_CSD).
 */
1821 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1824 const u8 *ext_csd = mmc->ext_csd;
1825 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1827 if (mmc->version < MMC_VERSION_4)
1830 err = mmc_send_ext_csd(mmc, test_csd);
1834 /* Only compare read only fields */
1835 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1836 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1837 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1838 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1839 ext_csd[EXT_CSD_REV]
1840 == test_csd[EXT_CSD_REV] &&
1841 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1842 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
/* SEC_CNT is 4 bytes little-endian, hence the memcmp over 4 */
1843 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1844 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1850 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_set_lowest_voltage() - select the lowest signal voltage that both
 * the card (per its EXT_CSD card type bits) and @allowed_mask permit,
 * and apply it via mmc_set_signal_voltage().
 * @mode: target bus mode; determines which card-type bits are consulted
 * @allowed_mask: MMC_SIGNAL_VOLTAGE_* bits the caller allows
 */
1851 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1852 uint32_t allowed_mask)
1860 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1861 EXT_CSD_CARD_TYPE_HS400_1_8V))
1862 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1863 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1864 EXT_CSD_CARD_TYPE_HS400_1_2V))
1865 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1868 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1869 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1870 MMC_SIGNAL_VOLTAGE_180;
1871 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1872 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1875 card_mask |= MMC_SIGNAL_VOLTAGE_330;
/*
 * ffs() picks the lowest set bit of the intersection; with the
 * MMC_SIGNAL_VOLTAGE_* bit ordering that is the lowest candidate
 * voltage. On failure that bit is dropped and the next one is tried.
 */
1879 while (card_mask & allowed_mask) {
1880 enum mmc_voltage best_match;
1882 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1883 if (!mmc_set_signal_voltage(mmc, best_match))
1886 allowed_mask &= ~best_match;
1892 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1893 uint32_t allowed_mask)
1899 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1900 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1902 .mode = MMC_HS_400_ES,
1903 .widths = MMC_MODE_8BIT,
1906 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1909 .widths = MMC_MODE_8BIT,
1910 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1913 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1916 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1917 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1922 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1926 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1930 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1934 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1938 #define for_each_mmc_mode_by_pref(caps, mwt) \
1939 for (mwt = mmc_modes_by_pref;\
1940 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1942 if (caps & MMC_CAP(mwt->mode))
1944 static const struct ext_csd_bus_width {
1948 } ext_csd_bus_width[] = {
1949 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1950 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1951 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1952 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1953 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1956 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_select_hs400() - switch an eMMC device into HS400 mode.
 *
 * HS400 cannot be tuned directly: the sequence (per JESD84) is
 * HS200 + tuning, drop back to HS, switch to 8-bit DDR bus width,
 * then switch the card and host to HS400 timing.
 */
1957 static int mmc_select_hs400(struct mmc *mmc)
1961 /* Set timing to HS200 for tuning */
1962 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1966 /* configure the bus mode (host) */
1967 mmc_select_mode(mmc, MMC_HS_200);
1968 mmc_set_clock(mmc, mmc->tran_speed, false);
1970 /* execute tuning if needed */
1971 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1973 debug("tuning failed\n");
1977 /* Set back to HS */
1978 mmc_set_card_speed(mmc, MMC_HS, true);
1980 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1981 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1985 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1989 mmc_select_mode(mmc, MMC_HS_400);
1990 err = mmc_set_clock(mmc, mmc->tran_speed, false);
1997 static int mmc_select_hs400(struct mmc *mmc)
2003 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2004 #if !CONFIG_IS_ENABLED(DM_MMC)
2005 static int mmc_set_enhanced_strobe(struct mmc *mmc)
/*
 * mmc_select_hs400es() - switch an eMMC device into HS400 Enhanced
 * Strobe mode. Unlike plain HS400 no tuning is required: the card is
 * put in HS, switched to 8-bit DDR with the strobe bit set, then moved
 * to HS400ES timing and the host's enhanced-strobe support is enabled.
 */
2010 static int mmc_select_hs400es(struct mmc *mmc)
2014 err = mmc_set_card_speed(mmc, MMC_HS, true);
2018 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2019 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2020 EXT_CSD_BUS_WIDTH_STROBE);
2022 printf("switch to bus width for hs400 failed\n");
2025 /* TODO: driver strength */
2026 err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2030 mmc_select_mode(mmc, MMC_HS_400_ES);
2031 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2035 return mmc_set_enhanced_strobe(mmc);
2038 static int mmc_select_hs400es(struct mmc *mmc)
2044 #define for_each_supported_width(caps, ddr, ecbv) \
2045 for (ecbv = ext_csd_bus_width;\
2046 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2048 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
/*
 * mmc_select_mode_and_width() - pick the fastest bus mode/width an eMMC
 * device and the host both support, then program card and host to use it.
 * @card_caps: capability bits reported by the card
 *
 * Walks mmc_modes_by_pref in preference order, trying the supported bus
 * widths for each mode. Any failure reverts the card to 1-bit legacy
 * mode (and the previous signal voltage) before the next candidate.
 */
2050 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2053 const struct mode_width_tuning *mwt;
2054 const struct ext_csd_bus_width *ecbw;
2057 mmc_dump_capabilities("mmc", card_caps);
2058 mmc_dump_capabilities("host", mmc->host_caps);
/* SPI hosts are fixed at 1-bit legacy mode; nothing to negotiate */
2061 if (mmc_host_is_spi(mmc)) {
2062 mmc_set_bus_width(mmc, 1);
2063 mmc_select_mode(mmc, MMC_LEGACY);
2064 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2068 /* Restrict card's capabilities by what the host can do */
2069 card_caps &= mmc->host_caps;
2071 /* Only version 4 of MMC supports wider bus widths */
2072 if (mmc->version < MMC_VERSION_4)
2075 if (!mmc->ext_csd) {
2076 pr_debug("No ext_csd found!\n"); /* this should never happen */
2080 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2081 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2083  * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2084  * before doing anything else, since a transition from either of
2085  * the HS200/HS400 mode directly to legacy mode is not supported.
2087 if (mmc->selected_mode == MMC_HS_200 ||
2088 mmc->selected_mode == MMC_HS_400)
2089 mmc_set_card_speed(mmc, MMC_HS, true);
2092 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2094 for_each_mmc_mode_by_pref(card_caps, mwt) {
2095 for_each_supported_width(card_caps & mwt->widths,
2096 mmc_is_mode_ddr(mwt->mode), ecbw) {
2097 enum mmc_voltage old_voltage;
2098 pr_debug("trying mode %s width %d (at %d MHz)\n",
2099 mmc_mode_name(mwt->mode),
2100 bus_width(ecbw->cap),
2101 mmc_mode2freq(mmc, mwt->mode) / 1000000);
/* remember the voltage so a failed attempt can restore it */
2102 old_voltage = mmc->signal_voltage;
2103 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2104 MMC_ALL_SIGNAL_VOLTAGE);
2108 /* configure the bus width (card + host) */
2109 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2111 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2114 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
/* HS400/HS400ES have dedicated multi-step switch sequences */
2116 if (mwt->mode == MMC_HS_400) {
2117 err = mmc_select_hs400(mmc);
2119 printf("Select HS400 failed %d\n", err);
2122 } else if (mwt->mode == MMC_HS_400_ES) {
2123 err = mmc_select_hs400es(mmc);
2125 printf("Select HS400ES failed %d\n",
2130 /* configure the bus speed (card) */
2131 err = mmc_set_card_speed(mmc, mwt->mode, false);
2136  * configure the bus width AND the ddr mode
2137  * (card). The host side will be taken care
2138  * of in the next step
2140 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2141 err = mmc_switch(mmc,
2142 EXT_CSD_CMD_SET_NORMAL,
2144 ecbw->ext_csd_bits);
2149 /* configure the bus mode (host) */
2150 mmc_select_mode(mmc, mwt->mode);
2151 mmc_set_clock(mmc, mmc->tran_speed,
2153 #ifdef MMC_SUPPORTS_TUNING
2155 /* execute tuning if needed */
2157 err = mmc_execute_tuning(mmc,
2160 pr_debug("tuning failed\n");
2167 /* do a transfer to check the configuration */
2168 err = mmc_read_and_compare_ext_csd(mmc);
2172 mmc_set_signal_voltage(mmc, old_voltage);
2173 /* if an error occurred, revert to a safer bus mode */
2174 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2175 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2176 mmc_select_mode(mmc, MMC_LEGACY);
2177 mmc_set_bus_width(mmc, 1);
2181 pr_err("unable to select a mode\n");
2187 #if CONFIG_IS_ENABLED(MMC_TINY)
2188 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
/*
 * mmc_startup_v4() - MMC v4+ specific initialization: read EXT_CSD and
 * derive the device version, capacities, partition configuration and
 * erase/write-protect group geometry from it.
 *
 * No-op for SD cards and MMC devices older than v4 (no EXT_CSD).
 * With MMC_TINY the EXT_CSD is kept in a static backup buffer instead
 * of a malloc'd copy.
 */
2191 static int mmc_startup_v4(struct mmc *mmc)
2195 bool has_parts = false;
2196 bool part_completed;
2197 static const u32 mmc_versions[] = {
2209 #if CONFIG_IS_ENABLED(MMC_TINY)
2210 u8 *ext_csd = ext_csd_bkup;
2212 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2216 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2218 err = mmc_send_ext_csd(mmc, ext_csd);
2222 /* store the ext csd for future reference */
2224 mmc->ext_csd = ext_csd;
2226 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2228 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2231 /* check ext_csd version and capacity */
2232 err = mmc_send_ext_csd(mmc, ext_csd);
2236 /* store the ext csd for future reference */
2238 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN)
2241 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
/* EXT_CSD_REV indexes mmc_versions[]; reject out-of-range revisions */
2243 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2246 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2248 if (mmc->version >= MMC_VERSION_4_2) {
2250  * According to the JEDEC Standard, the value of
2251  * ext_csd's capacity is valid if the value is more
/* SEC_CNT is a 4-byte little-endian sector count */
2254 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2255 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2256 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2257 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2258 capacity *= MMC_MAX_BLOCK_LEN;
2259 if ((capacity >> 20) > 2 * 1024)
2260 mmc->capacity_user = capacity;
2263 if (mmc->version >= MMC_VERSION_4_5)
2264 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2266 /* The partition data may be non-zero but it is only
2267  * effective if PARTITION_SETTING_COMPLETED is set in
2268  * EXT_CSD, so ignore any data if this bit is not set,
2269  * except for enabling the high-capacity group size
2270  * definition (see below).
2272 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2273 EXT_CSD_PARTITION_SETTING_COMPLETED);
2275 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2276 /* Some eMMC set the value too low so set a minimum */
2277 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2278 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2280 /* store the partition info of emmc */
2281 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2282 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2283 ext_csd[EXT_CSD_BOOT_MULT])
2284 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2285 if (part_completed &&
2286 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2287 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
/* boot/RPMB multipliers are in 128 KiB units, hence << 17 */
2289 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2291 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
/* the four general-purpose partitions: 3-byte size multiplier each */
2293 for (i = 0; i < 4; i++) {
2294 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2295 uint mult = (ext_csd[idx + 2] << 16) +
2296 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2299 if (!part_completed)
2301 mmc->capacity_gp[i] = mult;
2302 mmc->capacity_gp[i] *=
2303 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2304 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
/* groups -> bytes: each group unit is 512 KiB (<< 19) */
2305 mmc->capacity_gp[i] <<= 19;
2308 #ifndef CONFIG_SPL_BUILD
2309 if (part_completed) {
2310 mmc->enh_user_size =
2311 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2312 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2313 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2314 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2315 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2316 mmc->enh_user_size <<= 19;
2317 mmc->enh_user_start =
2318 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2319 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2320 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2321 ext_csd[EXT_CSD_ENH_START_ADDR];
/* high-capacity devices address in 512-byte sectors, not bytes */
2322 if (mmc->high_capacity)
2323 mmc->enh_user_start <<= 9;
2328  * Host needs to enable ERASE_GRP_DEF bit if device is
2329  * partitioned. This bit will be lost every time after a reset
2330  * or power off. This will affect erase size.
2334 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2335 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2338 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2339 EXT_CSD_ERASE_GROUP_DEF, 1);
/* keep the cached copy in sync with what was just written */
2344 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2347 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2348 #if CONFIG_IS_ENABLED(MMC_WRITE)
2349 /* Read out group size from ext_csd */
2350 mmc->erase_grp_size =
2351 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2354  * if high capacity and partition setting completed
2355  * SEC_COUNT is valid even if it is smaller than 2 GiB
2356  * JEDEC Standard JESD84-B45, 6.2.4
2358 if (mmc->high_capacity && part_completed) {
2359 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2360 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2361 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2362 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2363 capacity *= MMC_MAX_BLOCK_LEN;
2364 mmc->capacity_user = capacity;
2367 #if CONFIG_IS_ENABLED(MMC_WRITE)
2369 /* Calculate the group size from the csd value. */
2370 int erase_gsz, erase_gmul;
2372 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2373 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2374 mmc->erase_grp_size = (erase_gsz + 1)
2378 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2379 mmc->hc_wp_grp_size = 1024
2380 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2381 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2384 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2389 #if !CONFIG_IS_ENABLED(MMC_TINY)
/* error path: drop the cached EXT_CSD pointer */
2392 mmc->ext_csd = NULL;
/*
 * mmc_startup() - bring an identified card from Identification to
 * Transfer state and fill in the mmc/blk descriptors.
 *
 * Sequence: (optional SPI CRC enable) -> CID -> RCA -> CSD decode
 * (version, legacy speed, block lengths, capacity) -> optional DSR ->
 * SELECT_CARD -> v4 EXT_CSD handling -> capability query and bus
 * mode/width negotiation -> block descriptor (blksz, lba, id strings).
 */
2397 static int mmc_startup(struct mmc *mmc)
2403 struct blk_desc *bdesc;
2405 #ifdef CONFIG_MMC_SPI_CRC_ON
2406 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2407 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2408 cmd.resp_type = MMC_RSP_R1;
2410 err = mmc_send_cmd(mmc, &cmd, NULL);
2416 /* Put the Card in Identify Mode */
2417 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2418 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2419 cmd.resp_type = MMC_RSP_R2;
2422 err = mmc_send_cmd(mmc, &cmd, NULL);
2424 #ifdef CONFIG_MMC_QUIRKS
2425 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2428  * It has been seen that SEND_CID may fail on the first
2429  * attempt, let's try a few more times
2432 err = mmc_send_cmd(mmc, &cmd, NULL);
2435 } while (retries--);
2442 memcpy(mmc->cid, cmd.response, 16);
2445  * For MMC cards, set the Relative Address.
2446  * For SD cards, get the Relative Address.
2447  * This also puts the cards into Standby State
2449 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2450 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2451 cmd.cmdarg = mmc->rca << 16;
2452 cmd.resp_type = MMC_RSP_R6;
2454 err = mmc_send_cmd(mmc, &cmd, NULL);
/* for SD the card assigns the RCA; read it out of the R6 response */
2460 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2463 /* Get the Card-Specific Data */
2464 cmd.cmdidx = MMC_CMD_SEND_CSD;
2465 cmd.resp_type = MMC_RSP_R2;
2466 cmd.cmdarg = mmc->rca << 16;
2468 err = mmc_send_cmd(mmc, &cmd, NULL);
2473 mmc->csd[0] = cmd.response[0];
2474 mmc->csd[1] = cmd.response[1];
2475 mmc->csd[2] = cmd.response[2];
2476 mmc->csd[3] = cmd.response[3];
/* SPEC_VERS field of the CSD -> coarse MMC version */
2478 if (mmc->version == MMC_VERSION_UNKNOWN) {
2479 int version = (cmd.response[0] >> 26) & 0xf;
2483 mmc->version = MMC_VERSION_1_2;
2486 mmc->version = MMC_VERSION_1_4;
2489 mmc->version = MMC_VERSION_2_2;
2492 mmc->version = MMC_VERSION_3;
2495 mmc->version = MMC_VERSION_4;
2498 mmc->version = MMC_VERSION_1_2;
2503 /* divide frequency by 10, since the mults are 10x bigger */
2504 freq = fbase[(cmd.response[0] & 0x7)];
2505 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2507 mmc->legacy_speed = freq * mult;
2508 mmc_select_mode(mmc, MMC_LEGACY);
2510 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2511 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2512 #if CONFIG_IS_ENABLED(MMC_WRITE)
2515 mmc->write_bl_len = mmc->read_bl_len;
2517 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
/* capacity: CSD v2 (high capacity) vs v1 encodings of C_SIZE/C_SIZE_MULT */
2520 if (mmc->high_capacity) {
2521 csize = (mmc->csd[1] & 0x3f) << 16
2522 | (mmc->csd[2] & 0xffff0000) >> 16;
2525 csize = (mmc->csd[1] & 0x3ff) << 2
2526 | (mmc->csd[2] & 0xc0000000) >> 30;
2527 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2530 mmc->capacity_user = (csize + 1) << (cmult + 2);
2531 mmc->capacity_user *= mmc->read_bl_len;
2532 mmc->capacity_boot = 0;
2533 mmc->capacity_rpmb = 0;
2534 for (i = 0; i < 4; i++)
2535 mmc->capacity_gp[i] = 0;
/* clamp block lengths to what the framework can transfer */
2537 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2538 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2540 #if CONFIG_IS_ENABLED(MMC_WRITE)
2541 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2542 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
/* program the driver stage register only if implemented and configured */
2545 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2546 cmd.cmdidx = MMC_CMD_SET_DSR;
2547 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2548 cmd.resp_type = MMC_RSP_NONE;
2549 if (mmc_send_cmd(mmc, &cmd, NULL))
2550 pr_warn("MMC: SET_DSR failed\n");
2553 /* Select the card, and put it into Transfer Mode */
2554 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2555 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2556 cmd.resp_type = MMC_RSP_R1;
2557 cmd.cmdarg = mmc->rca << 16;
2558 err = mmc_send_cmd(mmc, &cmd, NULL);
2565  * For SD, its erase group is always one sector
2567 #if CONFIG_IS_ENABLED(MMC_WRITE)
2568 mmc->erase_grp_size = 1;
2570 mmc->part_config = MMCPART_NOAVAILABLE;
2572 err = mmc_startup_v4(mmc);
2576 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2580 #if CONFIG_IS_ENABLED(MMC_TINY)
/* MMC_TINY: stay in 1-bit legacy mode, skip mode negotiation */
2581 mmc_set_clock(mmc, mmc->legacy_speed, false);
2582 mmc_select_mode(mmc, MMC_LEGACY);
2583 mmc_set_bus_width(mmc, 1);
2586 err = sd_get_capabilities(mmc);
2589 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2591 err = mmc_get_capabilities(mmc);
2594 err = mmc_select_mode_and_width(mmc, mmc->card_caps);
2600 mmc->best_mode = mmc->selected_mode;
2602 /* Fix the block length for DDR mode */
2603 if (mmc->ddr_mode) {
2604 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2605 #if CONFIG_IS_ENABLED(MMC_WRITE)
2606 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2610 /* fill in device description */
2611 bdesc = mmc_get_blk_desc(mmc);
2615 bdesc->blksz = mmc->read_bl_len;
2616 bdesc->log2blksz = LOG2(bdesc->blksz);
2617 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2618 #if !defined(CONFIG_SPL_BUILD) || \
2619 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2620 !CONFIG_IS_ENABLED(USE_TINY_PRINTF))
/* identification strings decoded from the CID register */
2621 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2622 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2623 (mmc->cid[3] >> 16) & 0xffff);
2624 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2625 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2626 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2627 (mmc->cid[2] >> 24) & 0xff);
2628 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2629 (mmc->cid[2] >> 16) & 0xf);
2631 bdesc->vendor[0] = 0;
2632 bdesc->product[0] = 0;
2633 bdesc->revision[0] = 0;
2636 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
2636 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
/*
 * mmc_send_if_cond() - issue CMD8 (SEND_IF_COND) to probe for an SD v2
 * card. The card must echo the 0xaa check pattern back; on success the
 * version is recorded as SD_VERSION_2.
 */
2643 static int mmc_send_if_cond(struct mmc *mmc)
2648 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2649 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2650 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2651 cmd.resp_type = MMC_RSP_R7;
2653 err = mmc_send_cmd(mmc, &cmd, NULL);
/* a v2 card echoes the check pattern; anything else is not SD v2 */
2658 if ((cmd.response[0] & 0xff) != 0xaa)
2661 mmc->version = SD_VERSION_2;
2666 #if !CONFIG_IS_ENABLED(DM_MMC)
2667 /* board-specific MMC power initializations. */
2668 __weak void board_mmc_power_init(void)
/*
 * mmc_power_init() - look up the card's power supplies.
 * With DM_MMC + DM_REGULATOR this resolves the "vmmc-supply" and
 * "vqmmc-supply" regulators from the device tree (a missing supply is
 * only a pr_debug, not an error). Without driver model it falls back
 * to the board_mmc_power_init() hook.
 */
2673 static int mmc_power_init(struct mmc *mmc)
2675 #if CONFIG_IS_ENABLED(DM_MMC)
2676 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2679 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2682 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2684 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2685 &mmc->vqmmc_supply);
2687 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2689 #else /* !CONFIG_DM_MMC */
2691  * Driver model should use a regulator, as above, rather than calling
2692  * out to board code.
2694 board_mmc_power_init();
2700  * put the host in the initial state:
2701  * - turn on Vdd (card power supply)
2702  * - configure the bus width and clock to minimal values
2704 static void mmc_set_initial_state(struct mmc *mmc)
2708 /* First try to set 3.3V. If it fails set to 1.8V */
2709 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2711 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2713 pr_warn("mmc: failed to set signal voltage\n");
/* minimal, universally-supported bus configuration for card detection */
2715 mmc_select_mode(mmc, MMC_LEGACY);
2716 mmc_set_bus_width(mmc, 1);
2717 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
/*
 * mmc_power_on() - enable the card's Vdd supply (vmmc regulator) when
 * driver-model regulators are available; otherwise nothing to do here.
 */
2720 static int mmc_power_on(struct mmc *mmc)
2722 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2723 if (mmc->vmmc_supply) {
2724 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2727 puts("Error enabling VMMC supply\n");
/*
 * mmc_power_off() - gate the bus clock and, when driver-model
 * regulators are available, disable the card's Vdd supply. A regulator
 * failure is only logged at debug level (some supplies cannot be
 * switched off).
 */
2735 static int mmc_power_off(struct mmc *mmc)
2737 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2738 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2739 if (mmc->vmmc_supply) {
2740 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2743 pr_debug("Error disabling VMMC supply\n");
/*
 * mmc_power_cycle() - full power cycle of the card: power off, let the
 * host do any controller-side cycling, wait for the supply to drain,
 * then power back on. Needed e.g. to recover from a failed UHS
 * voltage switch.
 */
2751 static int mmc_power_cycle(struct mmc *mmc)
2755 ret = mmc_power_off(mmc);
2759 ret = mmc_host_power_cycle(mmc);
2764  * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2765  * to be on the safer side.
2768 return mmc_power_on(mmc);
/*
 * mmc_get_op_cond() - power up the card and query its operating
 * conditions (the first phase of initialization).
 *
 * Powers/initializes the host, resets the card with CMD0, then probes
 * for SD v2 (CMD8), SD v1 (ACMD41) and finally MMC (CMD1). UHS modes
 * are only attempted if a full power cycle is possible, since a failed
 * 1.8V switch can otherwise not be recovered from.
 */
2771 int mmc_get_op_cond(struct mmc *mmc)
2773 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2779 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2780 mmc_adapter_card_type_ident();
2782 err = mmc_power_init(mmc);
2786 #ifdef CONFIG_MMC_QUIRKS
/* enable the retry quirks by default; harmless on well-behaved cards */
2787 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2788 MMC_QUIRK_RETRY_SEND_CID |
2789 MMC_QUIRK_RETRY_APP_CMD;
2792 err = mmc_power_cycle(mmc);
2795  * if power cycling is not supported, we should not try
2796  * to use the UHS modes, because we wouldn't be able to
2797  * recover from an error during the UHS initialization.
2799 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2801 mmc->host_caps &= ~UHS_CAPS;
2802 err = mmc_power_on(mmc);
2807 #if CONFIG_IS_ENABLED(DM_MMC)
2808 /* The device has already been probed ready for use */
2810 /* made sure it's not NULL earlier */
2811 err = mmc->cfg->ops->init(mmc);
2818 mmc_set_initial_state(mmc);
2820 /* Reset the Card */
2821 err = mmc_go_idle(mmc);
2826 /* The internal partition reset to user partition(0) at every CMD0*/
2827 mmc_get_blk_desc(mmc)->hwpart = 0;
2829 /* Test for SD version 2 */
2830 err = mmc_send_if_cond(mmc);
2832 /* Now try to get the SD card's operating condition */
2833 err = sd_send_op_cond(mmc, uhs_en);
/* a failed UHS attempt requires a power cycle before retrying */
2834 if (err && uhs_en) {
2836 mmc_power_cycle(mmc);
2840 /* If the command timed out, we check for an MMC card */
2841 if (err == -ETIMEDOUT) {
2842 err = mmc_send_op_cond(mmc);
2845 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2846 pr_err("Card did not respond to voltage select!\n");
/*
 * mmc_start_init() - begin (possibly asynchronous) card initialization.
 *
 * Seeds host_caps with the universally-supported baseline (legacy mode,
 * 1-bit bus), performs card-detect, then runs the operating-condition
 * phase via mmc_get_op_cond() and marks init as in progress so
 * mmc_complete_init() can finish the job later.
 */
2855 int mmc_start_init(struct mmc *mmc)
2861  * all hosts are capable of 1 bit bus-width and able to use the legacy
/* fix: MMC_CAP(MMC_LEGACY) was OR'ed in twice; once is enough */
2864 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
2865 MMC_MODE_1BIT;
2866 #if CONFIG_IS_ENABLED(DM_MMC)
2867 mmc_deferred_probe(mmc);
2869 #if !defined(CONFIG_MMC_BROKEN_CD)
/* mmc_getcd() == 0 means no card is present in the slot */
2870 no_card = mmc_getcd(mmc) == 0;
2874 #if !CONFIG_IS_ENABLED(DM_MMC)
2875 /* we pretend there's no card when init is NULL */
2876 no_card = no_card || (mmc->cfg->ops->init == NULL);
2880 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2881 pr_err("MMC: no card present\n");
2886 err = mmc_get_op_cond(mmc);
2889 mmc->init_in_progress = 1;
/*
 * mmc_complete_init() - finish an initialization begun by
 * mmc_start_init(): complete any pending SEND_OP_COND polling, then run
 * the full startup sequence. Clears init_in_progress in all cases.
 */
2894 static int mmc_complete_init(struct mmc *mmc)
2898 mmc->init_in_progress = 0;
2899 if (mmc->op_cond_pending)
2900 err = mmc_complete_op_cond(mmc);
2903 err = mmc_startup(mmc);
/*
 * mmc_init() - synchronous card initialization entry point.
 * Runs mmc_start_init() unless an async init is already in progress,
 * then completes it, and logs the result and elapsed time.
 */
2911 int mmc_init(struct mmc *mmc)
2914 __maybe_unused ulong start;
2915 #if CONFIG_IS_ENABLED(DM_MMC)
2916 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2923 start = get_timer(0);
2925 if (!mmc->init_in_progress)
2926 err = mmc_start_init(mmc);
2929 err = mmc_complete_init(mmc);
2931 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2936 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2937 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2938 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_deinit() - drop the card out of the high-speed/tuned modes (UHS
 * for SD, HS200/HS400 for eMMC) by re-running mode selection with those
 * capabilities masked off. Used before handing the card to an OS that
 * will re-initialize it itself.
 */
2939 int mmc_deinit(struct mmc *mmc)
2947 caps_filtered = mmc->card_caps &
2948 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2949 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2950 MMC_CAP(UHS_SDR104));
2952 return sd_select_mode_and_width(mmc, caps_filtered);
2954 caps_filtered = mmc->card_caps &
2955 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2957 return mmc_select_mode_and_width(mmc, caps_filtered);
2962 int mmc_set_dsr(struct mmc *mmc, u16 val)
2968 /* CPU-specific MMC initializations */
2969 __weak int cpu_mmc_init(bd_t *bis)
2974 /* board-specific MMC initializations. */
2975 __weak int board_mmc_init(bd_t *bis)
2980 void mmc_set_preinit(struct mmc *mmc, int preinit)
2982 mmc->preinit = preinit;
2985 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_probe() - discover and probe all MMC devices.
 * DM variant: walk the MMC uclass and probe every device (requesting
 * sequence numbers in order first, to keep the legacy numbering dense).
 * Non-DM variant: defer to the board_mmc_init() hook.
 */
2986 static int mmc_probe(bd_t *bis)
2990 struct udevice *dev;
2992 ret = uclass_get(UCLASS_MMC, &uc);
2997  * Try to add them in sequence order. Really with driver model we
2998  * should allow holes, but the current MMC list does not allow that.
2999  * So if we request 0, 1, 3 we will get 0, 1, 2.
3001 for (i = 0; ; i++) {
3002 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
3006 uclass_foreach_dev(dev, uc) {
3007 ret = device_probe(dev);
3009 pr_err("%s - probe failed: %d\n", dev->name, ret);
3015 static int mmc_probe(bd_t *bis)
3017 if (board_mmc_init(bis) < 0)
/*
 * mmc_initialize() - one-time initialization of the MMC subsystem.
 * Guarded by a static flag so repeated calls are no-ops; probes all
 * devices and (outside SPL) prints the device list.
 */
3024 int mmc_initialize(bd_t *bis)
3026 static int initialized = 0;
3028 if (initialized) /* Avoid initializing mmc multiple times */
3032 #if !CONFIG_IS_ENABLED(BLK)
3033 #if !CONFIG_IS_ENABLED(MMC_TINY)
3037 ret = mmc_probe(bis);
3041 #ifndef CONFIG_SPL_BUILD
3042 print_mmc_devices(',');
3049 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_init_device() - probe MMC device @num via the uclass and fetch
 * its struct mmc. With the FSL eSDHC adapter-ident workaround the
 * device is additionally marked for pre-initialization.
 */
3050 int mmc_init_device(int num)
3052 struct udevice *dev;
3056 ret = uclass_get_device(UCLASS_MMC, num, &dev);
3060 m = mmc_get_mmc_dev(dev);
3063 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
3064 mmc_set_preinit(m, 1);
3073 #ifdef CONFIG_CMD_BKOPS_ENABLE
3074 int mmc_set_bkops_enable(struct mmc *mmc)
3077 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3079 err = mmc_send_ext_csd(mmc, ext_csd);
3081 puts("Could not get ext_csd register values\n");
3085 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3086 puts("Background operations not supported on device\n");
3087 return -EMEDIUMTYPE;
3090 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
3091 puts("Background operations already enabled\n");
3095 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
3097 puts("Failed to enable manual background operations\n");
3101 puts("Enabled manual background operations\n");