1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2008, Freescale Semiconductor, Inc
7 * Based vaguely on the Linux code
16 #include <dm/device-internal.h>
20 #include <linux/bitops.h>
21 #include <linux/delay.h>
22 #include <power/regulator.h>
25 #include <linux/list.h>
27 #include "mmc_private.h"
29 #define DEFAULT_CMD6_TIMEOUT_MS 500
31 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
33 #if !CONFIG_IS_ENABLED(DM_MMC)
35 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
40 __weak int board_mmc_getwp(struct mmc *mmc)
45 int mmc_getwp(struct mmc *mmc)
49 wp = board_mmc_getwp(mmc);
52 if (mmc->cfg->ops->getwp)
53 wp = mmc->cfg->ops->getwp(mmc);
61 __weak int board_mmc_getcd(struct mmc *mmc)
67 #ifdef CONFIG_MMC_TRACE
68 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
70 printf("CMD_SEND:%d\n", cmd->cmdidx);
71 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
74 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
80 printf("\t\tRET\t\t\t %d\n", ret);
82 switch (cmd->resp_type) {
84 printf("\t\tMMC_RSP_NONE\n");
87 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
91 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
95 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
97 printf("\t\t \t\t 0x%08x \n",
99 printf("\t\t \t\t 0x%08x \n",
101 printf("\t\t \t\t 0x%08x \n",
104 printf("\t\t\t\t\tDUMPING DATA\n");
105 for (i = 0; i < 4; i++) {
107 printf("\t\t\t\t\t%03d - ", i*4);
108 ptr = (u8 *)&cmd->response[i];
110 for (j = 0; j < 4; j++)
111 printf("%02x ", *ptr--);
116 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
120 printf("\t\tERROR MMC rsp not supported\n");
126 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
130 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
131 printf("CURR STATE:%d\n", status);
135 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
136 const char *mmc_mode_name(enum bus_mode mode)
138 static const char *const names[] = {
139 [MMC_LEGACY] = "MMC legacy",
140 [MMC_HS] = "MMC High Speed (26MHz)",
141 [SD_HS] = "SD High Speed (50MHz)",
142 [UHS_SDR12] = "UHS SDR12 (25MHz)",
143 [UHS_SDR25] = "UHS SDR25 (50MHz)",
144 [UHS_SDR50] = "UHS SDR50 (100MHz)",
145 [UHS_SDR104] = "UHS SDR104 (208MHz)",
146 [UHS_DDR50] = "UHS DDR50 (50MHz)",
147 [MMC_HS_52] = "MMC High Speed (52MHz)",
148 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
149 [MMC_HS_200] = "HS200 (200MHz)",
150 [MMC_HS_400] = "HS400 (200MHz)",
151 [MMC_HS_400_ES] = "HS400ES (200MHz)",
154 if (mode >= MMC_MODES_END)
155 return "Unknown mode";
161 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
163 static const int freqs[] = {
164 [MMC_LEGACY] = 25000000,
167 [MMC_HS_52] = 52000000,
168 [MMC_DDR_52] = 52000000,
169 [UHS_SDR12] = 25000000,
170 [UHS_SDR25] = 50000000,
171 [UHS_SDR50] = 100000000,
172 [UHS_DDR50] = 50000000,
173 [UHS_SDR104] = 208000000,
174 [MMC_HS_200] = 200000000,
175 [MMC_HS_400] = 200000000,
176 [MMC_HS_400_ES] = 200000000,
179 if (mode == MMC_LEGACY)
180 return mmc->legacy_speed;
181 else if (mode >= MMC_MODES_END)
187 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
189 mmc->selected_mode = mode;
190 mmc->tran_speed = mmc_mode2freq(mmc, mode);
191 mmc->ddr_mode = mmc_is_mode_ddr(mode);
192 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
193 mmc->tran_speed / 1000000);
197 #if !CONFIG_IS_ENABLED(DM_MMC)
198 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
202 mmmc_trace_before_send(mmc, cmd);
203 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
204 mmmc_trace_after_send(mmc, cmd, ret);
211 * mmc_send_cmd_retry() - send a command to the mmc device, retrying on error
213 * @dev: device to receive the command
214 * @cmd: command to send
215 * @data: additional data to send/receive
216 * @retries: how many times to retry; mmc_send_cmd is always called at least
218 * @return 0 if ok, -ve on error
220 static int mmc_send_cmd_retry(struct mmc *mmc, struct mmc_cmd *cmd,
221 struct mmc_data *data, uint retries)
226 ret = mmc_send_cmd(mmc, cmd, data);
227 } while (ret && retries--);
233 * mmc_send_cmd_quirks() - send a command to the mmc device, retrying if a
234 * specific quirk is enabled
236 * @dev: device to receive the command
237 * @cmd: command to send
238 * @data: additional data to send/receive
239 * @quirk: retry only if this quirk is enabled
240 * @retries: how many times to retry; mmc_send_cmd is always called at least
242 * @return 0 if ok, -ve on error
244 static int mmc_send_cmd_quirks(struct mmc *mmc, struct mmc_cmd *cmd,
245 struct mmc_data *data, u32 quirk, uint retries)
247 if (CONFIG_IS_ENABLED(MMC_QUIRKS) && mmc->quirks & quirk)
248 return mmc_send_cmd_retry(mmc, cmd, data, retries);
250 return mmc_send_cmd(mmc, cmd, data);
253 int mmc_send_status(struct mmc *mmc, unsigned int *status)
258 cmd.cmdidx = MMC_CMD_SEND_STATUS;
259 cmd.resp_type = MMC_RSP_R1;
260 if (!mmc_host_is_spi(mmc))
261 cmd.cmdarg = mmc->rca << 16;
263 ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 4);
264 mmc_trace_state(mmc, &cmd);
266 *status = cmd.response[0];
271 int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
276 err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
281 err = mmc_send_status(mmc, &status);
285 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
286 (status & MMC_STATUS_CURR_STATE) !=
290 if (status & MMC_STATUS_MASK) {
291 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
292 pr_err("Status Error: 0x%08x\n", status);
297 if (timeout_ms-- <= 0)
303 if (timeout_ms <= 0) {
304 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
305 pr_err("Timeout waiting card ready\n");
313 int mmc_set_blocklen(struct mmc *mmc, int len)
320 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
321 cmd.resp_type = MMC_RSP_R1;
324 return mmc_send_cmd_quirks(mmc, &cmd, NULL,
325 MMC_QUIRK_RETRY_SET_BLOCKLEN, 4);
328 #ifdef MMC_SUPPORTS_TUNING
329 static const u8 tuning_blk_pattern_4bit[] = {
330 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
331 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
332 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
333 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
334 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
335 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
336 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
337 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
340 static const u8 tuning_blk_pattern_8bit[] = {
341 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
342 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
343 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
344 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
345 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
346 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
347 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
348 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
349 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
350 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
351 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
352 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
353 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
354 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
355 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
356 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
359 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
362 struct mmc_data data;
363 const u8 *tuning_block_pattern;
366 if (mmc->bus_width == 8) {
367 tuning_block_pattern = tuning_blk_pattern_8bit;
368 size = sizeof(tuning_blk_pattern_8bit);
369 } else if (mmc->bus_width == 4) {
370 tuning_block_pattern = tuning_blk_pattern_4bit;
371 size = sizeof(tuning_blk_pattern_4bit);
376 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
380 cmd.resp_type = MMC_RSP_R1;
382 data.dest = (void *)data_buf;
384 data.blocksize = size;
385 data.flags = MMC_DATA_READ;
387 err = mmc_send_cmd(mmc, &cmd, &data);
391 if (memcmp(data_buf, tuning_block_pattern, size))
398 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
402 struct mmc_data data;
405 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
407 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
409 if (mmc->high_capacity)
412 cmd.cmdarg = start * mmc->read_bl_len;
414 cmd.resp_type = MMC_RSP_R1;
417 data.blocks = blkcnt;
418 data.blocksize = mmc->read_bl_len;
419 data.flags = MMC_DATA_READ;
421 if (mmc_send_cmd(mmc, &cmd, &data))
425 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
427 cmd.resp_type = MMC_RSP_R1b;
428 if (mmc_send_cmd(mmc, &cmd, NULL)) {
429 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
430 pr_err("mmc fail to send stop cmd\n");
439 #if !CONFIG_IS_ENABLED(DM_MMC)
440 static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
442 if (mmc->cfg->ops->get_b_max)
443 return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
445 return mmc->cfg->b_max;
449 #if CONFIG_IS_ENABLED(BLK)
450 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
452 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
456 #if CONFIG_IS_ENABLED(BLK)
457 struct blk_desc *block_dev = dev_get_uclass_plat(dev);
459 int dev_num = block_dev->devnum;
461 lbaint_t cur, blocks_todo = blkcnt;
467 struct mmc *mmc = find_mmc_device(dev_num);
471 if (CONFIG_IS_ENABLED(MMC_TINY))
472 err = mmc_switch_part(mmc, block_dev->hwpart);
474 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
479 if ((start + blkcnt) > block_dev->lba) {
480 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
481 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
482 start + blkcnt, block_dev->lba);
487 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
488 pr_debug("%s: Failed to set blocklen\n", __func__);
492 b_max = mmc_get_b_max(mmc, dst, blkcnt);
495 cur = (blocks_todo > b_max) ? b_max : blocks_todo;
496 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
497 pr_debug("%s: Failed to read blocks\n", __func__);
502 dst += cur * mmc->read_bl_len;
503 } while (blocks_todo > 0);
508 static int mmc_go_idle(struct mmc *mmc)
515 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
517 cmd.resp_type = MMC_RSP_NONE;
519 err = mmc_send_cmd(mmc, &cmd, NULL);
529 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
530 static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
536 * Send CMD11 only if the request is to switch the card to
539 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
540 return mmc_set_signal_voltage(mmc, signal_voltage);
542 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
544 cmd.resp_type = MMC_RSP_R1;
546 err = mmc_send_cmd(mmc, &cmd, NULL);
550 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
554 * The card should drive cmd and dat[0:3] low immediately
555 * after the response of cmd11, but wait 100 us to be sure
557 err = mmc_wait_dat0(mmc, 0, 100);
564 * During a signal voltage level switch, the clock must be gated
565 * for 5 ms according to the SD spec
567 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
569 err = mmc_set_signal_voltage(mmc, signal_voltage);
573 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
575 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
578 * Failure to switch is indicated by the card holding
579 * dat[0:3] low. Wait for at least 1 ms according to spec
581 err = mmc_wait_dat0(mmc, 1, 1000);
591 static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
598 cmd.cmdidx = MMC_CMD_APP_CMD;
599 cmd.resp_type = MMC_RSP_R1;
602 err = mmc_send_cmd(mmc, &cmd, NULL);
607 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
608 cmd.resp_type = MMC_RSP_R3;
611 * Most cards do not answer if some reserved bits
612 * in the ocr are set. However, Some controller
613 * can set bit 7 (reserved for low voltages), but
614 * how to manage low voltages SD card is not yet
617 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
618 (mmc->cfg->voltages & 0xff8000);
620 if (mmc->version == SD_VERSION_2)
621 cmd.cmdarg |= OCR_HCS;
624 cmd.cmdarg |= OCR_S18R;
626 err = mmc_send_cmd(mmc, &cmd, NULL);
631 if (cmd.response[0] & OCR_BUSY)
640 if (mmc->version != SD_VERSION_2)
641 mmc->version = SD_VERSION_1_0;
643 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
644 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
645 cmd.resp_type = MMC_RSP_R3;
648 err = mmc_send_cmd(mmc, &cmd, NULL);
654 mmc->ocr = cmd.response[0];
656 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
657 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
659 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
665 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
671 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
676 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
677 cmd.resp_type = MMC_RSP_R3;
679 if (use_arg && !mmc_host_is_spi(mmc))
680 cmd.cmdarg = OCR_HCS |
681 (mmc->cfg->voltages &
682 (mmc->ocr & OCR_VOLTAGE_MASK)) |
683 (mmc->ocr & OCR_ACCESS_MODE);
685 err = mmc_send_cmd(mmc, &cmd, NULL);
688 mmc->ocr = cmd.response[0];
692 static int mmc_send_op_cond(struct mmc *mmc)
698 /* Some cards seem to need this */
701 start = get_timer(0);
702 /* Asking to the card its capabilities */
704 err = mmc_send_op_cond_iter(mmc, i != 0);
708 /* exit if not busy (flag seems to be inverted) */
709 if (mmc->ocr & OCR_BUSY)
712 if (get_timer(start) > timeout)
716 mmc->op_cond_pending = 1;
720 static int mmc_complete_op_cond(struct mmc *mmc)
727 mmc->op_cond_pending = 0;
728 if (!(mmc->ocr & OCR_BUSY)) {
729 /* Some cards seem to need this */
732 start = get_timer(0);
734 err = mmc_send_op_cond_iter(mmc, 1);
737 if (mmc->ocr & OCR_BUSY)
739 if (get_timer(start) > timeout)
745 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
746 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
747 cmd.resp_type = MMC_RSP_R3;
750 err = mmc_send_cmd(mmc, &cmd, NULL);
755 mmc->ocr = cmd.response[0];
758 mmc->version = MMC_VERSION_UNKNOWN;
760 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
767 int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
770 struct mmc_data data;
773 /* Get the Card Status Register */
774 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
775 cmd.resp_type = MMC_RSP_R1;
778 data.dest = (char *)ext_csd;
780 data.blocksize = MMC_MAX_BLOCK_LEN;
781 data.flags = MMC_DATA_READ;
783 err = mmc_send_cmd(mmc, &cmd, &data);
788 static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
791 unsigned int status, start;
793 int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
794 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
795 (index == EXT_CSD_PART_CONF);
798 if (mmc->gen_cmd6_time)
799 timeout_ms = mmc->gen_cmd6_time * 10;
801 if (is_part_switch && mmc->part_switch_time)
802 timeout_ms = mmc->part_switch_time * 10;
804 cmd.cmdidx = MMC_CMD_SWITCH;
805 cmd.resp_type = MMC_RSP_R1b;
806 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
810 ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 3);
814 start = get_timer(0);
816 /* poll dat0 for rdy/buys status */
817 ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
818 if (ret && ret != -ENOSYS)
822 * In cases when not allowed to poll by using CMD13 or because we aren't
823 * capable of polling by using mmc_wait_dat0, then rely on waiting the
824 * stated timeout to be sufficient.
826 if (ret == -ENOSYS && !send_status) {
831 /* Finally wait until the card is ready or indicates a failure
832 * to switch. It doesn't hurt to use CMD13 here even if send_status
833 * is false, because by now (after 'timeout_ms' ms) the bus should be
837 ret = mmc_send_status(mmc, &status);
839 if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
840 pr_debug("switch failed %d/%d/0x%x !\n", set, index,
844 if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
847 } while (get_timer(start) < timeout_ms);
852 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
854 return __mmc_switch(mmc, set, index, value, true);
857 int mmc_boot_wp(struct mmc *mmc)
859 return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
862 #if !CONFIG_IS_ENABLED(MMC_TINY)
863 static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
869 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
875 speed_bits = EXT_CSD_TIMING_HS;
877 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
879 speed_bits = EXT_CSD_TIMING_HS200;
882 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
884 speed_bits = EXT_CSD_TIMING_HS400;
887 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
889 speed_bits = EXT_CSD_TIMING_HS400;
893 speed_bits = EXT_CSD_TIMING_LEGACY;
899 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
900 speed_bits, !hsdowngrade);
904 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
905 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
907 * In case the eMMC is in HS200/HS400 mode and we are downgrading
908 * to HS mode, the card clock are still running much faster than
909 * the supported HS mode clock, so we can not reliably read out
910 * Extended CSD. Reconfigure the controller to run at HS mode.
913 mmc_select_mode(mmc, MMC_HS);
914 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
918 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
919 /* Now check to see that it worked */
920 err = mmc_send_ext_csd(mmc, test_csd);
924 /* No high-speed support */
925 if (!test_csd[EXT_CSD_HS_TIMING])
932 static int mmc_get_capabilities(struct mmc *mmc)
934 u8 *ext_csd = mmc->ext_csd;
937 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
939 if (mmc_host_is_spi(mmc))
942 /* Only version 4 supports high-speed */
943 if (mmc->version < MMC_VERSION_4)
947 pr_err("No ext_csd found!\n"); /* this should enver happen */
951 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
953 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
954 mmc->cardtype = cardtype;
956 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
957 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
958 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
959 mmc->card_caps |= MMC_MODE_HS200;
962 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
963 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
964 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
965 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
966 mmc->card_caps |= MMC_MODE_HS400;
969 if (cardtype & EXT_CSD_CARD_TYPE_52) {
970 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
971 mmc->card_caps |= MMC_MODE_DDR_52MHz;
972 mmc->card_caps |= MMC_MODE_HS_52MHz;
974 if (cardtype & EXT_CSD_CARD_TYPE_26)
975 mmc->card_caps |= MMC_MODE_HS;
977 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
978 if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
979 (mmc->card_caps & MMC_MODE_HS400)) {
980 mmc->card_caps |= MMC_MODE_HS400_ES;
988 static int mmc_set_capacity(struct mmc *mmc, int part_num)
992 mmc->capacity = mmc->capacity_user;
996 mmc->capacity = mmc->capacity_boot;
999 mmc->capacity = mmc->capacity_rpmb;
1005 mmc->capacity = mmc->capacity_gp[part_num - 4];
1011 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1016 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1022 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1024 (mmc->part_config & ~PART_ACCESS_MASK)
1025 | (part_num & PART_ACCESS_MASK));
1026 } while (ret && retry--);
1029 * Set the capacity if the switch succeeded or was intended
1030 * to return to representing the raw device.
1032 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1033 ret = mmc_set_capacity(mmc, part_num);
1034 mmc_get_blk_desc(mmc)->hwpart = part_num;
1040 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
1041 int mmc_hwpart_config(struct mmc *mmc,
1042 const struct mmc_hwpart_conf *conf,
1043 enum mmc_hwpart_conf_mode mode)
1048 u32 gp_size_mult[4];
1049 u32 max_enh_size_mult;
1050 u32 tot_enh_size_mult = 0;
1053 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1055 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1058 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1059 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1060 return -EMEDIUMTYPE;
1063 if (!(mmc->part_support & PART_SUPPORT)) {
1064 pr_err("Card does not support partitioning\n");
1065 return -EMEDIUMTYPE;
1068 if (!mmc->hc_wp_grp_size) {
1069 pr_err("Card does not define HC WP group size\n");
1070 return -EMEDIUMTYPE;
1073 /* check partition alignment and total enhanced size */
1074 if (conf->user.enh_size) {
1075 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1076 conf->user.enh_start % mmc->hc_wp_grp_size) {
1077 pr_err("User data enhanced area not HC WP group "
1081 part_attrs |= EXT_CSD_ENH_USR;
1082 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1083 if (mmc->high_capacity) {
1084 enh_start_addr = conf->user.enh_start;
1086 enh_start_addr = (conf->user.enh_start << 9);
1092 tot_enh_size_mult += enh_size_mult;
1094 for (pidx = 0; pidx < 4; pidx++) {
1095 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1096 pr_err("GP%i partition not HC WP group size "
1097 "aligned\n", pidx+1);
1100 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1101 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1102 part_attrs |= EXT_CSD_ENH_GP(pidx);
1103 tot_enh_size_mult += gp_size_mult[pidx];
1107 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1108 pr_err("Card does not support enhanced attribute\n");
1109 return -EMEDIUMTYPE;
1112 err = mmc_send_ext_csd(mmc, ext_csd);
1117 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1118 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1119 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1120 if (tot_enh_size_mult > max_enh_size_mult) {
1121 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1122 tot_enh_size_mult, max_enh_size_mult);
1123 return -EMEDIUMTYPE;
1126 /* The default value of EXT_CSD_WR_REL_SET is device
1127 * dependent, the values can only be changed if the
1128 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1129 * changed only once and before partitioning is completed. */
1130 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1131 if (conf->user.wr_rel_change) {
1132 if (conf->user.wr_rel_set)
1133 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1135 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1137 for (pidx = 0; pidx < 4; pidx++) {
1138 if (conf->gp_part[pidx].wr_rel_change) {
1139 if (conf->gp_part[pidx].wr_rel_set)
1140 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1142 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1146 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1147 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1148 puts("Card does not support host controlled partition write "
1149 "reliability settings\n");
1150 return -EMEDIUMTYPE;
1153 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1154 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1155 pr_err("Card already partitioned\n");
1159 if (mode == MMC_HWPART_CONF_CHECK)
1162 /* Partitioning requires high-capacity size definitions */
1163 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1164 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1165 EXT_CSD_ERASE_GROUP_DEF, 1);
1170 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1172 #if CONFIG_IS_ENABLED(MMC_WRITE)
1173 /* update erase group size to be high-capacity */
1174 mmc->erase_grp_size =
1175 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1180 /* all OK, write the configuration */
1181 for (i = 0; i < 4; i++) {
1182 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1183 EXT_CSD_ENH_START_ADDR+i,
1184 (enh_start_addr >> (i*8)) & 0xFF);
1188 for (i = 0; i < 3; i++) {
1189 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1190 EXT_CSD_ENH_SIZE_MULT+i,
1191 (enh_size_mult >> (i*8)) & 0xFF);
1195 for (pidx = 0; pidx < 4; pidx++) {
1196 for (i = 0; i < 3; i++) {
1197 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1198 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1199 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1204 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1205 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1209 if (mode == MMC_HWPART_CONF_SET)
1212 /* The WR_REL_SET is a write-once register but shall be
1213 * written before setting PART_SETTING_COMPLETED. As it is
1214 * write-once we can only write it when completing the
1216 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1217 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1218 EXT_CSD_WR_REL_SET, wr_rel_set);
1223 /* Setting PART_SETTING_COMPLETED confirms the partition
1224 * configuration but it only becomes effective after power
1225 * cycle, so we do not adjust the partition related settings
1226 * in the mmc struct. */
1228 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1229 EXT_CSD_PARTITION_SETTING,
1230 EXT_CSD_PARTITION_SETTING_COMPLETED);
1238 #if !CONFIG_IS_ENABLED(DM_MMC)
1239 int mmc_getcd(struct mmc *mmc)
1243 cd = board_mmc_getcd(mmc);
1246 if (mmc->cfg->ops->getcd)
1247 cd = mmc->cfg->ops->getcd(mmc);
1256 #if !CONFIG_IS_ENABLED(MMC_TINY)
1257 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1260 struct mmc_data data;
1262 /* Switch the frequency */
1263 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1264 cmd.resp_type = MMC_RSP_R1;
1265 cmd.cmdarg = (mode << 31) | 0xffffff;
1266 cmd.cmdarg &= ~(0xf << (group * 4));
1267 cmd.cmdarg |= value << (group * 4);
1269 data.dest = (char *)resp;
1270 data.blocksize = 64;
1272 data.flags = MMC_DATA_READ;
1274 return mmc_send_cmd(mmc, &cmd, &data);
1277 static int sd_get_capabilities(struct mmc *mmc)
1281 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1282 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1283 struct mmc_data data;
1285 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1289 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
1291 if (mmc_host_is_spi(mmc))
1294 /* Read the SCR to find out if this card supports higher speeds */
1295 cmd.cmdidx = MMC_CMD_APP_CMD;
1296 cmd.resp_type = MMC_RSP_R1;
1297 cmd.cmdarg = mmc->rca << 16;
1299 err = mmc_send_cmd(mmc, &cmd, NULL);
1304 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1305 cmd.resp_type = MMC_RSP_R1;
1308 data.dest = (char *)scr;
1311 data.flags = MMC_DATA_READ;
1313 err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);
1318 mmc->scr[0] = __be32_to_cpu(scr[0]);
1319 mmc->scr[1] = __be32_to_cpu(scr[1]);
1321 switch ((mmc->scr[0] >> 24) & 0xf) {
1323 mmc->version = SD_VERSION_1_0;
1326 mmc->version = SD_VERSION_1_10;
1329 mmc->version = SD_VERSION_2;
1330 if ((mmc->scr[0] >> 15) & 0x1)
1331 mmc->version = SD_VERSION_3;
1334 mmc->version = SD_VERSION_1_0;
1338 if (mmc->scr[0] & SD_DATA_4BIT)
1339 mmc->card_caps |= MMC_MODE_4BIT;
1341 /* Version 1.0 doesn't support switching */
1342 if (mmc->version == SD_VERSION_1_0)
1347 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1348 (u8 *)switch_status);
1353 /* The high-speed function is busy. Try again */
1354 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1358 /* If high-speed isn't supported, we return */
1359 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1360 mmc->card_caps |= MMC_CAP(SD_HS);
1362 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1363 /* Version before 3.0 don't support UHS modes */
1364 if (mmc->version < SD_VERSION_3)
1367 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1368 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1369 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1370 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1371 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1372 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1373 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1374 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1375 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1376 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1377 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1383 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1387 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1390 /* SD version 1.00 and 1.01 does not support CMD 6 */
1391 if (mmc->version == SD_VERSION_1_0)
1396 speed = UHS_SDR12_BUS_SPEED;
1399 speed = HIGH_SPEED_BUS_SPEED;
1401 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1403 speed = UHS_SDR12_BUS_SPEED;
1406 speed = UHS_SDR25_BUS_SPEED;
1409 speed = UHS_SDR50_BUS_SPEED;
1412 speed = UHS_DDR50_BUS_SPEED;
1415 speed = UHS_SDR104_BUS_SPEED;
1422 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1426 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1432 static int sd_select_bus_width(struct mmc *mmc, int w)
1437 if ((w != 4) && (w != 1))
1440 cmd.cmdidx = MMC_CMD_APP_CMD;
1441 cmd.resp_type = MMC_RSP_R1;
1442 cmd.cmdarg = mmc->rca << 16;
1444 err = mmc_send_cmd(mmc, &cmd, NULL);
1448 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1449 cmd.resp_type = MMC_RSP_R1;
1454 err = mmc_send_cmd(mmc, &cmd, NULL);
1462 #if CONFIG_IS_ENABLED(MMC_WRITE)
1463 static int sd_read_ssr(struct mmc *mmc)
1465 static const unsigned int sd_au_size[] = {
1466 0, SZ_16K / 512, SZ_32K / 512,
1467 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1468 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1469 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1470 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1475 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1476 struct mmc_data data;
1477 unsigned int au, eo, et, es;
1479 cmd.cmdidx = MMC_CMD_APP_CMD;
1480 cmd.resp_type = MMC_RSP_R1;
1481 cmd.cmdarg = mmc->rca << 16;
1483 err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_APP_CMD, 4);
1487 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1488 cmd.resp_type = MMC_RSP_R1;
1491 data.dest = (char *)ssr;
1492 data.blocksize = 64;
1494 data.flags = MMC_DATA_READ;
1496 err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);
1500 for (i = 0; i < 16; i++)
1501 ssr[i] = be32_to_cpu(ssr[i]);
1503 au = (ssr[2] >> 12) & 0xF;
1504 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1505 mmc->ssr.au = sd_au_size[au];
1506 es = (ssr[3] >> 24) & 0xFF;
1507 es |= (ssr[2] & 0xFF) << 8;
1508 et = (ssr[3] >> 18) & 0x3F;
1510 eo = (ssr[3] >> 16) & 0x3;
1511 mmc->ssr.erase_timeout = (et * 1000) / es;
1512 mmc->ssr.erase_offset = eo * 1000;
1515 pr_debug("Invalid Allocation Unit Size.\n");
1521 /* frequency bases */
1522 /* divided by 10 to be nice to platforms without floating point */
1523 static const int fbase[] = {
1530 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1531 * to platforms without floating point.
1533 static const u8 multipliers[] = {
1552 static inline int bus_width(uint cap)
1554 if (cap == MMC_MODE_8BIT)
1556 if (cap == MMC_MODE_4BIT)
1558 if (cap == MMC_MODE_1BIT)
1560 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1564 #if !CONFIG_IS_ENABLED(DM_MMC)
1565 #ifdef MMC_SUPPORTS_TUNING
1566 static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1572 static int mmc_set_ios(struct mmc *mmc)
1576 if (mmc->cfg->ops->set_ios)
1577 ret = mmc->cfg->ops->set_ios(mmc);
1582 static int mmc_host_power_cycle(struct mmc *mmc)
1586 if (mmc->cfg->ops->host_power_cycle)
1587 ret = mmc->cfg->ops->host_power_cycle(mmc);
1593 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1596 if (clock > mmc->cfg->f_max)
1597 clock = mmc->cfg->f_max;
1599 if (clock < mmc->cfg->f_min)
1600 clock = mmc->cfg->f_min;
1604 mmc->clk_disable = disable;
1606 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1608 return mmc_set_ios(mmc);
1611 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1613 mmc->bus_width = width;
1615 return mmc_set_ios(mmc);
1618 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1620 * helper function to display the capabilities in a human
1621 * friendly manner. The capabilities include bus width and
1624 void mmc_dump_capabilities(const char *text, uint caps)
1628 pr_debug("%s: widths [", text);
1629 if (caps & MMC_MODE_8BIT)
1631 if (caps & MMC_MODE_4BIT)
1633 if (caps & MMC_MODE_1BIT)
1635 pr_debug("\b\b] modes [");
1636 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1637 if (MMC_CAP(mode) & caps)
1638 pr_debug("%s, ", mmc_mode_name(mode));
1639 pr_debug("\b\b]\n");
1643 struct mode_width_tuning {
1646 #ifdef MMC_SUPPORTS_TUNING
1651 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1652 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1655 case MMC_SIGNAL_VOLTAGE_000: return 0;
1656 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1657 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1658 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1663 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1667 if (mmc->signal_voltage == signal_voltage)
1670 mmc->signal_voltage = signal_voltage;
1671 err = mmc_set_ios(mmc);
1673 pr_debug("unable to set voltage (err %d)\n", err);
1678 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1684 #if !CONFIG_IS_ENABLED(MMC_TINY)
1685 static const struct mode_width_tuning sd_modes_by_pref[] = {
1686 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1687 #ifdef MMC_SUPPORTS_TUNING
1690 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1691 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1696 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1700 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1704 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1709 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1711 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1714 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1719 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1723 #define for_each_sd_mode_by_pref(caps, mwt) \
1724 for (mwt = sd_modes_by_pref;\
1725 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1727 if (caps & MMC_CAP(mwt->mode))
1729 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1732 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1733 const struct mode_width_tuning *mwt;
1734 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1735 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1737 bool uhs_en = false;
1742 mmc_dump_capabilities("sd card", card_caps);
1743 mmc_dump_capabilities("host", mmc->host_caps);
1746 if (mmc_host_is_spi(mmc)) {
1747 mmc_set_bus_width(mmc, 1);
1748 mmc_select_mode(mmc, MMC_LEGACY);
1749 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1750 #if CONFIG_IS_ENABLED(MMC_WRITE)
1751 err = sd_read_ssr(mmc);
1753 pr_warn("unable to read ssr\n");
1758 /* Restrict card's capabilities by what the host can do */
1759 caps = card_caps & mmc->host_caps;
1764 for_each_sd_mode_by_pref(caps, mwt) {
1767 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1768 if (*w & caps & mwt->widths) {
1769 pr_debug("trying mode %s width %d (at %d MHz)\n",
1770 mmc_mode_name(mwt->mode),
1772 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1774 /* configure the bus width (card + host) */
1775 err = sd_select_bus_width(mmc, bus_width(*w));
1778 mmc_set_bus_width(mmc, bus_width(*w));
1780 /* configure the bus mode (card) */
1781 err = sd_set_card_speed(mmc, mwt->mode);
1785 /* configure the bus mode (host) */
1786 mmc_select_mode(mmc, mwt->mode);
1787 mmc_set_clock(mmc, mmc->tran_speed,
1790 #ifdef MMC_SUPPORTS_TUNING
1791 /* execute tuning if needed */
1792 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1793 err = mmc_execute_tuning(mmc,
1796 pr_debug("tuning failed\n");
1802 #if CONFIG_IS_ENABLED(MMC_WRITE)
1803 err = sd_read_ssr(mmc);
1805 pr_warn("unable to read ssr\n");
1811 /* revert to a safer bus speed */
1812 mmc_select_mode(mmc, MMC_LEGACY);
1813 mmc_set_clock(mmc, mmc->tran_speed,
1819 pr_err("unable to select a mode\n");
1824 * read the compare the part of ext csd that is constant.
1825 * This can be used to check that the transfer is working
1828 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1831 const u8 *ext_csd = mmc->ext_csd;
1832 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1834 if (mmc->version < MMC_VERSION_4)
1837 err = mmc_send_ext_csd(mmc, test_csd);
1841 /* Only compare read only fields */
1842 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1843 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1844 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1845 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1846 ext_csd[EXT_CSD_REV]
1847 == test_csd[EXT_CSD_REV] &&
1848 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1849 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1850 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1851 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1857 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1858 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1859 uint32_t allowed_mask)
1867 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1868 EXT_CSD_CARD_TYPE_HS400_1_8V))
1869 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1870 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1871 EXT_CSD_CARD_TYPE_HS400_1_2V))
1872 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1875 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1876 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1877 MMC_SIGNAL_VOLTAGE_180;
1878 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1879 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1882 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1886 while (card_mask & allowed_mask) {
1887 enum mmc_voltage best_match;
1889 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1890 if (!mmc_set_signal_voltage(mmc, best_match))
1893 allowed_mask &= ~best_match;
1899 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1900 uint32_t allowed_mask)
1906 static const struct mode_width_tuning mmc_modes_by_pref[] = {
1907 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1909 .mode = MMC_HS_400_ES,
1910 .widths = MMC_MODE_8BIT,
1913 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1916 .widths = MMC_MODE_8BIT,
1917 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1920 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1923 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1924 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1929 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1933 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1937 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1941 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1945 #define for_each_mmc_mode_by_pref(caps, mwt) \
1946 for (mwt = mmc_modes_by_pref;\
1947 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1949 if (caps & MMC_CAP(mwt->mode))
1951 static const struct ext_csd_bus_width {
1955 } ext_csd_bus_width[] = {
1956 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1957 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1958 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1959 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1960 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1963 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1964 static int mmc_select_hs400(struct mmc *mmc)
1968 /* Set timing to HS200 for tuning */
1969 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1973 /* configure the bus mode (host) */
1974 mmc_select_mode(mmc, MMC_HS_200);
1975 mmc_set_clock(mmc, mmc->tran_speed, false);
1977 /* execute tuning if needed */
1978 mmc->hs400_tuning = 1;
1979 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1980 mmc->hs400_tuning = 0;
1982 debug("tuning failed\n");
1986 /* Set back to HS */
1987 mmc_set_card_speed(mmc, MMC_HS, true);
1989 err = mmc_hs400_prepare_ddr(mmc);
1993 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1994 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1998 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
2002 mmc_select_mode(mmc, MMC_HS_400);
2003 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2010 static int mmc_select_hs400(struct mmc *mmc)
2016 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2017 #if !CONFIG_IS_ENABLED(DM_MMC)
2018 static int mmc_set_enhanced_strobe(struct mmc *mmc)
2023 static int mmc_select_hs400es(struct mmc *mmc)
2027 err = mmc_set_card_speed(mmc, MMC_HS, true);
2031 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2032 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2033 EXT_CSD_BUS_WIDTH_STROBE);
2035 printf("switch to bus width for hs400 failed\n");
2038 /* TODO: driver strength */
2039 err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2043 mmc_select_mode(mmc, MMC_HS_400_ES);
2044 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2048 return mmc_set_enhanced_strobe(mmc);
2051 static int mmc_select_hs400es(struct mmc *mmc)
2057 #define for_each_supported_width(caps, ddr, ecbv) \
2058 for (ecbv = ext_csd_bus_width;\
2059 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2061 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
2063 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2066 const struct mode_width_tuning *mwt;
2067 const struct ext_csd_bus_width *ecbw;
2070 mmc_dump_capabilities("mmc", card_caps);
2071 mmc_dump_capabilities("host", mmc->host_caps);
2074 if (mmc_host_is_spi(mmc)) {
2075 mmc_set_bus_width(mmc, 1);
2076 mmc_select_mode(mmc, MMC_LEGACY);
2077 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2081 /* Restrict card's capabilities by what the host can do */
2082 card_caps &= mmc->host_caps;
2084 /* Only version 4 of MMC supports wider bus widths */
2085 if (mmc->version < MMC_VERSION_4)
2088 if (!mmc->ext_csd) {
2089 pr_debug("No ext_csd found!\n"); /* this should enver happen */
2093 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2094 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2096 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2097 * before doing anything else, since a transition from either of
2098 * the HS200/HS400 mode directly to legacy mode is not supported.
2100 if (mmc->selected_mode == MMC_HS_200 ||
2101 mmc->selected_mode == MMC_HS_400)
2102 mmc_set_card_speed(mmc, MMC_HS, true);
2105 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2107 for_each_mmc_mode_by_pref(card_caps, mwt) {
2108 for_each_supported_width(card_caps & mwt->widths,
2109 mmc_is_mode_ddr(mwt->mode), ecbw) {
2110 enum mmc_voltage old_voltage;
2111 pr_debug("trying mode %s width %d (at %d MHz)\n",
2112 mmc_mode_name(mwt->mode),
2113 bus_width(ecbw->cap),
2114 mmc_mode2freq(mmc, mwt->mode) / 1000000);
2115 old_voltage = mmc->signal_voltage;
2116 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2117 MMC_ALL_SIGNAL_VOLTAGE);
2121 /* configure the bus width (card + host) */
2122 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2124 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2127 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
2129 if (mwt->mode == MMC_HS_400) {
2130 err = mmc_select_hs400(mmc);
2132 printf("Select HS400 failed %d\n", err);
2135 } else if (mwt->mode == MMC_HS_400_ES) {
2136 err = mmc_select_hs400es(mmc);
2138 printf("Select HS400ES failed %d\n",
2143 /* configure the bus speed (card) */
2144 err = mmc_set_card_speed(mmc, mwt->mode, false);
2149 * configure the bus width AND the ddr mode
2150 * (card). The host side will be taken care
2151 * of in the next step
2153 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2154 err = mmc_switch(mmc,
2155 EXT_CSD_CMD_SET_NORMAL,
2157 ecbw->ext_csd_bits);
2162 /* configure the bus mode (host) */
2163 mmc_select_mode(mmc, mwt->mode);
2164 mmc_set_clock(mmc, mmc->tran_speed,
2166 #ifdef MMC_SUPPORTS_TUNING
2168 /* execute tuning if needed */
2170 err = mmc_execute_tuning(mmc,
2173 pr_debug("tuning failed : %d\n", err);
2180 /* do a transfer to check the configuration */
2181 err = mmc_read_and_compare_ext_csd(mmc);
2185 mmc_set_signal_voltage(mmc, old_voltage);
2186 /* if an error occurred, revert to a safer bus mode */
2187 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2188 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2189 mmc_select_mode(mmc, MMC_LEGACY);
2190 mmc_set_bus_width(mmc, 1);
2194 pr_err("unable to select a mode : %d\n", err);
2200 #if CONFIG_IS_ENABLED(MMC_TINY)
2201 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
2204 static int mmc_startup_v4(struct mmc *mmc)
2208 bool has_parts = false;
2209 bool part_completed;
2210 static const u32 mmc_versions[] = {
2222 #if CONFIG_IS_ENABLED(MMC_TINY)
2223 u8 *ext_csd = ext_csd_bkup;
2225 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2229 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2231 err = mmc_send_ext_csd(mmc, ext_csd);
2235 /* store the ext csd for future reference */
2237 mmc->ext_csd = ext_csd;
2239 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2241 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2244 /* check ext_csd version and capacity */
2245 err = mmc_send_ext_csd(mmc, ext_csd);
2249 /* store the ext csd for future reference */
2251 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2254 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
2256 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2259 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2261 if (mmc->version >= MMC_VERSION_4_2) {
2263 * According to the JEDEC Standard, the value of
2264 * ext_csd's capacity is valid if the value is more
2267 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2268 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2269 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2270 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2271 capacity *= MMC_MAX_BLOCK_LEN;
2272 if ((capacity >> 20) > 2 * 1024)
2273 mmc->capacity_user = capacity;
2276 if (mmc->version >= MMC_VERSION_4_5)
2277 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2279 /* The partition data may be non-zero but it is only
2280 * effective if PARTITION_SETTING_COMPLETED is set in
2281 * EXT_CSD, so ignore any data if this bit is not set,
2282 * except for enabling the high-capacity group size
2283 * definition (see below).
2285 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2286 EXT_CSD_PARTITION_SETTING_COMPLETED);
2288 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2289 /* Some eMMC set the value too low so set a minimum */
2290 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2291 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2293 /* store the partition info of emmc */
2294 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2295 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2296 ext_csd[EXT_CSD_BOOT_MULT])
2297 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2298 if (part_completed &&
2299 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2300 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
2302 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2304 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2306 for (i = 0; i < 4; i++) {
2307 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2308 uint mult = (ext_csd[idx + 2] << 16) +
2309 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2312 if (!part_completed)
2314 mmc->capacity_gp[i] = mult;
2315 mmc->capacity_gp[i] *=
2316 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2317 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2318 mmc->capacity_gp[i] <<= 19;
2321 #ifndef CONFIG_SPL_BUILD
2322 if (part_completed) {
2323 mmc->enh_user_size =
2324 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2325 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2326 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2327 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2328 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2329 mmc->enh_user_size <<= 19;
2330 mmc->enh_user_start =
2331 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2332 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2333 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2334 ext_csd[EXT_CSD_ENH_START_ADDR];
2335 if (mmc->high_capacity)
2336 mmc->enh_user_start <<= 9;
2341 * Host needs to enable ERASE_GRP_DEF bit if device is
2342 * partitioned. This bit will be lost every time after a reset
2343 * or power off. This will affect erase size.
2347 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2348 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2351 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2352 EXT_CSD_ERASE_GROUP_DEF, 1);
2357 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2360 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2361 #if CONFIG_IS_ENABLED(MMC_WRITE)
2362 /* Read out group size from ext_csd */
2363 mmc->erase_grp_size =
2364 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2367 * if high capacity and partition setting completed
2368 * SEC_COUNT is valid even if it is smaller than 2 GiB
2369 * JEDEC Standard JESD84-B45, 6.2.4
2371 if (mmc->high_capacity && part_completed) {
2372 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2373 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2374 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2375 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2376 capacity *= MMC_MAX_BLOCK_LEN;
2377 mmc->capacity_user = capacity;
2380 #if CONFIG_IS_ENABLED(MMC_WRITE)
2382 /* Calculate the group size from the csd value. */
2383 int erase_gsz, erase_gmul;
2385 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2386 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2387 mmc->erase_grp_size = (erase_gsz + 1)
2391 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2392 mmc->hc_wp_grp_size = 1024
2393 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2394 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2397 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2402 #if !CONFIG_IS_ENABLED(MMC_TINY)
2405 mmc->ext_csd = NULL;
2410 static int mmc_startup(struct mmc *mmc)
2416 struct blk_desc *bdesc;
2418 #ifdef CONFIG_MMC_SPI_CRC_ON
2419 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2420 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2421 cmd.resp_type = MMC_RSP_R1;
2423 err = mmc_send_cmd(mmc, &cmd, NULL);
2429 /* Put the Card in Identify Mode */
2430 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2431 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2432 cmd.resp_type = MMC_RSP_R2;
2435 err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_SEND_CID, 4);
2439 memcpy(mmc->cid, cmd.response, 16);
2442 * For MMC cards, set the Relative Address.
2443 * For SD cards, get the Relatvie Address.
2444 * This also puts the cards into Standby State
2446 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2447 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2448 cmd.cmdarg = mmc->rca << 16;
2449 cmd.resp_type = MMC_RSP_R6;
2451 err = mmc_send_cmd(mmc, &cmd, NULL);
2457 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2460 /* Get the Card-Specific Data */
2461 cmd.cmdidx = MMC_CMD_SEND_CSD;
2462 cmd.resp_type = MMC_RSP_R2;
2463 cmd.cmdarg = mmc->rca << 16;
2465 err = mmc_send_cmd(mmc, &cmd, NULL);
2470 mmc->csd[0] = cmd.response[0];
2471 mmc->csd[1] = cmd.response[1];
2472 mmc->csd[2] = cmd.response[2];
2473 mmc->csd[3] = cmd.response[3];
2475 if (mmc->version == MMC_VERSION_UNKNOWN) {
2476 int version = (cmd.response[0] >> 26) & 0xf;
2480 mmc->version = MMC_VERSION_1_2;
2483 mmc->version = MMC_VERSION_1_4;
2486 mmc->version = MMC_VERSION_2_2;
2489 mmc->version = MMC_VERSION_3;
2492 mmc->version = MMC_VERSION_4;
2495 mmc->version = MMC_VERSION_1_2;
2500 /* divide frequency by 10, since the mults are 10x bigger */
2501 freq = fbase[(cmd.response[0] & 0x7)];
2502 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2504 mmc->legacy_speed = freq * mult;
2505 mmc_select_mode(mmc, MMC_LEGACY);
2507 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2508 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2509 #if CONFIG_IS_ENABLED(MMC_WRITE)
2512 mmc->write_bl_len = mmc->read_bl_len;
2514 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2517 if (mmc->high_capacity) {
2518 csize = (mmc->csd[1] & 0x3f) << 16
2519 | (mmc->csd[2] & 0xffff0000) >> 16;
2522 csize = (mmc->csd[1] & 0x3ff) << 2
2523 | (mmc->csd[2] & 0xc0000000) >> 30;
2524 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2527 mmc->capacity_user = (csize + 1) << (cmult + 2);
2528 mmc->capacity_user *= mmc->read_bl_len;
2529 mmc->capacity_boot = 0;
2530 mmc->capacity_rpmb = 0;
2531 for (i = 0; i < 4; i++)
2532 mmc->capacity_gp[i] = 0;
2534 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2535 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2537 #if CONFIG_IS_ENABLED(MMC_WRITE)
2538 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2539 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2542 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2543 cmd.cmdidx = MMC_CMD_SET_DSR;
2544 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2545 cmd.resp_type = MMC_RSP_NONE;
2546 if (mmc_send_cmd(mmc, &cmd, NULL))
2547 pr_warn("MMC: SET_DSR failed\n");
2550 /* Select the card, and put it into Transfer Mode */
2551 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2552 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2553 cmd.resp_type = MMC_RSP_R1;
2554 cmd.cmdarg = mmc->rca << 16;
2555 err = mmc_send_cmd(mmc, &cmd, NULL);
2562 * For SD, its erase group is always one sector
2564 #if CONFIG_IS_ENABLED(MMC_WRITE)
2565 mmc->erase_grp_size = 1;
2567 mmc->part_config = MMCPART_NOAVAILABLE;
2569 err = mmc_startup_v4(mmc);
2573 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2577 #if CONFIG_IS_ENABLED(MMC_TINY)
2578 mmc_set_clock(mmc, mmc->legacy_speed, false);
2579 mmc_select_mode(mmc, MMC_LEGACY);
2580 mmc_set_bus_width(mmc, 1);
2583 err = sd_get_capabilities(mmc);
2586 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2588 err = mmc_get_capabilities(mmc);
2591 err = mmc_select_mode_and_width(mmc, mmc->card_caps);
2597 mmc->best_mode = mmc->selected_mode;
2599 /* Fix the block length for DDR mode */
2600 if (mmc->ddr_mode) {
2601 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2602 #if CONFIG_IS_ENABLED(MMC_WRITE)
2603 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2607 /* fill in device description */
2608 bdesc = mmc_get_blk_desc(mmc);
2612 bdesc->blksz = mmc->read_bl_len;
2613 bdesc->log2blksz = LOG2(bdesc->blksz);
2614 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2615 #if !defined(CONFIG_SPL_BUILD) || \
2616 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2617 !CONFIG_IS_ENABLED(USE_TINY_PRINTF))
2618 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2619 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2620 (mmc->cid[3] >> 16) & 0xffff);
2621 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2622 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2623 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2624 (mmc->cid[2] >> 24) & 0xff);
2625 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2626 (mmc->cid[2] >> 16) & 0xf);
2628 bdesc->vendor[0] = 0;
2629 bdesc->product[0] = 0;
2630 bdesc->revision[0] = 0;
2633 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
2640 static int mmc_send_if_cond(struct mmc *mmc)
2645 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2646 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2647 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2648 cmd.resp_type = MMC_RSP_R7;
2650 err = mmc_send_cmd(mmc, &cmd, NULL);
2655 if ((cmd.response[0] & 0xff) != 0xaa)
2658 mmc->version = SD_VERSION_2;
2663 #if !CONFIG_IS_ENABLED(DM_MMC)
2664 /* board-specific MMC power initializations. */
2665 __weak void board_mmc_power_init(void)
2670 static int mmc_power_init(struct mmc *mmc)
2672 #if CONFIG_IS_ENABLED(DM_MMC)
2673 #if CONFIG_IS_ENABLED(DM_REGULATOR)
2676 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2679 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2681 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2682 &mmc->vqmmc_supply);
2684 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2686 #else /* !CONFIG_DM_MMC */
2688 * Driver model should use a regulator, as above, rather than calling
2689 * out to board code.
2691 board_mmc_power_init();
2697 * put the host in the initial state:
2698 * - turn on Vdd (card power supply)
2699 * - configure the bus width and clock to minimal values
2701 static void mmc_set_initial_state(struct mmc *mmc)
2705 /* First try to set 3.3V. If it fails set to 1.8V */
2706 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2708 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2710 pr_warn("mmc: failed to set signal voltage\n");
2712 mmc_select_mode(mmc, MMC_LEGACY);
2713 mmc_set_bus_width(mmc, 1);
2714 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
2717 static int mmc_power_on(struct mmc *mmc)
2719 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2720 if (mmc->vmmc_supply) {
2721 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2723 if (ret && ret != -EACCES) {
2724 printf("Error enabling VMMC supply : %d\n", ret);
2732 static int mmc_power_off(struct mmc *mmc)
2734 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2735 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2736 if (mmc->vmmc_supply) {
2737 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2739 if (ret && ret != -EACCES) {
2740 pr_debug("Error disabling VMMC supply : %d\n", ret);
2748 static int mmc_power_cycle(struct mmc *mmc)
2752 ret = mmc_power_off(mmc);
2756 ret = mmc_host_power_cycle(mmc);
2761 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
2762 * to be on the safer side.
2765 return mmc_power_on(mmc);
2768 int mmc_get_op_cond(struct mmc *mmc)
2770 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2776 err = mmc_power_init(mmc);
2780 #ifdef CONFIG_MMC_QUIRKS
2781 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2782 MMC_QUIRK_RETRY_SEND_CID |
2783 MMC_QUIRK_RETRY_APP_CMD;
2786 err = mmc_power_cycle(mmc);
2789 * if power cycling is not supported, we should not try
2790 * to use the UHS modes, because we wouldn't be able to
2791 * recover from an error during the UHS initialization.
2793 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2795 mmc->host_caps &= ~UHS_CAPS;
2796 err = mmc_power_on(mmc);
2801 #if CONFIG_IS_ENABLED(DM_MMC)
2803 * Re-initialization is needed to clear old configuration for
2806 err = mmc_reinit(mmc);
2808 /* made sure it's not NULL earlier */
2809 err = mmc->cfg->ops->init(mmc);
2816 mmc_set_initial_state(mmc);
2818 /* Reset the Card */
2819 err = mmc_go_idle(mmc);
2824 /* The internal partition reset to user partition(0) at every CMD0 */
2825 mmc_get_blk_desc(mmc)->hwpart = 0;
2827 /* Test for SD version 2 */
2828 err = mmc_send_if_cond(mmc);
2830 /* Now try to get the SD card's operating condition */
2831 err = sd_send_op_cond(mmc, uhs_en);
2832 if (err && uhs_en) {
2834 mmc_power_cycle(mmc);
2838 /* If the command timed out, we check for an MMC card */
2839 if (err == -ETIMEDOUT) {
2840 err = mmc_send_op_cond(mmc);
2843 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2844 pr_err("Card did not respond to voltage select! : %d\n", err);
2853 int mmc_start_init(struct mmc *mmc)
2859 * all hosts are capable of 1 bit bus-width and able to use the legacy
2862 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
2863 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2864 #if CONFIG_IS_ENABLED(DM_MMC)
2865 mmc_deferred_probe(mmc);
2867 #if !defined(CONFIG_MMC_BROKEN_CD)
2868 no_card = mmc_getcd(mmc) == 0;
2872 #if !CONFIG_IS_ENABLED(DM_MMC)
2873 /* we pretend there's no card when init is NULL */
2874 no_card = no_card || (mmc->cfg->ops->init == NULL);
2878 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2879 pr_err("MMC: no card present\n");
2884 err = mmc_get_op_cond(mmc);
2887 mmc->init_in_progress = 1;
2892 static int mmc_complete_init(struct mmc *mmc)
2896 mmc->init_in_progress = 0;
2897 if (mmc->op_cond_pending)
2898 err = mmc_complete_op_cond(mmc);
2901 err = mmc_startup(mmc);
2909 int mmc_init(struct mmc *mmc)
2912 __maybe_unused ulong start;
2913 #if CONFIG_IS_ENABLED(DM_MMC)
2914 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2921 start = get_timer(0);
2923 if (!mmc->init_in_progress)
2924 err = mmc_start_init(mmc);
2927 err = mmc_complete_init(mmc);
2929 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2934 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2935 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2936 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2937 int mmc_deinit(struct mmc *mmc)
2945 caps_filtered = mmc->card_caps &
2946 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2947 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2948 MMC_CAP(UHS_SDR104));
2950 return sd_select_mode_and_width(mmc, caps_filtered);
2952 caps_filtered = mmc->card_caps &
2953 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2955 return mmc_select_mode_and_width(mmc, caps_filtered);
2960 int mmc_set_dsr(struct mmc *mmc, u16 val)
2966 /* CPU-specific MMC initializations */
2967 __weak int cpu_mmc_init(struct bd_info *bis)
2972 /* board-specific MMC initializations. */
2973 __weak int board_mmc_init(struct bd_info *bis)
2978 void mmc_set_preinit(struct mmc *mmc, int preinit)
2980 mmc->preinit = preinit;
2983 #if CONFIG_IS_ENABLED(DM_MMC)
2984 static int mmc_probe(struct bd_info *bis)
2988 struct udevice *dev;
2990 ret = uclass_get(UCLASS_MMC, &uc);
2995 * Try to add them in sequence order. Really with driver model we
2996 * should allow holes, but the current MMC list does not allow that.
2997 * So if we request 0, 1, 3 we will get 0, 1, 2.
2999 for (i = 0; ; i++) {
3000 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
3004 uclass_foreach_dev(dev, uc) {
3005 ret = device_probe(dev);
3007 pr_err("%s - probe failed: %d\n", dev->name, ret);
3013 static int mmc_probe(struct bd_info *bis)
3015 if (board_mmc_init(bis) < 0)
3022 int mmc_initialize(struct bd_info *bis)
3024 static int initialized = 0;
3026 if (initialized) /* Avoid initializing mmc multiple times */
3030 #if !CONFIG_IS_ENABLED(BLK)
3031 #if !CONFIG_IS_ENABLED(MMC_TINY)
3035 ret = mmc_probe(bis);
3039 #ifndef CONFIG_SPL_BUILD
3040 print_mmc_devices(',');
3047 #if CONFIG_IS_ENABLED(DM_MMC)
3048 int mmc_init_device(int num)
3050 struct udevice *dev;
3054 ret = uclass_get_device(UCLASS_MMC, num, &dev);
3058 m = mmc_get_mmc_dev(dev);
3068 #ifdef CONFIG_CMD_BKOPS_ENABLE
3069 int mmc_set_bkops_enable(struct mmc *mmc)
3072 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3074 err = mmc_send_ext_csd(mmc, ext_csd);
3076 puts("Could not get ext_csd register values\n");
3080 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3081 puts("Background operations not supported on device\n");
3082 return -EMEDIUMTYPE;
3085 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
3086 puts("Background operations already enabled\n");
3090 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
3092 puts("Failed to enable manual background operations\n");
3096 puts("Enabled manual background operations\n");
3102 __weak int mmc_get_env_dev(void)
3104 #ifdef CONFIG_SYS_MMC_ENV_DEV
3105 return CONFIG_SYS_MMC_ENV_DEV;