1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2010 Google, Inc.
4 */
6 #include <linux/delay.h>
7 #include <linux/dma-mapping.h>
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/iopoll.h>
12 #include <linux/platform_device.h>
13 #include <linux/clk.h>
16 #include <linux/of_device.h>
17 #include <linux/pinctrl/consumer.h>
18 #include <linux/regulator/consumer.h>
19 #include <linux/reset.h>
20 #include <linux/mmc/card.h>
21 #include <linux/mmc/host.h>
22 #include <linux/mmc/mmc.h>
23 #include <linux/mmc/slot-gpio.h>
24 #include <linux/gpio/consumer.h>
25 #include <linux/ktime.h>
27 #include "sdhci-pltfm.h"
30 /* Tegra SDHOST controller vendor register definitions */
31 #define SDHCI_TEGRA_VENDOR_CLOCK_CTRL 0x100
32 #define SDHCI_CLOCK_CTRL_TAP_MASK 0x00ff0000
33 #define SDHCI_CLOCK_CTRL_TAP_SHIFT 16
34 #define SDHCI_CLOCK_CTRL_TRIM_MASK 0x1f000000
35 #define SDHCI_CLOCK_CTRL_TRIM_SHIFT 24
36 #define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE BIT(5)
37 #define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE BIT(3)
38 #define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE BIT(2)
40 #define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL 0x104
41 #define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE BIT(31)
43 #define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES 0x10c
44 #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK 0x00003f00
45 #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT 8
47 #define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120
48 #define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT BIT(0)
49 #define SDHCI_MISC_CTRL_ENABLE_SDR104 0x8
50 #define SDHCI_MISC_CTRL_ENABLE_SDR50 0x10
51 #define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20
52 #define SDHCI_MISC_CTRL_ENABLE_DDR50 0x200
54 #define SDHCI_TEGRA_VENDOR_DLLCAL_CFG 0x1b0
55 #define SDHCI_TEGRA_DLLCAL_CALIBRATE BIT(31)
57 #define SDHCI_TEGRA_VENDOR_DLLCAL_STA 0x1bc
58 #define SDHCI_TEGRA_DLLCAL_STA_ACTIVE BIT(31)
60 #define SDHCI_VNDR_TUN_CTRL0_0 0x1c0
61 #define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP 0x20000
62 #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK 0x03fc0000
63 #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT 18
64 #define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK 0x00001fc0
65 #define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT 6
66 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK 0x000e000
67 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT 13
70 #define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK 0x7
72 #define SDHCI_TEGRA_VNDR_TUN_CTRL1_0 0x1c4
73 #define SDHCI_TEGRA_VNDR_TUN_STATUS0 0x1C8
74 #define SDHCI_TEGRA_VNDR_TUN_STATUS1 0x1CC
75 #define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK 0xFF
76 #define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT 0x8
77 #define TUNING_WORD_BIT_SIZE 32
79 #define SDHCI_TEGRA_AUTO_CAL_CONFIG 0x1e4
80 #define SDHCI_AUTO_CAL_START BIT(31)
81 #define SDHCI_AUTO_CAL_ENABLE BIT(29)
82 #define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK 0x0000ffff
84 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL 0x1e0
85 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK 0x0000000f
86 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL 0x7
87 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD BIT(31)
88 #define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK 0x07FFF000
90 #define SDHCI_TEGRA_AUTO_CAL_STATUS 0x1ec
91 #define SDHCI_TEGRA_AUTO_CAL_ACTIVE BIT(31)
93 #define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
94 #define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
95 #define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2)
96 #define NVQUIRK_ENABLE_SDR50 BIT(3)
97 #define NVQUIRK_ENABLE_SDR104 BIT(4)
98 #define NVQUIRK_ENABLE_DDR50 BIT(5)
100 * HAS_PADCALIB NVQUIRK is for SoCs supporting auto calibration of pads
103 #define NVQUIRK_HAS_PADCALIB BIT(6)
105 * NEEDS_PAD_CONTROL NVQUIRK is for SoCs having separate 3V3 and 1V8 pads.
106 * 3V3/1V8 pad selection happens through pinctrl state selection depending
107 * on the signaling mode.
109 #define NVQUIRK_NEEDS_PAD_CONTROL BIT(7)
110 #define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8)
111 #define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING BIT(9)
114 * NVQUIRK_HAS_TMCLK is for SoCs having a separate timeout clock for the Tegra
115 * SDMMC hardware data timeout.
117 #define NVQUIRK_HAS_TMCLK BIT(10)
119 /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
120 #define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000
122 #define SDHCI_TEGRA_CQE_TRNS_MODE (SDHCI_TRNS_MULTI | \
123 SDHCI_TRNS_BLK_CNT_EN | \
126 struct sdhci_tegra_soc_data {
127 const struct sdhci_pltfm_data *pdata;
134 /* Magic pull up and pull down pad calibration offsets */
135 struct sdhci_tegra_autocal_offsets {
138 u32 pull_up_3v3_timeout;
139 u32 pull_down_3v3_timeout;
142 u32 pull_up_1v8_timeout;
143 u32 pull_down_1v8_timeout;
145 u32 pull_down_sdr104;
151 const struct sdhci_tegra_soc_data *soc_data;
152 struct gpio_desc *power_gpio;
155 bool pad_calib_required;
156 bool pad_control_available;
158 struct reset_control *rst;
159 struct pinctrl *pinctrl_sdmmc;
160 struct pinctrl_state *pinctrl_state_3v3;
161 struct pinctrl_state *pinctrl_state_1v8;
162 struct pinctrl_state *pinctrl_state_3v3_drv;
163 struct pinctrl_state *pinctrl_state_1v8_drv;
165 struct sdhci_tegra_autocal_offsets autocal_offsets;
172 unsigned long curr_clk_rate;
176 static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
178 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
179 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
180 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
182 if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
183 (reg == SDHCI_HOST_VERSION))) {
184 /* Erratum: Version register is invalid in HW. */
185 return SDHCI_SPEC_200;
188 return readw(host->ioaddr + reg);
191 static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
193 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
196 case SDHCI_TRANSFER_MODE:
198 * Postpone this write, we must do it together with a
199 * command write that is down below.
201 pltfm_host->xfer_mode_shadow = val;
204 writel((val << 16) | pltfm_host->xfer_mode_shadow,
205 host->ioaddr + SDHCI_TRANSFER_MODE);
209 writew(val, host->ioaddr + reg);
212 static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
214 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
215 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
216 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
218 /* Seems like we're getting spurious timeout and crc errors, so
219 * disable signalling of them. In case of real errors software
220 * timers should take care of eventually detecting them.
222 if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
223 val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
225 writel(val, host->ioaddr + reg);
227 if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
228 (reg == SDHCI_INT_ENABLE))) {
229 /* Erratum: Must enable block gap interrupt detection */
230 u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
231 if (val & SDHCI_INT_CARD_INT)
235 writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
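/*
 * Gate or ungate the SD card clock and return the previous state so that
 * callers can restore it once they are done.
 */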
239 static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
244 reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
245 status = !!(reg & SDHCI_CLOCK_CARD_EN);
247 if (status == enable)
251 reg |= SDHCI_CLOCK_CARD_EN;
253 reg &= ~SDHCI_CLOCK_CARD_EN;
255 sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);
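/*
 * Tegra210 write hook: tuning command writes (CMD19/CMD21) are issued with
 * the card clock gated, and the CMD/DATA lines are reset before the clock
 * state is restored.
 */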
260 static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
262 bool is_tuning_cmd = false;
266 if (reg == SDHCI_COMMAND) {
267 cmd = SDHCI_GET_CMD(val);
268 is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK ||
269 cmd == MMC_SEND_TUNING_BLOCK_HS200;
273 clk_enabled = tegra_sdhci_configure_card_clk(host, false);
275 writew(val, host->ioaddr + reg);
279 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
280 tegra_sdhci_configure_card_clk(host, clk_enabled);
284 static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
287 * Write-enable shall be assumed if GPIO is missing in a board's
288 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
289 * Tegra.
290 */
291 return mmc_gpio_get_ro(host->mmc);
294 static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
296 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
297 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
298 int has_1v8, has_3v3;
301 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
302 * voltage configuration in order to perform voltage switching. This
303 * means that valid pinctrl info is required on SDHCI instances capable
304 * of performing voltage switching. Whether or not an SDHCI instance is
305 * capable of voltage switching is determined based on the regulator.
308 if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
311 if (IS_ERR(host->mmc->supply.vqmmc))
314 has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
317 has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
320 if (has_1v8 == 1 && has_3v3 == 1)
321 return tegra_host->pad_control_available;
323 /* Fixed voltage, no pad control required. */
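/*
 * Program the tap delay in the vendor clock control register. On SoCs with
 * NVQUIRK_DIS_CARD_CLK_CONFIG_TAP the card clock is gated and the CMD/DATA
 * lines are reset around the update.
 */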
327 static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
329 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
330 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
331 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
332 bool card_clk_enabled = false;
336 * Touching the tap values is a bit tricky on some SoC generations.
337 * The quirk enables a workaround for a glitch that sometimes occurs if
338 * the tap values are changed.
341 if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
342 card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
344 reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
345 reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
346 reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
347 sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
349 if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
352 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
353 tegra_sdhci_configure_card_clk(host, card_clk_enabled);
357 static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
360 struct sdhci_host *host = mmc_priv(mmc);
363 val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
365 if (ios->enhanced_strobe)
366 val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
368 val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
370 sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
374 static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
376 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
377 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
378 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
379 u32 misc_ctrl, clk_ctrl, pad_ctrl;
381 sdhci_reset(host, mask);
383 if (!(mask & SDHCI_RESET_ALL))
386 tegra_sdhci_set_tap(host, tegra_host->default_tap);
388 misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
389 clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
391 misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
392 SDHCI_MISC_CTRL_ENABLE_SDR50 |
393 SDHCI_MISC_CTRL_ENABLE_DDR50 |
394 SDHCI_MISC_CTRL_ENABLE_SDR104);
396 clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
397 SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);
399 if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
400 /* Erratum: Enable SDHCI spec v3.00 support */
401 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
402 misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
403 /* Advertise UHS modes as supported by host */
404 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
405 misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
406 if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
407 misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
408 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
409 misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
410 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
411 clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
414 clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;
416 sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
417 sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
419 if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
420 pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
421 pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
422 pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
423 sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
425 tegra_host->pad_calib_required = true;
428 tegra_host->ddr_signaling = false;
431 static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
436 * Enable or disable the additional I/O pad used by the drive strength
437 * calibration process.
439 val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
442 val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
444 val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
446 sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
452 static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
457 reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
458 reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
460 sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
463 static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
466 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
467 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
468 struct sdhci_tegra_autocal_offsets *offsets =
469 &tegra_host->autocal_offsets;
470 struct pinctrl_state *pinctrl_drvupdn = NULL;
472 u8 drvup = 0, drvdn = 0;
475 if (!state_drvupdn) {
476 /* PADS Drive Strength */
477 if (voltage == MMC_SIGNAL_VOLTAGE_180) {
478 if (tegra_host->pinctrl_state_1v8_drv) {
480 tegra_host->pinctrl_state_1v8_drv;
482 drvup = offsets->pull_up_1v8_timeout;
483 drvdn = offsets->pull_down_1v8_timeout;
486 if (tegra_host->pinctrl_state_3v3_drv) {
488 tegra_host->pinctrl_state_3v3_drv;
490 drvup = offsets->pull_up_3v3_timeout;
491 drvdn = offsets->pull_down_3v3_timeout;
495 if (pinctrl_drvupdn != NULL) {
496 ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
499 dev_err(mmc_dev(host->mmc),
500 "failed pads drvupdn, ret: %d\n", ret);
501 } else if ((drvup) || (drvdn)) {
502 reg = sdhci_readl(host,
503 SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
504 reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
505 reg |= (drvup << 20) | (drvdn << 12);
506 sdhci_writel(host, reg,
507 SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
511 /* Dual Voltage PADS Voltage selection */
512 if (!tegra_host->pad_control_available)
515 if (voltage == MMC_SIGNAL_VOLTAGE_180) {
516 ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
517 tegra_host->pinctrl_state_1v8);
519 dev_err(mmc_dev(host->mmc),
520 "setting 1.8V failed, ret: %d\n", ret);
522 ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
523 tegra_host->pinctrl_state_3v3);
525 dev_err(mmc_dev(host->mmc),
526 "setting 3.3V failed, ret: %d\n", ret);
533 static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
535 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
536 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
537 struct sdhci_tegra_autocal_offsets offsets =
538 tegra_host->autocal_offsets;
539 struct mmc_ios *ios = &host->mmc->ios;
540 bool card_clk_enabled;
545 switch (ios->timing) {
546 case MMC_TIMING_UHS_SDR104:
547 pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
549 case MMC_TIMING_MMC_HS400:
550 pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
553 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
554 pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
556 pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
559 /* Set initial offset before auto-calibration */
560 tegra_sdhci_set_pad_autocal_offset(host, pdpu);
562 card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
564 tegra_sdhci_configure_cal_pad(host, true);
566 reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
567 reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
568 sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
572 ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
573 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
576 tegra_sdhci_configure_cal_pad(host, false);
578 tegra_sdhci_configure_card_clk(host, card_clk_enabled);
581 dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");
583 /* Disable automatic cal and use fixed Drive Strengths */
584 reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
585 reg &= ~SDHCI_AUTO_CAL_ENABLE;
586 sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
588 ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
590 dev_err(mmc_dev(host->mmc),
591 "Setting drive strengths failed: %d\n", ret);
595 static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
597 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
598 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
599 struct sdhci_tegra_autocal_offsets *autocal =
600 &tegra_host->autocal_offsets;
603 err = device_property_read_u32(mmc_dev(host->mmc),
604 "nvidia,pad-autocal-pull-up-offset-3v3",
605 &autocal->pull_up_3v3);
607 autocal->pull_up_3v3 = 0;
609 err = device_property_read_u32(mmc_dev(host->mmc),
610 "nvidia,pad-autocal-pull-down-offset-3v3",
611 &autocal->pull_down_3v3);
613 autocal->pull_down_3v3 = 0;
615 err = device_property_read_u32(mmc_dev(host->mmc),
616 "nvidia,pad-autocal-pull-up-offset-1v8",
617 &autocal->pull_up_1v8);
619 autocal->pull_up_1v8 = 0;
621 err = device_property_read_u32(mmc_dev(host->mmc),
622 "nvidia,pad-autocal-pull-down-offset-1v8",
623 &autocal->pull_down_1v8);
625 autocal->pull_down_1v8 = 0;
627 err = device_property_read_u32(mmc_dev(host->mmc),
628 "nvidia,pad-autocal-pull-up-offset-sdr104",
629 &autocal->pull_up_sdr104);
631 autocal->pull_up_sdr104 = autocal->pull_up_1v8;
633 err = device_property_read_u32(mmc_dev(host->mmc),
634 "nvidia,pad-autocal-pull-down-offset-sdr104",
635 &autocal->pull_down_sdr104);
637 autocal->pull_down_sdr104 = autocal->pull_down_1v8;
639 err = device_property_read_u32(mmc_dev(host->mmc),
640 "nvidia,pad-autocal-pull-up-offset-hs400",
641 &autocal->pull_up_hs400);
643 autocal->pull_up_hs400 = autocal->pull_up_1v8;
645 err = device_property_read_u32(mmc_dev(host->mmc),
646 "nvidia,pad-autocal-pull-down-offset-hs400",
647 &autocal->pull_down_hs400);
649 autocal->pull_down_hs400 = autocal->pull_down_1v8;
652 * Different fail-safe drive strength values based on the signaling
653 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
654 * So, avoid reading the device tree properties below for SoCs that don't
655 * have NVQUIRK_NEEDS_PAD_CONTROL.
657 if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
660 err = device_property_read_u32(mmc_dev(host->mmc),
661 "nvidia,pad-autocal-pull-up-offset-3v3-timeout",
662 &autocal->pull_up_3v3_timeout);
664 if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
665 (tegra_host->pinctrl_state_3v3_drv == NULL))
666 pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
667 mmc_hostname(host->mmc));
668 autocal->pull_up_3v3_timeout = 0;
671 err = device_property_read_u32(mmc_dev(host->mmc),
672 "nvidia,pad-autocal-pull-down-offset-3v3-timeout",
673 &autocal->pull_down_3v3_timeout);
675 if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
676 (tegra_host->pinctrl_state_3v3_drv == NULL))
677 pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
678 mmc_hostname(host->mmc));
679 autocal->pull_down_3v3_timeout = 0;
682 err = device_property_read_u32(mmc_dev(host->mmc),
683 "nvidia,pad-autocal-pull-up-offset-1v8-timeout",
684 &autocal->pull_up_1v8_timeout);
686 if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
687 (tegra_host->pinctrl_state_1v8_drv == NULL))
688 pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
689 mmc_hostname(host->mmc));
690 autocal->pull_up_1v8_timeout = 0;
693 err = device_property_read_u32(mmc_dev(host->mmc),
694 "nvidia,pad-autocal-pull-down-offset-1v8-timeout",
695 &autocal->pull_down_1v8_timeout);
697 if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
698 (tegra_host->pinctrl_state_1v8_drv == NULL))
699 pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
700 mmc_hostname(host->mmc));
701 autocal->pull_down_1v8_timeout = 0;
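/*
 * .request hook used on SoCs with pad calibration: rerun the pad
 * auto-calibration if more than 100 ms have passed since the last run, then
 * forward the request to sdhci_request().
 */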
705 static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
707 struct sdhci_host *host = mmc_priv(mmc);
708 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
709 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
710 ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);
712 /* 100 ms calibration interval is specified in the TRM */
713 if (ktime_to_ms(since_calib) > 100) {
714 tegra_sdhci_pad_autocalib(host);
715 tegra_host->last_calib = ktime_get();
718 sdhci_request(mmc, mrq);
721 static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
723 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
724 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
727 err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-tap",
728 &tegra_host->default_tap);
730 tegra_host->default_tap = 0;
732 err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-trim",
733 &tegra_host->default_trim);
735 tegra_host->default_trim = 0;
737 err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,dqs-trim",
738 &tegra_host->dqs_trim);
740 tegra_host->dqs_trim = 0x11;
743 static void tegra_sdhci_parse_dt(struct sdhci_host *host)
745 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
746 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
748 if (device_property_read_bool(mmc_dev(host->mmc), "supports-cqe"))
749 tegra_host->enable_hwcq = true;
751 tegra_host->enable_hwcq = false;
753 tegra_sdhci_parse_pad_autocal_dt(host);
754 tegra_sdhci_parse_tap_and_trim(host);
757 static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
759 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
760 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
761 unsigned long host_clk;
764 return sdhci_set_clock(host, clock);
767 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
768 * divider to be configured to divide the host clock by two. The SDHCI
769 * clock divider is calculated as part of sdhci_set_clock() by
770 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
771 * the requested clock rate.
773 * By setting host->max_clk to clock * 2, the divider calculation
774 * will always result in the correct value for DDR50/52 modes,
775 * regardless of clock rate rounding, which may happen if the value
776 * from clk_get_rate() is used.
778 host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
779 clk_set_rate(pltfm_host->clk, host_clk);
780 tegra_host->curr_clk_rate = host_clk;
781 if (tegra_host->ddr_signaling)
782 host->max_clk = host_clk;
784 host->max_clk = clk_get_rate(pltfm_host->clk);
786 sdhci_set_clock(host, clock);
788 if (tegra_host->pad_calib_required) {
789 tegra_sdhci_pad_autocalib(host);
790 tegra_host->pad_calib_required = false;
794 static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
796 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
798 return clk_round_rate(pltfm_host->clk, UINT_MAX);
801 static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
805 val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
806 val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
807 val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
808 sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
811 static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
816 reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
817 reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
818 sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
820 /* 1 ms sleep, 5 ms timeout */
821 err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
822 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
825 dev_err(mmc_dev(host->mmc),
826 "HS400 delay line calibration timed out\n");
829 static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
830 u8 thd_low, u8 fixed_tap)
832 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
833 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
835 u8 word, bit, edge1, tap, window;
837 bool start_fail = false;
838 bool start_pass = false;
839 bool end_pass = false;
840 bool first_fail = false;
841 bool first_pass = false;
842 u8 start_pass_tap = 0;
844 u8 first_fail_tap = 0;
845 u8 first_pass_tap = 0;
846 u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;
849 * Read the auto-tuned results and extract a good, valid passing window by
850 * filtering out unwanted bubble/partial/merged windows.
852 for (word = 0; word < total_tuning_words; word++) {
853 val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
854 val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
856 sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
857 tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
859 while (bit < TUNING_WORD_BIT_SIZE) {
860 tap = word * TUNING_WORD_BIT_SIZE + bit;
861 tap_result = tun_status & (1 << bit);
862 if (!tap_result && !start_fail) {
865 first_fail_tap = tap;
869 } else if (tap_result && start_fail && !start_pass) {
870 start_pass_tap = tap;
873 first_pass_tap = tap;
877 } else if (!tap_result && start_fail && start_pass &&
879 end_pass_tap = tap - 1;
881 } else if (tap_result && start_pass && start_fail &&
883 window = end_pass_tap - start_pass_tap;
884 /* discard merged window and bubble window */
885 if (window >= thd_up || window < thd_low) {
886 start_pass_tap = tap;
889 /* set tap at middle of valid window */
890 tap = start_pass_tap + window / 2;
891 tegra_host->tuned_tap_delay = tap;
901 WARN(1, "no edge detected, continue with hw tuned delay.\n");
902 } else if (first_pass) {
903 /* set tap location at fixed tap relative to the first edge */
904 edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
905 if (edge1 - 1 > fixed_tap)
906 tegra_host->tuned_tap_delay = edge1 - fixed_tap;
908 tegra_host->tuned_tap_delay = edge1 + fixed_tap;
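/*
 * Decide whether the HW-tuned tap can be used as-is. Thresholds are derived
 * from the SoC tap delay bounds and the current clock period; if the tuning
 * window is partial or merged, apply the tap correction before programming
 * the final tap value.
 */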
912 static void tegra_sdhci_post_tuning(struct sdhci_host *host)
914 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
915 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
916 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
917 u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
918 u8 fixed_tap, start_tap, end_tap, window_width;
919 u8 thdupper, thdlower;
921 u32 clk_rate_mhz, period_ps, bestcase, worstcase;
923 /* retain the HW-tuned tap to use in case no correction is needed */
924 val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
925 tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
926 SDHCI_CLOCK_CTRL_TAP_SHIFT;
927 if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
928 min_tap_dly = soc_data->min_tap_delay;
929 max_tap_dly = soc_data->max_tap_delay;
930 clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
931 period_ps = USEC_PER_SEC / clk_rate_mhz;
932 bestcase = period_ps / min_tap_dly;
933 worstcase = period_ps / max_tap_dly;
935 * Upper and lower bound thresholds used to detect merged and
936 * bubble windows.
937 */
938 thdupper = (2 * worstcase + bestcase) / 2;
939 thdlower = worstcase / 4;
941 * A fixed tap is used when the HW tuning result contains a single edge,
942 * and the tap is set at a fixed delay relative to the first edge.
944 avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
945 fixed_tap = avg_tap_dly / 2;
947 val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
948 start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
949 end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
950 SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
951 window_width = end_tap - start_tap;
952 num_iter = host->tuning_loop_count;
954 * A partial window includes the edges of the tuning range.
955 * A merged window includes more taps, so its width is higher
956 * than the upper threshold.
958 if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
959 (end_tap == num_iter - 2) || window_width >= thdupper) {
960 pr_debug("%s: Apply tuning correction\n",
961 mmc_hostname(host->mmc));
962 tegra_sdhci_tap_correction(host, thdupper, thdlower,
967 tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
970 static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
972 struct sdhci_host *host = mmc_priv(mmc);
975 err = sdhci_execute_tuning(mmc, opcode);
976 if (!err && !host->tuning_err)
977 tegra_sdhci_post_tuning(host);
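/*
 * Per-timing configuration: flag DDR signaling, set up the HW tuning
 * iteration count and start tap, select the tuned or default tap, and for
 * HS400 also program the DQS trim and run the DLL calibration.
 */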
982 static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
985 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
986 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
987 bool set_default_tap = false;
988 bool set_dqs_trim = false;
989 bool do_hs400_dll_cal = false;
993 tegra_host->ddr_signaling = false;
995 case MMC_TIMING_UHS_SDR50:
997 case MMC_TIMING_UHS_SDR104:
998 case MMC_TIMING_MMC_HS200:
999 /* Don't set default tap on tunable modes. */
1002 case MMC_TIMING_MMC_HS400:
1003 set_dqs_trim = true;
1004 do_hs400_dll_cal = true;
1007 case MMC_TIMING_MMC_DDR52:
1008 case MMC_TIMING_UHS_DDR50:
1009 tegra_host->ddr_signaling = true;
1010 set_default_tap = true;
1013 set_default_tap = true;
1017 val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
1018 val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
1019 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
1020 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
1021 val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
1022 0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
1023 1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
1024 sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
1025 sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);
1027 host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;
1029 sdhci_set_uhs_signaling(host, timing);
1031 tegra_sdhci_pad_autocalib(host);
1033 if (tegra_host->tuned_tap_delay && !set_default_tap)
1034 tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
1036 tegra_sdhci_set_tap(host, tegra_host->default_tap);
1039 tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);
1041 if (do_hs400_dll_cal)
1042 tegra_sdhci_hs400_dll_cal(host);
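/*
 * Software tap tuning used by SoCs that wire up .platform_execute_tuning:
 * scan the tap values for the passing range and program the tap at 75% of
 * that range, as recommended by the TRM.
 */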
1045 static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
1047 unsigned int min, max;
1050 * Start the search for the minimum tap value at 10, as smaller values may
1051 * wrongly be reported as working but fail at higher speeds,
1052 * according to the TRM.
1056 tegra_sdhci_set_tap(host, min);
1057 if (!mmc_send_tuning(host->mmc, opcode, NULL))
1062 /* Find the maximum tap value that still passes. */
1065 tegra_sdhci_set_tap(host, max);
1066 if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1073 /* The TRM states the ideal tap value is at 75% in the passing range. */
1074 tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));
1076 return mmc_send_tuning(host->mmc, opcode, NULL);
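/*
 * Voltage switch helper used when pad control is required: for 3.3 V the
 * pads are reconfigured before the regulator switch, for 1.8 V afterwards,
 * and a pending pad calibration is rerun afterwards.
 */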
1079 static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
1080 struct mmc_ios *ios)
1082 struct sdhci_host *host = mmc_priv(mmc);
1083 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1084 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1087 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1088 ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1091 ret = sdhci_start_signal_voltage_switch(mmc, ios);
1092 } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
1093 ret = sdhci_start_signal_voltage_switch(mmc, ios);
1096 ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1099 if (tegra_host->pad_calib_required)
1100 tegra_sdhci_pad_autocalib(host);
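/*
 * Look up the pinctrl states used for pad voltage and drive strength
 * selection. The "sdmmc-1v8-drv"/"sdmmc-3v3-drv" states are optional; pad
 * control is only marked available when both "sdmmc-3v3" and "sdmmc-1v8"
 * exist.
 */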
1105 static int tegra_sdhci_init_pinctrl_info(struct device *dev,
1106 struct sdhci_tegra *tegra_host)
1108 tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
1109 if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
1110 dev_dbg(dev, "No pinctrl info, err: %ld\n",
1111 PTR_ERR(tegra_host->pinctrl_sdmmc));
1115 tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
1116 tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
1117 if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
1118 if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
1119 tegra_host->pinctrl_state_1v8_drv = NULL;
1122 tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
1123 tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
1124 if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
1125 if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
1126 tegra_host->pinctrl_state_3v3_drv = NULL;
1129 tegra_host->pinctrl_state_3v3 =
1130 pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
1131 if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
1132 dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
1133 PTR_ERR(tegra_host->pinctrl_state_3v3));
1137 tegra_host->pinctrl_state_1v8 =
1138 pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
1139 if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
1140 dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
1141 PTR_ERR(tegra_host->pinctrl_state_1v8));
1145 tegra_host->pad_control_available = true;
1150 static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
1152 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1153 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1154 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1156 if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1157 tegra_host->pad_calib_required = true;
1160 static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
1162 struct mmc_host *mmc = cq_host->mmc;
1163 struct sdhci_host *host = mmc_priv(mmc);
1169 * During CQE resume/unhalt, the CQHCI driver unhalts CQE prior to the
1170 * cqhci_host_ops enable callback, where the SDHCI DMA and BLOCK_SIZE
1171 * registers need to be re-configured.
1172 * Tegra CQHCI/SDHCI prevents write access to the block size register when
1173 * CQE is unhalted. So handle the CQE resume sequence here to configure the
1174 * SDHCI block registers prior to exiting the CQE halt state.
1176 if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
1177 cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
1178 sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
1179 sdhci_cqe_enable(mmc);
1180 writel(val, cq_host->mmio + reg);
1181 timeout = ktime_add_us(ktime_get(), 50);
1183 timed_out = ktime_compare(ktime_get(), timeout) > 0;
1184 ctrl = cqhci_readl(cq_host, CQHCI_CTL);
1185 if (!(ctrl & CQHCI_HALT) || timed_out)
1189 * CQE usually resumes very quickly, but in case Tegra CQE
1190 * doesn't resume, retry the unhalt.
1193 writel(val, cq_host->mmio + reg);
1195 writel(val, cq_host->mmio + reg);
1199 static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
1200 struct mmc_request *mrq, u64 *data)
1202 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
1203 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1204 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1206 if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
1207 mrq->cmd->flags & MMC_RSP_R1B)
1208 *data |= CQHCI_CMD_TIMING(1);
1211 static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
1213 struct cqhci_host *cq_host = mmc->cqe_private;
1214 struct sdhci_host *host = mmc_priv(mmc);
1218 * The Tegra CQHCI/SDMMC design prevents write access to the SDHCI block
1219 * size register when CQE is enabled and unhalted.
1220 * The CQHCI driver enables CQE prior to activation, so disable CQE before
1221 * programming the block size in the SDHCI controller and enable it back.
1223 if (!cq_host->activated) {
1224 val = cqhci_readl(cq_host, CQHCI_CFG);
1225 if (val & CQHCI_ENABLE)
1226 cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
1228 sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
1229 sdhci_cqe_enable(mmc);
1230 if (val & CQHCI_ENABLE)
1231 cqhci_writel(cq_host, val, CQHCI_CFG);
1235 * CMD CRC errors are sometimes seen with some eMMC devices when the status
1236 * command is sent during transfer of the last data block, which is the
1237 * default case since the send status command block counter (CBC) is 1.
1238 * The recommended fix is to set CBC to 0, allowing the status command only
1239 * when the data lines are idle.
1241 val = cqhci_readl(cq_host, CQHCI_SSC1);
1242 val &= ~CQHCI_SSC1_CBC_MASK;
1243 cqhci_writel(cq_host, val, CQHCI_SSC1);
1246 static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
1248 sdhci_dumpregs(mmc_priv(mmc));
1251 static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
1256 if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
1259 cqhci_irq(host->mmc, intmask, cmd_error, data_error);
1264 static void tegra_sdhci_set_timeout(struct sdhci_host *host,
1265 struct mmc_command *cmd)
1270 * The HW busy detection timeout is based on the programmed data timeout
1271 * counter, and the maximum supported timeout is 11 s, which may not be
1272 * enough for long operations like cache flush, sleep awake, or erase.
1274 * The ERASE_TIMEOUT_LIMIT bit of the VENDOR_MISC_CTRL register allows the
1275 * host controller to wait for the end of the busy state without a HW
1276 * timeout.
1278 * So, use infinite busy wait mode for operations that may take more than
1279 * the maximum HW busy timeout of 11 s, otherwise use finite busy wait
1280 * mode.
1281 */
1282 val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1283 if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC)
1284 val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1286 val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1287 sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1289 __sdhci_set_timeout(host, cmd);
1292 static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
1294 struct cqhci_host *cq_host = mmc->cqe_private;
1297 reg = cqhci_readl(cq_host, CQHCI_CFG);
1298 reg |= CQHCI_ENABLE;
1299 cqhci_writel(cq_host, reg, CQHCI_CFG);
1302 static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
1304 struct cqhci_host *cq_host = mmc->cqe_private;
1305 struct sdhci_host *host = mmc_priv(mmc);
1308 reg = cqhci_readl(cq_host, CQHCI_CFG);
1309 reg &= ~CQHCI_ENABLE;
1310 cqhci_writel(cq_host, reg, CQHCI_CFG);
1311 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1314 static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
1315 .write_l = tegra_cqhci_writel,
1316 .enable = sdhci_tegra_cqe_enable,
1317 .disable = sdhci_cqe_disable,
1318 .dumpregs = sdhci_tegra_dumpregs,
1319 .update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
1320 .pre_enable = sdhci_tegra_cqe_pre_enable,
1321 .post_disable = sdhci_tegra_cqe_post_disable,
1324 static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
1326 struct sdhci_pltfm_host *platform = sdhci_priv(host);
1327 struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
1328 const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
1329 struct device *dev = mmc_dev(host->mmc);
1332 return dma_set_mask_and_coherent(dev, soc->dma_mask);
1337 static const struct sdhci_ops tegra_sdhci_ops = {
1338 .get_ro = tegra_sdhci_get_ro,
1339 .read_w = tegra_sdhci_readw,
1340 .write_l = tegra_sdhci_writel,
1341 .set_clock = tegra_sdhci_set_clock,
1342 .set_dma_mask = tegra_sdhci_set_dma_mask,
1343 .set_bus_width = sdhci_set_bus_width,
1344 .reset = tegra_sdhci_reset,
1345 .platform_execute_tuning = tegra_sdhci_execute_tuning,
1346 .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1347 .voltage_switch = tegra_sdhci_voltage_switch,
1348 .get_max_clock = tegra_sdhci_get_max_clock,
1351 static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
1352 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1353 SDHCI_QUIRK_SINGLE_POWER_WRITE |
1354 SDHCI_QUIRK_NO_HISPD_BIT |
1355 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1356 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1357 .ops = &tegra_sdhci_ops,
1360 static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
1361 .pdata = &sdhci_tegra20_pdata,
1362 .dma_mask = DMA_BIT_MASK(32),
1363 .nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
1364 NVQUIRK_ENABLE_BLOCK_GAP_DET,
1367 static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
1368 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1369 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1370 SDHCI_QUIRK_SINGLE_POWER_WRITE |
1371 SDHCI_QUIRK_NO_HISPD_BIT |
1372 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1373 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1374 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1375 SDHCI_QUIRK2_BROKEN_HS200 |
1377 * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
1378 * though no command operation was in progress."
1380 * The exact reason is unknown, as the same hardware seems
1381 * to support Auto CMD23 on a downstream 3.1 kernel.
1383 SDHCI_QUIRK2_ACMD23_BROKEN,
1384 .ops = &tegra_sdhci_ops,
1387 static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
1388 .pdata = &sdhci_tegra30_pdata,
1389 .dma_mask = DMA_BIT_MASK(32),
1390 .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
1391 NVQUIRK_ENABLE_SDR50 |
1392 NVQUIRK_ENABLE_SDR104 |
1393 NVQUIRK_HAS_PADCALIB,
1396 static const struct sdhci_ops tegra114_sdhci_ops = {
1397 .get_ro = tegra_sdhci_get_ro,
1398 .read_w = tegra_sdhci_readw,
1399 .write_w = tegra_sdhci_writew,
1400 .write_l = tegra_sdhci_writel,
1401 .set_clock = tegra_sdhci_set_clock,
1402 .set_dma_mask = tegra_sdhci_set_dma_mask,
1403 .set_bus_width = sdhci_set_bus_width,
1404 .reset = tegra_sdhci_reset,
1405 .platform_execute_tuning = tegra_sdhci_execute_tuning,
1406 .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1407 .voltage_switch = tegra_sdhci_voltage_switch,
1408 .get_max_clock = tegra_sdhci_get_max_clock,
1411 static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
1412 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1413 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1414 SDHCI_QUIRK_SINGLE_POWER_WRITE |
1415 SDHCI_QUIRK_NO_HISPD_BIT |
1416 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1417 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1418 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1419 .ops = &tegra114_sdhci_ops,
1422 static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
1423 .pdata = &sdhci_tegra114_pdata,
1424 .dma_mask = DMA_BIT_MASK(32),
1427 static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
1428 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1429 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1430 SDHCI_QUIRK_SINGLE_POWER_WRITE |
1431 SDHCI_QUIRK_NO_HISPD_BIT |
1432 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1433 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1434 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1435 .ops = &tegra114_sdhci_ops,
1438 static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
1439 .pdata = &sdhci_tegra124_pdata,
1440 .dma_mask = DMA_BIT_MASK(34),
1443 static const struct sdhci_ops tegra210_sdhci_ops = {
1444 .get_ro = tegra_sdhci_get_ro,
1445 .read_w = tegra_sdhci_readw,
1446 .write_w = tegra210_sdhci_writew,
1447 .write_l = tegra_sdhci_writel,
1448 .set_clock = tegra_sdhci_set_clock,
1449 .set_dma_mask = tegra_sdhci_set_dma_mask,
1450 .set_bus_width = sdhci_set_bus_width,
1451 .reset = tegra_sdhci_reset,
1452 .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1453 .voltage_switch = tegra_sdhci_voltage_switch,
1454 .get_max_clock = tegra_sdhci_get_max_clock,
1455 .set_timeout = tegra_sdhci_set_timeout,
1458 static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
1459 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1460 SDHCI_QUIRK_SINGLE_POWER_WRITE |
1461 SDHCI_QUIRK_NO_HISPD_BIT |
1462 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1463 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1464 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1465 .ops = &tegra210_sdhci_ops,
1468 static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
1469 .pdata = &sdhci_tegra210_pdata,
1470 .dma_mask = DMA_BIT_MASK(34),
1471 .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1472 NVQUIRK_HAS_PADCALIB |
1473 NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1474 NVQUIRK_ENABLE_SDR50 |
1475 NVQUIRK_ENABLE_SDR104 |
1477 .min_tap_delay = 106,
1478 .max_tap_delay = 185,
1481 static const struct sdhci_ops tegra186_sdhci_ops = {
1482 .get_ro = tegra_sdhci_get_ro,
1483 .read_w = tegra_sdhci_readw,
1484 .write_l = tegra_sdhci_writel,
1485 .set_clock = tegra_sdhci_set_clock,
1486 .set_dma_mask = tegra_sdhci_set_dma_mask,
1487 .set_bus_width = sdhci_set_bus_width,
1488 .reset = tegra_sdhci_reset,
1489 .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1490 .voltage_switch = tegra_sdhci_voltage_switch,
1491 .get_max_clock = tegra_sdhci_get_max_clock,
1492 .irq = sdhci_tegra_cqhci_irq,
1493 .set_timeout = tegra_sdhci_set_timeout,
1496 static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
1497 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1498 SDHCI_QUIRK_SINGLE_POWER_WRITE |
1499 SDHCI_QUIRK_NO_HISPD_BIT |
1500 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1501 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1502 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1503 .ops = &tegra186_sdhci_ops,
1506 static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
1507 .pdata = &sdhci_tegra186_pdata,
1508 .dma_mask = DMA_BIT_MASK(40),
1509 .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1510 NVQUIRK_HAS_PADCALIB |
1511 NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1512 NVQUIRK_ENABLE_SDR50 |
1513 NVQUIRK_ENABLE_SDR104 |
1515 NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
1516 .min_tap_delay = 84,
1517 .max_tap_delay = 136,
1520 static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
1521 .pdata = &sdhci_tegra186_pdata,
1522 .dma_mask = DMA_BIT_MASK(39),
1523 .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1524 NVQUIRK_HAS_PADCALIB |
1525 NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1526 NVQUIRK_ENABLE_SDR50 |
1527 NVQUIRK_ENABLE_SDR104 |
1529 .min_tap_delay = 96,
1530 .max_tap_delay = 139,
1533 static const struct of_device_id sdhci_tegra_dt_match[] = {
1534 { .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
1535 { .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
1536 { .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
1537 { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
1538 { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
1539 { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
1540 { .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
1543 MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
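/*
 * Register the SDHCI host. When "supports-cqe" is set in the device tree,
 * enable v4 mode and set up the CQHCI engine at its vendor base address
 * before completing host registration.
 */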
1545 static int sdhci_tegra_add_host(struct sdhci_host *host)
1547 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1548 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1549 struct cqhci_host *cq_host;
1553 if (!tegra_host->enable_hwcq)
1554 return sdhci_add_host(host);
1556 sdhci_enable_v4_mode(host);
1558 ret = sdhci_setup_host(host);
1562 host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
1564 cq_host = devm_kzalloc(mmc_dev(host->mmc),
1565 sizeof(*cq_host), GFP_KERNEL);
1571 cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
1572 cq_host->ops = &sdhci_tegra_cqhci_ops;
1574 dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
1576 cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
1578 ret = cqhci_init(cq_host, host->mmc, dma64);
1582 ret = __sdhci_add_host(host);
1589 sdhci_cleanup_host(host);
1593 static int sdhci_tegra_probe(struct platform_device *pdev)
1595 const struct of_device_id *match;
1596 const struct sdhci_tegra_soc_data *soc_data;
1597 struct sdhci_host *host;
1598 struct sdhci_pltfm_host *pltfm_host;
1599 struct sdhci_tegra *tegra_host;
1603 match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
1606 soc_data = match->data;
1608 host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
1610 return PTR_ERR(host);
1611 pltfm_host = sdhci_priv(host);
1613 tegra_host = sdhci_pltfm_priv(pltfm_host);
1614 tegra_host->ddr_signaling = false;
1615 tegra_host->pad_calib_required = false;
1616 tegra_host->pad_control_available = false;
1617 tegra_host->soc_data = soc_data;
1619 if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
1620 rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
1622 host->mmc_host_ops.start_signal_voltage_switch =
1623 sdhci_tegra_start_signal_voltage_switch;
1626 /* Hook to periodically rerun pad calibration */
1627 if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1628 host->mmc_host_ops.request = tegra_sdhci_request;
1630 host->mmc_host_ops.hs400_enhanced_strobe =
1631 tegra_sdhci_hs400_enhanced_strobe;
1633 if (!host->ops->platform_execute_tuning)
1634 host->mmc_host_ops.execute_tuning =
1635 tegra_sdhci_execute_hw_tuning;
1637 rc = mmc_of_parse(host->mmc);
1641 if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
1642 host->mmc->caps |= MMC_CAP_1_8V_DDR;
1644 /* HW busy detection is supported, but R1B responses are required. */
1645 host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
1647 tegra_sdhci_parse_dt(host);
1649 tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
1651 if (IS_ERR(tegra_host->power_gpio)) {
1652 rc = PTR_ERR(tegra_host->power_gpio);
1657 * Tegra210 has a separate SDMMC_LEGACY_TM clock used as the host
1658 * timeout clock, and SW can choose TMCLK or SDCLK for the hardware
1659 * data timeout through the USE_TMCLK_FOR_DATA_TIMEOUT bit of the
1660 * SDHCI_TEGRA_VENDOR_SYS_SW_CTRL register.
1662 * The USE_TMCLK_FOR_DATA_TIMEOUT bit defaults to 1 and SDMMC uses the
1663 * 12 MHz TMCLK, which is advertised in the host capability register.
1664 * A 12 MHz TMCLK provides a maximum data timeout period of 11 s, which is
1665 * better than using SDCLK for the data timeout.
1667 * So, TMCLK is set to 12 MHz and kept enabled all the time on SoCs
1668 * supporting a separate TMCLK.
1671 if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
1672 clk = devm_clk_get(&pdev->dev, "tmclk");
1675 if (rc == -EPROBE_DEFER)
1678 dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
1682 clk_set_rate(clk, 12000000);
1683 rc = clk_prepare_enable(clk);
1686 "failed to enable tmclk: %d\n", rc);
1690 tegra_host->tmclk = clk;
1693 clk = devm_clk_get(mmc_dev(host->mmc), NULL);
1695 rc = dev_err_probe(&pdev->dev, PTR_ERR(clk),
1696 "failed to get clock\n");
1699 clk_prepare_enable(clk);
1700 pltfm_host->clk = clk;
1702 tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
1704 if (IS_ERR(tegra_host->rst)) {
1705 rc = PTR_ERR(tegra_host->rst);
1706 dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
1710 rc = reset_control_assert(tegra_host->rst);
1714 usleep_range(2000, 4000);
1716 rc = reset_control_deassert(tegra_host->rst);
1720 usleep_range(2000, 4000);
1722 rc = sdhci_tegra_add_host(host);
1729 reset_control_assert(tegra_host->rst);
1731 clk_disable_unprepare(pltfm_host->clk);
1733 clk_disable_unprepare(tegra_host->tmclk);
1736 sdhci_pltfm_free(pdev);
1740 static int sdhci_tegra_remove(struct platform_device *pdev)
1742 struct sdhci_host *host = platform_get_drvdata(pdev);
1743 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1744 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1746 sdhci_remove_host(host, 0);
1748 reset_control_assert(tegra_host->rst);
1749 usleep_range(2000, 4000);
1750 clk_disable_unprepare(pltfm_host->clk);
1751 clk_disable_unprepare(tegra_host->tmclk);
1753 sdhci_pltfm_free(pdev);
1758 #ifdef CONFIG_PM_SLEEP
1759 static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
1761 struct sdhci_host *host = dev_get_drvdata(dev);
1762 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1765 if (host->mmc->caps2 & MMC_CAP2_CQE) {
1766 ret = cqhci_suspend(host->mmc);
1771 ret = sdhci_suspend_host(host);
1773 cqhci_resume(host->mmc);
1777 clk_disable_unprepare(pltfm_host->clk);
1781 static int __maybe_unused sdhci_tegra_resume(struct device *dev)
1783 struct sdhci_host *host = dev_get_drvdata(dev);
1784 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1787 ret = clk_prepare_enable(pltfm_host->clk);
1791 ret = sdhci_resume_host(host);
1795 if (host->mmc->caps2 & MMC_CAP2_CQE) {
1796 ret = cqhci_resume(host->mmc);
1804 sdhci_suspend_host(host);
1806 clk_disable_unprepare(pltfm_host->clk);
1811 static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
1812 sdhci_tegra_resume);
1814 static struct platform_driver sdhci_tegra_driver = {
1816 .name = "sdhci-tegra",
1817 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
1818 .of_match_table = sdhci_tegra_dt_match,
1819 .pm = &sdhci_tegra_dev_pm_ops,
1821 .probe = sdhci_tegra_probe,
1822 .remove = sdhci_tegra_remove,
1825 module_platform_driver(sdhci_tegra_driver);
1827 MODULE_DESCRIPTION("SDHCI driver for Tegra");
1828 MODULE_AUTHOR("Google, Inc.");
1829 MODULE_LICENSE("GPL v2");