/* Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
22 #include <linux/pci.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/highmem.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/idr.h>
30 #include <linux/platform_device.h>
31 #include <linux/mfd/core.h>
32 #include <linux/mfd/rtsx_pci.h>
33 #include <linux/mmc/card.h>
34 #include <asm/unaligned.h>
38 static bool msi_en = true;
39 module_param(msi_en, bool, S_IRUGO | S_IWUSR);
40 MODULE_PARM_DESC(msi_en, "Enable MSI");
42 static DEFINE_IDR(rtsx_pci_idr);
43 static DEFINE_SPINLOCK(rtsx_pci_lock);
static struct mfd_cell rtsx_pcr_cells[] = {
	[RTSX_SD_CARD] = {
		.name = DRV_NAME_RTSX_PCI_SDMMC,
	},
	[RTSX_MS_CARD] = {
		.name = DRV_NAME_RTSX_PCI_MS,
	},
};
54 static const struct pci_device_id rtsx_pci_ids[] = {
55 { PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
56 { PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
57 { PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
58 { PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
59 { PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
60 { PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
61 { PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
62 { PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
63 { PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
static inline void rtsx_pci_enable_aspm(struct rtsx_pcr *pcr)
{
	rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
			0xFC, pcr->aspm_en);
}

static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
{
	rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
			0xFC, 0);
}
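/* Program a software-controlled LTR (Latency Tolerance Reporting) value:
 * the 32-bit latency is written byte by byte into MSGTXDATA0..3, then LTR
 * transmission is enabled in software latency mode via LTR_CTL.
 */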
82 int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
84 rtsx_pci_write_register(pcr, MSGTXDATA0,
85 MASK_8_BIT_DEF, (u8) (latency & 0xFF));
86 rtsx_pci_write_register(pcr, MSGTXDATA1,
87 MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
88 rtsx_pci_write_register(pcr, MSGTXDATA2,
89 MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
90 rtsx_pci_write_register(pcr, MSGTXDATA3,
91 MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
92 rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
93 LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
98 int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
100 if (pcr->ops->set_ltr_latency)
101 return pcr->ops->set_ltr_latency(pcr, latency);
103 return rtsx_comm_set_ltr_latency(pcr, latency);
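/* Switch ASPM on or off.  DEV_ASPM_DYNAMIC uses the standard PCIe Link
 * Control path, while DEV_ASPM_BACKDOOR forces the state through the
 * vendor-specific ASPM_FORCE_CTL register.
 */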
static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
{
	struct rtsx_cr_option *option = &pcr->option;

	if (pcr->aspm_enabled == enable)
		return;

	if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
		if (enable)
			rtsx_pci_enable_aspm(pcr);
		else
			rtsx_pci_disable_aspm(pcr);
	} else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
		u8 mask = FORCE_ASPM_VAL_MASK;
		u8 val = 0;

		if (enable)
			val = pcr->aspm_en;
		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
	}

	pcr->aspm_enabled = enable;
}
130 static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
132 if (pcr->ops->set_aspm)
133 pcr->ops->set_aspm(pcr, false);
135 rtsx_comm_set_aspm(pcr, false);
138 int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
140 rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
145 void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
147 if (pcr->ops->set_l1off_cfg_sub_d0)
148 pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
151 static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
153 struct rtsx_cr_option *option = &pcr->option;
155 rtsx_disable_aspm(pcr);
157 if (option->ltr_enabled)
158 rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
160 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
161 rtsx_set_l1off_sub_cfg_d0(pcr, 1);
164 void rtsx_pm_full_on(struct rtsx_pcr *pcr)
166 if (pcr->ops->full_on)
167 pcr->ops->full_on(pcr);
169 rtsx_comm_pm_full_on(pcr);
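/* Called before every card access: bring the reader back to full power,
 * restart the LED auto-blink logic if the device was idle, and (re)arm the
 * idle work so the reader drops back to power saving 200 ms after the last
 * access.
 */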
void rtsx_pci_start_run(struct rtsx_pcr *pcr)
{
	/* If pci device removed, don't queue idle work any more */
	if (pcr->remove_pci)
		return;

	if (pcr->state != PDEV_STAT_RUN) {
		pcr->state = PDEV_STAT_RUN;
		if (pcr->ops->enable_auto_blink)
			pcr->ops->enable_auto_blink(pcr);
		rtsx_pm_full_on(pcr);
	}

	mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
}
187 EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
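/* Internal (vendor) registers are accessed indirectly through the HAIMR
 * window: address, mask and data are packed into a single 32-bit write,
 * and completion is detected by polling until HAIMR_TRANS_END clears.
 */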
int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
{
	int i;
	u32 val = HAIMR_WRITE_START;

	val |= (u32)(addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	rtsx_pci_writel(pcr, RTSX_HAIMR, val);

	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
		if ((val & HAIMR_TRANS_END) == 0) {
			if (data != (u8)val)
				return -EIO;
			return 0;
		}
	}

	return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
{
	u32 val = HAIMR_READ_START;
	int i;

	val |= (u32)(addr & 0x3FFF) << 16;
	rtsx_pci_writel(pcr, RTSX_HAIMR, val);

	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
		if ((val & HAIMR_TRANS_END) == 0)
			break;
	}

	if (i >= MAX_RW_REG_CNT)
		return -ETIMEDOUT;

	if (data)
		*data = (u8)(val & 0xFF);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
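/* PHY registers are reached through a small command sequence: the data is
 * placed in PHYDATA0/1, the register address in PHYADDR, and the transfer
 * is triggered through PHYRWCTL (0x81 for a write, 0x80 for a read); the
 * driver then polls PHYRWCTL until the access finishes.
 */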
237 int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
239 int err, i, finished = 0;
242 rtsx_pci_init_cmd(pcr);
244 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA0, 0xFF, (u8)val);
245 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA1, 0xFF, (u8)(val >> 8));
246 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
247 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x81);
249 err = rtsx_pci_send_cmd(pcr, 100);
253 for (i = 0; i < 100000; i++) {
254 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
270 int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
272 if (pcr->ops->write_phy)
273 return pcr->ops->write_phy(pcr, addr, val);
275 return __rtsx_pci_write_phy_register(pcr, addr, val);
277 EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
279 int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
281 int err, i, finished = 0;
285 rtsx_pci_init_cmd(pcr);
287 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
288 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x80);
290 err = rtsx_pci_send_cmd(pcr, 100);
294 for (i = 0; i < 100000; i++) {
295 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
308 rtsx_pci_init_cmd(pcr);
310 rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA0, 0, 0);
311 rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA1, 0, 0);
313 err = rtsx_pci_send_cmd(pcr, 100);
317 ptr = rtsx_pci_get_cmd_data(pcr);
318 data = ((u16)ptr[1] << 8) | ptr[0];
326 int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
328 if (pcr->ops->read_phy)
329 return pcr->ops->read_phy(pcr, addr, val);
331 return __rtsx_pci_read_phy_register(pcr, addr, val);
333 EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
335 void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
337 rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
338 rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
340 rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
341 rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
343 EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
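/* Queue one command element in the host command buffer.  Each element is a
 * little-endian 32-bit word: bits 31:30 command type, bits 29:16 register
 * address, bits 15:8 mask and bits 7:0 data.
 */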
345 void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
346 u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
350 u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
352 val |= (u32)(cmd_type & 0x03) << 30;
353 val |= (u32)(reg_addr & 0x3FFF) << 16;
354 val |= (u32)mask << 8;
357 spin_lock_irqsave(&pcr->lock, flags);
359 if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
360 put_unaligned_le32(val, ptr);
364 spin_unlock_irqrestore(&pcr->lock, flags);
366 EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
368 void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
372 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
374 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
375 /* Hardware Auto Response */
377 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
379 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
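/* Kick off the queued command buffer and sleep until the ISR reports
 * TRANS_OK_INT/TRANS_FAIL_INT or the timeout expires; the outcome is read
 * back from pcr->trans_result under pcr->lock.
 */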
381 int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
383 struct completion trans_done;
389 spin_lock_irqsave(&pcr->lock, flags);
391 /* set up data structures for the wakeup system */
392 pcr->done = &trans_done;
393 pcr->trans_result = TRANS_NOT_READY;
394 init_completion(&trans_done);
396 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
398 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
399 /* Hardware Auto Response */
401 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
403 spin_unlock_irqrestore(&pcr->lock, flags);
405 /* Wait for TRANS_OK_INT */
406 timeleft = wait_for_completion_interruptible_timeout(
407 &trans_done, msecs_to_jiffies(timeout));
409 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
411 goto finish_send_cmd;
414 spin_lock_irqsave(&pcr->lock, flags);
415 if (pcr->trans_result == TRANS_RESULT_FAIL)
417 else if (pcr->trans_result == TRANS_RESULT_OK)
419 else if (pcr->trans_result == TRANS_NO_DEVICE)
421 spin_unlock_irqrestore(&pcr->lock, flags);
424 spin_lock_irqsave(&pcr->lock, flags);
426 spin_unlock_irqrestore(&pcr->lock, flags);
428 if ((err < 0) && (err != -ENODEV))
429 rtsx_pci_stop_cmd(pcr);
432 complete(pcr->finish_me);
436 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
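/* Append one entry to the ADMA scatter/gather table.  Each entry is a
 * little-endian 64-bit descriptor carrying the DMA address, the transfer
 * length and the option bits (SG_VALID, SG_TRANS_DATA, plus an end-of-table
 * marker on the last entry).
 */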
438 static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
439 dma_addr_t addr, unsigned int len, int end)
441 u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
443 u8 option = SG_VALID | SG_TRANS_DATA;
445 pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
449 val = ((u64)addr << 32) | ((u64)len << 12) | option;
451 put_unaligned_le64(val, ptr);
455 int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
456 int num_sg, bool read, int timeout)
460 pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
461 count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
464 pcr_dbg(pcr, "DMA mapping count: %d\n", count);
466 err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
468 rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
472 EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
474 int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
475 int num_sg, bool read)
477 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
482 if ((sglist == NULL) || (num_sg <= 0))
485 return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
487 EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
489 void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
490 int num_sg, bool read)
492 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
494 dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
496 EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
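/* Run an ADMA transfer described by an already-mapped scatterlist: build the
 * SG table, program RTSX_HDBAR/RTSX_HDBCTLR, then wait for the ISR to signal
 * completion.  DMA failures bump dma_error_count, which is later used to
 * step the SD card clock down.
 */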
498 int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
499 int count, bool read, int timeout)
501 struct completion trans_done;
502 struct scatterlist *sg;
509 u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
514 if ((sglist == NULL) || (count < 1))
517 val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
519 for_each_sg(sglist, sg, count, i) {
520 addr = sg_dma_address(sg);
521 len = sg_dma_len(sg);
522 rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
525 spin_lock_irqsave(&pcr->lock, flags);
527 pcr->done = &trans_done;
528 pcr->trans_result = TRANS_NOT_READY;
529 init_completion(&trans_done);
530 rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
531 rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
533 spin_unlock_irqrestore(&pcr->lock, flags);
535 timeleft = wait_for_completion_interruptible_timeout(
536 &trans_done, msecs_to_jiffies(timeout));
538 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
543 spin_lock_irqsave(&pcr->lock, flags);
544 if (pcr->trans_result == TRANS_RESULT_FAIL) {
546 if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
547 pcr->dma_error_count++;
550 else if (pcr->trans_result == TRANS_NO_DEVICE)
552 spin_unlock_irqrestore(&pcr->lock, flags);
555 spin_lock_irqsave(&pcr->lock, flags);
557 spin_unlock_irqrestore(&pcr->lock, flags);
559 if ((err < 0) && (err != -ENODEV))
560 rtsx_pci_stop_cmd(pcr);
563 complete(pcr->finish_me);
567 EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
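/* The ping-pong buffer is only reachable through register accesses, so it is
 * copied in batches of at most 256 one-byte READ_REG_CMD/WRITE_REG_CMD
 * commands per rtsx_pci_send_cmd() call.
 */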
569 int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
581 for (i = 0; i < buf_len / 256; i++) {
582 rtsx_pci_init_cmd(pcr);
584 for (j = 0; j < 256; j++)
585 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
587 err = rtsx_pci_send_cmd(pcr, 250);
591 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
596 rtsx_pci_init_cmd(pcr);
598 for (j = 0; j < buf_len % 256; j++)
599 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
601 err = rtsx_pci_send_cmd(pcr, 250);
606 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
610 EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
612 int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
624 for (i = 0; i < buf_len / 256; i++) {
625 rtsx_pci_init_cmd(pcr);
627 for (j = 0; j < 256; j++) {
628 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
633 err = rtsx_pci_send_cmd(pcr, 250);
639 rtsx_pci_init_cmd(pcr);
641 for (j = 0; j < buf_len % 256; j++) {
642 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
647 err = rtsx_pci_send_cmd(pcr, 250);
654 EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
656 static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
658 rtsx_pci_init_cmd(pcr);
	while (*tbl & 0xFFFF0000) {
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
				(u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
		tbl++;
	}

	return rtsx_pci_send_cmd(pcr, 100);
}
669 int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
673 if (card == RTSX_SD_CARD)
674 tbl = pcr->sd_pull_ctl_enable_tbl;
675 else if (card == RTSX_MS_CARD)
676 tbl = pcr->ms_pull_ctl_enable_tbl;
680 return rtsx_pci_set_pull_ctl(pcr, tbl);
682 EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
684 int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
688 if (card == RTSX_SD_CARD)
689 tbl = pcr->sd_pull_ctl_disable_tbl;
690 else if (card == RTSX_MS_CARD)
691 tbl = pcr->ms_pull_ctl_disable_tbl;
696 return rtsx_pci_set_pull_ctl(pcr, tbl);
698 EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
700 static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
702 pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN;
704 if (pcr->num_slots > 1)
705 pcr->bier |= MS_INT_EN;
707 /* Enable Bus Interrupt */
708 rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
710 pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
713 static inline u8 double_ssc_depth(u8 depth)
715 return ((depth > 1) ? (depth - 1) : depth);
static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
{
	if (div > CLK_DIV_1) {
		if (ssc_depth > (div - 1))
			ssc_depth -= (div - 1);
		else
			ssc_depth = SSC_DEPTH_4M;
	}

	return ssc_depth;
}
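/* Re-program the internal SSC (spread spectrum clock) generator.  The card
 * clock is derived from an N/divider pair plus an SSC modulation depth; in
 * initial mode the SD host divider is additionally set to 128 so the
 * effective card clock stays around 250 kHz.
 */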
730 int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
731 u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
734 u8 n, clk_divider, mcu_cnt, div;
735 static const u8 depth[] = {
736 [RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
737 [RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
738 [RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
739 [RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
740 [RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
	if (initial_mode) {
		/* We use 250k(around) here, in initial stage */
		clk_divider = SD_CLK_DIVIDE_128;
		card_clock = 30000000;
	} else {
		clk_divider = SD_CLK_DIVIDE_0;
	}
750 err = rtsx_pci_write_register(pcr, SD_CFG1,
751 SD_CLK_DIVIDE_MASK, clk_divider);
755 /* Reduce card clock by 20MHz each time a DMA transfer error occurs */
756 if (card_clock == UHS_SDR104_MAX_DTR &&
757 pcr->dma_error_count &&
758 PCI_PID(pcr) == RTS5227_DEVICE_ID)
759 card_clock = UHS_SDR104_MAX_DTR -
760 (pcr->dma_error_count * 20000000);
762 card_clock /= 1000000;
763 pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
766 if (!initial_mode && double_clk)
767 clk = card_clock * 2;
768 pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
769 clk, pcr->cur_clock);
771 if (clk == pcr->cur_clock)
774 if (pcr->ops->conv_clk_and_div_n)
775 n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
778 if ((clk <= 2) || (n > MAX_DIV_N_PCR))
781 mcu_cnt = (u8)(125/clk + 3);
785 /* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
787 while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
788 if (pcr->ops->conv_clk_and_div_n) {
789 int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
791 n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
798 pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
800 ssc_depth = depth[ssc_depth];
802 ssc_depth = double_ssc_depth(ssc_depth);
804 ssc_depth = revise_ssc_depth(ssc_depth, div);
805 pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
807 rtsx_pci_init_cmd(pcr);
808 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
809 CLK_LOW_FREQ, CLK_LOW_FREQ);
810 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
811 0xFF, (div << 4) | mcu_cnt);
812 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
813 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
814 SSC_DEPTH_MASK, ssc_depth);
815 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
816 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
	if (vpclk) {
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
				PHASE_NOT_RESET, 0);
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
				PHASE_NOT_RESET, PHASE_NOT_RESET);
	}
824 err = rtsx_pci_send_cmd(pcr, 2000);
	/* Wait SSC clock stable */
	udelay(SSC_CLOCK_STABLE_WAIT);
	err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
834 pcr->cur_clock = clk;
837 EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
839 int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
841 if (pcr->ops->card_power_on)
842 return pcr->ops->card_power_on(pcr, card);
846 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
848 int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
850 if (pcr->ops->card_power_off)
851 return pcr->ops->card_power_off(pcr, card);
855 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
857 int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
859 static const unsigned int cd_mask[] = {
860 [RTSX_SD_CARD] = SD_EXIST,
861 [RTSX_MS_CARD] = MS_EXIST
864 if (!(pcr->flags & PCR_MS_PMOS)) {
		/* When using single PMOS, accessing card is not permitted
		 * if the existing card is not the designated one.
		 */
		if (pcr->card_exist & (~cd_mask[card]))
			return -EIO;
874 EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
876 int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
878 if (pcr->ops->switch_output_voltage)
879 return pcr->ops->switch_output_voltage(pcr, voltage);
883 EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
885 unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
889 val = rtsx_pci_readl(pcr, RTSX_BIPR);
890 if (pcr->ops->cd_deglitch)
891 val = pcr->ops->cd_deglitch(pcr);
895 EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
897 void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
899 struct completion finish;
901 pcr->finish_me = &finish;
902 init_completion(&finish);
907 if (!pcr->remove_pci)
908 rtsx_pci_stop_cmd(pcr);
910 wait_for_completion_interruptible_timeout(&finish,
911 msecs_to_jiffies(2));
912 pcr->finish_me = NULL;
914 EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
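/* Delayed work scheduled from the ISR: it consumes the recorded insert and
 * remove bits, applies the optional card-detect deglitch hook, updates
 * pcr->card_exist and notifies the SD/MS slot drivers of the card event.
 */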
916 static void rtsx_pci_card_detect(struct work_struct *work)
918 struct delayed_work *dwork;
919 struct rtsx_pcr *pcr;
921 unsigned int card_detect = 0, card_inserted, card_removed;
924 dwork = to_delayed_work(work);
925 pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
927 pcr_dbg(pcr, "--> %s\n", __func__);
929 mutex_lock(&pcr->pcr_mutex);
930 spin_lock_irqsave(&pcr->lock, flags);
932 irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
933 pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
935 irq_status &= CARD_EXIST;
936 card_inserted = pcr->card_inserted & irq_status;
937 card_removed = pcr->card_removed;
938 pcr->card_inserted = 0;
939 pcr->card_removed = 0;
941 spin_unlock_irqrestore(&pcr->lock, flags);
943 if (card_inserted || card_removed) {
944 pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
945 card_inserted, card_removed);
947 if (pcr->ops->cd_deglitch)
948 card_inserted = pcr->ops->cd_deglitch(pcr);
950 card_detect = card_inserted | card_removed;
952 pcr->card_exist |= card_inserted;
953 pcr->card_exist &= ~card_removed;
956 mutex_unlock(&pcr->pcr_mutex);
958 if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
959 pcr->slots[RTSX_SD_CARD].card_event(
960 pcr->slots[RTSX_SD_CARD].p_dev);
961 if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
962 pcr->slots[RTSX_MS_CARD].card_event(
963 pcr->slots[RTSX_MS_CARD].p_dev);
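/* Interrupt handler: acknowledge RTSX_BIPR, record card insert/remove bits
 * for the delayed card-detect work, and complete any transfer waiting on
 * TRANS_OK_INT/TRANS_FAIL_INT.
 */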
966 static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
968 struct rtsx_pcr *pcr = dev_id;
974 spin_lock(&pcr->lock);
976 int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
977 /* Clear interrupt flag */
978 rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
979 if ((int_reg & pcr->bier) == 0) {
980 spin_unlock(&pcr->lock);
983 if (int_reg == 0xFFFFFFFF) {
984 spin_unlock(&pcr->lock);
988 int_reg &= (pcr->bier | 0x7FFFFF);
990 if (int_reg & SD_INT) {
991 if (int_reg & SD_EXIST) {
992 pcr->card_inserted |= SD_EXIST;
994 pcr->card_removed |= SD_EXIST;
995 pcr->card_inserted &= ~SD_EXIST;
997 pcr->dma_error_count = 0;
1000 if (int_reg & MS_INT) {
1001 if (int_reg & MS_EXIST) {
1002 pcr->card_inserted |= MS_EXIST;
1004 pcr->card_removed |= MS_EXIST;
1005 pcr->card_inserted &= ~MS_EXIST;
1009 if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
1010 if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
1011 pcr->trans_result = TRANS_RESULT_FAIL;
1013 complete(pcr->done);
1014 } else if (int_reg & TRANS_OK_INT) {
1015 pcr->trans_result = TRANS_RESULT_OK;
1017 complete(pcr->done);
1021 if (pcr->card_inserted || pcr->card_removed)
1022 schedule_delayed_work(&pcr->carddet_work,
1023 msecs_to_jiffies(200));
1025 spin_unlock(&pcr->lock);
1029 static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
1031 pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
1032 __func__, pcr->msi_en, pcr->pci->irq);
1034 if (request_irq(pcr->pci->irq, rtsx_pci_isr,
1035 pcr->msi_en ? 0 : IRQF_SHARED,
1036 DRV_NAME_RTSX_PCI, pcr)) {
1037 dev_err(&(pcr->pci->dev),
1038 "rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
1043 pcr->irq = pcr->pci->irq;
1044 pci_intx(pcr->pci, !pcr->msi_en);
1049 static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
1051 if (pcr->ops->set_aspm)
1052 pcr->ops->set_aspm(pcr, true);
1054 rtsx_comm_set_aspm(pcr, true);
1057 static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
1059 struct rtsx_cr_option *option = &pcr->option;
1061 if (option->ltr_enabled) {
1062 u32 latency = option->ltr_l1off_latency;
1064 if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
1065 mdelay(option->l1_snooze_delay);
1067 rtsx_set_ltr_latency(pcr, latency);
1070 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
1071 rtsx_set_l1off_sub_cfg_d0(pcr, 0);
1073 rtsx_enable_aspm(pcr);
1076 void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
1078 if (pcr->ops->power_saving)
1079 pcr->ops->power_saving(pcr);
1081 rtsx_comm_pm_power_saving(pcr);
1084 static void rtsx_pci_idle_work(struct work_struct *work)
1086 struct delayed_work *dwork = to_delayed_work(work);
1087 struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);
1089 pcr_dbg(pcr, "--> %s\n", __func__);
1091 mutex_lock(&pcr->pcr_mutex);
1093 pcr->state = PDEV_STAT_IDLE;
1095 if (pcr->ops->disable_auto_blink)
1096 pcr->ops->disable_auto_blink(pcr);
1097 if (pcr->ops->turn_off_led)
1098 pcr->ops->turn_off_led(pcr);
1100 rtsx_pm_power_saving(pcr);
1102 mutex_unlock(&pcr->pcr_mutex);
1106 static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
1108 if (pcr->ops->turn_off_led)
1109 pcr->ops->turn_off_led(pcr);
1111 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1114 rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
1115 rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
1117 if (pcr->ops->force_power_down)
1118 pcr->ops->force_power_down(pcr, pm_state);
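/* One-time controller setup shared by probe and resume: power up the SSC
 * clock, run any per-chip PHY tuning, issue the register batch below that
 * programs clocking, link state and interrupt behaviour, and finally take a
 * first snapshot of which cards are present.
 */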
1122 static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
1126 pcr->pcie_cap = pci_find_capability(pcr->pci, PCI_CAP_ID_EXP);
1127 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
1129 rtsx_pci_enable_bus_int(pcr);
1132 err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
1136 /* Wait SSC power stable */
1139 rtsx_pci_disable_aspm(pcr);
1140 if (pcr->ops->optimize_phy) {
1141 err = pcr->ops->optimize_phy(pcr);
1146 rtsx_pci_init_cmd(pcr);
1148 /* Set mcu_cnt to 7 to ensure data can be sampled properly */
1149 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
1151 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
1152 /* Disable card clock */
1153 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
1154 /* Reset delink mode */
1155 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
1156 /* Card driving select */
1157 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
1158 0xFF, pcr->card_drive_sel);
1159 /* Enable SSC Clock */
1160 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
1161 0xFF, SSC_8X_EN | SSC_SEL_4M);
1162 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
1163 /* Disable cd_pwr_save */
1164 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
1165 /* Clear Link Ready Interrupt */
1166 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
1167 LINK_RDY_INT, LINK_RDY_INT);
	/* Enlarge the estimation window of PERST# glitch
	 * to reduce the chance of invalid card interrupt
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
	/* Update RC oscillator to 400k
	 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
	/* Set interrupt write clear
	 * bit 1: U_elbi_if_rd_clr_en
	 *	1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
	 *	0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
1184 err = rtsx_pci_send_cmd(pcr, 100);
1188 switch (PCI_PID(pcr)) {
1192 rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
1198 /* Enable clk_request_n to enable clock power management */
1199 rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
1200 /* Enter L1 when host tx idle */
1201 rtsx_pci_write_config_byte(pcr, 0x70F, 0x5B);
1203 if (pcr->ops->extra_init_hw) {
1204 err = pcr->ops->extra_init_hw(pcr);
	/* No CD interrupt if probing driver with card inserted.
	 * So we need to initialize pcr->card_exist here.
	 */
	if (pcr->ops->cd_deglitch)
		pcr->card_exist = pcr->ops->cd_deglitch(pcr);
	else
		pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;

	return 0;
}
1220 static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
1224 spin_lock_init(&pcr->lock);
1225 mutex_init(&pcr->pcr_mutex);
1227 switch (PCI_PID(pcr)) {
1230 rts5209_init_params(pcr);
1234 rts5229_init_params(pcr);
1238 rtl8411_init_params(pcr);
1242 rts5227_init_params(pcr);
1246 rts522a_init_params(pcr);
1250 rts5249_init_params(pcr);
1254 rts524a_init_params(pcr);
1258 rts525a_init_params(pcr);
1262 rtl8411b_init_params(pcr);
1266 rtl8402_init_params(pcr);
1270 pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
1271 PCI_PID(pcr), pcr->ic_version);
	pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
			GFP_KERNEL);
1278 if (pcr->ops->fetch_vendor_settings)
1279 pcr->ops->fetch_vendor_settings(pcr);
1281 pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
1282 pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
1283 pcr->sd30_drive_sel_1v8);
1284 pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
1285 pcr->sd30_drive_sel_3v3);
1286 pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
1287 pcr->card_drive_sel);
1288 pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
1290 pcr->state = PDEV_STAT_IDLE;
1291 err = rtsx_pci_init_hw(pcr);
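/* PCI probe: map the BAR registers, allocate the shared command/SG DMA
 * buffer, hook up the (MSI or legacy) interrupt, initialize the chip and
 * register the SD/MS MFD cells.
 */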
1300 static int rtsx_pci_probe(struct pci_dev *pcidev,
1301 const struct pci_device_id *id)
1303 struct rtsx_pcr *pcr;
1304 struct pcr_handle *handle;
1306 int ret, i, bar = 0;
1308 dev_dbg(&(pcidev->dev),
1309 ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
1310 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
1311 (int)pcidev->revision);
1313 ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
1317 ret = pci_enable_device(pcidev);
1321 ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
1325 pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
1331 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1338 idr_preload(GFP_KERNEL);
1339 spin_lock(&rtsx_pci_lock);
1340 ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
1343 spin_unlock(&rtsx_pci_lock);
1349 dev_set_drvdata(&pcidev->dev, handle);
	if (CHK_PCI_PID(pcr, 0x525A))
		bar = 1;
1353 len = pci_resource_len(pcidev, bar);
1354 base = pci_resource_start(pcidev, bar);
1355 pcr->remap_addr = ioremap_nocache(base, len);
1356 if (!pcr->remap_addr) {
1361 pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
1362 RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
1364 if (pcr->rtsx_resv_buf == NULL) {
1368 pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
1369 pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
1370 pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
1371 pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
1373 pcr->card_inserted = 0;
1374 pcr->card_removed = 0;
1375 INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
1376 INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);
	pcr->msi_en = msi_en;
	if (pcr->msi_en) {
		ret = pci_enable_msi(pcidev);
		if (ret)
			pcr->msi_en = false;
	}
1385 ret = rtsx_pci_acquire_irq(pcr);
1389 pci_set_master(pcidev);
1390 synchronize_irq(pcr->irq);
1392 ret = rtsx_pci_init_chip(pcr);
1396 for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
1397 rtsx_pcr_cells[i].platform_data = handle;
1398 rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
1400 ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
1401 ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
1405 schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1410 free_irq(pcr->irq, (void *)pcr);
1413 pci_disable_msi(pcr->pci);
1414 dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1415 pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1417 iounmap(pcr->remap_addr);
1423 pci_release_regions(pcidev);
1425 pci_disable_device(pcidev);
1430 static void rtsx_pci_remove(struct pci_dev *pcidev)
1432 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1433 struct rtsx_pcr *pcr = handle->pcr;
1435 pcr->remove_pci = true;
1437 /* Disable interrupts at the pcr level */
1438 spin_lock_irq(&pcr->lock);
1439 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1441 spin_unlock_irq(&pcr->lock);
1443 cancel_delayed_work_sync(&pcr->carddet_work);
1444 cancel_delayed_work_sync(&pcr->idle_work);
1446 mfd_remove_devices(&pcidev->dev);
1448 dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1449 pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1450 free_irq(pcr->irq, (void *)pcr);
1452 pci_disable_msi(pcr->pci);
1453 iounmap(pcr->remap_addr);
1455 pci_release_regions(pcidev);
1456 pci_disable_device(pcidev);
1458 spin_lock(&rtsx_pci_lock);
1459 idr_remove(&rtsx_pci_idr, pcr->id);
1460 spin_unlock(&rtsx_pci_lock);
1466 dev_dbg(&(pcidev->dev),
1467 ": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
1468 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
#ifdef CONFIG_PM

static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
1475 struct pcr_handle *handle;
1476 struct rtsx_pcr *pcr;
1478 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;

	cancel_delayed_work(&pcr->carddet_work);
1484 cancel_delayed_work(&pcr->idle_work);
1486 mutex_lock(&pcr->pcr_mutex);
1488 rtsx_pci_power_off(pcr, HOST_ENTER_S3);
1490 pci_save_state(pcidev);
1491 pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
1492 pci_disable_device(pcidev);
1493 pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
1495 mutex_unlock(&pcr->pcr_mutex);
1499 static int rtsx_pci_resume(struct pci_dev *pcidev)
1501 struct pcr_handle *handle;
1502 struct rtsx_pcr *pcr;
1505 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;

	mutex_lock(&pcr->pcr_mutex);
1512 pci_set_power_state(pcidev, PCI_D0);
1513 pci_restore_state(pcidev);
1514 ret = pci_enable_device(pcidev);
1517 pci_set_master(pcidev);
1519 ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1523 ret = rtsx_pci_init_hw(pcr);
1527 schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1530 mutex_unlock(&pcr->pcr_mutex);
1534 static void rtsx_pci_shutdown(struct pci_dev *pcidev)
1536 struct pcr_handle *handle;
1537 struct rtsx_pcr *pcr;
1539 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;

	rtsx_pci_power_off(pcr, HOST_ENTER_S1);
1545 pci_disable_device(pcidev);
1546 free_irq(pcr->irq, (void *)pcr);
1548 pci_disable_msi(pcr->pci);
1551 #else /* CONFIG_PM */
1553 #define rtsx_pci_suspend NULL
1554 #define rtsx_pci_resume NULL
1555 #define rtsx_pci_shutdown NULL
1557 #endif /* CONFIG_PM */
1559 static struct pci_driver rtsx_pci_driver = {
1560 .name = DRV_NAME_RTSX_PCI,
1561 .id_table = rtsx_pci_ids,
1562 .probe = rtsx_pci_probe,
1563 .remove = rtsx_pci_remove,
1564 .suspend = rtsx_pci_suspend,
1565 .resume = rtsx_pci_resume,
	.shutdown = rtsx_pci_shutdown,
};

module_pci_driver(rtsx_pci_driver);
1570 MODULE_LICENSE("GPL");
1572 MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");