1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Driver for Realtek PCI-Express card reader
4 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. */
10 #include <linux/pci.h>
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/highmem.h>
15 #include <linux/interrupt.h>
16 #include <linux/delay.h>
17 #include <linux/idr.h>
18 #include <linux/platform_device.h>
19 #include <linux/mfd/core.h>
20 #include <linux/rtsx_pci.h>
21 #include <linux/mmc/card.h>
22 #include <asm/unaligned.h>
28 static bool msi_en = true;
29 module_param(msi_en, bool, S_IRUGO | S_IWUSR);
30 MODULE_PARM_DESC(msi_en, "Enable MSI");
32 static DEFINE_IDR(rtsx_pci_idr);
33 static DEFINE_SPINLOCK(rtsx_pci_lock);
35 static struct mfd_cell rtsx_pcr_cells[] = {
37 .name = DRV_NAME_RTSX_PCI_SDMMC,
41 static const struct pci_device_id rtsx_pci_ids[] = {
42 { PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
43 { PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
44 { PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
45 { PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
46 { PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
47 { PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
48 { PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
49 { PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
50 { PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
51 { PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
52 { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
53 { PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
54 { PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
58 MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
60 static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
62 pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
63 PCI_EXP_LNKCTL_ASPMC, 0);
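/*
 * Program a new LTR (Latency Tolerance Reporting) value: the 32-bit latency
 * is written byte-wise into MSGTXDATA0..3 and LTR transmission is then
 * enabled in software-controlled latency mode through LTR_CTL.
 */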
66 static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
68 rtsx_pci_write_register(pcr, MSGTXDATA0,
69 MASK_8_BIT_DEF, (u8) (latency & 0xFF));
70 rtsx_pci_write_register(pcr, MSGTXDATA1,
71 MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
72 rtsx_pci_write_register(pcr, MSGTXDATA2,
73 MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
74 rtsx_pci_write_register(pcr, MSGTXDATA3,
75 MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
76 rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
77 LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
82 int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
84 return rtsx_comm_set_ltr_latency(pcr, latency);
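/*
 * ASPM is toggled through the standard PCIe link-control register;
 * pcr->aspm_en carries the L0s/L1 bits selected for this chip (typically
 * filled in by the per-chip vendor-settings code).
 */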
87 static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
89 if (pcr->aspm_enabled == enable)
92 pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
94 enable ? pcr->aspm_en : 0);
96 pcr->aspm_enabled = enable;
99 static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
101 if (pcr->ops->set_aspm)
102 pcr->ops->set_aspm(pcr, false);
104 rtsx_comm_set_aspm(pcr, false);
107 int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
109 rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
114 static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
116 if (pcr->ops->set_l1off_cfg_sub_d0)
117 pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
120 static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
122 struct rtsx_cr_option *option = &pcr->option;
124 rtsx_disable_aspm(pcr);
126 /* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
129 if (option->ltr_enabled)
130 rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
132 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
133 rtsx_set_l1off_sub_cfg_d0(pcr, 1);
136 static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
138 rtsx_comm_pm_full_on(pcr);
141 void rtsx_pci_start_run(struct rtsx_pcr *pcr)
143 /* If the PCI device has been removed, don't queue idle work any more */
147 if (pcr->state != PDEV_STAT_RUN) {
148 pcr->state = PDEV_STAT_RUN;
149 if (pcr->ops->enable_auto_blink)
150 pcr->ops->enable_auto_blink(pcr);
151 rtsx_pm_full_on(pcr);
154 mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
156 EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
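/*
 * Internal registers are reached through the HAIMR mailbox: one 32-bit
 * write carries the register address, mask and data, and the controller
 * clears HAIMR_TRANS_END once the access has finished (polled up to
 * MAX_RW_REG_CNT times before giving up).
 */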
158 int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
161 u32 val = HAIMR_WRITE_START;
163 val |= (u32)(addr & 0x3FFF) << 16;
164 val |= (u32)mask << 8;
167 rtsx_pci_writel(pcr, RTSX_HAIMR, val);
169 for (i = 0; i < MAX_RW_REG_CNT; i++) {
170 val = rtsx_pci_readl(pcr, RTSX_HAIMR);
171 if ((val & HAIMR_TRANS_END) == 0) {
180 EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
182 int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
184 u32 val = HAIMR_READ_START;
187 val |= (u32)(addr & 0x3FFF) << 16;
188 rtsx_pci_writel(pcr, RTSX_HAIMR, val);
190 for (i = 0; i < MAX_RW_REG_CNT; i++) {
191 val = rtsx_pci_readl(pcr, RTSX_HAIMR);
192 if ((val & HAIMR_TRANS_END) == 0)
196 if (i >= MAX_RW_REG_CNT)
200 *data = (u8)(val & 0xFF);
204 EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
206 int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
208 int err, i, finished = 0;
211 rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
212 rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
213 rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
214 rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);
216 for (i = 0; i < 100000; i++) {
217 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
233 int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
235 if (pcr->ops->write_phy)
236 return pcr->ops->write_phy(pcr, addr, val);
238 return __rtsx_pci_write_phy_register(pcr, addr, val);
240 EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
242 int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
244 int err, i, finished = 0;
248 rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
249 rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);
251 for (i = 0; i < 100000; i++) {
252 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
265 rtsx_pci_read_register(pcr, PHYDATA0, &val1);
266 rtsx_pci_read_register(pcr, PHYDATA1, &val2);
267 data = val1 | (val2 << 8);
275 int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
277 if (pcr->ops->read_phy)
278 return pcr->ops->read_phy(pcr, addr, val);
280 return __rtsx_pci_read_phy_register(pcr, addr, val);
282 EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
284 void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
286 if (pcr->ops->stop_cmd)
287 return pcr->ops->stop_cmd(pcr);
289 rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
290 rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
292 rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
293 rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
295 EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
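/*
 * Queue one entry in the host command buffer.  Each entry is a 32-bit
 * little-endian word: bits 31:30 hold the command type, 29:16 the register
 * address, 15:8 the mask and 7:0 the data.  Entries accumulate under
 * pcr->lock until the buffer is flushed by rtsx_pci_send_cmd{,_no_wait}().
 */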
297 void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
298 u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
302 u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
304 val |= (u32)(cmd_type & 0x03) << 30;
305 val |= (u32)(reg_addr & 0x3FFF) << 16;
306 val |= (u32)mask << 8;
309 spin_lock_irqsave(&pcr->lock, flags);
311 if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
312 put_unaligned_le32(val, ptr);
316 spin_unlock_irqrestore(&pcr->lock, flags);
318 EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
320 void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
324 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
326 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
327 /* Hardware Auto Response */
329 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
331 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
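/*
 * Flush the queued command buffer and wait for it to finish.  The buffer's
 * DMA address is written to HCBAR and its length (pcr->ci * 4 bytes) plus
 * the auto-response flag to HCBCTLR; the interrupt handler then completes
 * trans_done with TRANS_RESULT_OK or TRANS_RESULT_FAIL.  Any error other
 * than -ENODEV aborts the transfer via rtsx_pci_stop_cmd().
 */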
333 int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
335 struct completion trans_done;
341 spin_lock_irqsave(&pcr->lock, flags);
343 /* set up data structures for the wakeup system */
344 pcr->done = &trans_done;
345 pcr->trans_result = TRANS_NOT_READY;
346 init_completion(&trans_done);
348 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
350 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
351 /* Hardware Auto Response */
353 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
355 spin_unlock_irqrestore(&pcr->lock, flags);
357 /* Wait for TRANS_OK_INT */
358 timeleft = wait_for_completion_interruptible_timeout(
359 &trans_done, msecs_to_jiffies(timeout));
361 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
363 goto finish_send_cmd;
366 spin_lock_irqsave(&pcr->lock, flags);
367 if (pcr->trans_result == TRANS_RESULT_FAIL)
369 else if (pcr->trans_result == TRANS_RESULT_OK)
371 else if (pcr->trans_result == TRANS_NO_DEVICE)
373 spin_unlock_irqrestore(&pcr->lock, flags);
376 spin_lock_irqsave(&pcr->lock, flags);
378 spin_unlock_irqrestore(&pcr->lock, flags);
380 if ((err < 0) && (err != -ENODEV))
381 rtsx_pci_stop_cmd(pcr);
384 complete(pcr->finish_me);
388 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
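/*
 * Append one 64-bit descriptor to the host scatter-gather table: the DMA
 * address occupies the upper 32 bits, the length and the valid/end/
 * trans-data option bits the lower 32.  RTS5261 and RTS5228 use a split
 * length encoding so that segments larger than 64 KiB can be described.
 */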
390 static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
391 dma_addr_t addr, unsigned int len, int end)
393 u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
395 u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
397 pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
400 option |= RTSX_SG_END;
402 if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
404 val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
405 | (((u64)len >> 16) << 6) | option;
407 val = ((u64)addr << 32) | ((u64)len << 16) | option;
409 val = ((u64)addr << 32) | ((u64)len << 12) | option;
411 put_unaligned_le64(val, ptr);
415 int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
416 int num_sg, bool read, int timeout)
420 pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
421 count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
424 pcr_dbg(pcr, "DMA mapping count: %d\n", count);
426 err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
428 rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
432 EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
434 int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
435 int num_sg, bool read)
437 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
442 if ((sglist == NULL) || (num_sg <= 0))
445 return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
447 EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
449 void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
450 int num_sg, bool read)
452 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
454 dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
456 EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
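/*
 * Run one ADMA transfer over an already-mapped scatterlist: build the
 * descriptor table, point HDBAR at it, kick HDBCTLR and sleep on
 * trans_done.  A failed transfer bumps dma_error_count, which
 * rtsx_pci_switch_clock() uses on some chips to step the card clock down.
 */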
458 int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
459 int count, bool read, int timeout)
461 struct completion trans_done;
462 struct scatterlist *sg;
469 u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
474 if ((sglist == NULL) || (count < 1))
477 val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
479 for_each_sg(sglist, sg, count, i) {
480 addr = sg_dma_address(sg);
481 len = sg_dma_len(sg);
482 rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
485 spin_lock_irqsave(&pcr->lock, flags);
487 pcr->done = &trans_done;
488 pcr->trans_result = TRANS_NOT_READY;
489 init_completion(&trans_done);
490 rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
491 rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
493 spin_unlock_irqrestore(&pcr->lock, flags);
495 timeleft = wait_for_completion_interruptible_timeout(
496 &trans_done, msecs_to_jiffies(timeout));
498 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
503 spin_lock_irqsave(&pcr->lock, flags);
504 if (pcr->trans_result == TRANS_RESULT_FAIL) {
506 if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
507 pcr->dma_error_count++;
510 else if (pcr->trans_result == TRANS_NO_DEVICE)
512 spin_unlock_irqrestore(&pcr->lock, flags);
515 spin_lock_irqsave(&pcr->lock, flags);
517 spin_unlock_irqrestore(&pcr->lock, flags);
519 if ((err < 0) && (err != -ENODEV))
520 rtsx_pci_stop_cmd(pcr);
523 complete(pcr->finish_me);
527 EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
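/*
 * Read the internal ping-pong buffer in 256-byte chunks: each chunk is a
 * batch of READ_REG_CMD entries sent with rtsx_pci_send_cmd() and then
 * copied out of the command result area.
 */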
529 int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
541 for (i = 0; i < buf_len / 256; i++) {
542 rtsx_pci_init_cmd(pcr);
544 for (j = 0; j < 256; j++)
545 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
547 err = rtsx_pci_send_cmd(pcr, 250);
551 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
556 rtsx_pci_init_cmd(pcr);
558 for (j = 0; j < buf_len % 256; j++)
559 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
561 err = rtsx_pci_send_cmd(pcr, 250);
566 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
570 EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
572 int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
584 for (i = 0; i < buf_len / 256; i++) {
585 rtsx_pci_init_cmd(pcr);
587 for (j = 0; j < 256; j++) {
588 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
593 err = rtsx_pci_send_cmd(pcr, 250);
599 rtsx_pci_init_cmd(pcr);
601 for (j = 0; j < buf_len % 256; j++) {
602 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
607 err = rtsx_pci_send_cmd(pcr, 250);
614 EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
616 static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
618 rtsx_pci_init_cmd(pcr);
620 while (*tbl & 0xFFFF0000) {
621 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
622 (u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
626 return rtsx_pci_send_cmd(pcr, 100);
629 int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
633 if (card == RTSX_SD_CARD)
634 tbl = pcr->sd_pull_ctl_enable_tbl;
635 else if (card == RTSX_MS_CARD)
636 tbl = pcr->ms_pull_ctl_enable_tbl;
640 return rtsx_pci_set_pull_ctl(pcr, tbl);
642 EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
644 int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
648 if (card == RTSX_SD_CARD)
649 tbl = pcr->sd_pull_ctl_disable_tbl;
650 else if (card == RTSX_MS_CARD)
651 tbl = pcr->ms_pull_ctl_disable_tbl;
655 return rtsx_pci_set_pull_ctl(pcr, tbl);
657 EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
659 static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
661 struct rtsx_hw_param *hw_param = &pcr->hw_param;
663 pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
664 | hw_param->interrupt_en;
666 if (pcr->num_slots > 1)
667 pcr->bier |= MS_INT_EN;
669 /* Enable Bus Interrupt */
670 rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
672 pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
675 static inline u8 double_ssc_depth(u8 depth)
677 return ((depth > 1) ? (depth - 1) : depth);
680 static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
682 if (div > CLK_DIV_1) {
683 if (ssc_depth > (div - 1))
684 ssc_depth -= (div - 1);
686 ssc_depth = SSC_DEPTH_4M;
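/*
 * Switch the card clock.  The requested frequency is converted into an SSC
 * divider N plus a clock divider, the SSC depth is scaled to match, and
 * CLK_CTL/CLK_DIV/SSC_CTL* are reprogrammed in a single command batch.
 * RTS5261 and RTS5228 are delegated to their chip-specific helpers.
 */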
692 int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
693 u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
696 u8 n, clk_divider, mcu_cnt, div;
697 static const u8 depth[] = {
698 [RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
699 [RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
700 [RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
701 [RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
702 [RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
705 if (PCI_PID(pcr) == PID_5261)
706 return rts5261_pci_switch_clock(pcr, card_clock,
707 ssc_depth, initial_mode, double_clk, vpclk);
708 if (PCI_PID(pcr) == PID_5228)
709 return rts5228_pci_switch_clock(pcr, card_clock,
710 ssc_depth, initial_mode, double_clk, vpclk);
713 /* Use an initial card clock of around 250 kHz */
714 clk_divider = SD_CLK_DIVIDE_128;
715 card_clock = 30000000;
717 clk_divider = SD_CLK_DIVIDE_0;
719 err = rtsx_pci_write_register(pcr, SD_CFG1,
720 SD_CLK_DIVIDE_MASK, clk_divider);
724 /* Reduce card clock by 20MHz each time a DMA transfer error occurs */
725 if (card_clock == UHS_SDR104_MAX_DTR &&
726 pcr->dma_error_count &&
727 PCI_PID(pcr) == RTS5227_DEVICE_ID)
728 card_clock = UHS_SDR104_MAX_DTR -
729 (pcr->dma_error_count * 20000000);
731 card_clock /= 1000000;
732 pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
735 if (!initial_mode && double_clk)
736 clk = card_clock * 2;
737 pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
738 clk, pcr->cur_clock);
740 if (clk == pcr->cur_clock)
743 if (pcr->ops->conv_clk_and_div_n)
744 n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
747 if ((clk <= 2) || (n > MAX_DIV_N_PCR))
750 mcu_cnt = (u8)(125/clk + 3);
754 /* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
756 while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
757 if (pcr->ops->conv_clk_and_div_n) {
758 int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
760 n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
767 pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
769 ssc_depth = depth[ssc_depth];
771 ssc_depth = double_ssc_depth(ssc_depth);
773 ssc_depth = revise_ssc_depth(ssc_depth, div);
774 pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
776 rtsx_pci_init_cmd(pcr);
777 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
778 CLK_LOW_FREQ, CLK_LOW_FREQ);
779 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
780 0xFF, (div << 4) | mcu_cnt);
781 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
782 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
783 SSC_DEPTH_MASK, ssc_depth);
784 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
785 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
787 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
789 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
790 PHASE_NOT_RESET, PHASE_NOT_RESET);
793 err = rtsx_pci_send_cmd(pcr, 2000);
797 /* Wait for the SSC clock to stabilize */
798 udelay(SSC_CLOCK_STABLE_WAIT);
799 err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
803 pcr->cur_clock = clk;
806 EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
808 int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
810 if (pcr->ops->card_power_on)
811 return pcr->ops->card_power_on(pcr, card);
815 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
817 int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
819 if (pcr->ops->card_power_off)
820 return pcr->ops->card_power_off(pcr, card);
824 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
826 int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
828 static const unsigned int cd_mask[] = {
829 [RTSX_SD_CARD] = SD_EXIST,
830 [RTSX_MS_CARD] = MS_EXIST
833 if (!(pcr->flags & PCR_MS_PMOS)) {
834 /* When using a single PMOS, accessing the card is not permitted
835 * if the existing card is not the designated one.
836 */
837 if (pcr->card_exist & (~cd_mask[card]))
843 EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
845 int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
847 if (pcr->ops->switch_output_voltage)
848 return pcr->ops->switch_output_voltage(pcr, voltage);
852 EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
854 unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
858 val = rtsx_pci_readl(pcr, RTSX_BIPR);
859 if (pcr->ops->cd_deglitch)
860 val = pcr->ops->cd_deglitch(pcr);
864 EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
866 void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
868 struct completion finish;
870 pcr->finish_me = &finish;
871 init_completion(&finish);
876 if (!pcr->remove_pci)
877 rtsx_pci_stop_cmd(pcr);
879 wait_for_completion_interruptible_timeout(&finish,
880 msecs_to_jiffies(2));
881 pcr->finish_me = NULL;
883 EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
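/*
 * Delayed work scheduled by the interrupt handler on card-detect events:
 * it latches the inserted/removed bits collected under pcr->lock, runs the
 * optional cd_deglitch hook, updates pcr->card_exist and notifies the
 * SD/MS slot drivers through their card_event callbacks.
 */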
885 static void rtsx_pci_card_detect(struct work_struct *work)
887 struct delayed_work *dwork;
888 struct rtsx_pcr *pcr;
890 unsigned int card_detect = 0, card_inserted, card_removed;
893 dwork = to_delayed_work(work);
894 pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
896 pcr_dbg(pcr, "--> %s\n", __func__);
898 mutex_lock(&pcr->pcr_mutex);
899 spin_lock_irqsave(&pcr->lock, flags);
901 irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
902 pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
904 irq_status &= CARD_EXIST;
905 card_inserted = pcr->card_inserted & irq_status;
906 card_removed = pcr->card_removed;
907 pcr->card_inserted = 0;
908 pcr->card_removed = 0;
910 spin_unlock_irqrestore(&pcr->lock, flags);
912 if (card_inserted || card_removed) {
913 pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
914 card_inserted, card_removed);
916 if (pcr->ops->cd_deglitch)
917 card_inserted = pcr->ops->cd_deglitch(pcr);
919 card_detect = card_inserted | card_removed;
921 pcr->card_exist |= card_inserted;
922 pcr->card_exist &= ~card_removed;
925 mutex_unlock(&pcr->pcr_mutex);
927 if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
928 pcr->slots[RTSX_SD_CARD].card_event(
929 pcr->slots[RTSX_SD_CARD].p_dev);
930 if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
931 pcr->slots[RTSX_MS_CARD].card_event(
932 pcr->slots[RTSX_MS_CARD].p_dev);
935 static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
937 if (pcr->ops->process_ocp) {
938 pcr->ops->process_ocp(pcr);
940 if (!pcr->option.ocp_en)
942 rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
943 if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
944 rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
945 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
946 rtsx_pci_clear_ocpstat(pcr);
952 static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
954 if (pcr->option.ocp_en)
955 rtsx_pci_process_ocp(pcr);
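/*
 * Primary interrupt handler: read and acknowledge RTSX_BIPR, note SD/MS
 * insert/remove events, complete a pending command or DMA transfer with
 * TRANS_RESULT_OK/FAIL and schedule the card-detect worker.  An all-ones
 * status is treated as the device having disappeared.
 */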
960 static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
962 struct rtsx_pcr *pcr = dev_id;
968 spin_lock(&pcr->lock);
970 int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
971 /* Clear interrupt flag */
972 rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
973 if ((int_reg & pcr->bier) == 0) {
974 spin_unlock(&pcr->lock);
977 if (int_reg == 0xFFFFFFFF) {
978 spin_unlock(&pcr->lock);
982 int_reg &= (pcr->bier | 0x7FFFFF);
984 if (int_reg & SD_OC_INT)
985 rtsx_pci_process_ocp_interrupt(pcr);
987 if (int_reg & SD_INT) {
988 if (int_reg & SD_EXIST) {
989 pcr->card_inserted |= SD_EXIST;
991 pcr->card_removed |= SD_EXIST;
992 pcr->card_inserted &= ~SD_EXIST;
994 pcr->dma_error_count = 0;
997 if (int_reg & MS_INT) {
998 if (int_reg & MS_EXIST) {
999 pcr->card_inserted |= MS_EXIST;
1001 pcr->card_removed |= MS_EXIST;
1002 pcr->card_inserted &= ~MS_EXIST;
1006 if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
1007 if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
1008 pcr->trans_result = TRANS_RESULT_FAIL;
1010 complete(pcr->done);
1011 } else if (int_reg & TRANS_OK_INT) {
1012 pcr->trans_result = TRANS_RESULT_OK;
1014 complete(pcr->done);
1018 if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
1019 schedule_delayed_work(&pcr->carddet_work,
1020 msecs_to_jiffies(200));
1022 spin_unlock(&pcr->lock);
1026 static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
1028 pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
1029 __func__, pcr->msi_en, pcr->pci->irq);
1031 if (request_irq(pcr->pci->irq, rtsx_pci_isr,
1032 pcr->msi_en ? 0 : IRQF_SHARED,
1033 DRV_NAME_RTSX_PCI, pcr)) {
1034 dev_err(&(pcr->pci->dev),
1035 "rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
1040 pcr->irq = pcr->pci->irq;
1041 pci_intx(pcr->pci, !pcr->msi_en);
1046 static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
1048 if (pcr->ops->set_aspm)
1049 pcr->ops->set_aspm(pcr, true);
1051 rtsx_comm_set_aspm(pcr, true);
1054 static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
1056 struct rtsx_cr_option *option = &pcr->option;
1058 if (option->ltr_enabled) {
1059 u32 latency = option->ltr_l1off_latency;
1061 if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
1062 mdelay(option->l1_snooze_delay);
1064 rtsx_set_ltr_latency(pcr, latency);
1067 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
1068 rtsx_set_l1off_sub_cfg_d0(pcr, 0);
1070 rtsx_enable_aspm(pcr);
1073 static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
1075 rtsx_comm_pm_power_saving(pcr);
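/*
 * Idle work, re-armed for roughly 200 ms after every access from
 * rtsx_pci_start_run(): turn off the LED and drop into power-saving mode
 * (LTR L1-off latency, L1SS power gating, ASPM).
 */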
1078 static void rtsx_pci_idle_work(struct work_struct *work)
1080 struct delayed_work *dwork = to_delayed_work(work);
1081 struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);
1083 pcr_dbg(pcr, "--> %s\n", __func__);
1085 mutex_lock(&pcr->pcr_mutex);
1087 pcr->state = PDEV_STAT_IDLE;
1089 if (pcr->ops->disable_auto_blink)
1090 pcr->ops->disable_auto_blink(pcr);
1091 if (pcr->ops->turn_off_led)
1092 pcr->ops->turn_off_led(pcr);
1094 rtsx_pm_power_saving(pcr);
1096 mutex_unlock(&pcr->pcr_mutex);
1099 static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
1101 if (pcr->ops->turn_off_led)
1102 pcr->ops->turn_off_led(pcr);
1104 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1107 rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
1108 rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
1110 if (pcr->ops->force_power_down)
1111 pcr->ops->force_power_down(pcr, pm_state);
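/*
 * Over-current protection (OCP): the generic path powers up the OC block
 * in FPDCTL and enables SD over-current interrupt/detection in REG_OCPCTL;
 * chips with special requirements override this through ops->enable_ocp.
 */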
1114 void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
1116 u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
1118 if (pcr->ops->enable_ocp) {
1119 pcr->ops->enable_ocp(pcr);
1121 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1122 rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
1127 void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
1129 u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
1131 if (pcr->ops->disable_ocp) {
1132 pcr->ops->disable_ocp(pcr);
1134 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1135 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
1140 void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
1142 if (pcr->ops->init_ocp) {
1143 pcr->ops->init_ocp(pcr);
1145 struct rtsx_cr_option *option = &(pcr->option);
1147 if (option->ocp_en) {
1148 u8 val = option->sd_800mA_ocp_thd;
1150 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1151 rtsx_pci_write_register(pcr, REG_OCPPARA1,
1152 SD_OCP_TIME_MASK, SD_OCP_TIME_800);
1153 rtsx_pci_write_register(pcr, REG_OCPPARA2,
1154 SD_OCP_THD_MASK, val);
1155 rtsx_pci_write_register(pcr, REG_OCPGLITCH,
1156 SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
1157 rtsx_pci_enable_ocp(pcr);
1160 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
1166 int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
1168 if (pcr->ops->get_ocpstat)
1169 return pcr->ops->get_ocpstat(pcr, val);
1171 return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
1174 void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
1176 if (pcr->ops->clear_ocpstat) {
1177 pcr->ops->clear_ocpstat(pcr);
1179 u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
1180 u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
1182 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
1184 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1188 void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
1192 if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
1193 rtsx_pci_read_phy_register(pcr, 0x01, &val);
1195 rtsx_pci_write_phy_register(pcr, 0x01, val);
1197 rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
1198 rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
1199 rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
1200 rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
1204 void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
1208 if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
1209 rtsx_pci_read_phy_register(pcr, 0x01, &val);
1211 rtsx_pci_write_phy_register(pcr, 0x01, val);
1213 rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
1214 rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
1218 int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
1220 rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1221 MS_CLK_EN | SD40_CLK_EN, 0);
1222 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
1223 rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
1227 rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
1232 int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
1234 rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1235 MS_CLK_EN | SD40_CLK_EN, 0);
1237 rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
1239 rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
1240 rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
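/*
 * Bring the reader into a known state.  Used by probe and resume: power up
 * the SSC block, run the optional PHY tuning hook, program the common
 * configuration registers in one command batch, set up OCP, enable
 * CLKREQ#-based clock power management and let extra_init_hw() apply
 * chip-specific settings before sampling the initial card-present state.
 */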
1245 static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
1247 struct pci_dev *pdev = pcr->pci;
1250 if (PCI_PID(pcr) == PID_5228)
1251 rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK,
1252 RTS5228_LDO1_SR_0_5);
1254 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
1256 rtsx_pci_enable_bus_int(pcr);
1259 if (PCI_PID(pcr) == PID_5261) {
1260 /* Gate the real MCU clock */
1261 err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
1262 RTS5261_MCU_CLOCK_GATING, 0);
1263 err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
1266 err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
1271 /* Wait for SSC power to stabilize */
1274 rtsx_pci_disable_aspm(pcr);
1275 if (pcr->ops->optimize_phy) {
1276 err = pcr->ops->optimize_phy(pcr);
1281 rtsx_pci_init_cmd(pcr);
1283 /* Set mcu_cnt to 7 to ensure data can be sampled properly */
1284 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
1286 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
1287 /* Disable card clock */
1288 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
1289 /* Reset delink mode */
1290 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
1291 /* Card driving select */
1292 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
1293 0xFF, pcr->card_drive_sel);
1294 /* Enable SSC Clock */
1295 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
1296 0xFF, SSC_8X_EN | SSC_SEL_4M);
1297 if (PCI_PID(pcr) == PID_5261)
1298 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1299 RTS5261_SSC_DEPTH_2M);
1300 else if (PCI_PID(pcr) == PID_5228)
1301 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1302 RTS5228_SSC_DEPTH_2M);
1304 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
1306 /* Disable cd_pwr_save */
1307 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
1308 /* Clear Link Ready Interrupt */
1309 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
1310 LINK_RDY_INT, LINK_RDY_INT);
1311 /* Enlarge the estimation window of PERST# glitch
1312 * to reduce the chance of invalid card interrupts
1313 */
1314 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
1315 /* Update RC oscillator to 400k
1316 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
1318 */
1319 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
1320 /* Set interrupt write clear
1321 * bit 1: U_elbi_if_rd_clr_en
1322 * 1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
1323 * 0: ELBI interrupt flags [31:22] & [7:0] can only be cleared by a write
1324 */
1325 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
1327 err = rtsx_pci_send_cmd(pcr, 100);
1331 switch (PCI_PID(pcr)) {
1338 rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
1345 rtsx_pci_init_ocp(pcr);
1347 /* Enable clk_request_n to enable clock power management */
1348 pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
1349 PCI_EXP_LNKCTL_CLKREQ_EN);
1351 /* Enter L1 when the host TX path is idle */
1351 pci_write_config_byte(pdev, 0x70F, 0x5B);
1353 if (pcr->ops->extra_init_hw) {
1354 err = pcr->ops->extra_init_hw(pcr);
1359 /* No CD interrupt is generated if the driver is probed with a card
1360 * already inserted, so initialize pcr->card_exist here.
1361 */
1362 if (pcr->ops->cd_deglitch)
1363 pcr->card_exist = pcr->ops->cd_deglitch(pcr);
1365 pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
1370 static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
1374 spin_lock_init(&pcr->lock);
1375 mutex_init(&pcr->pcr_mutex);
1377 switch (PCI_PID(pcr)) {
1380 rts5209_init_params(pcr);
1384 rts5229_init_params(pcr);
1388 rtl8411_init_params(pcr);
1392 rts5227_init_params(pcr);
1396 rts522a_init_params(pcr);
1400 rts5249_init_params(pcr);
1404 rts524a_init_params(pcr);
1408 rts525a_init_params(pcr);
1412 rtl8411b_init_params(pcr);
1416 rtl8402_init_params(pcr);
1420 rts5260_init_params(pcr);
1424 rts5261_init_params(pcr);
1428 rts5228_init_params(pcr);
1432 pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
1433 PCI_PID(pcr), pcr->ic_version);
1435 pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
1440 if (pcr->ops->fetch_vendor_settings)
1441 pcr->ops->fetch_vendor_settings(pcr);
1443 pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
1444 pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
1445 pcr->sd30_drive_sel_1v8);
1446 pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
1447 pcr->sd30_drive_sel_3v3);
1448 pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
1449 pcr->card_drive_sel);
1450 pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
1452 pcr->state = PDEV_STAT_IDLE;
1453 err = rtsx_pci_init_hw(pcr);
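/*
 * PCI probe: enable the device, map the register BAR (the RTS525A uses a
 * different BAR from the other chips), allocate the coherent buffer holding
 * the host command and scatter-gather tables, request MSI or a shared INTx,
 * initialize the chip and register the SD/MMC MFD cell.
 */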
1462 static int rtsx_pci_probe(struct pci_dev *pcidev,
1463 const struct pci_device_id *id)
1465 struct rtsx_pcr *pcr;
1466 struct pcr_handle *handle;
1468 int ret, i, bar = 0;
1470 dev_dbg(&(pcidev->dev),
1471 ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
1472 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
1473 (int)pcidev->revision);
1475 ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
1479 ret = pci_enable_device(pcidev);
1483 ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
1487 pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
1493 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1500 idr_preload(GFP_KERNEL);
1501 spin_lock(&rtsx_pci_lock);
1502 ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
1505 spin_unlock(&rtsx_pci_lock);
1511 dev_set_drvdata(&pcidev->dev, handle);
1513 if (CHK_PCI_PID(pcr, 0x525A))
1515 len = pci_resource_len(pcidev, bar);
1516 base = pci_resource_start(pcidev, bar);
1517 pcr->remap_addr = ioremap(base, len);
1518 if (!pcr->remap_addr) {
1523 pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
1524 RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
1526 if (pcr->rtsx_resv_buf == NULL) {
1530 pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
1531 pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
1532 pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
1533 pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
1535 pcr->card_inserted = 0;
1536 pcr->card_removed = 0;
1537 INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
1538 INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);
1540 pcr->msi_en = msi_en;
1542 ret = pci_enable_msi(pcidev);
1544 pcr->msi_en = false;
1547 ret = rtsx_pci_acquire_irq(pcr);
1551 pci_set_master(pcidev);
1552 synchronize_irq(pcr->irq);
1554 ret = rtsx_pci_init_chip(pcr);
1558 for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
1559 rtsx_pcr_cells[i].platform_data = handle;
1560 rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
1562 ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
1563 ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
1567 schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1572 free_irq(pcr->irq, (void *)pcr);
1575 pci_disable_msi(pcr->pci);
1576 dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1577 pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1579 iounmap(pcr->remap_addr);
1585 pci_release_regions(pcidev);
1587 pci_disable_device(pcidev);
1592 static void rtsx_pci_remove(struct pci_dev *pcidev)
1594 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1595 struct rtsx_pcr *pcr = handle->pcr;
1597 pcr->remove_pci = true;
1599 /* Disable interrupts at the pcr level */
1600 spin_lock_irq(&pcr->lock);
1601 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1603 spin_unlock_irq(&pcr->lock);
1605 cancel_delayed_work_sync(&pcr->carddet_work);
1606 cancel_delayed_work_sync(&pcr->idle_work);
1608 mfd_remove_devices(&pcidev->dev);
1610 dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1611 pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1612 free_irq(pcr->irq, (void *)pcr);
1614 pci_disable_msi(pcr->pci);
1615 iounmap(pcr->remap_addr);
1617 pci_release_regions(pcidev);
1618 pci_disable_device(pcidev);
1620 spin_lock(&rtsx_pci_lock);
1621 idr_remove(&rtsx_pci_idr, pcr->id);
1622 spin_unlock(&rtsx_pci_lock);
1628 dev_dbg(&(pcidev->dev),
1629 ": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
1630 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
1633 static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
1635 struct pci_dev *pcidev = to_pci_dev(dev_d);
1636 struct pcr_handle *handle;
1637 struct rtsx_pcr *pcr;
1639 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1641 handle = pci_get_drvdata(pcidev);
1644 cancel_delayed_work(&pcr->carddet_work);
1645 cancel_delayed_work(&pcr->idle_work);
1647 mutex_lock(&pcr->pcr_mutex);
1649 rtsx_pci_power_off(pcr, HOST_ENTER_S3);
1651 device_wakeup_disable(dev_d);
1653 mutex_unlock(&pcr->pcr_mutex);
1657 static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
1659 struct pci_dev *pcidev = to_pci_dev(dev_d);
1660 struct pcr_handle *handle;
1661 struct rtsx_pcr *pcr;
1664 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1666 handle = pci_get_drvdata(pcidev);
1669 mutex_lock(&pcr->pcr_mutex);
1671 ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1675 ret = rtsx_pci_init_hw(pcr);
1679 schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1682 mutex_unlock(&pcr->pcr_mutex);
1688 static void rtsx_pci_shutdown(struct pci_dev *pcidev)
1690 struct pcr_handle *handle;
1691 struct rtsx_pcr *pcr;
1693 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1695 handle = pci_get_drvdata(pcidev);
1697 rtsx_pci_power_off(pcr, HOST_ENTER_S1);
1699 pci_disable_device(pcidev);
1700 free_irq(pcr->irq, (void *)pcr);
1702 pci_disable_msi(pcr->pci);
1705 #else /* CONFIG_PM */
1707 #define rtsx_pci_shutdown NULL
1709 #endif /* CONFIG_PM */
1711 static SIMPLE_DEV_PM_OPS(rtsx_pci_pm_ops, rtsx_pci_suspend, rtsx_pci_resume);
1713 static struct pci_driver rtsx_pci_driver = {
1714 .name = DRV_NAME_RTSX_PCI,
1715 .id_table = rtsx_pci_ids,
1716 .probe = rtsx_pci_probe,
1717 .remove = rtsx_pci_remove,
1718 .driver.pm = &rtsx_pci_pm_ops,
1719 .shutdown = rtsx_pci_shutdown,
1721 module_pci_driver(rtsx_pci_driver);
1723 MODULE_LICENSE("GPL");
1725 MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");