// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 */

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/spinlock.h>

38 #include "t7xx_mhccif.h"
39 #include "t7xx_modem_ops.h"
41 #include "t7xx_pcie_mac.h"
43 #include "t7xx_state_monitor.h"
#define T7XX_PCI_IREG_BASE		0
#define T7XX_PCI_EREG_BASE		2

#define PM_SLEEP_DIS_TIMEOUT_MS		20
#define PM_ACK_TIMEOUT_MS		1500
#define PM_AUTOSUSPEND_MS		20000
#define PM_RESOURCE_POLL_TIMEOUT_US	10000
#define PM_RESOURCE_POLL_STEP_US	100

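/*
 * PM state kept in t7xx_dev->md_pm_state: MTK_PM_INIT after probe,
 * MTK_PM_RESUMED once the late PM init has run, MTK_PM_SUSPENDED while the
 * device is suspended, and MTK_PM_EXCEPTION once a device exception has been
 * detected.
 */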
enum t7xx_pm_state {
        MTK_PM_EXCEPTION,
        MTK_PM_INIT,		/* Device initialized, but handshake not completed */
        MTK_PM_SUSPENDED,
        MTK_PM_RESUMED,
};

static void t7xx_dev_set_sleep_capability(struct t7xx_pci_dev *t7xx_dev, bool enable)
{
        void __iomem *ctrl_reg = IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_CTRL;
        u32 value;

        value = ioread32(ctrl_reg);

        if (enable)
                value &= ~T7XX_PCIE_MISC_MAC_SLEEP_DIS;
        else
                value |= T7XX_PCIE_MISC_MAC_SLEEP_DIS;

        iowrite32(value, ctrl_reg);
}

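/*
 * Poll T7XX_PCIE_RESOURCE_STATUS until all bits in T7XX_PCIE_RESOURCE_STS_MSK
 * are set, giving up after PM_RESOURCE_POLL_TIMEOUT_US.
 */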
static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev)
{
        int ret, val;

        ret = read_poll_timeout(ioread32, val,
                                (val & T7XX_PCIE_RESOURCE_STS_MSK) == T7XX_PCIE_RESOURCE_STS_MSK,
                                PM_RESOURCE_POLL_STEP_US, PM_RESOURCE_POLL_TIMEOUT_US, true,
                                IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
        if (ret == -ETIMEDOUT)
                dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n");

        return ret;
}

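/*
 * Early PM setup: initialize the PM entity list, locks and completions, enable
 * device wakeup, keep the ASPM low-power states disabled until the modem
 * handshake completes, and configure runtime PM autosuspend.
 */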
static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
{
        struct pci_dev *pdev = t7xx_dev->pdev;

        INIT_LIST_HEAD(&t7xx_dev->md_pm_entities);
        mutex_init(&t7xx_dev->md_pm_entity_mtx);
        spin_lock_init(&t7xx_dev->md_pm_lock);
        init_completion(&t7xx_dev->sleep_lock_acquire);
        init_completion(&t7xx_dev->pm_sr_ack);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

        device_init_wakeup(&pdev->dev, true);
        dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags |
                                DPM_FLAG_NO_DIRECT_COMPLETE);

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
        pm_runtime_use_autosuspend(&pdev->dev);

        return t7xx_wait_pm_config(t7xx_dev);
}

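/*
 * Late PM setup, run once the modem handshake has completed: unmask the
 * PM-related MHCCIF interrupts, re-enable the ASPM low-power states, mark the
 * device as resumed and hand control over to runtime PM.
 */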
void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
{
        /* Enable the PCIe resource lock only after MD deep sleep is done */
        t7xx_mhccif_mask_clr(t7xx_dev,
                             D2H_INT_DS_LOCK_ACK |
                             D2H_INT_SUSPEND_ACK |
                             D2H_INT_RESUME_ACK |
                             D2H_INT_SUSPEND_ACK_AP |
                             D2H_INT_RESUME_ACK_AP);
        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);

        pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
        pm_runtime_allow(&t7xx_dev->pdev->dev);
        pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
}

static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev)
{
        /* The device is kept in the FSM re-init flow,
         * so just roll the PM setting back to the init setting.
         */
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

        pm_runtime_get_noresume(&t7xx_dev->pdev->dev);

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        return t7xx_wait_pm_config(t7xx_dev);
}

void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev)
{
        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        t7xx_wait_pm_config(t7xx_dev);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION);
}

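/*
 * Register a PM entity so its suspend/resume callbacks are invoked from the
 * driver's suspend and resume paths. Registration fails if an entity with the
 * same ID is already on the list.
 */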
int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
{
        struct md_pm_entity *entity;

        mutex_lock(&t7xx_dev->md_pm_entity_mtx);
        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->id == pm_entity->id) {
                        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
                        return -EEXIST;
                }
        }

        list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities);
        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);

        return 0;
}

int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
{
        struct md_pm_entity *entity, *tmp_entity;

        mutex_lock(&t7xx_dev->md_pm_entity_mtx);
        list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->id == pm_entity->id) {
                        list_del(&pm_entity->entity);
                        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
                        return 0;
                }
        }

        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);

        return -ENXIO;
}

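/*
 * Wait up to PM_SLEEP_DIS_TIMEOUT_MS for the sleep-lock acquisition started by
 * t7xx_pci_disable_sleep() to complete. Returns 0 on timeout, non-zero
 * (the remaining jiffies) otherwise.
 */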
int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev)
{
        struct device *dev = &t7xx_dev->pdev->dev;
        int ret;

        ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire,
                                          msecs_to_jiffies(PM_SLEEP_DIS_TIMEOUT_MS));
        if (!ret)
                dev_err_ratelimited(dev, "Resource wait complete timed out\n");

        return ret;
}

/**
 * t7xx_pci_disable_sleep() - Disable deep sleep capability.
 * @t7xx_dev: MTK device.
 *
 * Lock the deep sleep capability; note that the device may still enter the
 * deep sleep state while it is in D0, from the host's point of view.
 *
 * If the device is in deep sleep state, wake it up and disable the deep sleep
 * capability.
 */
void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
        t7xx_dev->sleep_disable_count++;
        if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
                goto unlock_and_complete;

        if (t7xx_dev->sleep_disable_count == 1) {
                u32 status;

                reinit_completion(&t7xx_dev->sleep_lock_acquire);
                t7xx_dev_set_sleep_capability(t7xx_dev, false);

                status = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
                if (status & T7XX_PCIE_RESOURCE_STS_MSK)
                        goto unlock_and_complete;

                t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DS_LOCK);
        }
        spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
        return;

unlock_and_complete:
        spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
        complete_all(&t7xx_dev->sleep_lock_acquire);
}

/**
 * t7xx_pci_enable_sleep() - Enable deep sleep capability.
 * @t7xx_dev: MTK device.
 *
 * Once deep sleep is enabled again, the device may enter the deep sleep state.
 */
void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
        t7xx_dev->sleep_disable_count--;
        if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
                goto unlock;

        if (t7xx_dev->sleep_disable_count == 0)
                t7xx_dev_set_sleep_capability(t7xx_dev, true);

unlock:
        spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
}

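/*
 * Trigger a suspend/resume request towards the device over MHCCIF and wait up
 * to PM_ACK_TIMEOUT_MS for the corresponding acknowledgment.
 */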
static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request)
{
        unsigned long wait_ret;

        reinit_completion(&t7xx_dev->pm_sr_ack);
        t7xx_mhccif_h2d_swint_trigger(t7xx_dev, request);
        wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack,
                                               msecs_to_jiffies(PM_ACK_TIMEOUT_MS));
        if (!wait_ret)
                return -ETIMEDOUT;

        return 0;
}

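/*
 * Suspend flow: disable the ASPM low-power states and wait for the PCIe
 * resources, mark the device suspended, mask the SAP RGU interrupt, run each
 * PM entity's ->suspend() callback, send the suspend requests to the MD and
 * the SAP, then run the ->suspend_late() callbacks. On any failure, roll back
 * by resuming the entities that were already suspended and restoring the
 * resumed state.
 */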
static int __t7xx_pci_pm_suspend(struct pci_dev *pdev)
{
        enum t7xx_pm_id entity_id = PM_ENTITY_ID_INVALID;
        struct t7xx_pci_dev *t7xx_dev;
        struct md_pm_entity *entity;
        int ret;

        t7xx_dev = pci_get_drvdata(pdev);
        if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
                dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n");
                return -EFAULT;
        }

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        ret = t7xx_wait_pm_config(t7xx_dev);
        if (ret) {
                iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
                return ret;
        }

        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
        t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
        t7xx_dev->rgu_pci_irq_en = false;

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (!entity->suspend)
                        continue;

                ret = entity->suspend(t7xx_dev, entity->entity_param);
                if (ret) {
                        entity_id = entity->id;
                        dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id);
                        goto abort_suspend;
                }
        }

        ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ);
        if (ret) {
                dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret);
                goto abort_suspend;
        }

        ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ_AP);
        if (ret) {
                t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
                dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret);
                goto abort_suspend;
        }

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->suspend_late)
                        entity->suspend_late(t7xx_dev, entity->entity_param);
        }

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
        return 0;

abort_suspend:
        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity_id == entity->id)
                        break;

                if (entity->resume)
                        entity->resume(t7xx_dev, entity->entity_param);
        }

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
        t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
        return ret;
}

static void t7xx_pcie_interrupt_reinit(struct t7xx_pci_dev *t7xx_dev)
{
        t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);

        /* Disable all interrupts first and let the IPs enable them */
        iowrite32(MSIX_MSK_SET_ALL, IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0);

        /* The device disables the PCIe interrupts during resume;
         * the following call re-enables them.
         */
        t7xx_pcie_mac_interrupts_en(t7xx_dev);
        t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
}

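/*
 * Re-initialize the PCIe MAC after a low-power exit: re-enable the device,
 * restore the address translation and interrupt configuration, and, for a
 * D3/L3 exit, also re-initialize MHCCIF and roll the PM state back to init.
 */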
static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3)
{
        int ret;

        ret = pcim_enable_device(t7xx_dev->pdev);
        if (ret)
                return ret;

        t7xx_pcie_mac_atr_init(t7xx_dev);
        t7xx_pcie_interrupt_reinit(t7xx_dev);

        if (is_d3) {
                t7xx_mhccif_init(t7xx_dev);
                return t7xx_pci_pm_reinit(t7xx_dev);
        }

        return 0;
}

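/*
 * Forward an FSM_CMD_STOP or FSM_CMD_START event to the modem state machine.
 * FSM_CMD_START also clears and re-arms the SAP RGU interrupt before the
 * start command is queued.
 */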
static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event)
{
        struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl;
        struct device *dev = &t7xx_dev->pdev->dev;
        int ret = -EINVAL;

        switch (event) {
        case FSM_CMD_STOP:
                ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
                break;

        case FSM_CMD_START:
                t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
                t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
                t7xx_dev->rgu_pci_irq_en = true;
                t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
                ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_START, 0);
                break;

        default:
                break;
        }

        if (ret)
                dev_err(dev, "Failure handling FSM command %u, %d\n", event, ret);

        return ret;
}

static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check)
{
        struct t7xx_pci_dev *t7xx_dev;
        struct md_pm_entity *entity;
        u32 prev_state;
        int ret = 0;

        t7xx_dev = pci_get_drvdata(pdev);
        if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
                iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
                return 0;
        }

        t7xx_pcie_mac_interrupts_en(t7xx_dev);
        prev_state = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_PM_RESUME_STATE);

        if (state_check) {
                /* For D3/L3 resume, the device could boot so quickly that the
                 * initial value of the dummy register might be overwritten.
                 * Identify new boots if the ATR source address register is not initialized.
                 */
                u32 atr_reg_val = ioread32(IREG_BASE(t7xx_dev) +
                                           ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR);
                if (prev_state == PM_RESUME_REG_STATE_L3 ||
                    (prev_state == PM_RESUME_REG_STATE_INIT &&
                     atr_reg_val == ATR_SRC_ADDR_INVALID)) {
                        ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
                        if (ret)
                                return ret;

                        ret = t7xx_pcie_reinit(t7xx_dev, true);
                        if (ret)
                                return ret;

                        t7xx_clear_rgu_irq(t7xx_dev);
                        return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START);
                }

                if (prev_state == PM_RESUME_REG_STATE_EXP ||
                    prev_state == PM_RESUME_REG_STATE_L2_EXP) {
                        if (prev_state == PM_RESUME_REG_STATE_L2_EXP) {
                                ret = t7xx_pcie_reinit(t7xx_dev, false);
                                if (ret)
                                        return ret;
                        }

                        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
                        t7xx_dev->rgu_pci_irq_en = true;
                        t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);

                        t7xx_mhccif_mask_clr(t7xx_dev,
                                             D2H_INT_EXCEPTION_INIT |
                                             D2H_INT_EXCEPTION_INIT_DONE |
                                             D2H_INT_EXCEPTION_CLEARQ_DONE |
                                             D2H_INT_EXCEPTION_ALLQ_RESET |
                                             D2H_INT_PORT_ENUM);

                        return ret;
                }

                if (prev_state == PM_RESUME_REG_STATE_L2) {
                        ret = t7xx_pcie_reinit(t7xx_dev, false);
                        if (ret)
                                return ret;

                } else if (prev_state != PM_RESUME_REG_STATE_L1 &&
                           prev_state != PM_RESUME_REG_STATE_INIT) {
                        ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
                        if (ret)
                                return ret;

                        t7xx_clear_rgu_irq(t7xx_dev);
                        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
                        return 0;
                }
        }

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        t7xx_wait_pm_config(t7xx_dev);

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->resume_early)
                        entity->resume_early(t7xx_dev, entity->entity_param);
        }

        ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
        if (ret)
                dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret);

        ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ_AP);
        if (ret)
                dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret);

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->resume) {
                        ret = entity->resume(t7xx_dev, entity->entity_param);
                        if (ret)
                                dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n",
                                        entity->id, ret);
                }
        }

        t7xx_dev->rgu_pci_irq_en = true;
        t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
        pm_runtime_mark_last_busy(&pdev->dev);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);

        return ret;
}

static int t7xx_pci_pm_resume_noirq(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct t7xx_pci_dev *t7xx_dev;

        t7xx_dev = pci_get_drvdata(pdev);
        t7xx_pcie_mac_interrupts_dis(t7xx_dev);

        return 0;
}

static void t7xx_pci_shutdown(struct pci_dev *pdev)
{
        __t7xx_pci_pm_suspend(pdev);
}

static int t7xx_pci_pm_suspend(struct device *dev)
{
        return __t7xx_pci_pm_suspend(to_pci_dev(dev));
}

static int t7xx_pci_pm_resume(struct device *dev)
{
        return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
}

static int t7xx_pci_pm_thaw(struct device *dev)
{
        return __t7xx_pci_pm_resume(to_pci_dev(dev), false);
}

static int t7xx_pci_pm_runtime_suspend(struct device *dev)
{
        return __t7xx_pci_pm_suspend(to_pci_dev(dev));
}

static int t7xx_pci_pm_runtime_resume(struct device *dev)
{
        return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
}

static const struct dev_pm_ops t7xx_pci_pm_ops = {
        .suspend = t7xx_pci_pm_suspend,
        .resume = t7xx_pci_pm_resume,
        .resume_noirq = t7xx_pci_pm_resume_noirq,
        .freeze = t7xx_pci_pm_suspend,
        .thaw = t7xx_pci_pm_thaw,
        .poweroff = t7xx_pci_pm_suspend,
        .restore = t7xx_pci_pm_resume,
        .restore_noirq = t7xx_pci_pm_resume_noirq,
        .runtime_suspend = t7xx_pci_pm_runtime_suspend,
        .runtime_resume = t7xx_pci_pm_runtime_resume
};

static int t7xx_request_irq(struct pci_dev *pdev)
{
        struct t7xx_pci_dev *t7xx_dev;
        int ret = 0, i;

        t7xx_dev = pci_get_drvdata(pdev);

        for (i = 0; i < EXT_INT_NUM; i++) {
                const char *irq_descr;
                int irq_vec;

                if (!t7xx_dev->intr_handler[i])
                        continue;

                irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
                                           dev_driver_string(&pdev->dev), i);
                if (!irq_descr) {
                        ret = -ENOMEM;
                        break;
                }

                irq_vec = pci_irq_vector(pdev, i);
                ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i],
                                           t7xx_dev->intr_thread[i], 0, irq_descr,
                                           t7xx_dev->callback_param[i]);
                if (ret) {
                        dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret);
                        break;
                }
        }

        if (ret) {
                /* Undo the IRQs requested so far */
                while (i--) {
                        if (!t7xx_dev->intr_handler[i])
                                continue;

                        free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
                }
        }

        return ret;
}

static int t7xx_setup_msix(struct t7xx_pci_dev *t7xx_dev)
{
        struct pci_dev *pdev = t7xx_dev->pdev;
        int ret;

        /* Only 6 interrupts are used, but the HW design requires a power-of-2 vector allocation */
        ret = pci_alloc_irq_vectors(pdev, EXT_INT_NUM, EXT_INT_NUM, PCI_IRQ_MSIX);
        if (ret < 0) {
                dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret);
                return ret;
        }

        ret = t7xx_request_irq(pdev);
        if (ret) {
                pci_free_irq_vectors(pdev);
                return ret;
        }

        t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);
        return 0;
}

static int t7xx_interrupt_init(struct t7xx_pci_dev *t7xx_dev)
{
        int ret, i;

        if (!t7xx_dev->pdev->msix_cap)
                return -EINVAL;

        ret = t7xx_setup_msix(t7xx_dev);
        if (ret)
                return ret;

        /* The IPs enable their interrupts when they are ready */
        for (i = 0; i < EXT_INT_NUM; i++)
                t7xx_pcie_mac_set_int(t7xx_dev, i);

        return 0;
}

static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev)
{
        t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base +
                                               INFRACFG_AO_DEV_CHIP -
                                               t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
}

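/*
 * Probe flow: map the internal and external register BARs, set the 64-bit DMA
 * masks, run the early PM init, set up the address translation, infracfg base
 * and MHCCIF, initialize the modem, and finally set up and enable the MSI-X
 * interrupts.
 */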
static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct t7xx_pci_dev *t7xx_dev;
        int ret;

        t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL);
        if (!t7xx_dev)
                return -ENOMEM;

        pci_set_drvdata(pdev, t7xx_dev);
        t7xx_dev->pdev = pdev;

        ret = pcim_enable_device(pdev);
        if (ret)
                return ret;

        pci_set_master(pdev);

        ret = pcim_iomap_regions(pdev, BIT(T7XX_PCI_IREG_BASE) | BIT(T7XX_PCI_EREG_BASE),
                                 pci_name(pdev));
        if (ret) {
                dev_err(&pdev->dev, "Could not request BARs: %d\n", ret);
                return ret;
        }

        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (ret) {
                dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret);
                return ret;
        }

        ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (ret) {
                dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret);
                return ret;
        }

        IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE];
        t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE];

        ret = t7xx_pci_pm_init(t7xx_dev);
        if (ret)
                return ret;

        t7xx_pcie_mac_atr_init(t7xx_dev);
        t7xx_pci_infracfg_ao_calc(t7xx_dev);
        t7xx_mhccif_init(t7xx_dev);

        ret = t7xx_md_init(t7xx_dev);
        if (ret)
                return ret;

        t7xx_pcie_mac_interrupts_dis(t7xx_dev);

        ret = t7xx_interrupt_init(t7xx_dev);
        if (ret) {
                t7xx_md_exit(t7xx_dev);
                return ret;
        }

        t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
        t7xx_pcie_mac_interrupts_en(t7xx_dev);

        return 0;
}

static void t7xx_pci_remove(struct pci_dev *pdev)
{
        struct t7xx_pci_dev *t7xx_dev;
        int i;

        t7xx_dev = pci_get_drvdata(pdev);
        t7xx_md_exit(t7xx_dev);

        for (i = 0; i < EXT_INT_NUM; i++) {
                if (!t7xx_dev->intr_handler[i])
                        continue;

                free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
        }

        pci_free_irq_vectors(t7xx_dev->pdev);
}

static const struct pci_device_id t7xx_pci_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x4d75) },
        { }
};
MODULE_DEVICE_TABLE(pci, t7xx_pci_table);

static struct pci_driver t7xx_pci_driver = {
        .name = "mtk_t7xx",
        .id_table = t7xx_pci_table,
        .probe = t7xx_pci_probe,
        .remove = t7xx_pci_remove,
        .driver.pm = &t7xx_pci_pm_ops,
        .shutdown = t7xx_pci_shutdown,
};

module_pci_driver(t7xx_pci_driver);

MODULE_AUTHOR("MediaTek Inc");
MODULE_DESCRIPTION("MediaTek PCIe 5G WWAN modem T7xx driver");
MODULE_LICENSE("GPL");