// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <[email protected]>
 *  Ricardo Martinez <[email protected]>
 *  Sreehari Kancharla <[email protected]>
 *
 * Contributors:
 *  Amir Hanania <[email protected]>
 *  Andy Shevchenko <[email protected]>
 *  Chiranjeevi Rapolu <[email protected]>
 *  Eliot Lee <[email protected]>
 *  Moises Veleta <[email protected]>
 */

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/spinlock.h>

#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"
#include "t7xx_port_proxy.h"

#define DRIVER_NAME "mtk_t7xx"

#define T7XX_PCI_IREG_BASE              0
#define T7XX_PCI_EREG_BASE              2

#define T7XX_INIT_TIMEOUT               20
#define PM_SLEEP_DIS_TIMEOUT_MS         20
#define PM_ACK_TIMEOUT_MS               1500
#define PM_AUTOSUSPEND_MS               5000
#define PM_RESOURCE_POLL_TIMEOUT_US     10000
#define PM_RESOURCE_POLL_STEP_US        100

static const char * const t7xx_mode_names[] = {
        [T7XX_UNKNOWN] = "unknown",
        [T7XX_READY] = "ready",
        [T7XX_RESET] = "reset",
        [T7XX_FASTBOOT_SWITCHING] = "fastboot_switching",
        [T7XX_FASTBOOT_DOWNLOAD] = "fastboot_download",
        [T7XX_FASTBOOT_DUMP] = "fastboot_dump",
};

static_assert(ARRAY_SIZE(t7xx_mode_names) == T7XX_MODE_LAST);

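/*
 * t7xx_mode is exposed as a sysfs device attribute: reads report one of
 * the t7xx_mode_names strings, writes request a mode change.  For
 * illustration (<bdf> standing in for the device's PCI address):
 *
 *   echo fastboot_switching > /sys/bus/pci/devices/<bdf>/t7xx_mode
 *
 * Only "fastboot_switching" and "reset" trigger an action below; other
 * valid names are accepted but have no effect.
 */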
static ssize_t t7xx_mode_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
{
        struct t7xx_pci_dev *t7xx_dev;
        struct pci_dev *pdev;
        enum t7xx_mode mode;
        int index = 0;

        pdev = to_pci_dev(dev);
        t7xx_dev = pci_get_drvdata(pdev);
        if (!t7xx_dev)
                return -ENODEV;

        mode = READ_ONCE(t7xx_dev->mode);

        index = sysfs_match_string(t7xx_mode_names, buf);
        /* Reject strings that match no known mode */
        if (index < 0)
                return index;

        if (index == mode)
                return -EBUSY;

        if (index == T7XX_FASTBOOT_SWITCHING) {
                if (mode == T7XX_FASTBOOT_DOWNLOAD)
                        return count;

                WRITE_ONCE(t7xx_dev->mode, T7XX_FASTBOOT_SWITCHING);
                pm_runtime_resume(dev);
                t7xx_reset_device(t7xx_dev, FASTBOOT);
        } else if (index == T7XX_RESET) {
                pm_runtime_resume(dev);
                t7xx_reset_device(t7xx_dev, PLDR);
        }

        return count;
}

static ssize_t t7xx_mode_show(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
{
        enum t7xx_mode mode = T7XX_UNKNOWN;
        struct t7xx_pci_dev *t7xx_dev;
        struct pci_dev *pdev;

        pdev = to_pci_dev(dev);
        t7xx_dev = pci_get_drvdata(pdev);
        if (!t7xx_dev)
                return -ENODEV;

        mode = READ_ONCE(t7xx_dev->mode);
        if (mode < T7XX_MODE_LAST)
                return sysfs_emit(buf, "%s\n", t7xx_mode_names[mode]);

        return sysfs_emit(buf, "%s\n", t7xx_mode_names[T7XX_UNKNOWN]);
}

static DEVICE_ATTR_RW(t7xx_mode);

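/*
 * t7xx_debug_ports is a boolean sysfs attribute (kstrtobool accepts the
 * usual 0/1/y/n/on/off spellings) that shows or hides the modem's debug
 * ports through the port proxy, e.g.:
 *
 *   echo 1 > /sys/bus/pci/devices/<bdf>/t7xx_debug_ports
 */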
static ssize_t t7xx_debug_ports_store(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
{
        struct t7xx_pci_dev *t7xx_dev;
        struct pci_dev *pdev;
        bool show;
        int ret;

        pdev = to_pci_dev(dev);
        t7xx_dev = pci_get_drvdata(pdev);
        if (!t7xx_dev)
                return -ENODEV;

        ret = kstrtobool(buf, &show);
        if (ret < 0)
                return ret;

        t7xx_proxy_debug_ports_show(t7xx_dev, show);
        WRITE_ONCE(t7xx_dev->debug_ports_show, show);

        return count;
}

static ssize_t t7xx_debug_ports_show(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct t7xx_pci_dev *t7xx_dev;
        struct pci_dev *pdev;
        bool show;

        pdev = to_pci_dev(dev);
        t7xx_dev = pci_get_drvdata(pdev);
        if (!t7xx_dev)
                return -ENODEV;

        show = READ_ONCE(t7xx_dev->debug_ports_show);

        return sysfs_emit(buf, "%d\n", show);
}

static DEVICE_ATTR_RW(t7xx_debug_ports);

static struct attribute *t7xx_attr[] = {
        &dev_attr_t7xx_mode.attr,
        &dev_attr_t7xx_debug_ports.attr,
        NULL
};

static const struct attribute_group t7xx_attribute_group = {
        .attrs = t7xx_attr,
};

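/* Update the cached modem mode and wake any sysfs pollers of t7xx_mode */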
void t7xx_mode_update(struct t7xx_pci_dev *t7xx_dev, enum t7xx_mode mode)
{
        if (!t7xx_dev)
                return;

        WRITE_ONCE(t7xx_dev->mode, mode);
        sysfs_notify(&t7xx_dev->pdev->dev.kobj, NULL, "t7xx_mode");
}

enum t7xx_pm_state {
        MTK_PM_EXCEPTION,
        MTK_PM_INIT,            /* Device initialized, but handshake not completed */
        MTK_PM_SUSPENDED,
        MTK_PM_RESUMED,
};

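/*
 * enable == true clears T7XX_PCIE_MISC_MAC_SLEEP_DIS so the device may
 * enter deep sleep; enable == false sets the bit to block deep sleep.
 */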
static void t7xx_dev_set_sleep_capability(struct t7xx_pci_dev *t7xx_dev, bool enable)
{
        void __iomem *ctrl_reg = IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_CTRL;
        u32 value;

        value = ioread32(ctrl_reg);

        if (enable)
                value &= ~T7XX_PCIE_MISC_MAC_SLEEP_DIS;
        else
                value |= T7XX_PCIE_MISC_MAC_SLEEP_DIS;

        iowrite32(value, ctrl_reg);
}

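/*
 * Poll T7XX_PCIE_RESOURCE_STATUS until every bit in
 * T7XX_PCIE_RESOURCE_STS_MSK is set, giving up after
 * PM_RESOURCE_POLL_TIMEOUT_US.
 */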
static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev)
{
        int ret, val;

        ret = read_poll_timeout(ioread32, val,
                                (val & T7XX_PCIE_RESOURCE_STS_MSK) == T7XX_PCIE_RESOURCE_STS_MSK,
                                PM_RESOURCE_POLL_STEP_US, PM_RESOURCE_POLL_TIMEOUT_US, true,
                                IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
        if (ret == -ETIMEDOUT)
                dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n");

        return ret;
}

static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
{
        struct pci_dev *pdev = t7xx_dev->pdev;

        INIT_LIST_HEAD(&t7xx_dev->md_pm_entities);
        mutex_init(&t7xx_dev->md_pm_entity_mtx);
        spin_lock_init(&t7xx_dev->md_pm_lock);
        init_completion(&t7xx_dev->sleep_lock_acquire);
        init_completion(&t7xx_dev->pm_sr_ack);
        init_completion(&t7xx_dev->init_done);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

        device_init_wakeup(&pdev->dev, true);
        dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags |
                                DPM_FLAG_NO_DIRECT_COMPLETE);

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
        pm_runtime_use_autosuspend(&pdev->dev);

        return 0;
}

void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
{
        /* Enable the PCIe resource lock only after MD deep sleep is done */
        t7xx_mhccif_mask_clr(t7xx_dev,
                             D2H_INT_DS_LOCK_ACK |
                             D2H_INT_SUSPEND_ACK |
                             D2H_INT_RESUME_ACK |
                             D2H_INT_SUSPEND_ACK_AP |
                             D2H_INT_RESUME_ACK_AP);
        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);

        pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
        pm_runtime_allow(&t7xx_dev->pdev->dev);
        pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
        complete_all(&t7xx_dev->init_done);
}

static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev)
{
        /* The device is kept in FSM re-init flow
         * so just roll back PM setting to the init setting.
         */
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

        pm_runtime_get_noresume(&t7xx_dev->pdev->dev);

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        return t7xx_wait_pm_config(t7xx_dev);
}

void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev)
{
        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        t7xx_wait_pm_config(t7xx_dev);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION);
}

int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
{
        struct md_pm_entity *entity;

        mutex_lock(&t7xx_dev->md_pm_entity_mtx);
        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->id == pm_entity->id) {
                        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
                        return -EEXIST;
                }
        }

        list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities);
        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
        return 0;
}

int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
{
        struct md_pm_entity *entity, *tmp_entity;

        mutex_lock(&t7xx_dev->md_pm_entity_mtx);
        list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->id == pm_entity->id) {
                        list_del(&pm_entity->entity);
                        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
                        return 0;
                }
        }

        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);

        return -ENXIO;
}

int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev)
{
        struct device *dev = &t7xx_dev->pdev->dev;
        int ret;

        ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire,
                                          msecs_to_jiffies(PM_SLEEP_DIS_TIMEOUT_MS));
        if (!ret)
                dev_err_ratelimited(dev, "Resource wait complete timed out\n");

        return ret;
}

/**
 * t7xx_pci_disable_sleep() - Disable deep sleep capability.
 * @t7xx_dev: MTK device.
 *
 * Lock the deep sleep capability. Note that even while the device is in the
 * D0 state from the host's point of view, it may still autonomously enter
 * deep sleep.
 *
 * If the device is in deep sleep state, wake it up and disable the deep
 * sleep capability.
 */
void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
        t7xx_dev->sleep_disable_count++;
        if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
                goto unlock_and_complete;

        if (t7xx_dev->sleep_disable_count == 1) {
                u32 status;

                reinit_completion(&t7xx_dev->sleep_lock_acquire);
                t7xx_dev_set_sleep_capability(t7xx_dev, false);

                status = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
                if (status & T7XX_PCIE_RESOURCE_STS_MSK)
                        goto unlock_and_complete;

                t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DS_LOCK);
        }
        spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
        return;

unlock_and_complete:
        spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
        complete_all(&t7xx_dev->sleep_lock_acquire);
}

/**
 * t7xx_pci_enable_sleep() - Enable deep sleep capability.
 * @t7xx_dev: MTK device.
 *
 * After enabling deep sleep, the device can enter the deep sleep state.
 */
void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
        t7xx_dev->sleep_disable_count--;
        if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
                goto unlock;

        if (t7xx_dev->sleep_disable_count == 0)
                t7xx_dev_set_sleep_capability(t7xx_dev, true);

unlock:
        spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
}

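/*
 * Trigger an H2D MHCCIF software interrupt carrying @request and wait up
 * to PM_ACK_TIMEOUT_MS for pm_sr_ack to be completed (expected from the
 * D2H ACK interrupts unmasked in t7xx_pci_pm_init_late()).
 */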
static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request)
{
        unsigned long wait_ret;

        reinit_completion(&t7xx_dev->pm_sr_ack);
        t7xx_mhccif_h2d_swint_trigger(t7xx_dev, request);
        wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack,
                                               msecs_to_jiffies(PM_ACK_TIMEOUT_MS));
        if (!wait_ret)
                return -ETIMEDOUT;

        return 0;
}

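/*
 * Common suspend path for system sleep and runtime PM: block deep sleep,
 * wait for PCIe resources, suspend every registered PM entity, then send
 * the MD and SAP suspend requests and run the suspend_late hooks.  On
 * failure, entities suspended so far are resumed again.
 */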
static int __t7xx_pci_pm_suspend(struct pci_dev *pdev)
{
        enum t7xx_pm_id entity_id = PM_ENTITY_ID_INVALID;
        struct t7xx_pci_dev *t7xx_dev;
        struct md_pm_entity *entity;
        int ret;

        t7xx_dev = pci_get_drvdata(pdev);
        if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT ||
            READ_ONCE(t7xx_dev->mode) != T7XX_READY) {
                dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n");
                return -EFAULT;
        }

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        ret = t7xx_wait_pm_config(t7xx_dev);
        if (ret) {
                iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
                return ret;
        }

        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
        t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
        t7xx_dev->rgu_pci_irq_en = false;

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (!entity->suspend)
                        continue;

                ret = entity->suspend(t7xx_dev, entity->entity_param);
                if (ret) {
                        entity_id = entity->id;
                        dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id);
                        goto abort_suspend;
                }
        }

        ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ);
        if (ret) {
                dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret);
                goto abort_suspend;
        }

        ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ_AP);
        if (ret) {
                t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
                dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret);
                goto abort_suspend;
        }

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->suspend_late)
                        entity->suspend_late(t7xx_dev, entity->entity_param);
        }

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
        return 0;

abort_suspend:
        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity_id == entity->id)
                        break;

                if (entity->resume)
                        entity->resume(t7xx_dev, entity->entity_param);
        }

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
        t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
        return ret;
}

static void t7xx_pcie_interrupt_reinit(struct t7xx_pci_dev *t7xx_dev)
{
        t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);

        /* Disable interrupts first and let the IPs enable them */
        iowrite32(MSIX_MSK_SET_ALL, IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0);

        /* The device disables PCIe interrupts during resume;
         * the following call re-enables them.
         */
        t7xx_pcie_mac_interrupts_en(t7xx_dev);
        t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
}

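/*
 * Re-enable the PCI device and restore address translation and interrupt
 * routing after a reset.  A D3/L3 exit (device cold boot) additionally
 * requires MHCCIF and PM state to be reinitialized.
 */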
static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3)
{
        int ret;

        ret = pcim_enable_device(t7xx_dev->pdev);
        if (ret)
                return ret;

        t7xx_pcie_mac_atr_init(t7xx_dev);
        t7xx_pcie_interrupt_reinit(t7xx_dev);

        if (is_d3) {
                t7xx_mhccif_init(t7xx_dev);
                t7xx_pci_pm_reinit(t7xx_dev);
        }

        return 0;
}

static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event)
{
        struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl;
        struct device *dev = &t7xx_dev->pdev->dev;
        int ret = -EINVAL;

        switch (event) {
        case FSM_CMD_STOP:
                ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
                break;

        case FSM_CMD_START:
                t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
                t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
                t7xx_dev->rgu_pci_irq_en = true;
                t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
                ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_START, 0);
                break;

        default:
                break;
        }

        if (ret)
                dev_err(dev, "Failure handling FSM command %u, %d\n", event, ret);

        return ret;
}

int t7xx_pci_reprobe_early(struct t7xx_pci_dev *t7xx_dev)
{
        enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
        int ret;

        if (mode == T7XX_FASTBOOT_DOWNLOAD)
                pm_runtime_put_noidle(&t7xx_dev->pdev->dev);

        ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
        if (ret)
                return ret;

        return 0;
}

int t7xx_pci_reprobe(struct t7xx_pci_dev *t7xx_dev, bool boot)
{
        int ret;

        ret = t7xx_pcie_reinit(t7xx_dev, boot);
        if (ret)
                return ret;

        t7xx_clear_rgu_irq(t7xx_dev);
        return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START);
}

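/*
 * Common resume path.  With @state_check set, the T7XX_PCIE_PM_RESUME_STATE
 * dummy register is used to classify the wake (L1/L2/L3, exception, or a
 * fresh device boot) and the matching recovery path is taken before the
 * resume hooks and H2D resume requests are replayed.
 */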
static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check)
{
        struct t7xx_pci_dev *t7xx_dev;
        struct md_pm_entity *entity;
        u32 prev_state;
        int ret = 0;

        t7xx_dev = pci_get_drvdata(pdev);
        if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
                iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
                return 0;
        }

        t7xx_pcie_mac_interrupts_en(t7xx_dev);
        prev_state = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_PM_RESUME_STATE);

        if (state_check) {
                /* For D3/L3 resume, the device could boot so quickly that the
                 * initial value of the dummy register might be overwritten.
                 * Identify new boots if the ATR source address register is not initialized.
                 */
                u32 atr_reg_val = ioread32(IREG_BASE(t7xx_dev) +
                                           ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR);
                if (prev_state == PM_RESUME_REG_STATE_L3 ||
                    (prev_state == PM_RESUME_REG_STATE_INIT &&
                     atr_reg_val == ATR_SRC_ADDR_INVALID)) {
                        ret = t7xx_pci_reprobe_early(t7xx_dev);
                        if (ret)
                                return ret;

                        return t7xx_pci_reprobe(t7xx_dev, true);
                }

                if (prev_state == PM_RESUME_REG_STATE_EXP ||
                    prev_state == PM_RESUME_REG_STATE_L2_EXP) {
                        if (prev_state == PM_RESUME_REG_STATE_L2_EXP) {
                                ret = t7xx_pcie_reinit(t7xx_dev, false);
                                if (ret)
                                        return ret;
                        }

                        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
                        t7xx_dev->rgu_pci_irq_en = true;
                        t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);

                        t7xx_mhccif_mask_clr(t7xx_dev,
                                             D2H_INT_EXCEPTION_INIT |
                                             D2H_INT_EXCEPTION_INIT_DONE |
                                             D2H_INT_EXCEPTION_CLEARQ_DONE |
                                             D2H_INT_EXCEPTION_ALLQ_RESET |
                                             D2H_INT_PORT_ENUM);

                        return ret;
                }

                if (prev_state == PM_RESUME_REG_STATE_L2) {
                        ret = t7xx_pcie_reinit(t7xx_dev, false);
                        if (ret)
                                return ret;

                } else if (prev_state != PM_RESUME_REG_STATE_L1 &&
                           prev_state != PM_RESUME_REG_STATE_INIT) {
                        ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
                        if (ret)
                                return ret;

                        t7xx_clear_rgu_irq(t7xx_dev);
                        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
                        return 0;
                }
        }

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        t7xx_wait_pm_config(t7xx_dev);

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->resume_early)
                        entity->resume_early(t7xx_dev, entity->entity_param);
        }

        ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
        if (ret)
                dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret);

        ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ_AP);
        if (ret)
                dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret);

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->resume) {
                        ret = entity->resume(t7xx_dev, entity->entity_param);
                        if (ret)
                                dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n",
                                        entity->id, ret);
                }
        }

        t7xx_dev->rgu_pci_irq_en = true;
        t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
        pm_runtime_mark_last_busy(&pdev->dev);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);

        return ret;
}

static int t7xx_pci_pm_resume_noirq(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct t7xx_pci_dev *t7xx_dev;

        t7xx_dev = pci_get_drvdata(pdev);
        t7xx_pcie_mac_interrupts_dis(t7xx_dev);

        return 0;
}

static void t7xx_pci_shutdown(struct pci_dev *pdev)
{
        __t7xx_pci_pm_suspend(pdev);
}

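/*
 * Refuse system sleep until late initialization has finished, i.e. until
 * t7xx_pci_pm_init_late() has completed init_done, so the device is never
 * suspended in the middle of the boot handshake.
 */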
static int t7xx_pci_pm_prepare(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct t7xx_pci_dev *t7xx_dev;

        t7xx_dev = pci_get_drvdata(pdev);
        if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) {
                dev_warn(dev, "Not ready for system sleep.\n");
                return -ETIMEDOUT;
        }

        return 0;
}

static int t7xx_pci_pm_suspend(struct device *dev)
{
        return __t7xx_pci_pm_suspend(to_pci_dev(dev));
}

static int t7xx_pci_pm_resume(struct device *dev)
{
        return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
}

static int t7xx_pci_pm_thaw(struct device *dev)
{
        return __t7xx_pci_pm_resume(to_pci_dev(dev), false);
}

static int t7xx_pci_pm_runtime_suspend(struct device *dev)
{
        return __t7xx_pci_pm_suspend(to_pci_dev(dev));
}

static int t7xx_pci_pm_runtime_resume(struct device *dev)
{
        return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
}

static const struct dev_pm_ops t7xx_pci_pm_ops = {
        .prepare = t7xx_pci_pm_prepare,
        .suspend = t7xx_pci_pm_suspend,
        .resume = t7xx_pci_pm_resume,
        .resume_noirq = t7xx_pci_pm_resume_noirq,
        .freeze = t7xx_pci_pm_suspend,
        .thaw = t7xx_pci_pm_thaw,
        .poweroff = t7xx_pci_pm_suspend,
        .restore = t7xx_pci_pm_resume,
        .restore_noirq = t7xx_pci_pm_resume_noirq,
        .runtime_suspend = t7xx_pci_pm_runtime_suspend,
        .runtime_resume = t7xx_pci_pm_runtime_resume,
};

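/*
 * Request a threaded IRQ for every populated handler slot.  If any
 * request fails, the vectors acquired so far are freed so the caller can
 * release the MSI-X allocation cleanly.
 */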
static int t7xx_request_irq(struct pci_dev *pdev)
{
        struct t7xx_pci_dev *t7xx_dev;
        int ret = 0, i;

        t7xx_dev = pci_get_drvdata(pdev);

        for (i = 0; i < EXT_INT_NUM; i++) {
                const char *irq_descr;
                int irq_vec;

                if (!t7xx_dev->intr_handler[i])
                        continue;

                irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
                                           dev_driver_string(&pdev->dev), i);
                if (!irq_descr) {
                        ret = -ENOMEM;
                        break;
                }

                irq_vec = pci_irq_vector(pdev, i);
                ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i],
                                           t7xx_dev->intr_thread[i], 0, irq_descr,
                                           t7xx_dev->callback_param[i]);
                if (ret) {
                        dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret);
                        break;
                }
        }

        if (ret) {
                while (i--) {
                        if (!t7xx_dev->intr_handler[i])
                                continue;

                        free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
                }
        }

        return ret;
}

static int t7xx_setup_msix(struct t7xx_pci_dev *t7xx_dev)
{
        struct pci_dev *pdev = t7xx_dev->pdev;
        int ret;

        /* Only using 6 interrupts, but HW-design requires power-of-2 IRQs allocation */
        ret = pci_alloc_irq_vectors(pdev, EXT_INT_NUM, EXT_INT_NUM, PCI_IRQ_MSIX);
        if (ret < 0) {
                dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret);
                return ret;
        }

        ret = t7xx_request_irq(pdev);
        if (ret) {
                pci_free_irq_vectors(pdev);
                return ret;
        }

        t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);
        return 0;
}

static int t7xx_interrupt_init(struct t7xx_pci_dev *t7xx_dev)
{
        int ret, i;

        if (!t7xx_dev->pdev->msix_cap)
                return -EINVAL;

        ret = t7xx_setup_msix(t7xx_dev);
        if (ret)
                return ret;

        /* IPs enable interrupts when ready */
        for (i = 0; i < EXT_INT_NUM; i++)
                t7xx_pcie_mac_set_int(t7xx_dev, i);

        return 0;
}

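/*
 * Compute the host virtual address of the infracfg_ao block: its fixed
 * device-side address (INFRACFG_AO_DEV_CHIP) is taken relative to the
 * translated base of the external register window and added to that
 * window's BAR mapping.
 */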
static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev)
{
        t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base +
                                               INFRACFG_AO_DEV_CHIP -
                                               t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
}

static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct t7xx_pci_dev *t7xx_dev;
        void __iomem *iomem;
        int ret;

        t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL);
        if (!t7xx_dev)
                return -ENOMEM;

        pci_set_drvdata(pdev, t7xx_dev);
        t7xx_dev->pdev = pdev;

        ret = pcim_enable_device(pdev);
        if (ret)
                return ret;

        pci_set_master(pdev);

        iomem = pcim_iomap_region(pdev, T7XX_PCI_IREG_BASE, DRIVER_NAME);
        ret = PTR_ERR_OR_ZERO(iomem);
        if (ret) {
                dev_err(&pdev->dev, "Could not request IREG BAR: %d\n", ret);
                return -ENOMEM;
        }
        IREG_BASE(t7xx_dev) = iomem;

        iomem = pcim_iomap_region(pdev, T7XX_PCI_EREG_BASE, DRIVER_NAME);
        ret = PTR_ERR_OR_ZERO(iomem);
        if (ret) {
                dev_err(&pdev->dev, "Could not request EREG BAR: %d\n", ret);
                return -ENOMEM;
        }
        t7xx_dev->base_addr.pcie_ext_reg_base = iomem;

        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (ret) {
                dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret);
                return ret;
        }

        ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (ret) {
                dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret);
                return ret;
        }

        ret = t7xx_pci_pm_init(t7xx_dev);
        if (ret)
                return ret;

        t7xx_pcie_mac_atr_init(t7xx_dev);
        t7xx_pci_infracfg_ao_calc(t7xx_dev);
        t7xx_mhccif_init(t7xx_dev);

        ret = t7xx_md_init(t7xx_dev);
        if (ret)
                return ret;

        t7xx_pcie_mac_interrupts_dis(t7xx_dev);

        ret = sysfs_create_group(&t7xx_dev->pdev->dev.kobj,
                                 &t7xx_attribute_group);
        if (ret)
                goto err_md_exit;

        ret = t7xx_interrupt_init(t7xx_dev);
        if (ret)
                goto err_remove_group;

        t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
        t7xx_pcie_mac_interrupts_en(t7xx_dev);

        return 0;

err_remove_group:
        sysfs_remove_group(&t7xx_dev->pdev->dev.kobj,
                           &t7xx_attribute_group);

err_md_exit:
        t7xx_md_exit(t7xx_dev);
        return ret;
}

static void t7xx_pci_remove(struct pci_dev *pdev)
{
        struct t7xx_pci_dev *t7xx_dev;
        int i;

        t7xx_dev = pci_get_drvdata(pdev);

        sysfs_remove_group(&t7xx_dev->pdev->dev.kobj,
                           &t7xx_attribute_group);
        t7xx_md_exit(t7xx_dev);

        for (i = 0; i < EXT_INT_NUM; i++) {
                if (!t7xx_dev->intr_handler[i])
                        continue;

                free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
        }

        pci_free_irq_vectors(t7xx_dev->pdev);
}

static const struct pci_device_id t7xx_pci_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x4d75) },
        { PCI_DEVICE(0x14c0, 0x4d75) }, /* Dell DW5933e */
        { }
};
MODULE_DEVICE_TABLE(pci, t7xx_pci_table);

static struct pci_driver t7xx_pci_driver = {
        .name = DRIVER_NAME,
        .id_table = t7xx_pci_table,
        .probe = t7xx_pci_probe,
        .remove = t7xx_pci_remove,
        .driver.pm = &t7xx_pci_pm_ops,
        .shutdown = t7xx_pci_shutdown,
};

module_pci_driver(t7xx_pci_driver);

MODULE_AUTHOR("MediaTek Inc");
MODULE_DESCRIPTION("MediaTek PCIe 5G WWAN modem T7xx driver");
MODULE_LICENSE("GPL");