// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <[email protected]>
 *  Ricardo Martinez <[email protected]>
 *  Sreehari Kancharla <[email protected]>
 *
 * Contributors:
 *  Amir Hanania <[email protected]>
 *  Andy Shevchenko <[email protected]>
 *  Chiranjeevi Rapolu <[email protected]>
 *  Eliot Lee <[email protected]>
 *  Moises Veleta <[email protected]>
 */

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/spinlock.h>

#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define T7XX_PCI_IREG_BASE              0
#define T7XX_PCI_EREG_BASE              2

#define T7XX_INIT_TIMEOUT               20
#define PM_SLEEP_DIS_TIMEOUT_MS         20
#define PM_ACK_TIMEOUT_MS               1500
#define PM_AUTOSUSPEND_MS               20000
#define PM_RESOURCE_POLL_TIMEOUT_US     10000
#define PM_RESOURCE_POLL_STEP_US        100

static const char * const t7xx_mode_names[] = {
        [T7XX_UNKNOWN] = "unknown",
        [T7XX_READY] = "ready",
        [T7XX_RESET] = "reset",
        [T7XX_FASTBOOT_SWITCHING] = "fastboot_switching",
        [T7XX_FASTBOOT_DOWNLOAD] = "fastboot_download",
        [T7XX_FASTBOOT_DUMP] = "fastboot_dump",
};

static_assert(ARRAY_SIZE(t7xx_mode_names) == T7XX_MODE_LAST);

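/* Sysfs store handler for the operating mode. Only the "fastboot_switching"
 * and "reset" transitions are accepted; a "reset" additionally triggers an
 * ACPI platform-level device reset (PLDR). Any other input is silently
 * ignored and the full count is returned.
 */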
static ssize_t t7xx_mode_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
{
        struct t7xx_pci_dev *t7xx_dev;
        struct pci_dev *pdev;
        int index = 0;

        pdev = to_pci_dev(dev);
        t7xx_dev = pci_get_drvdata(pdev);
        if (!t7xx_dev)
                return -ENODEV;

        index = sysfs_match_string(t7xx_mode_names, buf);
        if (index == T7XX_FASTBOOT_SWITCHING) {
                WRITE_ONCE(t7xx_dev->mode, T7XX_FASTBOOT_SWITCHING);
        } else if (index == T7XX_RESET) {
                WRITE_ONCE(t7xx_dev->mode, T7XX_RESET);
                t7xx_acpi_pldr_func(t7xx_dev);
        }

        return count;
}

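/* Sysfs show handler: report the current operating mode as a string. */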
static ssize_t t7xx_mode_show(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
{
        enum t7xx_mode mode = T7XX_UNKNOWN;
        struct t7xx_pci_dev *t7xx_dev;
        struct pci_dev *pdev;

        pdev = to_pci_dev(dev);
        t7xx_dev = pci_get_drvdata(pdev);
        if (!t7xx_dev)
                return -ENODEV;

        mode = READ_ONCE(t7xx_dev->mode);
        if (mode < T7XX_MODE_LAST)
                return sysfs_emit(buf, "%s\n", t7xx_mode_names[mode]);

        return sysfs_emit(buf, "%s\n", t7xx_mode_names[T7XX_UNKNOWN]);
}

static DEVICE_ATTR_RW(t7xx_mode);

static struct attribute *t7xx_mode_attr[] = {
        &dev_attr_t7xx_mode.attr,
        NULL
};

static const struct attribute_group t7xx_mode_attribute_group = {
        .attrs = t7xx_mode_attr,
};

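/* Update the cached operating mode and notify sysfs pollers of the change. */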
void t7xx_mode_update(struct t7xx_pci_dev *t7xx_dev, enum t7xx_mode mode)
{
        if (!t7xx_dev)
                return;

        WRITE_ONCE(t7xx_dev->mode, mode);
        sysfs_notify(&t7xx_dev->pdev->dev.kobj, NULL, "t7xx_mode");
}

enum t7xx_pm_state {
        MTK_PM_EXCEPTION,
        MTK_PM_INIT,            /* Device initialized, but handshake not completed */
        MTK_PM_SUSPENDED,
        MTK_PM_RESUMED,
};

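/* Set or clear the MAC sleep-disable bit that gates the device's deep sleep
 * capability.
 */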
static void t7xx_dev_set_sleep_capability(struct t7xx_pci_dev *t7xx_dev, bool enable)
{
        void __iomem *ctrl_reg = IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_CTRL;
        u32 value;

        value = ioread32(ctrl_reg);

        if (enable)
                value &= ~T7XX_PCIE_MISC_MAC_SLEEP_DIS;
        else
                value |= T7XX_PCIE_MISC_MAC_SLEEP_DIS;

        iowrite32(value, ctrl_reg);
}

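/* Poll the PCIe resource status register until all resource bits are set,
 * or time out after PM_RESOURCE_POLL_TIMEOUT_US.
 */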
static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev)
{
        int ret, val;

        ret = read_poll_timeout(ioread32, val,
                                (val & T7XX_PCIE_RESOURCE_STS_MSK) == T7XX_PCIE_RESOURCE_STS_MSK,
                                PM_RESOURCE_POLL_STEP_US, PM_RESOURCE_POLL_TIMEOUT_US, true,
                                IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
        if (ret == -ETIMEDOUT)
                dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n");

        return ret;
}

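/* Initialize PM bookkeeping: the entity list, locks and completions, wakeup
 * support, and runtime PM autosuspend. ASPM low-power states stay disabled
 * until the handshake with the modem completes (see t7xx_pci_pm_init_late()).
 */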
static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
{
        struct pci_dev *pdev = t7xx_dev->pdev;

        INIT_LIST_HEAD(&t7xx_dev->md_pm_entities);
        mutex_init(&t7xx_dev->md_pm_entity_mtx);
        spin_lock_init(&t7xx_dev->md_pm_lock);
        init_completion(&t7xx_dev->sleep_lock_acquire);
        init_completion(&t7xx_dev->pm_sr_ack);
        init_completion(&t7xx_dev->init_done);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

        device_init_wakeup(&pdev->dev, true);
        dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags |
                                DPM_FLAG_NO_DIRECT_COMPLETE);

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
        pm_runtime_use_autosuspend(&pdev->dev);

        return 0;
}

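/* Complete PM init once the modem handshake is done: unmask the PM-related
 * MHCCIF interrupts, re-enable ASPM low-power states, and allow runtime PM.
 */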
void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
{
        /* Enable the PCIe resource lock only after MD deep sleep is done */
        t7xx_mhccif_mask_clr(t7xx_dev,
                             D2H_INT_DS_LOCK_ACK |
                             D2H_INT_SUSPEND_ACK |
                             D2H_INT_RESUME_ACK |
                             D2H_INT_SUSPEND_ACK_AP |
                             D2H_INT_RESUME_ACK_AP);
        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);

        pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
        pm_runtime_allow(&t7xx_dev->pdev->dev);
        pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
        complete_all(&t7xx_dev->init_done);
}

static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev)
{
        /* The device is kept in the FSM re-init flow,
         * so just roll back the PM setting to the init setting.
         */
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

        pm_runtime_get_noresume(&t7xx_dev->pdev->dev);

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        return t7xx_wait_pm_config(t7xx_dev);
}

void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev)
{
        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        t7xx_wait_pm_config(t7xx_dev);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION);
}

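/* Register a PM entity (a modem sub-component with suspend/resume callbacks).
 * Returns -EEXIST if an entity with the same ID is already registered.
 */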
int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
{
        struct md_pm_entity *entity;

        mutex_lock(&t7xx_dev->md_pm_entity_mtx);
        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->id == pm_entity->id) {
                        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
                        return -EEXIST;
                }
        }

        list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities);
        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
        return 0;
}

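/* Remove a previously registered PM entity; returns -ENXIO if it is not on
 * the list.
 */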
int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
{
        struct md_pm_entity *entity, *tmp_entity;

        mutex_lock(&t7xx_dev->md_pm_entity_mtx);
        list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->id == pm_entity->id) {
                        list_del(&pm_entity->entity);
                        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
                        return 0;
                }
        }

        mutex_unlock(&t7xx_dev->md_pm_entity_mtx);

        return -ENXIO;
}

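/* Wait for the deep-sleep-disable handshake started by
 * t7xx_pci_disable_sleep(); returns 0 on timeout, otherwise the number of
 * remaining jiffies.
 */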
int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev)
{
        struct device *dev = &t7xx_dev->pdev->dev;
        int ret;

        ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire,
                                          msecs_to_jiffies(PM_SLEEP_DIS_TIMEOUT_MS));
        if (!ret)
                dev_err_ratelimited(dev, "Resource wait complete timed out\n");

        return ret;
}

/**
 * t7xx_pci_disable_sleep() - Disable deep sleep capability.
 * @t7xx_dev: MTK device.
 *
 * Lock the deep sleep capability; note that, from the host's point of view,
 * the device can still go into the deep sleep state while it is in D0.
 *
 * If the device is in the deep sleep state, wake it up and disable the deep
 * sleep capability.
 */
void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
        t7xx_dev->sleep_disable_count++;
        if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
                goto unlock_and_complete;

        if (t7xx_dev->sleep_disable_count == 1) {
                u32 status;

                reinit_completion(&t7xx_dev->sleep_lock_acquire);
                t7xx_dev_set_sleep_capability(t7xx_dev, false);

                status = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
                if (status & T7XX_PCIE_RESOURCE_STS_MSK)
                        goto unlock_and_complete;

                t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DS_LOCK);
        }
        spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
        return;

unlock_and_complete:
        spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
        complete_all(&t7xx_dev->sleep_lock_acquire);
}

/**
 * t7xx_pci_enable_sleep() - Enable deep sleep capability.
 * @t7xx_dev: MTK device.
 *
 * After deep sleep is enabled, the device can enter the deep sleep state.
 */
void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
        t7xx_dev->sleep_disable_count--;
        if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
                goto unlock;

        if (t7xx_dev->sleep_disable_count == 0)
                t7xx_dev_set_sleep_capability(t7xx_dev, true);

unlock:
        spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
}

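/* Send a suspend/resume request to the device over MHCCIF and wait up to
 * PM_ACK_TIMEOUT_MS for the corresponding ACK.
 */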
static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request)
{
        unsigned long wait_ret;

        reinit_completion(&t7xx_dev->pm_sr_ack);
        t7xx_mhccif_h2d_swint_trigger(t7xx_dev, request);
        wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack,
                                               msecs_to_jiffies(PM_ACK_TIMEOUT_MS));
        if (!wait_ret)
                return -ETIMEDOUT;

        return 0;
}

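/* Common suspend path: lock the PCIe low-power states, suspend every PM
 * entity, then ask the modem (MD) and the SAP to suspend. On failure, resume
 * the entities that were already suspended and roll back the PM state.
 */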
static int __t7xx_pci_pm_suspend(struct pci_dev *pdev)
{
        enum t7xx_pm_id entity_id = PM_ENTITY_ID_INVALID;
        struct t7xx_pci_dev *t7xx_dev;
        struct md_pm_entity *entity;
        int ret;

        t7xx_dev = pci_get_drvdata(pdev);
        if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT ||
            READ_ONCE(t7xx_dev->mode) != T7XX_READY) {
                dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n");
                return -EFAULT;
        }

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        ret = t7xx_wait_pm_config(t7xx_dev);
        if (ret) {
                iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
                return ret;
        }

        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
        t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
        t7xx_dev->rgu_pci_irq_en = false;

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (!entity->suspend)
                        continue;

                ret = entity->suspend(t7xx_dev, entity->entity_param);
                if (ret) {
                        entity_id = entity->id;
                        dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id);
                        goto abort_suspend;
                }
        }

        ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ);
        if (ret) {
                dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret);
                goto abort_suspend;
        }

        ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ_AP);
        if (ret) {
                t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
                dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret);
                goto abort_suspend;
        }

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->suspend_late)
                        entity->suspend_late(t7xx_dev, entity->entity_param);
        }

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
        return 0;

abort_suspend:
        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity_id == entity->id)
                        break;

                if (entity->resume)
                        entity->resume(t7xx_dev, entity->entity_param);
        }

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
        t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
        return ret;
}

static void t7xx_pcie_interrupt_reinit(struct t7xx_pci_dev *t7xx_dev)
{
        t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);

        /* Disable interrupts first and let the IPs enable them */
        iowrite32(MSIX_MSK_SET_ALL, IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0);

        /* The device disables PCIe interrupts during resume;
         * the following function re-enables them.
         */
        t7xx_pcie_mac_interrupts_en(t7xx_dev);
        t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
}

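/* Re-initialize the PCIe MAC after a low-power exit. For a D3/L3 exit
 * (is_d3), MHCCIF and the PM state must be re-initialized as well.
 */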
static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3)
{
        int ret;

        ret = pcim_enable_device(t7xx_dev->pdev);
        if (ret)
                return ret;

        t7xx_pcie_mac_atr_init(t7xx_dev);
        t7xx_pcie_interrupt_reinit(t7xx_dev);

        if (is_d3) {
                t7xx_mhccif_init(t7xx_dev);
                return t7xx_pci_pm_reinit(t7xx_dev);
        }

        return 0;
}

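/* Append a stop or start command to the modem state machine; a start also
 * re-arms the SAP RGU interrupt.
 */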
static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event)
{
        struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl;
        struct device *dev = &t7xx_dev->pdev->dev;
        int ret = -EINVAL;

        switch (event) {
        case FSM_CMD_STOP:
                ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
                break;

        case FSM_CMD_START:
                t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
                t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
                t7xx_dev->rgu_pci_irq_en = true;
                t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
                ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_START, 0);
                break;

        default:
                break;
        }

        if (ret)
                dev_err(dev, "Failure handling FSM command %u, %d\n", event, ret);

        return ret;
}

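/* Common resume path. When state_check is set, the resume-state register
 * written by the device indicates whether this is a new boot, an exception,
 * or a regular L1/L2/L3 exit, and therefore how much re-initialization is
 * needed.
 */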
static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check)
{
        struct t7xx_pci_dev *t7xx_dev;
        struct md_pm_entity *entity;
        u32 prev_state;
        int ret = 0;

        t7xx_dev = pci_get_drvdata(pdev);
        if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
                iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
                return 0;
        }

        t7xx_pcie_mac_interrupts_en(t7xx_dev);
        prev_state = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_PM_RESUME_STATE);

        if (state_check) {
                /* For D3/L3 resume, the device could boot so quickly that the
                 * initial value of the dummy register might be overwritten.
                 * Identify new boots if the ATR source address register is not initialized.
                 */
                u32 atr_reg_val = ioread32(IREG_BASE(t7xx_dev) +
                                           ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR);
                if (prev_state == PM_RESUME_REG_STATE_L3 ||
                    (prev_state == PM_RESUME_REG_STATE_INIT &&
                     atr_reg_val == ATR_SRC_ADDR_INVALID)) {
                        ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
                        if (ret)
                                return ret;

                        ret = t7xx_pcie_reinit(t7xx_dev, true);
                        if (ret)
                                return ret;

                        t7xx_clear_rgu_irq(t7xx_dev);
                        return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START);
                }

                if (prev_state == PM_RESUME_REG_STATE_EXP ||
                    prev_state == PM_RESUME_REG_STATE_L2_EXP) {
                        if (prev_state == PM_RESUME_REG_STATE_L2_EXP) {
                                ret = t7xx_pcie_reinit(t7xx_dev, false);
                                if (ret)
                                        return ret;
                        }

                        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
                        t7xx_dev->rgu_pci_irq_en = true;
                        t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);

                        t7xx_mhccif_mask_clr(t7xx_dev,
                                             D2H_INT_EXCEPTION_INIT |
                                             D2H_INT_EXCEPTION_INIT_DONE |
                                             D2H_INT_EXCEPTION_CLEARQ_DONE |
                                             D2H_INT_EXCEPTION_ALLQ_RESET |
                                             D2H_INT_PORT_ENUM);

                        return ret;
                }

                if (prev_state == PM_RESUME_REG_STATE_L2) {
                        ret = t7xx_pcie_reinit(t7xx_dev, false);
                        if (ret)
                                return ret;

                } else if (prev_state != PM_RESUME_REG_STATE_L1 &&
                           prev_state != PM_RESUME_REG_STATE_INIT) {
                        ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
                        if (ret)
                                return ret;

                        t7xx_clear_rgu_irq(t7xx_dev);
                        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
                        return 0;
                }
        }

        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
        t7xx_wait_pm_config(t7xx_dev);

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->resume_early)
                        entity->resume_early(t7xx_dev, entity->entity_param);
        }

        ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
        if (ret)
                dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret);

        ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ_AP);
        if (ret)
                dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret);

        list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
                if (entity->resume) {
                        ret = entity->resume(t7xx_dev, entity->entity_param);
                        if (ret)
                                dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n",
                                        entity->id, ret);
                }
        }

        t7xx_dev->rgu_pci_irq_en = true;
        t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
        iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
        pm_runtime_mark_last_busy(&pdev->dev);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);

        return ret;
}

static int t7xx_pci_pm_resume_noirq(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct t7xx_pci_dev *t7xx_dev;

        t7xx_dev = pci_get_drvdata(pdev);
        t7xx_pcie_mac_interrupts_dis(t7xx_dev);

        return 0;
}

static void t7xx_pci_shutdown(struct pci_dev *pdev)
{
        __t7xx_pci_pm_suspend(pdev);
}

static int t7xx_pci_pm_prepare(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct t7xx_pci_dev *t7xx_dev;

        t7xx_dev = pci_get_drvdata(pdev);
        if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) {
                dev_warn(dev, "Not ready for system sleep.\n");
                return -ETIMEDOUT;
        }

        return 0;
}

static int t7xx_pci_pm_suspend(struct device *dev)
{
        return __t7xx_pci_pm_suspend(to_pci_dev(dev));
}

static int t7xx_pci_pm_resume(struct device *dev)
{
        return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
}

static int t7xx_pci_pm_thaw(struct device *dev)
{
        return __t7xx_pci_pm_resume(to_pci_dev(dev), false);
}

static int t7xx_pci_pm_runtime_suspend(struct device *dev)
{
        return __t7xx_pci_pm_suspend(to_pci_dev(dev));
}

static int t7xx_pci_pm_runtime_resume(struct device *dev)
{
        return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
}

static const struct dev_pm_ops t7xx_pci_pm_ops = {
        .prepare = t7xx_pci_pm_prepare,
        .suspend = t7xx_pci_pm_suspend,
        .resume = t7xx_pci_pm_resume,
        .resume_noirq = t7xx_pci_pm_resume_noirq,
        .freeze = t7xx_pci_pm_suspend,
        .thaw = t7xx_pci_pm_thaw,
        .poweroff = t7xx_pci_pm_suspend,
        .restore = t7xx_pci_pm_resume,
        .restore_noirq = t7xx_pci_pm_resume_noirq,
        .runtime_suspend = t7xx_pci_pm_runtime_suspend,
        .runtime_resume = t7xx_pci_pm_runtime_resume,
};

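/* Request one threaded IRQ per configured MSI-X vector, unwinding the
 * vectors already requested on failure.
 */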
static int t7xx_request_irq(struct pci_dev *pdev)
{
        struct t7xx_pci_dev *t7xx_dev;
        int ret = 0, i;

        t7xx_dev = pci_get_drvdata(pdev);

        for (i = 0; i < EXT_INT_NUM; i++) {
                const char *irq_descr;
                int irq_vec;

                if (!t7xx_dev->intr_handler[i])
                        continue;

                irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
                                           dev_driver_string(&pdev->dev), i);
                if (!irq_descr) {
                        ret = -ENOMEM;
                        break;
                }

                irq_vec = pci_irq_vector(pdev, i);
                ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i],
                                           t7xx_dev->intr_thread[i], 0, irq_descr,
                                           t7xx_dev->callback_param[i]);
                if (ret) {
                        dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret);
                        break;
                }
        }

        if (ret) {
                while (i--) {
                        if (!t7xx_dev->intr_handler[i])
                                continue;

                        free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
                }
        }

        return ret;
}

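/* Allocate the MSI-X vectors and attach the interrupt handlers. */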
static int t7xx_setup_msix(struct t7xx_pci_dev *t7xx_dev)
{
        struct pci_dev *pdev = t7xx_dev->pdev;
        int ret;

        /* Only 6 interrupts are used, but the HW design requires a power-of-2 IRQ allocation */
        ret = pci_alloc_irq_vectors(pdev, EXT_INT_NUM, EXT_INT_NUM, PCI_IRQ_MSIX);
        if (ret < 0) {
                dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret);
                return ret;
        }

        ret = t7xx_request_irq(pdev);
        if (ret) {
                pci_free_irq_vectors(pdev);
                return ret;
        }

        t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);
        return 0;
}

static int t7xx_interrupt_init(struct t7xx_pci_dev *t7xx_dev)
{
        int ret, i;

        if (!t7xx_dev->pdev->msix_cap)
                return -EINVAL;

        ret = t7xx_setup_msix(t7xx_dev);
        if (ret)
                return ret;

        /* IPs enable interrupts when ready */
        for (i = 0; i < EXT_INT_NUM; i++)
                t7xx_pcie_mac_set_int(t7xx_dev, i);

        return 0;
}

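/* Compute the host-side base of the always-on infracfg block by translating
 * the device-side address into the external register window.
 */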
static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev)
{
        t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base +
                                              INFRACFG_AO_DEV_CHIP -
                                              t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
}

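/* Probe: map the BARs, set 64-bit DMA masks, initialize PM, the PCIe MAC
 * address translation, MHCCIF, and the modem stack, then wire up MSI-X
 * interrupts and the t7xx_mode sysfs attribute.
 */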
static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct t7xx_pci_dev *t7xx_dev;
        int ret;

        t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL);
        if (!t7xx_dev)
                return -ENOMEM;

        pci_set_drvdata(pdev, t7xx_dev);
        t7xx_dev->pdev = pdev;

        ret = pcim_enable_device(pdev);
        if (ret)
                return ret;

        pci_set_master(pdev);

        ret = pcim_iomap_regions(pdev, BIT(T7XX_PCI_IREG_BASE) | BIT(T7XX_PCI_EREG_BASE),
                                 pci_name(pdev));
        if (ret) {
                dev_err(&pdev->dev, "Could not request BARs: %d\n", ret);
                return -ENOMEM;
        }

        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (ret) {
                dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret);
                return ret;
        }

        ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (ret) {
                dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret);
                return ret;
        }

        IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE];
        t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE];

        ret = t7xx_pci_pm_init(t7xx_dev);
        if (ret)
                return ret;

        t7xx_pcie_mac_atr_init(t7xx_dev);
        t7xx_pci_infracfg_ao_calc(t7xx_dev);
        t7xx_mhccif_init(t7xx_dev);

        ret = t7xx_md_init(t7xx_dev);
        if (ret)
                return ret;

        t7xx_pcie_mac_interrupts_dis(t7xx_dev);

        ret = sysfs_create_group(&t7xx_dev->pdev->dev.kobj,
                                 &t7xx_mode_attribute_group);
        if (ret)
                goto err_md_exit;

        ret = t7xx_interrupt_init(t7xx_dev);
        if (ret)
                goto err_remove_group;

        t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
        t7xx_pcie_mac_interrupts_en(t7xx_dev);

        return 0;

err_remove_group:
        sysfs_remove_group(&t7xx_dev->pdev->dev.kobj,
                           &t7xx_mode_attribute_group);

err_md_exit:
        t7xx_md_exit(t7xx_dev);
        return ret;
}

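/* Tear down in reverse probe order: sysfs group, modem stack, then IRQs and
 * MSI-X vectors.
 */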
static void t7xx_pci_remove(struct pci_dev *pdev)
{
        struct t7xx_pci_dev *t7xx_dev;
        int i;

        t7xx_dev = pci_get_drvdata(pdev);

        sysfs_remove_group(&t7xx_dev->pdev->dev.kobj,
                           &t7xx_mode_attribute_group);
        t7xx_md_exit(t7xx_dev);

        for (i = 0; i < EXT_INT_NUM; i++) {
                if (!t7xx_dev->intr_handler[i])
                        continue;

                free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
        }

        pci_free_irq_vectors(t7xx_dev->pdev);
}

static const struct pci_device_id t7xx_pci_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x4d75) },
        { }
};
MODULE_DEVICE_TABLE(pci, t7xx_pci_table);

static struct pci_driver t7xx_pci_driver = {
        .name = "mtk_t7xx",
        .id_table = t7xx_pci_table,
        .probe = t7xx_pci_probe,
        .remove = t7xx_pci_remove,
        .driver.pm = &t7xx_pci_pm_ops,
        .shutdown = t7xx_pci_shutdown,
};

module_pci_driver(t7xx_pci_driver);

MODULE_AUTHOR("MediaTek Inc");
MODULE_DESCRIPTION("MediaTek PCIe 5G WWAN modem T7xx driver");
MODULE_LICENSE("GPL");