// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <[email protected]>
 *  Eliot Lee <[email protected]>
 *  Moises Veleta <[email protected]>
 *  Ricardo Martinez <[email protected]>
 *
 * Contributors:
 *  Amir Hanania <[email protected]>
 *  Chiranjeevi Rapolu <[email protected]>
 *  Sreehari Kancharla <[email protected]>
 */

#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/kthread.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_cldma.h"
#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_netdev.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define RT_ID_MD_PORT_ENUM      0
#define RT_ID_AP_PORT_ENUM      1
/* Modem feature query identification code - "ICCC" */
#define MD_FEATURE_QUERY_ID     0x49434343

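/* Each feature_set[] byte carries a version in bits 7:4 and a support mask in bits 3:0 */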
#define FEATURE_VER             GENMASK(7, 4)
#define FEATURE_MSK             GENMASK(3, 0)

#define RGU_RESET_DELAY_MS      10
#define PORT_RESET_DELAY_MS     2000
#define FASTBOOT_RESET_DELAY_MS 2000
#define EX_HS_TIMEOUT_MS        5000
#define EX_HS_POLL_DELAY_MS     10

enum mtk_feature_support_type {
        MTK_FEATURE_DOES_NOT_EXIST,
        MTK_FEATURE_NOT_SUPPORTED,
        MTK_FEATURE_MUST_BE_SUPPORTED,
};

static unsigned int t7xx_get_interrupt_status(struct t7xx_pci_dev *t7xx_dev)
{
        return t7xx_mhccif_read_sw_int_sts(t7xx_dev) & D2H_SW_INT_MASK;
}

/**
 * t7xx_pci_mhccif_isr() - Process MHCCIF interrupts.
 * @t7xx_dev: MTK device.
 *
 * Check the interrupt status and queue commands accordingly.
 *
 * Return:
 ** 0           - Success.
 ** -EINVAL     - Failure to get FSM control.
 */
int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev)
{
        struct t7xx_modem *md = t7xx_dev->md;
        struct t7xx_fsm_ctl *ctl;
        unsigned int int_sta;
        int ret = 0;
        u32 mask;

        ctl = md->fsm_ctl;
        if (!ctl) {
                dev_err_ratelimited(&t7xx_dev->pdev->dev,
                                    "MHCCIF interrupt received before initializing MD monitor\n");
                return -EINVAL;
        }

        spin_lock_bh(&md->exp_lock);
        int_sta = t7xx_get_interrupt_status(t7xx_dev);
        md->exp_id |= int_sta;
        if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
                if (ctl->md_state == MD_STATE_INVALID ||
                    ctl->md_state == MD_STATE_WAITING_FOR_HS1 ||
                    ctl->md_state == MD_STATE_WAITING_FOR_HS2 ||
                    ctl->md_state == MD_STATE_READY) {
                        md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
                        ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_CCIF_EX);
                }
        } else if (md->exp_id & D2H_INT_PORT_ENUM) {
                md->exp_id &= ~D2H_INT_PORT_ENUM;

                if (ctl->curr_state == FSM_STATE_INIT || ctl->curr_state == FSM_STATE_PRE_START ||
                    ctl->curr_state == FSM_STATE_STOPPED)
                        ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_PORT_ENUM);
        } else if (ctl->md_state == MD_STATE_WAITING_FOR_HS1) {
                mask = t7xx_mhccif_mask_get(t7xx_dev);
                if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) {
                        md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
                        queue_work(md->handshake_wq, &md->handshake_work);
                }
        }
        spin_unlock_bh(&md->exp_lock);

        return ret;
}

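/* Acknowledge the device-side (TOPRGU) PCIe reset interrupt by reading the
 * latched status and writing it back.
 */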
static void t7xx_clr_device_irq_via_pcie(struct t7xx_pci_dev *t7xx_dev)
{
        struct t7xx_addr_base *pbase_addr = &t7xx_dev->base_addr;
        void __iomem *reset_pcie_reg;
        u32 val;

        reset_pcie_reg = pbase_addr->pcie_ext_reg_base + TOPRGU_CH_PCIE_IRQ_STA -
                          pbase_addr->pcie_dev_reg_trsl_addr;
        val = ioread32(reset_pcie_reg);
        iowrite32(val, reset_pcie_reg);
}

void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev)
{
        /* Clear L2 */
        t7xx_clr_device_irq_via_pcie(t7xx_dev);
        /* Clear L1 */
        t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
}

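/* Run the ACPI reset method @fn_name; without CONFIG_ACPI, fall back to
 * pci_reset_function().
 */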
static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name)
{
#ifdef CONFIG_ACPI
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct device *dev = &t7xx_dev->pdev->dev;
        acpi_status acpi_ret;
        acpi_handle handle;

        handle = ACPI_HANDLE(dev);
        if (!handle) {
                dev_err(dev, "ACPI handle not found\n");
                return -EFAULT;
        }

        if (!acpi_has_method(handle, fn_name)) {
                dev_err(dev, "%s method not found\n", fn_name);
                return -EFAULT;
        }

        acpi_ret = acpi_evaluate_object(handle, fn_name, NULL, &buffer);
        if (ACPI_FAILURE(acpi_ret)) {
                dev_err(dev, "%s method fail: %s\n", fn_name, acpi_format_exception(acpi_ret));
                return -EFAULT;
        }

        kfree(buffer.pointer);
#else
        struct device *dev = &t7xx_dev->pdev->dev;
        int ret;

        ret = pci_reset_function(t7xx_dev->pdev);
        if (ret) {
                dev_err(dev, "Failed to reset device, error:%d\n", ret);
                return ret;
        }
#endif
        return 0;
}

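/* Publish @event_id in the HOST_EVENT field of the device status register */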
static void t7xx_host_event_notify(struct t7xx_pci_dev *t7xx_dev, unsigned int event_id)
{
        u32 value;

        value = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
        value &= ~HOST_EVENT_MASK;
        value |= FIELD_PREP(HOST_EVENT_MASK, event_id);
        iowrite32(value, IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
}

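/* Reset the device: ACPI "_RST" for FLDR, "MRST._RST" for PLDR, or a
 * fastboot download notification plus a device-reset interrupt for FASTBOOT.
 * PCI config state is saved and restored around the reset.
 */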
int t7xx_reset_device(struct t7xx_pci_dev *t7xx_dev, enum reset_type type)
{
        int ret = 0;

        pci_save_state(t7xx_dev->pdev);
        t7xx_pci_reprobe_early(t7xx_dev);
        t7xx_mode_update(t7xx_dev, T7XX_RESET);
        WRITE_ONCE(t7xx_dev->debug_ports_show, false);

        if (type == FLDR) {
                ret = t7xx_acpi_reset(t7xx_dev, "_RST");
        } else if (type == PLDR) {
                ret = t7xx_acpi_reset(t7xx_dev, "MRST._RST");
        } else if (type == FASTBOOT) {
                t7xx_host_event_notify(t7xx_dev, FASTBOOT_DL_NOTIFY);
                t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
                msleep(FASTBOOT_RESET_DELAY_MS);
        }

        pci_restore_state(t7xx_dev->pdev);
        if (ret)
                return ret;

        return t7xx_pci_reprobe(t7xx_dev, true);
}

static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
{
        u32 val;

        val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
        if (val & MISC_RESET_TYPE_PLDR)
                t7xx_reset_device(t7xx_dev, PLDR);
        else if (val & MISC_RESET_TYPE_FLDR)
                t7xx_reset_device(t7xx_dev, FLDR);
}

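/* Threaded half of the RGU ISR: wait RGU_RESET_DELAY_MS, then reset the
 * device with the type reported in the device status register.
 */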
static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data)
{
        struct t7xx_pci_dev *t7xx_dev = data;

        msleep(RGU_RESET_DELAY_MS);
        t7xx_reset_device_via_pmic(t7xx_dev);
        return IRQ_HANDLED;
}

static irqreturn_t t7xx_rgu_isr_handler(int irq, void *data)
{
        struct t7xx_pci_dev *t7xx_dev = data;
        struct t7xx_modem *modem;

        t7xx_clear_rgu_irq(t7xx_dev);
        if (!t7xx_dev->rgu_pci_irq_en)
                return IRQ_HANDLED;

        modem = t7xx_dev->md;
        modem->rgu_irq_asserted = true;
        t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
        return IRQ_WAKE_THREAD;
}

static void t7xx_pcie_register_rgu_isr(struct t7xx_pci_dev *t7xx_dev)
{
        /* Registers RGU callback ISR with PCIe driver */
        t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
        t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);

        t7xx_dev->intr_handler[SAP_RGU_INT] = t7xx_rgu_isr_handler;
        t7xx_dev->intr_thread[SAP_RGU_INT] = t7xx_rgu_isr_thread;
        t7xx_dev->callback_param[SAP_RGU_INT] = t7xx_dev;
        t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
}

/**
 * t7xx_cldma_exception() - CLDMA exception handler.
 * @md_ctrl: modem control struct.
 * @stage: exception stage.
 *
 * Part of the modem exception recovery.
 * Stages are one after the other as described below:
 * HIF_EX_INIT:         Disable and clear TXQ.
 * HIF_EX_CLEARQ_DONE:  Disable RX, flush TX/RX workqueues and clear RX.
 * HIF_EX_ALLQ_RESET:   HW is back in safe mode for re-initialization and restart.
 */

/* Modem Exception Handshake Flow
 *
 * Modem HW Exception interrupt received
 *           (MD_IRQ_CCIF_EX)
 *                   |
 *         +---------v--------+
 *         |   HIF_EX_INIT    | : Disable and clear TXQ
 *         +------------------+
 *                   |
 *         +---------v--------+
 *         | HIF_EX_INIT_DONE | : Wait for the init to be done
 *         +------------------+
 *                   |
 *         +---------v--------+
 *         |HIF_EX_CLEARQ_DONE| : Disable and clear RXQ
 *         +------------------+ : Flush TX/RX workqueues
 *                   |
 *         +---------v--------+
 *         |HIF_EX_ALLQ_RESET | : Restart HW and CLDMA
 *         +------------------+
 */
static void t7xx_cldma_exception(struct cldma_ctrl *md_ctrl, enum hif_ex_stage stage)
{
        switch (stage) {
        case HIF_EX_INIT:
                t7xx_cldma_stop_all_qs(md_ctrl, MTK_TX);
                t7xx_cldma_clear_all_qs(md_ctrl, MTK_TX);
                break;

        case HIF_EX_CLEARQ_DONE:
                /* We do not want to get CLDMA IRQ when MD is
                 * resetting CLDMA after it got clearq_ack.
                 */
                t7xx_cldma_stop_all_qs(md_ctrl, MTK_RX);
                t7xx_cldma_stop(md_ctrl);

                if (md_ctrl->hif_id == CLDMA_ID_MD)
                        t7xx_cldma_hw_reset(md_ctrl->t7xx_dev->base_addr.infracfg_ao_base);

                t7xx_cldma_clear_all_qs(md_ctrl, MTK_RX);
                break;

        case HIF_EX_ALLQ_RESET:
                t7xx_cldma_hw_init(&md_ctrl->hw_info);
                t7xx_cldma_start(md_ctrl);
                break;

        default:
                break;
        }
}

static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage)
{
        struct t7xx_pci_dev *t7xx_dev = md->t7xx_dev;

        if (stage == HIF_EX_CLEARQ_DONE) {
                /* Give DHL time to flush data */
                msleep(PORT_RESET_DELAY_MS);
                t7xx_port_proxy_reset(md->port_prox);
        }

        t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage);
        t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_AP], stage);

        if (stage == HIF_EX_INIT)
                t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK);
        else if (stage == HIF_EX_CLEARQ_DONE)
                t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_CLEARQ_ACK);
}

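/* Poll md->exp_id for @event_id every EX_HS_POLL_DELAY_MS, giving up after
 * EX_HS_TIMEOUT_MS.
 */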
static int t7xx_wait_hif_ex_hk_event(struct t7xx_modem *md, int event_id)
{
        unsigned int waited_time_ms = 0;

        do {
                if (md->exp_id & event_id)
                        return 0;

                waited_time_ms += EX_HS_POLL_DELAY_MS;
                msleep(EX_HS_POLL_DELAY_MS);
        } while (waited_time_ms < EX_HS_TIMEOUT_MS);

        return -EFAULT;
}

static void t7xx_md_sys_sw_init(struct t7xx_pci_dev *t7xx_dev)
{
        /* Register the MHCCIF ISR for MD exception, port enum and
         * async handshake notifications.
         */
        t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);
        t7xx_mhccif_mask_clr(t7xx_dev, D2H_INT_PORT_ENUM);

        /* Register RGU IRQ handler for sAP exception notification */
        t7xx_dev->rgu_pci_irq_en = true;
        t7xx_pcie_register_rgu_isr(t7xx_dev);
}

struct feature_query {
        __le32 head_pattern;
        u8 feature_set[FEATURE_COUNT];
        __le32 tail_pattern;
};

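/* Send the HS1 message: the host feature_set[] framed by the "ICCC" query
 * pattern on both ends.
 */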
static void t7xx_prepare_host_rt_data_query(struct t7xx_sys_info *core)
{
        struct feature_query *ft_query;
        struct sk_buff *skb;

        skb = t7xx_ctrl_alloc_skb(sizeof(*ft_query));
        if (!skb)
                return;

        ft_query = skb_put(skb, sizeof(*ft_query));
        ft_query->head_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);
        memcpy(ft_query->feature_set, core->feature_set, FEATURE_COUNT);
        ft_query->tail_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);

        /* Send HS1 message to device */
        t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS1_MSG, 0);
}

static int t7xx_prepare_device_rt_data(struct t7xx_sys_info *core, struct device *dev,
                                       void *data)
{
        struct feature_query *md_feature = data;
        struct mtk_runtime_feature *rt_feature;
        unsigned int i, rt_data_len = 0;
        struct sk_buff *skb;

        /* Parse MD runtime data query */
        if (le32_to_cpu(md_feature->head_pattern) != MD_FEATURE_QUERY_ID ||
            le32_to_cpu(md_feature->tail_pattern) != MD_FEATURE_QUERY_ID) {
                dev_err(dev, "Invalid feature pattern: head 0x%x, tail 0x%x\n",
                        le32_to_cpu(md_feature->head_pattern),
                        le32_to_cpu(md_feature->tail_pattern));
                return -EINVAL;
        }

        for (i = 0; i < FEATURE_COUNT; i++) {
                if (FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]) !=
                    MTK_FEATURE_MUST_BE_SUPPORTED)
                        rt_data_len += sizeof(*rt_feature);
        }

        skb = t7xx_ctrl_alloc_skb(rt_data_len);
        if (!skb)
                return -ENOMEM;

        rt_feature = skb_put(skb, rt_data_len);
        memset(rt_feature, 0, rt_data_len);

        /* Fill runtime feature */
        for (i = 0; i < FEATURE_COUNT; i++) {
                u8 md_feature_mask = FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]);

                if (md_feature_mask == MTK_FEATURE_MUST_BE_SUPPORTED)
                        continue;

                rt_feature->feature_id = i;
                if (md_feature_mask == MTK_FEATURE_DOES_NOT_EXIST)
                        rt_feature->support_info = md_feature->feature_set[i];

                rt_feature++;
        }

        /* Send HS3 message to device */
        t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS3_MSG, 0);
        return 0;
}

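/* Walk the runtime features returned by the device and fail if any feature
 * the host marked as must-be-supported is not; dispatch port enumeration
 * payloads along the way.
 */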
static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_info *core,
                                   struct device *dev, void *data, int data_length)
{
        enum mtk_feature_support_type ft_spt_st, ft_spt_cfg;
        struct mtk_runtime_feature *rt_feature;
        int i, offset;

        offset = sizeof(struct feature_query);
        for (i = 0; i < FEATURE_COUNT && offset < data_length; i++) {
                rt_feature = data + offset;
                offset += sizeof(*rt_feature) + le32_to_cpu(rt_feature->data_len);

                ft_spt_cfg = FIELD_GET(FEATURE_MSK, core->feature_set[i]);
                if (ft_spt_cfg != MTK_FEATURE_MUST_BE_SUPPORTED)
                        continue;

                ft_spt_st = FIELD_GET(FEATURE_MSK, rt_feature->support_info);
                if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED)
                        return -EINVAL;

                if (i == RT_ID_MD_PORT_ENUM || i == RT_ID_AP_PORT_ENUM)
                        t7xx_port_enum_msg_handler(ctl->md, rt_feature->data);
        }

        return 0;
}

static int t7xx_core_reset(struct t7xx_modem *md)
{
        struct device *dev = &md->t7xx_dev->pdev->dev;
        struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

        md->core_md.ready = false;

        if (!ctl) {
                dev_err(dev, "FSM is not initialized\n");
                return -EINVAL;
        }

        if (md->core_md.handshake_ongoing) {
                int ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);

                if (ret)
                        return ret;
        }

        md->core_md.handshake_ongoing = false;
        return 0;
}

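/* Core handshake: send the HS1 query, wait for the @event_id response
 * (bailing out on @err_detect or a modem exception), parse the device
 * runtime data and answer with HS3.
 */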
static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_sys_info *core_info,
                                 struct t7xx_fsm_ctl *ctl,
                                 enum t7xx_fsm_event_state event_id,
                                 enum t7xx_fsm_event_state err_detect)
{
        struct t7xx_fsm_event *event = NULL, *event_next;
        struct device *dev = &md->t7xx_dev->pdev->dev;
        unsigned long flags;
        int ret;

        t7xx_prepare_host_rt_data_query(core_info);

        while (!kthread_should_stop()) {
                bool event_received = false;

                spin_lock_irqsave(&ctl->event_lock, flags);
                list_for_each_entry_safe(event, event_next, &ctl->event_queue, entry) {
                        if (event->event_id == err_detect) {
                                list_del(&event->entry);
                                spin_unlock_irqrestore(&ctl->event_lock, flags);
                                dev_err(dev, "Core handshake error event received\n");
                                goto err_free_event;
                        } else if (event->event_id == event_id) {
                                list_del(&event->entry);
                                event_received = true;
                                break;
                        }
                }
                spin_unlock_irqrestore(&ctl->event_lock, flags);

                if (event_received)
                        break;

                wait_event_interruptible(ctl->event_wq, !list_empty(&ctl->event_queue) ||
                                         kthread_should_stop());
                if (kthread_should_stop())
                        goto err_free_event;
        }

        if (!event || ctl->exp_flg)
                goto err_free_event;

        ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length);
        if (ret) {
                dev_err(dev, "Host failure parsing runtime data: %d\n", ret);
                goto err_free_event;
        }

        if (ctl->exp_flg)
                goto err_free_event;

        ret = t7xx_prepare_device_rt_data(core_info, dev, event->data);
        if (ret) {
                dev_err(dev, "Device failure parsing runtime data: %d\n", ret);
                goto err_free_event;
        }

        core_info->ready = true;
        core_info->handshake_ongoing = false;
        wake_up(&ctl->async_hk_wq);
err_free_event:
        kfree(event);
}

static void t7xx_md_hk_wq(struct work_struct *work)
{
        struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work);
        struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

        /* Clear the HS2 EXIT event appended in t7xx_core_reset() */
        t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT);
        t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD], CLDMA_SHARED_Q_CFG);
        t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
        t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
        md->core_md.handshake_ongoing = true;
        t7xx_core_hk_handler(md, &md->core_md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
}

static void t7xx_ap_hk_wq(struct work_struct *work)
{
        struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work);
        struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

        /* Clear the HS2 EXIT event appended in t7xx_core_reset() */
        t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT);
        t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
        t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP], CLDMA_SHARED_Q_CFG);
        t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
        md->core_ap.handshake_ongoing = true;
        t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
}

void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
{
        struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
        unsigned int int_sta;
        unsigned long flags;

        switch (evt_id) {
        case FSM_PRE_START:
                t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | D2H_INT_ASYNC_MD_HK |
                                                   D2H_INT_ASYNC_AP_HK);
                break;

        case FSM_START:
                t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_PORT_ENUM);

                spin_lock_irqsave(&md->exp_lock, flags);
                int_sta = t7xx_get_interrupt_status(md->t7xx_dev);
                md->exp_id |= int_sta;
                if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
                        ctl->exp_flg = true;
                        md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
                        md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
                        md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
                } else if (ctl->exp_flg) {
                        md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
                        md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
                } else {
                        void __iomem *mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;

                        if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
                                queue_work(md->handshake_wq, &md->handshake_work);
                                md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
                                iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
                                t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
                        }

                        if (md->exp_id & D2H_INT_ASYNC_AP_HK) {
                                queue_work(md->handshake_wq, &md->ap_handshake_work);
                                md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
                                iowrite32(D2H_INT_ASYNC_AP_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
                                t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
                        }
                }
                spin_unlock_irqrestore(&md->exp_lock, flags);

                t7xx_mhccif_mask_clr(md->t7xx_dev,
                                     D2H_INT_EXCEPTION_INIT |
                                     D2H_INT_EXCEPTION_INIT_DONE |
                                     D2H_INT_EXCEPTION_CLEARQ_DONE |
                                     D2H_INT_EXCEPTION_ALLQ_RESET);
                break;

        case FSM_READY:
                t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
                t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
                break;

        default:
                break;
        }
}

void t7xx_md_exception_handshake(struct t7xx_modem *md)
{
        struct device *dev = &md->t7xx_dev->pdev->dev;
        int ret;

        t7xx_md_exception(md, HIF_EX_INIT);
        ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_INIT_DONE);
        if (ret)
                dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_INIT_DONE);

        t7xx_md_exception(md, HIF_EX_INIT_DONE);
        ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_CLEARQ_DONE);
        if (ret)
                dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_CLEARQ_DONE);

        t7xx_md_exception(md, HIF_EX_CLEARQ_DONE);
        ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_ALLQ_RESET);
        if (ret)
                dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_ALLQ_RESET);

        t7xx_md_exception(md, HIF_EX_ALLQ_RESET);
}

static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev)
{
        struct device *dev = &t7xx_dev->pdev->dev;
        struct t7xx_modem *md;

        md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL);
        if (!md)
                return NULL;

        md->t7xx_dev = t7xx_dev;
        t7xx_dev->md = md;
        spin_lock_init(&md->exp_lock);
        md->handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI,
                                           0, "md_hk_wq");
        if (!md->handshake_wq)
                return NULL;

        INIT_WORK(&md->handshake_work, t7xx_md_hk_wq);
        md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK;
        md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |=
                FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);

        INIT_WORK(&md->ap_handshake_work, t7xx_ap_hk_wq);
        md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] &= ~FEATURE_MSK;
        md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] |=
                FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);

        return md;
}

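/* Reset the modem software state: FSM, both CLDMA instances and the port
 * proxy, then clear the core handshake state via t7xx_core_reset().
 */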
int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev)
{
        struct t7xx_modem *md = t7xx_dev->md;

        md->md_init_finish = false;
        md->exp_id = 0;
        t7xx_fsm_reset(md);
        t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]);
        t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_AP]);
        t7xx_port_proxy_reset(md->port_prox);
        md->md_init_finish = true;
        return t7xx_core_reset(md);
}

/**
 * t7xx_md_init() - Initialize modem.
 * @t7xx_dev: MTK device.
 *
 * Allocate and initialize MD control block, and initialize data path.
 * Register MHCCIF ISR and RGU ISR, and start the state machine.
 *
 * Return:
 ** 0           - Success.
 ** -ENOMEM     - Allocation failure.
 */
int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
{
        struct t7xx_modem *md;
        int ret;

        md = t7xx_md_alloc(t7xx_dev);
        if (!md)
                return -ENOMEM;

        ret = t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev);
        if (ret)
                goto err_destroy_hswq;

        ret = t7xx_cldma_alloc(CLDMA_ID_AP, t7xx_dev);
        if (ret)
                goto err_destroy_hswq;

        ret = t7xx_fsm_init(md);
        if (ret)
                goto err_destroy_hswq;

        ret = t7xx_ccmni_init(t7xx_dev);
        if (ret)
                goto err_uninit_fsm;

        ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]);
        if (ret)
                goto err_uninit_ccmni;

        ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_AP]);
        if (ret)
                goto err_uninit_md_cldma;

        ret = t7xx_port_proxy_init(md);
        if (ret)
                goto err_uninit_ap_cldma;

        ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0);
        if (ret) /* t7xx_fsm_uninit() flushes cmd queue */
                goto err_uninit_proxy;

        t7xx_md_sys_sw_init(t7xx_dev);
        md->md_init_finish = true;
        return 0;

err_uninit_proxy:
        t7xx_port_proxy_uninit(md->port_prox);

err_uninit_ap_cldma:
        t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);

err_uninit_md_cldma:
        t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);

err_uninit_ccmni:
        t7xx_ccmni_exit(t7xx_dev);

err_uninit_fsm:
        t7xx_fsm_uninit(md);

err_destroy_hswq:
        destroy_workqueue(md->handshake_wq);
        dev_err(&t7xx_dev->pdev->dev, "Modem init failed\n");
        return ret;
}

void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
{
        enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
        struct t7xx_modem *md = t7xx_dev->md;

        t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);

        if (!md->md_init_finish)
                return;

        if (mode != T7XX_RESET && mode != T7XX_UNKNOWN)
                t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
        t7xx_port_proxy_uninit(md->port_prox);
        t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
        t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
        t7xx_ccmni_exit(t7xx_dev);
        t7xx_fsm_uninit(md);
        destroy_workqueue(md->handshake_wq);
}