// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <[email protected]>
 *  Eliot Lee <[email protected]>
 *  Moises Veleta <[email protected]>
 *  Ricardo Martinez <[email protected]>
 *
 * Contributors:
 *  Amir Hanania <[email protected]>
 *  Chiranjeevi Rapolu <[email protected]>
 *  Sreehari Kancharla <[email protected]>
 */

#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/kthread.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_cldma.h"
#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_netdev.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define RT_ID_MD_PORT_ENUM      0
/* Modem feature query identification code - "ICCC" */
#define MD_FEATURE_QUERY_ID     0x49434343

#define FEATURE_VER             GENMASK(7, 4)
#define FEATURE_MSK             GENMASK(3, 0)
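/* Each feature_set byte packs a version number in bits 7..4 (FEATURE_VER)
 * and a support type in bits 3..0 (FEATURE_MSK); for example,
 * FIELD_GET(FEATURE_MSK, 0x21) yields 1, i.e. MTK_FEATURE_NOT_SUPPORTED.
 */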

#define RGU_RESET_DELAY_MS      10
#define PORT_RESET_DELAY_MS     2000
#define EX_HS_TIMEOUT_MS        5000
#define EX_HS_POLL_DELAY_MS     10

enum mtk_feature_support_type {
        MTK_FEATURE_DOES_NOT_EXIST,
        MTK_FEATURE_NOT_SUPPORTED,
        MTK_FEATURE_MUST_BE_SUPPORTED,
};

static unsigned int t7xx_get_interrupt_status(struct t7xx_pci_dev *t7xx_dev)
{
        return t7xx_mhccif_read_sw_int_sts(t7xx_dev) & D2H_SW_INT_MASK;
}

/**
 * t7xx_pci_mhccif_isr() - Process MHCCIF interrupts.
 * @t7xx_dev: MTK device.
 *
 * Check the interrupt status and queue commands accordingly.
 *
 * Returns:
 ** 0           - Success.
 ** -EINVAL     - Failure to get FSM control.
 */
int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev)
{
        struct t7xx_modem *md = t7xx_dev->md;
        struct t7xx_fsm_ctl *ctl;
        unsigned int int_sta;
        int ret = 0;
        u32 mask;

        ctl = md->fsm_ctl;
        if (!ctl) {
                dev_err_ratelimited(&t7xx_dev->pdev->dev,
                                    "MHCCIF interrupt received before initializing MD monitor\n");
                return -EINVAL;
        }

        spin_lock_bh(&md->exp_lock);
        int_sta = t7xx_get_interrupt_status(t7xx_dev);
        md->exp_id |= int_sta;
        if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
                if (ctl->md_state == MD_STATE_INVALID ||
                    ctl->md_state == MD_STATE_WAITING_FOR_HS1 ||
                    ctl->md_state == MD_STATE_WAITING_FOR_HS2 ||
                    ctl->md_state == MD_STATE_READY) {
                        md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
                        ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_CCIF_EX);
                }
        } else if (md->exp_id & D2H_INT_PORT_ENUM) {
                md->exp_id &= ~D2H_INT_PORT_ENUM;

                if (ctl->curr_state == FSM_STATE_INIT || ctl->curr_state == FSM_STATE_PRE_START ||
                    ctl->curr_state == FSM_STATE_STOPPED)
                        ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_PORT_ENUM);
        } else if (ctl->md_state == MD_STATE_WAITING_FOR_HS1) {
                mask = t7xx_mhccif_mask_get(t7xx_dev);
                if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) {
                        md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
                        queue_work(md->handshake_wq, &md->handshake_work);
                }
        }
        spin_unlock_bh(&md->exp_lock);

        return ret;
}

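/* Ack the device-side RGU interrupt. The TOPRGU status register is assumed
 * write-one-to-clear: reading the pending bits and writing them back
 * clears them.
 */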
static void t7xx_clr_device_irq_via_pcie(struct t7xx_pci_dev *t7xx_dev)
{
        struct t7xx_addr_base *pbase_addr = &t7xx_dev->base_addr;
        void __iomem *reset_pcie_reg;
        u32 val;

        reset_pcie_reg = pbase_addr->pcie_ext_reg_base + TOPRGU_CH_PCIE_IRQ_STA -
                          pbase_addr->pcie_dev_reg_trsl_addr;
        val = ioread32(reset_pcie_reg);
        iowrite32(val, reset_pcie_reg);
}

void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev)
{
        /* Clear L2 */
        t7xx_clr_device_irq_via_pcie(t7xx_dev);
        /* Clear L1 */
        t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
}

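/* Evaluate the named ACPI method (e.g. "_RST") on the device's ACPI handle.
 * When ACPI support is not compiled in there is nothing to call, so this
 * compiles to a no-op returning 0.
 */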
static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name)
{
#ifdef CONFIG_ACPI
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct device *dev = &t7xx_dev->pdev->dev;
        acpi_status acpi_ret;
        acpi_handle handle;

        handle = ACPI_HANDLE(dev);
        if (!handle) {
                dev_err(dev, "ACPI handle not found\n");
                return -EFAULT;
        }

        if (!acpi_has_method(handle, fn_name)) {
                dev_err(dev, "%s method not found\n", fn_name);
                return -EFAULT;
        }

        acpi_ret = acpi_evaluate_object(handle, fn_name, NULL, &buffer);
        if (ACPI_FAILURE(acpi_ret)) {
                dev_err(dev, "%s method fail: %s\n", fn_name, acpi_format_exception(acpi_ret));
                return -EFAULT;
        }

        kfree(buffer.pointer);

#endif
        return 0;
}

int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev)
{
        return t7xx_acpi_reset(t7xx_dev, "_RST");
}

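/* Issue the reset type the device requests in its MISC status register:
 * a platform-level reload (PLDR) via the MRST._RST ACPI method, or a
 * function-level reload (FLDR) via the device's _RST method.
 */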
static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
{
        u32 val;

        val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
        if (val & MISC_RESET_TYPE_PLDR)
                t7xx_acpi_reset(t7xx_dev, "MRST._RST");
        else if (val & MISC_RESET_TYPE_FLDR)
                t7xx_acpi_fldr_func(t7xx_dev);
}

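/* Threaded half of the RGU ISR: give the RGU reset time to settle before
 * reloading the device via the PMIC.
 */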
static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data)
{
        struct t7xx_pci_dev *t7xx_dev = data;

        msleep(RGU_RESET_DELAY_MS);
        t7xx_reset_device_via_pmic(t7xx_dev);
        return IRQ_HANDLED;
}

static irqreturn_t t7xx_rgu_isr_handler(int irq, void *data)
{
        struct t7xx_pci_dev *t7xx_dev = data;
        struct t7xx_modem *modem;

        t7xx_clear_rgu_irq(t7xx_dev);
        if (!t7xx_dev->rgu_pci_irq_en)
                return IRQ_HANDLED;

        modem = t7xx_dev->md;
        modem->rgu_irq_asserted = true;
        t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
        return IRQ_WAKE_THREAD;
}

static void t7xx_pcie_register_rgu_isr(struct t7xx_pci_dev *t7xx_dev)
{
        /* Registers RGU callback ISR with PCIe driver */
        t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
        t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);

        t7xx_dev->intr_handler[SAP_RGU_INT] = t7xx_rgu_isr_handler;
        t7xx_dev->intr_thread[SAP_RGU_INT] = t7xx_rgu_isr_thread;
        t7xx_dev->callback_param[SAP_RGU_INT] = t7xx_dev;
        t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
}

/**
 * t7xx_cldma_exception() - CLDMA exception handler.
 * @md_ctrl: modem control struct.
 * @stage: exception stage.
 *
 * Part of the modem exception recovery.
 * Stages run one after the other as described below:
 * HIF_EX_INIT:         Disable and clear TXQ.
 * HIF_EX_CLEARQ_DONE:  Disable RX, flush TX/RX workqueues and clear RX.
 * HIF_EX_ALLQ_RESET:   HW is back in safe mode for re-initialization and restart.
 */

/* Modem Exception Handshake Flow
 *
 * Modem HW Exception interrupt received
 *           (MD_IRQ_CCIF_EX)
 *                   |
 *         +---------v--------+
 *         |   HIF_EX_INIT    | : Disable and clear TXQ
 *         +------------------+
 *                   |
 *         +---------v--------+
 *         | HIF_EX_INIT_DONE | : Wait for the init to be done
 *         +------------------+
 *                   |
 *         +---------v--------+
 *         |HIF_EX_CLEARQ_DONE| : Disable and clear RXQ
 *         +------------------+ : Flush TX/RX workqueues
 *                   |
 *         +---------v--------+
 *         |HIF_EX_ALLQ_RESET | : Restart HW and CLDMA
 *         +------------------+
 */
static void t7xx_cldma_exception(struct cldma_ctrl *md_ctrl, enum hif_ex_stage stage)
{
        switch (stage) {
        case HIF_EX_INIT:
                t7xx_cldma_stop_all_qs(md_ctrl, MTK_TX);
                t7xx_cldma_clear_all_qs(md_ctrl, MTK_TX);
                break;

        case HIF_EX_CLEARQ_DONE:
                /* We do not want to get CLDMA IRQ when MD is
                 * resetting CLDMA after it got clearq_ack.
                 */
                t7xx_cldma_stop_all_qs(md_ctrl, MTK_RX);
                t7xx_cldma_stop(md_ctrl);

                if (md_ctrl->hif_id == CLDMA_ID_MD)
                        t7xx_cldma_hw_reset(md_ctrl->t7xx_dev->base_addr.infracfg_ao_base);

                t7xx_cldma_clear_all_qs(md_ctrl, MTK_RX);
                break;

        case HIF_EX_ALLQ_RESET:
                t7xx_cldma_hw_init(&md_ctrl->hw_info);
                t7xx_cldma_start(md_ctrl);
                break;

        default:
                break;
        }
}

static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage)
{
        struct t7xx_pci_dev *t7xx_dev = md->t7xx_dev;

        if (stage == HIF_EX_CLEARQ_DONE) {
                /* Give DHL time to flush data */
                msleep(PORT_RESET_DELAY_MS);
                t7xx_port_proxy_reset(md->port_prox);
        }

        t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage);

        if (stage == HIF_EX_INIT)
                t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK);
        else if (stage == HIF_EX_CLEARQ_DONE)
                t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_CLEARQ_ACK);
}

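/* Poll md->exp_id in EX_HS_POLL_DELAY_MS steps for the given exception
 * handshake event bit, giving up after EX_HS_TIMEOUT_MS.
 */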
static int t7xx_wait_hif_ex_hk_event(struct t7xx_modem *md, int event_id)
{
        unsigned int waited_time_ms = 0;

        do {
                if (md->exp_id & event_id)
                        return 0;

                waited_time_ms += EX_HS_POLL_DELAY_MS;
                msleep(EX_HS_POLL_DELAY_MS);
        } while (waited_time_ms < EX_HS_TIMEOUT_MS);

        return -EFAULT;
}

static void t7xx_md_sys_sw_init(struct t7xx_pci_dev *t7xx_dev)
{
        /* Register the MHCCIF ISR for MD exception, port enum and
         * async handshake notifications.
         */
        t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);
        t7xx_mhccif_mask_clr(t7xx_dev, D2H_INT_PORT_ENUM);

        /* Register RGU IRQ handler for sAP exception notification */
        t7xx_dev->rgu_pci_irq_en = true;
        t7xx_pcie_register_rgu_isr(t7xx_dev);
}

struct feature_query {
        __le32 head_pattern;
        u8 feature_set[FEATURE_COUNT];
        __le32 tail_pattern;
};

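/* Build the host feature query (HS1): the host feature_set bracketed by
 * the "ICCC" head/tail patterns, sent to the device on the control port.
 */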
static void t7xx_prepare_host_rt_data_query(struct t7xx_sys_info *core)
{
        struct feature_query *ft_query;
        struct sk_buff *skb;

        skb = t7xx_ctrl_alloc_skb(sizeof(*ft_query));
        if (!skb)
                return;

        ft_query = skb_put(skb, sizeof(*ft_query));
        ft_query->head_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);
        memcpy(ft_query->feature_set, core->feature_set, FEATURE_COUNT);
        ft_query->tail_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);

        /* Send HS1 message to device */
        t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS1_MSG, 0);
}

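/* Validate the modem's feature query and answer it with the host runtime
 * data (HS3): one mtk_runtime_feature entry per feature that is not
 * flagged MUST_BE_SUPPORTED.
 */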
static int t7xx_prepare_device_rt_data(struct t7xx_sys_info *core, struct device *dev,
                                       void *data)
{
        struct feature_query *md_feature = data;
        struct mtk_runtime_feature *rt_feature;
        unsigned int i, rt_data_len = 0;
        struct sk_buff *skb;

        /* Parse MD runtime data query */
        if (le32_to_cpu(md_feature->head_pattern) != MD_FEATURE_QUERY_ID ||
            le32_to_cpu(md_feature->tail_pattern) != MD_FEATURE_QUERY_ID) {
                dev_err(dev, "Invalid feature pattern: head 0x%x, tail 0x%x\n",
                        le32_to_cpu(md_feature->head_pattern),
                        le32_to_cpu(md_feature->tail_pattern));
                return -EINVAL;
        }

        for (i = 0; i < FEATURE_COUNT; i++) {
                if (FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]) !=
                    MTK_FEATURE_MUST_BE_SUPPORTED)
                        rt_data_len += sizeof(*rt_feature);
        }

        skb = t7xx_ctrl_alloc_skb(rt_data_len);
        if (!skb)
                return -ENOMEM;

        rt_feature = skb_put(skb, rt_data_len);
        memset(rt_feature, 0, rt_data_len);

        /* Fill runtime feature */
        for (i = 0; i < FEATURE_COUNT; i++) {
                u8 md_feature_mask = FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]);

                if (md_feature_mask == MTK_FEATURE_MUST_BE_SUPPORTED)
                        continue;

                rt_feature->feature_id = i;
                if (md_feature_mask == MTK_FEATURE_DOES_NOT_EXIST)
                        rt_feature->support_info = md_feature->feature_set[i];

                rt_feature++;
        }

        /* Send HS3 message to device */
        t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS3_MSG, 0);
        return 0;
}

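/* Walk the runtime feature entries received from the device and verify
 * that every feature the host requires (MUST_BE_SUPPORTED) is reported
 * back as supported; dispatch the port enumeration payload when seen.
 */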
static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_info *core,
                                   struct device *dev, void *data, int data_length)
{
        enum mtk_feature_support_type ft_spt_st, ft_spt_cfg;
        struct mtk_runtime_feature *rt_feature;
        int i, offset;

        offset = sizeof(struct feature_query);
        for (i = 0; i < FEATURE_COUNT && offset < data_length; i++) {
                rt_feature = data + offset;
                offset += sizeof(*rt_feature) + le32_to_cpu(rt_feature->data_len);

                ft_spt_cfg = FIELD_GET(FEATURE_MSK, core->feature_set[i]);
                if (ft_spt_cfg != MTK_FEATURE_MUST_BE_SUPPORTED)
                        continue;

                ft_spt_st = FIELD_GET(FEATURE_MSK, rt_feature->support_info);
                if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED)
                        return -EINVAL;

                if (i == RT_ID_MD_PORT_ENUM)
                        t7xx_port_enum_msg_handler(ctl->md, rt_feature->data);
        }

        return 0;
}

static int t7xx_core_reset(struct t7xx_modem *md)
{
        struct device *dev = &md->t7xx_dev->pdev->dev;
        struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

        md->core_md.ready = false;

        if (!ctl) {
                dev_err(dev, "FSM is not initialized\n");
                return -EINVAL;
        }

        if (md->core_md.handshake_ongoing) {
                int ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);

                if (ret)
                        return ret;
        }

        md->core_md.handshake_ongoing = false;
        return 0;
}

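/* Core handshake: send the HS1 query, wait for the HS2 event carrying the
 * device runtime data, parse it and answer with HS3. Bails out on the
 * err_detect event, on kthread stop, or when a modem exception is flagged.
 */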
static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl,
                                 enum t7xx_fsm_event_state event_id,
                                 enum t7xx_fsm_event_state err_detect)
{
        struct t7xx_fsm_event *event = NULL, *event_next;
        struct t7xx_sys_info *core_info = &md->core_md;
        struct device *dev = &md->t7xx_dev->pdev->dev;
        unsigned long flags;
        int ret;

        t7xx_prepare_host_rt_data_query(core_info);

        while (!kthread_should_stop()) {
                bool event_received = false;

                spin_lock_irqsave(&ctl->event_lock, flags);
                list_for_each_entry_safe(event, event_next, &ctl->event_queue, entry) {
                        if (event->event_id == err_detect) {
                                list_del(&event->entry);
                                spin_unlock_irqrestore(&ctl->event_lock, flags);
                                dev_err(dev, "Core handshake error event received\n");
                                goto err_free_event;
                        } else if (event->event_id == event_id) {
                                list_del(&event->entry);
                                event_received = true;
                                break;
                        }
                }
                spin_unlock_irqrestore(&ctl->event_lock, flags);

                if (event_received)
                        break;

                wait_event_interruptible(ctl->event_wq, !list_empty(&ctl->event_queue) ||
                                         kthread_should_stop());
                if (kthread_should_stop())
                        goto err_free_event;
        }

        if (!event || ctl->exp_flg)
                goto err_free_event;

        ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length);
        if (ret) {
                dev_err(dev, "Host failure parsing runtime data: %d\n", ret);
                goto err_free_event;
        }

        if (ctl->exp_flg)
                goto err_free_event;

        ret = t7xx_prepare_device_rt_data(core_info, dev, event->data);
        if (ret) {
                dev_err(dev, "Device failure parsing runtime data: %d\n", ret);
                goto err_free_event;
        }

        core_info->ready = true;
        core_info->handshake_ongoing = false;
        wake_up(&ctl->async_hk_wq);
err_free_event:
        kfree(event);
}

static void t7xx_md_hk_wq(struct work_struct *work)
{
        struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work);
        struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

        /* Clear the HS2 EXIT event appended in core_reset() */
        t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT);
        t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]);
        t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
        t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
        md->core_md.handshake_ongoing = true;
        t7xx_core_hk_handler(md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
}

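/* React to FSM state transitions: unmask port enumeration for PRE_START,
 * process any pending exception/handshake interrupts and rearm the MHCCIF
 * masks on START, and mask the async handshake interrupt once READY.
 */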
void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
{
        struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
        void __iomem *mhccif_base;
        unsigned int int_sta;
        unsigned long flags;

        switch (evt_id) {
        case FSM_PRE_START:
                t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM);
                break;

        case FSM_START:
                t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_PORT_ENUM);

                spin_lock_irqsave(&md->exp_lock, flags);
                int_sta = t7xx_get_interrupt_status(md->t7xx_dev);
                md->exp_id |= int_sta;
                if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
                        ctl->exp_flg = true;
                        md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
                        md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
                } else if (ctl->exp_flg) {
                        md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
                } else if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
                        queue_work(md->handshake_wq, &md->handshake_work);
                        md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
                        mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;
                        iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
                        t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
                } else {
                        t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
                }
                spin_unlock_irqrestore(&md->exp_lock, flags);

                t7xx_mhccif_mask_clr(md->t7xx_dev,
                                     D2H_INT_EXCEPTION_INIT |
                                     D2H_INT_EXCEPTION_INIT_DONE |
                                     D2H_INT_EXCEPTION_CLEARQ_DONE |
                                     D2H_INT_EXCEPTION_ALLQ_RESET);
                break;

        case FSM_READY:
                t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
                break;

        default:
                break;
        }
}

void t7xx_md_exception_handshake(struct t7xx_modem *md)
{
        struct device *dev = &md->t7xx_dev->pdev->dev;
        int ret;

        t7xx_md_exception(md, HIF_EX_INIT);
        ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_INIT_DONE);
        if (ret)
                dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_INIT_DONE);

        t7xx_md_exception(md, HIF_EX_INIT_DONE);
        ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_CLEARQ_DONE);
        if (ret)
                dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_CLEARQ_DONE);

        t7xx_md_exception(md, HIF_EX_CLEARQ_DONE);
        ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_ALLQ_RESET);
        if (ret)
                dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_ALLQ_RESET);

        t7xx_md_exception(md, HIF_EX_ALLQ_RESET);
}

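/* Allocate the modem control block plus its dedicated handshake workqueue,
 * and mark the port enumeration feature as required from the device.
 */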
static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev)
{
        struct device *dev = &t7xx_dev->pdev->dev;
        struct t7xx_modem *md;

        md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL);
        if (!md)
                return NULL;

        md->t7xx_dev = t7xx_dev;
        t7xx_dev->md = md;
        spin_lock_init(&md->exp_lock);
        md->handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI,
                                           0, "md_hk_wq");
        if (!md->handshake_wq)
                return NULL;

        INIT_WORK(&md->handshake_work, t7xx_md_hk_wq);
        md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK;
        md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |=
                FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
        return md;
}

int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev)
{
        struct t7xx_modem *md = t7xx_dev->md;

        md->md_init_finish = false;
        md->exp_id = 0;
        t7xx_fsm_reset(md);
        t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]);
        t7xx_port_proxy_reset(md->port_prox);
        md->md_init_finish = true;
        return t7xx_core_reset(md);
}

/**
 * t7xx_md_init() - Initialize modem.
 * @t7xx_dev: MTK device.
 *
 * Allocate and initialize MD control block, and initialize data path.
 * Register MHCCIF ISR and RGU ISR, and start the state machine.
 *
 * Return:
 ** 0           - Success.
 ** -ENOMEM     - Allocation failure.
 */
int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
{
        struct t7xx_modem *md;
        int ret;

        md = t7xx_md_alloc(t7xx_dev);
        if (!md)
                return -ENOMEM;

        ret = t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev);
        if (ret)
                goto err_destroy_hswq;

        ret = t7xx_fsm_init(md);
        if (ret)
                goto err_destroy_hswq;

        ret = t7xx_ccmni_init(t7xx_dev);
        if (ret)
                goto err_uninit_fsm;

        ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]);
        if (ret)
                goto err_uninit_ccmni;

        ret = t7xx_port_proxy_init(md);
        if (ret)
                goto err_uninit_md_cldma;

        ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0);
        if (ret) /* fsm_uninit flushes cmd queue */
                goto err_uninit_proxy;

        t7xx_md_sys_sw_init(t7xx_dev);
        md->md_init_finish = true;
        return 0;

err_uninit_proxy:
        t7xx_port_proxy_uninit(md->port_prox);

err_uninit_md_cldma:
        t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);

err_uninit_ccmni:
        t7xx_ccmni_exit(t7xx_dev);

err_uninit_fsm:
        t7xx_fsm_uninit(md);

err_destroy_hswq:
        destroy_workqueue(md->handshake_wq);
        dev_err(&t7xx_dev->pdev->dev, "Modem init failed\n");
        return ret;
}

void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
{
        struct t7xx_modem *md = t7xx_dev->md;

        t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);

        if (!md->md_init_finish)
                return;

        t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
        t7xx_port_proxy_uninit(md->port_prox);
        t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
        t7xx_ccmni_exit(t7xx_dev);
        t7xx_fsm_uninit(md);
        destroy_workqueue(md->handshake_wq);
}