drivers/net/wwan/iosm/iosm_ipc_imem.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/delay.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_port.h"

/* Check if the given channel is a valid WWAN IP channel. */
static bool ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
{
        if (chnl)
                return chnl->ctype == IPC_CTYPE_WWAN &&
                       chnl->if_id == IPC_MEM_MUX_IP_CH_IF_ID;
        return false;
}

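/* Send a device sleep message to CP and cache the requested sleep state. */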
static int ipc_imem_msg_send_device_sleep(struct iosm_imem *ipc_imem, u32 state)
{
        union ipc_msg_prep_args prep_args = {
                .sleep.target = 1,
                .sleep.state = state,
        };

        ipc_imem->device_sleep = state;

        return ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
                                        IPC_MSG_PREP_SLEEP, &prep_args, NULL);
}

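/* Prepare a new DL TD for the pipe unless its queue limit has been reached. */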
static bool ipc_imem_dl_skb_alloc(struct iosm_imem *ipc_imem,
                                  struct ipc_pipe *pipe)
{
        /* limit max. nr of entries */
        if (pipe->nr_of_queued_entries >= pipe->max_nr_of_queued_entries)
                return false;

        return ipc_protocol_dl_td_prepare(ipc_imem->ipc_protocol, pipe);
}

/* This timer handler retries DL buffer allocation if a pipe has no free
 * buffers and rings the doorbell if new TDs are available.
 */
static int ipc_imem_tq_td_alloc_timer(struct iosm_imem *ipc_imem, int arg,
                                      void *msg, size_t size)
{
        bool new_buffers_available = false;
        bool retry_allocation = false;
        int i;

        for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
                struct ipc_pipe *pipe = &ipc_imem->channels[i].dl_pipe;

                if (!pipe->is_open || pipe->nr_of_queued_entries > 0)
                        continue;

                while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
                        new_buffers_available = true;

                if (pipe->nr_of_queued_entries == 0)
                        retry_allocation = true;
        }

        if (new_buffers_available)
                ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
                                              IPC_HP_DL_PROCESS);

        if (retry_allocation) {
                ipc_imem->hrtimer_period =
                ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
                if (!hrtimer_active(&ipc_imem->td_alloc_timer))
                        hrtimer_start(&ipc_imem->td_alloc_timer,
                                      ipc_imem->hrtimer_period,
                                      HRTIMER_MODE_REL);
        }
        return 0;
}

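/* hrtimer callback: defer the TD allocation retry to tasklet context. */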
static enum hrtimer_restart ipc_imem_td_alloc_timer_cb(struct hrtimer *hr_timer)
{
        struct iosm_imem *ipc_imem =
                container_of(hr_timer, struct iosm_imem, td_alloc_timer);
        /* Post an async tasklet event to trigger HP update Doorbell */
        ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_alloc_timer, 0, NULL,
                                 0, false);
        return HRTIMER_NORESTART;
}

/* Fast update timer tasklet handler to trigger HP update */
static int ipc_imem_tq_fast_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
                                            void *msg, size_t size)
{
        ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
                                      IPC_HP_FAST_TD_UPD_TMR);

        return 0;
}

static enum hrtimer_restart
ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
{
        struct iosm_imem *ipc_imem =
                container_of(hr_timer, struct iosm_imem, fast_update_timer);
        /* Post an async tasklet event to trigger HP update Doorbell */
        ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_fast_update_timer_cb, 0,
                                 NULL, 0, false);
        return HRTIMER_NORESTART;
}

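/* Read the CP MUX capability from MMIO and derive the MUX configuration. */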
static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
                                          struct ipc_mux_config *cfg)
{
        ipc_mmio_update_cp_capability(ipc_imem->mmio);

        if (!ipc_imem->mmio->has_mux_lite) {
                dev_err(ipc_imem->dev, "Failed to get Mux capability.");
                return -EINVAL;
        }

        cfg->protocol = MUX_LITE;

        cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
                               MUX_UL_ON_CREDITS :
                               MUX_UL;

        /* The instance ID is the same as the channel ID because it is
         * reused by the channel allocation function.
         */
        cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;
        cfg->nr_sessions = IPC_MEM_MUX_IP_SESSION_ENTRIES;

        return 0;
}

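/* Send the feature-set message to CP, using the atomic (tasklet) or the
 * blocking send path depending on the calling context.
 */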
void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
                                   unsigned int reset_enable, bool atomic_ctx)
{
        union ipc_msg_prep_args prep_args = { .feature_set.reset_enable =
                                                      reset_enable };

        if (atomic_ctx)
                ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
                                         IPC_MSG_PREP_FEATURE_SET, &prep_args,
                                         NULL);
        else
                ipc_protocol_msg_send(ipc_imem->ipc_protocol,
                                      IPC_MSG_PREP_FEATURE_SET, &prep_args);
}

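/* Start the TD update timer; outside the runtime phase, or while the timer
 * is suspended, ring the doorbell on CP immediately instead.
 */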
void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
{
        /* Use the TD update timer only in the runtime phase */
        if (!ipc_imem->enter_runtime || ipc_imem->td_update_timer_suspended) {
                /* trigger the doorbell irq on CP directly. */
                ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
                                              IPC_HP_TD_UPD_TMR_START);
                return;
        }

        if (!hrtimer_active(&ipc_imem->tdupdate_timer)) {
                ipc_imem->hrtimer_period =
                ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
                hrtimer_start(&ipc_imem->tdupdate_timer,
                              ipc_imem->hrtimer_period,
                              HRTIMER_MODE_REL);
        }
}

void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
{
        if (hrtimer_active(hr_timer))
                hrtimer_cancel(hr_timer);
}

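/* Write UL TDs for all active channels; rings the doorbell directly for
 * non-data channels and returns whether an HP update is still pending for
 * the IP data channel.
 */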
bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
        struct ipc_mem_channel *channel;
        struct sk_buff_head *ul_list;
        bool hpda_pending = false;
        bool forced_hpdu = false;
        struct ipc_pipe *pipe;
        int i;

        /* Analyze the uplink pipe of all active channels. */
        for (i = 0; i < ipc_imem->nr_of_channels; i++) {
                channel = &ipc_imem->channels[i];

                if (channel->state != IMEM_CHANNEL_ACTIVE)
                        continue;

                pipe = &channel->ul_pipe;

                /* Get the reference to the skbuf accumulator list. */
                ul_list = &channel->ul_list;

                /* Fill the transfer descriptor with the uplink buffer info. */
                hpda_pending |= ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
                                                        pipe, ul_list);

                /* A forced HP update is needed for non-data channels. */
                if (hpda_pending && !ipc_imem_check_wwan_ips(channel))
                        forced_hpdu = true;
        }

        if (forced_hpdu) {
                hpda_pending = false;
                ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
                                              IPC_HP_UL_WRITE_TD);
        }

        return hpda_pending;
}

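/* Drive the device IPC state to INIT and then RUNNING, polling the MMIO
 * state; report a modem timeout via uevent on failure.
 */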
void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem)
{
        int timeout = IPC_MODEM_BOOT_TIMEOUT;

        ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

        /* Trigger the CP interrupt to enter the init state. */
        ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
                          IPC_MEM_DEVICE_IPC_INIT);
        /* Wait for the CP update. */
        do {
                if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
                    ipc_imem->ipc_requested_state) {
                        /* Prepare the MMIO space */
                        ipc_mmio_config(ipc_imem->mmio);

                        /* Trigger the CP irq to enter the running state. */
                        ipc_imem->ipc_requested_state =
                                IPC_MEM_DEVICE_IPC_RUNNING;
                        ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
                                          IPC_MEM_DEVICE_IPC_RUNNING);

                        return;
                }
                msleep(20);
        } while (--timeout);

        /* timeout */
        dev_err(ipc_imem->dev, "%s: ipc_status(%d) ne. IPC_MEM_DEVICE_IPC_INIT",
                ipc_imem_phase_get_string(ipc_imem->phase),
                ipc_mmio_get_ipc_state(ipc_imem->mmio));

        ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_TIMEOUT);
}

/* Analyze the packet type and distribute it. */
static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
                                    struct ipc_pipe *pipe, struct sk_buff *skb)
{
        u16 port_id;

        if (!skb)
                return;

        /* An AT/control or IP packet is expected. */
        switch (pipe->channel->ctype) {
        case IPC_CTYPE_CTRL:
                port_id = pipe->channel->channel_id;

                /* Pass the packet to the wwan layer. */
                wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port, skb);
                break;

        case IPC_CTYPE_WWAN:
                if (pipe->channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
                        ipc_mux_dl_decode(ipc_imem->mux, skb);
                break;
        default:
                dev_err(ipc_imem->dev, "Invalid channel type");
                break;
        }
}

/* Process the downlink data and pass it to the char or net layer. */
static void ipc_imem_dl_pipe_process(struct iosm_imem *ipc_imem,
                                     struct ipc_pipe *pipe)
{
        s32 cnt = 0, processed_td_cnt = 0;
        struct ipc_mem_channel *channel;
        u32 head = 0, tail = 0;
        bool processed = false;
        struct sk_buff *skb;

        channel = pipe->channel;

        ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
                                         &tail);
        if (pipe->old_tail != tail) {
                if (pipe->old_tail < tail)
                        cnt = tail - pipe->old_tail;
                else
                        cnt = pipe->nr_of_entries - pipe->old_tail + tail;
        }

        processed_td_cnt = cnt;

        /* Process the pending downlink TDs. */
        while (cnt--) {
                skb = ipc_protocol_dl_td_process(ipc_imem->ipc_protocol, pipe);

                /* Analyze the packet type and distribute it. */
                ipc_imem_dl_skb_process(ipc_imem, pipe, skb);
        }

        /* Try to allocate new empty DL SKBs from head..tail - 1. */
        while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
                processed = true;

        if (processed && !ipc_imem_check_wwan_ips(channel)) {
                /* Force HP update for non-IP channels. */
                ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
                                              IPC_HP_DL_PROCESS);
                processed = false;

                /* If the fast update timer is already running, stop it. */
                ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
        }

        /* Any control channel process will get an immediate HP update.
         * Start the fast update timer only for an IP channel if all the TDs
         * were used in the last process.
         */
        if (processed && (processed_td_cnt == pipe->nr_of_entries - 1)) {
                ipc_imem->hrtimer_period =
                ktime_set(0, FORCE_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
                hrtimer_start(&ipc_imem->fast_update_timer,
                              ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
        }

        if (ipc_imem->app_notify_dl_pend)
                complete(&ipc_imem->dl_pend_sem);
}

/* Process an open uplink pipe. */
static void ipc_imem_ul_pipe_process(struct iosm_imem *ipc_imem,
                                     struct ipc_pipe *pipe)
{
        struct ipc_mem_channel *channel;
        u32 tail = 0, head = 0;
        struct sk_buff *skb;
        s32 cnt = 0;

        channel = pipe->channel;

        /* Get the current head and tail index of the pipe. */
        ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
                                         &tail);

        if (pipe->old_tail != tail) {
                if (pipe->old_tail < tail)
                        cnt = tail - pipe->old_tail;
                else
                        cnt = pipe->nr_of_entries - pipe->old_tail + tail;
        }

        /* Free UL buffers. */
        while (cnt--) {
                skb = ipc_protocol_ul_td_process(ipc_imem->ipc_protocol, pipe);

                if (!skb)
                        continue;

                /* If the user app was suspended in uplink direction - blocking
                 * write, resume it.
                 */
                if (IPC_CB(skb)->op_type == UL_USR_OP_BLOCKED)
                        complete(&channel->ul_sem);

                /* Free the skbuf element. */
                if (IPC_CB(skb)->op_type == UL_MUX_OP_ADB) {
                        if (channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
                                ipc_mux_ul_encoded_process(ipc_imem->mux, skb);
                        else
                                dev_err(ipc_imem->dev,
                                        "OP Type is UL_MUX, unknown if_id %d",
                                        channel->if_id);
                } else {
                        ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
                }
        }

        /* Check and restart the MUX TX path for the IP UL pipe. */
        if (ipc_imem_check_wwan_ips(pipe->channel))
                ipc_mux_check_n_restart_tx(ipc_imem->mux);

        if (ipc_imem->app_notify_ul_pend)
                complete(&ipc_imem->ul_pend_sem);
}

/* Handle the ROM doorbell irq: read the exit code and wake the flash app. */
static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
{
        struct ipc_mem_channel *channel;

        if (ipc_imem->flash_channel_id < 0) {
                ipc_imem->rom_exit_code = IMEM_ROM_EXIT_FAIL;
                dev_err(ipc_imem->dev, "Missing flash app:%d",
                        ipc_imem->flash_channel_id);
                return;
        }

        ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);

        /* Wake up the flash app to continue or to terminate depending
         * on the CP ROM exit code.
         */
        channel = &ipc_imem->channels[ipc_imem->flash_channel_id];
        complete(&channel->ul_sem);
}

/* Execute the UL bundle timer actions, generating the doorbell irq. */
static int ipc_imem_tq_td_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
                                          void *msg, size_t size)
{
        ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
                                      IPC_HP_TD_UPD_TMR);
        return 0;
}

/* Consider link power management in the runtime phase. */
static void ipc_imem_slp_control_exec(struct iosm_imem *ipc_imem)
{
        /* The link will go down; flush pending UL packets first. */
        if (ipc_protocol_pm_dev_sleep_handle(ipc_imem->ipc_protocol) &&
            hrtimer_active(&ipc_imem->tdupdate_timer)) {
                /* Generate the doorbell irq. */
                ipc_imem_tq_td_update_timer_cb(ipc_imem, 0, NULL, 0);
                /* Stop the TD update timer. */
                ipc_imem_hrtimer_stop(&ipc_imem->tdupdate_timer);
                /* Stop the fast update timer. */
                ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
        }
}

/* Execute the startup timer and wait for a delayed start (e.g. NAND). */
static int ipc_imem_tq_startup_timer_cb(struct iosm_imem *ipc_imem, int arg,
                                        void *msg, size_t size)
{
        /* Update & check the current operation phase. */
        if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN)
                return -EIO;

        if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
            IPC_MEM_DEVICE_IPC_UNINIT) {
                ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

                ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
                                  IPC_MEM_DEVICE_IPC_INIT);

                /* Reduce the period to 100 ms to check for the mmio init
                 * state.
                 */
                ipc_imem->hrtimer_period = ktime_set(0, 100 * 1000UL * 1000ULL);
                if (!hrtimer_active(&ipc_imem->startup_timer))
                        hrtimer_start(&ipc_imem->startup_timer,
                                      ipc_imem->hrtimer_period,
                                      HRTIMER_MODE_REL);
        } else if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
                   IPC_MEM_DEVICE_IPC_INIT) {
                /* Startup complete - disable the timer. */
                ipc_imem_hrtimer_stop(&ipc_imem->startup_timer);

                /* Prepare the MMIO space */
                ipc_mmio_config(ipc_imem->mmio);
                ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_RUNNING;
                ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
                                  IPC_MEM_DEVICE_IPC_RUNNING);
        }

        return 0;
}

static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
{
        enum hrtimer_restart result = HRTIMER_NORESTART;
        struct iosm_imem *ipc_imem =
                container_of(hr_timer, struct iosm_imem, startup_timer);

        if (ktime_to_ns(ipc_imem->hrtimer_period)) {
                hrtimer_forward(&ipc_imem->startup_timer, ktime_get(),
                                ipc_imem->hrtimer_period);
                result = HRTIMER_RESTART;
        }

        ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_startup_timer_cb, 0,
                                 NULL, 0, false);
        return result;
}

/* Get the CP execution stage */
static enum ipc_mem_exec_stage
ipc_imem_get_exec_stage_buffered(struct iosm_imem *ipc_imem)
{
        return (ipc_imem->phase == IPC_P_RUN &&
                ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) ?
                       ipc_protocol_get_ap_exec_stage(ipc_imem->ipc_protocol) :
                       ipc_mmio_get_exec_stage(ipc_imem->mmio);
}

/* Callback to send the modem ready uevent */
static int ipc_imem_send_mdm_rdy_cb(struct iosm_imem *ipc_imem, int arg,
                                    void *msg, size_t size)
{
        enum ipc_mem_exec_stage exec_stage =
                ipc_imem_get_exec_stage_buffered(ipc_imem);

        if (exec_stage == IPC_MEM_EXEC_STAGE_RUN)
                ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);

        return 0;
}

/* This function is executed in a task context via an ipc_worker object,
 * as the creation or removal of devices can't be done from tasklet context.
 */
static void ipc_imem_run_state_worker(struct work_struct *instance)
{
        struct ipc_chnl_cfg chnl_cfg_port = { 0 };
        struct ipc_mux_config mux_cfg;
        struct iosm_imem *ipc_imem;
        u8 ctrl_chl_idx = 0;

        ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);

        if (ipc_imem->phase != IPC_P_RUN) {
                dev_err(ipc_imem->dev,
                        "Modem link down. Exit run state worker.");
                return;
        }

        if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
                ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);

        ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
        if (ipc_imem->mux)
                ipc_imem->mux->wwan = ipc_imem->wwan;

        while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
                if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
                        ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
                        if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
                                ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
                                                      chnl_cfg_port,
                                                      IRQ_MOD_OFF);
                                ipc_imem->ipc_port[ctrl_chl_idx] =
                                        ipc_port_init(ipc_imem, chnl_cfg_port);
                        }
                }
                ctrl_chl_idx++;
        }

        ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
                                 false);

        /* Complete all memory stores before setting bit */
        smp_mb__before_atomic();

        set_bit(FULLY_FUNCTIONAL, &ipc_imem->flag);

        /* Complete all memory stores after setting bit */
        smp_mb__after_atomic();
}

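/* Central MSI handler: update the CP-controlled phase, process the message
 * ring and all open pipes, and re-arm the TD timers as needed.
 */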
static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
{
        enum ipc_mem_device_ipc_state curr_ipc_status;
        enum ipc_phase old_phase, phase;
        bool retry_allocation = false;
        bool ul_pending = false;
        int ch_id, i;

        if (irq != IMEM_IRQ_DONT_CARE)
                ipc_imem->ev_irq_pending[irq] = false;

        /* Get the internal phase. */
        old_phase = ipc_imem->phase;

        if (old_phase == IPC_P_OFF_REQ) {
                dev_dbg(ipc_imem->dev,
                        "[%s]: Ignoring MSI. Deinit sequence in progress!",
                        ipc_imem_phase_get_string(old_phase));
                return;
        }

        /* Update the phase controlled by CP. */
        phase = ipc_imem_phase_update(ipc_imem);

        switch (phase) {
        case IPC_P_RUN:
                if (!ipc_imem->enter_runtime) {
                        /* Execute the transition from flash/boot to runtime. */
                        ipc_imem->enter_runtime = 1;

                        /* allow device to sleep, default value is
                         * IPC_HOST_SLEEP_ENTER_SLEEP
                         */
                        ipc_imem_msg_send_device_sleep(ipc_imem,
                                                       ipc_imem->device_sleep);

                        ipc_imem_msg_send_feature_set(ipc_imem,
                                                      IPC_MEM_INBAND_CRASH_SIG,
                                                      true);
                }

                curr_ipc_status =
                        ipc_protocol_get_ipc_status(ipc_imem->ipc_protocol);

                /* check ipc_status change */
                if (ipc_imem->ipc_status != curr_ipc_status) {
                        ipc_imem->ipc_status = curr_ipc_status;

                        if (ipc_imem->ipc_status ==
                            IPC_MEM_DEVICE_IPC_RUNNING) {
                                schedule_work(&ipc_imem->run_state_worker);
                        }
                }

                /* Consider power management in the runtime phase. */
                ipc_imem_slp_control_exec(ipc_imem);
                break; /* Continue with skbuf processing. */

                /* Unexpected phases. */
        case IPC_P_OFF:
        case IPC_P_OFF_REQ:
                dev_err(ipc_imem->dev, "confused phase %s",
                        ipc_imem_phase_get_string(phase));
                return;

        case IPC_P_PSI:
                if (old_phase != IPC_P_ROM)
                        break;

                fallthrough;
                /* On CP the PSI phase is already active. */

        case IPC_P_ROM:
                /* Before CP ROM driver starts the PSI image, it sets
                 * the exit_code field on the doorbell scratchpad and
                 * triggers the irq.
                 */
                ipc_imem_rom_irq_exec(ipc_imem);
                return;

        default:
                break;
        }

        /* process message ring */
        ipc_protocol_msg_process(ipc_imem, irq);

        /* process all open pipes */
        for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
                struct ipc_pipe *ul_pipe = &ipc_imem->channels[i].ul_pipe;
                struct ipc_pipe *dl_pipe = &ipc_imem->channels[i].dl_pipe;

                if (dl_pipe->is_open &&
                    (irq == IMEM_IRQ_DONT_CARE || irq == dl_pipe->irq)) {
                        ipc_imem_dl_pipe_process(ipc_imem, dl_pipe);

                        if (dl_pipe->nr_of_queued_entries == 0)
                                retry_allocation = true;
                }

                if (ul_pipe->is_open)
                        ipc_imem_ul_pipe_process(ipc_imem, ul_pipe);
        }

        /* Try to generate new ADB or ADGH. */
        if (ipc_mux_ul_data_encode(ipc_imem->mux))
                ipc_imem_td_update_timer_start(ipc_imem);

        /* Continue the send procedure with accumulated SIO or NETIF packets.
         * Reset the debounce flags.
         */
        ul_pending |= ipc_imem_ul_write_td(ipc_imem);

        /* if UL data is pending restart TD update timer */
        if (ul_pending) {
                ipc_imem->hrtimer_period =
                ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
                if (!hrtimer_active(&ipc_imem->tdupdate_timer))
                        hrtimer_start(&ipc_imem->tdupdate_timer,
                                      ipc_imem->hrtimer_period,
                                      HRTIMER_MODE_REL);
        }

        /* If CP has executed the transition
         * from IPC_INIT to IPC_RUNNING in the PSI
         * phase, wake up the flash app to open the pipes.
         */
        if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
            ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
            ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
                    IPC_MEM_DEVICE_IPC_RUNNING &&
            ipc_imem->flash_channel_id >= 0) {
                /* Wake up the flash app to open the pipes. */
                ch_id = ipc_imem->flash_channel_id;
                complete(&ipc_imem->channels[ch_id].ul_sem);
        }

        /* Reset the expected CP state. */
        ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_DONT_CARE;

        if (retry_allocation) {
                ipc_imem->hrtimer_period =
                ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
                if (!hrtimer_active(&ipc_imem->td_alloc_timer))
                        hrtimer_start(&ipc_imem->td_alloc_timer,
                                      ipc_imem->hrtimer_period,
                                      HRTIMER_MODE_REL);
        }
}

/* Callback by tasklet for handling interrupt events. */
static int ipc_imem_tq_irq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
                              size_t size)
{
        ipc_imem_handle_irq(ipc_imem, arg);

        return 0;
}

void ipc_imem_ul_send(struct iosm_imem *ipc_imem)
{
        /* start doorbell irq delay timer if UL is pending */
        if (ipc_imem_ul_write_td(ipc_imem))
                ipc_imem_td_update_timer_start(ipc_imem);
}

/* Check the execution stage and update the AP phase */
static enum ipc_phase ipc_imem_phase_update_check(struct iosm_imem *ipc_imem,
                                                  enum ipc_mem_exec_stage stage)
{
        switch (stage) {
        case IPC_MEM_EXEC_STAGE_BOOT:
                if (ipc_imem->phase != IPC_P_ROM) {
                        /* Send this event only once */
                        ipc_uevent_send(ipc_imem->dev, UEVENT_ROM_READY);
                }

                ipc_imem->phase = IPC_P_ROM;
                break;

        case IPC_MEM_EXEC_STAGE_PSI:
                ipc_imem->phase = IPC_P_PSI;
                break;

        case IPC_MEM_EXEC_STAGE_EBL:
                ipc_imem->phase = IPC_P_EBL;
                break;

        case IPC_MEM_EXEC_STAGE_RUN:
                if (ipc_imem->phase != IPC_P_RUN &&
                    ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) {
                        ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
                }
                ipc_imem->phase = IPC_P_RUN;
                break;

        case IPC_MEM_EXEC_STAGE_CRASH:
                if (ipc_imem->phase != IPC_P_CRASH)
                        ipc_uevent_send(ipc_imem->dev, UEVENT_CRASH);

                ipc_imem->phase = IPC_P_CRASH;
                break;

        case IPC_MEM_EXEC_STAGE_CD_READY:
                if (ipc_imem->phase != IPC_P_CD_READY)
                        ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY);
                ipc_imem->phase = IPC_P_CD_READY;
                break;

        default:
                /* unknown exec stage:
                 * assume that link is down and send info to listeners
                 */
                ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
                break;
        }

        return ipc_imem->phase;
}

/* Send msg to device to open pipe */
static bool ipc_imem_pipe_open(struct iosm_imem *ipc_imem,
                               struct ipc_pipe *pipe)
{
        union ipc_msg_prep_args prep_args = {
                .pipe_open.pipe = pipe,
        };

        if (ipc_protocol_msg_send(ipc_imem->ipc_protocol,
                                  IPC_MSG_PREP_PIPE_OPEN, &prep_args) == 0)
                pipe->is_open = true;

        return pipe->is_open;
}

/* Allocates the TDs for the given pipe along with firing HP update DB. */
static int ipc_imem_tq_pipe_td_alloc(struct iosm_imem *ipc_imem, int arg,
                                     void *msg, size_t size)
{
        struct ipc_pipe *dl_pipe = msg;
        bool processed = false;
        int i;

        for (i = 0; i < dl_pipe->nr_of_entries - 1; i++)
                processed |= ipc_imem_dl_skb_alloc(ipc_imem, dl_pipe);

        /* Trigger the doorbell irq to inform CP that new downlink buffers are
         * available.
         */
        if (processed)
                ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, arg);

        return 0;
}

static enum hrtimer_restart
ipc_imem_td_update_timer_cb(struct hrtimer *hr_timer)
{
        struct iosm_imem *ipc_imem =
                container_of(hr_timer, struct iosm_imem, tdupdate_timer);

        ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_update_timer_cb, 0,
                                 NULL, 0, false);
        return HRTIMER_NORESTART;
}

/* Get the CP execution state and map it to the AP phase. */
enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem)
{
        enum ipc_mem_exec_stage exec_stage =
                                ipc_imem_get_exec_stage_buffered(ipc_imem);
        /* While a deinit is requested, keep the internal precalculated
         * phase.
         */
        return ipc_imem->phase == IPC_P_OFF_REQ ?
                       ipc_imem->phase :
                       ipc_imem_phase_update_check(ipc_imem, exec_stage);
}

const char *ipc_imem_phase_get_string(enum ipc_phase phase)
{
        switch (phase) {
        case IPC_P_RUN:
                return "A-RUN";

        case IPC_P_OFF:
                return "A-OFF";

        case IPC_P_ROM:
                return "A-ROM";

        case IPC_P_PSI:
                return "A-PSI";

        case IPC_P_EBL:
                return "A-EBL";

        case IPC_P_CRASH:
                return "A-CRASH";

        case IPC_P_CD_READY:
                return "A-CD_READY";

        case IPC_P_OFF_REQ:
                return "A-OFF_REQ";

        default:
                return "A-???";
        }
}

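/* Send the pipe close message to CP and clean up the host-side resources. */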
void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
        union ipc_msg_prep_args prep_args = { .pipe_close.pipe = pipe };

        pipe->is_open = false;
        ipc_protocol_msg_send(ipc_imem->ipc_protocol, IPC_MSG_PREP_PIPE_CLOSE,
                              &prep_args);

        ipc_imem_pipe_cleanup(ipc_imem, pipe);
}

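/* Close both pipes of a channel (when in the run phase) and free the
 * channel.
 */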
void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id)
{
        struct ipc_mem_channel *channel;

        if (channel_id < 0 || channel_id >= ipc_imem->nr_of_channels) {
                dev_err(ipc_imem->dev, "invalid channel id %d", channel_id);
                return;
        }

        channel = &ipc_imem->channels[channel_id];

        if (channel->state == IMEM_CHANNEL_FREE) {
                dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
                        channel_id, channel->state);
                return;
        }

        /* Free only the channel id in the CP power off mode. */
        if (channel->state == IMEM_CHANNEL_RESERVED)
                /* Release only the channel id. */
                goto channel_free;

        if (ipc_imem->phase == IPC_P_RUN) {
                ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
                ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
        }

        ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
        ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);

channel_free:
        ipc_imem_channel_free(channel);
}

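/* Open both pipes of a channel and pre-allocate its downlink buffers. */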
struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
                                              int channel_id, u32 db_id)
{
        struct ipc_mem_channel *channel;

        if (channel_id < 0 || channel_id >= IPC_MEM_MAX_CHANNELS) {
                dev_err(ipc_imem->dev, "invalid channel ID: %d", channel_id);
                return NULL;
        }

        channel = &ipc_imem->channels[channel_id];

        channel->state = IMEM_CHANNEL_ACTIVE;

        if (!ipc_imem_pipe_open(ipc_imem, &channel->ul_pipe))
                goto ul_pipe_err;

        if (!ipc_imem_pipe_open(ipc_imem, &channel->dl_pipe))
                goto dl_pipe_err;

        /* Allocate the downlink buffers in tasklet context. */
        if (ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_pipe_td_alloc, db_id,
                                     &channel->dl_pipe, 0, false)) {
                dev_err(ipc_imem->dev, "td allocation failed : %d", channel_id);
                goto task_failed;
        }

        /* Active channel. */
        return channel;
task_failed:
        ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
dl_pipe_err:
        ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
ul_pipe_err:
        ipc_imem_channel_free(channel);
        return NULL;
}

void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem)
{
        ipc_protocol_suspend(ipc_imem->ipc_protocol);
}

void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep)
{
        ipc_protocol_s2idle_sleep(ipc_imem->ipc_protocol, sleep);
}

void ipc_imem_pm_resume(struct iosm_imem *ipc_imem)
{
        enum ipc_mem_exec_stage stage;

        if (ipc_protocol_resume(ipc_imem->ipc_protocol)) {
                stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
                ipc_imem_phase_update_check(ipc_imem, stage);
        }
}

void ipc_imem_channel_free(struct ipc_mem_channel *channel)
{
        /* Reset dynamic channel elements. */
        channel->state = IMEM_CHANNEL_FREE;
}

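/* Look up the channel of the given type/index and reserve it; returns the
 * channel array index or a negative errno.
 */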
int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
                           enum ipc_ctype ctype)
{
        struct ipc_mem_channel *channel;
        int i;

        /* Find channel of given type/index */
        for (i = 0; i < ipc_imem->nr_of_channels; i++) {
                channel = &ipc_imem->channels[i];
                if (channel->ctype == ctype && channel->index == index)
                        break;
        }

        if (i >= ipc_imem->nr_of_channels) {
                dev_dbg(ipc_imem->dev,
                        "no channel definition for index=%d ctype=%d", index,
                        ctype);
                return -ECHRNG;
        }

        if (ipc_imem->channels[i].state != IMEM_CHANNEL_FREE) {
                dev_dbg(ipc_imem->dev, "channel is in use");
                return -EBUSY;
        }

        if (channel->ctype == IPC_CTYPE_WWAN &&
            index == IPC_MEM_MUX_IP_CH_IF_ID)
                channel->if_id = index;

        channel->channel_id = index;
        channel->state = IMEM_CHANNEL_RESERVED;

        return i;
}

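/* Register a new channel with the given configuration and initialize its
 * pipe parameters and UL accumulator list.
 */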
void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
                           struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
        struct ipc_mem_channel *channel;

        if (chnl_cfg.ul_pipe >= IPC_MEM_MAX_PIPES ||
            chnl_cfg.dl_pipe >= IPC_MEM_MAX_PIPES) {
                dev_err(ipc_imem->dev, "invalid pipe: ul_pipe=%d, dl_pipe=%d",
                        chnl_cfg.ul_pipe, chnl_cfg.dl_pipe);
                return;
        }

        if (ipc_imem->nr_of_channels >= IPC_MEM_MAX_CHANNELS) {
                dev_err(ipc_imem->dev, "too many channels");
                return;
        }

        channel = &ipc_imem->channels[ipc_imem->nr_of_channels];
        channel->channel_id = ipc_imem->nr_of_channels;
        channel->ctype = ctype;
        channel->index = chnl_cfg.id;
        channel->net_err_count = 0;
        channel->state = IMEM_CHANNEL_FREE;
        ipc_imem->nr_of_channels++;

        ipc_imem_channel_update(ipc_imem, channel->channel_id, chnl_cfg,
                                IRQ_MOD_OFF);

        skb_queue_head_init(&channel->ul_list);

        init_completion(&channel->ul_sem);
}

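/* Apply the pipe configuration of a free or reserved channel. */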
void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
                             struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
        struct ipc_mem_channel *channel;

        if (id < 0 || id >= ipc_imem->nr_of_channels) {
                dev_err(ipc_imem->dev, "invalid channel id %d", id);
                return;
        }

        channel = &ipc_imem->channels[id];

        if (channel->state != IMEM_CHANNEL_FREE &&
            channel->state != IMEM_CHANNEL_RESERVED) {
                dev_err(ipc_imem->dev, "invalid channel state %d",
                        channel->state);
                return;
        }

        channel->ul_pipe.nr_of_entries = chnl_cfg.ul_nr_of_entries;
        channel->ul_pipe.pipe_nr = chnl_cfg.ul_pipe;
        channel->ul_pipe.is_open = false;
        channel->ul_pipe.irq = IPC_UL_PIPE_IRQ_VECTOR;
        channel->ul_pipe.channel = channel;
        channel->ul_pipe.dir = IPC_MEM_DIR_UL;
        channel->ul_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
        channel->ul_pipe.irq_moderation = irq_moderation;
        channel->ul_pipe.buf_size = 0;

        channel->dl_pipe.nr_of_entries = chnl_cfg.dl_nr_of_entries;
        channel->dl_pipe.pipe_nr = chnl_cfg.dl_pipe;
        channel->dl_pipe.is_open = false;
        channel->dl_pipe.irq = IPC_DL_PIPE_IRQ_VECTOR;
        channel->dl_pipe.channel = channel;
        channel->dl_pipe.dir = IPC_MEM_DIR_DL;
        channel->dl_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
        channel->dl_pipe.irq_moderation = irq_moderation;
        channel->dl_pipe.buf_size = chnl_cfg.dl_buf_size;
}

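/* Clean up the pipes of all configured channels and free the channels. */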
static void ipc_imem_channel_reset(struct iosm_imem *ipc_imem)
{
        int i;

        for (i = 0; i < ipc_imem->nr_of_channels; i++) {
                struct ipc_mem_channel *channel;

                channel = &ipc_imem->channels[i];

                ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
                ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);

                ipc_imem_channel_free(channel);
        }
}

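/* Mark the pipe as closed, drop queued UL skbs and release the protocol
 * resources of the pipe.
 */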
void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
        struct sk_buff *skb;

        /* Force pipe to closed state also when not explicitly closed through
         * ipc_imem_pipe_close()
         */
        pipe->is_open = false;

        /* Empty the uplink skb accumulator. */
        while ((skb = skb_dequeue(&pipe->channel->ul_list)))
                ipc_pcie_kfree_skb(ipc_imem->pcie, skb);

        ipc_protocol_pipe_cleanup(ipc_imem->ipc_protocol, pipe);
}

/* Send IPC protocol uninit to the modem when the link is active. */
static void ipc_imem_device_ipc_uninit(struct iosm_imem *ipc_imem)
{
        int timeout = IPC_MODEM_UNINIT_TIMEOUT_MS;
        enum ipc_mem_device_ipc_state ipc_state;

        /* When the PCIe link is up, set the modem to IPC_UNINIT; when the
         * link is down, skip it.
         */
        if (ipc_pcie_check_data_link_active(ipc_imem->pcie)) {
                /* set modem to UNINIT
                 * (in case we want to reload the AP driver without resetting
                 * the modem)
                 */
                ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
                                  IPC_MEM_DEVICE_IPC_UNINIT);
                ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);

                /* Wait for a maximum of 30ms to allow the modem to
                 * uninitialize the protocol.
                 */
                while ((ipc_state <= IPC_MEM_DEVICE_IPC_DONT_CARE) &&
                       (ipc_state != IPC_MEM_DEVICE_IPC_UNINIT) &&
                       (timeout > 0)) {
                        usleep_range(1000, 1250);
                        timeout--;
                        ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
                }
        }
}

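/* Shut down the driver instance: stop the timers and the worker, deinit the
 * upper layers, uninit the device IPC and release the protocol and task
 * resources.
 */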
void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
{
        ipc_imem->phase = IPC_P_OFF_REQ;

        /* forward MDM_NOT_READY to listeners */
        ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);

        hrtimer_cancel(&ipc_imem->td_alloc_timer);
        hrtimer_cancel(&ipc_imem->tdupdate_timer);
        hrtimer_cancel(&ipc_imem->fast_update_timer);
        hrtimer_cancel(&ipc_imem->startup_timer);

        /* cancel the workqueue */
        cancel_work_sync(&ipc_imem->run_state_worker);

        if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
                ipc_mux_deinit(ipc_imem->mux);
                ipc_wwan_deinit(ipc_imem->wwan);
                ipc_port_deinit(ipc_imem->ipc_port);
        }

        ipc_imem_device_ipc_uninit(ipc_imem);
        ipc_imem_channel_reset(ipc_imem);

        ipc_protocol_deinit(ipc_imem->ipc_protocol);
        ipc_task_deinit(ipc_imem->ipc_task);

        kfree(ipc_imem->ipc_task);
        kfree(ipc_imem->mmio);

        ipc_imem->phase = IPC_P_OFF;
}

/* After CP has unblocked the PCIe link, save the start address of the doorbell
 * scratchpad and prepare the shared memory region. If the flashing to RAM
 * procedure shall be executed, copy the chip information from the doorbell
 * scratchpad to the application buffer and wake up the flash app.
 */
static int ipc_imem_config(struct iosm_imem *ipc_imem)
{
        enum ipc_phase phase;

        /* Initialize the semaphore for the blocking read UL/DL transfer. */
        init_completion(&ipc_imem->ul_pend_sem);

        init_completion(&ipc_imem->dl_pend_sem);

        /* clear internal flags */
        ipc_imem->ipc_status = IPC_MEM_DEVICE_IPC_UNINIT;
        ipc_imem->enter_runtime = 0;

        phase = ipc_imem_phase_update(ipc_imem);

        /* Either CP shall be in the power off or power on phase. */
        switch (phase) {
        case IPC_P_ROM:
                ipc_imem->hrtimer_period = ktime_set(0, 1000 * 1000 * 1000ULL);
                /* poll execution stage (for delayed start, e.g. NAND) */
                if (!hrtimer_active(&ipc_imem->startup_timer))
                        hrtimer_start(&ipc_imem->startup_timer,
                                      ipc_imem->hrtimer_period,
                                      HRTIMER_MODE_REL);
                return 0;

        case IPC_P_PSI:
        case IPC_P_EBL:
        case IPC_P_RUN:
                /* The initial IPC state is IPC_MEM_DEVICE_IPC_UNINIT. */
                ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_UNINIT;

                /* Verify the expected initial state. */
                if (ipc_imem->ipc_requested_state ==
                    ipc_mmio_get_ipc_state(ipc_imem->mmio)) {
                        ipc_imem_ipc_init_check(ipc_imem);

                        return 0;
                }
                dev_err(ipc_imem->dev,
                        "ipc_status(%d) != IPC_MEM_DEVICE_IPC_UNINIT",
                        ipc_mmio_get_ipc_state(ipc_imem->mmio));
                break;
        case IPC_P_CRASH:
        case IPC_P_CD_READY:
                dev_dbg(ipc_imem->dev,
                        "Modem is in phase %d, reset Modem to collect CD",
                        phase);
                return 0;
        default:
                dev_err(ipc_imem->dev, "unexpected operation phase %d", phase);
                break;
        }

        complete(&ipc_imem->dl_pend_sem);
        complete(&ipc_imem->ul_pend_sem);
        ipc_imem->phase = IPC_P_OFF;
        return -EIO;
}

/* Pass the dev ptr to the shared memory driver and request the entry points */
struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
                                void __iomem *mmio, struct device *dev)
{
        struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);

        if (!ipc_imem)
                return NULL;

        /* Save the device address. */
        ipc_imem->pcie = pcie;
        ipc_imem->dev = dev;

        ipc_imem->pci_device_id = device_id;

        ipc_imem->ev_cdev_write_pending = false;
        ipc_imem->cp_version = 0;
        ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;

        /* Reset the flash channel id. */
        ipc_imem->flash_channel_id = -1;

        /* Reset the max number of configured channels */
        ipc_imem->nr_of_channels = 0;

        /* allocate IPC MMIO */
        ipc_imem->mmio = ipc_mmio_init(mmio, ipc_imem->dev);
        if (!ipc_imem->mmio) {
                dev_err(ipc_imem->dev, "failed to initialize mmio region");
                goto mmio_init_fail;
        }

        ipc_imem->ipc_task = kzalloc(sizeof(*ipc_imem->ipc_task),
                                     GFP_KERNEL);

        /* Create tasklet for event handling */
        if (!ipc_imem->ipc_task)
                goto ipc_task_fail;

        if (ipc_task_init(ipc_imem->ipc_task))
                goto ipc_task_init_fail;

        ipc_imem->ipc_task->dev = ipc_imem->dev;

        INIT_WORK(&ipc_imem->run_state_worker, ipc_imem_run_state_worker);

        ipc_imem->ipc_protocol = ipc_protocol_init(ipc_imem);

        if (!ipc_imem->ipc_protocol)
                goto protocol_init_fail;

        /* The phase is set to power off. */
        ipc_imem->phase = IPC_P_OFF;

        hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL);
        ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;

        hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL);
        ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;

        hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL);
        ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;

        hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL);
        ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;

        if (ipc_imem_config(ipc_imem)) {
                dev_err(ipc_imem->dev, "failed to initialize the imem");
                goto imem_config_fail;
        }

        return ipc_imem;

imem_config_fail:
        hrtimer_cancel(&ipc_imem->td_alloc_timer);
        hrtimer_cancel(&ipc_imem->fast_update_timer);
        hrtimer_cancel(&ipc_imem->tdupdate_timer);
        hrtimer_cancel(&ipc_imem->startup_timer);
protocol_init_fail:
        cancel_work_sync(&ipc_imem->run_state_worker);
        ipc_task_deinit(ipc_imem->ipc_task);
ipc_task_init_fail:
        kfree(ipc_imem->ipc_task);
ipc_task_fail:
        kfree(ipc_imem->mmio);
mmio_init_fail:
        kfree(ipc_imem);
        return NULL;
}

void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq)
{
        /* Debounce IPC_EV_IRQ. */
        if (ipc_imem && !ipc_imem->ev_irq_pending[irq]) {
                ipc_imem->ev_irq_pending[irq] = true;
                ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_irq_cb, irq,
                                         NULL, 0, false);
        }
}

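/* Suspend or resume the use of the TD update timer. */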
void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
{
        ipc_imem->td_update_timer_suspended = suspend;
}