// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/delay.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_port.h"

/* Check if the given channel is the valid WWAN IP channel. */
static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
{
	if (chnl)
		return chnl->ctype == IPC_CTYPE_WWAN &&
		       chnl->if_id == IPC_MEM_MUX_IP_CH_IF_ID;
	return false;
}

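/* Send a sleep-state message to CP and cache the requested state. */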
static int ipc_imem_msg_send_device_sleep(struct iosm_imem *ipc_imem, u32 state)
{
	union ipc_msg_prep_args prep_args = {
		.sleep.target = 1,
		.sleep.state = state,
	};

	ipc_imem->device_sleep = state;

	return ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					IPC_MSG_PREP_SLEEP, &prep_args, NULL);
}

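/* Prepare one downlink TD for the pipe unless its queue limit is reached. */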
static bool ipc_imem_dl_skb_alloc(struct iosm_imem *ipc_imem,
				  struct ipc_pipe *pipe)
{
	/* limit max. nr of entries */
	if (pipe->nr_of_queued_entries >= pipe->max_nr_of_queued_entries)
		return false;

	return ipc_protocol_dl_td_prepare(ipc_imem->ipc_protocol, pipe);
}

/* This timer handler will retry the DL buffer allocation if a pipe has no
 * free buffers and rings the doorbell if new TDs are available.
 */
static int ipc_imem_tq_td_alloc_timer(struct iosm_imem *ipc_imem, int arg,
				      void *msg, size_t size)
{
	bool new_buffers_available = false;
	bool retry_allocation = false;
	int i;

	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *pipe = &ipc_imem->channels[i].dl_pipe;

		if (!pipe->is_open || pipe->nr_of_queued_entries > 0)
			continue;

		while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
			new_buffers_available = true;

		if (pipe->nr_of_queued_entries == 0)
			retry_allocation = true;
	}

	if (new_buffers_available)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);

	if (retry_allocation) {
		ipc_imem->hrtimer_period =
		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
	return 0;
}

static enum hrtimer_restart ipc_imem_td_alloc_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, td_alloc_timer);
	/* Post an async tasklet event to retry the DL buffer allocation. */
	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_alloc_timer, 0, NULL,
				 0, false);
	return HRTIMER_NORESTART;
}

/* Fast update timer tasklet handler to trigger HP update */
static int ipc_imem_tq_fast_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					    void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_FAST_TD_UPD_TMR);

	return 0;
}

static enum hrtimer_restart
ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, fast_update_timer);
	/* Post an async tasklet event to trigger HP update Doorbell */
	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_fast_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

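/* Read the CP capabilities and derive the MUX configuration from them. */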
static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
					  struct ipc_mux_config *cfg)
{
	ipc_mmio_update_cp_capability(ipc_imem->mmio);

	if (!ipc_imem->mmio->has_mux_lite) {
		dev_err(ipc_imem->dev, "Failed to get Mux capability.");
		return -EINVAL;
	}

	cfg->protocol = MUX_LITE;

	cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
			       MUX_UL_ON_CREDITS :
			       MUX_UL;

	/* The instance ID is the same as the channel ID because it is reused
	 * by the channel alloc function.
	 */
	cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;
	cfg->nr_sessions = IPC_MEM_MUX_IP_SESSION_ENTRIES;

	return 0;
}

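/* Send the feature-set message to CP from tasklet or thread context. */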
void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
				   unsigned int reset_enable, bool atomic_ctx)
{
	union ipc_msg_prep_args prep_args = { .feature_set.reset_enable =
						      reset_enable };

	if (atomic_ctx)
		ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					 IPC_MSG_PREP_FEATURE_SET, &prep_args,
					 NULL);
	else
		ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				      IPC_MSG_PREP_FEATURE_SET, &prep_args);
}

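/* Arm the TD update timer; outside the runtime phase the doorbell irq is
 * triggered directly instead.
 */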
void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
{
	/* Use the TD update timer only in the runtime phase */
	if (!ipc_imem->enter_runtime || ipc_imem->td_update_timer_suspended) {
		/* trigger the doorbell irq on CP directly. */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_TD_UPD_TMR_START);
		return;
	}

	if (!hrtimer_active(&ipc_imem->tdupdate_timer)) {
		ipc_imem->hrtimer_period =
		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
			hrtimer_start(&ipc_imem->tdupdate_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
}

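/* Stop an hrtimer only if it is currently armed. */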
void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
{
	if (hrtimer_active(hr_timer))
		hrtimer_cancel(hr_timer);
}

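/* Pass the accumulated uplink skbs of all active channels to the protocol
 * layer; returns true if an HP doorbell update is still pending for the
 * WWAN IP channel.
 */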
bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;
	struct sk_buff_head *ul_list;
	bool hpda_pending = false;
	bool forced_hpdu = false;
	struct ipc_pipe *pipe;
	int i;

	/* Analyze the uplink pipe of all active channels. */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];

		if (channel->state != IMEM_CHANNEL_ACTIVE)
			continue;

		pipe = &channel->ul_pipe;

		/* Get the reference to the skbuf accumulator list. */
		ul_list = &channel->ul_list;

		/* Fill the transfer descriptor with the uplink buffer info. */
		hpda_pending |= ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
							pipe, ul_list);

		/* forced HP update needed for non data channels */
		if (hpda_pending && !ipc_imem_check_wwan_ips(channel))
			forced_hpdu = true;
	}

	if (forced_hpdu) {
		hpda_pending = false;
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_UL_WRITE_TD);
	}

	return hpda_pending;
}

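/* Step CP from IPC_INIT to IPC_RUNNING, polling the MMIO IPC state until
 * the boot timeout expires.
 */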
void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_BOOT_TIMEOUT;

	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

	/* Trigger the CP interrupt to enter the init state. */
	ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
			  IPC_MEM_DEVICE_IPC_INIT);
	/* Wait for the CP update. */
	while (timeout--) {
		if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		    ipc_imem->ipc_requested_state) {
			/* Prepare the MMIO space */
			ipc_mmio_config(ipc_imem->mmio);

			/* Trigger the CP irq to enter the running state. */
			ipc_imem->ipc_requested_state =
				IPC_MEM_DEVICE_IPC_RUNNING;
			ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
					  IPC_MEM_DEVICE_IPC_RUNNING);

			return;
		}
		msleep(20);
	}

	/* timeout */
	dev_err(ipc_imem->dev, "%s: ipc_status(%d) ne. IPC_MEM_DEVICE_IPC_INIT",
		ipc_imem_phase_get_string(ipc_imem->phase),
		ipc_mmio_get_ipc_state(ipc_imem->mmio));

	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_TIMEOUT);
}

/* Analyze the packet type and distribute it. */
static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
				    struct ipc_pipe *pipe, struct sk_buff *skb)
{
	u16 port_id;

	if (!skb)
		return;

	/* An AT/control or IP packet is expected. */
	switch (pipe->channel->ctype) {
	case IPC_CTYPE_CTRL:
		port_id = pipe->channel->channel_id;

		/* Pass the packet to the wwan layer. */
		wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port, skb);
		break;

	case IPC_CTYPE_WWAN:
		if (pipe->channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
			ipc_mux_dl_decode(ipc_imem->mux, skb);
		break;
	default:
		dev_err(ipc_imem->dev, "Invalid channel type");
		break;
	}
}

/* Process the downlink data and pass them to the char or net layer. */
static void ipc_imem_dl_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	s32 cnt = 0, processed_td_cnt = 0;
	struct ipc_mem_channel *channel;
	u32 head = 0, tail = 0;
	bool processed = false;
	struct sk_buff *skb;

	channel = pipe->channel;

	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);
	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	processed_td_cnt = cnt;

	/* Seek for pipes with pending DL data. */
	while (cnt--) {
		skb = ipc_protocol_dl_td_process(ipc_imem->ipc_protocol, pipe);

		/* Analyze the packet type and distribute it. */
		ipc_imem_dl_skb_process(ipc_imem, pipe, skb);
	}

	/* try to allocate new empty DL SKbs from head..tail - 1 */
	while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
		processed = true;

	if (processed && !ipc_imem_check_wwan_ips(channel)) {
		/* Force HP update for non IP channels */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);
		processed = false;

		/* If the Fast Update timer is already running then stop it. */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}

	/* Any control channel process will get immediate HP update.
	 * Start the Fast update timer only for the IP channel if all the TDs
	 * were used in the last process.
	 */
	if (processed && (processed_td_cnt == pipe->nr_of_entries - 1)) {
		ipc_imem->hrtimer_period =
		ktime_set(0, FORCE_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		hrtimer_start(&ipc_imem->fast_update_timer,
			      ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
	}

	if (ipc_imem->app_notify_dl_pend)
		complete(&ipc_imem->dl_pend_sem);
}

/* process open uplink pipe */
static void ipc_imem_ul_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	struct ipc_mem_channel *channel;
	u32 tail = 0, head = 0;
	struct sk_buff *skb;
	s32 cnt = 0;

	channel = pipe->channel;

	/* Get the pipe's head and tail index. */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);

	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	/* Free UL buffers. */
	while (cnt--) {
		skb = ipc_protocol_ul_td_process(ipc_imem->ipc_protocol, pipe);

		if (!skb)
			continue;

		/* If the user app was suspended in uplink direction - blocking
		 * write, wake up the app.
		 */
		if (IPC_CB(skb)->op_type == UL_USR_OP_BLOCKED)
			complete(&channel->ul_sem);

		/* Free the skbuf element. */
		if (IPC_CB(skb)->op_type == UL_MUX_OP_ADB) {
			if (channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
				ipc_mux_ul_encoded_process(ipc_imem->mux, skb);
			else
				dev_err(ipc_imem->dev,
					"OP Type is UL_MUX, unknown if_id %d",
					channel->if_id);
		} else {
			ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
		}
	}

	/* Trace channel stats for IP UL pipe. */
	if (ipc_imem_check_wwan_ips(pipe->channel))
		ipc_mux_check_n_restart_tx(ipc_imem->mux);

	if (ipc_imem->app_notify_ul_pend)
		complete(&ipc_imem->ul_pend_sem);
}

/* Execute the ROM doorbell irq: wake up the flash app with the exit code. */
static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;

	if (ipc_imem->flash_channel_id < 0) {
		ipc_imem->rom_exit_code = IMEM_ROM_EXIT_FAIL;
		dev_err(ipc_imem->dev, "Missing flash app:%d",
			ipc_imem->flash_channel_id);
		return;
	}

	ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);

	/* Wake up the flash app to continue or to terminate depending
	 * on the CP ROM exit code.
	 */
	channel = &ipc_imem->channels[ipc_imem->flash_channel_id];
	complete(&channel->ul_sem);
}

/* Execute the UL bundle timer actions, generating the doorbell irq. */
static int ipc_imem_tq_td_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					  void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_TD_UPD_TMR);
	return 0;
}

/* Consider link power management in the runtime phase. */
static void ipc_imem_slp_control_exec(struct iosm_imem *ipc_imem)
{
	/* The link will go down; test for pending UL packets. */
	if (ipc_protocol_pm_dev_sleep_handle(ipc_imem->ipc_protocol) &&
	    hrtimer_active(&ipc_imem->tdupdate_timer)) {
		/* Generate the doorbell irq. */
		ipc_imem_tq_td_update_timer_cb(ipc_imem, 0, NULL, 0);
		/* Stop the TD update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->tdupdate_timer);
		/* Stop the fast update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}
}

/* Execute startup timer and wait for delayed start (e.g. NAND) */
static int ipc_imem_tq_startup_timer_cb(struct iosm_imem *ipc_imem, int arg,
					void *msg, size_t size)
{
	/* Update & check the current operation phase. */
	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN)
		return -EIO;

	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
	    IPC_MEM_DEVICE_IPC_UNINIT) {
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_INIT);

		ipc_imem->hrtimer_period = ktime_set(0, 100 * 1000UL * 1000ULL);
		/* reduce period to 100 ms to check for mmio init state */
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	} else if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		   IPC_MEM_DEVICE_IPC_INIT) {
		/* Startup complete - disable timer */
		ipc_imem_hrtimer_stop(&ipc_imem->startup_timer);

		/* Prepare the MMIO space */
		ipc_mmio_config(ipc_imem->mmio);
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_RUNNING;
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_RUNNING);
	}

	return 0;
}

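/* Startup timer expiry: re-arm the timer if a polling period is set and
 * defer the actual work to the tasklet.
 */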
static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
{
	enum hrtimer_restart result = HRTIMER_NORESTART;
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, startup_timer);

	if (ktime_to_ns(ipc_imem->hrtimer_period)) {
		hrtimer_forward(&ipc_imem->startup_timer, ktime_get(),
				ipc_imem->hrtimer_period);
		result = HRTIMER_RESTART;
	}

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_startup_timer_cb, 0,
				 NULL, 0, false);
	return result;
}

/* Get the CP execution stage */
static enum ipc_mem_exec_stage
ipc_imem_get_exec_stage_buffered(struct iosm_imem *ipc_imem)
{
	return (ipc_imem->phase == IPC_P_RUN &&
		ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) ?
		       ipc_protocol_get_ap_exec_stage(ipc_imem->ipc_protocol) :
		       ipc_mmio_get_exec_stage(ipc_imem->mmio);
}

/* Callback to send the modem ready uevent */
static int ipc_imem_send_mdm_rdy_cb(struct iosm_imem *ipc_imem, int arg,
				    void *msg, size_t size)
{
	enum ipc_mem_exec_stage exec_stage =
		ipc_imem_get_exec_stage_buffered(ipc_imem);

	if (exec_stage == IPC_MEM_EXEC_STAGE_RUN)
		ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);

	return 0;
}

/* This function is executed in a task context via an ipc_worker object,
 * as the creation or removal of a device can't be done from tasklet context.
 */
static void ipc_imem_run_state_worker(struct work_struct *instance)
{
	struct ipc_chnl_cfg chnl_cfg_port = { 0 };
	struct ipc_mux_config mux_cfg;
	struct iosm_imem *ipc_imem;
	u8 ctrl_chl_idx = 0;

	ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);

	if (ipc_imem->phase != IPC_P_RUN) {
		dev_err(ipc_imem->dev,
			"Modem link down. Exit run state worker.");
		return;
	}

	if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
		ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);

	ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
	if (ipc_imem->mux)
		ipc_imem->mux->wwan = ipc_imem->wwan;

	while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
		if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
			ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
			if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
				ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
						      chnl_cfg_port,
						      IRQ_MOD_OFF);
				ipc_imem->ipc_port[ctrl_chl_idx] =
					ipc_port_init(ipc_imem, chnl_cfg_port);
			}
		}
		ctrl_chl_idx++;
	}

	ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
				 false);

	/* Complete all memory stores before setting bit */
	smp_mb__before_atomic();

	set_bit(FULLY_FUNCTIONAL, &ipc_imem->flag);

	/* Complete all memory stores after setting bit */
	smp_mb__after_atomic();
}

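/* Handle one interrupt event in tasklet context: update the phase, process
 * the message ring and all open pipes, and re-arm the UL/TD timers as needed.
 */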
static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
{
	enum ipc_mem_device_ipc_state curr_ipc_status;
	enum ipc_phase old_phase, phase;
	bool retry_allocation = false;
	bool ul_pending = false;
	int ch_id, i;

	if (irq != IMEM_IRQ_DONT_CARE)
		ipc_imem->ev_irq_pending[irq] = false;

	/* Get the internal phase. */
	old_phase = ipc_imem->phase;

	if (old_phase == IPC_P_OFF_REQ) {
		dev_dbg(ipc_imem->dev,
			"[%s]: Ignoring MSI. Deinit sequence in progress!",
			ipc_imem_phase_get_string(old_phase));
		return;
	}

	/* Update the phase controlled by CP. */
	phase = ipc_imem_phase_update(ipc_imem);

	switch (phase) {
	case IPC_P_RUN:
		if (!ipc_imem->enter_runtime) {
			/* Execute the transition from flash/boot to runtime. */
			ipc_imem->enter_runtime = 1;

			/* allow the device to sleep, default value is
			 * IPC_HOST_SLEEP_ENTER_SLEEP
			 */
			ipc_imem_msg_send_device_sleep(ipc_imem,
						       ipc_imem->device_sleep);

			ipc_imem_msg_send_feature_set(ipc_imem,
						      IPC_MEM_INBAND_CRASH_SIG,
						      false);
		}

		curr_ipc_status =
			ipc_protocol_get_ipc_status(ipc_imem->ipc_protocol);

		/* check ipc_status change */
		if (ipc_imem->ipc_status != curr_ipc_status) {
			ipc_imem->ipc_status = curr_ipc_status;

			if (ipc_imem->ipc_status ==
			    IPC_MEM_DEVICE_IPC_RUNNING) {
				schedule_work(&ipc_imem->run_state_worker);
			}
		}

		/* Consider power management in the runtime phase. */
		ipc_imem_slp_control_exec(ipc_imem);
		break; /* Continue with skbuf processing. */

		/* Unexpected phases. */
	case IPC_P_OFF:
	case IPC_P_OFF_REQ:
		dev_err(ipc_imem->dev, "confused phase %s",
			ipc_imem_phase_get_string(phase));
		return;

	case IPC_P_PSI:
		if (old_phase != IPC_P_ROM)
			break;

		fallthrough;
		/* On CP the PSI phase is already active. */

	case IPC_P_ROM:
		/* Before CP ROM driver starts the PSI image, it sets
		 * the exit_code field on the doorbell scratchpad and
		 * triggers the irq.
		 */
		ipc_imem_rom_irq_exec(ipc_imem);
		return;

	default:
		break;
	}

	/* process message ring */
	ipc_protocol_msg_process(ipc_imem, irq);

	/* process all open pipes */
	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *ul_pipe = &ipc_imem->channels[i].ul_pipe;
		struct ipc_pipe *dl_pipe = &ipc_imem->channels[i].dl_pipe;

		if (dl_pipe->is_open &&
		    (irq == IMEM_IRQ_DONT_CARE || irq == dl_pipe->irq)) {
			ipc_imem_dl_pipe_process(ipc_imem, dl_pipe);

			if (dl_pipe->nr_of_queued_entries == 0)
				retry_allocation = true;
		}

		if (ul_pipe->is_open)
			ipc_imem_ul_pipe_process(ipc_imem, ul_pipe);
	}

	/* Try to generate new ADB or ADGH. */
	if (ipc_mux_ul_data_encode(ipc_imem->mux))
		ipc_imem_td_update_timer_start(ipc_imem);

	/* Continue the send procedure with accumulated SIO or NETIF packets.
	 * Reset the debounce flags.
	 */
	ul_pending |= ipc_imem_ul_write_td(ipc_imem);

	/* if UL data is pending restart TD update timer */
	if (ul_pending) {
		ipc_imem->hrtimer_period =
		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
			hrtimer_start(&ipc_imem->tdupdate_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}

	/* If CP has executed the transition
	 * from IPC_INIT to IPC_RUNNING in the PSI
	 * phase, wake up the flash app to open the pipes.
	 */
	if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
	    ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
	    ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		    IPC_MEM_DEVICE_IPC_RUNNING &&
	    ipc_imem->flash_channel_id >= 0) {
		/* Wake up the flash app to open the pipes. */
		ch_id = ipc_imem->flash_channel_id;
		complete(&ipc_imem->channels[ch_id].ul_sem);
	}

	/* Reset the expected CP state. */
	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_DONT_CARE;

	if (retry_allocation) {
		ipc_imem->hrtimer_period =
		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
}

/* Callback by tasklet for handling interrupt events. */
static int ipc_imem_tq_irq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
			      size_t size)
{
	ipc_imem_handle_irq(ipc_imem, arg);

	return 0;
}

void ipc_imem_ul_send(struct iosm_imem *ipc_imem)
{
	/* start doorbell irq delay timer if UL is pending */
	if (ipc_imem_ul_write_td(ipc_imem))
		ipc_imem_td_update_timer_start(ipc_imem);
}

/* Check the execution stage and update the AP phase */
static enum ipc_phase ipc_imem_phase_update_check(struct iosm_imem *ipc_imem,
						  enum ipc_mem_exec_stage stage)
{
	switch (stage) {
	case IPC_MEM_EXEC_STAGE_BOOT:
		if (ipc_imem->phase != IPC_P_ROM) {
			/* Send this event only once */
			ipc_uevent_send(ipc_imem->dev, UEVENT_ROM_READY);
		}

		ipc_imem->phase = IPC_P_ROM;
		break;

	case IPC_MEM_EXEC_STAGE_PSI:
		ipc_imem->phase = IPC_P_PSI;
		break;

	case IPC_MEM_EXEC_STAGE_EBL:
		ipc_imem->phase = IPC_P_EBL;
		break;

	case IPC_MEM_EXEC_STAGE_RUN:
		if (ipc_imem->phase != IPC_P_RUN &&
		    ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) {
			ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
		}
		ipc_imem->phase = IPC_P_RUN;
		break;

	case IPC_MEM_EXEC_STAGE_CRASH:
		if (ipc_imem->phase != IPC_P_CRASH)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CRASH);

		ipc_imem->phase = IPC_P_CRASH;
		break;

	case IPC_MEM_EXEC_STAGE_CD_READY:
		if (ipc_imem->phase != IPC_P_CD_READY)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY);
		ipc_imem->phase = IPC_P_CD_READY;
		break;

	default:
		/* unknown exec stage:
		 * assume that the link is down and send info to listeners
		 */
		ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
		break;
	}

	return ipc_imem->phase;
}

/* Send msg to device to open pipe */
static bool ipc_imem_pipe_open(struct iosm_imem *ipc_imem,
			       struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = {
		.pipe_open.pipe = pipe,
	};

	if (ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				  IPC_MSG_PREP_PIPE_OPEN, &prep_args) == 0)
		pipe->is_open = true;

	return pipe->is_open;
}

/* Allocates the TDs for the given pipe along with firing HP update DB. */
static int ipc_imem_tq_pipe_td_alloc(struct iosm_imem *ipc_imem, int arg,
				     void *msg, size_t size)
{
	struct ipc_pipe *dl_pipe = msg;
	bool processed = false;
	int i;

	for (i = 0; i < dl_pipe->nr_of_entries - 1; i++)
		processed |= ipc_imem_dl_skb_alloc(ipc_imem, dl_pipe);

	/* Trigger the doorbell irq to inform CP that new downlink buffers are
	 * available.
	 */
	if (processed)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, arg);

	return 0;
}

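/* TD update timer expiry: defer the doorbell generation to the tasklet. */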
static enum hrtimer_restart
ipc_imem_td_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, tdupdate_timer);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

/* Get the CP execution state and map it to the AP phase. */
enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage exec_stage =
		ipc_imem_get_exec_stage_buffered(ipc_imem);
	/* If the CP stage is undef, return the internal precalculated phase. */
	return ipc_imem->phase == IPC_P_OFF_REQ ?
		       ipc_imem->phase :
		       ipc_imem_phase_update_check(ipc_imem, exec_stage);
}

const char *ipc_imem_phase_get_string(enum ipc_phase phase)
{
	switch (phase) {
	case IPC_P_RUN:
		return "A-RUN";

	case IPC_P_OFF:
		return "A-OFF";

	case IPC_P_ROM:
		return "A-ROM";

	case IPC_P_PSI:
		return "A-PSI";

	case IPC_P_EBL:
		return "A-EBL";

	case IPC_P_CRASH:
		return "A-CRASH";

	case IPC_P_CD_READY:
		return "A-CD_READY";

	case IPC_P_OFF_REQ:
		return "A-OFF_REQ";

	default:
		return "A-???";
	}
}

void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = { .pipe_close.pipe = pipe };

	pipe->is_open = false;
	ipc_protocol_msg_send(ipc_imem->ipc_protocol, IPC_MSG_PREP_PIPE_CLOSE,
			      &prep_args);

	ipc_imem_pipe_cleanup(ipc_imem, pipe);
}

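/* Release a channel; its pipes are closed or cleaned up depending on the
 * channel state.
 */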
void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", channel_id);
		return;
	}

	channel = &ipc_imem->channels[channel_id];

	if (channel->state == IMEM_CHANNEL_FREE) {
		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
			channel_id, channel->state);
		return;
	}

	/* Free only the channel id in the CP power off mode. */
	if (channel->state == IMEM_CHANNEL_RESERVED)
		/* Release only the channel id. */
		goto channel_free;

	if (ipc_imem->phase == IPC_P_RUN) {
		ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
		ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
	}

	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);

channel_free:
	ipc_imem_channel_free(channel);
}

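/* Open the UL/DL pipes of a reserved channel and pre-allocate its DL buffers. */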
struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
					      int channel_id, u32 db_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "invalid channel ID: %d", channel_id);
		return NULL;
	}

	channel = &ipc_imem->channels[channel_id];

	channel->state = IMEM_CHANNEL_ACTIVE;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->ul_pipe))
		goto ul_pipe_err;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->dl_pipe))
		goto dl_pipe_err;

	/* Allocate the downlink buffers in tasklet context. */
	if (ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_pipe_td_alloc, db_id,
				     &channel->dl_pipe, 0, false)) {
		dev_err(ipc_imem->dev, "td allocation failed : %d", channel_id);
		goto task_failed;
	}

	/* Active channel. */
	return channel;
task_failed:
	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
dl_pipe_err:
	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
ul_pipe_err:
	ipc_imem_channel_free(channel);
	return NULL;
}

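/* Host suspend request: forward it to the IPC protocol layer. */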
void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem)
{
	ipc_protocol_suspend(ipc_imem->ipc_protocol);
}

void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep)
{
	ipc_protocol_s2idle_sleep(ipc_imem->ipc_protocol, sleep);
}

void ipc_imem_pm_resume(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage stage;

	if (ipc_protocol_resume(ipc_imem->ipc_protocol)) {
		stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
		ipc_imem_phase_update_check(ipc_imem, stage);
	}
}

void ipc_imem_channel_free(struct ipc_mem_channel *channel)
{
	/* Reset dynamic channel elements. */
	channel->state = IMEM_CHANNEL_FREE;
}

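/* Reserve the channel of the given type/index; returns the channel array
 * index or a negative errno.
 */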
int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
			   enum ipc_ctype ctype)
{
	struct ipc_mem_channel *channel;
	int i;

	/* Find channel of given type/index */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];
		if (channel->ctype == ctype && channel->index == index)
			break;
	}

	if (i >= ipc_imem->nr_of_channels) {
		dev_dbg(ipc_imem->dev,
			"no channel definition for index=%d ctype=%d", index,
			ctype);
		return -ECHRNG;
	}

	if (ipc_imem->channels[i].state != IMEM_CHANNEL_FREE) {
		dev_dbg(ipc_imem->dev, "channel is in use");
		return -EBUSY;
	}

	if (channel->ctype == IPC_CTYPE_WWAN &&
	    index == IPC_MEM_MUX_IP_CH_IF_ID)
		channel->if_id = index;

	channel->channel_id = index;
	channel->state = IMEM_CHANNEL_RESERVED;

	return i;
}

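/* Append a new channel to the channel table and apply its pipe configuration. */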
void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
			   struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (chnl_cfg.ul_pipe >= IPC_MEM_MAX_PIPES ||
	    chnl_cfg.dl_pipe >= IPC_MEM_MAX_PIPES) {
		dev_err(ipc_imem->dev, "invalid pipe: ul_pipe=%d, dl_pipe=%d",
			chnl_cfg.ul_pipe, chnl_cfg.dl_pipe);
		return;
	}

	if (ipc_imem->nr_of_channels >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "too many channels");
		return;
	}

	channel = &ipc_imem->channels[ipc_imem->nr_of_channels];
	channel->channel_id = ipc_imem->nr_of_channels;
	channel->ctype = ctype;
	channel->index = chnl_cfg.id;
	channel->net_err_count = 0;
	channel->state = IMEM_CHANNEL_FREE;
	ipc_imem->nr_of_channels++;

	ipc_imem_channel_update(ipc_imem, channel->channel_id, chnl_cfg,
				IRQ_MOD_OFF);

	skb_queue_head_init(&channel->ul_list);

	init_completion(&channel->ul_sem);
}

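/* Write the pipe configuration of an allocated channel. */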
void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
			     struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (id < 0 || id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", id);
		return;
	}

	channel = &ipc_imem->channels[id];

	if (channel->state != IMEM_CHANNEL_FREE &&
	    channel->state != IMEM_CHANNEL_RESERVED) {
		dev_err(ipc_imem->dev, "invalid channel state %d",
			channel->state);
		return;
	}

	channel->ul_pipe.nr_of_entries = chnl_cfg.ul_nr_of_entries;
	channel->ul_pipe.pipe_nr = chnl_cfg.ul_pipe;
	channel->ul_pipe.is_open = false;
	channel->ul_pipe.irq = IPC_UL_PIPE_IRQ_VECTOR;
	channel->ul_pipe.channel = channel;
	channel->ul_pipe.dir = IPC_MEM_DIR_UL;
	channel->ul_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->ul_pipe.irq_moderation = irq_moderation;
	channel->ul_pipe.buf_size = 0;

	channel->dl_pipe.nr_of_entries = chnl_cfg.dl_nr_of_entries;
	channel->dl_pipe.pipe_nr = chnl_cfg.dl_pipe;
	channel->dl_pipe.is_open = false;
	channel->dl_pipe.irq = IPC_DL_PIPE_IRQ_VECTOR;
	channel->dl_pipe.channel = channel;
	channel->dl_pipe.dir = IPC_MEM_DIR_DL;
	channel->dl_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->dl_pipe.irq_moderation = irq_moderation;
	channel->dl_pipe.buf_size = chnl_cfg.dl_buf_size;
}

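/* Clean up the pipes of all channels and mark them free. */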
static void ipc_imem_channel_reset(struct iosm_imem *ipc_imem)
{
	int i;

	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		struct ipc_mem_channel *channel;

		channel = &ipc_imem->channels[i];

		ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
		ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);

		ipc_imem_channel_free(channel);
	}
}

void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	struct sk_buff *skb;

	/* Force pipe to closed state also when not explicitly closed through
	 * ipc_imem_pipe_close()
	 */
	pipe->is_open = false;

	/* Empty the uplink skb accumulator. */
	while ((skb = skb_dequeue(&pipe->channel->ul_list)))
		ipc_pcie_kfree_skb(ipc_imem->pcie, skb);

	ipc_protocol_pipe_cleanup(ipc_imem->ipc_protocol, pipe);
}

/* Send IPC protocol uninit to the modem when Link is active. */
static void ipc_imem_device_ipc_uninit(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_UNINIT_TIMEOUT_MS;
	enum ipc_mem_device_ipc_state ipc_state;

	/* When the PCIe link is up set IPC_UNINIT
	 * of the modem otherwise ignore it when PCIe link down happens.
	 */
	if (ipc_pcie_check_data_link_active(ipc_imem->pcie)) {
		/* set modem to UNINIT
		 * (in case we want to reload the AP driver without resetting
		 * the modem)
		 */
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_UNINIT);
		ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);

		/* Wait for maximum 30ms to allow the Modem to uninitialize the
		 * protocol.
		 */
		while ((ipc_state <= IPC_MEM_DEVICE_IPC_DONT_CARE) &&
		       (ipc_state != IPC_MEM_DEVICE_IPC_UNINIT) &&
		       (timeout > 0)) {
			usleep_range(1000, 1250);
			timeout--;
			ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
		}
	}
}

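/* Shut down the shared memory layer: stop timers and workers, uninit the
 * device IPC and release all subcomponents.
 */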
void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
{
	ipc_imem->phase = IPC_P_OFF_REQ;

	/* forward MDM_NOT_READY to listeners */
	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);

	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);

	/* cancel the workqueue */
	cancel_work_sync(&ipc_imem->run_state_worker);

	if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
		ipc_mux_deinit(ipc_imem->mux);
		ipc_wwan_deinit(ipc_imem->wwan);
		ipc_port_deinit(ipc_imem->ipc_port);
	}

	ipc_imem_device_ipc_uninit(ipc_imem);
	ipc_imem_channel_reset(ipc_imem);

	ipc_protocol_deinit(ipc_imem->ipc_protocol);
	ipc_task_deinit(ipc_imem->ipc_task);

	kfree(ipc_imem->ipc_task);
	kfree(ipc_imem->mmio);

	ipc_imem->phase = IPC_P_OFF;
}

/* After CP has unblocked the PCIe link, save the start address of the doorbell
 * scratchpad and prepare the shared memory region. If the flashing to RAM
 * procedure shall be executed, copy the chip information from the doorbell
 * scratchpad to the application buffer and wake up the flash app.
 */
static int ipc_imem_config(struct iosm_imem *ipc_imem)
{
	enum ipc_phase phase;

	/* Initialize the semaphore for the blocking read UL/DL transfer. */
	init_completion(&ipc_imem->ul_pend_sem);

	init_completion(&ipc_imem->dl_pend_sem);

	/* clear internal flags */
	ipc_imem->ipc_status = IPC_MEM_DEVICE_IPC_UNINIT;
	ipc_imem->enter_runtime = 0;

	phase = ipc_imem_phase_update(ipc_imem);

	/* Either CP shall be in the power off or power on phase. */
	switch (phase) {
	case IPC_P_ROM:
		ipc_imem->hrtimer_period = ktime_set(0, 1000 * 1000 * 1000ULL);
		/* poll execution stage (for delayed start, e.g. NAND) */
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
		return 0;

	case IPC_P_PSI:
	case IPC_P_EBL:
	case IPC_P_RUN:
		/* The initial IPC state is IPC_MEM_DEVICE_IPC_UNINIT. */
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_UNINIT;

		/* Verify the expected initial state. */
		if (ipc_imem->ipc_requested_state ==
		    ipc_mmio_get_ipc_state(ipc_imem->mmio)) {
			ipc_imem_ipc_init_check(ipc_imem);

			return 0;
		}
		dev_err(ipc_imem->dev,
			"ipc_status(%d) != IPC_MEM_DEVICE_IPC_UNINIT",
			ipc_mmio_get_ipc_state(ipc_imem->mmio));
		break;
	case IPC_P_CRASH:
	case IPC_P_CD_READY:
		dev_dbg(ipc_imem->dev,
			"Modem is in phase %d, reset Modem to collect CD",
			phase);
		return 0;
	default:
		dev_err(ipc_imem->dev, "unexpected operation phase %d", phase);
		break;
	}

	complete(&ipc_imem->dl_pend_sem);
	complete(&ipc_imem->ul_pend_sem);
	ipc_imem->phase = IPC_P_OFF;
	return -EIO;
}

/* Pass the dev ptr to the shared memory driver and request the entry points */
struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
				void __iomem *mmio, struct device *dev)
{
	struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);

	if (!ipc_imem)
		return NULL;

	/* Save the device address. */
	ipc_imem->pcie = pcie;
	ipc_imem->dev = dev;

	ipc_imem->pci_device_id = device_id;

	ipc_imem->ev_cdev_write_pending = false;
	ipc_imem->cp_version = 0;
	ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;

	/* Reset the flash channel id. */
	ipc_imem->flash_channel_id = -1;

	/* Reset the max number of configured channels */
	ipc_imem->nr_of_channels = 0;

	/* allocate IPC MMIO */
	ipc_imem->mmio = ipc_mmio_init(mmio, ipc_imem->dev);
	if (!ipc_imem->mmio) {
		dev_err(ipc_imem->dev, "failed to initialize mmio region");
		goto mmio_init_fail;
	}

	ipc_imem->ipc_task = kzalloc(sizeof(*ipc_imem->ipc_task),
				     GFP_KERNEL);

	/* Create tasklet for event handling */
	if (!ipc_imem->ipc_task)
		goto ipc_task_fail;

	if (ipc_task_init(ipc_imem->ipc_task))
		goto ipc_task_init_fail;

	ipc_imem->ipc_task->dev = ipc_imem->dev;

	INIT_WORK(&ipc_imem->run_state_worker, ipc_imem_run_state_worker);

	ipc_imem->ipc_protocol = ipc_protocol_init(ipc_imem);

	if (!ipc_imem->ipc_protocol)
		goto protocol_init_fail;

	/* The phase is set to power off. */
	ipc_imem->phase = IPC_P_OFF;

	hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;

	hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;

	hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;

	hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;

	if (ipc_imem_config(ipc_imem)) {
		dev_err(ipc_imem->dev, "failed to initialize the imem");
		goto imem_config_fail;
	}

	return ipc_imem;

imem_config_fail:
	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);
protocol_init_fail:
	cancel_work_sync(&ipc_imem->run_state_worker);
	ipc_task_deinit(ipc_imem->ipc_task);
ipc_task_init_fail:
	kfree(ipc_imem->ipc_task);
ipc_task_fail:
	kfree(ipc_imem->mmio);
mmio_init_fail:
	kfree(ipc_imem);
	return NULL;
}

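/* MSI entry point: debounce the irq event and defer the handling to the
 * tasklet.
 */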
void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq)
{
	/* Debounce IPC_EV_IRQ. */
	if (ipc_imem && !ipc_imem->ev_irq_pending[irq]) {
		ipc_imem->ev_irq_pending[irq] = true;
		ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_irq_cb, irq,
					 NULL, 0, false);
	}
}

void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
{
	ipc_imem->td_update_timer_suspended = suspend;
}