1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2020-21 Intel Corporation.
6 #include <linux/nospec.h>
8 #include "iosm_ipc_imem_ops.h"
9 #include "iosm_ipc_mux_codec.h"
10 #include "iosm_ipc_task_queue.h"
12 /* Test the link power state and send a MUX command in blocking mode. */
/* Task-queue callback: @msg carries a struct mux_acb whose pre-built skb
 * holds the encoded MUX command. The skb is appended to the channel UL
 * list and the uplink send is kicked immediately.
 * NOTE(review): interior lines are elided in this capture; the return
 * path is not visible here.
 */
13 static int ipc_mux_tq_cmd_send(struct iosm_imem *ipc_imem, int arg, void *msg,
16 	struct iosm_mux *ipc_mux = ipc_imem->mux;
/* msg is the ACB prepared by the caller (see ipc_mux_acb_send). */
17 	const struct mux_acb *acb = msg;
19 	skb_queue_tail(&ipc_mux->channel->ul_list, acb->skb);
/* Trigger the uplink transfer of everything queued on ul_list. */
20 	ipc_imem_ul_send(ipc_mux->imem);
/* Hand the prepared ACB to the task queue for transmission. When
 * @blocking is true, wait on the channel UL completion for the modem's
 * response; on timeout a modem-timeout uevent is raised.
 * NOTE(review): interior lines are elided in this capture (error-return
 * statements and closing braces are not visible).
 */
25 static int ipc_mux_acb_send(struct iosm_mux *ipc_mux, bool blocking)
27 	struct completion *completion = &ipc_mux->channel->ul_sem;
/* Defer the actual send to ipc_mux_tq_cmd_send on the IPC task queue. */
28 	int ret = ipc_task_queue_send_task(ipc_mux->imem, ipc_mux_tq_cmd_send,
30 					   sizeof(ipc_mux->acb), false);
/* Task could not be scheduled -> the command was never sent. */
32 		dev_err(ipc_mux->dev, "unable to send mux command");
36 	/* if blocking, suspend the app and wait for irq in the flash or
37 	 * crash phase. return false on timeout to indicate failure.
40 		u32 wait_time_milliseconds = IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT;
/* Re-arm the completion so a stale complete() from an earlier command
 * cannot satisfy this wait.
 */
42 		reinit_completion(completion);
44 		if (wait_for_completion_interruptible_timeout
45 		    (completion, msecs_to_jiffies(wait_time_milliseconds)) ==
/* Wait expired (or was interrupted) -> log and notify user space. */
47 			dev_err(ipc_mux->dev, "ch[%d] timeout",
49 			ipc_uevent_send(ipc_mux->imem->dev, UEVENT_MDM_TIMEOUT);
57 /* Prepare mux Command */
/* Fill a MUX-lite command header at the start of acb->skb: signature,
 * command type, interface id, total length, and a rolling transaction id.
 * Optional @param bytes (size @param_size) are copied behind the header,
 * then the full command length is reserved in the skb via skb_put().
 * Returns the header pointer (return statement elided in this capture).
 */
58 static struct mux_lite_cmdh *ipc_mux_lite_add_cmd(struct iosm_mux *ipc_mux,
59 						  u32 cmd, struct mux_acb *acb,
60 						  void *param, u32 param_size)
/* The command is built in place at the head of the ACB skb buffer. */
62 	struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)acb->skb->data;
64 	cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
65 	cmdh->command_type = cpu_to_le32(cmd);
66 	cmdh->if_id = acb->if_id;
/* Wire length = fixed header up to 'param' plus the payload size
 * (the param_size term is on an elided continuation line).
 */
70 	cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_lite_cmdh, param) +
/* Post-increment: each command gets a fresh transaction id. */
72 	cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
75 		memcpy(&cmdh->param, param, param_size);
/* Account the complete command in the skb data length. */
77 	skb_put(acb->skb, le16_to_cpu(cmdh->cmd_len));
/* Allocate and zero a DMA-mapped (DMA_TO_DEVICE) uplink skb of
 * MUX_MAX_UL_ACB_BUF_SIZE bytes for the single ACB instance.
 * NOTE(review): the NULL-check / acb->skb assignment lines are elided in
 * this capture.
 */
82 static int ipc_mux_acb_alloc(struct iosm_mux *ipc_mux)
84 	struct mux_acb *acb = &ipc_mux->acb;
88 	/* Allocate skb memory for the uplink buffer. */
/* GFP_ATOMIC: this may run in non-sleepable context. */
89 	skb = ipc_pcie_alloc_skb(ipc_mux->pcie, MUX_MAX_UL_ACB_BUF_SIZE,
90 				 GFP_ATOMIC, &mapping, DMA_TO_DEVICE, 0);
94 	/* Save the skb address. */
/* Start from a clean buffer so unused header fields are zero. */
97 	memset(skb->data, 0, MUX_MAX_UL_ACB_BUF_SIZE);
/* Allocate an ACB, encode @cmd_type with optional @param/@res_size into
 * it, and send it (optionally @blocking). When replying to a modem
 * command (@respond), the caller-supplied @transaction_id overwrites the
 * locally generated one so the modem can correlate the response.
 * NOTE(review): error-handling lines between the visible calls are
 * elided in this capture.
 */
102 int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
103 			     u32 transaction_id, union mux_cmd_param *param,
104 			     size_t res_size, bool blocking, bool respond)
106 	struct mux_acb *acb = &ipc_mux->acb;
107 	struct mux_lite_cmdh *ack_lite;
111 	ret = ipc_mux_acb_alloc(ipc_mux);
115 	ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb, param,
/* Echo the modem's transaction id instead of our rolling counter. */
118 		ack_lite->transaction_id = cpu_to_le32(transaction_id);
120 	ret = ipc_mux_acb_send(ipc_mux, blocking);
/* Thin wrapper: forward a flow-control start/stop request for queue
 * @idx of @session to the WWAN network-interface layer.
 */
125 void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on)
127 	/* Inform the network interface to start/stop flow ctrl */
128 	ipc_wwan_tx_flowctrl(session->wwan, idx, on);
/* Try to interpret @cmdh as a *response* to a previously sent command.
 * On a recognized response the stored parameters/response code are saved
 * in the ACB and the waiter on ul_sem is woken. A non-zero return tells
 * the caller this was not a response (it may be an unsolicited command).
 * NOTE(review): several case bodies and the default branch are elided in
 * this capture.
 */
131 static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
132 					      struct mux_lite_cmdh *cmdh)
134 	struct mux_acb *acb = &ipc_mux->acb;
136 	switch (le32_to_cpu(cmdh->command_type)) {
137 	case MUX_CMD_OPEN_SESSION_RESP:
138 	case MUX_CMD_CLOSE_SESSION_RESP:
139 		/* Resume the control application. */
/* Hand the response parameters to the blocked sender. */
140 		acb->got_param = cmdh->param;
143 	case MUX_LITE_CMD_FLOW_CTL_ACK:
144 		/* This command type is not expected as response for
145 		 * Aggregation version of the protocol. So return non-zero.
/* FLOW_CTL_ACK is only valid for the MUX-lite protocol variant. */
147 		if (ipc_mux->protocol != MUX_LITE)
150 		dev_dbg(ipc_mux->dev, "if %u FLOW_CTL_ACK %u received",
151 			cmdh->if_id, le32_to_cpu(cmdh->transaction_id));
/* Record what arrived and release the waiter in ipc_mux_acb_send(). */
158 	acb->wanted_response = MUX_CMD_INVALID;
159 	acb->got_response = le32_to_cpu(cmdh->command_type);
160 	complete(&ipc_mux->channel->ul_sem);
/* Decode an unsolicited downlink *command* from the modem. Currently the
 * visible handling covers MUX_LITE_CMD_FLOW_CTL: mask 0xFFFFFFFF enables
 * flow control (stop UL TX for the session), mask 0 disables it; stats
 * counters are updated either way. Returns 0 when the command was
 * handled, negative on an invalid interface id.
 * NOTE(review): some branch bodies (e.g. LINK_STATUS_REPORT) are elided
 * in this capture.
 */
165 static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux,
166 					    struct mux_lite_cmdh *cmdh)
168 	union mux_cmd_param *param = &cmdh->param;
169 	struct mux_session *session;
172 	dev_dbg(ipc_mux->dev, "if_id[%d]: dlcmds decode process %d",
173 		cmdh->if_id, le32_to_cpu(cmdh->command_type));
175 	switch (le32_to_cpu(cmdh->command_type)) {
176 	case MUX_LITE_CMD_FLOW_CTL:
/* Bounds-check the modem-supplied interface id before indexing. */
178 		if (cmdh->if_id >= ipc_mux->nr_sessions) {
179 			dev_err(ipc_mux->dev, "if_id [%d] not valid",
181 			return -EINVAL; /* No session interface id. */
184 		session = &ipc_mux->session[cmdh->if_id];
/* Expected command length when the mask field is present on the wire. */
186 		new_size = offsetof(struct mux_lite_cmdh, param) +
187 			   sizeof(param->flow_ctl);
188 		if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) {
189 			/* Backward Compatibility */
/* Newer modems send the mask explicitly; older ones imply all-ones. */
190 			if (cmdh->cmd_len == cpu_to_le16(new_size))
191 				session->flow_ctl_mask =
192 					le32_to_cpu(param->flow_ctl.mask);
193 				session->flow_ctl_mask = ~0;
195 			/* if CP asks for FLOW CTRL Enable
196 			 * then set our internal flow control Tx flag
197 			 * to limit uplink session queueing
199 			session->net_tx_stop = true;
200 			/* Update the stats */
201 			session->flow_ctl_en_cnt++;
202 		} else if (param->flow_ctl.mask == 0) {
203 			/* Just reset the Flow control mask and let
204 			 * mux_flow_ctrl_low_thre_b take control on
205 			 * our internal Tx flag and enabling kernel
208 			/* Backward Compatibility */
209 			if (cmdh->cmd_len == cpu_to_le16(new_size))
210 				session->flow_ctl_mask =
211 					le32_to_cpu(param->flow_ctl.mask);
213 				session->flow_ctl_mask = 0;
214 			/* Update the stats */
215 			session->flow_ctl_dis_cnt++;
220 		dev_dbg(ipc_mux->dev, "if[%u] FLOW CTRL 0x%08X", cmdh->if_id,
221 			le32_to_cpu(param->flow_ctl.mask));
224 	case MUX_LITE_CMD_LINK_STATUS_REPORT:
233 /* Decode and Send appropriate response to a command block. */
/* Entry point for a downlink CMDH skb: first try to treat it as a
 * response to one of our commands; if that fails, decode it as a modem
 * command and, when decoding succeeds, send the matching response
 * (LINK_STATUS_REPORT_RESP or FLOW_CTL_ACK) echoing the modem's
 * transaction id.
 * NOTE(review): the 'size' computation lines are elided in this capture.
 */
234 static void ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
236 	struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data;
/* Preserve the incoming id before the header is reused for the reply. */
237 	__le32 trans_id = cmdh->transaction_id;
239 	if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh)) {
240 		/* Unable to decode command response indicates the cmd_type
241 		 * may be a command instead of response. So try to decoding it.
243 		if (!ipc_mux_dl_dlcmds_decode_process(ipc_mux, cmdh)) {
244 			/* Decoded command may need a response. Give the
245 			 * response according to the command type.
247 			union mux_cmd_param *mux_cmd = NULL;
/* Default reply; overridden below for FLOW_CTL. */
249 			u32 cmd = MUX_LITE_CMD_LINK_STATUS_REPORT_RESP;
251 			if (cmdh->command_type ==
252 			    cpu_to_le32(MUX_LITE_CMD_LINK_STATUS_REPORT)) {
/* Reuse the received param area to carry the success status back. */
253 				mux_cmd = &cmdh->param;
254 				mux_cmd->link_status_resp.response =
255 					cpu_to_le32(MUX_CMD_RESP_SUCCESS);
256 				/* response field is u32 */
258 			} else if (cmdh->command_type ==
259 				   cpu_to_le32(MUX_LITE_CMD_FLOW_CTL)) {
260 				cmd = MUX_LITE_CMD_FLOW_CTL_ACK;
/* Non-blocking send: we are in the DL decode path. */
265 			if (ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
266 						     le32_to_cpu(trans_id),
267 						     mux_cmd, size, false,
269 				dev_err(ipc_mux->dev,
270 					"if_id %d: cmd send failed",
276 /* Pass the DL packet to the netif layer. */
/* Clone the DL skb (the original is freed/reused by the caller), strip
 * the @offset bytes of MUX header/padding, tag the clone with the
 * service class as priority, and deliver it to WWAN netdev index
 * if_id + 1.
 * NOTE(review): the clone NULL-check line is elided in this capture.
 */
277 static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
278 			       struct iosm_wwan *wwan, u32 offset,
279 			       u8 service_class, struct sk_buff *skb)
/* GFP_ATOMIC: DL processing runs in non-sleepable context. */
281 	struct sk_buff *dest_skb = skb_clone(skb, GFP_ATOMIC);
/* Drop the MUX header + padding so data starts at the IP packet. */
286 	skb_pull(dest_skb, offset);
287 	skb_set_tail_pointer(dest_skb, dest_skb->len);
288 	/* Pass the packet to the netif layer. */
289 	dest_skb->priority = service_class;
/* +1: netdev interface ids are offset by one from session ids. */
291 	return ipc_wwan_receive(wwan, dest_skb, false, if_id + 1);
294 /* Decode Flow Credit Table in the block */
/* Parse a flow-credit table (QLTH-style generic table) sent by the
 * modem: validate its vfl length and session id, add the granted byte
 * credits to the session, and re-enable uplink TX when credits become
 * positive.
 * NOTE(review): declarations of if_id/ul_credits and some early-return
 * lines are elided in this capture.
 */
295 static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
296 				   unsigned char *block)
298 	struct ipc_mem_lite_gen_tbl *fct = (struct ipc_mem_lite_gen_tbl *)block;
299 	struct iosm_wwan *wwan;
/* Table must carry exactly one nr_of_bytes entry. */
303 	if (fct->vfl_length != sizeof(fct->vfl.nr_of_bytes)) {
304 		dev_err(ipc_mux->dev, "unexpected FCT length: %d",
/* Reject out-of-range session ids from the modem. */
310 	if (if_id >= ipc_mux->nr_sessions) {
311 		dev_err(ipc_mux->dev, "not supported if_id: %d", if_id);
315 	/* Is the session active ? */
/* Clamp the untrusted index against Spectre-v1 speculation. */
316 	if_id = array_index_nospec(if_id, ipc_mux->nr_sessions);
317 	wwan = ipc_mux->session[if_id].wwan;
319 		dev_err(ipc_mux->dev, "session Net ID is NULL");
323 	ul_credits = fct->vfl.nr_of_bytes;
325 	dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d",
326 		if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits);
328 	/* Update the Flow Credit information from ADB */
329 	ipc_mux->session[if_id].ul_flow_credits += ul_credits;
331 	/* Check whether the TX can be started */
332 	if (ipc_mux->session[if_id].ul_flow_credits > 0) {
333 		ipc_mux->session[if_id].net_tx_stop = false;
/* 'false' = resume: lift the netif flow-control stop. */
334 		ipc_mux_netif_tx_flowctrl(&ipc_mux->session[if_id],
335 					  ipc_mux->session[if_id].if_id, false);
339 /* Decode non-aggregated datagram */
/* Validate an ADGH (non-aggregated datagram header) skb, compute the
 * payload offset from the negotiated head padding, and pass the packet
 * to the netif layer for the matching session. On delivery failure the
 * session is marked for flush.
 * NOTE(review): the skb parameter line, if_id extraction and several
 * early-return lines are elided in this capture.
 */
340 static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
343 	u32 pad_len, packet_offset;
344 	struct iosm_wwan *wwan;
345 	struct mux_adgh *adgh;
346 	u8 *block = skb->data;
350 	adgh = (struct mux_adgh *)block;
/* Only ADGH-signed blocks are handled here. */
352 	if (adgh->signature != cpu_to_le32(MUX_SIG_ADGH)) {
353 		dev_err(ipc_mux->dev, "invalid ADGH signature received");
/* Bounds-check the modem-supplied session id. */
358 	if (if_id >= ipc_mux->nr_sessions) {
359 		dev_err(ipc_mux->dev, "invalid if_id while decoding %d", if_id);
363 	/* Is the session active ? */
/* Clamp the untrusted index against Spectre-v1 speculation. */
364 	if_id = array_index_nospec(if_id, ipc_mux->nr_sessions);
365 	wwan = ipc_mux->session[if_id].wwan;
367 		dev_err(ipc_mux->dev, "session Net ID is NULL");
371 	/* Store the pad len for the corresponding session
372 	 * Pad bytes as negotiated in the open session less the header size
373 	 * (see session management chapter for details).
374 	 * If resulting padding is zero or less, the additional head padding is
375 	 * omitted. For e.g., if HEAD_PAD_LEN = 16 or less, this field is
376 	 * omitted if HEAD_PAD_LEN = 20, then this field will have 4 bytes
380 		  ipc_mux->session[if_id].dl_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
/* Skip the ADGH itself plus any negotiated padding to reach the IP data. */
381 	packet_offset = sizeof(*adgh) + pad_len;
/* Translate session id to the WWAN queue index. */
383 	if_id += ipc_mux->wwan_q_offset;
385 	/* Pass the packet to the netif layer */
386 	rc = ipc_mux_net_receive(ipc_mux, if_id, wwan, packet_offset,
387 				 adgh->service_class, skb);
389 		dev_err(ipc_mux->dev, "mux adgh decoding error");
/* Delivery failed: request a flush for this session. */
392 	ipc_mux->session[if_id].flush = 1;
/* Top-level downlink dispatcher: read the 32-bit signature at the head
 * of @skb and route to the ADGH, flow-credit-table, or command decoder;
 * unknown signatures are logged. The skb is freed at the end.
 * NOTE(review): the switch/case framing lines are elided in this
 * capture; only the per-signature calls are visible.
 */
395 void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
402 	/* Decode the MUX header type. */
403 	signature = le32_to_cpup((__le32 *)skb->data);
/* Non-aggregated datagram. */
407 		ipc_mux_dl_adgh_decode(ipc_mux, skb);
/* Flow credit table. */
411 		ipc_mux_dl_fcth_decode(ipc_mux, skb->data);
/* Command / command response. */
415 		ipc_mux_dl_cmd_decode(ipc_mux, skb);
419 		dev_err(ipc_mux->dev, "invalid ABH signature");
/* DL skb ownership ends here in every path. */
422 	ipc_pcie_kfree_skb(ipc_mux->pcie, skb);
/* Pull a pre-allocated skb from the UL free list and initialize it for
 * the requested @type: for an ADB, reset the bookkeeping fields and zero
 * the ADGH header; for a QLTH, size-check and zero the queue-level table
 * and reserve its length. Returns -EBUSY when the free list is empty.
 * NOTE(review): the type-switch framing and some declarations (qlt_size)
 * are elided in this capture.
 */
425 static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
426 				struct mux_adb *ul_adb, u32 type)
428 	/* Take the first element of the free list. */
429 	struct sk_buff *skb = skb_dequeue(&ul_adb->free_list);
433 		return -EBUSY; /* Wait for a free ADB skb. */
435 	/* Mark it as UL ADB to select the right free operation. */
436 	IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB;
440 	/* Save the ADB memory settings. */
441 	ul_adb->dest_skb = skb;
442 	ul_adb->buf = skb->data;
443 	ul_adb->size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;
444 	/* reset statistic counter */
446 	ul_adb->payload_size = 0;
447 	ul_adb->dg_cnt_total = 0;
/* The ADGH header is built in place at the start of the buffer. */
449 	ul_adb->adgh = (struct mux_adgh *)skb->data;
450 	memset(ul_adb->adgh, 0, sizeof(struct mux_adgh));
/* QLTH path: table header plus MUX_QUEUE_LEVEL vfl entries. */
454 	qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
455 		   (MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl));
457 	if (qlt_size > IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE) {
458 		dev_err(ipc_mux->dev,
459 			"can't support. QLT size:%d SKB size: %d",
460 			qlt_size, IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE);
464 	ul_adb->qlth_skb = skb;
465 	memset((ul_adb->qlth_skb)->data, 0, qlt_size);
/* Reserve the table length in the skb. */
466 	skb_put(skb, qlt_size);
/* Close out the current UL ADGH: size the dest skb to the encoded
 * length, queue it on the channel UL list, and update the accounting
 * (per-session credits under MUX_UL_ON_CREDITS, otherwise the global
 * pending-byte counter).
 * NOTE(review): declarations of str/bytes/adgh_len and the else-branch
 * framing are elided in this capture.
 */
473 static void ipc_mux_ul_adgh_finish(struct iosm_mux *ipc_mux)
475 	struct mux_adb *ul_adb = &ipc_mux->ul_adb;
/* Nothing in flight -> nothing to finish. */
480 	if (!ul_adb->dest_skb) {
481 		dev_err(ipc_mux->dev, "no dest skb");
/* The ADGH 'length' field carries the total encoded size. */
485 	adgh_len = le16_to_cpu(ul_adb->adgh->length);
486 	skb_put(ul_adb->dest_skb, adgh_len);
487 	skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
/* Ownership moved to ul_list; force a fresh ADB for the next packet. */
488 	ul_adb->dest_skb = NULL;
490 	if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
491 		struct mux_session *session;
493 		session = &ipc_mux->session[ul_adb->adgh->if_id];
494 		str = "available_credits";
495 		bytes = (long long)session->ul_flow_credits;
/* Byte-based flow control: grow the pending-bytes counter. */
499 		bytes = ipc_mux->ul_data_pend_bytes;
500 		ipc_mux->ul_data_pend_bytes = ipc_mux->ul_data_pend_bytes +
504 	dev_dbg(ipc_mux->dev, "UL ADGH: size=%u, if_id=%d, payload=%d, %s=%lld",
505 		adgh_len, ul_adb->adgh->if_id, ul_adb->payload_size,
509 /* Allocates an ADB from the free list and initializes it with ADBH */
/* Lazily ensure adb->dest_skb exists: if no ADB is pending, pull one via
 * ipc_mux_ul_skb_alloc(). Returns true when no skb could be obtained
 * (caller must retry later), false when an ADB is ready.
 * NOTE(review): the status check and *size_needed reset lines are elided
 * in this capture.
 */
510 static bool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux,
511 				    struct mux_adb *adb, int *size_needed,
514 	bool ret_val = false;
/* Reuse the ADB already in progress when one exists. */
517 	if (!adb->dest_skb) {
518 		/* Allocate memory for the ADB including of the
519 		 * datagram table header.
521 		status = ipc_mux_ul_skb_alloc(ipc_mux, adb, type);
523 			/* Is a pending ADB available ? */
524 			ret_val = true; /* None. */
526 		/* Update size need to zero only for new ADB memory */
533 /* Informs the network stack to stop sending further packets for all opened
/* Walk every session slot and set its net_tx_stop flag, halting uplink
 * queueing until flow control is lifted again.
 * NOTE(review): the per-session active check between the visible lines
 * is elided in this capture.
 */
536 static void ipc_mux_stop_tx_for_all_sessions(struct iosm_mux *ipc_mux)
538 	struct mux_session *session;
541 	for (idx = 0; idx < ipc_mux->nr_sessions; idx++) {
542 		session = &ipc_mux->session[idx];
547 		session->net_tx_stop = true;
551 /* Sends Queue Level Table of all opened sessions */
/* For every active, non-flow-controlled session, build a QLT (queue
 * level table) carrying the session's UL queue length, append it to the
 * channel UL list, and finally trigger a TD update. Returns whether any
 * QLT was queued (qlt_updated).
 * NOTE(review): loop-variable declarations, the qlth_skb data offset and
 * the return are elided in this capture.
 */
552 static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
554 	struct ipc_mem_lite_gen_tbl *qlt;
555 	struct mux_session *session;
556 	bool qlt_updated = false;
/* QLTs are only meaningful while the MUX channel is up. */
560 	if (!ipc_mux->initialized || ipc_mux->state != MUX_S_ACTIVE)
563 	qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
564 		   MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
566 	for (i = 0; i < ipc_mux->nr_sessions; i++) {
567 		session = &ipc_mux->session[i];
/* Skip closed sessions and sessions under modem flow control. */
569 		if (!session->wwan || session->flow_ctl_mask)
572 		if (ipc_mux_ul_skb_alloc(ipc_mux, &ipc_mux->ul_adb,
574 			dev_err(ipc_mux->dev,
575 				"no reserved mem to send QLT of if_id: %d", i);
/* Fill the generic table header in the freshly reserved skb. */
580 		qlt = (struct ipc_mem_lite_gen_tbl *)(ipc_mux->ul_adb.qlth_skb)
582 		qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
583 		qlt->length = cpu_to_le16(qlt_size);
585 		qlt->vfl_length = MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
586 		qlt->reserved[0] = 0;
587 		qlt->reserved[1] = 0;
/* Report the current UL queue depth for this session. */
589 		qlt->vfl.nr_of_bytes = session->ul_list.qlen;
591 		/* Add QLT to the transfer list. */
592 		skb_queue_tail(&ipc_mux->channel->ul_list,
593 			       ipc_mux->ul_adb.qlth_skb);
/* skb ownership moved to ul_list. */
596 		ipc_mux->ul_adb.qlth_skb = NULL;
600 		/* Updates the TDs with ul_list */
601 		(void)ipc_imem_ul_write_td(ipc_mux->imem);
606 /* Checks the available credits for the specified session and returns
607  * number of packets for which credits are available.
/* Compute the UL byte budget — per-session credits under
 * MUX_UL_ON_CREDITS, otherwise headroom below the global high watermark
 * (stopping all TX when exceeded) — then walk @ul_list counting how many
 * head-of-queue packets fit, capped at max_nr_of_pkts.
 * NOTE(review): the max_nr_of_pkts parameter line, credit/skb
 * declarations and the loop-body accounting are elided in this capture.
 */
609 static int ipc_mux_ul_bytes_credits_check(struct iosm_mux *ipc_mux,
610 					  struct mux_session *session,
611 					  struct sk_buff_head *ul_list,
614 	int pkts_to_send = 0;
618 	if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
619 		credits = session->ul_flow_credits;
/* Not enough credits for anything queued. */
621 			dev_dbg(ipc_mux->dev,
622 				"FC::if_id[%d] Insuff.Credits/Qlen:%d/%u",
623 				session->if_id, session->ul_flow_credits,
624 				session->ul_list.qlen); /* nr_of_bytes */
/* Byte-based mode: remaining room below the high-water threshold. */
628 		credits = IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B -
629 			  ipc_mux->ul_data_pend_bytes;
/* Threshold exceeded: throttle every session. */
631 			ipc_mux_stop_tx_for_all_sessions(ipc_mux);
633 			dev_dbg(ipc_mux->dev,
634 				"if_id[%d] encod. fail Bytes: %llu, thresh: %d",
635 				session->if_id, ipc_mux->ul_data_pend_bytes,
636 				IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B);
641 	/* Check if there are enough credits/bytes available to send the
642 	 * requested max_nr_of_pkts. Otherwise restrict the nr_of_pkts
643 	 * depending on available credits.
645 	skb_queue_walk(ul_list, skb)
/* Stop as soon as the next packet no longer fits the budget/cap. */
647 		if (!(credits >= skb->len && pkts_to_send < max_nr_of_pkts))
656 /* Encode the UL IP packet according to Lite spec. */
/* Encode up to @nr_of_pkts UL packets of @session into ADGH frames:
 * budget-check via ipc_mux_ul_bytes_credits_check(), then per packet
 * allocate/reuse an ADB, copy the payload behind header + pad, fill the
 * ADGH fields, finish the frame, and dequeue/free the source skb. Ends
 * with optional QLT emission and a TD update. Returns 1 when at least
 * one packet was added to the transfer list.
 * NOTE(review): pad_len/nr_of_skb declarations, the <=0 early return and
 * several continuation lines are elided in this capture.
 */
657 static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
658 				  struct mux_session *session,
659 				  struct sk_buff_head *ul_list,
660 				  struct mux_adb *adb, int nr_of_pkts)
/* Payload starts right behind the ADGH header. */
662 	int offset = sizeof(struct mux_adgh);
663 	int adb_updated = -EINVAL;
664 	struct sk_buff *src_skb;
665 	int aligned_size = 0;
669 	/* Re-calculate the number of packets depending on number of bytes to be
670 	 * processed/available credits.
672 	nr_of_pkts = ipc_mux_ul_bytes_credits_check(ipc_mux, session, ul_list,
675 	/* If calculated nr_of_pkts from available credits is <= 0
676 	 * then nothing to do.
681 	/* Read configured UL head_pad_length for session.*/
/* Padding beyond the Ethernet offset is the part we must insert. */
682 	if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
683 		pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
685 	/* Process all pending UL packets for this session
686 	 * depending on the allocated datagram table size.
688 	while (nr_of_pkts > 0) {
689 		/* get destination skb allocated */
690 		if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
692 			dev_err(ipc_mux->dev, "no reserved memory for ADGH");
696 		/* Peek at the head of the list. */
/* Peek (not dequeue): the skb is removed only after a successful copy. */
697 		src_skb = skb_peek(ul_list);
699 			dev_err(ipc_mux->dev,
700 				"skb peek return NULL with count : %d",
705 		/* Calculate the memory value. */
/* 4-byte align header padding + payload as required by the frame. */
706 		aligned_size = ALIGN((pad_len + src_skb->len), 4);
708 		ipc_mux->size_needed = sizeof(struct mux_adgh) + aligned_size;
710 		if (ipc_mux->size_needed > adb->size) {
711 			dev_dbg(ipc_mux->dev, "size needed %d, adgh size %d",
712 				ipc_mux->size_needed, adb->size);
713 			/* Return 1 if any IP packet is added to the transfer
716 			return nr_of_skb ? 1 : 0;
719 		/* Add buffer (without head padding to next pending transfer) */
720 		memcpy(adb->buf + offset + pad_len, src_skb->data,
/* Fill the ADGH fields for this datagram. */
723 		adb->adgh->signature = cpu_to_le32(MUX_SIG_ADGH);
724 		adb->adgh->if_id = session_id;
726 			cpu_to_le16(sizeof(struct mux_adgh) + pad_len +
728 		adb->adgh->service_class = src_skb->priority;
/* next_count reflects how many packets remain after this one. */
729 		adb->adgh->next_count = --nr_of_pkts;
731 		adb->payload_size += src_skb->len;
733 		if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS)
734 			/* Decrement the credit value as we are processing the
735 			 * datagram from the UL list.
737 			session->ul_flow_credits -= src_skb->len;
739 		/* Remove the processed elements and free it. */
740 		src_skb = skb_dequeue(ul_list);
741 		dev_kfree_skb(src_skb);
/* Queue the completed ADGH onto the channel UL list. */
744 		ipc_mux_ul_adgh_finish(ipc_mux);
748 	/* Send QLT info to modem if pending bytes > high watermark
749 	 * in case of mux lite
751 	if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS ||
752 	    ipc_mux->ul_data_pend_bytes >=
753 	    IPC_MEM_MUX_UL_FLOWCTRL_LOW_B)
754 		adb_updated = ipc_mux_lite_send_qlt(ipc_mux);
758 	/* Updates the TDs with ul_list */
759 	(void)ipc_imem_ul_write_td(ipc_mux->imem);
/* Round-robin over all sessions (resuming at rr_next_session) and encode
 * pending UL packets for each eligible one, up to MUX_MAX_UL_DG_ENTRIES
 * per session. adb_prep_ongoing acts as a re-entrancy guard for the
 * encode pass. Returns whether anything was encoded (return line elided
 * in this capture).
 * NOTE(review): declarations of i/session_id/dg_n/updated and some
 * continue/return lines are elided in this capture.
 */
765 bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
767 	struct sk_buff_head *ul_list;
768 	struct mux_session *session;
/* Bail out when MUX is down or an encode pass is already running. */
774 	if (!ipc_mux || ipc_mux->state != MUX_S_ACTIVE ||
775 	    ipc_mux->adb_prep_ongoing)
778 	ipc_mux->adb_prep_ongoing = true;
780 	for (i = 0; i < ipc_mux->nr_sessions; i++) {
781 		session_id = ipc_mux->rr_next_session;
782 		session = &ipc_mux->session[session_id];
784 		/* Go to next handle rr_next_session overflow */
/* Advance the round-robin cursor with wrap-around. */
785 		ipc_mux->rr_next_session++;
786 		if (ipc_mux->rr_next_session >= ipc_mux->nr_sessions)
787 			ipc_mux->rr_next_session = 0;
/* Skip closed, flow-controlled, or TX-stopped sessions. */
789 		if (!session->wwan || session->flow_ctl_mask ||
790 		    session->net_tx_stop)
793 		ul_list = &session->ul_list;
795 		/* Is something pending in UL and flow ctrl off */
796 		dg_n = skb_queue_len(ul_list);
/* Cap per-session work at the datagram-entry limit. */
797 		if (dg_n > MUX_MAX_UL_DG_ENTRIES)
798 			dg_n = MUX_MAX_UL_DG_ENTRIES;
801 			/* Nothing to do for ipc_mux session
802 			 * -> try next session id.
806 		updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id, session,
807 						 ul_list, &ipc_mux->ul_adb,
/* Pass complete: allow the next encode invocation. */
811 	ipc_mux->adb_prep_ongoing = false;
/* Called when the modem has consumed an encoded UL skb: in byte-based
 * (MUX_UL) mode subtract the transmitted ADGH length from the global
 * pending-bytes counter, then reset the skb and return it to the ADB
 * free list for reuse.
 * NOTE(review): the adgh_len declaration and the skb-reset statements
 * are elided in this capture.
 */
815 void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
817 	struct mux_adgh *adgh;
820 	adgh = (struct mux_adgh *)skb->data;
821 	adgh_len = le16_to_cpu(adgh->length);
/* Only ADGH frames in byte-accounting mode affect the counter. */
823 	if (adgh->signature == cpu_to_le32(MUX_SIG_ADGH) &&
824 	    ipc_mux->ul_flow == MUX_UL)
825 		ipc_mux->ul_data_pend_bytes = ipc_mux->ul_data_pend_bytes -
828 	if (ipc_mux->ul_flow == MUX_UL)
829 		dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld",
830 			ipc_mux->ul_data_pend_bytes);
832 	/* Reset the skb settings. */
836 	/* Add the consumed ADB to the free list. */
837 	skb_queue_tail((&ipc_mux->ul_adb.free_list), skb);
840 /* Start the NETIF uplink send transfer in MUX mode. */
/* Task-queue callback scheduled by ipc_mux_ul_trigger_encode(): run one
 * UL encode pass, start the doorbell-delay timer, and clear the
 * transmit-pending debounce flag so the next packet can schedule a new
 * pass. (Return statement elided in this capture.)
 */
841 static int ipc_mux_tq_ul_trigger_encode(struct iosm_imem *ipc_imem, int arg,
842 					void *msg, size_t size)
844 	struct iosm_mux *ipc_mux = ipc_imem->mux;
845 	bool ul_data_pend = false;
847 	/* Add session UL data to a ADB and ADGH */
848 	ul_data_pend = ipc_mux_ul_data_encode(ipc_mux);
850 	/* Delay the doorbell irq */
851 	ipc_imem_td_update_timer_start(ipc_mux->imem);
853 	/* reset the debounce flag */
854 	ipc_mux->ev_mux_net_transmit_pending = false;
859 int ipc_mux_ul_trigger_encode(struct iosm_mux *ipc_mux, int if_id,
862 struct mux_session *session = &ipc_mux->session[if_id];
865 if (ipc_mux->channel &&
866 ipc_mux->channel->state != IMEM_CHANNEL_ACTIVE) {
867 dev_err(ipc_mux->dev,
868 "channel state is not IMEM_CHANNEL_ACTIVE");
872 if (!session->wwan) {
873 dev_err(ipc_mux->dev, "session net ID is NULL");
878 /* Session is under flow control.
879 * Check if packet can be queued in session list, if not
882 if (skb_queue_len(&session->ul_list) >=
883 (session->net_tx_stop ?
884 IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD :
885 (IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD *
886 IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR))) {
887 ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
892 /* Add skb to the uplink skb accumulator. */
893 skb_queue_tail(&session->ul_list, skb);
895 /* Inform the IPC kthread to pass uplink IP packets to CP. */
896 if (!ipc_mux->ev_mux_net_transmit_pending) {
897 ipc_mux->ev_mux_net_transmit_pending = true;
898 ret = ipc_task_queue_send_task(ipc_mux->imem,
899 ipc_mux_tq_ul_trigger_encode, 0,
904 dev_dbg(ipc_mux->dev, "mux ul if[%d] qlen=%d/%u, len=%d/%d, prio=%d",
905 if_id, skb_queue_len(&session->ul_list), session->ul_list.qlen,
906 skb->len, skb->truesize, skb->priority);