1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2020-21 Intel Corporation.
4 */
6 #include <linux/nospec.h>
8 #include "iosm_ipc_imem_ops.h"
9 #include "iosm_ipc_mux_codec.h"
10 #include "iosm_ipc_task_queue.h"
12 /* Test the link power state and send a MUX command in blocking mode. */
13 static int ipc_mux_tq_cmd_send(struct iosm_imem *ipc_imem, int arg, void *msg,
16 struct iosm_mux *ipc_mux = ipc_imem->mux;
17 const struct mux_acb *acb = msg;
19 skb_queue_tail(&ipc_mux->channel->ul_list, acb->skb);
20 ipc_imem_ul_send(ipc_mux->imem);
25 static int ipc_mux_acb_send(struct iosm_mux *ipc_mux, bool blocking)
27 struct completion *completion = &ipc_mux->channel->ul_sem;
28 int ret = ipc_task_queue_send_task(ipc_mux->imem, ipc_mux_tq_cmd_send,
30 sizeof(ipc_mux->acb), false);
32 dev_err(ipc_mux->dev, "unable to send mux command");
36 /* if blocking, suspend the app and wait for irq in the flash or
37 * crash phase. Return an error on timeout to indicate failure.
38 */
40 u32 wait_time_milliseconds = IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT;
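/* Re-arm the completion before blocking; the command-response decoder
 * (ipc_mux_dl_cmdresps_decode_process()) completes ul_sem when the
 * matching response arrives from CP.
 */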
42 reinit_completion(completion);
44 if (wait_for_completion_interruptible_timeout
45 (completion, msecs_to_jiffies(wait_time_milliseconds)) ==
47 dev_err(ipc_mux->dev, "ch[%d] timeout",
49 ipc_uevent_send(ipc_mux->imem->dev, UEVENT_MDM_TIMEOUT);
57 /* Initialize the aggregated command block (ACB) header. */
58 static void ipc_mux_acb_init(struct iosm_mux *ipc_mux)
60 struct mux_acb *acb = &ipc_mux->acb;
61 struct mux_acbh *header;
63 header = (struct mux_acbh *)(acb->skb)->data;
64 header->block_length = cpu_to_le32(sizeof(struct mux_acbh));
65 header->first_cmd_index = header->block_length;
66 header->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ACBH);
67 header->sequence_nr = cpu_to_le16(ipc_mux->acb_tx_sequence_nr++);
70 /* Add a command to the ACB. */
71 static struct mux_cmdh *ipc_mux_acb_add_cmd(struct iosm_mux *ipc_mux, u32 cmd,
72 void *param, u32 param_size)
74 struct mux_acbh *header;
75 struct mux_cmdh *cmdh;
79 header = (struct mux_acbh *)(acb->skb)->data;
80 cmdh = (struct mux_cmdh *)
81 ((acb->skb)->data + le32_to_cpu(header->block_length));
83 cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
84 cmdh->command_type = cpu_to_le32(cmd);
85 cmdh->if_id = acb->if_id;
88 cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_cmdh, param) +
90 cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
92 memcpy(&cmdh->param, param, param_size);
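/* Reserve the header and command bytes in the skb. block_length still
 * holds only the ACBH size here (set in ipc_mux_acb_init()), so the put
 * length covers the block header plus this single command.
 */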
94 skb_put(acb->skb, le32_to_cpu(header->block_length) +
95 le16_to_cpu(cmdh->cmd_len));
100 /* Prepare a MUX Lite command header. */
101 static struct mux_lite_cmdh *ipc_mux_lite_add_cmd(struct iosm_mux *ipc_mux,
102 u32 cmd, struct mux_acb *acb,
103 void *param, u32 param_size)
105 struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)acb->skb->data;
107 cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
108 cmdh->command_type = cpu_to_le32(cmd);
109 cmdh->if_id = acb->if_id;
113 cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_lite_cmdh, param) +
115 cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
118 memcpy(&cmdh->param, param, param_size);
120 skb_put(acb->skb, le16_to_cpu(cmdh->cmd_len));
125 static int ipc_mux_acb_alloc(struct iosm_mux *ipc_mux)
127 struct mux_acb *acb = &ipc_mux->acb;
131 /* Allocate skb memory for the uplink buffer. */
132 skb = ipc_pcie_alloc_skb(ipc_mux->pcie, MUX_MAX_UL_ACB_BUF_SIZE,
133 GFP_ATOMIC, &mapping, DMA_TO_DEVICE, 0);
137 /* Save the skb address. */
140 memset(skb->data, 0, MUX_MAX_UL_ACB_BUF_SIZE);
145 int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
146 u32 transaction_id, union mux_cmd_param *param,
147 size_t res_size, bool blocking, bool respond)
149 struct mux_acb *acb = &ipc_mux->acb;
150 union mux_type_cmdh cmdh;
154 ret = ipc_mux_acb_alloc(ipc_mux);
158 if (ipc_mux->protocol == MUX_LITE) {
159 cmdh.ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb,
163 cmdh.ack_lite->transaction_id =
164 cpu_to_le32(transaction_id);
166 /* Initialize the ACB header. */
167 ipc_mux_acb_init(ipc_mux);
168 cmdh.ack_aggr = ipc_mux_acb_add_cmd(ipc_mux, cmd_type, param,
172 cmdh.ack_aggr->transaction_id =
173 cpu_to_le32(transaction_id);
175 ret = ipc_mux_acb_send(ipc_mux, blocking);
180 void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on)
182 /* Inform the network interface to start/stop flow ctrl */
183 ipc_wwan_tx_flowctrl(session->wwan, idx, on);
186 static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
187 union mux_cmd_param param,
188 __le32 command_type, u8 if_id,
189 __le32 transaction_id)
191 struct mux_acb *acb = &ipc_mux->acb;
193 switch (le32_to_cpu(command_type)) {
194 case MUX_CMD_OPEN_SESSION_RESP:
195 case MUX_CMD_CLOSE_SESSION_RESP:
196 /* Resume the control application. */
197 acb->got_param = param;
200 case MUX_LITE_CMD_FLOW_CTL_ACK:
201 /* This command type is not expected as a response for
202 * the aggregation version of the protocol, so return non-zero.
203 */
204 if (ipc_mux->protocol != MUX_LITE)
207 dev_dbg(ipc_mux->dev, "if_id %u FLOW_CTL_ACK %u received",
208 if_id, le32_to_cpu(transaction_id));
211 case IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK:
212 /* This command type is not expected as a response for
213 * the Lite version of the protocol, so return non-zero.
214 */
215 if (ipc_mux->protocol == MUX_LITE)
223 acb->wanted_response = MUX_CMD_INVALID;
224 acb->got_response = le32_to_cpu(command_type);
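/* Wake up ipc_mux_acb_send(): it waits on this same ul_sem completion
 * when the command was issued in blocking mode.
 */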
225 complete(&ipc_mux->channel->ul_sem);
230 static int ipc_mux_dl_cmds_decode_process(struct iosm_mux *ipc_mux,
231 union mux_cmd_param *param,
232 __le32 command_type, u8 if_id,
233 __le16 cmd_len, int size)
235 struct mux_session *session;
236 struct hrtimer *adb_timer;
238 dev_dbg(ipc_mux->dev, "if_id[%d]: dlcmds decode process %d",
239 if_id, le32_to_cpu(command_type));
241 switch (le32_to_cpu(command_type)) {
242 case MUX_LITE_CMD_FLOW_CTL:
243 case IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE:
245 if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
246 dev_err(ipc_mux->dev, "if_id [%d] not valid",
248 return -EINVAL; /* No session interface id. */
251 session = &ipc_mux->session[if_id];
252 adb_timer = &ipc_mux->imem->adb_timer;
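/* An all-ones mask enables flow control for the session: UL queueing is
 * stopped via net_tx_stop and any pending ADB is flushed. A zero mask
 * disables flow control and lets the low-threshold logic re-enable
 * transmission.
 */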
254 if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) {
255 /* Backward Compatibility */
256 if (cmd_len == cpu_to_le16(size))
257 session->flow_ctl_mask =
258 le32_to_cpu(param->flow_ctl.mask);
260 session->flow_ctl_mask = ~0;
261 /* if CP asks for FLOW CTRL Enable
262 * then set our internal flow control Tx flag
263 * to limit uplink session queueing
265 session->net_tx_stop = true;
267 /* We have to finish the ADB here.
268 * Otherwise any already queued data
269 * would only be sent to CP once the ADB
270 * fills up because of some other session.
271 */
272 if (ipc_mux->protocol == MUX_AGGREGATION) {
273 ipc_mux_ul_adb_finish(ipc_mux);
274 ipc_imem_hrtimer_stop(adb_timer);
276 /* Update the stats */
277 session->flow_ctl_en_cnt++;
278 } else if (param->flow_ctl.mask == 0) {
279 /* Just reset the flow control mask and let
280 * mux_flow_ctrl_low_thre_b take control of
281 * our internal Tx flag, re-enabling kernel
282 * flow control.
283 */
284 dev_dbg(ipc_mux->dev, "if_id[%u] flow_ctl mask 0x%08X",
285 if_id, le32_to_cpu(param->flow_ctl.mask));
286 /* Backward Compatibility */
287 if (cmd_len == cpu_to_le16(size))
288 session->flow_ctl_mask =
289 le32_to_cpu(param->flow_ctl.mask);
291 session->flow_ctl_mask = 0;
292 /* Update the stats */
293 session->flow_ctl_dis_cnt++;
298 ipc_mux->acc_adb_size = 0;
299 ipc_mux->acc_payload_size = 0;
301 dev_dbg(ipc_mux->dev, "if_id[%u] FLOW CTRL 0x%08X", if_id,
302 le32_to_cpu(param->flow_ctl.mask));
305 case MUX_LITE_CMD_LINK_STATUS_REPORT:
314 /* Decode a command block and send the appropriate response. */
315 static void ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
317 struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data;
318 __le32 trans_id = cmdh->transaction_id;
321 if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
322 cmdh->command_type, cmdh->if_id,
323 cmdh->transaction_id)) {
324 /* Unable to decode command response indicates the cmd_type
325 * may be a command instead of a response. So try decoding it.
326 */
327 size = offsetof(struct mux_lite_cmdh, param) +
328 sizeof(cmdh->param.flow_ctl);
329 if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
332 cmdh->cmd_len, size)) {
333 /* Decoded command may need a response. Give the
334 * response according to the command type.
336 union mux_cmd_param *mux_cmd = NULL;
338 u32 cmd = MUX_LITE_CMD_LINK_STATUS_REPORT_RESP;
340 if (cmdh->command_type ==
341 cpu_to_le32(MUX_LITE_CMD_LINK_STATUS_REPORT)) {
342 mux_cmd = &cmdh->param;
343 mux_cmd->link_status_resp.response =
344 cpu_to_le32(MUX_CMD_RESP_SUCCESS);
345 /* response field is u32 */
347 } else if (cmdh->command_type ==
348 cpu_to_le32(MUX_LITE_CMD_FLOW_CTL)) {
349 cmd = MUX_LITE_CMD_FLOW_CTL_ACK;
354 if (ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
355 le32_to_cpu(trans_id),
356 mux_cmd, size, false,
358 dev_err(ipc_mux->dev,
359 "if_id %d: cmd send failed",
365 /* Pass the DL packet to the netif layer. */
366 static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
367 struct iosm_wwan *wwan, u32 offset,
368 u8 service_class, struct sk_buff *skb,
371 struct sk_buff *dest_skb = skb_clone(skb, GFP_ATOMIC);
376 skb_pull(dest_skb, offset);
377 skb_trim(dest_skb, pkt_len);
378 /* Pass the packet to the netif layer. */
379 dest_skb->priority = service_class;
381 return ipc_wwan_receive(wwan, dest_skb, false, if_id);
384 /* Decode Flow Credit Table in the block */
385 static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
386 unsigned char *block)
388 struct ipc_mem_lite_gen_tbl *fct = (struct ipc_mem_lite_gen_tbl *)block;
389 struct iosm_wwan *wwan;
393 if (fct->vfl_length != sizeof(fct->vfl.nr_of_bytes)) {
394 dev_err(ipc_mux->dev, "unexpected FCT length: %d",
400 if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
401 dev_err(ipc_mux->dev, "not supported if_id: %d", if_id);
405 /* Is the session active ? */
406 if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
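/* The clamp above prevents the untrusted if_id received from CP from
 * being used for a speculative out-of-bounds array access.
 */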
407 wwan = ipc_mux->session[if_id].wwan;
409 dev_err(ipc_mux->dev, "session Net ID is NULL");
413 ul_credits = le32_to_cpu(fct->vfl.nr_of_bytes);
415 dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d",
416 if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits);
418 /* Update the Flow Credit information from ADB */
419 ipc_mux->session[if_id].ul_flow_credits += ul_credits;
421 /* Check whether the TX can be started */
422 if (ipc_mux->session[if_id].ul_flow_credits > 0) {
423 ipc_mux->session[if_id].net_tx_stop = false;
424 ipc_mux_netif_tx_flowctrl(&ipc_mux->session[if_id],
425 ipc_mux->session[if_id].if_id, false);
429 /* Decode non-aggregated datagram */
430 static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
433 u32 pad_len, packet_offset, adgh_len;
434 struct iosm_wwan *wwan;
435 struct mux_adgh *adgh;
436 u8 *block = skb->data;
440 adgh = (struct mux_adgh *)block;
442 if (adgh->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH)) {
443 dev_err(ipc_mux->dev, "invalid ADGH signature received");
448 if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
449 dev_err(ipc_mux->dev, "invalid if_id while decoding %d", if_id);
453 /* Is the session active ? */
454 if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
455 wwan = ipc_mux->session[if_id].wwan;
457 dev_err(ipc_mux->dev, "session Net ID is NULL");
461 /* Store the pad len for the corresponding session.
462 * Pad bytes are negotiated in the open session, less the header size
463 * (see the session management chapter for details).
464 * If the resulting padding is zero or less, the additional head padding
465 * is omitted. E.g., if HEAD_PAD_LEN = 16 or less, this field is
466 * omitted; if HEAD_PAD_LEN = 20, then this field will have 4 bytes
467 * set to zero.
468 */
469 pad_len =
470 ipc_mux->session[if_id].dl_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
471 packet_offset = sizeof(*adgh) + pad_len;
473 if_id += ipc_mux->wwan_q_offset;
474 adgh_len = le16_to_cpu(adgh->length);
476 /* Pass the packet to the netif layer */
477 rc = ipc_mux_net_receive(ipc_mux, if_id, wwan, packet_offset,
478 adgh->service_class, skb,
479 adgh_len - packet_offset);
481 dev_err(ipc_mux->dev, "mux adgh decoding error");
484 ipc_mux->session[if_id].flush = 1;
487 static void ipc_mux_dl_acbcmd_decode(struct iosm_mux *ipc_mux,
488 struct mux_cmdh *cmdh, int size)
490 u32 link_st = IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP;
491 u32 fctl_dis = IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE;
492 u32 fctl_ena = IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE;
493 u32 fctl_ack = IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK;
494 union mux_cmd_param *cmd_p = NULL;
498 if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
499 cmdh->command_type, cmdh->if_id,
500 cmdh->cmd_len, size)) {
502 if (cmdh->command_type == cpu_to_le32(link_st)) {
503 cmd_p = &cmdh->param;
504 cmd_p->link_status_resp.response = cpu_to_le32(MUX_CMD_RESP_SUCCESS);
505 } else if ((cmdh->command_type == cpu_to_le32(fctl_ena)) ||
506 (cmdh->command_type == cpu_to_le32(fctl_dis))) {
511 trans_id = le32_to_cpu(cmdh->transaction_id);
512 ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
513 trans_id, cmd_p, size, false, true);
517 /* Decode an aggregated command block. */
518 static void ipc_mux_dl_acb_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
520 struct mux_acbh *acbh;
521 struct mux_cmdh *cmdh;
526 acbh = (struct mux_acbh *)(skb->data);
527 block = (u8 *)(skb->data);
529 next_cmd_index = le32_to_cpu(acbh->first_cmd_index);
530 next_cmd_index = array_index_nospec(next_cmd_index,
531 sizeof(struct mux_cmdh));
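/* Walk the command chain: each command header carries the offset of the
 * next command, and an offset of zero terminates the list.
 */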
533 while (next_cmd_index != 0) {
534 cmdh = (struct mux_cmdh *)&block[next_cmd_index];
535 next_cmd_index = le32_to_cpu(cmdh->next_cmd_index);
536 if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
539 cmdh->transaction_id)) {
540 size = offsetof(struct mux_cmdh, param) +
541 sizeof(cmdh->param.flow_ctl);
542 ipc_mux_dl_acbcmd_decode(ipc_mux, cmdh, size);
547 /* Process the datagrams of an aggregated datagram table. */
548 static int mux_dl_process_dg(struct iosm_mux *ipc_mux, struct mux_adbh *adbh,
549 struct mux_adth_dg *dg, struct sk_buff *skb,
550 int if_id, int nr_of_dg)
552 u32 dl_head_pad_len = ipc_mux->session[if_id].dl_head_pad_len;
553 u32 packet_offset, i, rc, dg_len;
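/* Each datagram index must point past the ADB header and stay within
 * the advertised block length before the payload is handed to the
 * network layer.
 */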
555 for (i = 0; i < nr_of_dg; i++, dg++) {
556 if (le32_to_cpu(dg->datagram_index)
557 < sizeof(struct mux_adbh))
560 /* Is the packet inside the ADB ? */
561 if (le32_to_cpu(dg->datagram_index) >=
562 le32_to_cpu(adbh->block_length)) {
566 packet_offset = le32_to_cpu(dg->datagram_index) + dl_head_pad_len;
568 dg_len = le16_to_cpu(dg->datagram_length);
569 /* Pass the packet to the netif layer. */
570 rc = ipc_mux_net_receive(ipc_mux, if_id, ipc_mux->wwan,
572 dg->service_class, skb,
573 dg_len - dl_head_pad_len);
583 /* Decode an aggregated data block. */
584 static void mux_dl_adb_decode(struct iosm_mux *ipc_mux,
587 struct mux_adth_dg *dg;
588 struct iosm_wwan *wwan;
589 struct mux_adbh *adbh;
590 struct mux_adth *adth;
596 adbh = (struct mux_adbh *)block;
598 /* Process the aggregated datagram tables. */
599 adth_index = le32_to_cpu(adbh->first_table_index);
601 /* Has CP sent an empty ADB ? */
602 if (adth_index < 1) {
603 dev_err(ipc_mux->dev, "unexpected empty ADB");
607 /* Loop through mixed session tables. */
609 /* Get the reference to the table header. */
610 adth = (struct mux_adth *)(block + adth_index);
612 /* Get the interface id and map it to the netif id. */
614 if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
617 if_id = array_index_nospec(if_id,
618 IPC_MEM_MUX_IP_SESSION_ENTRIES);
620 /* Is the session active ? */
621 wwan = ipc_mux->session[if_id].wwan;
625 /* Consistency checks for aggregated datagram table. */
626 if (adth->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH))
629 if (le16_to_cpu(adth->table_length) < sizeof(struct mux_adth))
632 /* Calculate the number of datagrams. */
633 nr_of_dg = (le16_to_cpu(adth->table_length) -
634 sizeof(struct mux_adth)) /
635 sizeof(struct mux_adth_dg);
637 /* Is the datagram table empty ? */
639 dev_err(ipc_mux->dev,
640 "adthidx=%u,nr_of_dg=%d,next_tblidx=%u",
641 adth_index, nr_of_dg,
642 le32_to_cpu(adth->next_table_index));
644 /* Move to the next aggregated datagram table. */
645 adth_index = le32_to_cpu(adth->next_table_index);
649 /* New aggregated datagram table. */
651 if (mux_dl_process_dg(ipc_mux, adbh, dg, skb, if_id,
655 /* mark session for final flush */
656 ipc_mux->session[if_id].flush = 1;
658 /* Move to the next aggregated datagram table. */
659 adth_index = le32_to_cpu(adth->next_table_index);
666 /**
667 * ipc_mux_dl_decode - Route the DL packet through the IP MUX layer
668 * depending on the header.
669 * @ipc_mux: Pointer to MUX data-struct
670 * @skb: Pointer to ipc_skb.
671 */
672 void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
679 /* Decode the MUX header type. */
680 signature = le32_to_cpup((__le32 *)skb->data);
683 case IOSM_AGGR_MUX_SIG_ADBH: /* Aggregated Data Block Header */
684 mux_dl_adb_decode(ipc_mux, skb);
686 case IOSM_AGGR_MUX_SIG_ADGH:
687 ipc_mux_dl_adgh_decode(ipc_mux, skb);
690 ipc_mux_dl_fcth_decode(ipc_mux, skb->data);
692 case IOSM_AGGR_MUX_SIG_ACBH: /* Aggregated Command Block Header */
693 ipc_mux_dl_acb_decode(ipc_mux, skb);
696 ipc_mux_dl_cmd_decode(ipc_mux, skb);
700 dev_err(ipc_mux->dev, "invalid ABH signature");
703 ipc_pcie_kfree_skb(ipc_mux->pcie, skb);
706 static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
707 struct mux_adb *ul_adb, u32 type)
709 /* Take the first element of the free list. */
710 struct sk_buff *skb = skb_dequeue(&ul_adb->free_list);
711 u32 no_if = IPC_MEM_MUX_IP_SESSION_ENTRIES;
717 return -EBUSY; /* Wait for a free ADB skb. */
719 /* Mark it as UL ADB to select the right free operation. */
720 IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB;
723 case IOSM_AGGR_MUX_SIG_ADBH:
724 /* Save the ADB memory settings. */
725 ul_adb->dest_skb = skb;
726 ul_adb->buf = skb->data;
727 ul_adb->size = IPC_MEM_MAX_ADB_BUF_SIZE;
729 /* reset statistic counter */
731 ul_adb->payload_size = 0;
732 ul_adb->dg_cnt_total = 0;
734 /* Initialize the ADBH. */
735 ul_adb->adbh = (struct mux_adbh *)ul_adb->buf;
736 memset(ul_adb->adbh, 0, sizeof(struct mux_adbh));
737 ul_adb->adbh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADBH);
738 ul_adb->adbh->block_length =
739 cpu_to_le32(sizeof(struct mux_adbh));
740 next_tb_id = (unsigned int *)&ul_adb->adbh->first_table_index;
741 ul_adb->next_table_index = next_tb_id;
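/* next_table_index currently points at the ADBH first_table_index
 * field; as tables are appended it is moved to each table's
 * next_table_index so the chain can be linked in place.
 */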
743 /* Clear the local copy of DGs for new ADB */
744 memset(ul_adb->dg, 0, sizeof(ul_adb->dg));
746 /* Clear the DG count and QLT updated status for new ADB */
747 for (if_id = 0; if_id < no_if; if_id++) {
748 ul_adb->dg_count[if_id] = 0;
749 ul_adb->qlt_updated[if_id] = 0;
753 case IOSM_AGGR_MUX_SIG_ADGH:
754 /* Save the ADB memory settings. */
755 ul_adb->dest_skb = skb;
756 ul_adb->buf = skb->data;
757 ul_adb->size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;
758 /* reset statistic counter */
760 ul_adb->payload_size = 0;
761 ul_adb->dg_cnt_total = 0;
763 ul_adb->adgh = (struct mux_adgh *)skb->data;
764 memset(ul_adb->adgh, 0, sizeof(struct mux_adgh));
768 qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
769 (MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl));
771 if (qlt_size > IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE) {
772 dev_err(ipc_mux->dev,
773 "can't support. QLT size:%d SKB size: %d",
774 qlt_size, IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE);
778 ul_adb->qlth_skb = skb;
779 memset((ul_adb->qlth_skb)->data, 0, qlt_size);
780 skb_put(skb, qlt_size);
787 static void ipc_mux_ul_adgh_finish(struct iosm_mux *ipc_mux)
789 struct mux_adb *ul_adb = &ipc_mux->ul_adb;
794 if (!ul_adb->dest_skb) {
795 dev_err(ipc_mux->dev, "no dest skb");
799 adgh_len = le16_to_cpu(ul_adb->adgh->length);
800 skb_put(ul_adb->dest_skb, adgh_len);
801 skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
802 ul_adb->dest_skb = NULL;
804 if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
805 struct mux_session *session;
807 session = &ipc_mux->session[ul_adb->adgh->if_id];
808 str = "available_credits";
809 bytes = (long long)session->ul_flow_credits;
813 bytes = ipc_mux->ul_data_pend_bytes;
814 ipc_mux->ul_data_pend_bytes = ipc_mux->ul_data_pend_bytes +
818 dev_dbg(ipc_mux->dev, "UL ADGH: size=%u, if_id=%d, payload=%d, %s=%lld",
819 adgh_len, ul_adb->adgh->if_id, ul_adb->payload_size,
823 static void ipc_mux_ul_encode_adth(struct iosm_mux *ipc_mux,
824 struct mux_adb *ul_adb, int *out_offset)
826 int i, qlt_size, offset = *out_offset;
827 struct mux_qlth *p_adb_qlt;
828 struct mux_adth_dg *dg;
829 struct mux_adth *adth;
833 qlt_size = offsetof(struct mux_qlth, ql) +
834 MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
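/* For every session that queued datagrams, append an ADTH followed by
 * the copied datagram entries; if the session's queue level table was
 * updated, append its QLTH as well. Each table is linked through the
 * previously saved next_table_index pointer.
 */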
836 for (i = 0; i < ipc_mux->nr_sessions; i++) {
837 if (ul_adb->dg_count[i] > 0) {
838 adth_dg_size = offsetof(struct mux_adth, dg) +
839 ul_adb->dg_count[i] * sizeof(*dg);
841 *ul_adb->next_table_index = offset;
842 adth = (struct mux_adth *)&ul_adb->buf[offset];
843 next_tb_id = (unsigned int *)&adth->next_table_index;
844 ul_adb->next_table_index = next_tb_id;
845 offset += adth_dg_size;
846 adth->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH);
848 adth->table_length = cpu_to_le16(adth_dg_size);
849 adth_dg_size -= offsetof(struct mux_adth, dg);
850 memcpy(adth->dg, ul_adb->dg[i], adth_dg_size);
854 if (ul_adb->qlt_updated[i]) {
855 *ul_adb->next_table_index = offset;
856 p_adb_qlt = (struct mux_qlth *)&ul_adb->buf[offset];
857 ul_adb->next_table_index =
858 (u32 *)&p_adb_qlt->next_table_index;
859 memcpy(p_adb_qlt, ul_adb->pp_qlt[i], qlt_size);
863 *out_offset = offset;
866 /**
867 * ipc_mux_ul_adb_finish - Add the TD of the aggregated session packets to TDR.
868 * @ipc_mux: Pointer to MUX data-struct.
869 */
870 void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux)
872 bool ul_data_pend = false;
873 struct mux_adb *ul_adb;
877 ul_adb = &ipc_mux->ul_adb;
878 if (!ul_adb->dest_skb)
881 offset = *ul_adb->next_table_index;
882 ipc_mux_ul_encode_adth(ipc_mux, ul_adb, &offset);
883 ul_adb->adbh->block_length = cpu_to_le32(offset);
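/* Sanity check: if the encoded block exceeds the allocated ADB buffer,
 * the skb is not queued for transmission.
 */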
885 if (le32_to_cpu(ul_adb->adbh->block_length) > ul_adb->size) {
886 ul_adb->dest_skb = NULL;
890 *ul_adb->next_table_index = 0;
891 ul_adb->adbh->sequence_nr = cpu_to_le16(ipc_mux->adb_tx_sequence_nr++);
892 skb_put(ul_adb->dest_skb, le32_to_cpu(ul_adb->adbh->block_length));
894 spin_lock_irqsave(&(&ipc_mux->channel->ul_list)->lock, flags);
895 __skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
896 spin_unlock_irqrestore(&(&ipc_mux->channel->ul_list)->lock, flags);
898 ul_adb->dest_skb = NULL;
899 /* Updates the TDs with ul_list */
900 ul_data_pend = ipc_imem_ul_write_td(ipc_mux->imem);
902 /* Delay the doorbell irq */
904 ipc_imem_td_update_timer_start(ipc_mux->imem);
906 ipc_mux->acc_adb_size += le32_to_cpu(ul_adb->adbh->block_length);
907 ipc_mux->acc_payload_size += ul_adb->payload_size;
908 ipc_mux->ul_data_pend_bytes += ul_adb->payload_size;
911 /* Allocates an ADB from the free list and initializes it with ADBH */
912 static bool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux,
913 struct mux_adb *adb, int *size_needed,
916 bool ret_val = false;
919 if (!adb->dest_skb) {
920 /* Allocate memory for the ADB including the
921 * datagram table header.
922 */
923 status = ipc_mux_ul_skb_alloc(ipc_mux, adb, type);
925 /* Is a pending ADB available ? */
926 ret_val = true; /* None. */
928 /* Reset the needed size only for newly allocated ADB memory */
935 /* Informs the network stack to stop sending further packets for all opened
936 * sessions.
937 */
938 static void ipc_mux_stop_tx_for_all_sessions(struct iosm_mux *ipc_mux)
940 struct mux_session *session;
943 for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
944 session = &ipc_mux->session[idx];
949 session->net_tx_stop = true;
953 /* Sends Queue Level Table of all opened sessions */
954 static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
956 struct ipc_mem_lite_gen_tbl *qlt;
957 struct mux_session *session;
958 bool qlt_updated = false;
962 if (!ipc_mux->initialized || ipc_mux->state != MUX_S_ACTIVE)
965 qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
966 MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
968 for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
969 session = &ipc_mux->session[i];
971 if (!session->wwan || session->flow_ctl_mask)
974 if (ipc_mux_ul_skb_alloc(ipc_mux, &ipc_mux->ul_adb,
976 dev_err(ipc_mux->dev,
977 "no reserved mem to send QLT of if_id: %d", i);
982 qlt = (struct ipc_mem_lite_gen_tbl *)(ipc_mux->ul_adb.qlth_skb)
984 qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
985 qlt->length = cpu_to_le16(qlt_size);
987 qlt->vfl_length = MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
988 qlt->reserved[0] = 0;
989 qlt->reserved[1] = 0;
991 qlt->vfl.nr_of_bytes = cpu_to_le32(session->ul_list.qlen);
993 /* Add QLT to the transfer list. */
994 skb_queue_tail(&ipc_mux->channel->ul_list,
995 ipc_mux->ul_adb.qlth_skb);
998 ipc_mux->ul_adb.qlth_skb = NULL;
1002 /* Updates the TDs with ul_list */
1003 (void)ipc_imem_ul_write_td(ipc_mux->imem);
1008 /* Checks the available credits for the specified session and returns
1009 * number of packets for which credits are available.
1011 static int ipc_mux_ul_bytes_credits_check(struct iosm_mux *ipc_mux,
1012 struct mux_session *session,
1013 struct sk_buff_head *ul_list,
1016 int pkts_to_send = 0;
1017 struct sk_buff *skb;
1020 if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
1021 credits = session->ul_flow_credits;
1023 dev_dbg(ipc_mux->dev,
1024 "FC::if_id[%d] Insuff.Credits/Qlen:%d/%u",
1025 session->if_id, session->ul_flow_credits,
1026 session->ul_list.qlen); /* nr_of_bytes */
1030 credits = IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B -
1031 ipc_mux->ul_data_pend_bytes;
1033 ipc_mux_stop_tx_for_all_sessions(ipc_mux);
1035 dev_dbg(ipc_mux->dev,
1036 "if_id[%d] encod. fail Bytes: %llu, thresh: %d",
1037 session->if_id, ipc_mux->ul_data_pend_bytes,
1038 IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B);
1043 /* Check if there are enough credits/bytes available to send the
1044 * requested max_nr_of_pkts. Otherwise restrict the nr_of_pkts
1045 * depending on available credits.
1047 skb_queue_walk(ul_list, skb)
1049 if (!(credits >= skb->len && pkts_to_send < max_nr_of_pkts))
1051 credits -= skb->len;
1055 return pkts_to_send;
1058 /* Encode the UL IP packet according to Lite spec. */
1059 static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
1060 struct mux_session *session,
1061 struct sk_buff_head *ul_list,
1062 struct mux_adb *adb, int nr_of_pkts)
1064 int offset = sizeof(struct mux_adgh);
1065 int adb_updated = -EINVAL;
1066 struct sk_buff *src_skb;
1067 int aligned_size = 0;
1071 /* Re-calculate the number of packets depending on number of bytes to be
1072 * processed/available credits.
1074 nr_of_pkts = ipc_mux_ul_bytes_credits_check(ipc_mux, session, ul_list,
1077 /* If calculated nr_of_pkts from available credits is <= 0
1078 * then nothing to do.
1080 if (nr_of_pkts <= 0)
1083 /* Read configured UL head_pad_length for session.*/
1084 if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
1085 pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
1087 /* Process all pending UL packets for this session
1088 * depending on the allocated datagram table size.
1090 while (nr_of_pkts > 0) {
1091 /* get destination skb allocated */
1092 if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
1093 IOSM_AGGR_MUX_SIG_ADGH)) {
1094 dev_err(ipc_mux->dev, "no reserved memory for ADGH");
1098 /* Peek at the head of the list. */
1099 src_skb = skb_peek(ul_list);
1101 dev_err(ipc_mux->dev,
1102 "skb peek return NULL with count : %d",
1107 /* Calculate the memory value. */
1108 aligned_size = ALIGN((pad_len + src_skb->len), 4);
1110 ipc_mux->size_needed = sizeof(struct mux_adgh) + aligned_size;
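/* Datagrams are padded to 4-byte alignment inside the ADGH, so the
 * needed size accounts for the aligned payload plus the ADGH header
 * before it is checked against the buffer size below.
 */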
1112 if (ipc_mux->size_needed > adb->size) {
1113 dev_dbg(ipc_mux->dev, "size needed %d, adgh size %d",
1114 ipc_mux->size_needed, adb->size);
1115 /* Return 1 if any IP packet is added to the transfer
1116 * list, otherwise 0.
1117 */
1118 return nr_of_skb ? 1 : 0;
1121 /* Add the buffer (without head padding) to the next pending transfer. */
1122 memcpy(adb->buf + offset + pad_len, src_skb->data,
1125 adb->adgh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH);
1126 adb->adgh->if_id = session_id;
1128 cpu_to_le16(sizeof(struct mux_adgh) + pad_len +
1130 adb->adgh->service_class = src_skb->priority;
1131 adb->adgh->next_count = --nr_of_pkts;
1132 adb->dg_cnt_total++;
1133 adb->payload_size += src_skb->len;
1135 if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS)
1136 /* Decrement the credit value as we are processing the
1137 * datagram from the UL list.
1139 session->ul_flow_credits -= src_skb->len;
1141 /* Remove the processed element from the list and free it. */
1142 src_skb = skb_dequeue(ul_list);
1143 dev_kfree_skb(src_skb);
1146 ipc_mux_ul_adgh_finish(ipc_mux);
1150 /* Send QLT info to modem if pending bytes > high watermark
1151 * in case of mux lite
1153 if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS ||
1154 ipc_mux->ul_data_pend_bytes >=
1155 IPC_MEM_MUX_UL_FLOWCTRL_LOW_B)
1156 adb_updated = ipc_mux_lite_send_qlt(ipc_mux);
1160 /* Updates the TDs with ul_list */
1161 (void)ipc_imem_ul_write_td(ipc_mux->imem);
1167 /**
1168 * ipc_mux_ul_adb_update_ql - Adds Queue Level Table and Queue Level to ADB
1169 * @ipc_mux: pointer to MUX instance data
1170 * @p_adb: pointer to UL aggregated data block
1171 * @session_id: session id
1172 * @qlth_n_ql_size: Length (in bytes) of the queue level table and queue level
1173 * @ul_list: pointer to skb buffer head
1174 */
1175 void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb,
1176 int session_id, int qlth_n_ql_size,
1177 struct sk_buff_head *ul_list)
1179 int qlevel = ul_list->qlen;
1180 struct mux_qlth *p_qlt;
1182 p_qlt = (struct mux_qlth *)p_adb->pp_qlt[session_id];
1184 /* Initialize the QLTH if it has not been done yet */
1185 if (p_adb->qlt_updated[session_id] == 0) {
1186 p_qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
1187 p_qlt->if_id = session_id;
1188 p_qlt->table_length = cpu_to_le16(qlth_n_ql_size);
1189 p_qlt->reserved = 0;
1190 p_qlt->reserved2 = 0;
1193 /* Update Queue Level information always */
1194 p_qlt->ql.nr_of_bytes = cpu_to_le32(qlevel);
1195 p_adb->qlt_updated[session_id] = 1;
1198 /* Update the next table index. */
1199 static int mux_ul_dg_update_tbl_index(struct iosm_mux *ipc_mux,
1201 struct sk_buff_head *ul_list,
1202 struct mux_adth_dg *dg,
1205 struct mux_adb *adb,
1206 struct sk_buff *src_skb)
1208 ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
1209 qlth_n_ql_size, ul_list);
1210 ipc_mux_ul_adb_finish(ipc_mux);
1211 if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
1212 IOSM_AGGR_MUX_SIG_ADBH))
1215 ipc_mux->size_needed = le32_to_cpu(adb->adbh->block_length);
1217 ipc_mux->size_needed += offsetof(struct mux_adth, dg);
1218 ipc_mux->size_needed += qlth_n_ql_size;
1219 ipc_mux->size_needed += sizeof(*dg) + aligned_size;
1223 /* Encode the pending UL datagrams of one session. */
1224 static int mux_ul_dg_encode(struct iosm_mux *ipc_mux, struct mux_adb *adb,
1225 struct mux_adth_dg *dg,
1226 struct sk_buff_head *ul_list,
1227 struct sk_buff *src_skb, int session_id,
1228 int pkt_to_send, u32 qlth_n_ql_size,
1229 int *out_offset, int head_pad_len)
1232 int offset = *out_offset;
1233 unsigned long flags;
1236 while (pkt_to_send > 0) {
1237 /* Peek at the head of the list. */
1238 src_skb = skb_peek(ul_list);
1240 dev_err(ipc_mux->dev,
1241 "skb peek return NULL with count : %d",
1245 aligned_size = ALIGN((head_pad_len + src_skb->len), 4);
1246 ipc_mux->size_needed += sizeof(*dg) + aligned_size;
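/* If this datagram would overflow the current ADB or push the pending
 * UL bytes past the high watermark, close the current ADB and start a
 * fresh one before encoding the datagram.
 */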
1248 if (ipc_mux->size_needed > adb->size ||
1249 ((ipc_mux->size_needed + ipc_mux->ul_data_pend_bytes) >=
1250 IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B)) {
1251 *adb->next_table_index = offset;
1252 if (mux_ul_dg_update_tbl_index(ipc_mux, session_id,
1255 qlth_n_ql_size, adb,
1259 offset = le32_to_cpu(adb->adbh->block_length);
1260 /* Load pointer to next available datagram entry */
1261 dg = adb->dg[session_id] + adb->dg_count[session_id];
1263 /* Add buffer without head padding to next pending transfer. */
1264 memcpy(adb->buf + offset + head_pad_len,
1265 src_skb->data, src_skb->len);
1266 /* Setup datagram entry. */
1267 dg->datagram_index = cpu_to_le32(offset);
1268 dg->datagram_length = cpu_to_le16(src_skb->len + head_pad_len);
1269 dg->service_class = (((struct sk_buff *)src_skb)->priority);
1271 adb->dg_cnt_total++;
1272 adb->payload_size += le16_to_cpu(dg->datagram_length);
1274 adb->dg_count[session_id]++;
1275 offset += aligned_size;
1276 /* Remove the processed element from the list and free it. */
1277 spin_lock_irqsave(&ul_list->lock, flags);
1278 src_skb = __skb_dequeue(ul_list);
1279 spin_unlock_irqrestore(&ul_list->lock, flags);
1281 dev_kfree_skb(src_skb);
1285 *out_offset = offset;
1289 /* Encode the pending UL data of one session into the ADB. */
1290 static int mux_ul_adb_encode(struct iosm_mux *ipc_mux, int session_id,
1291 struct mux_session *session,
1292 struct sk_buff_head *ul_list, struct mux_adb *adb,
1295 int adb_updated = -EINVAL;
1296 int head_pad_len, offset;
1297 struct sk_buff *src_skb = NULL;
1298 struct mux_adth_dg *dg;
1301 /* If any of the opened sessions has set flow control ON, then limit the
1302 * UL data to mux_flow_ctrl_high_thresh_b bytes.
1303 */
1304 if (ipc_mux->ul_data_pend_bytes >=
1305 IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B) {
1306 ipc_mux_stop_tx_for_all_sessions(ipc_mux);
1310 qlth_n_ql_size = offsetof(struct mux_qlth, ql) +
1311 MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
1312 head_pad_len = session->ul_head_pad_len;
1314 if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
1315 head_pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
1317 if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
1318 IOSM_AGGR_MUX_SIG_ADBH))
1321 offset = le32_to_cpu(adb->adbh->block_length);
1323 if (ipc_mux->size_needed == 0)
1324 ipc_mux->size_needed = offset;
1326 /* Calculate the size needed for the ADTH, QLTH and QL */
1327 if (adb->dg_count[session_id] == 0) {
1328 ipc_mux->size_needed += offsetof(struct mux_adth, dg);
1329 ipc_mux->size_needed += qlth_n_ql_size;
1332 dg = adb->dg[session_id] + adb->dg_count[session_id];
1334 if (mux_ul_dg_encode(ipc_mux, adb, dg, ul_list, src_skb,
1335 session_id, pkt_to_send, qlth_n_ql_size, &offset,
1336 head_pad_len) > 0) {
1338 *adb->next_table_index = offset;
1339 ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
1340 qlth_n_ql_size, ul_list);
1341 adb->adbh->block_length = cpu_to_le32(offset);
1347 bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
1349 struct sk_buff_head *ul_list;
1350 struct mux_session *session;
1356 if (!ipc_mux || ipc_mux->state != MUX_S_ACTIVE ||
1357 ipc_mux->adb_prep_ongoing)
1360 ipc_mux->adb_prep_ongoing = true;
1362 for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
1363 session_id = ipc_mux->rr_next_session;
1364 session = &ipc_mux->session[session_id];
1366 /* Go to the next session; handle rr_next_session overflow. */
1367 ipc_mux->rr_next_session++;
1368 if (ipc_mux->rr_next_session >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
1369 ipc_mux->rr_next_session = 0;
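/* Sessions are served round-robin: rr_next_session remembers where to
 * continue on the next invocation so a single busy session does not
 * permanently starve the others.
 */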
1371 if (!session->wwan || session->flow_ctl_mask ||
1372 session->net_tx_stop)
1375 ul_list = &session->ul_list;
1377 /* Is something pending in UL and flow ctrl off */
1378 dg_n = skb_queue_len(ul_list);
1379 if (dg_n > MUX_MAX_UL_DG_ENTRIES)
1380 dg_n = MUX_MAX_UL_DG_ENTRIES;
1383 /* Nothing to do for ipc_mux session
1384 * -> try next session id.
1387 if (ipc_mux->protocol == MUX_LITE)
1388 updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id,
1393 updated = mux_ul_adb_encode(ipc_mux, session_id,
1399 ipc_mux->adb_prep_ongoing = false;
1400 return updated == 1;
1403 /* Calculates the Payload from any given ADB. */
1404 static int ipc_mux_get_payload_from_adb(struct iosm_mux *ipc_mux,
1405 struct mux_adbh *p_adbh)
1407 struct mux_adth_dg *dg;
1408 struct mux_adth *adth;
1409 u32 payload_size = 0;
1413 /* Process the aggregated datagram tables. */
1414 next_table_idx = le32_to_cpu(p_adbh->first_table_index);
1416 if (next_table_idx < sizeof(struct mux_adbh)) {
1417 dev_err(ipc_mux->dev, "unexpected empty ADB");
1418 return payload_size;
1421 while (next_table_idx != 0) {
1422 /* Get the reference to the table header. */
1423 adth = (struct mux_adth *)((u8 *)p_adbh + next_table_idx);
1425 if (adth->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) {
1426 nr_of_dg = (le16_to_cpu(adth->table_length) -
1427 sizeof(struct mux_adth)) /
1428 sizeof(struct mux_adth_dg);
1431 return payload_size;
1435 for (i = 0; i < nr_of_dg; i++, dg++) {
1436 if (le32_to_cpu(dg->datagram_index) <
1437 sizeof(struct mux_adbh)) {
1438 return payload_size;
1441 le16_to_cpu(dg->datagram_length);
1444 next_table_idx = le32_to_cpu(adth->next_table_index);
1447 return payload_size;
1450 void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
1452 union mux_type_header hr;
1456 if (ipc_mux->protocol == MUX_LITE) {
1457 hr.adgh = (struct mux_adgh *)skb->data;
1458 adgh_len = le16_to_cpu(hr.adgh->length);
1459 if (hr.adgh->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH) &&
1460 ipc_mux->ul_flow == MUX_UL)
1461 ipc_mux->ul_data_pend_bytes =
1462 ipc_mux->ul_data_pend_bytes - adgh_len;
1464 hr.adbh = (struct mux_adbh *)(skb->data);
1465 payload = ipc_mux_get_payload_from_adb(ipc_mux, hr.adbh);
1466 ipc_mux->ul_data_pend_bytes -= payload;
1469 if (ipc_mux->ul_flow == MUX_UL)
1470 dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld",
1471 ipc_mux->ul_data_pend_bytes);
1473 /* Reset the skb settings. */
1476 /* Add the consumed ADB to the free list. */
1477 skb_queue_tail((&ipc_mux->ul_adb.free_list), skb);
1480 /* Start the NETIF uplink send transfer in MUX mode. */
1481 static int ipc_mux_tq_ul_trigger_encode(struct iosm_imem *ipc_imem, int arg,
1482 void *msg, size_t size)
1484 struct iosm_mux *ipc_mux = ipc_imem->mux;
1485 bool ul_data_pend = false;
1487 /* Add session UL data to an ADB (aggregation) or ADGH (MUX Lite) */
1488 ul_data_pend = ipc_mux_ul_data_encode(ipc_mux);
1490 if (ipc_mux->protocol == MUX_AGGREGATION)
1491 ipc_imem_adb_timer_start(ipc_mux->imem);
1493 /* Delay the doorbell irq */
1494 ipc_imem_td_update_timer_start(ipc_mux->imem);
1496 /* reset the debounce flag */
1497 ipc_mux->ev_mux_net_transmit_pending = false;
1502 int ipc_mux_ul_trigger_encode(struct iosm_mux *ipc_mux, int if_id,
1503 struct sk_buff *skb)
1505 struct mux_session *session = &ipc_mux->session[if_id];
1508 if (ipc_mux->channel &&
1509 ipc_mux->channel->state != IMEM_CHANNEL_ACTIVE) {
1510 dev_err(ipc_mux->dev,
1511 "channel state is not IMEM_CHANNEL_ACTIVE");
1515 if (!session->wwan) {
1516 dev_err(ipc_mux->dev, "session net ID is NULL");
1521 /* Session is under flow control.
1522 * Check if the packet can be queued in the session list; if not,
1523 * stop the netif tx queue.
1524 */
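/* The queue limit is asymmetric: once net_tx_stop is set, the smaller
 * FCON threshold applies; otherwise the queue may grow up to
 * FCON * FCOFF_THRESHOLD_FACTOR entries before netif flow control is
 * asserted.
 */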
1525 if (skb_queue_len(&session->ul_list) >=
1526 (session->net_tx_stop ?
1527 IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD :
1528 (IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD *
1529 IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR))) {
1530 ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
1535 /* Add skb to the uplink skb accumulator. */
1536 skb_queue_tail(&session->ul_list, skb);
1538 /* Inform the IPC kthread to pass uplink IP packets to CP. */
1539 if (!ipc_mux->ev_mux_net_transmit_pending) {
1540 ipc_mux->ev_mux_net_transmit_pending = true;
1541 ret = ipc_task_queue_send_task(ipc_mux->imem,
1542 ipc_mux_tq_ul_trigger_encode, 0,
1547 dev_dbg(ipc_mux->dev, "mux ul if[%d] qlen=%d/%u, len=%d/%d, prio=%d",
1548 if_id, skb_queue_len(&session->ul_list), session->ul_list.qlen,
1549 skb->len, skb->truesize, skb->priority);