// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/nospec.h>

#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_mux_codec.h"
#include "iosm_ipc_task_queue.h"

/* Test the link power state and send a MUX command in blocking mode. */
static int ipc_mux_tq_cmd_send(struct iosm_imem *ipc_imem, int arg, void *msg,
                               size_t size)
{
        struct iosm_mux *ipc_mux = ipc_imem->mux;
        const struct mux_acb *acb = msg;

        skb_queue_tail(&ipc_mux->channel->ul_list, acb->skb);
        ipc_imem_ul_send(ipc_mux->imem);

        return 0;
}

static int ipc_mux_acb_send(struct iosm_mux *ipc_mux, bool blocking)
{
        struct completion *completion = &ipc_mux->channel->ul_sem;
        int ret = ipc_task_queue_send_task(ipc_mux->imem, ipc_mux_tq_cmd_send,
                                           0, &ipc_mux->acb,
                                           sizeof(ipc_mux->acb), false);
        if (ret) {
                dev_err(ipc_mux->dev, "unable to send mux command");
                return ret;
        }

        /* If blocking, suspend the app and wait for the IRQ in the flash or
         * crash phase. Return -ETIMEDOUT on timeout to indicate failure.
         */
        if (blocking) {
                u32 wait_time_milliseconds = IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT;

                reinit_completion(completion);

                if (wait_for_completion_interruptible_timeout
                   (completion, msecs_to_jiffies(wait_time_milliseconds)) ==
                   0) {
                        dev_err(ipc_mux->dev, "ch[%d] timeout",
                                ipc_mux->channel_id);
                        ipc_uevent_send(ipc_mux->imem->dev, UEVENT_MDM_TIMEOUT);
                        return -ETIMEDOUT;
                }
        }

        return 0;
}

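/* A reading aid for the helpers below, not part of the wire spec: the uplink
 * ACB skb carries a mux_acbh block header whose first_cmd_index points
 * directly past it, followed by a single mux_cmdh command header whose
 * cmd_len covers the header up to and including its parameters.
 */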
/* Initialize the command header. */
static void ipc_mux_acb_init(struct iosm_mux *ipc_mux)
{
        struct mux_acb *acb = &ipc_mux->acb;
        struct mux_acbh *header;

        header = (struct mux_acbh *)(acb->skb)->data;
        header->block_length = cpu_to_le32(sizeof(struct mux_acbh));
        header->first_cmd_index = header->block_length;
        header->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ACBH);
        header->sequence_nr = cpu_to_le16(ipc_mux->acb_tx_sequence_nr++);
}

/* Add a command to the ACB. */
static struct mux_cmdh *ipc_mux_acb_add_cmd(struct iosm_mux *ipc_mux, u32 cmd,
                                            void *param, u32 param_size)
{
        struct mux_acbh *header;
        struct mux_cmdh *cmdh;
        struct mux_acb *acb;

        acb = &ipc_mux->acb;
        header = (struct mux_acbh *)(acb->skb)->data;
        cmdh = (struct mux_cmdh *)
                ((acb->skb)->data + le32_to_cpu(header->block_length));

        cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
        cmdh->command_type = cpu_to_le32(cmd);
        cmdh->if_id = acb->if_id;

        acb->cmd = cmd;
        cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_cmdh, param) +
                                    param_size);
        cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
        if (param)
                memcpy(&cmdh->param, param, param_size);

        skb_put(acb->skb, le32_to_cpu(header->block_length) +
                                        le16_to_cpu(cmdh->cmd_len));

        return cmdh;
}

/* Prepare a MUX Lite command. */
static struct mux_lite_cmdh *ipc_mux_lite_add_cmd(struct iosm_mux *ipc_mux,
                                                  u32 cmd, struct mux_acb *acb,
                                                  void *param, u32 param_size)
{
        struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)acb->skb->data;

        cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
        cmdh->command_type = cpu_to_le32(cmd);
        cmdh->if_id = acb->if_id;

        acb->cmd = cmd;

        cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_lite_cmdh, param) +
                                    param_size);
        cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);

        if (param)
                memcpy(&cmdh->param, param, param_size);

        skb_put(acb->skb, le16_to_cpu(cmdh->cmd_len));

        return cmdh;
}

static int ipc_mux_acb_alloc(struct iosm_mux *ipc_mux)
{
        struct mux_acb *acb = &ipc_mux->acb;
        struct sk_buff *skb;
        dma_addr_t mapping;

        /* Allocate skb memory for the uplink buffer. */
        skb = ipc_pcie_alloc_skb(ipc_mux->pcie, MUX_MAX_UL_ACB_BUF_SIZE,
                                 GFP_ATOMIC, &mapping, DMA_TO_DEVICE, 0);
        if (!skb)
                return -ENOMEM;

        /* Save the skb address. */
        acb->skb = skb;

        memset(skb->data, 0, MUX_MAX_UL_ACB_BUF_SIZE);

        return 0;
}

int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
                             u32 transaction_id, union mux_cmd_param *param,
                             size_t res_size, bool blocking, bool respond)
{
        struct mux_acb *acb = &ipc_mux->acb;
        union mux_type_cmdh cmdh;
        int ret = 0;

        acb->if_id = if_id;
        ret = ipc_mux_acb_alloc(ipc_mux);
        if (ret)
                return ret;

        if (ipc_mux->protocol == MUX_LITE) {
                cmdh.ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb,
                                                     param, res_size);

                if (respond)
                        cmdh.ack_lite->transaction_id =
                                        cpu_to_le32(transaction_id);
        } else {
                /* Initialize the ACB header. */
                ipc_mux_acb_init(ipc_mux);
                cmdh.ack_aggr = ipc_mux_acb_add_cmd(ipc_mux, cmd_type, param,
                                                    res_size);

                if (respond)
                        cmdh.ack_aggr->transaction_id =
                                        cpu_to_le32(transaction_id);
        }
        ret = ipc_mux_acb_send(ipc_mux, blocking);

        return ret;
}

void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on)
{
        /* Inform the network interface to start/stop flow ctrl */
        ipc_wwan_tx_flowctrl(session->wwan, idx, on);
}

static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
                                              union mux_cmd_param param,
                                              __le32 command_type, u8 if_id,
                                              __le32 transaction_id)
{
        struct mux_acb *acb = &ipc_mux->acb;

        switch (le32_to_cpu(command_type)) {
        case MUX_CMD_OPEN_SESSION_RESP:
        case MUX_CMD_CLOSE_SESSION_RESP:
                /* Resume the control application. */
                acb->got_param = param;
                break;

        case MUX_LITE_CMD_FLOW_CTL_ACK:
                /* This command type is not expected as a response for the
                 * aggregation version of the protocol, so return non-zero.
                 */
                if (ipc_mux->protocol != MUX_LITE)
                        return -EINVAL;

                dev_dbg(ipc_mux->dev, "if_id %u FLOW_CTL_ACK %u received",
                        if_id, le32_to_cpu(transaction_id));
                break;

        case IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK:
                /* This command type is not expected as a response for the
                 * Lite version of the protocol, so return non-zero.
                 */
                if (ipc_mux->protocol == MUX_LITE)
                        return -EINVAL;
                break;

        default:
                return -EINVAL;
        }

        acb->wanted_response = MUX_CMD_INVALID;
        acb->got_response = le32_to_cpu(command_type);
        complete(&ipc_mux->channel->ul_sem);

        return 0;
}

static int ipc_mux_dl_cmds_decode_process(struct iosm_mux *ipc_mux,
                                          union mux_cmd_param *param,
                                          __le32 command_type, u8 if_id,
                                          __le16 cmd_len, int size)
{
        struct mux_session *session;
        struct hrtimer *adb_timer;

        dev_dbg(ipc_mux->dev, "if_id[%d]: dlcmds decode process %d",
                if_id, le32_to_cpu(command_type));

        switch (le32_to_cpu(command_type)) {
        case MUX_LITE_CMD_FLOW_CTL:
        case IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE:

                if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
                        dev_err(ipc_mux->dev, "if_id [%d] not valid",
                                if_id);
                        return -EINVAL; /* No session interface id. */
                }

                session = &ipc_mux->session[if_id];
                adb_timer = &ipc_mux->imem->adb_timer;

                if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) {
                        /* Backward Compatibility */
                        if (cmd_len == cpu_to_le16(size))
                                session->flow_ctl_mask =
                                        le32_to_cpu(param->flow_ctl.mask);
                        else
                                session->flow_ctl_mask = ~0;
                        /* If CP asks for FLOW CTRL enable,
                         * then set our internal flow control Tx flag
                         * to limit uplink session queueing.
                         */
                        session->net_tx_stop = true;

                        /* We have to call Finish ADB here.
                         * Otherwise any already queued data
                         * will be sent to CP when the ADB is full
                         * for some other session.
                         */
                        if (ipc_mux->protocol == MUX_AGGREGATION) {
                                ipc_mux_ul_adb_finish(ipc_mux);
                                ipc_imem_hrtimer_stop(adb_timer);
                        }
                        /* Update the stats */
                        session->flow_ctl_en_cnt++;
                } else if (param->flow_ctl.mask == 0) {
                        /* Just reset the flow control mask and let
                         * mux_flow_ctrl_low_thre_b take control of
                         * our internal Tx flag and enable kernel
                         * flow control.
                         */
                        dev_dbg(ipc_mux->dev, "if_id[%u] flow_ctl mask 0x%08X",
                                if_id, le32_to_cpu(param->flow_ctl.mask));
                        /* Backward Compatibility */
                        if (cmd_len == cpu_to_le16(size))
                                session->flow_ctl_mask =
                                        le32_to_cpu(param->flow_ctl.mask);
                        else
                                session->flow_ctl_mask = 0;
                        /* Update the stats */
                        session->flow_ctl_dis_cnt++;
                } else {
                        break;
                }

                ipc_mux->acc_adb_size = 0;
                ipc_mux->acc_payload_size = 0;

                dev_dbg(ipc_mux->dev, "if_id[%u] FLOW CTRL 0x%08X", if_id,
                        le32_to_cpu(param->flow_ctl.mask));
                break;

        case MUX_LITE_CMD_LINK_STATUS_REPORT:
                break;

        default:
                return -EINVAL;
        }
        return 0;
}

/* Decode and send the appropriate response to a command block. */
static void ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
        struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data;
        __le32 trans_id = cmdh->transaction_id;
        int size;

        if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
                                               cmdh->command_type, cmdh->if_id,
                                               cmdh->transaction_id)) {
                /* Failure to decode a command response indicates that the
                 * cmd_type may be a command instead of a response, so try
                 * to decode it as such.
                 */
                size = offsetof(struct mux_lite_cmdh, param) +
                                sizeof(cmdh->param.flow_ctl);
                if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
                                                    cmdh->command_type,
                                                    cmdh->if_id,
                                                    cmdh->cmd_len, size)) {
                        /* The decoded command may need a response. Give the
                         * response according to the command type.
                         */
                        union mux_cmd_param *mux_cmd = NULL;
                        size_t size = 0;
                        u32 cmd = MUX_LITE_CMD_LINK_STATUS_REPORT_RESP;

                        if (cmdh->command_type ==
                            cpu_to_le32(MUX_LITE_CMD_LINK_STATUS_REPORT)) {
                                mux_cmd = &cmdh->param;
                                mux_cmd->link_status_resp.response =
                                        cpu_to_le32(MUX_CMD_RESP_SUCCESS);
                                /* response field is u32 */
                                size = sizeof(u32);
                        } else if (cmdh->command_type ==
                                   cpu_to_le32(MUX_LITE_CMD_FLOW_CTL)) {
                                cmd = MUX_LITE_CMD_FLOW_CTL_ACK;
                        } else {
                                return;
                        }

                        if (ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
                                                     le32_to_cpu(trans_id),
                                                     mux_cmd, size, false,
                                                     true))
                                dev_err(ipc_mux->dev,
                                        "if_id %d: cmd send failed",
                                        cmdh->if_id);
                }
        }
}

/* Pass the DL packet to the netif layer. */
static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
                               struct iosm_wwan *wwan, u32 offset,
                               u8 service_class, struct sk_buff *skb,
                               u32 pkt_len)
{
        struct sk_buff *dest_skb = skb_clone(skb, GFP_ATOMIC);

        if (!dest_skb)
                return -ENOMEM;

        skb_pull(dest_skb, offset);
        skb_trim(dest_skb, pkt_len);
        /* Pass the packet to the netif layer. */
        dest_skb->priority = service_class;

        return ipc_wwan_receive(wwan, dest_skb, false, if_id);
}

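/* Flow Credit Tables feed the credit-based UL flow control mode
 * (MUX_UL_ON_CREDITS): CP grants per-session uplink byte credits, which are
 * accumulated in ul_flow_credits below, and session TX is re-enabled once
 * the balance is positive.
 */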
/* Decode Flow Credit Table in the block */
static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
                                   unsigned char *block)
{
        struct ipc_mem_lite_gen_tbl *fct = (struct ipc_mem_lite_gen_tbl *)block;
        struct iosm_wwan *wwan;
        int ul_credits;
        int if_id;

        if (fct->vfl_length != sizeof(fct->vfl.nr_of_bytes)) {
                dev_err(ipc_mux->dev, "unexpected FCT length: %d",
                        fct->vfl_length);
                return;
        }

        if_id = fct->if_id;
        if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
                dev_err(ipc_mux->dev, "not supported if_id: %d", if_id);
                return;
        }

        /* Is the session active ? */
        if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
        wwan = ipc_mux->session[if_id].wwan;
        if (!wwan) {
                dev_err(ipc_mux->dev, "session Net ID is NULL");
                return;
        }

        ul_credits = le32_to_cpu(fct->vfl.nr_of_bytes);

        dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d",
                if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits);

        /* Update the Flow Credit information from ADB */
        ipc_mux->session[if_id].ul_flow_credits += ul_credits;

        /* Check whether the TX can be started */
        if (ipc_mux->session[if_id].ul_flow_credits > 0) {
                ipc_mux->session[if_id].net_tx_stop = false;
                ipc_mux_netif_tx_flowctrl(&ipc_mux->session[if_id],
                                          ipc_mux->session[if_id].if_id, false);
        }
}

/* Decode non-aggregated datagram */
static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
                                   struct sk_buff *skb)
{
        u32 pad_len, packet_offset, adgh_len;
        struct iosm_wwan *wwan;
        struct mux_adgh *adgh;
        u8 *block = skb->data;
        int rc = 0;
        u8 if_id;

        adgh = (struct mux_adgh *)block;

        if (adgh->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH)) {
                dev_err(ipc_mux->dev, "invalid ADGH signature received");
                return;
        }

        if_id = adgh->if_id;
        if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
                dev_err(ipc_mux->dev, "invalid if_id while decoding %d", if_id);
                return;
        }

        /* Is the session active ? */
        if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
        wwan = ipc_mux->session[if_id].wwan;
        if (!wwan) {
                dev_err(ipc_mux->dev, "session Net ID is NULL");
                return;
        }

        /* Store the pad len for the corresponding session.
         * The pad bytes are as negotiated in the open session, less the
         * header size (see the session management chapter for details).
         * If the resulting padding is zero or less, the additional head
         * padding is omitted: e.g. if HEAD_PAD_LEN is 16 or less, this
         * field is omitted; if HEAD_PAD_LEN is 20, then this field holds
         * 4 bytes set to zero.
         */
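        /* Worked example with hypothetical values: dl_head_pad_len = 20 and
         * IPC_MEM_DL_ETH_OFFSET = 16 give pad_len = 4, so the IP payload
         * starts sizeof(struct mux_adgh) + 4 bytes into the block.
         */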
        pad_len =
                ipc_mux->session[if_id].dl_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
        packet_offset = sizeof(*adgh) + pad_len;

        if_id += ipc_mux->wwan_q_offset;
        adgh_len = le16_to_cpu(adgh->length);

        /* Pass the packet to the netif layer */
        rc = ipc_mux_net_receive(ipc_mux, if_id, wwan, packet_offset,
                                 adgh->service_class, skb,
                                 adgh_len - packet_offset);
        if (rc) {
                dev_err(ipc_mux->dev, "mux adgh decoding error");
                return;
        }
        ipc_mux->session[if_id].flush = 1;
}

static void ipc_mux_dl_acbcmd_decode(struct iosm_mux *ipc_mux,
                                     struct mux_cmdh *cmdh, int size)
{
        u32 link_st  = IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP;
        u32 fctl_dis = IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE;
        u32 fctl_ena = IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE;
        u32 fctl_ack = IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK;
        union mux_cmd_param *cmd_p = NULL;
        u32 cmd = link_st;
        u32 trans_id;

        if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
                                            cmdh->command_type, cmdh->if_id,
                                            cmdh->cmd_len, size)) {
                size = 0;
                if (cmdh->command_type == cpu_to_le32(link_st)) {
                        cmd_p = &cmdh->param;
                        cmd_p->link_status_resp.response = MUX_CMD_RESP_SUCCESS;
                } else if ((cmdh->command_type == cpu_to_le32(fctl_ena)) ||
                           (cmdh->command_type == cpu_to_le32(fctl_dis))) {
                        cmd = fctl_ack;
                } else {
                        return;
                }
                trans_id = le32_to_cpu(cmdh->transaction_id);
                ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
                                         trans_id, cmd_p, size, false, true);
        }
}

/* Decode an aggregated command block. */
static void ipc_mux_dl_acb_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
        struct mux_acbh *acbh;
        struct mux_cmdh *cmdh;
        u32 next_cmd_index;
        u8 *block;
        int size;

        acbh = (struct mux_acbh *)(skb->data);
        block = (u8 *)(skb->data);

        next_cmd_index = le32_to_cpu(acbh->first_cmd_index);
        next_cmd_index = array_index_nospec(next_cmd_index,
                                            sizeof(struct mux_cmdh));

        while (next_cmd_index != 0) {
                cmdh = (struct mux_cmdh *)&block[next_cmd_index];
                next_cmd_index = le32_to_cpu(cmdh->next_cmd_index);
                if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
                                                       cmdh->command_type,
                                                       cmdh->if_id,
                                                       cmdh->transaction_id)) {
                        size = offsetof(struct mux_cmdh, param) +
                                sizeof(cmdh->param.flow_ctl);
                        ipc_mux_dl_acbcmd_decode(ipc_mux, cmdh, size);
                }
        }
}

/* Process the datagrams of one table. */
static int mux_dl_process_dg(struct iosm_mux *ipc_mux, struct mux_adbh *adbh,
                             struct mux_adth_dg *dg, struct sk_buff *skb,
                             int if_id, int nr_of_dg)
{
        u32 dl_head_pad_len = ipc_mux->session[if_id].dl_head_pad_len;
        u32 packet_offset, i, rc, dg_len;

        for (i = 0; i < nr_of_dg; i++, dg++) {
                if (le32_to_cpu(dg->datagram_index)
                                < sizeof(struct mux_adbh))
                        goto dg_error;

                /* Is the packet inside of the ADB */
                if (le32_to_cpu(dg->datagram_index) >=
                                        le32_to_cpu(adbh->block_length)) {
                        goto dg_error;
                } else {
                        packet_offset =
                                le32_to_cpu(dg->datagram_index) +
                                dl_head_pad_len;
                        dg_len = le16_to_cpu(dg->datagram_length);
                        /* Pass the packet to the netif layer. */
                        rc = ipc_mux_net_receive(ipc_mux, if_id, ipc_mux->wwan,
                                                 packet_offset,
                                                 dg->service_class, skb,
                                                 dg_len - dl_head_pad_len);
                        if (rc)
                                goto dg_error;
                }
        }
        return 0;
dg_error:
        return -1;
}

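/* Layout walked by the decoder below (a reading aid inferred from the code):
 * a mux_adbh block header whose first_table_index points at the first
 * mux_adth datagram table; tables are chained via next_table_index (zero
 * terminates the chain), and each table carries mux_adth_dg entries whose
 * datagram_index/datagram_length locate the IP payloads within the block.
 */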
/* Decode an aggregated data block. */
static void mux_dl_adb_decode(struct iosm_mux *ipc_mux,
                              struct sk_buff *skb)
{
        struct mux_adth_dg *dg;
        struct iosm_wwan *wwan;
        struct mux_adbh *adbh;
        struct mux_adth *adth;
        int nr_of_dg, if_id;
        u32 adth_index;
        u8 *block;

        block = skb->data;
        adbh = (struct mux_adbh *)block;

        /* Process the aggregated datagram tables. */
        adth_index = le32_to_cpu(adbh->first_table_index);

        /* Has CP sent an empty ADB ? */
        if (adth_index < 1) {
                dev_err(ipc_mux->dev, "unexpected empty ADB");
                goto adb_decode_err;
        }

        /* Loop through mixed session tables. */
        while (adth_index) {
                /* Get the reference to the table header. */
                adth = (struct mux_adth *)(block + adth_index);

                /* Get the interface id and map it to the netif id. */
                if_id = adth->if_id;
                if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
                        goto adb_decode_err;

                if_id = array_index_nospec(if_id,
                                           IPC_MEM_MUX_IP_SESSION_ENTRIES);

                /* Is the session active ? */
                wwan = ipc_mux->session[if_id].wwan;
                if (!wwan)
                        goto adb_decode_err;

                /* Consistency checks for aggregated datagram table. */
                if (adth->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH))
                        goto adb_decode_err;

                if (le16_to_cpu(adth->table_length) < sizeof(struct mux_adth))
                        goto adb_decode_err;

                /* Calculate the number of datagrams. */
                nr_of_dg = (le16_to_cpu(adth->table_length) -
                                        sizeof(struct mux_adth)) /
                                        sizeof(struct mux_adth_dg);

                /* Is the datagram table empty ? */
                if (nr_of_dg < 1) {
                        dev_err(ipc_mux->dev,
                                "adthidx=%u,nr_of_dg=%d,next_tblidx=%u",
                                adth_index, nr_of_dg,
                                le32_to_cpu(adth->next_table_index));

                        /* Move to the next aggregated datagram table. */
                        adth_index = le32_to_cpu(adth->next_table_index);
                        continue;
                }

                /* New aggregated datagram table. */
                dg = adth->dg;
                if (mux_dl_process_dg(ipc_mux, adbh, dg, skb, if_id,
                                      nr_of_dg) < 0)
                        goto adb_decode_err;

                /* Mark the session for final flush. */
                ipc_mux->session[if_id].flush = 1;

                /* Move to the next aggregated datagram table. */
                adth_index = le32_to_cpu(adth->next_table_index);
        }

adb_decode_err:
        return;
}

/**
 * ipc_mux_dl_decode -  Route the DL packet through the IP MUX layer
 *                      depending on the header.
 * @ipc_mux:            Pointer to MUX data-struct
 * @skb:                Pointer to ipc_skb.
 */
void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
        u32 signature;

        if (!skb->data)
                return;

        /* Decode the MUX header type. */
        signature = le32_to_cpup((__le32 *)skb->data);

        switch (signature) {
        case IOSM_AGGR_MUX_SIG_ADBH:    /* Aggregated Data Block Header */
                mux_dl_adb_decode(ipc_mux, skb);
                break;
        case IOSM_AGGR_MUX_SIG_ADGH:
                ipc_mux_dl_adgh_decode(ipc_mux, skb);
                break;
        case MUX_SIG_FCTH:
                ipc_mux_dl_fcth_decode(ipc_mux, skb->data);
                break;
        case IOSM_AGGR_MUX_SIG_ACBH:    /* Aggregated Command Block Header */
                ipc_mux_dl_acb_decode(ipc_mux, skb);
                break;
        case MUX_SIG_CMDH:
                ipc_mux_dl_cmd_decode(ipc_mux, skb);
                break;

        default:
                dev_err(ipc_mux->dev, "invalid ABH signature");
        }

        ipc_pcie_kfree_skb(ipc_mux->pcie, skb);
}

static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
                                struct mux_adb *ul_adb, u32 type)
{
        /* Take the first element of the free list. */
        struct sk_buff *skb = skb_dequeue(&ul_adb->free_list);
        u32 no_if = IPC_MEM_MUX_IP_SESSION_ENTRIES;
        u32 *next_tb_id;
        int qlt_size;
        u32 if_id;

        if (!skb)
                return -EBUSY; /* Wait for a free ADB skb. */

        /* Mark it as UL ADB to select the right free operation. */
        IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB;

        switch (type) {
        case IOSM_AGGR_MUX_SIG_ADBH:
                /* Save the ADB memory settings. */
                ul_adb->dest_skb = skb;
                ul_adb->buf = skb->data;
                ul_adb->size = IPC_MEM_MAX_ADB_BUF_SIZE;

                /* Reset the statistic counters. */
                ul_adb->if_cnt = 0;
                ul_adb->payload_size = 0;
                ul_adb->dg_cnt_total = 0;

                /* Initialize the ADBH. */
                ul_adb->adbh = (struct mux_adbh *)ul_adb->buf;
                memset(ul_adb->adbh, 0, sizeof(struct mux_adbh));
                ul_adb->adbh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADBH);
                ul_adb->adbh->block_length =
                                        cpu_to_le32(sizeof(struct mux_adbh));
                next_tb_id = (unsigned int *)&ul_adb->adbh->first_table_index;
                ul_adb->next_table_index = next_tb_id;

                /* Clear the local copy of DGs for the new ADB. */
                memset(ul_adb->dg, 0, sizeof(ul_adb->dg));

                /* Clear the DG count and QLT updated status for the new ADB. */
                for (if_id = 0; if_id < no_if; if_id++) {
                        ul_adb->dg_count[if_id] = 0;
                        ul_adb->qlt_updated[if_id] = 0;
                }
                break;

        case IOSM_AGGR_MUX_SIG_ADGH:
                /* Save the ADB memory settings. */
                ul_adb->dest_skb = skb;
                ul_adb->buf = skb->data;
                ul_adb->size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;
                /* Reset the statistic counters. */
                ul_adb->if_cnt = 0;
                ul_adb->payload_size = 0;
                ul_adb->dg_cnt_total = 0;

                ul_adb->adgh = (struct mux_adgh *)skb->data;
                memset(ul_adb->adgh, 0, sizeof(struct mux_adgh));
                break;

        case MUX_SIG_QLTH:
                qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
                           (MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl));

                if (qlt_size > IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE) {
                        dev_err(ipc_mux->dev,
                                "can't support. QLT size:%d SKB size: %d",
                                qlt_size, IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE);
                        return -ERANGE;
                }

                ul_adb->qlth_skb = skb;
                memset((ul_adb->qlth_skb)->data, 0, qlt_size);
                skb_put(skb, qlt_size);
                break;
        }

        return 0;
}

static void ipc_mux_ul_adgh_finish(struct iosm_mux *ipc_mux)
{
        struct mux_adb *ul_adb = &ipc_mux->ul_adb;
        u16 adgh_len;
        long long bytes;
        char *str;

        if (!ul_adb->dest_skb) {
                dev_err(ipc_mux->dev, "no dest skb");
                return;
        }

        adgh_len = le16_to_cpu(ul_adb->adgh->length);
        skb_put(ul_adb->dest_skb, adgh_len);
        skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
        ul_adb->dest_skb = NULL;

        if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
                struct mux_session *session;

                session = &ipc_mux->session[ul_adb->adgh->if_id];
                str = "available_credits";
                bytes = (long long)session->ul_flow_credits;

        } else {
                str = "pend_bytes";
                bytes = ipc_mux->ul_data_pend_bytes;
                ipc_mux->ul_data_pend_bytes = ipc_mux->ul_data_pend_bytes +
                                              adgh_len;
        }

        dev_dbg(ipc_mux->dev, "UL ADGH: size=%u, if_id=%d, payload=%d, %s=%lld",
                adgh_len, ul_adb->adgh->if_id, ul_adb->payload_size,
                str, bytes);
}

static void ipc_mux_ul_encode_adth(struct iosm_mux *ipc_mux,
                                   struct mux_adb *ul_adb, int *out_offset)
{
        int i, qlt_size, offset = *out_offset;
        struct mux_qlth *p_adb_qlt;
        struct mux_adth_dg *dg;
        struct mux_adth *adth;
        u16 adth_dg_size;
        u32 *next_tb_id;

        qlt_size = offsetof(struct mux_qlth, ql) +
                        MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);

        for (i = 0; i < ipc_mux->nr_sessions; i++) {
                if (ul_adb->dg_count[i] > 0) {
                        adth_dg_size = offsetof(struct mux_adth, dg) +
                                        ul_adb->dg_count[i] * sizeof(*dg);

                        *ul_adb->next_table_index = offset;
                        adth = (struct mux_adth *)&ul_adb->buf[offset];
                        next_tb_id = (unsigned int *)&adth->next_table_index;
                        ul_adb->next_table_index = next_tb_id;
                        offset += adth_dg_size;
                        adth->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH);
                        adth->if_id = i;
                        adth->table_length = cpu_to_le16(adth_dg_size);
                        adth_dg_size -= offsetof(struct mux_adth, dg);
                        memcpy(adth->dg, ul_adb->dg[i], adth_dg_size);
                        ul_adb->if_cnt++;
                }

                if (ul_adb->qlt_updated[i]) {
                        *ul_adb->next_table_index = offset;
                        p_adb_qlt = (struct mux_qlth *)&ul_adb->buf[offset];
                        ul_adb->next_table_index =
                                (u32 *)&p_adb_qlt->next_table_index;
                        memcpy(p_adb_qlt, ul_adb->pp_qlt[i], qlt_size);
                        offset += qlt_size;
                }
        }
        *out_offset = offset;
}

/**
 * ipc_mux_ul_adb_finish - Add the TD of the aggregated session packets to TDR.
 * @ipc_mux:               Pointer to MUX data-struct.
 */
void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux)
{
        bool ul_data_pend = false;
        struct mux_adb *ul_adb;
        unsigned long flags;
        int offset;

        ul_adb = &ipc_mux->ul_adb;
        if (!ul_adb->dest_skb)
                return;

        offset = *ul_adb->next_table_index;
        ipc_mux_ul_encode_adth(ipc_mux, ul_adb, &offset);
        ul_adb->adbh->block_length = cpu_to_le32(offset);

        if (le32_to_cpu(ul_adb->adbh->block_length) > ul_adb->size) {
                ul_adb->dest_skb = NULL;
                return;
        }

        *ul_adb->next_table_index = 0;
        ul_adb->adbh->sequence_nr = cpu_to_le16(ipc_mux->adb_tx_sequence_nr++);
        skb_put(ul_adb->dest_skb, le32_to_cpu(ul_adb->adbh->block_length));

        spin_lock_irqsave(&(&ipc_mux->channel->ul_list)->lock, flags);
        __skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
        spin_unlock_irqrestore(&(&ipc_mux->channel->ul_list)->lock, flags);

        ul_adb->dest_skb = NULL;
        /* Updates the TDs with ul_list */
        ul_data_pend = ipc_imem_ul_write_td(ipc_mux->imem);

        /* Delay the doorbell irq */
        if (ul_data_pend)
                ipc_imem_td_update_timer_start(ipc_mux->imem);

        ipc_mux->acc_adb_size += le32_to_cpu(ul_adb->adbh->block_length);
        ipc_mux->acc_payload_size += ul_adb->payload_size;
        ipc_mux->ul_data_pend_bytes += ul_adb->payload_size;
}

/* Allocates an ADB from the free list and initializes it with an ADBH. */
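/* Returns true if no free ADB skb could be obtained (the caller must retry
 * later), false if an ADB was already pending or was freshly allocated.
 */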
static bool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux,
                                    struct mux_adb *adb, int *size_needed,
                                    u32 type)
{
        bool ret_val = false;
        int status;

        if (!adb->dest_skb) {
                /* Allocate memory for the ADB including the
                 * datagram table header.
                 */
                status = ipc_mux_ul_skb_alloc(ipc_mux, adb, type);
                if (status)
                        /* Is a pending ADB available ? */
                        ret_val = true; /* None. */

                /* Reset size_needed to zero only for new ADB memory. */
                *size_needed = 0;
        }

        return ret_val;
}

/* Informs the network stack to stop sending further packets for all opened
 * sessions
 */
static void ipc_mux_stop_tx_for_all_sessions(struct iosm_mux *ipc_mux)
{
        struct mux_session *session;
        int idx;

        for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
                session = &ipc_mux->session[idx];

                if (!session->wwan)
                        continue;

                session->net_tx_stop = true;
        }
}

/* Sends the Queue Level Table of all opened sessions. */
static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
{
        struct ipc_mem_lite_gen_tbl *qlt;
        struct mux_session *session;
        bool qlt_updated = false;
        int i;
        int qlt_size;

        if (!ipc_mux->initialized || ipc_mux->state != MUX_S_ACTIVE)
                return qlt_updated;

        qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
                   MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);

        for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
                session = &ipc_mux->session[i];

                if (!session->wwan || session->flow_ctl_mask)
                        continue;

                if (ipc_mux_ul_skb_alloc(ipc_mux, &ipc_mux->ul_adb,
                                         MUX_SIG_QLTH)) {
                        dev_err(ipc_mux->dev,
                                "no reserved mem to send QLT of if_id: %d", i);
                        break;
                }

                /* Prepare QLT */
                qlt = (struct ipc_mem_lite_gen_tbl *)(ipc_mux->ul_adb.qlth_skb)
                              ->data;
                qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
                qlt->length = cpu_to_le16(qlt_size);
                qlt->if_id = i;
                qlt->vfl_length = MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
                qlt->reserved[0] = 0;
                qlt->reserved[1] = 0;

                qlt->vfl.nr_of_bytes = cpu_to_le32(session->ul_list.qlen);

                /* Add QLT to the transfer list. */
                skb_queue_tail(&ipc_mux->channel->ul_list,
                               ipc_mux->ul_adb.qlth_skb);

                qlt_updated = true;
                ipc_mux->ul_adb.qlth_skb = NULL;
        }

        if (qlt_updated)
                /* Updates the TDs with ul_list */
                (void)ipc_imem_ul_write_td(ipc_mux->imem);

        return qlt_updated;
}

/* Checks the available credits for the specified session and returns the
 * number of packets for which credits are available.
 */
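/* Hypothetical example: with 3000 byte credits left and 1500-byte packets
 * queued, the walk below grants two packets and then stops, even if
 * max_nr_of_pkts asked for more.
 */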
static int ipc_mux_ul_bytes_credits_check(struct iosm_mux *ipc_mux,
                                          struct mux_session *session,
                                          struct sk_buff_head *ul_list,
                                          int max_nr_of_pkts)
{
        int pkts_to_send = 0;
        struct sk_buff *skb;
        int credits = 0;

        if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
                credits = session->ul_flow_credits;
                if (credits <= 0) {
                        dev_dbg(ipc_mux->dev,
                                "FC::if_id[%d] Insuff.Credits/Qlen:%d/%u",
                                session->if_id, session->ul_flow_credits,
                                session->ul_list.qlen); /* nr_of_bytes */
                        return 0;
                }
        } else {
                credits = IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B -
                          ipc_mux->ul_data_pend_bytes;
                if (credits <= 0) {
                        ipc_mux_stop_tx_for_all_sessions(ipc_mux);

                        dev_dbg(ipc_mux->dev,
                                "if_id[%d] encod. fail Bytes: %llu, thresh: %d",
                                session->if_id, ipc_mux->ul_data_pend_bytes,
                                IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B);
                        return 0;
                }
        }

        /* Check if there are enough credits/bytes available to send the
         * requested max_nr_of_pkts. Otherwise restrict the nr_of_pkts
         * depending on available credits.
         */
        skb_queue_walk(ul_list, skb)
        {
                if (!(credits >= skb->len && pkts_to_send < max_nr_of_pkts))
                        break;
                credits -= skb->len;
                pkts_to_send++;
        }

        return pkts_to_send;
}

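/* In MUX Lite, each UL IP packet is framed as its own ADGH block: the
 * mux_adgh header, optional head padding, then the payload, with
 * adgh->length covering all three. The encoder below builds and queues one
 * such block per dequeued skb.
 */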
1058 /* Encode the UL IP packet according to Lite spec. */
1059 static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
1060                                   struct mux_session *session,
1061                                   struct sk_buff_head *ul_list,
1062                                   struct mux_adb *adb, int nr_of_pkts)
1063 {
1064         int offset = sizeof(struct mux_adgh);
1065         int adb_updated = -EINVAL;
1066         struct sk_buff *src_skb;
1067         int aligned_size = 0;
1068         int nr_of_skb = 0;
1069         u32 pad_len = 0;
1070
1071         /* Re-calculate the number of packets depending on number of bytes to be
1072          * processed/available credits.
1073          */
1074         nr_of_pkts = ipc_mux_ul_bytes_credits_check(ipc_mux, session, ul_list,
1075                                                     nr_of_pkts);
1076
1077         /* If calculated nr_of_pkts from available credits is <= 0
1078          * then nothing to do.
1079          */
1080         if (nr_of_pkts <= 0)
1081                 return 0;
1082
1083         /* Read configured UL head_pad_length for session.*/
1084         if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
1085                 pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
1086
1087         /* Process all pending UL packets for this session
1088          * depending on the allocated datagram table size.
1089          */
1090         while (nr_of_pkts > 0) {
1091                 /* get destination skb allocated */
1092                 if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
1093                                             IOSM_AGGR_MUX_SIG_ADGH)) {
1094                         dev_err(ipc_mux->dev, "no reserved memory for ADGH");
1095                         return -ENOMEM;
1096                 }
1097
1098                 /* Peek at the head of the list. */
1099                 src_skb = skb_peek(ul_list);
1100                 if (!src_skb) {
1101                         dev_err(ipc_mux->dev,
1102                                 "skb peek return NULL with count : %d",
1103                                 nr_of_pkts);
1104                         break;
1105                 }
1106
1107                 /* Calculate the memory value. */
1108                 aligned_size = ALIGN((pad_len + src_skb->len), 4);
1109
1110                 ipc_mux->size_needed = sizeof(struct mux_adgh) + aligned_size;
1111
1112                 if (ipc_mux->size_needed > adb->size) {
1113                         dev_dbg(ipc_mux->dev, "size needed %d, adgh size %d",
1114                                 ipc_mux->size_needed, adb->size);
1115                         /* Return 1 if any IP packet is added to the transfer
1116                          * list.
1117                          */
1118                         return nr_of_skb ? 1 : 0;
1119                 }
1120
1121                 /* Add buffer (without head padding to next pending transfer) */
1122                 memcpy(adb->buf + offset + pad_len, src_skb->data,
1123                        src_skb->len);
1124
1125                 adb->adgh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH);
1126                 adb->adgh->if_id = session_id;
1127                 adb->adgh->length =
1128                         cpu_to_le16(sizeof(struct mux_adgh) + pad_len +
1129                                     src_skb->len);
1130                 adb->adgh->service_class = src_skb->priority;
1131                 adb->adgh->next_count = --nr_of_pkts;
1132                 adb->dg_cnt_total++;
1133                 adb->payload_size += src_skb->len;
1134
1135                 if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS)
1136                         /* Decrement the credit value as we are processing the
1137                          * datagram from the UL list.
1138                          */
1139                         session->ul_flow_credits -= src_skb->len;
1140
1141                 /* Remove the processed elements and free it. */
1142                 src_skb = skb_dequeue(ul_list);
1143                 dev_kfree_skb(src_skb);
1144                 nr_of_skb++;
1145
1146                 ipc_mux_ul_adgh_finish(ipc_mux);
1147         }
1148
1149         if (nr_of_skb) {
1150                 /* Send QLT info to modem if pending bytes > high watermark
1151                  * in case of mux lite
1152                  */
1153                 if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS ||
1154                     ipc_mux->ul_data_pend_bytes >=
1155                             IPC_MEM_MUX_UL_FLOWCTRL_LOW_B)
1156                         adb_updated = ipc_mux_lite_send_qlt(ipc_mux);
1157                 else
1158                         adb_updated = 1;
1159
1160                 /* Updates the TDs with ul_list */
1161                 (void)ipc_imem_ul_write_td(ipc_mux->imem);
1162         }
1163
1164         return adb_updated;
1165 }
1166
1167 /**
1168  * ipc_mux_ul_adb_update_ql - Adds Queue Level Table and Queue Level to ADB
1169  * @ipc_mux:            pointer to MUX instance data
1170  * @p_adb:              pointer to UL aggegated data block
1171  * @session_id:         session id
1172  * @qlth_n_ql_size:     Length (in bytes) of the datagram table
1173  * @ul_list:            pointer to skb buffer head
1174  */
1175 void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb,
1176                               int session_id, int qlth_n_ql_size,
1177                               struct sk_buff_head *ul_list)
1178 {
1179         int qlevel = ul_list->qlen;
1180         struct mux_qlth *p_qlt;
1181
1182         p_qlt = (struct mux_qlth *)p_adb->pp_qlt[session_id];
1183
1184         /* Initialize QLTH if not been done */
1185         if (p_adb->qlt_updated[session_id] == 0) {
1186                 p_qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
1187                 p_qlt->if_id = session_id;
1188                 p_qlt->table_length = cpu_to_le16(qlth_n_ql_size);
1189                 p_qlt->reserved = 0;
1190                 p_qlt->reserved2 = 0;
1191         }
1192
1193         /* Update Queue Level information always */
1194         p_qlt->ql.nr_of_bytes = cpu_to_le32(qlevel);
1195         p_adb->qlt_updated[session_id] = 1;
1196 }
1197
1198 /* Update the next table index. */
1199 static int mux_ul_dg_update_tbl_index(struct iosm_mux *ipc_mux,
1200                                       int session_id,
1201                                       struct sk_buff_head *ul_list,
1202                                       struct mux_adth_dg *dg,
1203                                       int aligned_size,
1204                                       u32 qlth_n_ql_size,
1205                                       struct mux_adb *adb,
1206                                       struct sk_buff *src_skb)
1207 {
1208         ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
1209                                  qlth_n_ql_size, ul_list);
1210         ipc_mux_ul_adb_finish(ipc_mux);
1211         if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
1212                                     IOSM_AGGR_MUX_SIG_ADBH))
1213                 return -ENOMEM;
1214
1215         ipc_mux->size_needed = le32_to_cpu(adb->adbh->block_length);
1216
1217         ipc_mux->size_needed += offsetof(struct mux_adth, dg);
1218         ipc_mux->size_needed += qlth_n_ql_size;
1219         ipc_mux->size_needed += sizeof(*dg) + aligned_size;
1220         return 0;
1221 }
1222
1223 /* Process encode session UL data. */
1224 static int mux_ul_dg_encode(struct iosm_mux *ipc_mux, struct mux_adb *adb,
1225                             struct mux_adth_dg *dg,
1226                             struct sk_buff_head *ul_list,
1227                             struct sk_buff *src_skb, int session_id,
1228                             int pkt_to_send, u32 qlth_n_ql_size,
1229                             int *out_offset, int head_pad_len)
1230 {
1231         int aligned_size;
1232         int offset = *out_offset;
1233         unsigned long flags;
1234         int nr_of_skb = 0;
1235
1236         while (pkt_to_send > 0) {
1237                 /* Peek at the head of the list. */
1238                 src_skb = skb_peek(ul_list);
1239                 if (!src_skb) {
1240                         dev_err(ipc_mux->dev,
1241                                 "skb peek return NULL with count : %d",
1242                                 pkt_to_send);
1243                         return -1;
1244                 }
1245                 aligned_size = ALIGN((head_pad_len + src_skb->len), 4);
1246                 ipc_mux->size_needed += sizeof(*dg) + aligned_size;
1247
1248                 if (ipc_mux->size_needed > adb->size ||
1249                     ((ipc_mux->size_needed + ipc_mux->ul_data_pend_bytes) >=
1250                       IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B)) {
1251                         *adb->next_table_index = offset;
1252                         if (mux_ul_dg_update_tbl_index(ipc_mux, session_id,
1253                                                        ul_list, dg,
1254                                                        aligned_size,
1255                                                        qlth_n_ql_size, adb,
1256                                                        src_skb) < 0)
1257                                 return -ENOMEM;
1258                         nr_of_skb = 0;
1259                         offset = le32_to_cpu(adb->adbh->block_length);
1260                         /* Load pointer to next available datagram entry */
1261                         dg = adb->dg[session_id] + adb->dg_count[session_id];
1262                 }
1263                 /* Add buffer without head padding to next pending transfer. */
1264                 memcpy(adb->buf + offset + head_pad_len,
1265                        src_skb->data, src_skb->len);
1266                 /* Setup datagram entry. */
1267                 dg->datagram_index = cpu_to_le32(offset);
1268                 dg->datagram_length = cpu_to_le16(src_skb->len + head_pad_len);
1269                 dg->service_class = (((struct sk_buff *)src_skb)->priority);
1270                 dg->reserved = 0;
1271                 adb->dg_cnt_total++;
1272                 adb->payload_size += le16_to_cpu(dg->datagram_length);
1273                 dg++;
1274                 adb->dg_count[session_id]++;
1275                 offset += aligned_size;
1276                 /* Remove the processed elements and free it. */
1277                 spin_lock_irqsave(&ul_list->lock, flags);
1278                 src_skb = __skb_dequeue(ul_list);
1279                 spin_unlock_irqrestore(&ul_list->lock, flags);
1280
1281                 dev_kfree_skb(src_skb);
1282                 nr_of_skb++;
1283                 pkt_to_send--;
1284         }
1285         *out_offset = offset;
1286         return nr_of_skb;
1287 }
1288
1289 /* Process encode session UL data to ADB. */
1290 static int mux_ul_adb_encode(struct iosm_mux *ipc_mux, int session_id,
1291                              struct mux_session *session,
1292                              struct sk_buff_head *ul_list, struct mux_adb *adb,
1293                              int pkt_to_send)
1294 {
1295         int adb_updated = -EINVAL;
1296         int head_pad_len, offset;
1297         struct sk_buff *src_skb = NULL;
1298         struct mux_adth_dg *dg;
1299         u32 qlth_n_ql_size;
1300
1301         /* If any of the opened session has set Flow Control ON then limit the
1302          * UL data to mux_flow_ctrl_high_thresh_b bytes
1303          */
1304         if (ipc_mux->ul_data_pend_bytes >=
1305                 IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B) {
1306                 ipc_mux_stop_tx_for_all_sessions(ipc_mux);
1307                 return adb_updated;
1308         }
1309
1310         qlth_n_ql_size = offsetof(struct mux_qlth, ql) +
1311                          MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
1312         head_pad_len = session->ul_head_pad_len;
1313
1314         if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
1315                 head_pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
1316
1317         if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
1318                                     IOSM_AGGR_MUX_SIG_ADBH))
1319                 return -ENOMEM;
1320
1321         offset = le32_to_cpu(adb->adbh->block_length);
1322
1323         if (ipc_mux->size_needed == 0)
1324                 ipc_mux->size_needed = offset;
1325
1326         /* Calculate the size needed for the ADTH, QLTH and QL. */
1327         if (adb->dg_count[session_id] == 0) {
1328                 ipc_mux->size_needed += offsetof(struct mux_adth, dg);
1329                 ipc_mux->size_needed += qlth_n_ql_size;
1330         }
1331
1332         dg = adb->dg[session_id] + adb->dg_count[session_id];
1333
1334         if (mux_ul_dg_encode(ipc_mux, adb, dg, ul_list, src_skb,
1335                              session_id, pkt_to_send, qlth_n_ql_size, &offset,
1336                              head_pad_len) > 0) {
1337                 adb_updated = 1;
1338                 *adb->next_table_index = offset;
1339                 ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
1340                                          qlth_n_ql_size, ul_list);
1341                 adb->adbh->block_length = cpu_to_le32(offset);
1342         }
1343
1344         return adb_updated;
1345 }
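
/* A sketch (not part of the driver) of the per-session size reservation
 * performed above, reusing this file's types: the first datagram of a
 * session must also account for its datagram table header (ADTH) and the
 * queue-level table with its reports (QLTH + QL).
 */
static u32 demo_first_dg_overhead(void)
{
	u32 qlth_n_ql_size = offsetof(struct mux_qlth, ql) +
			     MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);

	return offsetof(struct mux_adth, dg) + qlth_n_ql_size;
}
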
1346
1347 bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
1348 {
1349         struct sk_buff_head *ul_list;
1350         struct mux_session *session;
1351         int updated = 0;
1352         int session_id;
1353         int dg_n;
1354         int i;
1355
1356         if (!ipc_mux || ipc_mux->state != MUX_S_ACTIVE ||
1357             ipc_mux->adb_prep_ongoing)
1358                 return false;
1359
1360         ipc_mux->adb_prep_ongoing = true;
1361
1362         for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
1363                 session_id = ipc_mux->rr_next_session;
1364                 session = &ipc_mux->session[session_id];
1365
1366                 /* Advance to the next session; handle rr_next_session overflow. */
1367                 ipc_mux->rr_next_session++;
1368                 if (ipc_mux->rr_next_session >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
1369                         ipc_mux->rr_next_session = 0;
1370
1371                 if (!session->wwan || session->flow_ctl_mask ||
1372                     session->net_tx_stop)
1373                         continue;
1374
1375                 ul_list = &session->ul_list;
1376
1377                 /* Check if UL data is pending while flow control is off. */
1378                 dg_n = skb_queue_len(ul_list);
1379                 if (dg_n > MUX_MAX_UL_DG_ENTRIES)
1380                         dg_n = MUX_MAX_UL_DG_ENTRIES;
1381
1382                 if (dg_n == 0)
1383                         /* Nothing to do for ipc_mux session
1384                          * -> try next session id.
1385                          */
1386                         continue;
1387                 if (ipc_mux->protocol == MUX_LITE)
1388                         updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id,
1389                                                          session, ul_list,
1390                                                          &ipc_mux->ul_adb,
1391                                                          dg_n);
1392                 else
1393                         updated = mux_ul_adb_encode(ipc_mux, session_id,
1394                                                     session, ul_list,
1395                                                     &ipc_mux->ul_adb,
1396                                                     dg_n);
1397         }
1398
1399         ipc_mux->adb_prep_ongoing = false;
1400         return updated == 1;
1401 }
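
/* Standalone sketch of the round-robin walk above (hypothetical demo_*
 * name): each encode pass resumes at rr_next_session, visits every
 * session at most once and wraps at the session count, so one busy
 * session cannot starve the others.
 */
static int demo_rr_advance(int rr_next_session, int nr_sessions)
{
	return (rr_next_session + 1 >= nr_sessions) ? 0 : rr_next_session + 1;
}
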
1402
1403 /* Calculate the payload size of a given ADB. */
1404 static int ipc_mux_get_payload_from_adb(struct iosm_mux *ipc_mux,
1405                                         struct mux_adbh *p_adbh)
1406 {
1407         struct mux_adth_dg *dg;
1408         struct mux_adth *adth;
1409         u32 payload_size = 0;
1410         u32 next_table_idx;
1411         int nr_of_dg, i;
1412
1413         /* Process the aggregated datagram tables. */
1414         next_table_idx = le32_to_cpu(p_adbh->first_table_index);
1415
1416         if (next_table_idx < sizeof(struct mux_adbh)) {
1417                 dev_err(ipc_mux->dev, "unexpected empty ADB");
1418                 return payload_size;
1419         }
1420
1421         while (next_table_idx != 0) {
1422                 /* Get the reference to the table header. */
1423                 adth = (struct mux_adth *)((u8 *)p_adbh + next_table_idx);
1424
1425                 if (adth->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) {
1426                         nr_of_dg = (le16_to_cpu(adth->table_length) -
1427                                         sizeof(struct mux_adth)) /
1428                                         sizeof(struct mux_adth_dg);
1429
1430                         if (nr_of_dg <= 0)
1431                                 return payload_size;
1432
1433                         dg = adth->dg;
1434
1435                         for (i = 0; i < nr_of_dg; i++, dg++) {
1436                                 if (le32_to_cpu(dg->datagram_index) <
1437                                         sizeof(struct mux_adbh)) {
1438                                         return payload_size;
1439                                 }
1440                                 payload_size +=
1441                                         le16_to_cpu(dg->datagram_length);
1442                         }
1443                 }
1444                 next_table_idx = le32_to_cpu(adth->next_table_index);
1445         }
1446
1447         return payload_size;
1448 }
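
/* Assumed block layout walked above, inferred from the offsets this
 * function dereferences (not taken from a separate interface spec):
 *
 *   ADBH --first_table_index--> ADTH --next_table_index--> ADTH --> 0
 *
 * A next_table_index of zero terminates the chain. Each ADTH carries
 * dg[] entries whose datagram_index points at payload bytes elsewhere
 * in the same block, which is why an index smaller than the ADBH size
 * is rejected, as in this hedged helper:
 */
static bool demo_dg_index_valid(u32 datagram_index)
{
	/* A datagram can never start inside the ADB header itself. */
	return datagram_index >= sizeof(struct mux_adbh);
}
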
1449
1450 void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
1451 {
1452         union mux_type_header hr;
1453         u16 adgh_len;
1454         int payload;
1455
1456         if (ipc_mux->protocol == MUX_LITE) {
1457                 hr.adgh = (struct mux_adgh *)skb->data;
1458                 adgh_len = le16_to_cpu(hr.adgh->length);
1459                 if (hr.adgh->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH) &&
1460                     ipc_mux->ul_flow == MUX_UL)
1461                         ipc_mux->ul_data_pend_bytes -= adgh_len;
1463         } else {
1464                 hr.adbh = (struct mux_adbh *)(skb->data);
1465                 payload = ipc_mux_get_payload_from_adb(ipc_mux, hr.adbh);
1466                 ipc_mux->ul_data_pend_bytes -= payload;
1467         }
1468
1469         if (ipc_mux->ul_flow == MUX_UL)
1470                 dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld",
1471                         ipc_mux->ul_data_pend_bytes);
1472
1473         /* Reset the skb settings. */
1474         skb_trim(skb, 0);
1475
1476         /* Add the consumed ADB to the free list. */
1477         skb_queue_tail(&ipc_mux->ul_adb.free_list, skb);
1478 }
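
/* Sketch of the buffer-pool pattern used above (hypothetical demo_*
 * names, standard skb APIs): consumed ADB skbs are trimmed to length
 * zero and parked on a free list instead of being freed, so the next
 * encode pass can reuse them without a fresh allocation.
 */
static void demo_adb_recycle(struct sk_buff_head *free_list,
			     struct sk_buff *skb)
{
	skb_trim(skb, 0);		/* reset the payload length */
	skb_queue_tail(free_list, skb);	/* park the buffer for reuse */
}

static struct sk_buff *demo_adb_reuse(struct sk_buff_head *free_list)
{
	return skb_dequeue(free_list);	/* NULL if the pool is empty */
}
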
1479
1480 /* Start the NETIF uplink send transfer in MUX mode. */
1481 static int ipc_mux_tq_ul_trigger_encode(struct iosm_imem *ipc_imem, int arg,
1482                                         void *msg, size_t size)
1483 {
1484         struct iosm_mux *ipc_mux = ipc_imem->mux;
1485         bool ul_data_pend = false;
1486
1487         /* Add session UL data to an ADB or ADGH. */
1488         ul_data_pend = ipc_mux_ul_data_encode(ipc_mux);
1489         if (ul_data_pend) {
1490                 if (ipc_mux->protocol == MUX_AGGREGATION)
1491                         ipc_imem_adb_timer_start(ipc_mux->imem);
1492
1493                 /* Delay the doorbell IRQ. */
1494                 ipc_imem_td_update_timer_start(ipc_mux->imem);
1495         }
1496         /* Reset the debounce flag. */
1497         ipc_mux->ev_mux_net_transmit_pending = false;
1498
1499         return 0;
1500 }
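
/* Sketch of the debounce used around the encode task (hypothetical
 * demo_* name; the real flag needs no atomics because it is serialized
 * by the driver's task-queue context): the first transmit arms the flag
 * and schedules the task, later transmits only queue their skb, and the
 * task clears the flag once it has drained the sessions.
 */
static bool demo_need_schedule(bool *transmit_pending)
{
	if (*transmit_pending)
		return false;	/* an encode task is already queued */

	*transmit_pending = true;
	return true;		/* caller must schedule the encode task */
}
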
1501
1502 int ipc_mux_ul_trigger_encode(struct iosm_mux *ipc_mux, int if_id,
1503                               struct sk_buff *skb)
1504 {
1505         struct mux_session *session = &ipc_mux->session[if_id];
1506         int ret = -EINVAL;
1507
1508         if (ipc_mux->channel &&
1509             ipc_mux->channel->state != IMEM_CHANNEL_ACTIVE) {
1510                 dev_err(ipc_mux->dev,
1511                         "channel state is not IMEM_CHANNEL_ACTIVE");
1512                 goto out;
1513         }
1514
1515         if (!session->wwan) {
1516                 dev_err(ipc_mux->dev, "session net ID is NULL");
1517                 ret = -EFAULT;
1518                 goto out;
1519         }
1520
1521         /* Apply session flow control.
1522          * Check whether the packet can still be queued in the session
1523          * list; if not, suspend net TX.
1524          */
1525         if (skb_queue_len(&session->ul_list) >=
1526             (session->net_tx_stop ?
1527                      IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD :
1528                      (IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD *
1529                       IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR))) {
1530                 ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
1531                 ret = -EBUSY;
1532                 goto out;
1533         }
1534
1535         /* Add skb to the uplink skb accumulator. */
1536         skb_queue_tail(&session->ul_list, skb);
1537
1538         /* Inform the IPC kthread to pass uplink IP packets to CP. */
1539         if (!ipc_mux->ev_mux_net_transmit_pending) {
1540                 ipc_mux->ev_mux_net_transmit_pending = true;
1541                 ret = ipc_task_queue_send_task(ipc_mux->imem,
1542                                                ipc_mux_tq_ul_trigger_encode, 0,
1543                                                NULL, 0, false);
1544                 if (ret)
1545                         goto out;
1546         }
1547         dev_dbg(ipc_mux->dev, "mux ul if[%d] qlen=%d/%u, len=%d/%d, prio=%d",
1548                 if_id, skb_queue_len(&session->ul_list), session->ul_list.qlen,
1549                 skb->len, skb->truesize, skb->priority);
1550         ret = 0;
1551 out:
1552         return ret;
1553 }
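
/* Sketch of the queue-length hysteresis applied above, reusing this
 * driver's threshold names: while TX is running, the session list may
 * grow up to the larger FCON * FCOFF-factor limit before netif TX is
 * stopped; once stopped, any length at or above the plain FCON
 * threshold keeps it stopped, preventing rapid on/off toggling.
 */
static bool demo_should_stop_tx(u32 qlen, bool net_tx_stop)
{
	u32 limit = net_tx_stop ?
		IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD :
		IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD *
			IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR;

	return qlen >= limit;
}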