// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"
#include "dp_mon.h"

#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)

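/* The helpers below wrap per-chip descriptor field accessors: each one just
 * dispatches through the hal_ops table selected via hw_params, keeping the
 * rest of the rx path independent of the hardware descriptor layout.
 */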
static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
                                                    struct hal_rx_desc *desc)
{
        if (!ab->hw_params->hal_ops->rx_desc_encrypt_valid(desc))
                return HAL_ENCRYPT_TYPE_OPEN;

        return ab->hw_params->hal_ops->rx_desc_get_encrypt_type(desc);
}

u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
                             struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->rx_desc_get_decap_type(desc);
}

static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
                                          struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->rx_desc_get_mesh_ctl(desc);
}

static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
                                          struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}

static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
                                    struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->rx_desc_get_mpdu_fc_valid(desc);
}

static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
                                      struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;

        hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
        return ieee80211_has_morefrags(hdr->frame_control);
}

static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
                                  struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;

        hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
        return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
                                 struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->rx_desc_get_mpdu_start_seq_no(desc);
}

static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
                                     struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->dp_rx_h_msdu_done(desc);
}

static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
                                         struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->dp_rx_h_l4_cksum_fail(desc);
}

static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
                                         struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->dp_rx_h_ip_cksum_fail(desc);
}

static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
                                        struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->dp_rx_h_is_decrypted(desc);
}

u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
                            struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->dp_rx_h_mpdu_err(desc);
}

static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
                                   struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->rx_desc_get_msdu_len(desc);
}

static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
                             struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->rx_desc_get_msdu_sgi(desc);
}

static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
                                  struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->rx_desc_get_msdu_rate_mcs(desc);
}

static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
                               struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->rx_desc_get_msdu_rx_bw(desc);
}

static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
                               struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->rx_desc_get_msdu_freq(desc);
}

static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
                                  struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->rx_desc_get_msdu_pkt_type(desc);
}

static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
                             struct hal_rx_desc *desc)
{
        return hweight8(ab->hw_params->hal_ops->rx_desc_get_msdu_nss(desc));
}

static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
                             struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->rx_desc_get_mpdu_tid(desc);
}

static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
                                  struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->rx_desc_get_mpdu_peer_id(desc);
}

u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
                        struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->rx_desc_get_l3_pad_bytes(desc);
}

static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
                                      struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->rx_desc_get_first_msdu(desc);
}

static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
                                     struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->rx_desc_get_last_msdu(desc);
}

static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
                                           struct hal_rx_desc *fdesc,
                                           struct hal_rx_desc *ldesc)
{
        ab->hw_params->hal_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
}

static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
                                          struct hal_rx_desc *desc,
                                          u16 len)
{
        ab->hw_params->hal_ops->rx_desc_set_msdu_len(desc, len);
}

static bool ath12k_dp_rx_h_is_mcbc(struct ath12k_base *ab,
                                   struct hal_rx_desc *desc)
{
        return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
                ab->hw_params->hal_ops->rx_desc_is_mcbc(desc));
}

static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
                                             struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->rx_desc_mac_addr2_valid(desc);
}

static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
                                                 struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->rx_desc_mpdu_start_addr2(desc);
}

static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
                                            struct hal_rx_desc *desc,
                                            struct ieee80211_hdr *hdr)
{
        ab->hw_params->hal_ops->rx_desc_get_dot11_hdr(desc, hdr);
}

static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
                                                struct hal_rx_desc *desc,
                                                u8 *crypto_hdr,
                                                enum hal_encrypt_type enctype)
{
        ab->hw_params->hal_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
}

static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
                                                struct hal_rx_desc *desc)
{
        return ab->hw_params->hal_ops->rx_desc_get_mpdu_frame_ctl(desc);
}

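/* Service the monitor destination rings repeatedly until fewer entries
 * than the service budget have been reaped (nothing more pending) or the
 * purge timeout expires.
 */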
static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
{
        int i, reaped = 0;
        unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);

        do {
                for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
                        reaped += ath12k_dp_mon_process_ring(ab, i, NULL,
                                                             DP_MON_SERVICE_BUDGET,
                                                             ATH12K_DP_RX_MONITOR_MODE);

                /* nothing more to reap */
                if (reaped < DP_MON_SERVICE_BUDGET)
                        return 0;

        } while (time_before(jiffies, timeout));

        ath12k_warn(ab, "dp mon ring purge timeout");

        return -ETIMEDOUT;
}

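/* Each buffer posted to the refill ring carries a cookie. With HW cookie
 * conversion (hw_cc) the cookie comes from a preallocated rx descriptor
 * pulled off the free list; otherwise it encodes the pdev id plus an
 * idr-allocated buffer id.
 */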
/* Returns number of Rx buffers replenished */
int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id,
                                struct dp_rxdma_ring *rx_ring,
                                int req_entries,
                                enum hal_rx_buf_return_buf_manager mgr,
                                bool hw_cc)
{
        struct ath12k_buffer_addr *desc;
        struct hal_srng *srng;
        struct sk_buff *skb;
        int num_free;
        int num_remain;
        int buf_id;
        u32 cookie;
        dma_addr_t paddr;
        struct ath12k_dp *dp = &ab->dp;
        struct ath12k_rx_desc_info *rx_desc;

        req_entries = min(req_entries, rx_ring->bufs_max);

        srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

        spin_lock_bh(&srng->lock);

        ath12k_hal_srng_access_begin(ab, srng);

        num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
        if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
                req_entries = num_free;

        req_entries = min(num_free, req_entries);
        num_remain = req_entries;

        while (num_remain > 0) {
                skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
                                    DP_RX_BUFFER_ALIGN_SIZE);
                if (!skb)
                        break;

                if (!IS_ALIGNED((unsigned long)skb->data,
                                DP_RX_BUFFER_ALIGN_SIZE)) {
                        skb_pull(skb,
                                 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
                                 skb->data);
                }

                paddr = dma_map_single(ab->dev, skb->data,
                                       skb->len + skb_tailroom(skb),
                                       DMA_FROM_DEVICE);
                if (dma_mapping_error(ab->dev, paddr))
                        goto fail_free_skb;

                if (hw_cc) {
                        spin_lock_bh(&dp->rx_desc_lock);

                        /* Get desc from free list and store in used list
                         * for cleanup purposes
                         *
                         * TODO: pass the removed descs rather than
                         * add/read to optimize
                         */
                        rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list,
                                                           struct ath12k_rx_desc_info,
                                                           list);
                        if (!rx_desc) {
                                spin_unlock_bh(&dp->rx_desc_lock);
                                goto fail_dma_unmap;
                        }

                        rx_desc->skb = skb;
                        cookie = rx_desc->cookie;
                        list_del(&rx_desc->list);
                        list_add_tail(&rx_desc->list, &dp->rx_desc_used_list);

                        spin_unlock_bh(&dp->rx_desc_lock);
                } else {
                        spin_lock_bh(&rx_ring->idr_lock);
                        buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
                                           rx_ring->bufs_max * 3, GFP_ATOMIC);
                        spin_unlock_bh(&rx_ring->idr_lock);
                        if (buf_id < 0)
                                goto fail_dma_unmap;
                        cookie = u32_encode_bits(mac_id,
                                                 DP_RXDMA_BUF_COOKIE_PDEV_ID) |
                                 u32_encode_bits(buf_id,
                                                 DP_RXDMA_BUF_COOKIE_BUF_ID);
                }

                desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
                if (!desc)
                        goto fail_buf_unassign;

                ATH12K_SKB_RXCB(skb)->paddr = paddr;

                num_remain--;

                ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
        }

        ath12k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        return req_entries - num_remain;

fail_buf_unassign:
        if (hw_cc) {
                spin_lock_bh(&dp->rx_desc_lock);
                list_del(&rx_desc->list);
                list_add_tail(&rx_desc->list, &dp->rx_desc_free_list);
                rx_desc->skb = NULL;
                spin_unlock_bh(&dp->rx_desc_lock);
        } else {
                spin_lock_bh(&rx_ring->idr_lock);
                idr_remove(&rx_ring->bufs_idr, buf_id);
                spin_unlock_bh(&rx_ring->idr_lock);
        }
fail_dma_unmap:
        dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
                         DMA_FROM_DEVICE);
fail_free_skb:
        dev_kfree_skb_any(skb);

        ath12k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        return req_entries - num_remain;
}

static int ath12k_dp_rxdma_buf_ring_free(struct ath12k_base *ab,
                                         struct dp_rxdma_ring *rx_ring)
{
        struct sk_buff *skb;
        int buf_id;

        spin_lock_bh(&rx_ring->idr_lock);
        idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
                idr_remove(&rx_ring->bufs_idr, buf_id);
                /* TODO: Understand where internal driver does this dma_unmap
                 * of rxdma_buffer.
                 */
                dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
                                 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
        }

        idr_destroy(&rx_ring->bufs_idr);
        spin_unlock_bh(&rx_ring->idr_lock);

        return 0;
}

static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
{
        struct ath12k_dp *dp = &ab->dp;
        struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

        ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);

        rx_ring = &dp->rxdma_mon_buf_ring;
        ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);

        rx_ring = &dp->tx_mon_buf_ring;
        ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);

        return 0;
}

static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
                                          struct dp_rxdma_ring *rx_ring,
                                          u32 ringtype)
{
        int num_entries;

        num_entries = rx_ring->refill_buf_ring.size /
                ath12k_hal_srng_get_entrysize(ab, ringtype);

        rx_ring->bufs_max = num_entries;
        if ((ringtype == HAL_RXDMA_MONITOR_BUF) || (ringtype == HAL_TX_MONITOR_BUF))
                ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
        else
                ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_entries,
                                            ab->hw_params->hal_params->rx_buf_rbm,
                                            ringtype == HAL_RXDMA_BUF);
        return 0;
}

static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
{
        struct ath12k_dp *dp = &ab->dp;
        struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
        int ret;

        ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
                                             HAL_RXDMA_BUF);
        if (ret) {
                ath12k_warn(ab,
                            "failed to setup HAL_RXDMA_BUF\n");
                return ret;
        }

        if (ab->hw_params->rxdma1_enable) {
                rx_ring = &dp->rxdma_mon_buf_ring;
                ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
                                                     HAL_RXDMA_MONITOR_BUF);
                if (ret) {
                        ath12k_warn(ab,
                                    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
                        return ret;
                }

                rx_ring = &dp->tx_mon_buf_ring;
                ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
                                                     HAL_TX_MONITOR_BUF);
                if (ret) {
                        ath12k_warn(ab,
                                    "failed to setup HAL_TX_MONITOR_BUF\n");
                        return ret;
                }
        }

        return 0;
}

static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
{
        struct ath12k_pdev_dp *dp = &ar->dp;
        struct ath12k_base *ab = ar->ab;
        int i;

        for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
                ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
                ath12k_dp_srng_cleanup(ab, &dp->tx_mon_dst_ring[i]);
        }
}

void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
{
        struct ath12k_dp *dp = &ab->dp;
        int i;

        for (i = 0; i < DP_REO_DST_RING_MAX; i++)
                ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}

int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab)
{
        struct ath12k_dp *dp = &ab->dp;
        int ret;
        int i;

        for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
                ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
                                           HAL_REO_DST, i, 0,
                                           DP_REO_DST_RING_SIZE);
                if (ret) {
                        ath12k_warn(ab, "failed to setup reo_dst_ring\n");
                        goto err_reo_cleanup;
                }
        }

        return 0;

err_reo_cleanup:
        ath12k_dp_rx_pdev_reo_cleanup(ab);

        return ret;
}

static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
{
        struct ath12k_pdev_dp *dp = &ar->dp;
        struct ath12k_base *ab = ar->ab;
        int i;
        int ret;
        u32 mac_id = dp->mac_id;

        for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
                ret = ath12k_dp_srng_setup(ar->ab,
                                           &dp->rxdma_mon_dst_ring[i],
                                           HAL_RXDMA_MONITOR_DST,
                                           0, mac_id + i,
                                           DP_RXDMA_MONITOR_DST_RING_SIZE);
                if (ret) {
                        ath12k_warn(ar->ab,
                                    "failed to setup HAL_RXDMA_MONITOR_DST\n");
                        return ret;
                }

                ret = ath12k_dp_srng_setup(ar->ab,
                                           &dp->tx_mon_dst_ring[i],
                                           HAL_TX_MONITOR_DST,
                                           0, mac_id + i,
                                           DP_TX_MONITOR_DEST_RING_SIZE);
                if (ret) {
                        ath12k_warn(ar->ab,
                                    "failed to setup HAL_TX_MONITOR_DST\n");
                        return ret;
                }
        }

        return 0;
}

void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
{
        struct ath12k_dp *dp = &ab->dp;
        struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
        struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;

        spin_lock_bh(&dp->reo_cmd_lock);
        list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
                list_del(&cmd->list);
                dma_unmap_single(ab->dev, cmd->data.paddr,
                                 cmd->data.size, DMA_BIDIRECTIONAL);
                kfree(cmd->data.vaddr);
                kfree(cmd);
        }

        list_for_each_entry_safe(cmd_cache, tmp_cache,
                                 &dp->reo_cmd_cache_flush_list, list) {
                list_del(&cmd_cache->list);
                dp->reo_cmd_cache_flush_count--;
                dma_unmap_single(ab->dev, cmd_cache->data.paddr,
                                 cmd_cache->data.size, DMA_BIDIRECTIONAL);
                kfree(cmd_cache->data.vaddr);
                kfree(cmd_cache);
        }
        spin_unlock_bh(&dp->reo_cmd_lock);
}

static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
                                   enum hal_reo_cmd_status status)
{
        struct ath12k_dp_rx_tid *rx_tid = ctx;

        if (status != HAL_REO_CMD_SUCCESS)
                ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
                            rx_tid->tid, status);

        dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
                         DMA_BIDIRECTIONAL);
        kfree(rx_tid->vaddr);
        rx_tid->vaddr = NULL;
}

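/* Post a REO command to the command ring. The HAL returns a non-zero
 * cmd_num for an accepted command; when a completion callback is supplied,
 * the command is tracked on reo_cmd_list so the later status event can be
 * matched up and the callback run.
 */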
static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
                                  enum hal_reo_cmd_type type,
                                  struct ath12k_hal_reo_cmd *cmd,
                                  void (*cb)(struct ath12k_dp *dp, void *ctx,
                                             enum hal_reo_cmd_status status))
{
        struct ath12k_dp *dp = &ab->dp;
        struct ath12k_dp_rx_reo_cmd *dp_cmd;
        struct hal_srng *cmd_ring;
        int cmd_num;

        cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
        cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);

        /* cmd_num should start from 1; on failure the error code is returned */
        if (cmd_num < 0)
                return cmd_num;

        /* reo cmd ring descriptors have cmd_num starting from 1 */
        if (cmd_num == 0)
                return -EINVAL;

        if (!cb)
                return 0;

        /* Can this be optimized so that we keep the pending command list only
         * for tid delete command to free up the resource on the command status
         * indication?
         */
        dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);

        if (!dp_cmd)
                return -ENOMEM;

        memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
        dp_cmd->cmd_num = cmd_num;
        dp_cmd->handler = cb;

        spin_lock_bh(&dp->reo_cmd_lock);
        list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
        spin_unlock_bh(&dp->reo_cmd_lock);

        return 0;
}

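/* Flush an rx reorder queue out of the REO hardware cache. The queue is
 * flushed back to front in chunks of the non-QoS descriptor size; only the
 * final flush of the base descriptor asks for a status, whose handler
 * (ath12k_dp_reo_cmd_free) unmaps and frees the host memory.
 */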
static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
                                      struct ath12k_dp_rx_tid *rx_tid)
{
        struct ath12k_hal_reo_cmd cmd = {0};
        unsigned long tot_desc_sz, desc_sz;
        int ret;

        tot_desc_sz = rx_tid->size;
        desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

        while (tot_desc_sz > desc_sz) {
                tot_desc_sz -= desc_sz;
                cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
                cmd.addr_hi = upper_32_bits(rx_tid->paddr);
                ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
                                             HAL_REO_CMD_FLUSH_CACHE, &cmd,
                                             NULL);
                if (ret)
                        ath12k_warn(ab,
                                    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
                                    rx_tid->tid, ret);
        }

        memset(&cmd, 0, sizeof(cmd));
        cmd.addr_lo = lower_32_bits(rx_tid->paddr);
        cmd.addr_hi = upper_32_bits(rx_tid->paddr);
        cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
        ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
                                     HAL_REO_CMD_FLUSH_CACHE,
                                     &cmd, ath12k_dp_reo_cmd_free);
        if (ret) {
                ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
                           rx_tid->tid, ret);
                dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
                                 DMA_BIDIRECTIONAL);
                kfree(rx_tid->vaddr);
                rx_tid->vaddr = NULL;
        }
}

static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
                                      enum hal_reo_cmd_status status)
{
        struct ath12k_base *ab = dp->ab;
        struct ath12k_dp_rx_tid *rx_tid = ctx;
        struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;

        if (status == HAL_REO_CMD_DRAIN) {
                goto free_desc;
        } else if (status != HAL_REO_CMD_SUCCESS) {
                /* Shouldn't happen! Cleanup in case of other failure? */
                ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
                            rx_tid->tid, status);
                return;
        }

        elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
        if (!elem)
                goto free_desc;

        elem->ts = jiffies;
        memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

        spin_lock_bh(&dp->reo_cmd_lock);
        list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
        dp->reo_cmd_cache_flush_count++;

        /* Flush and invalidate aged REO desc from HW cache */
        list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
                                 list) {
                if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
                    time_after(jiffies, elem->ts +
                               msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
                        list_del(&elem->list);
                        dp->reo_cmd_cache_flush_count--;

                        /* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send()
                         * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
                         * is used in only two contexts, one is in this function called
                         * from napi and the other in ath12k_dp_free during core destroy.
                         * Before dp_free, the irqs would be disabled and would wait to
                         * synchronize. Hence there wouldn't be any race against add or
                         * delete to this list. Hence unlock-lock is safe here.
                         */
                        spin_unlock_bh(&dp->reo_cmd_lock);

                        ath12k_dp_reo_cache_flush(ab, &elem->data);
                        kfree(elem);
                        spin_lock_bh(&dp->reo_cmd_lock);
                }
        }
        spin_unlock_bh(&dp->reo_cmd_lock);

        return;
free_desc:
        dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
                         DMA_BIDIRECTIONAL);
        kfree(rx_tid->vaddr);
        rx_tid->vaddr = NULL;
}

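/* The REO queue LUT is a flat array of queue references indexed by
 * peer_id * (IEEE80211_NUM_TIDS + 1) + tid, i.e. one slot per data TID plus
 * the non-QoS TID for every peer. Writing an entry points the hardware at
 * the host-resident reorder queue descriptor; clearing it detaches it.
 */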
static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
                                          dma_addr_t paddr)
{
        struct ath12k_reo_queue_ref *qref;
        struct ath12k_dp *dp = &ab->dp;

        if (!ab->hw_params->reoq_lut_support)
                return;

        /* TODO: based on ML peer or not, select the LUT. below assumes non
         * ML peer
         */
        qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
                        (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);

        qref->info0 = u32_encode_bits(lower_32_bits(paddr),
                                      BUFFER_ADDR_INFO0_ADDR);
        qref->info1 = u32_encode_bits(upper_32_bits(paddr),
                                      BUFFER_ADDR_INFO1_ADDR) |
                      u32_encode_bits(tid, DP_REO_QREF_NUM);
}

static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
{
        struct ath12k_reo_queue_ref *qref;
        struct ath12k_dp *dp = &ab->dp;

        if (!ab->hw_params->reoq_lut_support)
                return;

        /* TODO: based on ML peer or not, select the LUT. below assumes non
         * ML peer
         */
        qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
                        (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);

        qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
        qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
                      u32_encode_bits(tid, DP_REO_QREF_NUM);
}

void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
                                  struct ath12k_peer *peer, u8 tid)
{
        struct ath12k_hal_reo_cmd cmd = {0};
        struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
        int ret;

        if (!rx_tid->active)
                return;

        cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
        cmd.addr_lo = lower_32_bits(rx_tid->paddr);
        cmd.addr_hi = upper_32_bits(rx_tid->paddr);
        cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
        ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
                                     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
                                     ath12k_dp_rx_tid_del_func);
        if (ret) {
                ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
                           tid, ret);
                dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
                                 DMA_BIDIRECTIONAL);
                kfree(rx_tid->vaddr);
                rx_tid->vaddr = NULL;
        }

        ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);

        rx_tid->active = false;
}

/* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted
 * to struct hal_wbm_release_ring, I couldn't figure out the logic behind
 * that.
 */
static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
                                         struct hal_reo_dest_ring *ring,
                                         enum hal_wbm_rel_bm_act action)
{
        struct hal_wbm_release_ring *link_desc = (struct hal_wbm_release_ring *)ring;
        struct hal_wbm_release_ring *desc;
        struct ath12k_dp *dp = &ab->dp;
        struct hal_srng *srng;
        int ret = 0;

        srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

        spin_lock_bh(&srng->lock);

        ath12k_hal_srng_access_begin(ab, srng);

        desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
        if (!desc) {
                ret = -ENOBUFS;
                goto exit;
        }

        ath12k_hal_rx_msdu_link_desc_set(ab, desc, link_desc, action);

exit:
        ath12k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        return ret;
}

static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
                                       bool rel_link_desc)
{
        struct ath12k_base *ab = rx_tid->ab;

        lockdep_assert_held(&ab->base_lock);

        if (rx_tid->dst_ring_desc) {
                if (rel_link_desc)
                        ath12k_dp_rx_link_desc_return(ab, rx_tid->dst_ring_desc,
                                                      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
                kfree(rx_tid->dst_ring_desc);
                rx_tid->dst_ring_desc = NULL;
        }

        rx_tid->cur_sn = 0;
        rx_tid->last_frag_no = 0;
        rx_tid->rx_frag_bitmap = 0;
        __skb_queue_purge(&rx_tid->rx_frags);
}

void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer)
{
        struct ath12k_dp_rx_tid *rx_tid;
        int i;

        lockdep_assert_held(&ar->ab->base_lock);

        for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
                rx_tid = &peer->rx_tid[i];

                ath12k_dp_rx_peer_tid_delete(ar, peer, i);
                ath12k_dp_rx_frags_cleanup(rx_tid, true);

                spin_unlock_bh(&ar->ab->base_lock);
                del_timer_sync(&rx_tid->frag_timer);
                spin_lock_bh(&ar->ab->base_lock);
        }
}

static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
                                         struct ath12k_peer *peer,
                                         struct ath12k_dp_rx_tid *rx_tid,
                                         u32 ba_win_sz, u16 ssn,
                                         bool update_ssn)
{
        struct ath12k_hal_reo_cmd cmd = {0};
        int ret;

        cmd.addr_lo = lower_32_bits(rx_tid->paddr);
        cmd.addr_hi = upper_32_bits(rx_tid->paddr);
        cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
        cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
        cmd.ba_window_size = ba_win_sz;

        if (update_ssn) {
                cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
                cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
        }

        ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
                                     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
                                     NULL);
        if (ret) {
                ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
                            rx_tid->tid, ret);
                return ret;
        }

        rx_tid->ba_win_sz = ba_win_sz;

        return 0;
}

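/* Allocate and install the rx reorder queue descriptor for a peer/tid. The
 * buffer is over-allocated by HAL_LINK_DESC_ALIGN - 1 bytes so it can be
 * aligned manually, then published either through the REO queue LUT (when
 * reoq_lut_support is set) or to firmware via the WMI reorder queue setup
 * command.
 */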
int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
                                u8 tid, u32 ba_win_sz, u16 ssn,
                                enum hal_pn_type pn_type)
{
        struct ath12k_base *ab = ar->ab;
        struct ath12k_dp *dp = &ab->dp;
        struct hal_rx_reo_queue *addr_aligned;
        struct ath12k_peer *peer;
        struct ath12k_dp_rx_tid *rx_tid;
        u32 hw_desc_sz;
        void *vaddr;
        dma_addr_t paddr;
        int ret;

        spin_lock_bh(&ab->base_lock);

        peer = ath12k_peer_find(ab, vdev_id, peer_mac);
        if (!peer) {
                spin_unlock_bh(&ab->base_lock);
                ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
                return -ENOENT;
        }

        if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {
                spin_unlock_bh(&ab->base_lock);
                ath12k_warn(ab, "reo qref table is not setup\n");
                return -EINVAL;
        }

        if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
                ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",
                            peer->peer_id, tid);
                spin_unlock_bh(&ab->base_lock);
                return -EINVAL;
        }

        rx_tid = &peer->rx_tid[tid];
        /* Update the tid queue if it is already setup */
        if (rx_tid->active) {
                paddr = rx_tid->paddr;
                ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
                                                    ba_win_sz, ssn, true);
                spin_unlock_bh(&ab->base_lock);
                if (ret) {
                        ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
                        return ret;
                }

                return ret;
        }

        rx_tid->tid = tid;

        rx_tid->ba_win_sz = ba_win_sz;

        /* TODO: Optimize the memory allocation for qos tid based on
         * the actual BA window size in REO tid update path.
         */
        if (tid == HAL_DESC_REO_NON_QOS_TID)
                hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
        else
                hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

        vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
        if (!vaddr) {
                spin_unlock_bh(&ab->base_lock);
                return -ENOMEM;
        }

        addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

        ath12k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
                                   ssn, pn_type);

        paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
                               DMA_BIDIRECTIONAL);

        ret = dma_mapping_error(ab->dev, paddr);
        if (ret) {
                spin_unlock_bh(&ab->base_lock);
                goto err_mem_free;
        }

        rx_tid->vaddr = vaddr;
        rx_tid->paddr = paddr;
        rx_tid->size = hw_desc_sz;
        rx_tid->active = true;

        if (ab->hw_params->reoq_lut_support) {
                /* Update the REO queue LUT at the corresponding peer id
                 * and tid with qaddr.
                 */
                ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
                spin_unlock_bh(&ab->base_lock);
        } else {
                spin_unlock_bh(&ab->base_lock);
                ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
                                                             paddr, tid, 1, ba_win_sz);
        }

        return ret;

err_mem_free:
        kfree(vaddr);

        return ret;
}

int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
                             struct ieee80211_ampdu_params *params)
{
        struct ath12k_base *ab = ar->ab;
        struct ath12k_sta *arsta = (void *)params->sta->drv_priv;
        int vdev_id = arsta->arvif->vdev_id;
        int ret;

        ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,
                                          params->tid, params->buf_size,
                                          params->ssn, arsta->pn_type);
        if (ret)
                ath12k_warn(ab, "failed to setup rx tid %d\n", ret);

        return ret;
}

int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
                            struct ieee80211_ampdu_params *params)
{
        struct ath12k_base *ab = ar->ab;
        struct ath12k_peer *peer;
        struct ath12k_sta *arsta = (void *)params->sta->drv_priv;
        int vdev_id = arsta->arvif->vdev_id;
        bool active;
        int ret;

        spin_lock_bh(&ab->base_lock);

        peer = ath12k_peer_find(ab, vdev_id, params->sta->addr);
        if (!peer) {
                spin_unlock_bh(&ab->base_lock);
                ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
                return -ENOENT;
        }

        active = peer->rx_tid[params->tid].active;

        if (!active) {
                spin_unlock_bh(&ab->base_lock);
                return 0;
        }

        ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
        spin_unlock_bh(&ab->base_lock);
        if (ret) {
                ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
                            params->tid, ret);
                return ret;
        }

        return ret;
}

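/* The 48-bit pn_size programmed below matches the 6-byte packet number
 * (or TKIP TSC) carried by all of the ciphers handled in the switch.
 */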
int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif,
                                       const u8 *peer_addr,
                                       enum set_key_cmd key_cmd,
                                       struct ieee80211_key_conf *key)
{
        struct ath12k *ar = arvif->ar;
        struct ath12k_base *ab = ar->ab;
        struct ath12k_hal_reo_cmd cmd = {0};
        struct ath12k_peer *peer;
        struct ath12k_dp_rx_tid *rx_tid;
        u8 tid;
        int ret = 0;

        /* NOTE: Enable PN/TSC replay check offload only for unicast frames.
         * We use mac80211 PN/TSC replay check functionality for bcast/mcast
         * for now.
         */
        if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
                return 0;

        cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
        cmd.upd0 = HAL_REO_CMD_UPD0_PN |
                    HAL_REO_CMD_UPD0_PN_SIZE |
                    HAL_REO_CMD_UPD0_PN_VALID |
                    HAL_REO_CMD_UPD0_PN_CHECK |
                    HAL_REO_CMD_UPD0_SVLD;

        switch (key->cipher) {
        case WLAN_CIPHER_SUITE_TKIP:
        case WLAN_CIPHER_SUITE_CCMP:
        case WLAN_CIPHER_SUITE_CCMP_256:
        case WLAN_CIPHER_SUITE_GCMP:
        case WLAN_CIPHER_SUITE_GCMP_256:
                if (key_cmd == SET_KEY) {
                        cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
                        cmd.pn_size = 48;
                }
                break;
        default:
                break;
        }

        spin_lock_bh(&ab->base_lock);

        peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
        if (!peer) {
                spin_unlock_bh(&ab->base_lock);
                ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n",
                            peer_addr);
                return -ENOENT;
        }

        for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
                rx_tid = &peer->rx_tid[tid];
                if (!rx_tid->active)
                        continue;
                cmd.addr_lo = lower_32_bits(rx_tid->paddr);
                cmd.addr_hi = upper_32_bits(rx_tid->paddr);
                ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
                                             HAL_REO_CMD_UPDATE_RX_QUEUE,
                                             &cmd, NULL);
                if (ret) {
                        ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n",
                                    tid, peer_addr, ret);
                        break;
                }
        }

        spin_unlock_bh(&ab->base_lock);

        return ret;
}

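/* Return the stats slot already assigned to peer_id, or the first unused
 * slot. Slots are filled in order, so the first entry without a valid peer
 * id marks the end of the occupied range.
 */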
static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
                                      u16 peer_id)
{
        int i;

        for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
                if (ppdu_stats->user_stats[i].is_valid_peer_id) {
                        if (peer_id == ppdu_stats->user_stats[i].peer_id)
                                return i;
                } else {
                        return i;
                }
        }

        return -EINVAL;
}

static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
                                           u16 tag, u16 len, const void *ptr,
                                           void *data)
{
        const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
        const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
        const struct htt_ppdu_stats_user_rate *user_rate;
        struct htt_ppdu_stats_info *ppdu_info;
        struct htt_ppdu_user_stats *user_stats;
        int cur_user;
        u16 peer_id;

        ppdu_info = data;

        switch (tag) {
        case HTT_PPDU_STATS_TAG_COMMON:
                if (len < sizeof(struct htt_ppdu_stats_common)) {
                        ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
                                    len, tag);
                        return -EINVAL;
                }
                memcpy(&ppdu_info->ppdu_stats.common, ptr,
                       sizeof(struct htt_ppdu_stats_common));
                break;
        case HTT_PPDU_STATS_TAG_USR_RATE:
                if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
                        ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
                                    len, tag);
                        return -EINVAL;
                }
                user_rate = ptr;
                peer_id = le16_to_cpu(user_rate->sw_peer_id);
                cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
                                                      peer_id);
                if (cur_user < 0)
                        return -EINVAL;
                user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
                user_stats->peer_id = peer_id;
                user_stats->is_valid_peer_id = true;
                memcpy(&user_stats->rate, ptr,
                       sizeof(struct htt_ppdu_stats_user_rate));
                user_stats->tlv_flags |= BIT(tag);
                break;
        case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
                if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
                        ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
                                    len, tag);
                        return -EINVAL;
                }

                cmplt_cmn = ptr;
                peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
                cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
                                                      peer_id);
                if (cur_user < 0)
                        return -EINVAL;
                user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
                user_stats->peer_id = peer_id;
                user_stats->is_valid_peer_id = true;
                memcpy(&user_stats->cmpltn_cmn, ptr,
                       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
                user_stats->tlv_flags |= BIT(tag);
                break;
        case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
                if (len <
                    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
                        ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
                                    len, tag);
                        return -EINVAL;
                }

                ba_status = ptr;
                peer_id = le16_to_cpu(ba_status->sw_peer_id);
                cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
                                                      peer_id);
                if (cur_user < 0)
                        return -EINVAL;
                user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
                user_stats->peer_id = peer_id;
                user_stats->is_valid_peer_id = true;
                memcpy(&user_stats->ack_ba, ptr,
                       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
                user_stats->tlv_flags |= BIT(tag);
                break;
        }
        return 0;
}

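/* Walk a buffer of HTT TLVs: each TLV starts with a 32-bit header carrying
 * tag and length bitfields, with the payload following immediately. The
 * walk advances by the header size plus the reported length and stops on a
 * truncated header or payload.
 */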
static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
                                  int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
                                              const void *ptr, void *data),
                                  void *data)
{
        const struct htt_tlv *tlv;
        const void *begin = ptr;
        u16 tlv_tag, tlv_len;
        int ret = -EINVAL;

        while (len > 0) {
                if (len < sizeof(*tlv)) {
                        ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
                                   ptr - begin, len, sizeof(*tlv));
                        return -EINVAL;
                }
                tlv = (struct htt_tlv *)ptr;
                tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
                tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
                ptr += sizeof(*tlv);
                len -= sizeof(*tlv);

                if (tlv_len > len) {
                        ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
                                   tlv_tag, ptr - begin, len, tlv_len);
                        return -EINVAL;
                }
                ret = iter(ab, tlv_tag, tlv_len, ptr, data);
                if (ret == -ENOMEM)
                        return ret;

                ptr += tlv_len;
                len -= tlv_len;
        }
        return 0;
}

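/* Translate one user's PPDU stats into mac80211 rate info. rate_flags packs
 * preamble, bandwidth, nss, mcs, gi and dcm; note the decode below biases
 * the bandwidth field by -2 and nss by +1 to match the HTT encoding.
 */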
static void
ath12k_update_per_peer_tx_stats(struct ath12k *ar,
                                struct htt_ppdu_stats *ppdu_stats, u8 user)
{
        struct ath12k_base *ab = ar->ab;
        struct ath12k_peer *peer;
        struct ieee80211_sta *sta;
        struct ath12k_sta *arsta;
        struct htt_ppdu_stats_user_rate *user_rate;
        struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
        struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
        struct htt_ppdu_stats_common *common = &ppdu_stats->common;
        int ret;
        u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
        u32 v, succ_bytes = 0;
        u16 tones, rate = 0, succ_pkts = 0;
        u32 tx_duration = 0;
        u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
        bool is_ampdu = false;

        if (!usr_stats)
                return;

        if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
                return;

        if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
                is_ampdu =
                        HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

        if (usr_stats->tlv_flags &
            BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
                succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
                succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
                                          HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
                tid = le32_get_bits(usr_stats->ack_ba.info,
                                    HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
        }

        if (common->fes_duration_us)
                tx_duration = le32_to_cpu(common->fes_duration_us);

        user_rate = &usr_stats->rate;
        flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
        bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
        nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
        mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
        sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
        dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
        /* Note: If host configured fixed rates and in some other special
         * cases, the broadcast/management frames are sent in different rates.
         * Should firmware rate control be skipped for these?
         */

        if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
                ath12k_warn(ab, "Invalid HE mcs %d peer stats",  mcs);
                return;
        }
1374
1375         if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
1376                 ath12k_warn(ab, "Invalid VHT mcs %d peer stats",  mcs);
1377                 return;
1378         }
1379
1380         if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
1381                 ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
1382                             mcs, nss);
1383                 return;
1384         }
1385
1386         if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
1387                 ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
1388                                                             flags,
1389                                                             &rate_idx,
1390                                                             &rate);
1391                 if (ret < 0)
1392                         return;
1393         }
1394
1395         rcu_read_lock();
1396         spin_lock_bh(&ab->base_lock);
1397         peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id);
1398
1399         if (!peer || !peer->sta) {
1400                 spin_unlock_bh(&ab->base_lock);
1401                 rcu_read_unlock();
1402                 return;
1403         }
1404
1405         sta = peer->sta;
1406         arsta = (struct ath12k_sta *)sta->drv_priv;
1407
1408         memset(&arsta->txrate, 0, sizeof(arsta->txrate));
1409
1410         switch (flags) {
1411         case WMI_RATE_PREAMBLE_OFDM:
1412                 arsta->txrate.legacy = rate;
1413                 break;
1414         case WMI_RATE_PREAMBLE_CCK:
1415                 arsta->txrate.legacy = rate;
1416                 break;
1417         case WMI_RATE_PREAMBLE_HT:
1418                 arsta->txrate.mcs = mcs + 8 * (nss - 1);
1419                 arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
1420                 if (sgi)
1421                         arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1422                 break;
1423         case WMI_RATE_PREAMBLE_VHT:
1424                 arsta->txrate.mcs = mcs;
1425                 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
1426                 if (sgi)
1427                         arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1428                 break;
1429         case WMI_RATE_PREAMBLE_HE:
1430                 arsta->txrate.mcs = mcs;
1431                 arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
1432                 arsta->txrate.he_dcm = dcm;
1433                 arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
1434                 tones = le16_to_cpu(user_rate->ru_end) -
1435                         le16_to_cpu(user_rate->ru_start) + 1;
1436                 v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
1437                 arsta->txrate.he_ru_alloc = v;
1438                 break;
1439         }
1440
1441         arsta->txrate.nss = nss;
1442         arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
1443         arsta->tx_duration += tx_duration;
1444         memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
1445
1446         /* PPDU stats reported for mgmt packets don't carry valid tx bytes,
1447          * so skip the peer stats update for them.
1448          */
1449         if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
1450                 memset(peer_stats, 0, sizeof(*peer_stats));
1451                 peer_stats->succ_pkts = succ_pkts;
1452                 peer_stats->succ_bytes = succ_bytes;
1453                 peer_stats->is_ampdu = is_ampdu;
1454                 peer_stats->duration = tx_duration;
1455                 peer_stats->ba_fails =
1456                         HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
1457                         HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
1458         }
1459
1460         spin_unlock_bh(&ab->base_lock);
1461         rcu_read_unlock();
1462 }
1463
1464 static void ath12k_htt_update_ppdu_stats(struct ath12k *ar,
1465                                          struct htt_ppdu_stats *ppdu_stats)
1466 {
1467         u8 user;
1468
1469         for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
1470                 ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user);
1471 }
1472
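/* Look up (or allocate) the ppdu stats descriptor cached for a given
 * ppdu_id. The ppdu_stats_info list acts as a bounded FIFO cache: once it
 * grows past HTT_PPDU_DESC_MAX_DEPTH entries, the oldest entry is flushed
 * into the per-peer tx stats and freed before a new one is allocated.
 */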
1473 static
1474 struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar,
1475                                                         u32 ppdu_id)
1476 {
1477         struct htt_ppdu_stats_info *ppdu_info;
1478
1479         lockdep_assert_held(&ar->data_lock);
1480         if (!list_empty(&ar->ppdu_stats_info)) {
1481                 list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
1482                         if (ppdu_info->ppdu_id == ppdu_id)
1483                                 return ppdu_info;
1484                 }
1485
1486                 if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
1487                         ppdu_info = list_first_entry(&ar->ppdu_stats_info,
1488                                                      typeof(*ppdu_info), list);
1489                         list_del(&ppdu_info->list);
1490                         ar->ppdu_stat_list_depth--;
1491                         ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
1492                         kfree(ppdu_info);
1493                 }
1494         }
1495
1496         ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
1497         if (!ppdu_info)
1498                 return NULL;
1499
1500         list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
1501         ar->ppdu_stat_list_depth++;
1502
1503         return ppdu_info;
1504 }
1505
1506 static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer,
1507                                        struct htt_ppdu_user_stats *usr_stats)
1508 {
1509         peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
1510         peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
1511         peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
1512         peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
1513         peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
1514         peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
1515         peer->ppdu_stats_delayba.resp_rate_flags =
1516                 le32_to_cpu(usr_stats->rate.resp_rate_flags);
1517
1518         peer->delayba_flag = true;
1519 }
1520
1521 static void ath12k_copy_to_bar(struct ath12k_peer *peer,
1522                                struct htt_ppdu_user_stats *usr_stats)
1523 {
1524         usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
1525         usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
1526         usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
1527         usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
1528         usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
1529         usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
1530         usr_stats->rate.resp_rate_flags =
1531                 cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);
1532
1533         peer->delayba_flag = false;
1534 }
1535
1536 static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
1537                                       struct sk_buff *skb)
1538 {
1539         struct ath12k_htt_ppdu_stats_msg *msg;
1540         struct htt_ppdu_stats_info *ppdu_info;
1541         struct ath12k_peer *peer = NULL;
1542         struct htt_ppdu_user_stats *usr_stats = NULL;
1543         u32 peer_id = 0;
1544         struct ath12k *ar;
1545         int ret, i;
1546         u8 pdev_id;
1547         u32 ppdu_id, len;
1548
1549         msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
1550         len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
1551         pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
1552         ppdu_id = le32_to_cpu(msg->ppdu_id);
1553
1554         rcu_read_lock();
1555         ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
1556         if (!ar) {
1557                 ret = -EINVAL;
1558                 goto exit;
1559         }
1560
1561         spin_lock_bh(&ar->data_lock);
1562         ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id);
1563         if (!ppdu_info) {
1564                 spin_unlock_bh(&ar->data_lock);
1565                 ret = -EINVAL;
1566                 goto exit;
1567         }
1568
1569         ppdu_info->ppdu_id = ppdu_id;
1570         ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
1571                                      ath12k_htt_tlv_ppdu_stats_parse,
1572                                      (void *)ppdu_info);
1573         if (ret) {
1574                 spin_unlock_bh(&ar->data_lock);
1575                 ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
1576                 goto exit;
1577         }
1578
1579         /* back up the data rate tlv of every delayed-BA peer */
1580         if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
1581             (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
1582             ppdu_info->delay_ba) {
1583                 for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) {
1584                         peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
1585                         spin_lock_bh(&ab->base_lock);
1586                         peer = ath12k_peer_find_by_id(ab, peer_id);
1587                         if (!peer) {
1588                                 spin_unlock_bh(&ab->base_lock);
1589                                 continue;
1590                         }
1591
1592                         usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
1593                         if (usr_stats->delay_ba)
1594                                 ath12k_copy_to_delay_stats(peer, usr_stats);
1595                         spin_unlock_bh(&ab->base_lock);
1596                 }
1597         }
1598
1599         /* restore each peer's saved data rate tlv into the MU-BAR tlv */
1600         if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR &&
1601             (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) {
1602                 for (i = 0; i < ppdu_info->bar_num_users; i++) {
1603                         peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
1604                         spin_lock_bh(&ab->base_lock);
1605                         peer = ath12k_peer_find_by_id(ab, peer_id);
1606                         if (!peer) {
1607                                 spin_unlock_bh(&ab->base_lock);
1608                                 continue;
1609                         }
1610
1611                         usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
1612                         if (peer->delayba_flag)
1613                                 ath12k_copy_to_bar(peer, usr_stats);
1614                         spin_unlock_bh(&ab->base_lock);
1615                 }
1616         }
1617
1618         spin_unlock_bh(&ar->data_lock);
1619
1620 exit:
1621         rcu_read_unlock();
1622
1623         return ret;
1624 }
1625
1626 static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
1627                                                 struct sk_buff *skb)
1628 {
1629         struct ath12k_htt_mlo_offset_msg *msg;
1630         struct ath12k_pdev *pdev;
1631         struct ath12k *ar;
1632         u8 pdev_id;
1633
1634         msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
1635         pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
1636                                HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);
1637         ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
1638
1639         if (!ar) {
1640                 ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
1641                 return;
1642         }
1643
1644         spin_lock_bh(&ar->data_lock);
1645         pdev = ar->pdev;
1646
1647         pdev->timestamp.info = __le32_to_cpu(msg->info);
1648         pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
1649         pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
1650         pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
1651         pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
1652         pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
1653         pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
1654         pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);
1655
1656         spin_unlock_bh(&ar->data_lock);
1657 }
1658
1659 void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
1660                                        struct sk_buff *skb)
1661 {
1662         struct ath12k_dp *dp = &ab->dp;
1663         struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
1664         enum htt_t2h_msg_type type;
1665         u16 peer_id;
1666         u8 vdev_id;
1667         u8 mac_addr[ETH_ALEN];
1668         u16 peer_mac_h16;
1669         u16 ast_hash = 0;
1670         u16 hw_peer_id;
1671
1672         type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);
1673
1674         ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type: 0x%x\n", type);
1675
1676         switch (type) {
1677         case HTT_T2H_MSG_TYPE_VERSION_CONF:
1678                 dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
1679                                                       HTT_T2H_VERSION_CONF_MAJOR);
1680                 dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
1681                                                       HTT_T2H_VERSION_CONF_MINOR);
1682                 complete(&dp->htt_tgt_version_received);
1683                 break;
1684         /* TODO: remove unused peer map versions after testing */
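        /* PEER_MAP carries the vdev id, SW peer id and peer MAC address;
         * PEER_MAP2 additionally reports the AST hash and HW peer id;
         * PEER_MAP3 reuses the SW peer id in place of the HW peer id.
         */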
1685         case HTT_T2H_MSG_TYPE_PEER_MAP:
1686                 vdev_id = le32_get_bits(resp->peer_map_ev.info,
1687                                         HTT_T2H_PEER_MAP_INFO_VDEV_ID);
1688                 peer_id = le32_get_bits(resp->peer_map_ev.info,
1689                                         HTT_T2H_PEER_MAP_INFO_PEER_ID);
1690                 peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
1691                                              HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
1692                 ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
1693                                        peer_mac_h16, mac_addr);
1694                 ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
1695                 break;
1696         case HTT_T2H_MSG_TYPE_PEER_MAP2:
1697                 vdev_id = le32_get_bits(resp->peer_map_ev.info,
1698                                         HTT_T2H_PEER_MAP_INFO_VDEV_ID);
1699                 peer_id = le32_get_bits(resp->peer_map_ev.info,
1700                                         HTT_T2H_PEER_MAP_INFO_PEER_ID);
1701                 peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
1702                                              HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
1703                 ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
1704                                        peer_mac_h16, mac_addr);
1705                 ast_hash = le32_get_bits(resp->peer_map_ev.info2,
1706                                          HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
1707                 hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
1708                                            HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
1709                 ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1710                                       hw_peer_id);
1711                 break;
1712         case HTT_T2H_MSG_TYPE_PEER_MAP3:
1713                 vdev_id = le32_get_bits(resp->peer_map_ev.info,
1714                                         HTT_T2H_PEER_MAP_INFO_VDEV_ID);
1715                 peer_id = le32_get_bits(resp->peer_map_ev.info,
1716                                         HTT_T2H_PEER_MAP_INFO_PEER_ID);
1717                 peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
1718                                              HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
1719                 ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
1720                                        peer_mac_h16, mac_addr);
1721                 ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1722                                       peer_id);
1723                 break;
1724         case HTT_T2H_MSG_TYPE_PEER_UNMAP:
1725         case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
1726                 peer_id = le32_get_bits(resp->peer_unmap_ev.info,
1727                                         HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
1728                 ath12k_peer_unmap_event(ab, peer_id);
1729                 break;
1730         case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
1731                 ath12k_htt_pull_ppdu_stats(ab, skb);
1732                 break;
1733         case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
1734                 break;
1735         case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
1736                 ath12k_htt_mlo_offset_event_handler(ab, skb);
1737                 break;
1738         default:
1739                 ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
1740                            type);
1741                 break;
1742         }
1743
1744         dev_kfree_skb_any(skb);
1745 }
1746
1747 static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
1748                                       struct sk_buff_head *msdu_list,
1749                                       struct sk_buff *first, struct sk_buff *last,
1750                                       u8 l3pad_bytes, int msdu_len)
1751 {
1752         struct ath12k_base *ab = ar->ab;
1753         struct sk_buff *skb;
1754         struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
1755         int buf_first_hdr_len, buf_first_len;
1756         struct hal_rx_desc *ldesc;
1757         int space_extra, rem_len, buf_len;
1758         u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
1759
1760         /* As the msdu is spread across multiple rx buffers,
1761          * find the offset to the start of msdu for computing
1762          * the length of the msdu in the first buffer.
1763          */
1764         buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
1765         buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
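
        /* Worked example, with illustrative values only: if DP_RX_BUFFER_SIZE
         * were 2048, hal_rx_desc_sz 128 and l3pad_bytes 2, then
         * buf_first_hdr_len = 130 and buf_first_len = 1918, so an msdu_len of
         * 3000 would leave rem_len = 1082 to copy from later buffers.
         */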
1766
1767         if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
1768                 skb_put(first, buf_first_hdr_len + msdu_len);
1769                 skb_pull(first, buf_first_hdr_len);
1770                 return 0;
1771         }
1772
1773         ldesc = (struct hal_rx_desc *)last->data;
1774         rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc);
1775         rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc);
1776
1777         /* MSDU spans over multiple buffers because the length of the MSDU
1778          * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
1779          * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
1780          */
1781         skb_put(first, DP_RX_BUFFER_SIZE);
1782         skb_pull(first, buf_first_hdr_len);
1783
1784         /* When an MSDU is spread over multiple buffers, the MSDU_END
1785          * tlvs are valid only in the last buffer. Copy those tlvs.
1786          */
1787         ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
1788
1789         space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
1790         if (space_extra > 0 &&
1791             (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
1792                 /* Free up all buffers of the MSDU */
1793                 while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1794                         rxcb = ATH12K_SKB_RXCB(skb);
1795                         if (!rxcb->is_continuation) {
1796                                 dev_kfree_skb_any(skb);
1797                                 break;
1798                         }
1799                         dev_kfree_skb_any(skb);
1800                 }
1801                 return -ENOMEM;
1802         }
1803
1804         rem_len = msdu_len - buf_first_len;
1805         while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1806                 rxcb = ATH12K_SKB_RXCB(skb);
1807                 if (rxcb->is_continuation)
1808                         buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
1809                 else
1810                         buf_len = rem_len;
1811
1812                 if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
1813                         WARN_ON_ONCE(1);
1814                         dev_kfree_skb_any(skb);
1815                         return -EINVAL;
1816                 }
1817
1818                 skb_put(skb, buf_len + hal_rx_desc_sz);
1819                 skb_pull(skb, hal_rx_desc_sz);
1820                 skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1821                                           buf_len);
1822                 dev_kfree_skb_any(skb);
1823
1824                 rem_len -= buf_len;
1825                 if (!rxcb->is_continuation)
1826                         break;
1827         }
1828
1829         return 0;
1830 }
1831
1832 static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1833                                                       struct sk_buff *first)
1834 {
1835         struct sk_buff *skb;
1836         struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
1837
1838         if (!rxcb->is_continuation)
1839                 return first;
1840
1841         skb_queue_walk(msdu_list, skb) {
1842                 rxcb = ATH12K_SKB_RXCB(skb);
1843                 if (!rxcb->is_continuation)
1844                         return skb;
1845         }
1846
1847         return NULL;
1848 }
1849
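/* Map the HW checksum verdicts onto the stack's offload API: if either the
 * IP or the L4 checksum failed, hand the skb up as CHECKSUM_NONE so the
 * stack verifies it again, otherwise mark it CHECKSUM_UNNECESSARY.
 */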
1850 static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu)
1851 {
1852         struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
1853         struct ath12k_base *ab = ar->ab;
1854         bool ip_csum_fail, l4_csum_fail;
1855
1856         ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rxcb->rx_desc);
1857         l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rxcb->rx_desc);
1858
1859         msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1860                           CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1861 }
1862
1863 static int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar,
1864                                        enum hal_encrypt_type enctype)
1865 {
1866         switch (enctype) {
1867         case HAL_ENCRYPT_TYPE_OPEN:
1868         case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1869         case HAL_ENCRYPT_TYPE_TKIP_MIC:
1870                 return 0;
1871         case HAL_ENCRYPT_TYPE_CCMP_128:
1872                 return IEEE80211_CCMP_MIC_LEN;
1873         case HAL_ENCRYPT_TYPE_CCMP_256:
1874                 return IEEE80211_CCMP_256_MIC_LEN;
1875         case HAL_ENCRYPT_TYPE_GCMP_128:
1876         case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1877                 return IEEE80211_GCMP_MIC_LEN;
1878         case HAL_ENCRYPT_TYPE_WEP_40:
1879         case HAL_ENCRYPT_TYPE_WEP_104:
1880         case HAL_ENCRYPT_TYPE_WEP_128:
1881         case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1882         case HAL_ENCRYPT_TYPE_WAPI:
1883                 break;
1884         }
1885
1886         ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1887         return 0;
1888 }
1889
1890 static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar,
1891                                          enum hal_encrypt_type enctype)
1892 {
1893         switch (enctype) {
1894         case HAL_ENCRYPT_TYPE_OPEN:
1895                 return 0;
1896         case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1897         case HAL_ENCRYPT_TYPE_TKIP_MIC:
1898                 return IEEE80211_TKIP_IV_LEN;
1899         case HAL_ENCRYPT_TYPE_CCMP_128:
1900                 return IEEE80211_CCMP_HDR_LEN;
1901         case HAL_ENCRYPT_TYPE_CCMP_256:
1902                 return IEEE80211_CCMP_256_HDR_LEN;
1903         case HAL_ENCRYPT_TYPE_GCMP_128:
1904         case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1905                 return IEEE80211_GCMP_HDR_LEN;
1906         case HAL_ENCRYPT_TYPE_WEP_40:
1907         case HAL_ENCRYPT_TYPE_WEP_104:
1908         case HAL_ENCRYPT_TYPE_WEP_128:
1909         case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1910         case HAL_ENCRYPT_TYPE_WAPI:
1911                 break;
1912         }
1913
1914         ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1915         return 0;
1916 }
1917
1918 static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar,
1919                                        enum hal_encrypt_type enctype)
1920 {
1921         switch (enctype) {
1922         case HAL_ENCRYPT_TYPE_OPEN:
1923         case HAL_ENCRYPT_TYPE_CCMP_128:
1924         case HAL_ENCRYPT_TYPE_CCMP_256:
1925         case HAL_ENCRYPT_TYPE_GCMP_128:
1926         case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1927                 return 0;
1928         case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1929         case HAL_ENCRYPT_TYPE_TKIP_MIC:
1930                 return IEEE80211_TKIP_ICV_LEN;
1931         case HAL_ENCRYPT_TYPE_WEP_40:
1932         case HAL_ENCRYPT_TYPE_WEP_104:
1933         case HAL_ENCRYPT_TYPE_WEP_128:
1934         case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1935         case HAL_ENCRYPT_TYPE_WAPI:
1936                 break;
1937         }
1938
1939         ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1940         return 0;
1941 }
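
/* Resulting per-enctype overheads, for example: CCMP-128 adds an 8 byte
 * CCMP header and an 8 byte MIC with no separate ICV, while TKIP adds an
 * 8 byte IV/ext-IV header, a 4 byte ICV and, with TKIP_MIC, an 8 byte
 * Michael MIC at the tail.
 */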
1942
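/* Native wifi undecap: the frame arrives with an 802.11 header but without
 * the QoS control field. Conceptually:
 *
 *   in:  [ 802.11 hdr ][ payload ]
 *   out: [ 802.11 hdr ][ QoS ctl ][ crypto params (if IV kept) ][ payload ]
 *
 * done by pulling the header, pushing the crypto params and QoS control,
 * then pushing the saved header back on top.
 */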
1943 static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar,
1944                                          struct sk_buff *msdu,
1945                                          enum hal_encrypt_type enctype,
1946                                          struct ieee80211_rx_status *status)
1947 {
1948         struct ath12k_base *ab = ar->ab;
1949         struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
1950         u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
1951         struct ieee80211_hdr *hdr;
1952         size_t hdr_len;
1953         u8 *crypto_hdr;
1954         u16 qos_ctl;
1955
1956         /* pull decapped header */
1957         hdr = (struct ieee80211_hdr *)msdu->data;
1958         hdr_len = ieee80211_hdrlen(hdr->frame_control);
1959         skb_pull(msdu, hdr_len);
1960
1961         /* Rebuild QoS header */
1962         hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1963
1964         /* Reset the order bit as the HT_Control header is stripped */
1965         hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
1966
1967         qos_ctl = rxcb->tid;
1968
1969         if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc))
1970                 qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
1971
1972         /* TODO: Add other QoS ctl fields when required */
1973
1974         /* copy decap header before overwriting for reuse below */
1975         memcpy(decap_hdr, hdr, hdr_len);
1976
1977         /* Rebuild crypto header for mac80211 use */
1978         if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1979                 crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype));
1980                 ath12k_dp_rx_desc_get_crypto_header(ar->ab,
1981                                                     rxcb->rx_desc, crypto_hdr,
1982                                                     enctype);
1983         }
1984
1985         memcpy(skb_push(msdu,
1986                         IEEE80211_QOS_CTL_LEN), &qos_ctl,
1987                         IEEE80211_QOS_CTL_LEN);
1988         memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
1989 }
1990
1991 static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu,
1992                                        enum hal_encrypt_type enctype,
1993                                        struct ieee80211_rx_status *status,
1994                                        bool decrypted)
1995 {
1996         struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
1997         struct ieee80211_hdr *hdr;
1998         size_t hdr_len;
1999         size_t crypto_len;
2000
2001         if (!(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
2003                 WARN_ON_ONCE(1);
2004                 return;
2005         }
2006
2007         skb_trim(msdu, msdu->len - FCS_LEN);
2008
2009         if (!decrypted)
2010                 return;
2011
2012         hdr = (void *)msdu->data;
2013
2014         /* Tail */
2015         if (status->flag & RX_FLAG_IV_STRIPPED) {
2016                 skb_trim(msdu, msdu->len -
2017                          ath12k_dp_rx_crypto_mic_len(ar, enctype));
2018
2019                 skb_trim(msdu, msdu->len -
2020                          ath12k_dp_rx_crypto_icv_len(ar, enctype));
2021         } else {
2022                 /* MIC */
2023                 if (status->flag & RX_FLAG_MIC_STRIPPED)
2024                         skb_trim(msdu, msdu->len -
2025                                  ath12k_dp_rx_crypto_mic_len(ar, enctype));
2026
2027                 /* ICV */
2028                 if (status->flag & RX_FLAG_ICV_STRIPPED)
2029                         skb_trim(msdu, msdu->len -
2030                                  ath12k_dp_rx_crypto_icv_len(ar, enctype));
2031         }
2032
2033         /* MMIC (Michael MIC, 8 bytes - the same size as the CCMP MIC) */
2034         if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
2035             !ieee80211_has_morefrags(hdr->frame_control) &&
2036             enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
2037                 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
2038
2039         /* Head */
2040         if (status->flag & RX_FLAG_IV_STRIPPED) {
2041                 hdr_len = ieee80211_hdrlen(hdr->frame_control);
2042                 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
2043
2044                 memmove(msdu->data + crypto_len, msdu->data, hdr_len);
2045                 skb_pull(msdu, crypto_len);
2046         }
2047 }
2048
2049 static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
2050                                               struct sk_buff *msdu,
2051                                               struct ath12k_skb_rxcb *rxcb,
2052                                               struct ieee80211_rx_status *status,
2053                                               enum hal_encrypt_type enctype)
2054 {
2055         struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2056         struct ath12k_base *ab = ar->ab;
2057         size_t hdr_len, crypto_len;
2058         struct ieee80211_hdr *hdr;
2059         u16 qos_ctl;
2060         __le16 fc;
2061         u8 *crypto_hdr;
2062
2063         if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2064                 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
2065                 crypto_hdr = skb_push(msdu, crypto_len);
2066                 ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
2067         }
2068
2069         fc = cpu_to_le16(ath12k_dp_rxdesc_get_mpdu_frame_ctrl(ab, rx_desc));
2070         hdr_len = ieee80211_hdrlen(fc);
2071         skb_push(msdu, hdr_len);
2072         hdr = (struct ieee80211_hdr *)msdu->data;
2073         hdr->frame_control = fc;
2074
2075         /* Get wifi header from rx_desc */
2076         ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, hdr);
2077
2078         if (rxcb->is_mcbc)
2079                 status->flag &= ~RX_FLAG_PN_VALIDATED;
2080
2081         /* Add QoS header */
2082         if (ieee80211_is_data_qos(hdr->frame_control)) {
2083                 qos_ctl = rxcb->tid;
2084                 if (ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc))
2085                         qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
2086
2087                 /* TODO: Add other QoS ctl fields when required */
2088                 memcpy(msdu->data + (hdr_len - IEEE80211_QOS_CTL_LEN),
2089                        &qos_ctl, IEEE80211_QOS_CTL_LEN);
2090         }
2091 }
2092
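/* Ethernet undecap: rebuild the 802.11 frame mac80211 expects from the
 * Ethernet II frame delivered by the HW. Conceptually:
 *
 *   in:  [ da | sa | type ][ payload ]
 *   out: [ 802.11 hdr (+ QoS/crypto) ][ LLC/SNAP aa aa 03 00 00 00, type ][ payload ]
 *
 * with DA/SA copied back into the rebuilt 802.11 header at the end.
 */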
2093 static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar,
2094                                        struct sk_buff *msdu,
2095                                        enum hal_encrypt_type enctype,
2096                                        struct ieee80211_rx_status *status)
2097 {
2098         struct ieee80211_hdr *hdr;
2099         struct ethhdr *eth;
2100         u8 da[ETH_ALEN];
2101         u8 sa[ETH_ALEN];
2102         struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2103         struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}};
2104
2105         eth = (struct ethhdr *)msdu->data;
2106         ether_addr_copy(da, eth->h_dest);
2107         ether_addr_copy(sa, eth->h_source);
2108         rfc.snap_type = eth->h_proto;
2109         skb_pull(msdu, sizeof(*eth));
2110         memcpy(skb_push(msdu, sizeof(rfc)), &rfc,
2111                sizeof(rfc));
2112         ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype);
2113
2114         /* the original 802.11 header has a different DA and, in the
2115          * 4-address case, may also have a different SA
2116          */
2117         hdr = (struct ieee80211_hdr *)msdu->data;
2118         ether_addr_copy(ieee80211_get_DA(hdr), da);
2119         ether_addr_copy(ieee80211_get_SA(hdr), sa);
2120 }
2121
2122 static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu,
2123                                    struct hal_rx_desc *rx_desc,
2124                                    enum hal_encrypt_type enctype,
2125                                    struct ieee80211_rx_status *status,
2126                                    bool decrypted)
2127 {
2128         struct ath12k_base *ab = ar->ab;
2129         u8 decap;
2130         struct ethhdr *ehdr;
2131
2132         decap = ath12k_dp_rx_h_decap_type(ab, rx_desc);
2133
2134         switch (decap) {
2135         case DP_RX_DECAP_TYPE_NATIVE_WIFI:
2136                 ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status);
2137                 break;
2138         case DP_RX_DECAP_TYPE_RAW:
2139                 ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
2140                                            decrypted);
2141                 break;
2142         case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
2143                 ehdr = (struct ethhdr *)msdu->data;
2144
2145                 /* mac80211 allows fast path only for authorized STA */
2146                 if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
2147                         ATH12K_SKB_RXCB(msdu)->is_eapol = true;
2148                         ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
2149                         break;
2150                 }
2151
2152                 /* PN for mcast packets will be validated in mac80211;
2153                  * remove eth header and add 802.11 header.
2154                  */
2155                 if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted)
2156                         ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
2157                 break;
2158         case DP_RX_DECAP_TYPE_8023:
2159                 /* TODO: Handle undecap for these formats */
2160                 break;
2161         }
2162 }
2163
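/* Resolve the peer an MSDU belongs to: first by the peer id recorded in the
 * rx descriptor, then falling back to an address 2 (TA) lookup when the id
 * is absent or stale. The caller must hold base_lock.
 */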
2164 struct ath12k_peer *
2165 ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu)
2166 {
2167         struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2168         struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2169         struct ath12k_peer *peer = NULL;
2170
2171         lockdep_assert_held(&ab->base_lock);
2172
2173         if (rxcb->peer_id)
2174                 peer = ath12k_peer_find_by_id(ab, rxcb->peer_id);
2175
2176         if (peer)
2177                 return peer;
2178
2179         if (!rx_desc || !(ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
2180                 return NULL;
2181
2182         peer = ath12k_peer_find_by_addr(ab,
2183                                         ath12k_dp_rxdesc_get_mpdu_start_addr2(ab,
2184                                                                               rx_desc));
2185         return peer;
2186 }
2187
2188 static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
2189                                 struct sk_buff *msdu,
2190                                 struct hal_rx_desc *rx_desc,
2191                                 struct ieee80211_rx_status *rx_status)
2192 {
2193         bool  fill_crypto_hdr;
2194         struct ath12k_base *ab = ar->ab;
2195         struct ath12k_skb_rxcb *rxcb;
2196         enum hal_encrypt_type enctype;
2197         bool is_decrypted = false;
2198         struct ieee80211_hdr *hdr;
2199         struct ath12k_peer *peer;
2200         u32 err_bitmap;
2201
2202         /* PN for multicast packets will be checked in mac80211 */
2203         rxcb = ATH12K_SKB_RXCB(msdu);
2204         fill_crypto_hdr = ath12k_dp_rx_h_is_mcbc(ar->ab, rx_desc);
2205         rxcb->is_mcbc = fill_crypto_hdr;
2206
2207         if (rxcb->is_mcbc)
2208                 rxcb->peer_id = ath12k_dp_rx_h_peer_id(ar->ab, rx_desc);
2209
2210         spin_lock_bh(&ar->ab->base_lock);
2211         peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
2212         if (peer) {
2213                 if (rxcb->is_mcbc)
2214                         enctype = peer->sec_type_grp;
2215                 else
2216                         enctype = peer->sec_type;
2217         } else {
2218                 enctype = HAL_ENCRYPT_TYPE_OPEN;
2219         }
2220         spin_unlock_bh(&ar->ab->base_lock);
2221
2222         err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
2223         if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
2224                 is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc);
2225
2226         /* Clear per-MPDU flags while leaving per-PPDU flags intact */
2227         rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2228                              RX_FLAG_MMIC_ERROR |
2229                              RX_FLAG_DECRYPTED |
2230                              RX_FLAG_IV_STRIPPED |
2231                              RX_FLAG_MMIC_STRIPPED);
2232
2233         if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
2234                 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2235         if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC)
2236                 rx_status->flag |= RX_FLAG_MMIC_ERROR;
2237
2238         if (is_decrypted) {
2239                 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
2240
2241                 if (fill_crypto_hdr)
2242                         rx_status->flag |= RX_FLAG_MIC_STRIPPED |
2243                                         RX_FLAG_ICV_STRIPPED;
2244                 else
2245                         rx_status->flag |= RX_FLAG_IV_STRIPPED |
2246                                            RX_FLAG_PN_VALIDATED;
2247         }
2248
2249         ath12k_dp_rx_h_csum_offload(ar, msdu);
2250         ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
2251                                enctype, rx_status, is_decrypted);
2252
2253         if (!is_decrypted || fill_crypto_hdr)
2254                 return;
2255
2256         if (ath12k_dp_rx_h_decap_type(ar->ab, rx_desc) !=
2257             DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
2258                 hdr = (void *)msdu->data;
2259                 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2260         }
2261 }
2262
2263 static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
2264                                 struct ieee80211_rx_status *rx_status)
2265 {
2266         struct ath12k_base *ab = ar->ab;
2267         struct ieee80211_supported_band *sband;
2268         enum rx_msdu_start_pkt_type pkt_type;
2269         u8 bw;
2270         u8 rate_mcs, nss;
2271         u8 sgi;
2272         bool is_cck;
2273
2274         pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
2275         bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
2276         rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
2277         nss = ath12k_dp_rx_h_nss(ab, rx_desc);
2278         sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
2279
2280         switch (pkt_type) {
2281         case RX_MSDU_START_PKT_TYPE_11A:
2282         case RX_MSDU_START_PKT_TYPE_11B:
2283                 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2284                 sband = &ar->mac.sbands[rx_status->band];
2285                 rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
2286                                                                 is_cck);
2287                 break;
2288         case RX_MSDU_START_PKT_TYPE_11N:
2289                 rx_status->encoding = RX_ENC_HT;
2290                 if (rate_mcs > ATH12K_HT_MCS_MAX) {
2291                         ath12k_warn(ar->ab,
2292                                     "Received invalid mcs %d in HT mode\n",
2293                                     rate_mcs);
2294                         break;
2295                 }
2296                 rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2297                 if (sgi)
2298                         rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2299                 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2300                 break;
2301         case RX_MSDU_START_PKT_TYPE_11AC:
2302                 rx_status->encoding = RX_ENC_VHT;
2303                 rx_status->rate_idx = rate_mcs;
2304                 if (rate_mcs > ATH12K_VHT_MCS_MAX) {
2305                         ath12k_warn(ar->ab,
2306                                     "Received invalid mcs %d in VHT mode\n",
2307                                     rate_mcs);
2308                         break;
2309                 }
2310                 rx_status->nss = nss;
2311                 if (sgi)
2312                         rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2313                 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2314                 break;
2315         case RX_MSDU_START_PKT_TYPE_11AX:
2316                 rx_status->rate_idx = rate_mcs;
2317                 if (rate_mcs > ATH12K_HE_MCS_MAX) {
2318                         ath12k_warn(ar->ab,
2319                                     "Received invalid mcs %d in HE mode\n",
2320                                     rate_mcs);
2321                         break;
2322                 }
2323                 rx_status->encoding = RX_ENC_HE;
2324                 rx_status->nss = nss;
2325                 rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
2326                 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2327                 break;
2328         }
2329 }
2330
2331 void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
2332                          struct ieee80211_rx_status *rx_status)
2333 {
2334         struct ath12k_base *ab = ar->ab;
2335         u8 channel_num;
2336         u32 center_freq, meta_data;
2337         struct ieee80211_channel *channel;
2338
2339         rx_status->freq = 0;
2340         rx_status->rate_idx = 0;
2341         rx_status->nss = 0;
2342         rx_status->encoding = RX_ENC_LEGACY;
2343         rx_status->bw = RATE_INFO_BW_20;
2344         rx_status->enc_flags = 0;
2345
2346         rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2347
2348         meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
2349         channel_num = meta_data;
2350         center_freq = meta_data >> 16;
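
        /* Illustrative example: for primary channel 36 at a 5180 MHz center
         * frequency the target would report meta_data = (5180 << 16) | 36,
         * which resolves to NL80211_BAND_5GHZ below.
         */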
2351
2352         if (center_freq >= 5935 && center_freq <= 7105) {
2353                 rx_status->band = NL80211_BAND_6GHZ;
2354         } else if (channel_num >= 1 && channel_num <= 14) {
2355                 rx_status->band = NL80211_BAND_2GHZ;
2356         } else if (channel_num >= 36 && channel_num <= 173) {
2357                 rx_status->band = NL80211_BAND_5GHZ;
2358         } else {
2359                 spin_lock_bh(&ar->data_lock);
2360                 channel = ar->rx_channel;
2361                 if (channel) {
2362                         rx_status->band = channel->band;
2363                         channel_num =
2364                                 ieee80211_frequency_to_channel(channel->center_freq);
2365                 }
2366                 spin_unlock_bh(&ar->data_lock);
2367                 ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
2368                                 rx_desc, sizeof(*rx_desc));
2369         }
2370
2371         rx_status->freq = ieee80211_channel_to_frequency(channel_num,
2372                                                          rx_status->band);
2373
2374         ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
2375 }
2376
2377 static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
2378                                       struct sk_buff *msdu,
2379                                       struct ieee80211_rx_status *status)
2380 {
2381         struct ath12k_base *ab = ar->ab;
2382         static const struct ieee80211_radiotap_he known = {
2383                 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
2384                                      IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
2385                 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
2386         };
2387         struct ieee80211_radiotap_he *he;
2388         struct ieee80211_rx_status *rx_status;
2389         struct ieee80211_sta *pubsta;
2390         struct ath12k_peer *peer;
2391         struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2392         u8 decap = DP_RX_DECAP_TYPE_RAW;
2393         bool is_mcbc = rxcb->is_mcbc;
2394         bool is_eapol = rxcb->is_eapol;
2395
2396         if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
2397             !(status->flag & RX_FLAG_SKIP_MONITOR)) {
2398                 he = skb_push(msdu, sizeof(known));
2399                 memcpy(he, &known, sizeof(known));
2400                 status->flag |= RX_FLAG_RADIOTAP_HE;
2401         }
2402
2403         if (!(status->flag & RX_FLAG_ONLY_MONITOR))
2404                 decap = ath12k_dp_rx_h_decap_type(ab, rxcb->rx_desc);
2405
2406         spin_lock_bh(&ab->base_lock);
2407         peer = ath12k_dp_rx_h_find_peer(ab, msdu);
2408
2409         pubsta = peer ? peer->sta : NULL;
2410
2411         spin_unlock_bh(&ab->base_lock);
2412
2413         ath12k_dbg(ab, ATH12K_DBG_DATA,
2414                    "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2415                    msdu,
2416                    msdu->len,
2417                    peer ? peer->addr : NULL,
2418                    rxcb->tid,
2419                    is_mcbc ? "mcast" : "ucast",
2420                    ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc),
2421                    (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
2422                    (status->encoding == RX_ENC_HT) ? "ht" : "",
2423                    (status->encoding == RX_ENC_VHT) ? "vht" : "",
2424                    (status->encoding == RX_ENC_HE) ? "he" : "",
2425                    (status->bw == RATE_INFO_BW_40) ? "40" : "",
2426                    (status->bw == RATE_INFO_BW_80) ? "80" : "",
2427                    (status->bw == RATE_INFO_BW_160) ? "160" : "",
2428                    status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
2429                    status->rate_idx,
2430                    status->nss,
2431                    status->freq,
2432                    status->band, status->flag,
2433                    !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
2434                    !!(status->flag & RX_FLAG_MMIC_ERROR),
2435                    !!(status->flag & RX_FLAG_AMSDU_MORE));
2436
2437         ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
2438                         msdu->data, msdu->len);
2439
2440         rx_status = IEEE80211_SKB_RXCB(msdu);
2441         *rx_status = *status;
2442
2443         /* TODO: trace rx packet */
2444
2445         /* The PN for multicast packets is not validated in HW,
2446          * so skip the 802.3 rx path for them.
2447          * Also, fast_rx expects the STA to be authorized, hence
2448          * EAPOL packets are sent via the slow path.
2449          */
2450         if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
2451             !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
2452                 rx_status->flag |= RX_FLAG_8023;
2453
2454         ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
2455 }
2456
2457 static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
2458                                      struct sk_buff *msdu,
2459                                      struct sk_buff_head *msdu_list,
2460                                      struct ieee80211_rx_status *rx_status)
2461 {
2462         struct ath12k_base *ab = ar->ab;
2463         struct hal_rx_desc *rx_desc, *lrx_desc;
2464         struct ath12k_skb_rxcb *rxcb;
2465         struct sk_buff *last_buf;
2466         u8 l3_pad_bytes;
2467         u16 msdu_len;
2468         int ret;
2469         u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
2470
2471         last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
2472         if (!last_buf) {
2473                 ath12k_warn(ab,
2474                             "No valid Rx buffer to access MSDU_END tlv\n");
2475                 ret = -EIO;
2476                 goto free_out;
2477         }
2478
2479         rx_desc = (struct hal_rx_desc *)msdu->data;
2480         lrx_desc = (struct hal_rx_desc *)last_buf->data;
2481         if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) {
2482                 ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n");
2483                 ret = -EIO;
2484                 goto free_out;
2485         }
2486
2487         rxcb = ATH12K_SKB_RXCB(msdu);
2488         rxcb->rx_desc = rx_desc;
2489         msdu_len = ath12k_dp_rx_h_msdu_len(ab, lrx_desc);
2490         l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc);
2491
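        /* Three cases: a fragment (its length was already fixed up during
         * reassembly, so only the HAL descriptor is pulled), a single-buffer
         * MSDU (set the exact length, then strip the descriptor and L3 pad),
         * or a multi-buffer MSDU (coalesce the continuations into one skb).
         */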
2492         if (rxcb->is_frag) {
2493                 skb_pull(msdu, hal_rx_desc_sz);
2494         } else if (!rxcb->is_continuation) {
2495                 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
2496                         ret = -EINVAL;
2497                         ath12k_warn(ab, "invalid msdu len %u\n", msdu_len);
2498                         ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
2499                                         sizeof(*rx_desc));
2500                         goto free_out;
2501                 }
2502                 skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
2503                 skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
2504         } else {
2505                 ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list,
2506                                                  msdu, last_buf,
2507                                                  l3_pad_bytes, msdu_len);
2508                 if (ret) {
2509                         ath12k_warn(ab,
2510                                     "failed to coalesce msdu rx buffer %d\n", ret);
2511                         goto free_out;
2512                 }
2513         }
2514
2515         ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
2516         ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
2517
2518         rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
2519
2520         return 0;
2521
2522 free_out:
2523         return ret;
2524 }
2525
2526 static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
2527                                                   struct napi_struct *napi,
2528                                                   struct sk_buff_head *msdu_list,
2529                                                   int ring_id)
2530 {
2531         struct ieee80211_rx_status rx_status = {0};
2532         struct ath12k_skb_rxcb *rxcb;
2533         struct sk_buff *msdu;
2534         struct ath12k *ar;
2535         u8 mac_id;
2536         int ret;
2537
2538         if (skb_queue_empty(msdu_list))
2539                 return;
2540
2541         rcu_read_lock();
2542
2543         while ((msdu = __skb_dequeue(msdu_list))) {
2544                 rxcb = ATH12K_SKB_RXCB(msdu);
2545                 mac_id = rxcb->mac_id;
2546                 ar = ab->pdevs[mac_id].ar;
2547                 if (!rcu_dereference(ab->pdevs_active[mac_id])) {
2548                         dev_kfree_skb_any(msdu);
2549                         continue;
2550                 }
2551
2552                 if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
2553                         dev_kfree_skb_any(msdu);
2554                         continue;
2555                 }
2556
2557                 ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
2558                 if (ret) {
2559                         ath12k_dbg(ab, ATH12K_DBG_DATA,
2560                                    "Unable to process msdu %d\n", ret);
2561                         dev_kfree_skb_any(msdu);
2562                         continue;
2563                 }
2564
2565                 ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
2566         }
2567
2568         rcu_read_unlock();
2569 }
2570
2571 int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
2572                          struct napi_struct *napi, int budget)
2573 {
2574         struct ath12k_rx_desc_info *desc_info;
2575         struct ath12k_dp *dp = &ab->dp;
2576         struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
2577         struct hal_reo_dest_ring *desc;
2578         int num_buffs_reaped = 0;
2579         struct sk_buff_head msdu_list;
2580         struct ath12k_skb_rxcb *rxcb;
2581         int total_msdu_reaped = 0;
2582         struct hal_srng *srng;
2583         struct sk_buff *msdu;
2584         bool done = false;
2585         int mac_id;
2586         u64 desc_va;
2587
2588         __skb_queue_head_init(&msdu_list);
2589
2590         srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
2591
2592         spin_lock_bh(&srng->lock);
2593
2594 try_again:
2595         ath12k_hal_srng_access_begin(ab, srng);
2596
2597         while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
2598                 enum hal_reo_dest_ring_push_reason push_reason;
2599                 u32 cookie;
2600
2601                 cookie = le32_get_bits(desc->buf_addr_info.info1,
2602                                        BUFFER_ADDR_INFO1_SW_COOKIE);
2603
2604                 mac_id = le32_get_bits(desc->info0,
2605                                        HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
2606
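                /* The descriptor's virtual address comes back from HW split
                 * across two 32 bit words and is reassembled below; e.g.
                 * (illustrative) buf_va_hi 0xffff8881 with buf_va_lo
                 * 0x2a4c0000 yields the pointer 0xffff88812a4c0000.
                 */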
2607                 desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
2608                            le32_to_cpu(desc->buf_va_lo));
2609                 desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
2610
2611                 /* retry manual desc retrieval */
2612                 if (!desc_info) {
2613                         desc_info = ath12k_dp_get_rx_desc(ab, cookie);
2614                         if (!desc_info) {
2615                                 ath12k_warn(ab, "Invalid cookie in manual desc retrieval\n");
2616                                 continue;
2617                         }
2618                 }
2619
2620                 if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
2621                         ath12k_warn(ab, "Check HW CC implementation");
2622
2623                 msdu = desc_info->skb;
2624                 desc_info->skb = NULL;
2625
2626                 spin_lock_bh(&dp->rx_desc_lock);
2627                 list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
2628                 spin_unlock_bh(&dp->rx_desc_lock);
2629
2630                 rxcb = ATH12K_SKB_RXCB(msdu);
2631                 dma_unmap_single(ab->dev, rxcb->paddr,
2632                                  msdu->len + skb_tailroom(msdu),
2633                                  DMA_FROM_DEVICE);
2634
2635                 num_buffs_reaped++;
2636
2637                 push_reason = le32_get_bits(desc->info0,
2638                                             HAL_REO_DEST_RING_INFO0_PUSH_REASON);
2639                 if (push_reason !=
2640                     HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
2641                         dev_kfree_skb_any(msdu);
2642                         ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
2643                         continue;
2644                 }
2645
2646                 rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
2647                                          RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
2648                 rxcb->is_last_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
2649                                         RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
2650                 rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
2651                                            RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
2652                 rxcb->mac_id = mac_id;
2653                 rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data,
2654                                               RX_MPDU_DESC_META_DATA_PEER_ID);
2655                 rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0,
2656                                           RX_MPDU_DESC_INFO0_TID);
2657
2658                 __skb_queue_tail(&msdu_list, msdu);
2659
2660                 if (!rxcb->is_continuation) {
2661                         total_msdu_reaped++;
2662                         done = true;
2663                 } else {
2664                         done = false;
2665                 }
2666
2667                 if (total_msdu_reaped >= budget)
2668                         break;
2669         }
2670
2671         /* HW might have updated the head pointer after we cached it.
2672          * In this case, even though there are entries in the ring, we'll
2673          * get a NULL rx_desc. Give the read another try with the updated
2674          * cached head pointer so that we can reap the complete MPDU in the
2675          * current rx processing.
2676          */
2677         if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) {
2678                 ath12k_hal_srng_access_end(ab, srng);
2679                 goto try_again;
2680         }
2681
2682         ath12k_hal_srng_access_end(ab, srng);
2683
2684         spin_unlock_bh(&srng->lock);
2685
2686         if (!total_msdu_reaped)
2687                 goto exit;
2688
2689         /* TODO: Move to implicit BM? */
2690         ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped,
2691                                     ab->hw_params->hal_params->rx_buf_rbm, true);
2692
2693         ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
2694                                               ring_id);
2695
2696 exit:
2697         return total_msdu_reaped;
2698 }
2699
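/* Fragment reassembly timeout handler: if the fragment bitmap is already
 * complete when the timer fires there is nothing left to do; otherwise the
 * partially reassembled MPDU is discarded.
 */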
2700 static void ath12k_dp_rx_frag_timer(struct timer_list *timer)
2701 {
2702         struct ath12k_dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
2703
2704         spin_lock_bh(&rx_tid->ab->base_lock);
2705         if (rx_tid->last_frag_no &&
2706             rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
2707                 spin_unlock_bh(&rx_tid->ab->base_lock);
2708                 return;
2709         }
2710         ath12k_dp_rx_frags_cleanup(rx_tid, true);
2711         spin_unlock_bh(&rx_tid->ab->base_lock);
2712 }
2713
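/* Set up per-peer fragment handling: allocate a Michael MIC transform for
 * TKIP verification and initialize the reassembly timer and fragment queue
 * for every TID of the peer.
 */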
2714 int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id)
2715 {
2716         struct ath12k_base *ab = ar->ab;
2717         struct crypto_shash *tfm;
2718         struct ath12k_peer *peer;
2719         struct ath12k_dp_rx_tid *rx_tid;
2720         int i;
2721
2722         tfm = crypto_alloc_shash("michael_mic", 0, 0);
2723         if (IS_ERR(tfm))
2724                 return PTR_ERR(tfm);
2725
2726         spin_lock_bh(&ab->base_lock);
2727
2728         peer = ath12k_peer_find(ab, vdev_id, peer_mac);
2729         if (!peer) {
2730                 spin_unlock_bh(&ab->base_lock);
2731                 ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
2732                 return -ENOENT;
2733         }
2734
2735         for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
2736                 rx_tid = &peer->rx_tid[i];
2737                 rx_tid->ab = ab;
2738                 timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0);
2739                 skb_queue_head_init(&rx_tid->rx_frags);
2740         }
2741
2742         peer->tfm_mmic = tfm;
2743         spin_unlock_bh(&ab->base_lock);
2744
2745         return 0;
2746 }
2747
2748 static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
2749                                       struct ieee80211_hdr *hdr, u8 *data,
2750                                       size_t data_len, u8 *mic)
2751 {
2752         SHASH_DESC_ON_STACK(desc, tfm);
2753         u8 mic_hdr[16] = {0};
2754         u8 tid = 0;
2755         int ret;
2756
2757         if (!tfm)
2758                 return -EINVAL;
2759
2760         desc->tfm = tfm;
2761
2762         ret = crypto_shash_setkey(tfm, key, 8);
2763         if (ret)
2764                 goto out;
2765
2766         ret = crypto_shash_init(desc);
2767         if (ret)
2768                 goto out;
2769
2770         /* Michael MIC pseudo-header: DA (6), SA (6), priority (1), 3 zero bytes */
2771         memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
2772         memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
2773         if (ieee80211_is_data_qos(hdr->frame_control))
2774                 tid = ieee80211_get_tid(hdr);
2775         mic_hdr[12] = tid;
2776
2777         ret = crypto_shash_update(desc, mic_hdr, 16);
2778         if (ret)
2779                 goto out;
2780         ret = crypto_shash_update(desc, data, data_len);
2781         if (ret)
2782                 goto out;
2783         ret = crypto_shash_final(desc, mic);
2784 out:
2785         shash_desc_zero(desc);
2786         return ret;
2787 }
2788
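/* Verify the Michael MIC carried at the tail of a TKIP-protected,
 * defragmented MSDU. On mismatch the frame is still delivered to mac80211
 * with RX_FLAG_MMIC_ERROR set so that the MIC failure is accounted for
 * (e.g. to trigger TKIP countermeasures).
 */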
2789 static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer *peer,
2790                                           struct sk_buff *msdu)
2791 {
2792         struct ath12k_base *ab = ar->ab;
2793         struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
2794         struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
2795         struct ieee80211_key_conf *key_conf;
2796         struct ieee80211_hdr *hdr;
2797         u8 mic[IEEE80211_CCMP_MIC_LEN];
2798         int head_len, tail_len, ret;
2799         size_t data_len;
2800         u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
2801         u8 *key, *data;
2802         u8 key_idx;
2803
2804         if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
2805                 return 0;
2806
2807         hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
2808         hdr_len = ieee80211_hdrlen(hdr->frame_control);
2809         head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
2810         tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
2811
2812         if (!is_multicast_ether_addr(hdr->addr1))
2813                 key_idx = peer->ucast_keyidx;
2814         else
2815                 key_idx = peer->mcast_keyidx;
2816
2817         key_conf = peer->keys[key_idx];
2818
2819         data = msdu->data + head_len;
2820         data_len = msdu->len - head_len - tail_len;
2821         key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
2822
2823         ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
2824         if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
2825                 goto mic_fail;
2826
2827         return 0;
2828
2829 mic_fail:
2830         (ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
2831         (ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;
2832
2833         rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
2834                     RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
2835         skb_pull(msdu, hal_rx_desc_sz);
2836
2837         ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
2838         ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
2839                                HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
2840         ieee80211_rx(ar->hw, msdu);
2841         return -EINVAL;
2842 }
2843
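/* Strip crypto material from a single rx fragment according to the
 * RX_FLAG_*_STRIPPED flags: trim the MIC and/or ICV from the tail and,
 * when the IV is stripped, move the 802.11 header forward over it.
 */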
2844 static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu,
2845                                         enum hal_encrypt_type enctype, u32 flags)
2846 {
2847         struct ieee80211_hdr *hdr;
2848         size_t hdr_len;
2849         size_t crypto_len;
2850         u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
2851
2852         if (!flags)
2853                 return;
2854
2855         hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
2856
2857         if (flags & RX_FLAG_MIC_STRIPPED)
2858                 skb_trim(msdu, msdu->len -
2859                          ath12k_dp_rx_crypto_mic_len(ar, enctype));
2860
2861         if (flags & RX_FLAG_ICV_STRIPPED)
2862                 skb_trim(msdu, msdu->len -
2863                          ath12k_dp_rx_crypto_icv_len(ar, enctype));
2864
2865         if (flags & RX_FLAG_IV_STRIPPED) {
2866                 hdr_len = ieee80211_hdrlen(hdr->frame_control);
2867                 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
2868
2869                 memmove(msdu->data + hal_rx_desc_sz + crypto_len,
2870                         msdu->data + hal_rx_desc_sz, hdr_len);
2871                 skb_pull(msdu, crypto_len);
2872         }
2873 }
2874
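/* Coalesce the sorted fragments of rx_tid into one MSDU: strip crypto
 * IV/ICV/MIC bytes from the intermediate fragments, drop the 802.11
 * headers of all but the first fragment and append the payloads to the
 * first fragment (expanding its tailroom if needed) before verifying the
 * TKIP MIC.
 */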
2875 static int ath12k_dp_rx_h_defrag(struct ath12k *ar,
2876                                  struct ath12k_peer *peer,
2877                                  struct ath12k_dp_rx_tid *rx_tid,
2878                                  struct sk_buff **defrag_skb)
2879 {
2880         struct ath12k_base *ab = ar->ab;
2881         struct hal_rx_desc *rx_desc;
2882         struct sk_buff *skb, *first_frag, *last_frag;
2883         struct ieee80211_hdr *hdr;
2884         enum hal_encrypt_type enctype;
2885         bool is_decrypted = false;
2886         int msdu_len = 0;
2887         int extra_space;
2888         u32 flags, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
2889
2890         first_frag = skb_peek(&rx_tid->rx_frags);
2891         last_frag = skb_peek_tail(&rx_tid->rx_frags);
2892
2893         skb_queue_walk(&rx_tid->rx_frags, skb) {
2894                 flags = 0;
2895                 rx_desc = (struct hal_rx_desc *)skb->data;
2896                 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
2897
2898                 enctype = ath12k_dp_rx_h_enctype(ab, rx_desc);
2899                 if (enctype != HAL_ENCRYPT_TYPE_OPEN)
2900                         is_decrypted = ath12k_dp_rx_h_is_decrypted(ab,
2901                                                                    rx_desc);
2902
2903                 if (is_decrypted) {
2904                         if (skb != first_frag)
2905                                 flags |= RX_FLAG_IV_STRIPPED;
2906                         if (skb != last_frag)
2907                                 flags |= RX_FLAG_ICV_STRIPPED |
2908                                          RX_FLAG_MIC_STRIPPED;
2909                 }
2910
2911                 /* RX fragments are always raw packets */
2912                 if (skb != last_frag)
2913                         skb_trim(skb, skb->len - FCS_LEN);
2914                 ath12k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
2915
2916                 if (skb != first_frag)
2917                         skb_pull(skb, hal_rx_desc_sz +
2918                                       ieee80211_hdrlen(hdr->frame_control));
2919                 msdu_len += skb->len;
2920         }
2921
2922         extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
2923         if (extra_space > 0 &&
2924             (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
2925                 return -ENOMEM;
2926
2927         __skb_unlink(first_frag, &rx_tid->rx_frags);
2928         while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
2929                 skb_put_data(first_frag, skb->data, skb->len);
2930                 dev_kfree_skb_any(skb);
2931         }
2932
2933         hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
2934         hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
2935         ATH12K_SKB_RXCB(first_frag)->is_frag = 1;
2936
2937         if (ath12k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
2938                 first_frag = NULL;
2939
2940         *defrag_skb = first_frag;
2941         return 0;
2942 }
2943
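/* Re-inject the reassembled MSDU into the REO entrance ring so that the
 * HW can apply the regular PN check and routing to the full MPDU. The
 * link descriptor saved with fragment 0 supplies the buffer address info
 * and the destination indication reused here.
 */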
2944 static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
2945                                               struct ath12k_dp_rx_tid *rx_tid,
2946                                               struct sk_buff *defrag_skb)
2947 {
2948         struct ath12k_base *ab = ar->ab;
2949         struct ath12k_dp *dp = &ab->dp;
2950         struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
2951         struct hal_reo_entrance_ring *reo_ent_ring;
2952         struct hal_reo_dest_ring *reo_dest_ring;
2953         struct dp_link_desc_bank *link_desc_banks;
2954         struct hal_rx_msdu_link *msdu_link;
2955         struct hal_rx_msdu_details *msdu0;
2956         struct hal_srng *srng;
2957         dma_addr_t link_paddr, buf_paddr;
2958         u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
2959         u32 cookie, hal_rx_desc_sz, dest_ring_info0;
2960         int ret;
2961         struct ath12k_rx_desc_info *desc_info;
2962         u8 dst_ind;
2963
2964         hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
2965         link_desc_banks = dp->link_desc_banks;
2966         reo_dest_ring = rx_tid->dst_ring_desc;
2967
2968         ath12k_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info,
2969                                         &link_paddr, &cookie);
2970         desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
2971
2972         msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
2973                         (link_paddr - link_desc_banks[desc_bank].paddr));
2974         msdu0 = &msdu_link->msdu_link[0];
2975         msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0);
2976         dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND);
2977
2978         memset(msdu0, 0, sizeof(*msdu0));
2979
2980         msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) |
2981                     u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) |
2982                     u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) |
2983                     u32_encode_bits(defrag_skb->len - hal_rx_desc_sz,
2984                                     RX_MSDU_DESC_INFO0_MSDU_LENGTH) |
2985                     u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) |
2986                     u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA);
2987         msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info);
2988         msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info);
2989
2990         /* change msdu len in hal rx desc */
2991         ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
2992
2993         buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
2994                                    defrag_skb->len + skb_tailroom(defrag_skb),
2995                                    DMA_FROM_DEVICE);
2996         if (dma_mapping_error(ab->dev, buf_paddr))
2997                 return -ENOMEM;
2998
2999         spin_lock_bh(&dp->rx_desc_lock);
3000         desc_info = list_first_entry_or_null(&dp->rx_desc_free_list,
3001                                              struct ath12k_rx_desc_info,
3002                                              list);
3003         if (!desc_info) {
3004                 spin_unlock_bh(&dp->rx_desc_lock);
3005                 ath12k_warn(ab, "failed to find rx desc for reinject\n");
3006                 ret = -ENOMEM;
3007                 goto err_unmap_dma;
3008         }
3009
3010         desc_info->skb = defrag_skb;
3011
3012         list_del(&desc_info->list);
3013         list_add_tail(&desc_info->list, &dp->rx_desc_used_list);
3014         spin_unlock_bh(&dp->rx_desc_lock);
3015
3016         ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;
3017
3018         ath12k_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr,
3019                                         desc_info->cookie,
3020                                         HAL_RX_BUF_RBM_SW3_BM);
3021
3022         /* Fill mpdu details into the reo entrance ring */
3023         srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id];
3024
3025         spin_lock_bh(&srng->lock);
3026         ath12k_hal_srng_access_begin(ab, srng);
3027
3028         reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng);
3029         if (!reo_ent_ring) {
3030                 ath12k_hal_srng_access_end(ab, srng);
3031                 spin_unlock_bh(&srng->lock);
3032                 ret = -ENOSPC;
3033                 goto err_free_desc;
3034         }
3035         memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
3036
3037         ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
3038                                         cookie,
3039                                         HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST);
3040
3041         mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
3042                     u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
3043                     u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) |
3044                     u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) |
3045                     u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID);
3046
3047         reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info);
3048         reo_ent_ring->rx_mpdu_info.peer_meta_data =
3049                 reo_dest_ring->rx_mpdu_info.peer_meta_data;
3050
3051         /* Firmware expects the physical address to be filled in queue_addr_lo
3052          * in the MLO scenario; in the non-MLO case the peer meta data needs
3053          * to be filled instead.
3054          * TODO: Handle the MLO scenario.
3055          */
3056         reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
3057         reo_ent_ring->info0 = le32_encode_bits(dst_ind,
3058                                                HAL_REO_ENTR_RING_INFO0_DEST_IND);
3059
3060         reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
3061                                                HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM);
3062         dest_ring_info0 = le32_get_bits(reo_dest_ring->info0,
3063                                         HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
3064         reo_ent_ring->info2 =
3065                 cpu_to_le32(u32_get_bits(dest_ring_info0,
3066                                          HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID));
3067
3068         ath12k_hal_srng_access_end(ab, srng);
3069         spin_unlock_bh(&srng->lock);
3070
3071         return 0;
3072
3073 err_free_desc:
3074         spin_lock_bh(&dp->rx_desc_lock);
3075         list_del(&desc_info->list);
3076         list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
3077         desc_info->skb = NULL;
3078         spin_unlock_bh(&dp->rx_desc_lock);
3079 err_unmap_dma:
3080         dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
3081                          DMA_FROM_DEVICE);
3082         return ret;
3083 }
3084
3085 static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab,
3086                                     struct sk_buff *a, struct sk_buff *b)
3087 {
3088         int frag1, frag2;
3089
3090         frag1 = ath12k_dp_rx_h_frag_no(ab, a);
3091         frag2 = ath12k_dp_rx_h_frag_no(ab, b);
3092
3093         return frag1 - frag2;
3094 }
3095
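/* Insert cur_frag into frag_list, keeping the list sorted by fragment
 * number (simple insertion sort; fragment counts are small).
 */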
3096 static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab,
3097                                       struct sk_buff_head *frag_list,
3098                                       struct sk_buff *cur_frag)
3099 {
3100         struct sk_buff *skb;
3101         int cmp;
3102
3103         skb_queue_walk(frag_list, skb) {
3104                 cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag);
3105                 if (cmp < 0)
3106                         continue;
3107                 __skb_queue_before(frag_list, skb, cur_frag);
3108                 return;
3109         }
3110         __skb_queue_tail(frag_list, cur_frag);
3111 }
3112
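/* Extract the 48-bit packet number from the CCMP/GCMP header following
 * the 802.11 header: PN0/PN1 are in bytes 0-1, bytes 2-3 carry the
 * reserved/key-id fields and PN2-PN5 follow in bytes 4-7.
 */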
3113 static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb)
3114 {
3115         struct ieee80211_hdr *hdr;
3116         u64 pn = 0;
3117         u8 *ehdr;
3118         u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
3119
3120         hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3121         ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
3122
3123         pn = ehdr[0];
3124         pn |= (u64)ehdr[1] << 8;
3125         pn |= (u64)ehdr[4] << 16;
3126         pn |= (u64)ehdr[5] << 24;
3127         pn |= (u64)ehdr[6] << 32;
3128         pn |= (u64)ehdr[7] << 40;
3129
3130         return pn;
3131 }
3132
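/* For CCMP/GCMP ciphers each fragment must carry a PN exactly one greater
 * than the previous fragment; any gap indicates an injected or replayed
 * fragment, so the whole MPDU is rejected.
 */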
3133 static bool
3134 ath12k_dp_rx_h_defrag_validate_incr_pn(struct ath12k *ar, struct ath12k_dp_rx_tid *rx_tid)
3135 {
3136         struct ath12k_base *ab = ar->ab;
3137         enum hal_encrypt_type encrypt_type;
3138         struct sk_buff *first_frag, *skb;
3139         struct hal_rx_desc *desc;
3140         u64 last_pn;
3141         u64 cur_pn;
3142
3143         first_frag = skb_peek(&rx_tid->rx_frags);
3144         desc = (struct hal_rx_desc *)first_frag->data;
3145
3146         encrypt_type = ath12k_dp_rx_h_enctype(ab, desc);
3147         if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3148             encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3149             encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3150             encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3151                 return true;
3152
3153         last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag);
3154         skb_queue_walk(&rx_tid->rx_frags, skb) {
3155                 if (skb == first_frag)
3156                         continue;
3157
3158                 cur_pn = ath12k_dp_rx_h_get_pn(ar, skb);
3159                 if (cur_pn != last_pn + 1)
3160                         return false;
3161                 last_pn = cur_pn;
3162         }
3163         return true;
3164 }
3165
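/* Core defragmentation path: validate the fragment, queue it in per-TID
 * order and, once the fragment bitmap is complete, verify the PN sequence,
 * reassemble the MPDU and re-inject it through the REO entrance ring. For
 * an incomplete sequence the fragment timeout timer is armed instead.
 */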
3166 static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
3167                                     struct sk_buff *msdu,
3168                                     struct hal_reo_dest_ring *ring_desc)
3169 {
3170         struct ath12k_base *ab = ar->ab;
3171         struct hal_rx_desc *rx_desc;
3172         struct ath12k_peer *peer;
3173         struct ath12k_dp_rx_tid *rx_tid;
3174         struct sk_buff *defrag_skb = NULL;
3175         u32 peer_id;
3176         u16 seqno, frag_no;
3177         u8 tid;
3178         int ret = 0;
3179         bool more_frags;
3180
3181         rx_desc = (struct hal_rx_desc *)msdu->data;
3182         peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
3183         tid = ath12k_dp_rx_h_tid(ab, rx_desc);
3184         seqno = ath12k_dp_rx_h_seq_no(ab, rx_desc);
3185         frag_no = ath12k_dp_rx_h_frag_no(ab, msdu);
3186         more_frags = ath12k_dp_rx_h_more_frags(ab, msdu);
3187
3188         if (!ath12k_dp_rx_h_seq_ctrl_valid(ab, rx_desc) ||
3189             !ath12k_dp_rx_h_fc_valid(ab, rx_desc) ||
3190             tid > IEEE80211_NUM_TIDS)
3191                 return -EINVAL;
3192
3193         /* An unfragmented packet was received in the reo
3194          * exception ring; this shouldn't happen, as such
3195          * packets typically come from the reo2sw srngs.
3196          */
3198         if (WARN_ON_ONCE(!frag_no && !more_frags))
3199                 return -EINVAL;
3200
3201         spin_lock_bh(&ab->base_lock);
3202         peer = ath12k_peer_find_by_id(ab, peer_id);
3203         if (!peer) {
3204                 ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
3205                             peer_id);
3206                 ret = -ENOENT;
3207                 goto out_unlock;
3208         }
3209         rx_tid = &peer->rx_tid[tid];
3210
3211         if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
3212             skb_queue_empty(&rx_tid->rx_frags)) {
3213                 /* Flush stored fragments and start a new sequence */
3214                 ath12k_dp_rx_frags_cleanup(rx_tid, true);
3215                 rx_tid->cur_sn = seqno;
3216         }
3217
3218         if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
3219                 /* Fragment already present */
3220                 ret = -EINVAL;
3221                 goto out_unlock;
3222         }
3223
3224         if (frag_no > __fls(rx_tid->rx_frag_bitmap))
3225                 __skb_queue_tail(&rx_tid->rx_frags, msdu);
3226         else
3227                 ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu);
3228
3229         rx_tid->rx_frag_bitmap |= BIT(frag_no);
3230         if (!more_frags)
3231                 rx_tid->last_frag_no = frag_no;
3232
3233         if (frag_no == 0) {
3234                 rx_tid->dst_ring_desc = kmemdup(ring_desc,
3235                                                 sizeof(*rx_tid->dst_ring_desc),
3236                                                 GFP_ATOMIC);
3237                 if (!rx_tid->dst_ring_desc) {
3238                         ret = -ENOMEM;
3239                         goto out_unlock;
3240                 }
3241         } else {
3242                 ath12k_dp_rx_link_desc_return(ab, ring_desc,
3243                                               HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3244         }
3245
3246         if (!rx_tid->last_frag_no ||
3247             rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
3248                 mod_timer(&rx_tid->frag_timer, jiffies +
3249                                                ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS);
3250                 goto out_unlock;
3251         }
3252
3253         spin_unlock_bh(&ab->base_lock);
3254         del_timer_sync(&rx_tid->frag_timer);
3255         spin_lock_bh(&ab->base_lock);
3256
3257         peer = ath12k_peer_find_by_id(ab, peer_id);
3258         if (!peer)
3259                 goto err_frags_cleanup;
3260
3261         if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
3262                 goto err_frags_cleanup;
3263
3264         if (ath12k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
3265                 goto err_frags_cleanup;
3266
3267         if (!defrag_skb)
3268                 goto err_frags_cleanup;
3269
3270         if (ath12k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
3271                 goto err_frags_cleanup;
3272
3273         ath12k_dp_rx_frags_cleanup(rx_tid, false);
3274         goto out_unlock;
3275
3276 err_frags_cleanup:
3277         dev_kfree_skb_any(defrag_skb);
3278         ath12k_dp_rx_frags_cleanup(rx_tid, true);
3279 out_unlock:
3280         spin_unlock_bh(&ab->base_lock);
3281         return ret;
3282 }
3283
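/* Recover the SKB behind an exception ring descriptor, unmap it and either
 * drop it or hand it to the defrag path; on defrag failure the link
 * descriptor is returned to the WBM idle list.
 */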
3284 static int
3285 ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
3286                              bool drop, u32 cookie)
3287 {
3288         struct ath12k_base *ab = ar->ab;
3289         struct sk_buff *msdu;
3290         struct ath12k_skb_rxcb *rxcb;
3291         struct hal_rx_desc *rx_desc;
3292         u16 msdu_len;
3293         u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
3294         struct ath12k_rx_desc_info *desc_info;
3295         u64 desc_va;
3296
3297         desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
3298                    le32_to_cpu(desc->buf_va_lo));
3299         desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
3300
3301         /* retry manual desc retrieval */
3302         if (!desc_info) {
3303                 desc_info = ath12k_dp_get_rx_desc(ab, cookie);
3304                 if (!desc_info) {
3305                         ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
3306                         return -EINVAL;
3307                 }
3308         }
3309
3310         if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
3311                 ath12k_warn(ab, " RX Exception, Check HW CC implementation");
3312
3313         msdu = desc_info->skb;
3314         desc_info->skb = NULL;
3315         spin_lock_bh(&ab->dp.rx_desc_lock);
3316         list_move_tail(&desc_info->list, &ab->dp.rx_desc_free_list);
3317         spin_unlock_bh(&ab->dp.rx_desc_lock);
3318
3319         rxcb = ATH12K_SKB_RXCB(msdu);
3320         dma_unmap_single(ar->ab->dev, rxcb->paddr,
3321                          msdu->len + skb_tailroom(msdu),
3322                          DMA_FROM_DEVICE);
3323
3324         if (drop) {
3325                 dev_kfree_skb_any(msdu);
3326                 return 0;
3327         }
3328
3329         rcu_read_lock();
3330         if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
3331                 dev_kfree_skb_any(msdu);
3332                 goto exit;
3333         }
3334
3335         if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
3336                 dev_kfree_skb_any(msdu);
3337                 goto exit;
3338         }
3339
3340         rx_desc = (struct hal_rx_desc *)msdu->data;
3341         msdu_len = ath12k_dp_rx_h_msdu_len(ar->ab, rx_desc);
3342         if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
3343                 ath12k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
3344                 ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
3345                                 sizeof(*rx_desc));
3346                 dev_kfree_skb_any(msdu);
3347                 goto exit;
3348         }
3349
3350         skb_put(msdu, hal_rx_desc_sz + msdu_len);
3351
3352         if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) {
3353                 dev_kfree_skb_any(msdu);
3354                 ath12k_dp_rx_link_desc_return(ar->ab, desc,
3355                                               HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3356         }
3357 exit:
3358         rcu_read_unlock();
3359         return 0;
3360 }
3361
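/* NAPI handler for the REO exception ring: parse each error descriptor,
 * recover the MSDUs behind its link descriptor and feed rx fragments into
 * the defrag path while dropping everything else, then replenish the
 * reaped rx buffers.
 */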
3362 int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
3363                              int budget)
3364 {
3365         u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3366         struct dp_link_desc_bank *link_desc_banks;
3367         enum hal_rx_buf_return_buf_manager rbm;
3368         struct hal_rx_msdu_link *link_desc_va;
3369         int tot_n_bufs_reaped, quota, ret, i;
3370         struct hal_reo_dest_ring *reo_desc;
3371         struct dp_rxdma_ring *rx_ring;
3372         struct dp_srng *reo_except;
3373         u32 desc_bank, num_msdus;
3374         struct hal_srng *srng;
3375         struct ath12k_dp *dp;
3376         int mac_id;
3377         struct ath12k *ar;
3378         dma_addr_t paddr;
3379         bool is_frag;
3380         bool drop = false;
3381
3382         tot_n_bufs_reaped = 0;
3383         quota = budget;
3384
3385         dp = &ab->dp;
3386         reo_except = &dp->reo_except_ring;
3387         link_desc_banks = dp->link_desc_banks;
3388
3389         srng = &ab->hal.srng_list[reo_except->ring_id];
3390
3391         spin_lock_bh(&srng->lock);
3392
3393         ath12k_hal_srng_access_begin(ab, srng);
3394
3395         while (budget &&
3396                (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
3397                 ab->soc_stats.err_ring_pkts++;
3398                 ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
3399                                                     &desc_bank);
3400                 if (ret) {
3401                         ath12k_warn(ab, "failed to parse error reo desc %d\n",
3402                                     ret);
3403                         continue;
3404                 }
3405                 link_desc_va = link_desc_banks[desc_bank].vaddr +
3406                                (paddr - link_desc_banks[desc_bank].paddr);
3407                 ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3408                                                  &rbm);
3409                 if (rbm != HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST &&
3410                     rbm != HAL_RX_BUF_RBM_SW3_BM &&
3411                     rbm != ab->hw_params->hal_params->rx_buf_rbm) {
3412                         ab->soc_stats.invalid_rbm++;
3413                         ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
3414                         ath12k_dp_rx_link_desc_return(ab, reo_desc,
3415                                                       HAL_WBM_REL_BM_ACT_REL_MSDU);
3416                         continue;
3417                 }
3418
3419                 is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) &
3420                              RX_MPDU_DESC_INFO0_FRAG_FLAG);
3421
3422                 /* Process only rx fragments with one msdu per link desc below, and
3423                  * drop msdus indicated due to error reasons.
3424                  */
3425                 if (!is_frag || num_msdus > 1) {
3426                         drop = true;
3427                         /* Return the link desc back to wbm idle list */
3428                         ath12k_dp_rx_link_desc_return(ab, reo_desc,
3429                                                       HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3430                 }
3431
3432                 for (i = 0; i < num_msdus; i++) {
3433                         mac_id = le32_get_bits(reo_desc->info0,
3434                                                HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
3435
3436                         ar = ab->pdevs[mac_id].ar;
3437
3438                         if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, drop,
3439                                                           msdu_cookies[i]))
3440                                 tot_n_bufs_reaped++;
3441                 }
3442
3443                 if (tot_n_bufs_reaped >= quota) {
3444                         tot_n_bufs_reaped = quota;
3445                         goto exit;
3446                 }
3447
3448                 budget = quota - tot_n_bufs_reaped;
3449         }
3450
3451 exit:
3452         ath12k_hal_srng_access_end(ab, srng);
3453
3454         spin_unlock_bh(&srng->lock);
3455
3456         rx_ring = &dp->rx_refill_buf_ring;
3457
3458         ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, tot_n_bufs_reaped,
3459                                     ab->hw_params->hal_params->rx_buf_rbm, true);
3460
3461         return tot_n_bufs_reaped;
3462 }
3463
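/* Drop the remaining buffers of a scattered MSDU whose REO queue
 * descriptor was NULL; n_buffs is derived from the advertised MSDU length
 * divided by the per-buffer payload size.
 */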
3464 static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
3465                                              int msdu_len,
3466                                              struct sk_buff_head *msdu_list)
3467 {
3468         struct sk_buff *skb, *tmp;
3469         struct ath12k_skb_rxcb *rxcb;
3470         int n_buffs;
3471
3472         n_buffs = DIV_ROUND_UP(msdu_len,
3473                                (DP_RX_BUFFER_SIZE - ar->ab->hw_params->hal_desc_sz));
3474
3475         skb_queue_walk_safe(msdu_list, skb, tmp) {
3476                 rxcb = ATH12K_SKB_RXCB(skb);
3477                 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3478                     rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3479                         if (!n_buffs)
3480                                 break;
3481                         __skb_unlink(skb, msdu_list);
3482                         dev_kfree_skb_any(skb);
3483                         n_buffs--;
3484                 }
3485         }
3486 }
3487
3488 static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
3489                                       struct ieee80211_rx_status *status,
3490                                       struct sk_buff_head *msdu_list)
3491 {
3492         struct ath12k_base *ab = ar->ab;
3493         u16 msdu_len, peer_id;
3494         struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3495         u8 l3pad_bytes;
3496         struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3497         u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
3498
3499         msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
3500         peer_id = ath12k_dp_rx_h_peer_id(ab, desc);
3501
3502         spin_lock(&ab->base_lock);
3503         if (!ath12k_peer_find_by_id(ab, peer_id)) {
3504                 spin_unlock(&ab->base_lock);
3505                 ath12k_dbg(ab, ATH12K_DBG_DATA, "invalid peer id received in wbm err pkt %d\n",
3506                            peer_id);
3507                 return -EINVAL;
3508         }
3509         spin_unlock(&ab->base_lock);
3510
3511         if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
3512                 /* First buffer will be freed by the caller, so deduct its length */
3513                 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
3514                 ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
3515                 return -EINVAL;
3516         }
3517
3518         /* Even after cleaning up the sg buffers in the msdu list with the above
3519          * check, any msdu received with the continuation flag needs to be dropped
3520          * as invalid. This protects against a random err frame carrying the flag.
3521          */
3522         if (rxcb->is_continuation)
3523                 return -EINVAL;
3524
3525         if (!ath12k_dp_rx_h_msdu_done(ab, desc)) {
3526                 ath12k_warn(ar->ab,
3527                             "msdu_done bit not set in null_q_des processing\n");
3528                 __skb_queue_purge(msdu_list);
3529                 return -EIO;
3530         }
3531
3532         /* Handle NULL queue descriptor violations arising out of a missing
3533          * REO queue for a given peer or a given TID. This may typically
3534          * happen if a packet is received on a QoS-enabled TID before the
3535          * ADDBA negotiation for that TID, when the TID queue is set up. It
3536          * may also happen for MC/BC frames if they are not routed to the
3537          * non-QoS TID queue, in the absence of any other default TID queue.
3538          * This error can show up in both a REO destination and a WBM release ring.
3539          */
3540
3541         if (rxcb->is_frag) {
3542                 skb_pull(msdu, hal_rx_desc_sz);
3543         } else {
3544                 l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
3545
3546                 if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
3547                         return -EINVAL;
3548
3549                 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3550                 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3551         }
3552         ath12k_dp_rx_h_ppdu(ar, desc, status);
3553
3554         ath12k_dp_rx_h_mpdu(ar, msdu, desc, status);
3555
3556         rxcb->tid = ath12k_dp_rx_h_tid(ab, desc);
3557
3558         /* Note that the caller will have access to the msdu and will complete
3559          * rx with mac80211. There is no need to worry about cleaning up amsdu_list.
3560          */
3561
3562         return 0;
3563 }
3564
3565 static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
3566                                    struct ieee80211_rx_status *status,
3567                                    struct sk_buff_head *msdu_list)
3568 {
3569         struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3570         bool drop = false;
3571
3572         ar->ab->soc_stats.reo_error[rxcb->err_code]++;
3573
3574         switch (rxcb->err_code) {
3575         case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
3576                 if (ath12k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
3577                         drop = true;
3578                 break;
3579         case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
3580                 /* TODO: Do not drop PN failed packets in the driver;
3581                  * instead, it is good to drop such packets in mac80211
3582                  * after incrementing the replay counters.
3583                  */
3584                 fallthrough;
3585         default:
3586                 /* TODO: Review other errors and process them to mac80211
3587                  * as appropriate.
3588                  */
3589                 drop = true;
3590                 break;
3591         }
3592
3593         return drop;
3594 }
3595
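/* Prepare an MSDU that failed the HW TKIP MIC check for delivery: strip
 * the rx descriptor and L3 padding, fill in the rx status and mark the
 * frame with RX_FLAG_MMIC_ERROR so that mac80211 handles the MIC failure.
 */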
3596 static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
3597                                         struct ieee80211_rx_status *status)
3598 {
3599         struct ath12k_base *ab = ar->ab;
3600         u16 msdu_len;
3601         struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3602         u8 l3pad_bytes;
3603         struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3604         u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
3605
3606         rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
3607         rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);
3608
3609         l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
3610         msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
3611         skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3612         skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3613
3614         ath12k_dp_rx_h_ppdu(ar, desc, status);
3615
3616         status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
3617                          RX_FLAG_DECRYPTED);
3618
3619         ath12k_dp_rx_h_undecap(ar, msdu, desc,
3620                                HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
3621 }
3622
3623 static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
3624                                      struct ieee80211_rx_status *status)
3625 {
3626         struct ath12k_base *ab = ar->ab;
3627         struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3628         struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
3629         bool drop = false;
3630         u32 err_bitmap;
3631
3632         ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
3633
3634         switch (rxcb->err_code) {
3635         case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
3636         case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
3637                 err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
3638                 if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
3639                         ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
3640                         break;
3641                 }
3642                 fallthrough;
3643         default:
3644                 /* TODO: Review other rxdma error code to check if anything is
3645                  * worth reporting to mac80211
3646                  */
3647                 drop = true;
3648                 break;
3649         }
3650
3651         return drop;
3652 }
3653
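/* Dispatch a WBM error MSDU to the REO or RXDMA error handler based on
 * its release source and deliver it to mac80211 unless the handler
 * requests a drop.
 */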
3654 static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
3655                                  struct napi_struct *napi,
3656                                  struct sk_buff *msdu,
3657                                  struct sk_buff_head *msdu_list)
3658 {
3659         struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3660         struct ieee80211_rx_status rxs = {0};
3661         bool drop = true;
3662
3663         switch (rxcb->err_rel_src) {
3664         case HAL_WBM_REL_SRC_MODULE_REO:
3665                 drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
3666                 break;
3667         case HAL_WBM_REL_SRC_MODULE_RXDMA:
3668                 drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
3669                 break;
3670         default:
3671                 /* msdu will get freed */
3672                 break;
3673         }
3674
3675         if (drop) {
3676                 dev_kfree_skb_any(msdu);
3677                 return;
3678         }
3679
3680         ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
3681 }
3682
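/* NAPI handler for the WBM error release ring: reap erroneous MSDUs into
 * per-mac lists, replenish the rx buffers and process each MSDU through
 * the WBM error dispatcher for the active pdevs.
 */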
3683 int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
3684                                  struct napi_struct *napi, int budget)
3685 {
3686         struct ath12k *ar;
3687         struct ath12k_dp *dp = &ab->dp;
3688         struct dp_rxdma_ring *rx_ring;
3689         struct hal_rx_wbm_rel_info err_info;
3690         struct hal_srng *srng;
3691         struct sk_buff *msdu;
3692         struct sk_buff_head msdu_list[MAX_RADIOS];
3693         struct ath12k_skb_rxcb *rxcb;
3694         void *rx_desc;
3695         int mac_id;
3696         int num_buffs_reaped = 0;
3697         struct ath12k_rx_desc_info *desc_info;
3698         int ret, i;
3699
3700         for (i = 0; i < ab->num_radios; i++)
3701                 __skb_queue_head_init(&msdu_list[i]);
3702
3703         srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
3704         rx_ring = &dp->rx_refill_buf_ring;
3705
3706         spin_lock_bh(&srng->lock);
3707
3708         ath12k_hal_srng_access_begin(ab, srng);
3709
3710         while (budget) {
3711                 rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
3712                 if (!rx_desc)
3713                         break;
3714
3715                 ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
3716                 if (ret) {
3717                         ath12k_warn(ab,
3718                                     "failed to parse rx error in wbm_rel ring desc %d\n",
3719                                     ret);
3720                         continue;
3721                 }
3722
3723                 desc_info = (struct ath12k_rx_desc_info *)err_info.rx_desc;
3724
3725                 /* retry manual desc retrieval if hw cc is not done */
3726                 if (!desc_info) {
3727                         desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
3728                         if (!desc_info) {
3729                                 ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
3730                                 continue;
3731                         }
3732                 }
3733
3734                 /* FIXME: Extract the mac id correctly. Since descs are not tied
3735                  * to a mac, we can extract it from the vdev id in the ring desc.
3736                  */
3737                 mac_id = 0;
3738
3739                 if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
3740                         ath12k_warn(ab, "WBM RX err, Check HW CC implementation");
3741
3742                 msdu = desc_info->skb;
3743                 desc_info->skb = NULL;
3744
3745                 spin_lock_bh(&dp->rx_desc_lock);
3746                 list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
3747                 spin_unlock_bh(&dp->rx_desc_lock);
3748
3749                 rxcb = ATH12K_SKB_RXCB(msdu);
3750                 dma_unmap_single(ab->dev, rxcb->paddr,
3751                                  msdu->len + skb_tailroom(msdu),
3752                                  DMA_FROM_DEVICE);
3753
3754                 num_buffs_reaped++;
3755
3756                 if (!err_info.continuation)
3757                         budget--;
3758
3759                 if (err_info.push_reason !=
3760                     HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
3761                         dev_kfree_skb_any(msdu);
3762                         continue;
3763                 }
3764
3765                 rxcb->err_rel_src = err_info.err_rel_src;
3766                 rxcb->err_code = err_info.err_code;
3767                 rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
3768                 __skb_queue_tail(&msdu_list[mac_id], msdu);
3769
3770                 rxcb->is_first_msdu = err_info.first_msdu;
3771                 rxcb->is_last_msdu = err_info.last_msdu;
3772                 rxcb->is_continuation = err_info.continuation;
3773         }
3774
3775         ath12k_hal_srng_access_end(ab, srng);
3776
3777         spin_unlock_bh(&srng->lock);
3778
3779         if (!num_buffs_reaped)
3780                 goto done;
3781
3782         ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped,
3783                                     ab->hw_params->hal_params->rx_buf_rbm, true);
3784
3785         rcu_read_lock();
3786         for (i = 0; i < ab->num_radios; i++) {
3787                 if (!rcu_dereference(ab->pdevs_active[i])) {
3788                         __skb_queue_purge(&msdu_list[i]);
3789                         continue;
3790                 }
3791
3792                 ar = ab->pdevs[i].ar;
3793
3794                 if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
3795                         __skb_queue_purge(&msdu_list[i]);
3796                         continue;
3797                 }
3798
3799                 while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
3800                         ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
3801         }
3802         rcu_read_unlock();
3803 done:
3804         return num_buffs_reaped;
3805 }
3806
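/* Drain the REO status ring: dispatch each status TLV to its parser and,
 * when a pending REO command matches the reported command number, invoke
 * its completion handler and free the command entry.
 */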
3807 void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
3808 {
3809         struct ath12k_dp *dp = &ab->dp;
3810         struct hal_tlv_64_hdr *hdr;
3811         struct hal_srng *srng;
3812         struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
3813         bool found = false;
3814         u16 tag;
3815         struct hal_reo_status reo_status;
3816
3817         srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
3818
3819         memset(&reo_status, 0, sizeof(reo_status));
3820
3821         spin_lock_bh(&srng->lock);
3822
3823         ath12k_hal_srng_access_begin(ab, srng);
3824
3825         while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
3826                 tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
3827
3828                 switch (tag) {
3829                 case HAL_REO_GET_QUEUE_STATS_STATUS:
3830                         ath12k_hal_reo_status_queue_stats(ab, hdr,
3831                                                           &reo_status);
3832                         break;
3833                 case HAL_REO_FLUSH_QUEUE_STATUS:
3834                         ath12k_hal_reo_flush_queue_status(ab, hdr,
3835                                                           &reo_status);
3836                         break;
3837                 case HAL_REO_FLUSH_CACHE_STATUS:
3838                         ath12k_hal_reo_flush_cache_status(ab, hdr,
3839                                                           &reo_status);
3840                         break;
3841                 case HAL_REO_UNBLOCK_CACHE_STATUS:
3842                         ath12k_hal_reo_unblk_cache_status(ab, hdr,
3843                                                           &reo_status);
3844                         break;
3845                 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
3846                         ath12k_hal_reo_flush_timeout_list_status(ab, hdr,
3847                                                                  &reo_status);
3848                         break;
3849                 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
3850                         ath12k_hal_reo_desc_thresh_reached_status(ab, hdr,
3851                                                                   &reo_status);
3852                         break;
3853                 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
3854                         ath12k_hal_reo_update_rx_reo_queue_status(ab, hdr,
3855                                                                   &reo_status);
3856                         break;
3857                 default:
3858                         ath12k_warn(ab, "Unknown reo status type %d\n", tag);
3859                         continue;
3860                 }
3861
3862                 spin_lock_bh(&dp->reo_cmd_lock);
3863                 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
3864                         if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
3865                                 found = true;
3866                                 list_del(&cmd->list);
3867                                 break;
3868                         }
3869                 }
3870                 spin_unlock_bh(&dp->reo_cmd_lock);
3871
3872                 if (found) {
3873                         cmd->handler(dp, (void *)&cmd->data,
3874                                      reo_status.uniform_hdr.cmd_status);
3875                         kfree(cmd);
3876                 }
3877
3878                 found = false;
3879         }
3880
3881         ath12k_hal_srng_access_end(ab, srng);
3882
3883         spin_unlock_bh(&srng->lock);
3884 }
3885
3886 void ath12k_dp_rx_free(struct ath12k_base *ab)
3887 {
3888         struct ath12k_dp *dp = &ab->dp;
3889         int i;
3890
3891         ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
3892
3893         for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
3894                 if (ab->hw_params->rx_mac_buf_ring)
3895                         ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
3896         }
3897
3898         for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
3899                 ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
3900
3901         ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
3902         ath12k_dp_srng_cleanup(ab, &dp->tx_mon_buf_ring.refill_buf_ring);
3903
3904         ath12k_dp_rxdma_buf_free(ab);
3905 }
3906
3907 void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id)
3908 {
3909         struct ath12k *ar = ab->pdevs[mac_id].ar;
3910
3911         ath12k_dp_rx_pdev_srng_free(ar);
3912 }
3913
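/* Configure the RXDMA ring TLV filter for QCN9274: subscribe only to the
 * TLVs needed on the rx path (mpdu_start/msdu_end plus BAR and data packet
 * filters) with offsets matching this chip's hal_rx_desc layout.
 */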
3914 int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
3915 {
3916         struct ath12k_dp *dp = &ab->dp;
3917         struct htt_rx_ring_tlv_filter tlv_filter = {0};
3918         u32 ring_id;
3919         int ret;
3920         u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
3921
3922         ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
3923
3924         tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
3925         tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
3926         tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
3927                                         HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
3928                                         HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
3929         tlv_filter.offset_valid = true;
3930         tlv_filter.rx_packet_offset = hal_rx_desc_sz;
3931
3932         tlv_filter.rx_mpdu_start_offset =
3933                         ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
3934         tlv_filter.rx_msdu_end_offset =
3935                 ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
3936
3937         /* TODO: Selectively subscribe to the required qwords within msdu_end
3938          * and mpdu_start, set up the mask in the message below, and modify
3939          * the rx_desc struct accordingly.
3940          */
3941         ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
3942                                                HAL_RXDMA_BUF,
3943                                                DP_RXDMA_REFILL_RING_SIZE,
3944                                                &tlv_filter);
3945
3946         return ret;
3947 }
3948
3949 int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
3950 {
3951         struct ath12k_dp *dp = &ab->dp;
3952         struct htt_rx_ring_tlv_filter tlv_filter = {0};
3953         u32 ring_id;
3954         int ret;
3955         u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
3956         int i;
3957
3958         ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
3959
3960         tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
3961         tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
3962         tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
3963                                         HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
3964                                         HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
3965         tlv_filter.offset_valid = true;
3966         tlv_filter.rx_packet_offset = hal_rx_desc_sz;
3967
3968         tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);
3969
3970         tlv_filter.rx_mpdu_start_offset =
3971                         ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
3972         tlv_filter.rx_msdu_end_offset =
3973                 ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
3974
3975         /* TODO: Selectively subscribe to the required qwords within msdu_end
3976          * and mpdu_start, set up the mask in the message below, and modify
3977          * the rx_desc struct accordingly.
3978          */
3979
3980         for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
3981                 ring_id = dp->rx_mac_buf_ring[i].ring_id;
3982                 ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i, HAL_RXDMA_BUF,
3983                                                        DP_RXDMA_REFILL_RING_SIZE, &tlv_filter);
3984                 if (ret)
3985                         return ret;
3986         }
3987
3988         return 0;
3989 }
3990
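/* Register all rx related rings (refill, per-mac buf, err dst and monitor
 * rings) with the firmware via HTT and apply the chip specific rxdma ring
 * selection config.
 */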
3991 int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
3992 {
3993         struct ath12k_dp *dp = &ab->dp;
3994         u32 ring_id;
3995         int i, ret;
3996
3997         /* TODO: Need to verify the HTT setup for QCN9224 */
3998         ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
3999         ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF);
4000         if (ret) {
4001                 ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
4002                             ret);
4003                 return ret;
4004         }
4005
4006         if (ab->hw_params->rx_mac_buf_ring) {
4007                 for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
4008                         ring_id = dp->rx_mac_buf_ring[i].ring_id;
4009                         ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4010                                                           i, HAL_RXDMA_BUF);
4011                         if (ret) {
4012                                 ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
4013                                             i, ret);
4014                                 return ret;
4015                         }
4016                 }
4017         }
4018
4019         for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
4020                 ring_id = dp->rxdma_err_dst_ring[i].ring_id;
4021                 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4022                                                   i, HAL_RXDMA_DST);
4023                 if (ret) {
                        ath12k_warn(ab, "failed to configure rxdma_err_dst_ring%d %d\n",
4025                                     i, ret);
4026                         return ret;
4027                 }
4028         }
4029
4030         if (ab->hw_params->rxdma1_enable) {
4031                 ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
4032                 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4033                                                   0, HAL_RXDMA_MONITOR_BUF);
4034                 if (ret) {
4035                         ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
4036                                     ret);
4037                         return ret;
4038                 }
4039
4040                 ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
4041                 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4042                                                   0, HAL_TX_MONITOR_BUF);
4043                 if (ret) {
                        ath12k_warn(ab, "failed to configure tx_mon_buf_ring %d\n",
4045                                     ret);
4046                         return ret;
4047                 }
4048         }
4049
4050         ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
4051         if (ret) {
4052                 ath12k_warn(ab, "failed to setup rxdma ring selection config\n");
4053                 return ret;
4054         }
4055
4056         return 0;
4057 }
4058
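/* Allocate the host-side RXDMA rings together with the idr/lock pairs
 * that track the skbs posted to them, then let
 * ath12k_dp_rxdma_buf_setup() post the initial receive buffers. As in
 * the HTT setup above, the monitor rings exist only when rxdma1 is
 * enabled.
 */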
4059 int ath12k_dp_rx_alloc(struct ath12k_base *ab)
4060 {
4061         struct ath12k_dp *dp = &ab->dp;
4062         int i, ret;
4063
4064         idr_init(&dp->rx_refill_buf_ring.bufs_idr);
4065         spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
4066
4067         idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
4068         spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
4069
4070         idr_init(&dp->tx_mon_buf_ring.bufs_idr);
4071         spin_lock_init(&dp->tx_mon_buf_ring.idr_lock);
4072
4073         ret = ath12k_dp_srng_setup(ab,
4074                                    &dp->rx_refill_buf_ring.refill_buf_ring,
4075                                    HAL_RXDMA_BUF, 0, 0,
4076                                    DP_RXDMA_BUF_RING_SIZE);
4077         if (ret) {
4078                 ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n");
4079                 return ret;
4080         }
4081
4082         if (ab->hw_params->rx_mac_buf_ring) {
4083                 for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
4084                         ret = ath12k_dp_srng_setup(ab,
4085                                                    &dp->rx_mac_buf_ring[i],
4086                                                    HAL_RXDMA_BUF, 1,
4087                                                    i, 1024);
4088                         if (ret) {
4089                                 ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
4090                                             i);
4091                                 return ret;
4092                         }
4093                 }
4094         }
4095
4096         for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
4097                 ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i],
4098                                            HAL_RXDMA_DST, 0, i,
4099                                            DP_RXDMA_ERR_DST_RING_SIZE);
4100                 if (ret) {
4101                         ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i);
4102                         return ret;
4103                 }
4104         }
4105
4106         if (ab->hw_params->rxdma1_enable) {
4107                 ret = ath12k_dp_srng_setup(ab,
4108                                            &dp->rxdma_mon_buf_ring.refill_buf_ring,
4109                                            HAL_RXDMA_MONITOR_BUF, 0, 0,
4110                                            DP_RXDMA_MONITOR_BUF_RING_SIZE);
4111                 if (ret) {
4112                         ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
4113                         return ret;
4114                 }
4115
4116                 ret = ath12k_dp_srng_setup(ab,
4117                                            &dp->tx_mon_buf_ring.refill_buf_ring,
4118                                            HAL_TX_MONITOR_BUF, 0, 0,
4119                                            DP_TX_MONITOR_BUF_RING_SIZE);
4120                 if (ret) {
                        ath12k_warn(ab, "failed to setup HAL_TX_MONITOR_BUF\n");
4122                         return ret;
4123                 }
4124         }
4125
4126         ret = ath12k_dp_rxdma_buf_setup(ab);
4127         if (ret) {
4128                 ath12k_warn(ab, "failed to setup rxdma ring\n");
4129                 return ret;
4130         }
4131
4132         return 0;
4133 }
4134
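/* Per-pdev RX setup. Chips without a dedicated rxdma1 block have
 * nothing to do here; otherwise the per-MAC monitor destination rings
 * are allocated and registered with the target under mac_id + i.
 */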
4135 int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
4136 {
4137         struct ath12k *ar = ab->pdevs[mac_id].ar;
4138         struct ath12k_pdev_dp *dp = &ar->dp;
4139         u32 ring_id;
4140         int i;
4141         int ret;
4142
        if (!ab->hw_params->rxdma1_enable)
                return 0;
4145
4146         ret = ath12k_dp_rx_pdev_srng_alloc(ar);
4147         if (ret) {
4148                 ath12k_warn(ab, "failed to setup rx srngs\n");
4149                 return ret;
4150         }
4151
4152         for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
4153                 ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
4154                 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4155                                                   mac_id + i,
4156                                                   HAL_RXDMA_MONITOR_DST);
4157                 if (ret) {
4158                         ath12k_warn(ab,
4159                                     "failed to configure rxdma_mon_dst_ring %d %d\n",
4160                                     i, ret);
4161                         return ret;
4162                 }
4163
4164                 ring_id = dp->tx_mon_dst_ring[i].ring_id;
4165                 ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4166                                                   mac_id + i,
4167                                                   HAL_TX_MONITOR_DST);
4168                 if (ret) {
4169                         ath12k_warn(ab,
4170                                     "failed to configure tx_mon_dst_ring %d %d\n",
4171                                     i, ret);
4172                         return ret;
4173                 }
4174         }

        return 0;
4177 }
4178
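/* Reset the per-pdev monitor state: an empty status queue, the PPDU
 * state machine back at DP_PPDU_STATUS_START and zeroed rx monitor
 * stats.
 */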
4179 static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar)
4180 {
4181         struct ath12k_pdev_dp *dp = &ar->dp;
4182         struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data;
4183
4184         skb_queue_head_init(&pmon->rx_status_q);
4185
4186         pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
4187
4188         memset(&pmon->rx_mon_stats, 0,
4189                sizeof(pmon->rx_mon_stats));
4190         return 0;
4191 }
4192
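/* Monitor attach is a superset of the status attach above: chips
 * without rxdma1 stop after the status-queue init, while rxdma1-capable
 * chips also reset the link descriptor bookkeeping and initialise
 * mon_lock.
 */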
4193 int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
4194 {
4195         struct ath12k_pdev_dp *dp = &ar->dp;
4196         struct ath12k_mon_data *pmon = &dp->mon_data;
4197         int ret = 0;
4198
4199         ret = ath12k_dp_rx_pdev_mon_status_attach(ar);
4200         if (ret) {
                ath12k_warn(ar->ab, "pdev_mon_status_attach() failed\n");
4202                 return ret;
4203         }
4204
4205         /* if rxdma1_enable is false, no need to setup
4206          * rxdma_mon_desc_ring.
4207          */
4208         if (!ar->ab->hw_params->rxdma1_enable)
4209                 return 0;
4210
4211         pmon->mon_last_linkdesc_paddr = 0;
4212         pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
4213         spin_lock_init(&pmon->mon_lock);
4214
4215         return 0;
4216 }
4217
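/* Pktlog capture rides on the monitor reap timer: start arms the
 * timer, stop optionally cancels it and always drains the monitor
 * rings. A caller would pair the two roughly like this (a sketch; the
 * real call sites live elsewhere in the driver):
 *
 *        ath12k_dp_rx_pktlog_start(ab);
 *        ...capture window...
 *        ath12k_dp_rx_pktlog_stop(ab, true);
 */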
4218 int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab)
4219 {
4220         /* start reap timer */
4221         mod_timer(&ab->mon_reap_timer,
4222                   jiffies + msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
4223
4224         return 0;
4225 }
4226
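/* Passing stop_timer == false drains the monitor rings while leaving
 * the reap timer alone, presumably for callers that cancel it
 * elsewhere.
 */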
4227 int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer)
4228 {
4229         int ret;
4230
4231         if (stop_timer)
4232                 del_timer_sync(&ab->mon_reap_timer);
4233
4234         /* reap all the monitor related rings */
4235         ret = ath12k_dp_purge_mon_ring(ab);
4236         if (ret) {
4237                 ath12k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
4238                 return ret;
4239         }
4240
4241         return 0;
4242 }