// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include "core.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>
#include <linux/bitfield.h>

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

/* shortcut to interpret a raw memory buffer as a rx descriptor */
#define HTT_RX_BUF_TO_RX_DESC(hw, buf) ath10k_htt_rx_desc_from_raw_buffer(hw, buf)

static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb);

static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}

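/* The lookup above relies on __ath10k_htt_rx_ring_fill_n() inserting
 * every in-order rx buffer into skb_table keyed by its DMA address:
 *
 *	hash_add(htt->rx_ring.skb_table, &ATH10K_SKB_RXCB(skb)->hlist, paddr);
 *
 * so a firmware-reported paddr resolves to its sk_buff in O(1) on
 * average instead of by scanning the whole netbufs_ring.
 */
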
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_32);
}

static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_64);
}

static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_32 = vaddr;
}

static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_64 = vaddr;
}

static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}

static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}

static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = 0;
}

static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = 0;
}

static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_32;
}

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_64;
}

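/* A minimal sketch of how the _32/_64 helpers above are expected to be
 * wired up (the actual ops table lives outside this excerpt; the field
 * names are assumptions based on the helper names):
 *
 *	static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
 *		.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
 *		.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
 *		.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
 *		.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
 *		.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
 *	};
 *
 * Wrappers such as ath10k_htt_get_rx_ring_size() then dispatch through
 * this table so the rest of the rx path does not care whether the
 * target uses 32 bit or 64 bit rx ring addresses.
 */
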
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct ath10k_hw_params *hw = &htt->ar->hw_params;
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure ring is always half empty. This
	 * guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	if (idx < 0 || idx >= htt->rx_ring.size) {
		ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
		idx &= htt->rx_ring.size_mask;
	}

	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data);
		ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		ath10k_htt_set_paddrs_ring(htt, paddr, idx);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);

	return ret;
}

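/* Because the ring size is a power of two (enforced in
 * ath10k_htt_rx_alloc() below), "idx &= size_mask" is the wrap-around:
 * e.g. with a ring of 2048 entries (mask 0x7ff), index 2047 + 1 masks
 * back to 0. The mb() before publishing alloc_idx orders the paddr
 * writes against the index update observed by the device.
 */
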
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);

	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there aren't enough buffers on the RX ring FW will
	 * not report RX until it is refilled with enough buffers. This
	 * automatically balances load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

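/* Worked example of the progressive refill above, assuming a fill_level
 * of 1000, a current fill_cnt of 400 and ATH10K_HTT_MAX_NUM_REFILL of
 * 100 (the actual constant is defined in htt.h):
 *
 *	num_deficit = 1000 - 400 = 600
 *	num_to_fill = min(100, 600) = 100
 *	num_deficit = 600 - 100 = 500
 *
 * Only 100 buffers are posted now; since num_deficit > 0 the retry
 * timer is re-armed with the short RESCHED timeout and the remaining
 * 500 are topped up incrementally on subsequent timer runs.
 */
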
static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	spin_unlock_bh(&htt->rx_ring.lock);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_msdus_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	spin_lock_bh(&htt->rx_ring.lock);
	ath10k_htt_rx_ring_free(htt);
	spin_unlock_bh(&htt->rx_ring.lock);

	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  ath10k_htt_get_vaddr_ring(htt),
			  htt->rx_ring.base_paddr);

	ath10k_htt_config_paddrs_ring(htt, NULL);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
	htt->rx_ring.alloc_idx.vaddr = NULL;

	kfree(htt->rx_ring.netbufs_ring);
	htt->rx_ring.netbufs_ring = NULL;
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	ath10k_htt_reset_paddrs_ring(htt, idx);

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;
	struct rx_attention *rx_desc_attention;
	struct rx_frag_info_common *rx_desc_frag_info_common;
	struct rx_msdu_start_common *rx_desc_msdu_start_common;
	struct rx_msdu_end_common *rx_desc_msdu_end_common;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
		rx_desc_attention = ath10k_htt_rx_desc_get_attention(hw, rx_desc);
		rx_desc_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw,
									       rx_desc);
		rx_desc_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rx_desc);
		rx_desc_frag_info_common = ath10k_htt_rx_desc_get_frag_info(hw, rx_desc);

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
		skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc_attention->flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc_attention->flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc_msdu_start_common->info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc_frag_info_common->ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, ath10k_htt_rx_msdu_size(hw)));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc_msdu_end_common->info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		/* FIXME: why are we skipping the first part of the rx_desc? */
		trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32),
					 hw->rx_desc_ops->rx_desc_size - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u64 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head,
					       struct sk_buff *frag_list,
					       unsigned int frag_len)
{
	skb_shinfo(skb_head)->frag_list = frag_list;
	skb_head->data_len = frag_len;
	skb_head->len += skb_head->data_len;
}

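/* Length bookkeeping for the helper above: attaching frag_list counts
 * the chained buffers as non-linear data, so for a head skb with 100
 * linear bytes and a 3000 byte fragment chain:
 *
 *	skb_head->data_len = 3000;	(bytes held in frag_list only)
 *	skb_head->len      = 3100;	(linear + frag_list)
 *
 * matching how the core networking stack accounts fragmented skbs.
 */
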
static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
					     struct sk_buff *msdu,
					     struct htt_rx_in_ord_msdu_desc **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	u32 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
	trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

	skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
	if (last_frag) {
		if (amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	ind_desc++;
	paddr = __le32_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr);
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	while (!last_frag) {
		ind_desc++;
		paddr = __le32_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x",
				    paddr);
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;

	return 0;
}

static int
ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
				  struct sk_buff *msdu,
				  struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	u64 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
	trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

	skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
	if (last_frag) {
		if (amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	ind_desc++;
	paddr = __le64_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr);
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	while (!last_frag) {
		ind_desc++;
		paddr = __le64_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx",
				    paddr);
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;

	return 0;
}

static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
			rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

			trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

			skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd_attention->flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u64 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
			rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

			trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

			skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd_attention->flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr, *vaddr_ring;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = ath10k_htt_get_rx_ring_size(htt);

	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr_ring)
		goto err_dma_ring;

	ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_msdus_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  vaddr_ring,
			  htt->rx_ring.base_paddr);
	ath10k_htt_config_paddrs_ring(htt, NULL);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
	htt->rx_ring.netbufs_ring = NULL;
err_netbuf:
	return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
	u8 ret = 0;

	switch (bw) {
	case 0:
		ret = RATE_INFO_BW_20;
		break;
	case 1:
		ret = RATE_INFO_BW_40;
		break;
	case 2:
		ret = RATE_INFO_BW_80;
		break;
	case 3:
		ret = RATE_INFO_BW_160;
		break;
	}

	return ret;
}

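/* GROUP_ID_IS_SU_MIMO() mirrors IEEE 802.11ac: in VHT-SIG-A1 a Group ID
 * of 0 or 63 denotes a single-user PPDU, while anything in between is
 * MU-MIMO. That is why the VHT branch below can recover MCS/NSS only
 * for group IDs 0 and 63 and falls back to a best-effort rate report
 * otherwise.
 */
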
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_attention *rxd_attention;
	struct rx_mpdu_start *rxd_mpdu_start;
	struct rx_mpdu_end *rxd_mpdu_end;
	struct rx_msdu_start_common *rxd_msdu_start_common;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	struct rx_ppdu_start *rxd_ppdu_start;
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 *rxd_msdu_payload;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;
	u32 stbc, nsts_su;

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
	rxd_mpdu_end = ath10k_htt_rx_desc_get_mpdu_end(hw, rxd);
	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
	rxd_msdu_payload = ath10k_htt_rx_desc_get_msdu_payload(hw, rxd);

	info1 = __le32_to_cpu(rxd_ppdu_start->info1);
	info2 = __le32_to_cpu(rxd_ppdu_start->info2);
	info3 = __le32_to_cpu(rxd_ppdu_start->info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get the legacy rate index the band is required. Since the
		 * band can't be undefined, check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		stbc = (info2 >> 3) & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nsts_su = ((info2 >> 10) & 0x07);
			if (stbc)
				nss = (nsts_su >> 2) + 1;
			else
				nss = (nsts_su + 1);
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd_attention->flags),
				    __le32_to_cpu(rxd_mpdu_start->info0),
				    __le32_to_cpu(rxd_mpdu_start->info1),
				    __le32_to_cpu(rxd_msdu_start_common->info0),
				    __le32_to_cpu(rxd_msdu_start_common->info1),
				    rxd_ppdu_start->info0,
				    __le32_to_cpu(rxd_ppdu_start->info1),
				    __le32_to_cpu(rxd_ppdu_start->info2),
				    __le32_to_cpu(rxd_ppdu_start->info3),
				    __le32_to_cpu(rxd_ppdu_start->info4));
			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd_msdu_end_common->info0),
				    __le32_to_cpu(rxd_mpdu_end->info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd_msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		status->bw = ath10k_bw_to_mac80211_bw(bw);
		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}

static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_attention *rxd_attention;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	struct rx_mpdu_start *rxd_mpdu_start;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);

	if (rxd_attention->flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd_msdu_end_common->info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd_mpdu_start->info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_ppdu_start *rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
	int i;

	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
		status->chains &= ~BIT(i);

		if (rxd_ppdu_start->rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
				rxd_ppdu_start->rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd_ppdu_start->rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_ppdu_end_common *rxd_ppdu_end_common;

	rxd_ppdu_end_common = ath10k_htt_rx_desc_get_ppdu_end(hw, rxd);

	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

	is_first_ppdu = !!(rxd_attention->flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd_attention->flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->nss = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME_END;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu) {
		ath10k_htt_rx_h_mactime(ar, status, rxd);

		/* set ampdu last segment flag */
		status->flag |= RX_FLAG_AMPDU_IS_LAST;
		ar->ampdu_reference++;
	}
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
				       struct ieee80211_rx_status *rx_status,
				       struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}

static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);

	if (!(ar->filter_flags & FIF_FCSFAIL) &&
	    status->flag & RX_FLAG_FAILED_FCS_CRC) {
		ar->stats.rx_crc_err_drop++;
		dev_kfree_skb_any(skb);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}

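/* Example: a 3addr QoS data header is 26 bytes, so on firmware without
 * ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING this returns
 * round_up(26, 4) = 28 to account for the padded nwifi decap, while
 * firmware advertising the feature gets the raw 26 back.
 */
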
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted,
					const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;
	bool msdu_limit_err;
	int bytes_aligned = ar->hw_params.decap_align_bytes;
	u8 *qos;

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	is_first = !!(rxd_msdu_end_common->info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd_msdu_end_common->info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* Some hardware (QCA99x0 variants) limits the number of msdus in an
	 * a-msdu when deaggregating, so that unwanted MSDU-deaggregation is
	 * avoided for error packets. If the limit is exceeded, hw sends all
	 * remaining MSDUs as a single last MSDU with this msdu limit error
	 * set.
	 */
	msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd);

	/* If an MSDU limit error occurs, don't warn: a partial raw MSDU
	 * without the first MSDU is expected in that case and is handled
	 * later here.
	 */
	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!is_first && !msdu_limit_err))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* Push original 80211 header */
	if (unlikely(msdu_limit_err)) {
		hdr = (struct ieee80211_hdr *)first_hdr;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		if (ieee80211_is_data_qos(hdr->frame_control)) {
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}

		if (crypto_len)
			memcpy(skb_push(msdu, crypto_len),
			       (void *)hdr + round_up(hdr_len, bytes_aligned),
			       crypto_len);

		memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
	}

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data -
				    hw->rx_desc_ops->rx_desc_size);

	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	u8 *rxd_rx_hdr_status;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_rx_hdr_status = ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
	hdr = (void *)rxd_rx_hdr_status;

	is_first = !!(rxd_msdu_end_common->info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd_msdu_end_common->info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);

	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);

	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_start_common *rxd_msdu_start_common;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);

	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
	decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted, first_hdr);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}

static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	struct rx_msdu_start_common *rxd_msdu_start_common;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)skb->data - hw->rx_desc_ops->rx_desc_size);

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
	flags = __le32_to_cpu(rxd_attention->flags);
	info = __le32_to_cpu(rxd_msdu_start_common->info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

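/* Net effect of the checks above: CHECKSUM_UNNECESSARY is reported only
 * for IPv4/IPv6 TCP or UDP frames whose IP and TCP/UDP checksums both
 * passed in hardware; anything else is handed up as CHECKSUM_NONE so
 * the network stack verifies the checksums in software.
 */
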
static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw,
					 struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(hw, msdu);
}

static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
				  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	u64 pn = 0;
	u8 *ehdr;

	hdr = (struct ieee80211_hdr *)skb->data;
	ehdr = skb->data + ieee80211_hdrlen(hdr->frame_control);

	if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
		pn = ehdr[0];
		pn |= (u64)ehdr[1] << 8;
		pn |= (u64)ehdr[4] << 16;
		pn |= (u64)ehdr[5] << 24;
		pn |= (u64)ehdr[6] << 32;
		pn |= (u64)ehdr[7] << 40;
	}

	return pn;
}

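/* The byte picking above follows the CCMP header layout, which is why
 * ehdr[2] and ehdr[3] are skipped:
 *
 *	ehdr[0] PN0   ehdr[1] PN1   ehdr[2] reserved
 *	ehdr[3] key id / ExtIV      ehdr[4..7] PN2..PN5
 *
 * yielding the full 48 bit packet number in pn.
 */
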
static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
						 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)skb->data;
	return !is_multicast_ether_addr(hdr->addr1);
}

static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
					  struct sk_buff *skb,
					  u16 peer_id,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_peer *peer;
	union htt_rx_pn_t *last_pn, new_pn = {0};
	struct ieee80211_hdr *hdr;
	u8 tid, frag_number;
	u32 seq;

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n");
		return false;
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = ieee80211_get_tid(hdr);
	else
		tid = ATH10K_TXRX_NON_QOS_TID;

	last_pn = &peer->frag_tids_last_pn[tid];
	new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, enctype);
	frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
	seq = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));

	if (frag_number == 0) {
		last_pn->pn48 = new_pn.pn48;
		peer->frag_tids_seq[tid] = seq;
	} else {
		if (seq != peer->frag_tids_seq[tid])
			return false;

		if (new_pn.pn48 != last_pn->pn48 + 1)
			return false;

		last_pn->pn48 = new_pn.pn48;
	}

	return true;
}

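/* Example of the defragmentation PN rule enforced above for CCMP: if
 * fragment 0 of sequence number 100 arrives with PN 5000, the peer's
 * per-TID state becomes (seq 100, pn 5000). Fragment 1 is accepted only
 * if it still belongs to seq 100 and carries PN 5001, fragment 2 needs
 * PN 5002, and so on; any gap or reused PN fails the check and the
 * fragment is dropped by the caller.
 */
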
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 bool fill_crypt_header,
				 u8 *rx_hdr,
				 enum ath10k_pkt_rx_err *err,
				 u16 peer_id,
				 bool frag)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu, *temp;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	struct rx_mpdu_start *rxd_mpdu_start;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;
	bool frag_pn_check = true, multicast_check = true;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);

	is_mgmt = !!(rxd_attention->flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	if (rx_hdr)
		memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
	}

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)last->data - hw->rx_desc_ops->rx_desc_size);

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	attention = __le32_to_cpu(rxd_attention->flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (err) {
		if (has_fcs_err)
			*err = ATH10K_PKT_RX_ERR_FCS;
		else if (has_tkip_err)
			*err = ATH10K_PKT_RX_ERR_TKIP;
		else if (has_crypto_err)
			*err = ATH10K_PKT_RX_ERR_CRYPT;
		else if (has_peer_idx_invalid)
			*err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
	}

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_MMIC_STRIPPED;

		if (fill_crypt_header)
			status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			status->flag |= RX_FLAG_IV_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		if (frag && !fill_crypt_header && is_decrypted &&
		    enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
			frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
								      msdu,
								      peer_id,
								      enctype);

		if (frag)
			multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
									       msdu);

		if (!frag_pn_check || !multicast_check) {
			/* Discard the fragment with invalid PN or multicast DA
			 */
			temp = msdu->prev;
			__skb_unlink(msdu, amsdu);
			dev_kfree_skb_any(msdu);
			msdu = temp;
			frag_pn_check = true;
			multicast_check = true;
			continue;
		}

		ath10k_htt_rx_h_csum_offload(&ar->hw_params, msdu);

		if (frag && !fill_crypt_header &&
		    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
			status->flag &= ~RX_FLAG_MMIC_STRIPPED;

		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		if (fill_crypt_header)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);

		if (frag && !fill_crypt_header &&
		    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
			status->flag &= ~RX_FLAG_IV_STRIPPED &
					~RX_FLAG_MMIC_STRIPPED;
	}
}

static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;
	struct sk_buff *first_subframe;

	first_subframe = skb_peek(amsdu);

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		if (msdu == first_subframe) {
			first_subframe = NULL;
			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			status->flag |= RX_FLAG_ALLOW_SAME_PN;
		}

		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}

static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
			       unsigned long *unchain_cnt)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;
	int amsdu_len = skb_queue_len(amsdu);

	/* TODO: We might be able to optimize this by using
	 * skb_try_coalesce or similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skb?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu.
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);

	*unchain_cnt += amsdu_len - 1;

	return 0;
}

static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    unsigned long *drop_cnt,
				    unsigned long *unchain_cnt)
{
	struct sk_buff *first;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_start_common *rxd_msdu_start_common;
	struct rx_frag_info_common *rxd_frag_info;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);

	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
	rxd_frag_info = ath10k_htt_rx_desc_get_frag_info(hw, rxd);
	decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) {
		*drop_cnt += skb_queue_len(amsdu);
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu, unchain_cnt);
}

2219 static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
2220 struct sk_buff_head *amsdu)
2223 struct sk_buff *first;
2224 bool is_first, is_last;
2225 struct ath10k_hw_params *hw = &ar->hw_params;
2226 struct htt_rx_desc *rxd;
2227 struct rx_msdu_end_common *rxd_msdu_end_common;
2228 struct rx_mpdu_start *rxd_mpdu_start;
2229 struct ieee80211_hdr *hdr;
2230 size_t hdr_len, crypto_len;
2231 enum htt_rx_mpdu_encrypt_type enctype;
2232 int bytes_aligned = ar->hw_params.decap_align_bytes;
2234 first = skb_peek(amsdu);
2236 rxd = HTT_RX_BUF_TO_RX_DESC(hw,
2237 (void *)first->data - hw->rx_desc_ops->rx_desc_size);
2239 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
2240 rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
2241 hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
2243 is_first = !!(rxd_msdu_end_common->info0 &
2244 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
2245 is_last = !!(rxd_msdu_end_common->info0 &
2246 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
2248 /* Return in case of non-aggregated msdu */
2249 if (is_first && is_last)
2252 /* First msdu flag is not set for the first msdu of the list */
2256 enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
2257 RX_MPDU_START_INFO0_ENCRYPT_TYPE);
2259 hdr_len = ieee80211_hdrlen(hdr->frame_control);
2260 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
2262 subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
/* Validate that the amsdu has a proper first subframe.
 * A single msdu can sometimes be received as an amsdu when the
 * unauthenticated amsdu flag of a QoS header gets flipped in
 * non-SPP A-MSDUs; in such cases the first subframe carries an
 * llc/snap header in place of a valid DA.
 * Return false if the DA matches the rfc1042 pattern.
 */
2272 if (ether_addr_equal(subframe_hdr, rfc1042_header))
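/* The check above needs only the first six bytes of the would-be DA. A
 * sketch of the comparison, assuming the well-known RFC 1042 LLC/SNAP
 * prefix (the driver gets rfc1042_header from cfg80211):
 *
 *	static const u8 rfc1042_prefix[6] = {
 *		0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
 *	};
 *
 *	static bool da_is_llc_snap(const u8 *subframe_hdr)
 *	{
 *		// the DA slot actually holds the start of an LLC/SNAP header
 *		return memcmp(subframe_hdr, rfc1042_prefix, 6) == 0;
 *	}
 */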
2278 static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
2279 struct sk_buff_head *amsdu,
2280 struct ieee80211_rx_status *rx_status)
2282 if (!rx_status->freq) {
2283 ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
2287 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
2288 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
2292 if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
2293 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
2300 static void ath10k_htt_rx_h_filter(struct ath10k *ar,
2301 struct sk_buff_head *amsdu,
2302 struct ieee80211_rx_status *rx_status,
2303 unsigned long *drop_cnt)
2305 if (skb_queue_empty(amsdu))
2308 if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
2312 *drop_cnt += skb_queue_len(amsdu);
2314 __skb_queue_purge(amsdu);
2317 static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
2319 struct ath10k *ar = htt->ar;
2320 struct ieee80211_rx_status *rx_status = &htt->rx_status;
2321 struct sk_buff_head amsdu;
2323 unsigned long drop_cnt = 0;
2324 unsigned long unchain_cnt = 0;
2325 unsigned long drop_cnt_filter = 0;
2326 unsigned long msdus_to_queue, num_msdus;
2327 enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
2328 u8 first_hdr[RX_HTT_HDR_STATUS_LEN];
2330 __skb_queue_head_init(&amsdu);
2332 spin_lock_bh(&htt->rx_ring.lock);
2333 if (htt->rx_confused) {
2334 spin_unlock_bh(&htt->rx_ring.lock);
2337 ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
2338 spin_unlock_bh(&htt->rx_ring.lock);
2341 ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
2342 __skb_queue_purge(&amsdu);
2343 /* FIXME: It's probably a good idea to reboot the
2344 * device instead of leaving it inoperable.
2346 htt->rx_confused = true;
2350 num_msdus = skb_queue_len(&amsdu);
2352 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
/* only ret == 1 indicates chained msdus */
2356 ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
2358 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
2359 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0,
2361 msdus_to_queue = skb_queue_len(&amsdu);
2362 ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
2364 ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
2365 unchain_cnt, drop_cnt, drop_cnt_filter,
2371 static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc,
2372 union htt_rx_pn_t *pn,
2375 switch (pn_len_bits) {
2377 pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) +
2378 ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32);
2381 pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0);
2386 static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
2387 union htt_rx_pn_t *old_pn)
2389 return ((new_pn->pn48 & 0xffffffffffffULL) <=
2390 (old_pn->pn48 & 0xffffffffffffULL));
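/* The comparison above is the entire replay rule: a new PN is valid only
 * when it is strictly greater than the last accepted one over the low 48
 * bits. Worked example (sketch):
 *
 *	last_pn = 0x1000:
 *	new_pn = 0x0fff  ->  cmp48 true,  replay, frame dropped
 *	new_pn = 0x1000  ->  cmp48 true,  replay, frame dropped
 *	new_pn = 0x1001  ->  cmp48 false, accepted, becomes last_pn
 */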
2393 static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
2394 struct ath10k_peer *peer,
2395 struct htt_rx_indication_hl *rx)
2397 bool last_pn_valid, pn_invalid = false;
2398 enum htt_txrx_sec_cast_type sec_index;
2399 enum htt_security_types sec_type;
2400 union htt_rx_pn_t new_pn = {0};
2401 struct htt_hl_rx_desc *rx_desc;
2402 union htt_rx_pn_t *last_pn;
2403 u32 rx_desc_info, tid;
2404 int num_mpdu_ranges;
2406 lockdep_assert_held(&ar->data_lock);
2411 if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU))
2414 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
2415 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2417 rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
2418 rx_desc_info = __le32_to_cpu(rx_desc->info);
2420 if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED))
2423 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2424 last_pn_valid = peer->tids_last_pn_valid[tid];
2425 last_pn = &peer->tids_last_pn[tid];
2427 if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
2428 sec_index = HTT_TXRX_SEC_MCAST;
2430 sec_index = HTT_TXRX_SEC_UCAST;
2432 sec_type = peer->rx_pn[sec_index].sec_type;
2433 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2435 if (sec_type != HTT_SECURITY_AES_CCMP &&
2436 sec_type != HTT_SECURITY_TKIP &&
2437 sec_type != HTT_SECURITY_TKIP_NOMIC)
2441 pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn);
2443 peer->tids_last_pn_valid[tid] = true;
2446 last_pn->pn48 = new_pn.pn48;
2451 static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
2452 struct htt_rx_indication_hl *rx,
2453 struct sk_buff *skb,
2454 enum htt_rx_pn_check_type check_pn_type,
2455 enum htt_rx_tkip_demic_type tkip_mic_type)
2457 struct ath10k *ar = htt->ar;
2458 struct ath10k_peer *peer;
2459 struct htt_rx_indication_mpdu_range *mpdu_ranges;
2460 struct fw_rx_desc_hl *fw_desc;
2461 enum htt_txrx_sec_cast_type sec_index;
2462 enum htt_security_types sec_type;
2463 union htt_rx_pn_t new_pn = {0};
2464 struct htt_hl_rx_desc *rx_desc;
2465 struct ieee80211_hdr *hdr;
2466 struct ieee80211_rx_status *rx_status;
2469 int num_mpdu_ranges;
2471 struct ieee80211_channel *ch;
2472 bool pn_invalid, qos, first_msdu;
2473 u32 tid, rx_desc_info;
2475 peer_id = __le16_to_cpu(rx->hdr.peer_id);
2476 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2478 spin_lock_bh(&ar->data_lock);
2479 peer = ath10k_peer_find_by_id(ar, peer_id);
2480 spin_unlock_bh(&ar->data_lock);
2481 if (!peer && peer_id != HTT_INVALID_PEERID)
2482 ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);
2487 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
2488 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2489 mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
2490 fw_desc = &rx->fw_desc;
2491 rx_desc_len = fw_desc->len;
2493 if (fw_desc->u.bits.discard) {
2494 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n");
/* I have not yet seen any case where num_mpdu_ranges > 1.
 * qcacld does not seem to handle that case either, so we introduce the
 * same limitation here as well.
 */
2502 if (num_mpdu_ranges > 1)
2504 "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
2507 if (mpdu_ranges->mpdu_range_status !=
2508 HTT_RX_IND_MPDU_STATUS_OK &&
2509 mpdu_ranges->mpdu_range_status !=
2510 HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
2511 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n",
2512 mpdu_ranges->mpdu_range_status);
2516 rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
2517 rx_desc_info = __le32_to_cpu(rx_desc->info);
2519 if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
2520 sec_index = HTT_TXRX_SEC_MCAST;
2522 sec_index = HTT_TXRX_SEC_UCAST;
2524 sec_type = peer->rx_pn[sec_index].sec_type;
2525 first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU;
2527 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2529 if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) {
2530 spin_lock_bh(&ar->data_lock);
2531 pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx);
2532 spin_unlock_bh(&ar->data_lock);
/* Strip off all headers preceding the MAC header before delivery to
 * mac80211.
 */
2541 tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
2542 sizeof(rx->ppdu) + sizeof(rx->prefix) +
2543 sizeof(rx->fw_desc) +
2544 sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;
2546 skb_pull(skb, tot_hdr_len);
2548 hdr = (struct ieee80211_hdr *)skb->data;
2549 qos = ieee80211_is_data_qos(hdr->frame_control);
2551 rx_status = IEEE80211_SKB_RXCB(skb);
2552 memset(rx_status, 0, sizeof(*rx_status));
2554 if (rx->ppdu.combined_rssi == 0) {
2555 /* SDIO firmware does not provide signal */
2556 rx_status->signal = 0;
2557 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2559 rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
2560 rx->ppdu.combined_rssi;
2561 rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
2564 spin_lock_bh(&ar->data_lock);
2565 ch = ar->scan_channel;
2567 ch = ar->rx_channel;
2569 ch = ath10k_htt_rx_h_any_channel(ar);
2571 ch = ar->tgt_oper_chan;
2572 spin_unlock_bh(&ar->data_lock);
2575 rx_status->band = ch->band;
2576 rx_status->freq = ch->center_freq;
2578 if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
2579 rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
2581 rx_status->flag |= RX_FLAG_AMSDU_MORE;
/* Not entirely sure about this, but all frames from the chipset have
 * the protected flag set even though they have already been decrypted.
 * Unmasking this flag is necessary in order for mac80211 not to drop
 * the frame.
 * TODO: Verify this is always the case or find out a way to check
 * if there has been hw decryption.
 */
2590 if (ieee80211_has_protected(hdr->frame_control)) {
2591 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2592 rx_status->flag |= RX_FLAG_DECRYPTED |
2593 RX_FLAG_IV_STRIPPED |
2594 RX_FLAG_MMIC_STRIPPED;
2596 if (tid < IEEE80211_NUM_TIDS &&
2598 check_pn_type == HTT_RX_PN_CHECK &&
2599 (sec_type == HTT_SECURITY_AES_CCMP ||
2600 sec_type == HTT_SECURITY_TKIP ||
2601 sec_type == HTT_SECURITY_TKIP_NOMIC)) {
2604 __le64 pn48 = cpu_to_le64(new_pn.pn48);
2606 hdr = (struct ieee80211_hdr *)skb->data;
2607 offset = ieee80211_hdrlen(hdr->frame_control);
2608 hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2609 rx_status->flag &= ~RX_FLAG_IV_STRIPPED;
2611 memmove(skb->data - IEEE80211_CCMP_HDR_LEN,
2613 skb_push(skb, IEEE80211_CCMP_HDR_LEN);
2614 ivp = skb->data + offset;
2615 memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN);
2617 ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV;
2619 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
2620 if (peer->keys[i] &&
2621 peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE)
2622 keyidx = peer->keys[i]->keyidx;
2626 ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6;
2628 if (sec_type == HTT_SECURITY_AES_CCMP) {
2629 rx_status->flag |= RX_FLAG_MIC_STRIPPED;
2631 memcpy(skb->data + offset, &pn48, 2);
/* pn 2, pn 3, pn 4, pn 5 */
2633 memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
2635 rx_status->flag |= RX_FLAG_ICV_STRIPPED;
2637 memcpy(skb->data + offset + 2, &pn48, 1);
2639 memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1);
/* TSC 2, TSC 3, TSC 4, TSC 5 */
2641 memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
2646 if (tkip_mic_type == HTT_RX_TKIP_MIC)
2647 rx_status->flag &= ~RX_FLAG_IV_STRIPPED &
2648 ~RX_FLAG_MMIC_STRIPPED;
2650 if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
2651 rx_status->flag |= RX_FLAG_MMIC_ERROR;
2653 if (!qos && tid < IEEE80211_NUM_TIDS) {
2655 __le16 qos_ctrl = 0;
2657 hdr = (struct ieee80211_hdr *)skb->data;
2658 offset = ieee80211_hdrlen(hdr->frame_control);
2660 hdr->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
2661 memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset);
2662 skb_push(skb, IEEE80211_QOS_CTL_LEN);
2663 qos_ctrl = cpu_to_le16(tid);
2664 memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
2668 ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
2670 ieee80211_rx_ni(ar->hw, skb);
2672 /* We have delivered the skb to the upper layers (mac80211) so we
/* Tell the caller that it must free the skb since we have not
 * consumed it.
 */
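/* Byte layout produced by the CCMP header rebuild above (per IEEE 802.11
 * CCMP; pn48 is little-endian, so byte i of pn48 is PN byte i):
 *
 *	ivp[0] = PN0   ivp[1] = PN1   ivp[2] = reserved
 *	ivp[3] = (key_id << 6) | ExtIV
 *	ivp[4] = PN2   ivp[5] = PN3   ivp[6] = PN4   ivp[7] = PN5
 *
 * The two memcpy calls fill ivp[0..1] and ivp[4..7]; ivp[2] stays zero
 * from the preceding memset and ivp[3] receives the ExtIV and key id bits.
 */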
2683 static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb,
2689 orig_hdr = skb->data;
2690 ivp = orig_hdr + hdr_len + head_len;
2692 /* the ExtIV bit is always set to 1 for TKIP */
2693 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2696 memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
2697 skb_pull(skb, IEEE80211_TKIP_IV_LEN);
2698 skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN);
2702 static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb,
2708 orig_hdr = skb->data;
2709 ivp = orig_hdr + hdr_len + head_len;
2711 /* the ExtIV bit is always set to 1 for TKIP */
2712 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2715 memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
2716 skb_pull(skb, IEEE80211_TKIP_IV_LEN);
2717 skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
2721 static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb,
2727 orig_hdr = skb->data;
2728 ivp = orig_hdr + hdr_len + head_len;
2730 /* the ExtIV bit is always set to 1 for CCMP */
2731 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
2734 skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN);
2735 memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len);
2736 skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
2740 static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb,
2746 orig_hdr = skb->data;
2748 memmove(orig_hdr + IEEE80211_WEP_IV_LEN,
2749 orig_hdr, head_len + hdr_len);
2750 skb_pull(skb, IEEE80211_WEP_IV_LEN);
2751 skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);
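/* Summary of what the four decap helpers above strip, in bytes, using the
 * mac80211/ath10k length macros they reference (orientation sketch, not
 * driver data):
 *
 *	cipher          head (IV/hdr)    tail (MIC/ICV)
 *	TKIP, no MIC    8  TKIP IV       8  Michael MIC
 *	TKIP, with MIC  8  TKIP IV       4  TKIP ICV
 *	CCMP            8  CCMP header   8  CCMP MIC
 *	WEP             4  WEP IV        4  WEP ICV
 *
 * In the "with MIC" TKIP case the Michael MIC is left in the frame so that
 * mac80211 can verify it; see the HTT_RX_TKIP_MIC handling below.
 */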
2755 static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
2756 struct htt_rx_fragment_indication *rx,
2757 struct sk_buff *skb)
2759 struct ath10k *ar = htt->ar;
2760 enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC;
2761 enum htt_txrx_sec_cast_type sec_index;
2762 struct htt_rx_indication_hl *rx_hl;
2763 enum htt_security_types sec_type;
2764 u32 tid, frag, seq, rx_desc_info;
2765 union htt_rx_pn_t new_pn = {0};
2766 struct htt_hl_rx_desc *rx_desc;
2767 u16 peer_id, sc, hdr_space;
2768 union htt_rx_pn_t *last_pn;
2769 struct ieee80211_hdr *hdr;
2770 int ret, num_mpdu_ranges;
2771 struct ath10k_peer *peer;
2772 struct htt_resp *resp;
2775 resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
2776 skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
2777 skb_trim(skb, skb->len - FCS_LEN);
2779 peer_id = __le16_to_cpu(rx->peer_id);
2780 rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl);
2782 spin_lock_bh(&ar->data_lock);
2783 peer = ath10k_peer_find_by_id(ar, peer_id);
2785 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id);
2789 num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1),
2790 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2792 tot_hdr_len = sizeof(struct htt_resp_hdr) +
2793 sizeof(rx_hl->hdr) +
2794 sizeof(rx_hl->ppdu) +
2795 sizeof(rx_hl->prefix) +
2796 sizeof(rx_hl->fw_desc) +
2797 sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges;
2799 tid = MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2800 rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
2801 rx_desc_info = __le32_to_cpu(rx_desc->info);
2803 hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
2805 if (is_multicast_ether_addr(hdr->addr1)) {
2806 /* Discard the fragment with multicast DA */
2810 if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
2811 spin_unlock_bh(&ar->data_lock);
2812 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
2813 HTT_RX_NON_PN_CHECK,
2814 HTT_RX_NON_TKIP_MIC);
2817 if (ieee80211_has_retry(hdr->frame_control))
2820 hdr_space = ieee80211_hdrlen(hdr->frame_control);
2821 sc = __le16_to_cpu(hdr->seq_ctrl);
2822 seq = IEEE80211_SEQ_TO_SN(sc);
2823 frag = sc & IEEE80211_SCTL_FRAG;
2825 sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ?
2826 HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST;
2827 sec_type = peer->rx_pn[sec_index].sec_type;
2828 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
2831 case HTT_SECURITY_TKIP:
2832 tkip_mic = HTT_RX_TKIP_MIC;
2833 ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb,
2840 case HTT_SECURITY_TKIP_NOMIC:
2841 ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb,
2848 case HTT_SECURITY_AES_CCMP:
2849 ret = ath10k_htt_rx_frag_ccmp_decap(skb,
2850 tot_hdr_len + rx_hl->fw_desc.len,
2855 case HTT_SECURITY_WEP128:
2856 case HTT_SECURITY_WEP104:
2857 case HTT_SECURITY_WEP40:
2858 ret = ath10k_htt_rx_frag_wep_decap(skb,
2859 tot_hdr_len + rx_hl->fw_desc.len,
2868 resp = (struct htt_resp *)(skb->data);
2870 if (sec_type != HTT_SECURITY_AES_CCMP &&
2871 sec_type != HTT_SECURITY_TKIP &&
2872 sec_type != HTT_SECURITY_TKIP_NOMIC) {
2873 spin_unlock_bh(&ar->data_lock);
2874 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
2875 HTT_RX_NON_PN_CHECK,
2876 HTT_RX_NON_TKIP_MIC);
2879 last_pn = &peer->frag_tids_last_pn[tid];
2882 if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl))
2885 last_pn->pn48 = new_pn.pn48;
2886 peer->frag_tids_seq[tid] = seq;
2887 } else if (sec_type == HTT_SECURITY_AES_CCMP) {
2888 if (seq != peer->frag_tids_seq[tid])
2891 if (new_pn.pn48 != last_pn->pn48 + 1)
2894 last_pn->pn48 = new_pn.pn48;
2895 last_pn = &peer->tids_last_pn[tid];
2896 last_pn->pn48 = new_pn.pn48;
2899 spin_unlock_bh(&ar->data_lock);
2901 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
2902 HTT_RX_NON_PN_CHECK, tkip_mic);
2905 spin_unlock_bh(&ar->data_lock);
/* Tell the caller that it must free the skb since we have not
 * consumed it.
 */
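/* The fragment acceptance rule enforced above, in isolation (sketch): an
 * encrypted CCMP fragment must belong to the same MPDU as its predecessor
 * and its PN must advance by exactly one:
 *
 *	static bool ccmp_frag_ok(u16 frag_seq, u16 last_seq,
 *				 u64 frag_pn, u64 last_pn)
 *	{
 *		return frag_seq == last_seq && frag_pn == last_pn + 1;
 *	}
 */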
2913 static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
2914 struct htt_rx_indication *rx)
2916 struct ath10k *ar = htt->ar;
2917 struct htt_rx_indication_mpdu_range *mpdu_ranges;
2918 int num_mpdu_ranges;
2919 int i, mpdu_count = 0;
2923 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
2924 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
2925 peer_id = __le16_to_cpu(rx->hdr.peer_id);
2926 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
2928 mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
2930 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
2931 rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges));
2933 for (i = 0; i < num_mpdu_ranges; i++)
2934 mpdu_count += mpdu_ranges[i].mpdu_count;
2936 atomic_add(mpdu_count, &htt->num_mpdus_ready);
2938 ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
2942 static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
2943 struct sk_buff *skb)
2945 struct ath10k_htt *htt = &ar->htt;
2946 struct htt_resp *resp = (struct htt_resp *)skb->data;
2947 struct htt_tx_done tx_done = {};
2948 int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
2949 __le16 msdu_id, *msdus;
2950 bool rssi_enabled = false;
2951 u8 msdu_count = 0, num_airtime_records, tid;
2953 struct htt_data_tx_compl_ppdu_dur *ppdu_info;
2954 struct ath10k_peer *peer;
2955 u16 ppdu_info_offset = 0, peer_id;
2959 case HTT_DATA_TX_STATUS_NO_ACK:
2960 tx_done.status = HTT_TX_COMPL_STATE_NOACK;
2962 case HTT_DATA_TX_STATUS_OK:
2963 tx_done.status = HTT_TX_COMPL_STATE_ACK;
2965 case HTT_DATA_TX_STATUS_DISCARD:
2966 case HTT_DATA_TX_STATUS_POSTPONE:
2967 case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
2968 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2971 ath10k_warn(ar, "unhandled tx completion status %d\n", status);
2972 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2976 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
2977 resp->data_tx_completion.num_msdus);
2979 msdu_count = resp->data_tx_completion.num_msdus;
2980 msdus = resp->data_tx_completion.msdus;
2981 rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp);
2984 htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params,
2987 for (i = 0; i < msdu_count; i++) {
2989 tx_done.msdu_id = __le16_to_cpu(msdu_id);
/* The total number of MSDUs should be even;
 * if an odd number of MSDUs is sent, the firmware
 * fills the last msdu id with 0xffff.
 */
2996 if (msdu_count & 0x01) {
2997 msdu_id = msdus[msdu_count + i + 1 + htt_pad];
2998 tx_done.ack_rssi = __le16_to_cpu(msdu_id);
3000 msdu_id = msdus[msdu_count + i + htt_pad];
3001 tx_done.ack_rssi = __le16_to_cpu(msdu_id);
/* kfifo_put: In practice the firmware shouldn't fire off both the per-CE
 * interrupt and the main interrupt (MSI/MSI-X case) for the same
 * HTC service, so it should be safe to use kfifo_put without a lock.
 *
 * From the kfifo_put() documentation:
 * Note that with only one concurrent reader and one concurrent
 * writer, you don't need extra locking to use these macros.
 */
3013 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
3014 ath10k_txrx_tx_unref(htt, &tx_done);
3015 } else if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
3016 ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
3017 tx_done.msdu_id, tx_done.status);
3018 ath10k_txrx_tx_unref(htt, &tx_done);
3022 if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT))
3025 ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count;
3028 ppdu_info_offset += ppdu_info_offset;
3030 if (resp->data_tx_completion.flags2 &
3031 (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT))
3032 ppdu_info_offset += 2;
3034 ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset];
3035 num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK,
3036 __le32_to_cpu(ppdu_info->info0));
3038 for (i = 0; i < num_airtime_records; i++) {
3039 struct htt_data_tx_ppdu_dur *ppdu_dur;
3042 ppdu_dur = &ppdu_info->ppdu_dur[i];
3043 info0 = __le32_to_cpu(ppdu_dur->info0);
3045 peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK,
3048 spin_lock_bh(&ar->data_lock);
3050 peer = ath10k_peer_find_by_id(ar, peer_id);
3051 if (!peer || !peer->sta) {
3052 spin_unlock_bh(&ar->data_lock);
3057 tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) &
3058 IEEE80211_QOS_CTL_TID_MASK;
3059 tx_duration = __le32_to_cpu(ppdu_dur->tx_duration);
3061 ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0);
3063 spin_unlock_bh(&ar->data_lock);
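/* Layout of the msdus[] array decoded above when ACK RSSI reporting is
 * enabled (sketch; htt_pad is the hardware-specific padding taken from
 * hw_params):
 *
 *	even N: [id0 .. idN-1][rssi0 .. rssiN-1]
 *	odd N:  [id0 .. idN-1][0xffff][rssi0 .. rssiN-1]
 *
 * so rssi_i sits at msdus[N + i + htt_pad], plus one extra slot when N is
 * odd; this matches the two index expressions in the loop above.
 */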
3068 static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
3070 struct htt_rx_addba *ev = &resp->rx_addba;
3071 struct ath10k_peer *peer;
3072 struct ath10k_vif *arvif;
3073 u16 info0, tid, peer_id;
3075 info0 = __le16_to_cpu(ev->info0);
3076 tid = MS(info0, HTT_RX_BA_INFO0_TID);
3077 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
3079 ath10k_dbg(ar, ATH10K_DBG_HTT,
3080 "htt rx addba tid %u peer_id %u size %u\n",
3081 tid, peer_id, ev->window_size);
3083 spin_lock_bh(&ar->data_lock);
3084 peer = ath10k_peer_find_by_id(ar, peer_id);
3086 ath10k_warn(ar, "received addba event for invalid peer_id: %u\n",
3088 spin_unlock_bh(&ar->data_lock);
3092 arvif = ath10k_get_arvif(ar, peer->vdev_id);
3094 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
3096 spin_unlock_bh(&ar->data_lock);
3100 ath10k_dbg(ar, ATH10K_DBG_HTT,
3101 "htt rx start rx ba session sta %pM tid %u size %u\n",
3102 peer->addr, tid, ev->window_size);
3104 ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
3105 spin_unlock_bh(&ar->data_lock);
3108 static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
3110 struct htt_rx_delba *ev = &resp->rx_delba;
3111 struct ath10k_peer *peer;
3112 struct ath10k_vif *arvif;
3113 u16 info0, tid, peer_id;
3115 info0 = __le16_to_cpu(ev->info0);
3116 tid = MS(info0, HTT_RX_BA_INFO0_TID);
3117 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
3119 ath10k_dbg(ar, ATH10K_DBG_HTT,
3120 "htt rx delba tid %u peer_id %u\n",
3123 spin_lock_bh(&ar->data_lock);
3124 peer = ath10k_peer_find_by_id(ar, peer_id);
3126 ath10k_warn(ar, "received addba event for invalid peer_id: %u\n",
3128 spin_unlock_bh(&ar->data_lock);
3132 arvif = ath10k_get_arvif(ar, peer->vdev_id);
3134 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
3136 spin_unlock_bh(&ar->data_lock);
3140 ath10k_dbg(ar, ATH10K_DBG_HTT,
3141 "htt rx stop rx ba session sta %pM tid %u\n",
3144 ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
3145 spin_unlock_bh(&ar->data_lock);
3148 static int ath10k_htt_rx_extract_amsdu(struct ath10k_hw_params *hw,
3149 struct sk_buff_head *list,
3150 struct sk_buff_head *amsdu)
3152 struct sk_buff *msdu;
3153 struct htt_rx_desc *rxd;
3154 struct rx_msdu_end_common *rxd_msdu_end_common;
3156 if (skb_queue_empty(list))
3159 if (WARN_ON(!skb_queue_empty(amsdu)))
3162 while ((msdu = __skb_dequeue(list))) {
3163 __skb_queue_tail(amsdu, msdu);
3165 rxd = HTT_RX_BUF_TO_RX_DESC(hw,
3166 (void *)msdu->data -
3167 hw->rx_desc_ops->rx_desc_size);
3169 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
3170 if (rxd_msdu_end_common->info0 &
3171 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
3175 msdu = skb_peek_tail(amsdu);
3176 rxd = HTT_RX_BUF_TO_RX_DESC(hw,
3177 (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
3179 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
3180 if (!(rxd_msdu_end_common->info0 &
3181 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
3182 skb_queue_splice_init(amsdu, list);
3189 static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
3190 struct sk_buff *skb)
3192 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3194 if (!ieee80211_has_protected(hdr->frame_control))
3197 /* Offloaded frames are already decrypted but firmware insists they are
3198 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
3199 * will drop the frame.
3202 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
3203 status->flag |= RX_FLAG_DECRYPTED |
3204 RX_FLAG_IV_STRIPPED |
3205 RX_FLAG_MMIC_STRIPPED;
3208 static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
3209 struct sk_buff_head *list)
3211 struct ath10k_htt *htt = &ar->htt;
3212 struct ieee80211_rx_status *status = &htt->rx_status;
3213 struct htt_rx_offload_msdu *rx;
3214 struct sk_buff *msdu;
3217 while ((msdu = __skb_dequeue(list))) {
/* Offloaded frames don't have an Rx descriptor. Instead they have
 * a short meta information header.
 */
3222 rx = (void *)msdu->data;
3224 skb_put(msdu, sizeof(*rx));
3225 skb_pull(msdu, sizeof(*rx));
3227 if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
3228 ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
3229 dev_kfree_skb_any(msdu);
3233 skb_put(msdu, __le16_to_cpu(rx->msdu_len));
/* The offloaded rx header length isn't a multiple of 2 or 4, so the
 * actual payload is unaligned. Align the frame. Otherwise
 * mac80211 complains. This shouldn't reduce performance much
 * because these offloaded frames are rare.
 */
3240 offset = 4 - ((unsigned long)msdu->data & 3);
3241 skb_put(msdu, offset);
3242 memmove(msdu->data + offset, msdu->data, msdu->len);
3243 skb_pull(msdu, offset);
3245 /* FIXME: The frame is NWifi. Re-construct QoS Control
3246 * if possible later.
3249 memset(status, 0, sizeof(*status));
3250 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
3252 ath10k_htt_rx_h_rx_offload_prot(status, msdu);
3253 ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
3254 ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
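/* Worked example of the realignment above (sketch): with msdu->data at an
 * address where (addr & 3) == 1, offset becomes 4 - 1 = 3. skb_put() grows
 * the tail by 3 bytes, memmove() shifts the payload up by 3, and skb_pull()
 * advances data to addr + 3, which is 4-byte aligned. An already aligned
 * buffer is shifted by a full word and lands aligned again.
 */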
3258 static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
3260 struct ath10k_htt *htt = &ar->htt;
3261 struct htt_resp *resp = (void *)skb->data;
3262 struct ieee80211_rx_status *status = &htt->rx_status;
3263 struct sk_buff_head list;
3264 struct sk_buff_head amsdu;
3273 lockdep_assert_held(&htt->rx_ring.lock);
3275 if (htt->rx_confused)
3278 skb_pull(skb, sizeof(resp->hdr));
3279 skb_pull(skb, sizeof(resp->rx_in_ord_ind));
3281 peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
3282 msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
3283 vdev_id = resp->rx_in_ord_ind.vdev_id;
3284 tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
3285 offload = !!(resp->rx_in_ord_ind.info &
3286 HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
3287 frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
3289 ath10k_dbg(ar, ATH10K_DBG_HTT,
3290 "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
3291 vdev_id, peer_id, tid, offload, frag, msdu_count);
3293 if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
3294 ath10k_warn(ar, "dropping invalid in order rx indication\n");
3298 /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
3299 * extracted and processed.
3301 __skb_queue_head_init(&list);
3302 if (ar->hw_params.target_64bit)
3303 ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
3306 ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
3310 ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
3311 htt->rx_confused = true;
3315 /* Offloaded frames are very different and need to be handled
3319 ath10k_htt_rx_h_rx_offload(ar, &list);
3321 while (!skb_queue_empty(&list)) {
3322 __skb_queue_head_init(&amsdu);
3323 ret = ath10k_htt_rx_extract_amsdu(&ar->hw_params, &list, &amsdu);
/* Note: The in-order indication may report interleaved
 * frames from different PPDUs, meaning the reported rx rate
 * to mac80211 isn't accurate/reliable. It's still
 * better to report something than nothing though. This
 * should still give the user an idea about the rx rate.
 */
3332 ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
3333 ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
3334 ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
3335 NULL, peer_id, frag);
3336 ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
3341 /* Should not happen. */
3342 ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
3343 htt->rx_confused = true;
3344 __skb_queue_purge(&list);
3351 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
3352 const __le32 *resp_ids,
3358 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
3361 for (i = 0; i < num_resp_ids; i++) {
3362 resp_id = le32_to_cpu(resp_ids[i]);
3364 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
3367 /* TODO: free resp_id */
3371 static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
3373 struct ieee80211_hw *hw = ar->hw;
3374 struct ieee80211_txq *txq;
3375 struct htt_resp *resp = (struct htt_resp *)skb->data;
3376 struct htt_tx_fetch_record *record;
3378 size_t max_num_bytes;
3379 size_t max_num_msdus;
3382 const __le32 *resp_ids;
3391 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
3393 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
3394 if (unlikely(skb->len < len)) {
3395 ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
3399 num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
3400 num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
3402 len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
3403 len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
3405 if (unlikely(skb->len < len)) {
3406 ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
3410 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %u num resps %u seq %u\n",
3411 num_records, num_resp_ids,
3412 le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
3414 if (!ar->htt.tx_q_state.enabled) {
3415 ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
3419 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
3420 ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
3426 for (i = 0; i < num_records; i++) {
3427 record = &resp->tx_fetch_ind.records[i];
3428 peer_id = MS(le16_to_cpu(record->info),
3429 HTT_TX_FETCH_RECORD_INFO_PEER_ID);
3430 tid = MS(le16_to_cpu(record->info),
3431 HTT_TX_FETCH_RECORD_INFO_TID);
3432 max_num_msdus = le16_to_cpu(record->num_msdus);
3433 max_num_bytes = le32_to_cpu(record->num_bytes);
3435 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %u tid %u msdus %zu bytes %zu\n",
3436 i, peer_id, tid, max_num_msdus, max_num_bytes);
3438 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
3439 unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
3440 ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
3445 spin_lock_bh(&ar->data_lock);
3446 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
3447 spin_unlock_bh(&ar->data_lock);
/* It is okay to release the lock and use txq because RCU read
 * lock is held.
 */
3453 if (unlikely(!txq)) {
3454 ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
3462 ieee80211_txq_schedule_start(hw, txq->ac);
3463 may_tx = ieee80211_txq_may_transmit(hw, txq);
3464 while (num_msdus < max_num_msdus &&
3465 num_bytes < max_num_bytes) {
3469 ret = ath10k_mac_tx_push_txq(hw, txq);
3476 ieee80211_return_txq(hw, txq, false);
3477 ieee80211_txq_schedule_end(hw, txq->ac);
3479 record->num_msdus = cpu_to_le16(num_msdus);
3480 record->num_bytes = cpu_to_le32(num_bytes);
3482 ath10k_htt_tx_txq_recalc(hw, txq);
3487 resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
3488 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
3490 ret = ath10k_htt_tx_fetch_resp(ar,
3491 resp->tx_fetch_ind.token,
3492 resp->tx_fetch_ind.fetch_seq_num,
3493 resp->tx_fetch_ind.records,
3495 if (unlikely(ret)) {
3496 ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
3497 le32_to_cpu(resp->tx_fetch_ind.token), ret);
3498 /* FIXME: request fw restart */
3501 ath10k_htt_tx_txq_sync(ar);
3504 static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
3505 struct sk_buff *skb)
3507 const struct htt_resp *resp = (void *)skb->data;
3511 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
3513 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
3514 if (unlikely(skb->len < len)) {
3515 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
3519 num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
3520 len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
3522 if (unlikely(skb->len < len)) {
3523 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
3527 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
3528 resp->tx_fetch_confirm.resp_ids,
3532 static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
3533 struct sk_buff *skb)
3535 const struct htt_resp *resp = (void *)skb->data;
3536 const struct htt_tx_mode_switch_record *record;
3537 struct ieee80211_txq *txq;
3538 struct ath10k_txq *artxq;
3541 enum htt_tx_mode_switch_mode mode;
3550 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
3552 len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
3553 if (unlikely(skb->len < len)) {
3554 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
3558 info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
3559 info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
3561 enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
3563 mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
3564 threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
3566 ath10k_dbg(ar, ATH10K_DBG_HTT,
3567 "htt rx tx mode switch ind info0 0x%04x info1 0x%04x enable %d num records %zd mode %d threshold %u\n",
3568 info0, info1, enable, num_records, mode, threshold);
3570 len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
3572 if (unlikely(skb->len < len)) {
3573 ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
3578 case HTT_TX_MODE_SWITCH_PUSH:
3579 case HTT_TX_MODE_SWITCH_PUSH_PULL:
3582 ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
3590 ar->htt.tx_q_state.enabled = enable;
3591 ar->htt.tx_q_state.mode = mode;
3592 ar->htt.tx_q_state.num_push_allowed = threshold;
3596 for (i = 0; i < num_records; i++) {
3597 record = &resp->tx_mode_switch_ind.records[i];
3598 info0 = le16_to_cpu(record->info0);
3599 peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
3600 tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
3602 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
3603 unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
3604 ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
3609 spin_lock_bh(&ar->data_lock);
3610 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
3611 spin_unlock_bh(&ar->data_lock);
/* It is okay to release the lock and use txq because RCU read
 * lock is held.
 */
3617 if (unlikely(!txq)) {
3618 ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
3623 spin_lock_bh(&ar->htt.tx_lock);
3624 artxq = (void *)txq->drv_priv;
3625 artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
3626 spin_unlock_bh(&ar->htt.tx_lock);
3631 ath10k_mac_tx_push_pending(ar);
3634 void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
3638 release = ath10k_htt_t2h_msg_handler(ar, skb);
3640 /* Free the indication buffer */
3642 dev_kfree_skb_any(skb);
3645 static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
3647 static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
3648 18, 24, 36, 48, 54};
3651 for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
3652 if (rate == legacy_rates[i])
3656 ath10k_warn(ar, "Invalid legacy rate %d peer stats", rate);
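/* Usage sketch: the table above lists rates in Mbps with the CCK rates
 * first and 5 standing in for 5.5 Mbps, so the returned index can be used
 * as a rate index directly. Combined with the CCK quirk handled in
 * ath10k_update_per_peer_tx_stats() below (is_cck below is a stand-in for
 * the CCK preamble check):
 *
 *	u8 rate = ATH10K_HW_LEGACY_RATE(ratecode);   // e.g. FW reports 6
 *	if (is_cck && rate == 6)
 *		rate = 5;                            // FW encodes 5.5 Mbps as 6
 *	idx = ath10k_get_legacy_rate_idx(ar, rate);  // -> 2
 */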
3661 ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
3662 struct ath10k_sta *arsta,
3663 struct ath10k_per_peer_tx_stats *pstats,
3666 struct rate_info *txrate = &arsta->txrate;
3667 struct ath10k_htt_tx_stats *tx_stats;
3668 int idx, ht_idx, gi, mcs, bw, nss;
3669 unsigned long flags;
3671 if (!arsta->tx_stats)
3674 tx_stats = arsta->tx_stats;
3675 flags = txrate->flags;
3676 gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags);
3677 mcs = ATH10K_HW_MCS_RATE(pstats->ratecode);
3680 ht_idx = mcs + (nss - 1) * 8;
3681 idx = mcs * 8 + 8 * 10 * (nss - 1);
3684 #define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
3686 if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
3687 STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
3688 STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
3689 STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
3690 STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
3691 STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
3692 STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
3693 } else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
3694 STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
3695 STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
3696 STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
3697 STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
3698 STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
3699 STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
3701 mcs = legacy_rate_idx;
3703 STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
3704 STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
3705 STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
3706 STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
3707 STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
3708 STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
3711 if (ATH10K_HW_AMPDU(pstats->flags)) {
3712 tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);
3714 if (txrate->flags & RATE_INFO_FLAGS_MCS) {
3715 STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
3716 pstats->succ_bytes + pstats->retry_bytes;
3717 STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
3718 pstats->succ_pkts + pstats->retry_pkts;
3720 STATS_OP_FMT(AMPDU).vht[0][mcs] +=
3721 pstats->succ_bytes + pstats->retry_bytes;
3722 STATS_OP_FMT(AMPDU).vht[1][mcs] +=
3723 pstats->succ_pkts + pstats->retry_pkts;
3725 STATS_OP_FMT(AMPDU).bw[0][bw] +=
3726 pstats->succ_bytes + pstats->retry_bytes;
3727 STATS_OP_FMT(AMPDU).nss[0][nss - 1] +=
3728 pstats->succ_bytes + pstats->retry_bytes;
3729 STATS_OP_FMT(AMPDU).gi[0][gi] +=
3730 pstats->succ_bytes + pstats->retry_bytes;
3731 STATS_OP_FMT(AMPDU).rate_table[0][idx] +=
3732 pstats->succ_bytes + pstats->retry_bytes;
3733 STATS_OP_FMT(AMPDU).bw[1][bw] +=
3734 pstats->succ_pkts + pstats->retry_pkts;
3735 STATS_OP_FMT(AMPDU).nss[1][nss - 1] +=
3736 pstats->succ_pkts + pstats->retry_pkts;
3737 STATS_OP_FMT(AMPDU).gi[1][gi] +=
3738 pstats->succ_pkts + pstats->retry_pkts;
3739 STATS_OP_FMT(AMPDU).rate_table[1][idx] +=
3740 pstats->succ_pkts + pstats->retry_pkts;
3742 tx_stats->ack_fails +=
3743 ATH10K_HW_BA_FAIL(pstats->flags);
3746 STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
3747 STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes;
3748 STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;
3750 STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
3751 STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts;
3752 STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;
3754 STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
3755 STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes;
3756 STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;
3758 STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
3759 STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts;
3760 STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;
3762 STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
3763 STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes;
3764 STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;
3766 STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
3767 STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts;
3768 STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;
3770 if (txrate->flags >= RATE_INFO_FLAGS_MCS) {
3771 STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes;
3772 STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts;
3773 STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes;
3774 STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts;
3775 STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes;
3776 STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts;
3779 tx_stats->tx_duration += pstats->duration;
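/* Worked example for the two indices computed near the top of this
 * function (sketch):
 *
 *	nss = 2, mcs = 7:
 *	ht_idx = 7 + (2 - 1) * 8          = 15   // flat 0..31 HT index
 *	idx    = 7 * 8 + 8 * 10 * (2 - 1) = 136  // rate_table stride of
 *						 //   80 entries per stream
 */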
3783 ath10k_update_per_peer_tx_stats(struct ath10k *ar,
3784 struct ieee80211_sta *sta,
3785 struct ath10k_per_peer_tx_stats *peer_stats)
3787 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
3788 struct ieee80211_chanctx_conf *conf = NULL;
3791 bool skip_auto_rate;
3792 struct rate_info txrate;
3794 lockdep_assert_held(&ar->data_lock);
3796 txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
3797 txrate.bw = ATH10K_HW_BW(peer_stats->flags);
3798 txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
3799 txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
3800 sgi = ATH10K_HW_GI(peer_stats->flags);
3801 skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags);
/* Firmware's rate control skips broadcast/management frames,
 * if the host has configured fixed rates, and in some other special cases.
 */
3809 if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
3810 ath10k_warn(ar, "Invalid VHT mcs %d peer stats", txrate.mcs);
3814 if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
3815 (txrate.mcs > 7 || txrate.nss < 1)) {
3816 ath10k_warn(ar, "Invalid HT mcs %d nss %d peer stats",
3817 txrate.mcs, txrate.nss);
3821 memset(&arsta->txrate, 0, sizeof(arsta->txrate));
3822 memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
3823 if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
3824 txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
3825 rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
3826 /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
3827 if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
3829 rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
3832 arsta->txrate.legacy = rate;
3833 } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
3834 arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
3835 arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
3837 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
3838 arsta->txrate.mcs = txrate.mcs;
3841 switch (txrate.flags) {
3842 case WMI_RATE_PREAMBLE_OFDM:
3843 if (arsta->arvif && arsta->arvif->vif)
3844 conf = rcu_dereference(arsta->arvif->vif->bss_conf.chanctx_conf);
3845 if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
3846 arsta->tx_info.status.rates[0].idx = rate_idx - 4;
3848 case WMI_RATE_PREAMBLE_CCK:
3849 arsta->tx_info.status.rates[0].idx = rate_idx;
3851 arsta->tx_info.status.rates[0].flags |=
3852 (IEEE80211_TX_RC_USE_SHORT_PREAMBLE |
3853 IEEE80211_TX_RC_SHORT_GI);
3855 case WMI_RATE_PREAMBLE_HT:
3856 arsta->tx_info.status.rates[0].idx =
3857 txrate.mcs + ((txrate.nss - 1) * 8);
3859 arsta->tx_info.status.rates[0].flags |=
3860 IEEE80211_TX_RC_SHORT_GI;
3861 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
3863 case WMI_RATE_PREAMBLE_VHT:
3864 ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0],
3865 txrate.mcs, txrate.nss);
3867 arsta->tx_info.status.rates[0].flags |=
3868 IEEE80211_TX_RC_SHORT_GI;
3869 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
3873 arsta->txrate.nss = txrate.nss;
3874 arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
3875 arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate);
3877 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
3879 switch (arsta->txrate.bw) {
3880 case RATE_INFO_BW_40:
3881 arsta->tx_info.status.rates[0].flags |=
3882 IEEE80211_TX_RC_40_MHZ_WIDTH;
3884 case RATE_INFO_BW_80:
3885 arsta->tx_info.status.rates[0].flags |=
3886 IEEE80211_TX_RC_80_MHZ_WIDTH;
3888 case RATE_INFO_BW_160:
3889 arsta->tx_info.status.rates[0].flags |=
3890 IEEE80211_TX_RC_160_MHZ_WIDTH;
3894 if (peer_stats->succ_pkts) {
3895 arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
3896 arsta->tx_info.status.rates[0].count = 1;
3897 ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
3900 if (ar->htt.disable_tx_comp) {
3901 arsta->tx_failed += peer_stats->failed_pkts;
3902 ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n",
3906 arsta->tx_retries += peer_stats->retry_pkts;
3907 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d", arsta->tx_retries);
3909 if (ath10k_debug_is_extd_tx_stats_enabled(ar))
3910 ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
3914 static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
3915 struct sk_buff *skb)
3917 struct htt_resp *resp = (struct htt_resp *)skb->data;
3918 struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
3919 struct htt_per_peer_tx_stats_ind *tx_stats;
3920 struct ieee80211_sta *sta;
3921 struct ath10k_peer *peer;
3923 u8 ppdu_len, num_ppdu;
3925 num_ppdu = resp->peer_tx_stats.num_ppdu;
3926 ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
3928 if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
3929 ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
3933 tx_stats = (struct htt_per_peer_tx_stats_ind *)
3934 (resp->peer_tx_stats.payload);
3935 peer_id = __le16_to_cpu(tx_stats->peer_id);
3938 spin_lock_bh(&ar->data_lock);
3939 peer = ath10k_peer_find_by_id(ar, peer_id);
3940 if (!peer || !peer->sta) {
3941 ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
3947 for (i = 0; i < num_ppdu; i++) {
3948 tx_stats = (struct htt_per_peer_tx_stats_ind *)
3949 (resp->peer_tx_stats.payload + i * ppdu_len);
3951 p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
3952 p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
3953 p_tx_stats->failed_bytes =
3954 __le32_to_cpu(tx_stats->failed_bytes);
3955 p_tx_stats->ratecode = tx_stats->ratecode;
3956 p_tx_stats->flags = tx_stats->flags;
3957 p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
3958 p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
3959 p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
3960 p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration);
3962 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
3966 spin_unlock_bh(&ar->data_lock);
3970 static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
3972 struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
3973 struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
3974 struct ath10k_10_2_peer_tx_stats *tx_stats;
3975 struct ieee80211_sta *sta;
3976 struct ath10k_peer *peer;
3977 u16 log_type = __le16_to_cpu(hdr->log_type);
3980 if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
3983 tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
3984 ATH10K_10_2_TX_STATS_OFFSET);
3986 if (!tx_stats->tx_ppdu_cnt)
3989 peer_id = tx_stats->peer_id;
3992 spin_lock_bh(&ar->data_lock);
3993 peer = ath10k_peer_find_by_id(ar, peer_id);
3994 if (!peer || !peer->sta) {
3995 ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
4001 for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
4002 p_tx_stats->succ_bytes =
4003 __le16_to_cpu(tx_stats->success_bytes[i]);
4004 p_tx_stats->retry_bytes =
4005 __le16_to_cpu(tx_stats->retry_bytes[i]);
4006 p_tx_stats->failed_bytes =
4007 __le16_to_cpu(tx_stats->failed_bytes[i]);
4008 p_tx_stats->ratecode = tx_stats->ratecode[i];
4009 p_tx_stats->flags = tx_stats->flags[i];
4010 p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
4011 p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
4012 p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
4014 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
4016 spin_unlock_bh(&ar->data_lock);
4022 spin_unlock_bh(&ar->data_lock);
4026 static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type)
4029 case HTT_SECURITY_TKIP:
4030 case HTT_SECURITY_TKIP_NOMIC:
4031 case HTT_SECURITY_AES_CCMP:
4038 static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar,
4039 struct htt_security_indication *ev)
4041 enum htt_txrx_sec_cast_type sec_index;
4042 enum htt_security_types sec_type;
4043 struct ath10k_peer *peer;
4045 spin_lock_bh(&ar->data_lock);
4047 peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id));
4049 ath10k_warn(ar, "failed to find peer id %d for security indication",
4050 __le16_to_cpu(ev->peer_id));
4054 sec_type = MS(ev->flags, HTT_SECURITY_TYPE);
4056 if (ev->flags & HTT_SECURITY_IS_UNICAST)
4057 sec_index = HTT_TXRX_SEC_UCAST;
4059 sec_index = HTT_TXRX_SEC_MCAST;
4061 peer->rx_pn[sec_index].sec_type = sec_type;
4062 peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type);
4064 memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid));
4065 memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn));
4068 spin_unlock_bh(&ar->data_lock);
4071 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
4073 struct ath10k_htt *htt = &ar->htt;
4074 struct htt_resp *resp = (struct htt_resp *)skb->data;
4075 enum htt_t2h_msg_type type;
4077 /* confirm alignment */
4078 if (!IS_ALIGNED((unsigned long)skb->data, 4))
4079 ath10k_warn(ar, "unaligned htt message, expect trouble\n");
4081 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
4082 resp->hdr.msg_type);
4084 if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
4085 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
4086 resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
4089 type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
4092 case HTT_T2H_MSG_TYPE_VERSION_CONF: {
4093 htt->target_version_major = resp->ver_resp.major;
4094 htt->target_version_minor = resp->ver_resp.minor;
4095 complete(&htt->target_version_received);
4098 case HTT_T2H_MSG_TYPE_RX_IND:
4099 if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
4100 ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
4102 skb_queue_tail(&htt->rx_indication_head, skb);
4106 case HTT_T2H_MSG_TYPE_PEER_MAP: {
4107 struct htt_peer_map_event ev = {
4108 .vdev_id = resp->peer_map.vdev_id,
4109 .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
4111 memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
4112 ath10k_peer_map_event(htt, &ev);
4115 case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
4116 struct htt_peer_unmap_event ev = {
4117 .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
4119 ath10k_peer_unmap_event(htt, &ev);
4122 case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
4123 struct htt_tx_done tx_done = {};
4124 struct ath10k_htt *htt = &ar->htt;
4125 struct ath10k_htc *htc = &ar->htc;
4126 struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
4127 int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
4128 int info = __le32_to_cpu(resp->mgmt_tx_completion.info);
4130 tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
4133 case HTT_MGMT_TX_STATUS_OK:
4134 tx_done.status = HTT_TX_COMPL_STATE_ACK;
4135 if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
4137 (resp->mgmt_tx_completion.flags &
4138 HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
4140 FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
4144 case HTT_MGMT_TX_STATUS_RETRY:
4145 tx_done.status = HTT_TX_COMPL_STATE_NOACK;
4147 case HTT_MGMT_TX_STATUS_DROP:
4148 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
4152 if (htt->disable_tx_comp) {
4153 spin_lock_bh(&htc->tx_lock);
4155 spin_unlock_bh(&htc->tx_lock);
4158 status = ath10k_txrx_tx_unref(htt, &tx_done);
4160 spin_lock_bh(&htt->tx_lock);
4161 ath10k_htt_tx_mgmt_dec_pending(htt);
4162 spin_unlock_bh(&htt->tx_lock);
4166 case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
4167 ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
4169 case HTT_T2H_MSG_TYPE_SEC_IND: {
4170 struct ath10k *ar = htt->ar;
4171 struct htt_security_indication *ev = &resp->security_indication;
4173 ath10k_htt_rx_sec_ind_handler(ar, ev);
4174 ath10k_dbg(ar, ATH10K_DBG_HTT,
4175 "sec ind peer_id %d unicast %d type %d\n",
4176 __le16_to_cpu(ev->peer_id),
4177 !!(ev->flags & HTT_SECURITY_IS_UNICAST),
4178 MS(ev->flags, HTT_SECURITY_TYPE));
4179 complete(&ar->install_key_done);
4182 case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
4183 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
4184 skb->data, skb->len);
4185 atomic_inc(&htt->num_mpdus_ready);
4187 return ath10k_htt_rx_proc_rx_frag_ind(htt,
4191 case HTT_T2H_MSG_TYPE_TEST:
4193 case HTT_T2H_MSG_TYPE_STATS_CONF:
4194 trace_ath10k_htt_stats(ar, skb->data, skb->len);
4196 case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
4197 /* Firmware can return tx frames if it's unable to fully
4198 * process them and suspects host may be able to fix it. ath10k
4199 * sends all tx frames as already inspected so this shouldn't
4200 * happen unless fw has a bug.
4202 ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
4204 case HTT_T2H_MSG_TYPE_RX_ADDBA:
4205 ath10k_htt_rx_addba(ar, resp);
4207 case HTT_T2H_MSG_TYPE_RX_DELBA:
4208 ath10k_htt_rx_delba(ar, resp);
4210 case HTT_T2H_MSG_TYPE_PKTLOG: {
4211 trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
4213 offsetof(struct htt_resp,
4214 pktlog_msg.payload));
4216 if (ath10k_peer_stats_enabled(ar))
4217 ath10k_fetch_10_2_tx_stats(ar,
4218 resp->pktlog_msg.payload);
4221 case HTT_T2H_MSG_TYPE_RX_FLUSH: {
/* Ignore this event because mac80211 takes care of Rx
 * aggregation reordering.
 */
4227 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
4228 skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
4231 case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: {
4232 struct ath10k_htt *htt = &ar->htt;
4233 struct ath10k_htc *htc = &ar->htc;
4234 struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
4235 u32 msg_word = __le32_to_cpu(*(__le32 *)resp);
4236 int htt_credit_delta;
4238 htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word);
4239 if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word))
4240 htt_credit_delta = -htt_credit_delta;
4242 ath10k_dbg(ar, ATH10K_DBG_HTT,
4243 "htt credit update delta %d\n",
4246 if (htt->disable_tx_comp) {
4247 spin_lock_bh(&htc->tx_lock);
4248 ep->tx_credits += htt_credit_delta;
4249 spin_unlock_bh(&htc->tx_lock);
4250 ath10k_dbg(ar, ATH10K_DBG_HTT,
4251 "htt credit total %d\n",
4253 ep->ep_ops.ep_tx_credits(htc->ar);
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
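	/* TX fetch indications are handled later in NAPI context, so queue
	 * a private copy: the original skb is freed by the caller once this
	 * handler returns true.
	 */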
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_PEER_STATS:
		ath10k_htt_fetch_peer_stats(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	/* Returning true tells the caller it still owns the skb and must
	 * free it.
	 */
	return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);

void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
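
/* Hand up to (budget - quota) already-processed msdus from rx_msdus_q to
 * mac80211 and return the updated quota, so the caller can account the
 * work against its NAPI budget.
 */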
static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
{
	struct sk_buff *skb;

	while (quota < budget) {
		if (skb_queue_empty(&ar->htt.rx_msdus_q))
			break;

		skb = skb_dequeue(&ar->htt.rx_msdus_q);
		if (!skb)
			break;
		ath10k_process_rx(ar, skb);
		quota++;
	}

	return quota;
}
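
/* Batched rx indication processing for high-latency (e.g. SDIO) targets:
 * indications queued by the t2h handler are drained here, bounded by the
 * NAPI budget, with PN and non-TKIP MIC checks requested for each one.
 */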
int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
{
	struct htt_resp *resp;
	struct ath10k_htt *htt = &ar->htt;
	struct sk_buff *skb;
	bool release;
	int quota;

	for (quota = 0; quota < budget; quota++) {
		skb = skb_dequeue(&htt->rx_indication_head);
		if (!skb)
			break;

		resp = (struct htt_resp *)skb->data;

		release = ath10k_htt_rx_proc_rx_ind_hl(htt,
						       &resp->rx_ind_hl,
						       skb,
						       HTT_RX_PN_CHECK,
						       HTT_RX_NON_TKIP_MIC);

		if (release)
			dev_kfree_skb_any(skb);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
			   skb_queue_len(&htt->rx_indication_head));
	}
	return quota;
}
EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);
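
/* HTT NAPI poll: deliver pending msdus, process in-order indications and
 * ready amsdus, reap tx completions and tx fetch indications, then
 * replenish the rx ring. Returns the budget consumed; reporting the full
 * budget keeps the poll scheduled.
 */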
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int quota = 0, done, ret;
	bool resched_napi = false;

	__skb_queue_head_init(&tx_ind_q);

	/* Process pending frames before dequeuing more data
	 * from hardware.
	 */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
	if (quota == budget) {
		resched_napi = true;
		goto exit;
	}

	while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
		spin_lock_bh(&htt->rx_ring.lock);
		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);

		dev_kfree_skb_any(skb);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
	}

	while (atomic_read(&htt->num_mpdus_ready)) {
		ret = ath10k_htt_rx_handle_amsdu(htt);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
		atomic_dec(&htt->num_mpdus_ready);
	}

	/* Deliver received data after processing data from hardware */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);

	/* From NAPI documentation:
	 *  The napi poll() function may also process TX completions, in which
	 *  case if it processes the entire TX ring then it should count that
	 *  work as the rest of the budget.
	 */
	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
		quota = budget;

	/* kfifo_get: called only from this NAPI poll context so it's neatly
	 * serialized. From kfifo_get() documentation:
	 *  Note that with only one concurrent reader and one concurrent writer,
	 *  you don't need extra locking to use these macros.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);

	ath10k_mac_tx_push_pending(ar);

	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

exit:
	ath10k_htt_rx_msdu_buff_replenish(htt);
	/* In case of rx failure or more data to read, report budget
	 * to reschedule NAPI poll
	 */
	done = resched_napi ? budget : quota;

	return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
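
/* The three rx_ops variants differ only in how the host rx ring is
 * addressed: 32 bit vs 64 bit target paddr rings. High-latency targets
 * have no host-filled rx ring and only provide the fragment handler.
 */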
static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
	.htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl,
};
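
/* Selected once at init so the hot rx path can indirect through
 * htt->rx_ops without re-checking the bus or target type.
 */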
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		htt->rx_ops = &htt_rx_ops_hl;
	else if (ar->hw_params.target_64bit)
		htt->rx_ops = &htt_rx_ops_64;
	else
		htt->rx_ops = &htt_rx_ops_32;
}