1 // SPDX-License-Identifier: ISC
5 #include <linux/sched.h>
/* Channel-initializer helpers: expand to partial ieee80211_channel
 * initializers for the 2.4 / 5 / 6 GHz bands (band id + center frequency;
 * remaining fields of each macro are not visible in this excerpt). */
9 #define CHAN2G(_idx, _freq) { \
10 .band = NL80211_BAND_2GHZ, \
11 .center_freq = (_freq), \
16 #define CHAN5G(_idx, _freq) { \
17 .band = NL80211_BAND_5GHZ, \
18 .center_freq = (_freq), \
23 #define CHAN6G(_idx, _freq) { \
24 .band = NL80211_BAND_6GHZ, \
25 .center_freq = (_freq), \
30 static const struct ieee80211_channel mt76_channels_2ghz[] = {
	/* 2.4 GHz channel list (entries elided in this excerpt). */
47 static const struct ieee80211_channel mt76_channels_5ghz[] = {
	/* 5 GHz channel list (entries elided in this excerpt). */
81 static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* 6 GHz channel list (entries elided in this excerpt). */
147 static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	/* Throughput thresholds (in units of 1024) mapped to LED blink
	 * intervals: higher throughput -> shorter blink_time (faster blink). */
148 { .throughput = 0 * 1024, .blink_time = 334 },
149 { .throughput = 1 * 1024, .blink_time = 260 },
150 { .throughput = 5 * 1024, .blink_time = 220 },
151 { .throughput = 10 * 1024, .blink_time = 190 },
152 { .throughput = 20 * 1024, .blink_time = 170 },
153 { .throughput = 50 * 1024, .blink_time = 150 },
154 { .throughput = 70 * 1024, .blink_time = 130 },
155 { .throughput = 100 * 1024, .blink_time = 110 },
156 { .throughput = 200 * 1024, .blink_time = 80 },
157 { .throughput = 300 * 1024, .blink_time = 50 },
160 struct ieee80211_rate mt76_rates[] = {
	/* Shared legacy bitrate table exported to the mt76 sub-drivers
	 * (entries elided in this excerpt). */
174 EXPORT_SYMBOL_GPL(mt76_rates);
176 static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	/* SAR frequency ranges (MHz) covering 2.4, 5 and 6 GHz; one
	 * mt76_freq_range_power entry is allocated per range in
	 * mt76_phy_init(). */
177 { .start_freq = 2402, .end_freq = 2494, },
178 { .start_freq = 5150, .end_freq = 5350, },
179 { .start_freq = 5350, .end_freq = 5470, },
180 { .start_freq = 5470, .end_freq = 5725, },
181 { .start_freq = 5725, .end_freq = 5950, },
182 { .start_freq = 5945, .end_freq = 6165, },
183 { .start_freq = 6165, .end_freq = 6405, },
184 { .start_freq = 6405, .end_freq = 6525, },
185 { .start_freq = 6525, .end_freq = 6705, },
186 { .start_freq = 6705, .end_freq = 6865, },
187 { .start_freq = 6865, .end_freq = 7125, },
190 static const struct cfg80211_sar_capa mt76_sar_capa = {
	/* Advertised SAR capability: per-frequency-range power limits. */
191 .type = NL80211_SAR_TYPE_POWER,
192 .num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
193 .freq_ranges = &mt76_sar_freq_ranges[0],
196 static int mt76_led_init(struct mt76_phy *phy)
	/* Register a LED classdev for this phy. Honors the DT "led" child
	 * node (status, led-sources pin, led-active-low) and attaches a
	 * mac80211 throughput-based blink trigger. Parts of the body are
	 * not visible in this excerpt. */
198 struct mt76_dev *dev = phy->dev;
199 struct ieee80211_hw *hw = phy->hw;
200 struct device_node *np = dev->dev->of_node;
	/* No driver-provided LED ops -> nothing to register. */
202 if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
205 np = of_get_child_by_name(np, "led");
207 if (!of_device_is_available(np)) {
210 "led registration was explicitly disabled by dts\n");
	/* DT LED properties are only read for the primary phy.
	 * NOTE(review): inferred from the visible guard — confirm
	 * against the full source. */
214 if (phy == &dev->phy) {
217 if (!of_property_read_u32(np, "led-sources", &led_pin))
218 phy->leds.pin = led_pin;
221 of_property_read_bool(np, "led-active-low");
227 snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
228 wiphy_name(hw->wiphy));
230 phy->leds.cdev.name = phy->leds.name;
231 phy->leds.cdev.default_trigger =
232 ieee80211_create_tpt_led_trigger(hw,
233 IEEE80211_TPT_LEDTRIG_FL_RADIO,
235 ARRAY_SIZE(mt76_tpt_blink));
238 "registering led '%s'\n", phy->leds.name);
240 return led_classdev_register(dev->dev, &phy->leds.cdev);
243 static void mt76_led_cleanup(struct mt76_phy *phy)
	/* Unregister the LED classdev iff one was set up (same ops guard
	 * as mt76_led_init()). */
245 if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
248 led_classdev_unregister(&phy->leds.cdev);
251 static void mt76_init_stream_cap(struct mt76_phy *phy,
252 struct ieee80211_supported_band *sband,
	/* Derive stream-count-dependent HT/VHT capabilities from the phy
	 * antenna mask: TX STBC toggling and per-stream MCS maps. */
255 struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
256 int i, nstream = hweight8(phy->antenna_mask);
257 struct ieee80211_sta_vht_cap *vht_cap;
261 ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
263 ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;
	/* HT RX MCS mask: 0xff for each supported spatial stream. */
265 for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
266 ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;
271 vht_cap = &sband->vht_cap;
273 vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
275 vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
276 vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
277 IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
	/* Build the 2-bit-per-stream VHT MCS map (MCS 0-9 for supported
	 * streams, NOT_SUPPORTED otherwise). */
279 for (i = 0; i < 8; i++) {
281 mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
284 (IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
286 vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
287 vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
288 if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
289 vht_cap->vht_mcs.tx_highest |=
290 cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
293 void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
	/* Refresh stream caps on every supported band; VHT is only passed
	 * through for the 5/6 GHz sbands. */
295 if (phy->cap.has_2ghz)
296 mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
297 if (phy->cap.has_5ghz)
298 mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
299 if (phy->cap.has_6ghz)
300 mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
302 EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
305 mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
306 const struct ieee80211_channel *chan, int n_chan,
307 struct ieee80211_rate *rates, int n_rates,
	/* Populate one supported band: devm-duplicate the const channel
	 * template, allocate per-channel state, attach the bitrate table,
	 * then fill in HT (and optionally VHT) station capabilities. */
310 struct ieee80211_supported_band *sband = &msband->sband;
311 struct ieee80211_sta_vht_cap *vht_cap;
312 struct ieee80211_sta_ht_cap *ht_cap;
313 struct mt76_dev *dev = phy->dev;
	/* Writable copy of the const channel template (device-managed). */
317 size = n_chan * sizeof(*chan);
318 chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
322 msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
327 sband->channels = chanlist;
328 sband->n_channels = n_chan;
329 sband->bitrates = rates;
330 sband->n_bitrates = n_rates;
	/* Baseline HT caps common to all mt76 devices. */
335 ht_cap = &sband->ht_cap;
336 ht_cap->ht_supported = true;
337 ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
338 IEEE80211_HT_CAP_GRN_FLD |
339 IEEE80211_HT_CAP_SGI_20 |
340 IEEE80211_HT_CAP_SGI_40 |
341 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
343 ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
344 ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
346 mt76_init_stream_cap(phy, sband, vht);
	/* Baseline VHT caps (80 MHz short GI, LDPC RX, 1-stream RX STBC). */
351 vht_cap = &sband->vht_cap;
352 vht_cap->vht_supported = true;
353 vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
354 IEEE80211_VHT_CAP_RXSTBC_1 |
355 IEEE80211_VHT_CAP_SHORT_GI_80 |
356 (3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
362 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
	/* 2.4 GHz sband: full rate table, HT enabled, no VHT. */
365 phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
367 return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
368 ARRAY_SIZE(mt76_channels_2ghz), rates,
369 n_rates, true, false);
373 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374 int n_rates, bool vht)
	/* 5 GHz sband: caller passes a rate table without CCK entries;
	 * VHT support is caller-selected. */
376 phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
378 return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
379 ARRAY_SIZE(mt76_channels_5ghz), rates,
384 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
	/* 6 GHz sband: both HT and VHT flags passed as false (presumably
	 * because 6 GHz uses HE/EHT — confirm against full source). */
387 phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
389 return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
390 ARRAY_SIZE(mt76_channels_6ghz), rates,
391 n_rates, false, false);
395 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
396 enum nl80211_band band)
	/* Scan the band for a usable (non-disabled) channel. If one exists,
	 * use the band's first channel as the default chandef/chan_state;
	 * otherwise remove the band from the wiphy entirely. */
398 struct ieee80211_supported_band *sband = &msband->sband;
405 for (i = 0; i < sband->n_channels; i++) {
406 if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
414 phy->chandef.chan = &sband->channels[0];
415 phy->chan_state = &msband->chan[0];
	/* No usable channel: disable and unpublish the band. */
419 sband->n_channels = 0;
420 phy->hw->wiphy->bands[band] = NULL;
424 mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
	/* Common mac80211 hw/wiphy setup shared by all mt76 phys:
	 * wiphy feature flags, SAR capability + per-range power storage,
	 * and the default set of ieee80211_hw capability bits. */
426 struct mt76_dev *dev = phy->dev;
427 struct wiphy *wiphy = hw->wiphy;
429 INIT_LIST_HEAD(&phy->tx_list);
430 spin_lock_init(&phy->tx_lock);
432 SET_IEEE80211_DEV(hw, dev->dev);
433 SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);
435 wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
436 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
437 wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
438 WIPHY_FLAG_SUPPORTS_TDLS |
441 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
442 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
443 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);
445 wiphy->available_antennas_tx = phy->antenna_mask;
446 wiphy->available_antennas_rx = phy->antenna_mask;
	/* Per-frequency-range user power limits backing the SAR capa. */
448 wiphy->sar_capa = &mt76_sar_capa;
449 phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
450 sizeof(struct mt76_freq_range_power),
455 hw->txq_data_size = sizeof(struct mt76_txq);
456 hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
458 if (!hw->max_tx_fragments)
459 hw->max_tx_fragments = 16;
461 ieee80211_hw_set(hw, SIGNAL_DBM);
462 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
463 ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
464 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
465 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
466 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
467 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
468 ieee80211_hw_set(hw, SPECTRUM_MGMT);
	/* Software A-MSDU/frag-list TX only when the driver does not
	 * offload A-MSDU aggregation and fragments are available. */
470 if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
471 hw->max_tx_fragments > 1) {
472 ieee80211_hw_set(hw, TX_AMSDU);
473 ieee80211_hw_set(hw, TX_FRAG_LIST);
476 ieee80211_hw_set(hw, MFP_CAPABLE);
477 ieee80211_hw_set(hw, AP_LINK_PS);
478 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
484 mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
485 const struct ieee80211_ops *ops, u8 band_idx)
	/* Allocate a secondary-band phy: one ieee80211_hw allocation holds
	 * the 8-byte-aligned mt76_phy followed by @size bytes of driver
	 * private data (phy->priv). */
487 struct ieee80211_hw *hw;
488 unsigned int phy_size;
489 struct mt76_phy *phy;
491 phy_size = ALIGN(sizeof(*phy), 8);
492 hw = ieee80211_alloc_hw(size + phy_size, ops);
499 phy->priv = hw->priv + phy_size;
500 phy->band_idx = band_idx;
	/* Same default interface modes as mt76_alloc_device(). */
502 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
503 hw->wiphy->interface_modes =
504 BIT(NL80211_IFTYPE_STATION) |
505 BIT(NL80211_IFTYPE_AP) |
506 #ifdef CONFIG_MAC80211_MESH
507 BIT(NL80211_IFTYPE_MESH_POINT) |
509 BIT(NL80211_IFTYPE_P2P_CLIENT) |
510 BIT(NL80211_IFTYPE_P2P_GO) |
511 BIT(NL80211_IFTYPE_ADHOC);
515 EXPORT_SYMBOL_GPL(mt76_alloc_phy);
517 int mt76_register_phy(struct mt76_phy *phy, bool vht,
518 struct ieee80211_rate *rates, int n_rates)
	/* Register a secondary phy with mac80211: phy init, per-band sband
	 * setup, optional LEDs, DT frequency limits, band pruning, then
	 * ieee80211_register_hw(). */
522 ret = mt76_phy_init(phy, phy->hw);
526 if (phy->cap.has_2ghz) {
527 ret = mt76_init_sband_2g(phy, rates, n_rates);
532 if (phy->cap.has_5ghz) {
	/* rates + 4 / n_rates - 4: skip the first 4 rate entries on
	 * 5/6 GHz. NOTE(review): assumes those are the CCK rates —
	 * confirm against mt76_rates. */
533 ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
538 if (phy->cap.has_6ghz) {
539 ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
544 if (IS_ENABLED(CONFIG_MT76_LEDS)) {
545 ret = mt76_led_init(phy);
550 wiphy_read_of_freq_limits(phy->hw->wiphy);
551 mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
552 mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
553 mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);
555 ret = ieee80211_register_hw(phy->hw);
559 set_bit(MT76_STATE_REGISTERED, &phy->state);
560 phy->dev->phys[phy->band_idx] = phy;
564 EXPORT_SYMBOL_GPL(mt76_register_phy);
566 void mt76_unregister_phy(struct mt76_phy *phy)
	/* Tear down a registered secondary phy: LEDs, flush pending tx
	 * status, mac80211 unregister, then clear its dev->phys slot. */
568 struct mt76_dev *dev = phy->dev;
570 if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
573 if (IS_ENABLED(CONFIG_MT76_LEDS))
574 mt76_led_cleanup(phy);
575 mt76_tx_status_check(dev, true);
576 ieee80211_unregister_hw(phy->hw);
577 dev->phys[phy->band_idx] = NULL;
579 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
581 int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
	/* Create a page_pool for rx (and WED tx-free) queues. On MMIO
	 * devices the pool also handles DMA mapping/sync and is tied to
	 * the rx queue's NAPI context. */
583 bool is_qrx = mt76_queue_is_rx(dev, q);
584 struct page_pool_params pp_params = {
	/* idx: rx queue index, or -1 for non-rx (wed_tx_free) queues. */
590 int idx = is_qrx ? q - dev->q_rx : -1;
592 /* Allocate page_pools just for rx/wed_tx_free queues */
593 if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
600 pp_params.pool_size = 256;
603 pp_params.pool_size = 16;
607 if (mt76_is_mmio(dev)) {
608 /* rely on page_pool for DMA mapping */
609 pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
610 pp_params.dma_dir = DMA_FROM_DEVICE;
611 pp_params.max_len = PAGE_SIZE;
612 pp_params.offset = 0;
613 /* NAPI is available just for rx queues */
614 if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
615 pp_params.napi = &dev->napi[idx];
618 q->page_pool = page_pool_create(&pp_params);
619 if (IS_ERR(q->page_pool)) {
620 int err = PTR_ERR(q->page_pool);
628 EXPORT_SYMBOL_GPL(mt76_create_page_pool);
631 mt76_alloc_device(struct device *pdev, unsigned int size,
632 const struct ieee80211_ops *ops,
633 const struct mt76_driver_ops *drv_ops)
	/* Allocate and initialize the primary mt76_dev/mt76_phy pair:
	 * mac80211 hw allocation, all core locks/lists/idr state, the tx
	 * worker hook, default interface modes and the ordered workqueue. */
635 struct ieee80211_hw *hw;
636 struct mt76_phy *phy;
637 struct mt76_dev *dev;
640 hw = ieee80211_alloc_hw(size, ops);
	/* The primary phy always occupies the MT_BAND0 slot. */
653 phy->band_idx = MT_BAND0;
654 dev->phys[phy->band_idx] = phy;
656 spin_lock_init(&dev->rx_lock);
657 spin_lock_init(&dev->lock);
658 spin_lock_init(&dev->cc_lock);
659 spin_lock_init(&dev->status_lock);
660 spin_lock_init(&dev->wed_lock);
661 mutex_init(&dev->mutex);
662 init_waitqueue_head(&dev->tx_wait);
664 skb_queue_head_init(&dev->mcu.res_q);
665 init_waitqueue_head(&dev->mcu.wait);
666 mutex_init(&dev->mcu.mutex);
667 dev->tx_worker.fn = mt76_tx_worker;
669 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
670 hw->wiphy->interface_modes =
671 BIT(NL80211_IFTYPE_STATION) |
672 BIT(NL80211_IFTYPE_AP) |
673 #ifdef CONFIG_MAC80211_MESH
674 BIT(NL80211_IFTYPE_MESH_POINT) |
676 BIT(NL80211_IFTYPE_P2P_CLIENT) |
677 BIT(NL80211_IFTYPE_P2P_GO) |
678 BIT(NL80211_IFTYPE_ADHOC);
680 spin_lock_init(&dev->token_lock);
681 idr_init(&dev->token);
683 spin_lock_init(&dev->rx_token_lock);
684 idr_init(&dev->rx_token);
686 INIT_LIST_HEAD(&dev->wcid_list);
687 INIT_LIST_HEAD(&dev->sta_poll_list);
688 spin_lock_init(&dev->sta_poll_lock);
690 INIT_LIST_HEAD(&dev->txwi_cache);
691 INIT_LIST_HEAD(&dev->rxwi_cache);
692 dev->token_size = dev->drv->token_size;
694 for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
695 skb_queue_head_init(&dev->rx_skb[i]);
	/* Workqueue allocation failure frees the hw and bails out. */
697 dev->wq = alloc_ordered_workqueue("mt76", 0);
699 ieee80211_free_hw(hw);
705 EXPORT_SYMBOL_GPL(mt76_alloc_device);
707 int mt76_register_device(struct mt76_dev *dev, bool vht,
708 struct ieee80211_rate *rates, int n_rates)
	/* Register the primary phy: drvdata, global wcid, phy init,
	 * per-band sbands, optional LEDs, mac80211 registration and the
	 * tx worker (FIFO-low priority). Mirrors mt76_register_phy(). */
710 struct ieee80211_hw *hw = dev->hw;
711 struct mt76_phy *phy = &dev->phy;
714 dev_set_drvdata(dev->dev, dev);
715 mt76_wcid_init(&dev->global_wcid);
716 ret = mt76_phy_init(phy, hw);
720 if (phy->cap.has_2ghz) {
721 ret = mt76_init_sband_2g(phy, rates, n_rates);
	/* rates + 4 on 5/6 GHz: skip the first 4 rate entries
	 * (presumably CCK — confirm against mt76_rates). */
726 if (phy->cap.has_5ghz) {
727 ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
732 if (phy->cap.has_6ghz) {
733 ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
738 wiphy_read_of_freq_limits(hw->wiphy);
739 mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
740 mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
741 mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);
743 if (IS_ENABLED(CONFIG_MT76_LEDS)) {
744 ret = mt76_led_init(phy);
749 ret = ieee80211_register_hw(hw);
753 WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
754 set_bit(MT76_STATE_REGISTERED, &phy->state);
755 sched_set_fifo_low(dev->tx_worker.task);
759 EXPORT_SYMBOL_GPL(mt76_register_device);
761 void mt76_unregister_device(struct mt76_dev *dev)
	/* Inverse of mt76_register_device(): LEDs, pending tx status,
	 * global wcid cleanup, then mac80211 unregister. */
763 struct ieee80211_hw *hw = dev->hw;
765 if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
768 if (IS_ENABLED(CONFIG_MT76_LEDS))
769 mt76_led_cleanup(&dev->phy);
770 mt76_tx_status_check(dev, true);
771 mt76_wcid_cleanup(dev, &dev->global_wcid);
772 ieee80211_unregister_hw(hw);
774 EXPORT_SYMBOL_GPL(mt76_unregister_device);
776 void mt76_free_device(struct mt76_dev *dev)
	/* Final teardown: stop the tx worker, destroy the workqueue and
	 * free the ieee80211_hw (which owns dev itself). */
778 mt76_worker_teardown(&dev->tx_worker);
780 destroy_workqueue(dev->wq);
783 ieee80211_free_hw(dev->hw);
785 EXPORT_SYMBOL_GPL(mt76_free_device);
787 static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
	/* Finalize the A-MSDU currently being assembled for queue @q and
	 * push it onto dev->rx_skb[q]; validates the first subframe to
	 * reject spoofed single-MSDU-as-A-MSDU frames. */
789 struct sk_buff *skb = phy->rx_amsdu[q].head;
790 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
791 struct mt76_dev *dev = phy->dev;
793 phy->rx_amsdu[q].head = NULL;
794 phy->rx_amsdu[q].tail = NULL;
797 * Validate if the amsdu has a proper first subframe.
798 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
799 * flag of the QoS header gets flipped. In such cases, the first
800 * subframe has a LLC/SNAP header in the location of the destination
803 if (skb_shinfo(skb)->frag_list) {
806 if (!(status->flag & RX_FLAG_8023)) {
807 offset = ieee80211_get_hdrlen_from_skb(skb);
810 (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
	/* LLC/SNAP header where the DA should be -> invalid A-MSDU. */
815 if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
820 __skb_queue_tail(&dev->rx_skb[q], skb);
823 static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
	/* Group consecutive rx frames of one A-MSDU (same seqno) into a
	 * frag_list on the head skb; flushes the previous A-MSDU when a
	 * new one (or a non-A-MSDU frame) starts. */
826 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
828 if (phy->rx_amsdu[q].head &&
829 (!status->amsdu || status->first_amsdu ||
830 status->seqno != phy->rx_amsdu[q].seqno))
831 mt76_rx_release_amsdu(phy, q);
833 if (!phy->rx_amsdu[q].head) {
834 phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
835 phy->rx_amsdu[q].seqno = status->seqno;
836 phy->rx_amsdu[q].head = skb;
838 *phy->rx_amsdu[q].tail = skb;
839 phy->rx_amsdu[q].tail = &skb->next;
	/* Non-A-MSDU frames and the last subframe complete immediately. */
842 if (!status->amsdu || status->last_amsdu)
843 mt76_rx_release_amsdu(phy, q);
846 void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
	/* Driver entry point for a received frame: drops it unless the
	 * owning phy is running, accounts testmode rx stats, then feeds
	 * the frame into the A-MSDU burst assembler. */
848 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
849 struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);
851 if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
856 #ifdef CONFIG_NL80211_TESTMODE
857 if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
858 phy->test.rx_stats.packets[q]++;
859 if (status->flag & RX_FLAG_FAILED_FCS_CRC)
860 phy->test.rx_stats.fcs_error[q]++;
864 mt76_rx_release_burst(phy, q, skb);
866 EXPORT_SYMBOL_GPL(mt76_rx);
868 bool mt76_has_tx_pending(struct mt76_phy *phy)
	/* True if any of the phy's tx queues still holds queued frames. */
870 struct mt76_queue *q;
873 for (i = 0; i < __MT_TXQ_MAX; i++) {
881 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
883 static struct mt76_channel_state *
884 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
	/* Map a channel pointer to its per-channel survey state by band
	 * and by index within the band's channel array. */
886 struct mt76_sband *msband;
889 if (c->band == NL80211_BAND_2GHZ)
890 msband = &phy->sband_2g;
891 else if (c->band == NL80211_BAND_6GHZ)
892 msband = &phy->sband_6g;
894 msband = &phy->sband_5g;
896 idx = c - &msband->sband.channels[0];
897 return &msband->chan[idx];
900 void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
	/* Add the elapsed time since the last sample to the current
	 * channel's active-time counter and restart the interval. */
902 struct mt76_channel_state *state = phy->chan_state;
904 state->cc_active += ktime_to_us(ktime_sub(time,
906 phy->survey_time = time;
908 EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
910 void mt76_update_survey(struct mt76_phy *phy)
	/* Pull fresh survey counters from the driver, update active time,
	 * and fold software-accounted BSS rx airtime into the channel
	 * state under cc_lock. */
912 struct mt76_dev *dev = phy->dev;
915 if (dev->drv->update_survey)
916 dev->drv->update_survey(phy);
918 cur_time = ktime_get_boottime();
919 mt76_update_survey_active_time(phy, cur_time);
921 if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
922 struct mt76_channel_state *state = phy->chan_state;
924 spin_lock_bh(&dev->cc_lock);
925 state->cc_bss_rx += dev->cur_cc_bss_rx;
926 dev->cur_cc_bss_rx = 0;
927 spin_unlock_bh(&dev->cc_lock);
930 EXPORT_SYMBOL_GPL(mt76_update_survey);
932 int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
	/* Switch the phy to @chandef: quiesce tx (worker disabled, wait up
	 * to HZ/5 for pending frames), snapshot the survey, update
	 * chandef/chan_state/DFS state, then call the driver's
	 * set_channel under dev->mutex with MT76_RESET held. */
935 struct mt76_dev *dev = phy->dev;
936 int timeout = HZ / 5;
939 cancel_delayed_work_sync(&phy->mac_work);
941 mutex_lock(&dev->mutex);
942 set_bit(MT76_RESET, &phy->state);
944 mt76_worker_disable(&dev->tx_worker);
945 wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
946 mt76_update_survey(phy);
	/* Frequency or width change invalidates the cached DFS state. */
948 if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
949 phy->chandef.width != chandef->width)
950 phy->dfs_state = MT_DFS_STATE_UNKNOWN;
952 phy->chandef = *chandef;
953 phy->chan_state = mt76_channel_state(phy, chandef->chan);
954 phy->offchannel = offchannel;
957 phy->main_chan = chandef->chan;
	/* Off-channel visits do not accumulate into main-channel stats. */
959 if (chandef->chan != phy->main_chan)
960 memset(phy->chan_state, 0, sizeof(*phy->chan_state));
961 mt76_worker_enable(&dev->tx_worker);
963 ret = dev->drv->set_channel(phy);
965 clear_bit(MT76_RESET, &phy->state);
966 mt76_worker_schedule(&dev->tx_worker);
968 mutex_unlock(&dev->mutex);
973 int mt76_update_channel(struct mt76_phy *phy)
	/* Apply the channel currently configured in mac80211's hw conf. */
975 struct ieee80211_hw *hw = phy->hw;
976 struct cfg80211_chan_def *chandef = &hw->conf.chandef;
977 bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
979 return mt76_set_channel(phy, chandef, offchannel);
981 EXPORT_SYMBOL_GPL(mt76_update_channel);
983 int mt76_get_survey(struct ieee80211_hw *hw, int idx,
984 struct survey_info *survey)
	/* mac80211 .get_survey callback: map the flat channel index onto
	 * the 2g/5g/6g sbands (in that order) and report the stored
	 * channel-time counters, converting us -> ms via div_u64. */
986 struct mt76_phy *phy = hw->priv;
987 struct mt76_dev *dev = phy->dev;
988 struct mt76_sband *sband;
989 struct ieee80211_channel *chan;
990 struct mt76_channel_state *state;
993 mutex_lock(&dev->mutex);
	/* Refresh counters once per survey dump (at idx 0). */
994 if (idx == 0 && dev->drv->update_survey)
995 mt76_update_survey(phy);
997 if (idx >= phy->sband_2g.sband.n_channels +
998 phy->sband_5g.sband.n_channels) {
999 idx -= (phy->sband_2g.sband.n_channels +
1000 phy->sband_5g.sband.n_channels);
1001 sband = &phy->sband_6g;
1002 } else if (idx >= phy->sband_2g.sband.n_channels) {
1003 idx -= phy->sband_2g.sband.n_channels;
1004 sband = &phy->sband_5g;
1006 sband = &phy->sband_2g;
1009 if (idx >= sband->sband.n_channels) {
1014 chan = &sband->sband.channels[idx];
1015 state = mt76_channel_state(phy, chan);
1017 memset(survey, 0, sizeof(*survey));
1018 survey->channel = chan;
1019 survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
1020 survey->filled |= dev->drv->survey_flags;
1022 survey->filled |= SURVEY_INFO_NOISE_DBM;
1024 if (chan == phy->main_chan) {
1025 survey->filled |= SURVEY_INFO_IN_USE;
1027 if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
1028 survey->filled |= SURVEY_INFO_TIME_BSS_RX;
1031 survey->time_busy = div_u64(state->cc_busy, 1000);
1032 survey->time_rx = div_u64(state->cc_rx, 1000);
1033 survey->time = div_u64(state->cc_active, 1000);
1034 survey->noise = state->noise;
	/* cc_bss_rx/cc_tx are written under cc_lock elsewhere. */
1036 spin_lock_bh(&dev->cc_lock);
1037 survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
1038 survey->time_tx = div_u64(state->cc_tx, 1000);
1039 spin_unlock_bh(&dev->cc_lock);
1042 mutex_unlock(&dev->mutex);
1046 EXPORT_SYMBOL_GPL(mt76_get_survey);
1048 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
1049 struct ieee80211_key_conf *key)
	/* Seed per-TID CCMP replay counters for a station key; PN checking
	 * is only enabled for CCMP ciphers. */
1051 struct ieee80211_key_seq seq;
1054 wcid->rx_check_pn = false;
1059 if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
1062 wcid->rx_check_pn = true;
1065 for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
1066 ieee80211_get_key_rx_seq(key, i, &seq);
1067 memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1070 /* robust management frame */
	/* TID -1 = management-frame PN, stored at slot IEEE80211_NUM_TIDS
	 * (i equals that after the loop). */
1071 ieee80211_get_key_rx_seq(key, -1, &seq);
1072 memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
1075 EXPORT_SYMBOL(mt76_wcid_key_setup);
1077 int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
	/* Combine per-chain RSSI values (for chains present in @chain_mask)
	 * into a single signal estimate. Body partially elided. */
1082 for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
1085 cur = *chain_signal;
1086 if (!(chains & BIT(0)) ||
1093 diff = signal - cur;
1104 EXPORT_SYMBOL(mt76_rx_signal);
1107 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1108 struct ieee80211_hw **hw,
1109 struct ieee80211_sta **sta)
	/* Convert the driver's mt76_rx_status stored in skb->cb into the
	 * mac80211 ieee80211_rx_status in the same cb area, and resolve
	 * the owning hw and station for the frame. */
1111 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1112 struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1113 struct mt76_rx_status mstat;
	/* Copy out first: both structs share skb->cb storage. */
1115 mstat = *((struct mt76_rx_status *)skb->cb);
1116 memset(status, 0, sizeof(*status));
1118 status->flag = mstat.flag;
1119 status->freq = mstat.freq;
1120 status->enc_flags = mstat.enc_flags;
1121 status->encoding = mstat.encoding;
1122 status->bw = mstat.bw;
1123 if (status->encoding == RX_ENC_EHT) {
1124 status->eht.ru = mstat.eht.ru;
1125 status->eht.gi = mstat.eht.gi;
1127 status->he_ru = mstat.he_ru;
1128 status->he_gi = mstat.he_gi;
1129 status->he_dcm = mstat.he_dcm;
1131 status->rate_idx = mstat.rate_idx;
1132 status->nss = mstat.nss;
1133 status->band = mstat.band;
1134 status->signal = mstat.signal;
1135 status->chains = mstat.chains;
1136 status->ampdu_reference = mstat.ampdu_ref;
1137 status->device_timestamp = mstat.timestamp;
1138 status->mactime = mstat.timestamp;
	/* Recompute combined signal from per-chain values; <= -128 means
	 * no valid measurement. */
1139 status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1140 if (status->signal <= -128)
1141 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1143 if (ieee80211_is_beacon(hdr->frame_control) ||
1144 ieee80211_is_probe_resp(hdr->frame_control))
1145 status->boottime_ns = ktime_get_boottime_ns();
1147 BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1148 BUILD_BUG_ON(sizeof(status->chain_signal) !=
1149 sizeof(mstat.chain_signal));
1150 memcpy(status->chain_signal, mstat.chain_signal,
1151 sizeof(mstat.chain_signal));
1154 status->link_valid = mstat.wcid->link_valid;
1155 status->link_id = mstat.wcid->link_id;
1158 *sta = wcid_to_sta(mstat.wcid);
1159 *hw = mt76_phy_hw(dev, mstat.phy_idx);
1163 mt76_check_ccmp_pn(struct sk_buff *skb)
	/* Software CCMP replay detection: compare the received PN against
	 * the stored per-TID counter; replays are demoted to
	 * monitor-only, otherwise the counter is advanced. */
1165 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1166 struct mt76_wcid *wcid = status->wcid;
1167 struct ieee80211_hdr *hdr;
1171 if (!(status->flag & RX_FLAG_DECRYPTED))
1174 if (status->flag & RX_FLAG_ONLY_MONITOR)
1177 if (!wcid || !wcid->rx_check_pn)
1180 security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1181 if (status->flag & RX_FLAG_8023)
1182 goto skip_hdr_check;
1184 hdr = mt76_skb_get_hdr(skb);
1185 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1187 * Validate the first fragment both here and in mac80211
1188 * All further fragments will be validated by mac80211 only.
1190 if (ieee80211_is_frag(hdr) &&
1191 !ieee80211_is_first_frag(hdr->frame_control))
1195 /* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
1197 * the recipient shall maintain a single replay counter for received
1198 * individually addressed robust Management frames that are received
1199 * with the To DS subfield equal to 0, [...]
1201 if (ieee80211_is_mgmt(hdr->frame_control) &&
1202 !ieee80211_has_tods(hdr->frame_control))
1203 security_idx = IEEE80211_NUM_TIDS;
1206 BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
1207 ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
1208 sizeof(status->iv));
	/* Replayed (PN not strictly increasing): monitor-only. */
1210 status->flag |= RX_FLAG_ONLY_MONITOR;
1214 memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));
1216 if (status->flag & RX_FLAG_IV_STRIPPED)
1217 status->flag |= RX_FLAG_PN_VALIDATED;
1221 mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
	/* Account the rx airtime of one frame: add it to the channel's
	 * BSS-rx counter and, if the frame maps to a station, report it
	 * to mac80211's airtime fairness accounting. */
1224 struct mt76_wcid *wcid = status->wcid;
1225 struct ieee80211_rx_status info = {
1226 .enc_flags = status->enc_flags,
1227 .rate_idx = status->rate_idx,
1228 .encoding = status->encoding,
1229 .band = status->band,
1233 struct ieee80211_sta *sta;
1235 u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1237 airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
1238 spin_lock(&dev->cc_lock);
1239 dev->cur_cc_bss_rx += airtime;
1240 spin_unlock(&dev->cc_lock);
1242 if (!wcid || !wcid->sta)
1245 sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1246 ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
1250 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
	/* Report the airtime accumulated for the current rx A-MPDU, then
	 * reset the A-MPDU accumulator. */
1252 struct mt76_wcid *wcid;
1255 if (!dev->rx_ampdu_len)
1258 wcid_idx = dev->rx_ampdu_status.wcid_idx;
1259 if (wcid_idx < ARRAY_SIZE(dev->wcid))
1260 wcid = rcu_dereference(dev->wcid[wcid_idx]);
1263 dev->rx_ampdu_status.wcid = wcid;
1265 mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1267 dev->rx_ampdu_len = 0;
1268 dev->rx_ampdu_ref = 0;
1272 mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
	/* Per-frame software airtime accounting (MT_DRV_SW_RX_AIRTIME):
	 * frames within one A-MPDU are accumulated and flushed as a unit;
	 * standalone frames are reported immediately. */
1274 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1275 struct mt76_wcid *wcid = status->wcid;
1277 if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
1280 if (!wcid || !wcid->sta) {
1281 struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1283 if (status->flag & RX_FLAG_8023)
	/* Only count unassociated-frame airtime addressed to us. */
1286 if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
1292 if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
1293 status->ampdu_ref != dev->rx_ampdu_ref)
1294 mt76_airtime_flush_ampdu(dev);
1296 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
1297 if (!dev->rx_ampdu_len ||
1298 status->ampdu_ref != dev->rx_ampdu_ref) {
1299 dev->rx_ampdu_status = *status;
1300 dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
1301 dev->rx_ampdu_ref = status->ampdu_ref;
1304 dev->rx_ampdu_len += skb->len;
1308 mt76_airtime_report(dev, status, skb->len);
1312 mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
	/* Per-frame station bookkeeping on the rx path: resolve the wcid
	 * for PS-Poll frames, account airtime and RSSI, and track the
	 * station's power-save state transitions. */
1314 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1315 struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1316 struct ieee80211_sta *sta;
1317 struct ieee80211_hw *hw;
1318 struct mt76_wcid *wcid = status->wcid;
1319 u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1322 hw = mt76_phy_hw(dev, status->phy_idx);
	/* PS-Poll may arrive without a resolved wcid: look the station
	 * up by transmitter address. */
1323 if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
1324 !(status->flag & RX_FLAG_8023)) {
1325 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
1327 wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
1330 mt76_airtime_check(dev, skb);
1332 if (!wcid || !wcid->sta)
1335 sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
	/* RSSI ewma tracks the magnitude of (negative) signal values. */
1337 if (status->signal <= 0)
1338 ewma_signal_add(&wcid->rssi, -status->signal);
1340 wcid->inactive_count = 0;
1342 if (status->flag & RX_FLAG_8023)
1345 if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
1348 if (ieee80211_is_pspoll(hdr->frame_control)) {
1349 ieee80211_sta_pspoll(sta);
	/* Only mgmt/data frames (and not fragments) carry a usable PM bit. */
1353 if (ieee80211_has_morefrags(hdr->frame_control) ||
1354 !(ieee80211_is_mgmt(hdr->frame_control) ||
1355 ieee80211_is_data(hdr->frame_control)))
1358 ps = ieee80211_has_pm(hdr->frame_control);
1360 if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
1361 ieee80211_is_qos_nullfunc(hdr->frame_control)))
1362 ieee80211_sta_uapsd_trigger(sta, tidno);
	/* No PS state change: nothing more to do. */
1364 if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
1368 set_bit(MT_WCID_FLAG_PS, &wcid->flags);
1370 if (dev->drv->sta_ps)
1371 dev->drv->sta_ps(dev, sta, ps);
1374 clear_bit(MT_WCID_FLAG_PS, &wcid->flags);
1376 ieee80211_sta_ps_transition(sta, ps);
1379 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
1380 struct napi_struct *napi)
	/* Hand completed rx frames to mac80211: validate CCMP PN, detach
	 * and individually deliver A-MSDU subframes from the frag_list,
	 * then pass the list to the network stack (GRO when a NAPI
	 * context is available). */
1382 struct ieee80211_sta *sta;
1383 struct ieee80211_hw *hw;
1384 struct sk_buff *skb, *tmp;
1387 spin_lock(&dev->rx_lock);
1388 while ((skb = __skb_dequeue(frames)) != NULL) {
1389 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1391 mt76_check_ccmp_pn(skb);
1392 skb_shinfo(skb)->frag_list = NULL;
1393 mt76_rx_convert(dev, skb, &hw, &sta);
1394 ieee80211_rx_list(hw, sta, skb, &list);
1396 /* subsequent amsdu frames */
1402 mt76_rx_convert(dev, skb, &hw, &sta);
1403 ieee80211_rx_list(hw, sta, skb, &list);
1406 spin_unlock(&dev->rx_lock);
	/* Without NAPI, deliver directly; with NAPI, use GRO. */
1409 netif_receive_skb_list(&list);
1413 list_for_each_entry_safe(skb, tmp, &list, list) {
1414 skb_list_del_init(skb);
1415 napi_gro_receive(napi, skb);
1419 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1420 struct napi_struct *napi)
	/* Drain dev->rx_skb[q]: per-frame station checks, then either
	 * bypass reordering (active WED) or run the rx aggregation
	 * reorder buffer, and finally complete delivery. */
1422 struct sk_buff_head frames;
1423 struct sk_buff *skb;
1425 __skb_queue_head_init(&frames);
1427 while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1428 mt76_check_sta(dev, skb);
1429 if (mtk_wed_device_active(&dev->mmio.wed))
1430 __skb_queue_tail(&frames, skb);
1432 mt76_rx_aggr_reorder(skb, &frames);
1435 mt76_rx_complete(dev, &frames, napi);
1437 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1440 mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
1441 struct ieee80211_sta *sta)
	/* Add a station: driver sta_add hook, wire each txq to the wcid,
	 * init RSSI tracking, publish the wcid via RCU. All under
	 * dev->mutex. */
1443 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1444 struct mt76_dev *dev = phy->dev;
1448 mutex_lock(&dev->mutex);
1450 ret = dev->drv->sta_add(dev, vif, sta);
1454 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1455 struct mt76_txq *mtxq;
1460 mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
1461 mtxq->wcid = wcid->idx;
1464 ewma_signal_init(&wcid->rssi);
	/* Stations on the second band are tracked in wcid_phy_mask. */
1465 if (phy->band_idx == MT_BAND1)
1466 mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
1467 wcid->phy_idx = phy->band_idx;
1468 rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
1470 mt76_wcid_init(wcid);
1472 mutex_unlock(&dev->mutex);
1477 void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1478 struct ieee80211_sta *sta)
	/* Remove a station (caller holds dev->mutex): stop rx aggregation
	 * on every TID, run the driver hook, clean up the wcid and clear
	 * its index from both wcid masks. */
1480 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1481 int i, idx = wcid->idx;
1483 for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
1484 mt76_rx_aggr_stop(dev, wcid, i);
1486 if (dev->drv->sta_remove)
1487 dev->drv->sta_remove(dev, vif, sta);
1489 mt76_wcid_cleanup(dev, wcid);
1491 mt76_wcid_mask_clear(dev->wcid_mask, idx);
1492 mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
1494 EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1497 mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1498 struct ieee80211_sta *sta)
	/* Locked wrapper around __mt76_sta_remove(). */
1500 mutex_lock(&dev->mutex);
1501 __mt76_sta_remove(dev, vif, sta);
1502 mutex_unlock(&dev->mutex);
/* mac80211 sta_state callback: map station state transitions onto
 * add/remove plus optional driver sta_event notifications.
 *
 * Returns 0, or an error from mt76_sta_add()/drv->sta_event().
 */
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	enum mt76_sta_event ev;

	/* NOTEXIST -> NONE: allocate/publish station state */
	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return mt76_sta_add(phy, vif, sta);

	/* NONE -> NOTEXIST: full teardown */
	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST)
		mt76_sta_remove(dev, vif, sta);

	/* remaining transitions are only interesting to drivers that
	 * implement the optional sta_event hook
	 */
	if (!dev->drv->sta_event)
		return 0;

	if (old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC)
		ev = MT76_STA_EVENT_ASSOC;
	else if (old_state == IEEE80211_STA_ASSOC &&
		 new_state == IEEE80211_STA_AUTHORIZED)
		ev = MT76_STA_EVENT_AUTHORIZE;
	else if (old_state == IEEE80211_STA_ASSOC &&
		 new_state == IEEE80211_STA_AUTH)
		ev = MT76_STA_EVENT_DISASSOC;
	else
		return 0;

	return dev->drv->sta_event(dev, vif, sta, ev);
}
EXPORT_SYMBOL_GPL(mt76_sta_state);
/* mac80211 sta_pre_rcu_remove callback: unpublish the wcid pointer so
 * RX/status paths can no longer look it up, before the station is torn
 * down.  status_lock is taken so in-flight tx-status processing does
 * not race with the pointer clear.
 */
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	spin_lock_bh(&dev->status_lock);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	spin_unlock_bh(&dev->status_lock);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
/* Initialize the per-wcid software state: tx queues/lists and the
 * packet-id IDR used by the tx-status tracking code.  Counterpart of
 * mt76_wcid_cleanup().
 */
void mt76_wcid_init(struct mt76_wcid *wcid)
{
	INIT_LIST_HEAD(&wcid->tx_list);
	skb_queue_head_init(&wcid->tx_pending);
	skb_queue_head_init(&wcid->tx_offchannel);

	INIT_LIST_HEAD(&wcid->list);
	idr_init(&wcid->pktid);
}
EXPORT_SYMBOL_GPL(mt76_wcid_init);
/* Release all skbs still associated with a wcid: flush pending
 * tx-status entries, destroy the packet-id IDR, unlink the wcid from
 * the phy tx list and splice its pending-tx queue, then free everything
 * outside the locks.
 *
 * Lock order here is phy->tx_lock (BH) -> wcid->tx_pending.lock; the
 * spliced skbs are freed after both are dropped to keep the critical
 * sections short.
 */
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
	struct ieee80211_hw *hw;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* collect skbs waiting for tx status (-1 = all packet ids) */
	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, -1, &list);
	mt76_tx_status_unlock(dev, &list);

	idr_destroy(&wcid->pktid);

	spin_lock_bh(&phy->tx_lock);

	if (!list_empty(&wcid->tx_list))
		list_del_init(&wcid->tx_list);

	spin_lock(&wcid->tx_pending.lock);
	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
	spin_unlock(&wcid->tx_pending.lock);

	spin_unlock_bh(&phy->tx_lock);

	/* free collected skbs with the hw they were queued on */
	while ((skb = __skb_dequeue(&list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
1598 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1601 struct mt76_phy *phy = hw->priv;
1602 int n_chains = hweight16(phy->chainmask);
1603 int delta = mt76_tx_power_nss_delta(n_chains);
1605 *dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1609 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1611 int mt76_init_sar_power(struct ieee80211_hw *hw,
1612 const struct cfg80211_sar_specs *sar)
1614 struct mt76_phy *phy = hw->priv;
1615 const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
1618 if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
1621 for (i = 0; i < sar->num_sub_specs; i++) {
1622 u32 index = sar->sub_specs[i].freq_range_index;
1623 /* SAR specifies power limitaton in 0.25dbm */
1624 s32 power = sar->sub_specs[i].power >> 1;
1626 if (power > 127 || power < -127)
1629 phy->frp[index].range = &capa->freq_ranges[index];
1630 phy->frp[index].power = power;
1635 EXPORT_SYMBOL_GPL(mt76_init_sar_power);
/* Clamp a requested tx power against the configured SAR limit for the
 * channel's frequency range.  Power values are in half-dB units, as
 * stored by mt76_init_sar_power().
 *
 * Returns the (possibly reduced) power; the input is passed through
 * unchanged when no SAR configuration applies.
 */
int mt76_get_sar_power(struct mt76_phy *phy,
		       struct ieee80211_channel *chan,
		       int power)
{
	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
	int freq, i;

	if (!capa || !phy->frp)
		return power;

	/* clamp nonsensical requests to the s8 maximum */
	if (power > 127 || power < -127)
		power = 127;

	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
	for (i = 0 ; i < capa->num_freq_ranges; i++) {
		if (phy->frp[i].range &&
		    freq >= phy->frp[i].range->start_freq &&
		    freq < phy->frp[i].range->end_freq) {
			power = min_t(int, phy->frp[i].power, power);
			break;
		}
	}

	return power;
}
EXPORT_SYMBOL_GPL(mt76_get_sar_power);
/* Interface iterator: finalize a channel-switch announcement on every
 * vif whose beacon countdown has reached zero.
 */
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
		ieee80211_csa_finish(vif, 0);
}
/* Complete a pending CSA: once mt76_csa_check() has latched
 * csa_complete, notify mac80211 for all active interfaces and clear
 * the flag.  No-op while the countdown is still running.
 */
void mt76_csa_finish(struct mt76_dev *dev)
{
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);
1685 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1687 struct mt76_dev *dev = priv;
1689 if (!vif->bss_conf.csa_active)
1692 dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
/* Poll all active interfaces for a finished CSA beacon countdown;
 * typically called from the driver's pre-TBTT/beacon work.
 */
void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);
/* mac80211 set_tim callback: intentionally a no-op success — NOTE(review):
 * presumably the TIM element is maintained by hardware/firmware in the
 * beacon; confirm against the per-chip beacon code.
 */
int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);
/* Re-insert the 8-byte CCMP header that the hardware stripped from a
 * decrypted RX frame, using the packet number saved in status->iv, so
 * mac80211 sees a frame with an intact IV (RX_FLAG_IV_STRIPPED is
 * cleared accordingly).
 */
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	/* open an 8-byte gap between the 802.11 header and the payload */
	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	/* CCMP header layout: PN0/PN1, reserved, key/ExtIV, PN2..PN5 */
	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6); /* ExtIV bit + key index (bits 6-7) */
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
/* Translate a hardware rate index into an index into
 * sband->bitrates[].
 *
 * For CCK rates (2 GHz only) the short-preamble bit (BIT(2)) is masked
 * off before matching; for OFDM on 2 GHz the first four entries are CCK
 * rates, so the search starts at offset 4.  Returns 0 (lowest rate)
 * when no entry matches.
 */
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck)
{
	bool is_2g = sband->band == NL80211_BAND_2GHZ;
	int i, offset = 0, len = sband->n_bitrates;

	if (cck) {
		if (!is_2g)
			return 0;

		idx &= ~BIT(2); /* short preamble */
	} else if (is_2g) {
		offset = 4;
	}

	/* only the low byte of hw_value encodes the rate index */
	for (i = offset; i < len; i++) {
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_rate);
/* mac80211 sw_scan_start callback: flag the phy as scanning so other
 * paths (e.g. DFS state) can adapt while the scan runs.
 */
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);
/* mac80211 sw_scan_complete callback: clear the scanning flag set by
 * mt76_sw_scan().
 */
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1775 int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
1777 struct mt76_phy *phy = hw->priv;
1778 struct mt76_dev *dev = phy->dev;
1780 mutex_lock(&dev->mutex);
1781 *tx_ant = phy->antenna_mask;
1782 *rx_ant = phy->antenna_mask;
1783 mutex_unlock(&dev->mutex);
1787 EXPORT_SYMBOL_GPL(mt76_get_antenna);
/* Allocate and initialize a hardware queue via the device's queue ops.
 *
 * The queue struct is devm-allocated, so it is freed automatically on
 * device teardown.  Returns the queue or an ERR_PTR() on failure.
 * NOTE(review): qid is not used in this body — presumably kept for API
 * symmetry with callers; confirm.
 */
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base, void *wed, u32 flags)
{
	struct mt76_queue *hwq;
	int err;

	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
	if (!hwq)
		return ERR_PTR(-ENOMEM);

	hwq->flags = flags;
	hwq->wed = wed;

	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
	if (err < 0)
		return ERR_PTR(err);

	return hwq;
}
EXPORT_SYMBOL_GPL(mt76_init_queue);
/* Pick the hardware value of the default (basic) rate for a vif.
 *
 * On non-2 GHz bands the first four mt76_rates entries (CCK) do not
 * apply, so the index is offset by 4.  A negative rateidx selects the
 * lowest rate; an out-of-range index falls back to the band's lowest
 * valid entry.
 */
u16 mt76_calculate_default_rate(struct mt76_phy *phy,
				struct ieee80211_vif *vif, int rateidx)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	/* prefer the channel-context definition when one is assigned */
	struct cfg80211_chan_def *chandef = mvif->ctx ?
					    &mvif->ctx->def :
					    &phy->chandef;
	int offset = 0;

	if (chandef->chan->band != NL80211_BAND_2GHZ)
		offset = 4;

	/* pick the lowest rate for hidden nodes */
	if (rateidx < 0)
		rateidx = 0;

	rateidx += offset;
	if (rateidx >= ARRAY_SIZE(mt76_rates))
		rateidx = offset;

	return mt76_rates[rateidx].hw_value;
}
EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
/* Per-station ethtool stats aggregator: accumulate this station's tx
 * counters into the shared data[] array starting at
 * wi->initial_stat_idx.
 *
 * The slot order here must match the driver's ethtool stats string
 * table exactly; 'eht' extends the layout with the EHT phy modes, two
 * extra bandwidth buckets and MCS 12-13.
 */
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht)
{
	int i, ei = wi->initial_stat_idx;
	u64 *data = wi->data;

	wi->sta_count++;

	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
	if (eht) {
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
	}

	/* non-EHT layouts skip the last bandwidth bucket */
	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
		data[ei++] += stats->tx_bw[i];

	/* EHT adds MCS 12-13 */
	for (i = 0; i < (eht ? 14 : 12); i++)
		data[ei++] += stats->tx_mcs[i];

	for (i = 0; i < 4; i++)
		data[ei++] += stats->tx_nss[i];

	/* record how many slots this worker filled */
	wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
/* Append page-pool statistics (summed over all RX queues) to the
 * ethtool stats array and advance *index past the emitted slots.
 * Compiled out to a no-op when CONFIG_PAGE_POOL_STATS is disabled.
 */
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	int i;

	/* page_pool_get_stats() accumulates into 'stats' */
	mt76_for_each_q_rx(dev, i)
		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);

	page_pool_ethtool_stats_get(data, &stats);
	*index += page_pool_ethtool_stats_get_count();
#endif
}
EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
/* Derive the DFS state the phy should be in for its current channel
 * configuration:
 *  - DISABLED when no DFS region is set or a scan is in progress;
 *  - with radar detection off, ACTIVE only for monitor mode on a radar
 *    channel, otherwise DISABLED;
 *  - with radar detection on, CAC while beaconing is not yet permitted
 *    on the channel, ACTIVE once it is.
 */
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
{
	struct ieee80211_hw *hw = phy->hw;
	struct mt76_dev *dev = phy->dev;

	if (dev->region == NL80211_DFS_UNSET ||
	    test_bit(MT76_SCANNING, &phy->state))
		return MT_DFS_STATE_DISABLED;

	if (!hw->conf.radar_enabled) {
		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
			return MT_DFS_STATE_ACTIVE;

		return MT_DFS_STATE_DISABLED;
	}

	/* beaconing not allowed yet -> CAC (channel availability check) */
	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
		return MT_DFS_STATE_CAC;

	return MT_DFS_STATE_ACTIVE;
}
EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);