Git Repo - linux.git/commitdiff
wifi: iwlwifi: add support for a wiphy_work rx handler
authorMiri Korenblit <[email protected]>
Tue, 23 Jan 2024 18:08:23 +0000 (20:08 +0200)
committerJohannes Berg <[email protected]>
Fri, 26 Jan 2024 08:48:54 +0000 (09:48 +0100)
The wiphy_work infra ensures that the entire worker will run
with the wiphy mutex held. It is useful to have RX handlers
running as a wiphy_work when we don't want the handler to
run in parallel with mac80211 work (to avoid races).

For example - BT notification can disable eSR starting from the next
patch.
In ieee80211_set_active_links we first check that eSR is
allowed (drv_can_activate_links), and then activate it.
If the BT notif was received after drv_can_activate_links
(which returned true), and before the activation - eSR will be
activated when it shouldn't.
If the BT notification is handled with the wiphy mutex held, it
can't run in parallel to ieee80211_set_active_links, which also
holds that mutex.

Add the necessary infrastructure here, for use in the next commit.

Signed-off-by: Miri Korenblit <[email protected]>
Reviewed-by: Gregory Greenman <[email protected]>
Reviewed-by: Johannes Berg <[email protected]>
Link: https://msgid.link/20240123200528.ce83d16cdec8.I35ef53fa23f58b9ec17924099238b61deafcecd7@changeid
Signed-off-by: Johannes Berg <[email protected]>
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c

index 6bbcf4092f52a111333b835a04eb11957391ec36..406956574f5278123d3d3bce2aea206087e9d216 100644 (file)
@@ -1342,6 +1342,7 @@ void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
         * discover that its list is now empty.
         */
        cancel_work_sync(&mvm->async_handlers_wk);
+       wiphy_work_cancel(hw->wiphy, &mvm->async_handlers_wiphy_wk);
 }
 
 struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
index 997f0395b97abdc86ae5816597800a9d67aa37ca..af5c8b4bb5a6272e9e72d7ff6d851ca007382cba 100644 (file)
@@ -848,6 +848,9 @@ struct iwl_mvm {
        spinlock_t async_handlers_lock;
        struct work_struct async_handlers_wk;
 
+       /* For async rx handlers that require the wiphy lock */
+       struct wiphy_work async_handlers_wiphy_wk;
+
        struct work_struct roc_done_wk;
 
        unsigned long init_status;
index adbbe19aeae5dc705065e1e2fd041ac1c8932ba9..38a84a54ff78349747e3842154e2cfa1fd16fccd 100644 (file)
@@ -267,11 +267,15 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
  *     it will be called from a worker with mvm->mutex held.
  * @RX_HANDLER_ASYNC_UNLOCKED : in case the handler needs to lock the
  *     mutex itself, it will be called from a worker without mvm->mutex held.
+ * @RX_HANDLER_ASYNC_LOCKED_WIPHY: If the handler needs to hold the wiphy lock
+ *     and mvm->mutex. Will be handled with the wiphy_work queue infra
+ *     instead of regular work queue.
  */
 enum iwl_rx_handler_context {
        RX_HANDLER_SYNC,
        RX_HANDLER_ASYNC_LOCKED,
        RX_HANDLER_ASYNC_UNLOCKED,
+       RX_HANDLER_ASYNC_LOCKED_WIPHY,
 };
 
 /**
@@ -673,6 +677,8 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
 
 /* this forward declaration can avoid to export the function */
 static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+                                           struct wiphy_work *work);
 
 static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
 {
@@ -1265,6 +1271,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        INIT_LIST_HEAD(&mvm->add_stream_txqs);
        spin_lock_init(&mvm->add_stream_lock);
 
+       wiphy_work_init(&mvm->async_handlers_wiphy_wk,
+                       iwl_mvm_async_handlers_wiphy_wk);
        init_waitqueue_head(&mvm->rx_sync_waitq);
 
        mvm->queue_sync_state = 0;
@@ -1551,35 +1559,62 @@ void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
        spin_unlock_bh(&mvm->async_handlers_lock);
 }
 
-static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+/*
+ * This function receives a bitmap of rx async handler contexts
+ * (&iwl_rx_handler_context) to handle, and runs only them
+ */
+static void iwl_mvm_async_handlers_by_context(struct iwl_mvm *mvm,
+                                             u8 contexts)
 {
-       struct iwl_mvm *mvm =
-               container_of(wk, struct iwl_mvm, async_handlers_wk);
        struct iwl_async_handler_entry *entry, *tmp;
        LIST_HEAD(local_list);
 
-       /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
-
        /*
-        * Sync with Rx path with a lock. Remove all the entries from this list,
-        * add them to a local one (lock free), and then handle them.
+        * Sync with Rx path with a lock. Remove all the entries of the
+        * wanted contexts from this list, add them to a local one (lock free),
+        * and then handle them.
         */
        spin_lock_bh(&mvm->async_handlers_lock);
-       list_splice_init(&mvm->async_handlers_list, &local_list);
+       list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
+               if (!(BIT(entry->context) & contexts))
+                       continue;
+               list_del(&entry->list);
+               list_add_tail(&entry->list, &local_list);
+       }
        spin_unlock_bh(&mvm->async_handlers_lock);
 
        list_for_each_entry_safe(entry, tmp, &local_list, list) {
-               if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+               if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
                        mutex_lock(&mvm->mutex);
                entry->fn(mvm, &entry->rxb);
                iwl_free_rxb(&entry->rxb);
                list_del(&entry->list);
-               if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+               if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
                        mutex_unlock(&mvm->mutex);
                kfree(entry);
        }
 }
 
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+                                           struct wiphy_work *wk)
+{
+       struct iwl_mvm *mvm =
+               container_of(wk, struct iwl_mvm, async_handlers_wiphy_wk);
+       u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED_WIPHY);
+
+       iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
+
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+{
+       struct iwl_mvm *mvm =
+               container_of(wk, struct iwl_mvm, async_handlers_wk);
+       u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED) |
+                     BIT(RX_HANDLER_ASYNC_UNLOCKED);
+
+       iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
+
 static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
                                            struct iwl_rx_packet *pkt)
 {
@@ -1659,7 +1694,11 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
                spin_lock(&mvm->async_handlers_lock);
                list_add_tail(&entry->list, &mvm->async_handlers_list);
                spin_unlock(&mvm->async_handlers_lock);
-               schedule_work(&mvm->async_handlers_wk);
+               if (rx_h->context == RX_HANDLER_ASYNC_LOCKED_WIPHY)
+                       wiphy_work_queue(mvm->hw->wiphy,
+                                        &mvm->async_handlers_wiphy_wk);
+               else
+                       schedule_work(&mvm->async_handlers_wk);
                break;
        }
 }
This page took 0.089365 seconds and 4 git commands to generate.