// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */
11 struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
14 struct ath12k_peer *peer;
16 lockdep_assert_held(&ab->base_lock);
18 list_for_each_entry(peer, &ab->peers, list) {
19 if (peer->vdev_id != vdev_id)
21 if (!ether_addr_equal(peer->addr, addr))
30 static struct ath12k_peer *ath12k_peer_find_by_pdev_idx(struct ath12k_base *ab,
31 u8 pdev_idx, const u8 *addr)
33 struct ath12k_peer *peer;
35 lockdep_assert_held(&ab->base_lock);
37 list_for_each_entry(peer, &ab->peers, list) {
38 if (peer->pdev_idx != pdev_idx)
40 if (!ether_addr_equal(peer->addr, addr))
49 struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
52 struct ath12k_peer *peer;
54 lockdep_assert_held(&ab->base_lock);
56 list_for_each_entry(peer, &ab->peers, list) {
57 if (!ether_addr_equal(peer->addr, addr))
66 struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
69 struct ath12k_peer *peer;
71 lockdep_assert_held(&ab->base_lock);
73 list_for_each_entry(peer, &ab->peers, list)
74 if (peer_id == peer->peer_id)
80 bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id)
82 struct ath12k_peer *peer;
84 spin_lock_bh(&ab->base_lock);
86 list_for_each_entry(peer, &ab->peers, list) {
87 if (vdev_id == peer->vdev_id) {
88 spin_unlock_bh(&ab->base_lock);
92 spin_unlock_bh(&ab->base_lock);
96 struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab,
99 struct ath12k_peer *peer;
101 lockdep_assert_held(&ab->base_lock);
103 list_for_each_entry(peer, &ab->peers, list)
104 if (ast_hash == peer->ast_hash)
110 void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id)
112 struct ath12k_peer *peer;
114 spin_lock_bh(&ab->base_lock);
116 peer = ath12k_peer_find_by_id(ab, peer_id);
118 ath12k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
123 ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
124 peer->vdev_id, peer->addr, peer_id);
126 list_del(&peer->list);
128 wake_up(&ab->peer_mapping_wq);
131 spin_unlock_bh(&ab->base_lock);
134 void ath12k_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id,
135 u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
137 struct ath12k_peer *peer;
139 spin_lock_bh(&ab->base_lock);
140 peer = ath12k_peer_find(ab, vdev_id, mac_addr);
142 peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
146 peer->vdev_id = vdev_id;
147 peer->peer_id = peer_id;
148 peer->ast_hash = ast_hash;
149 peer->hw_peer_id = hw_peer_id;
150 ether_addr_copy(peer->addr, mac_addr);
151 list_add(&peer->list, &ab->peers);
152 wake_up(&ab->peer_mapping_wq);
155 ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
156 vdev_id, mac_addr, peer_id);
159 spin_unlock_bh(&ab->base_lock);
162 static int ath12k_wait_for_peer_common(struct ath12k_base *ab, int vdev_id,
163 const u8 *addr, bool expect_mapped)
167 ret = wait_event_timeout(ab->peer_mapping_wq, ({
170 spin_lock_bh(&ab->base_lock);
171 mapped = !!ath12k_peer_find(ab, vdev_id, addr);
172 spin_unlock_bh(&ab->base_lock);
174 (mapped == expect_mapped ||
175 test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags));
184 void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id)
186 struct ath12k_peer *peer, *tmp;
187 struct ath12k_base *ab = ar->ab;
189 lockdep_assert_held(&ar->conf_mutex);
191 spin_lock_bh(&ab->base_lock);
192 list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
193 if (peer->vdev_id != vdev_id)
196 ath12k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
197 peer->addr, vdev_id);
199 list_del(&peer->list);
204 spin_unlock_bh(&ab->base_lock);
207 static int ath12k_wait_for_peer_deleted(struct ath12k *ar, int vdev_id, const u8 *addr)
209 return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
212 int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
216 unsigned long time_left;
218 ret = ath12k_wait_for_peer_deleted(ar, vdev_id, addr);
220 ath12k_warn(ar->ab, "failed wait for peer deleted");
224 time_left = wait_for_completion_timeout(&ar->peer_delete_done,
226 if (time_left == 0) {
227 ath12k_warn(ar->ab, "Timeout in receiving peer delete response\n");
234 int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
238 lockdep_assert_held(&ar->conf_mutex);
240 reinit_completion(&ar->peer_delete_done);
242 ret = ath12k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
245 "failed to delete peer vdev_id %d addr %pM ret %d\n",
250 ret = ath12k_wait_for_peer_delete_done(ar, vdev_id, addr);
259 static int ath12k_wait_for_peer_created(struct ath12k *ar, int vdev_id, const u8 *addr)
261 return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
264 int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif,
265 struct ieee80211_sta *sta,
266 struct ath12k_wmi_peer_create_arg *arg)
268 struct ath12k_peer *peer;
271 lockdep_assert_held(&ar->conf_mutex);
273 if (ar->num_peers > (ar->max_num_peers - 1)) {
275 "failed to create peer due to insufficient peer entry resource in firmware\n");
279 spin_lock_bh(&ar->ab->base_lock);
280 peer = ath12k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, arg->peer_addr);
282 spin_unlock_bh(&ar->ab->base_lock);
285 spin_unlock_bh(&ar->ab->base_lock);
287 ret = ath12k_wmi_send_peer_create_cmd(ar, arg);
290 "failed to send peer create vdev_id %d ret %d\n",
295 ret = ath12k_wait_for_peer_created(ar, arg->vdev_id,
300 spin_lock_bh(&ar->ab->base_lock);
302 peer = ath12k_peer_find(ar->ab, arg->vdev_id, arg->peer_addr);
304 spin_unlock_bh(&ar->ab->base_lock);
305 ath12k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
306 arg->peer_addr, arg->vdev_id);
308 reinit_completion(&ar->peer_delete_done);
310 ret = ath12k_wmi_send_peer_delete_cmd(ar, arg->peer_addr,
313 ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
314 arg->vdev_id, arg->peer_addr);
318 ret = ath12k_wait_for_peer_delete_done(ar, arg->vdev_id,
326 peer->pdev_idx = ar->pdev_idx;
329 if (arvif->vif->type == NL80211_IFTYPE_STATION) {
330 arvif->ast_hash = peer->ast_hash;
331 arvif->ast_idx = peer->hw_peer_id;
334 peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
335 peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
339 spin_unlock_bh(&ar->ab->base_lock);