// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#include <net/if_inet6.h>
#include <net/ipv6.h>

#include "mac.h"

#include <net/mac80211.h>
#include "core.h"
#include "hif.h"
#include "debug.h"
#include "wmi.h"
#include "wow.h"

static const struct wiphy_wowlan_support ath12k_wowlan_support = {
	.flags = WIPHY_WOWLAN_DISCONNECT |
		 WIPHY_WOWLAN_MAGIC_PKT |
		 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
		 WIPHY_WOWLAN_GTK_REKEY_FAILURE,
	.pattern_min_len = WOW_MIN_PATTERN_SIZE,
	.pattern_max_len = WOW_MAX_PATTERN_SIZE,
	.max_pkt_offset = WOW_MAX_PKT_OFFSET,
};

static inline bool ath12k_wow_is_p2p_vdev(struct ath12k_vif *ahvif)
{
	return (ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_DEVICE ||
		ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_CLIENT ||
		ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_GO);
}

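/* Put the firmware into WoW: issue the WMI WOW_ENABLE command and wait for the
 * HTC suspend handshake to complete. The attempt is retried up to
 * ATH12K_WOW_RETRY_NUM times when the firmware is still busy and NACKs the
 * suspend request.
 */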
int ath12k_wow_enable(struct ath12k *ar)
{
	struct ath12k_base *ab = ar->ab;
	int i, ret;

	clear_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);

	/* The firmware might be busy and cannot enter WoW immediately.
	 * In that case firmware notifies host with
	 * ATH12K_HTC_MSG_NACK_SUSPEND message, asking host to try again
	 * later. Per the firmware team there could be up to 10 loops.
	 */
	for (i = 0; i < ATH12K_WOW_RETRY_NUM; i++) {
		reinit_completion(&ab->htc_suspend);

		ret = ath12k_wmi_wow_enable(ar);
		if (ret) {
			ath12k_warn(ab, "failed to issue wow enable: %d\n", ret);
			return ret;
		}

		ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
		if (ret == 0) {
			ath12k_warn(ab,
				    "timed out while waiting for htc suspend completion\n");
			return -ETIMEDOUT;
		}

		if (test_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags))
			/* success, suspend complete received */
			return 0;

		ath12k_warn(ab, "htc suspend not complete, retrying (try %d)\n",
			    i);
		msleep(ATH12K_WOW_RETRY_WAIT_MS);
	}

	ath12k_warn(ab, "htc suspend not complete, failing after %d tries\n", i);

	return -ETIMEDOUT;
}

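/* Take the firmware out of WoW: send the host wakeup indication over WMI and
 * wait for the corresponding wakeup-complete event from the firmware.
 */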
int ath12k_wow_wakeup(struct ath12k *ar)
{
	struct ath12k_base *ab = ar->ab;
	int ret;

	reinit_completion(&ab->wow.wakeup_completed);

	ret = ath12k_wmi_wow_host_wakeup_ind(ar);
	if (ret) {
		ath12k_warn(ab, "failed to send wow wakeup indication: %d\n",
			    ret);
		return ret;
	}

	ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
	if (ret == 0) {
		ath12k_warn(ab, "timed out while waiting for wow wakeup completion\n");
		return -ETIMEDOUT;
	}

	return 0;
}

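/* Reset the WoW state of one vdev: disable every wakeup event and delete all
 * wakeup patterns previously installed in the firmware. ath12k_wow_cleanup()
 * below applies this to every vdev on the radio.
 */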
static int ath12k_wow_vif_cleanup(struct ath12k_link_vif *arvif)
{
	struct ath12k *ar = arvif->ar;
	int i, ret;

	for (i = 0; i < WOW_EVENT_MAX; i++) {
		ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
		if (ret) {
			ath12k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
				    wow_wakeup_event(i), arvif->vdev_id, ret);
			return ret;
		}
	}

	for (i = 0; i < ar->wow.max_num_patterns; i++) {
		ret = ath12k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
		if (ret) {
			ath12k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
				    i, arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

static int ath12k_wow_cleanup(struct ath12k *ar)
{
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath12k_wow_vif_cleanup(arvif);
		if (ret) {
			ath12k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

/* Convert an 802.3 format to an 802.11 format.
 *         +------------+-----------+--------+----------------+
 * 802.3:  |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
 *         +------------+-----------+--------+----------------+
 *                |__         |_______    |____________  |________
 *                   |                |                |          |
 *         +--+------------+----+-----------+------+--------+-----------+
 * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)|  8B  |type(2B)|  body...  |
 *         +--+------------+----+-----------+------+--------+-----------+
 */
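/* Depending on where the Ethernet pattern starts (destination MAC, source MAC,
 * or EtherType/payload), the pattern and its byte mask are split across the
 * 802.11 addr1/addr3 fields and the RFC 1042 header, and the packet offset is
 * translated accordingly.
 */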
static void
ath12k_wow_convert_8023_to_80211(struct ath12k *ar,
				 const struct cfg80211_pkt_pattern *eth_pattern,
				 struct ath12k_pkt_pattern *i80211_pattern)
{
	size_t r1042_eth_ofs = offsetof(struct rfc1042_hdr, eth_type);
	size_t a1_ofs = offsetof(struct ieee80211_hdr_3addr, addr1);
	size_t a3_ofs = offsetof(struct ieee80211_hdr_3addr, addr3);
	size_t i80211_hdr_len = sizeof(struct ieee80211_hdr_3addr);
	size_t prot_ofs = offsetof(struct ethhdr, h_proto);
	size_t src_ofs = offsetof(struct ethhdr, h_source);
	u8 eth_bytemask[WOW_MAX_PATTERN_SIZE] = {};
	const u8 *eth_pat = eth_pattern->pattern;
	size_t eth_pat_len = eth_pattern->pattern_len;
	size_t eth_pkt_ofs = eth_pattern->pkt_offset;
	u8 *bytemask = i80211_pattern->bytemask;
	u8 *pat = i80211_pattern->pattern;
	size_t pat_len = 0;
	size_t pkt_ofs = 0;
	size_t delta;
	int i;

	/* convert bitmask to bytemask */
	for (i = 0; i < eth_pat_len; i++)
		if (eth_pattern->mask[i / 8] & BIT(i % 8))
			eth_bytemask[i] = 0xff;

	if (eth_pkt_ofs < ETH_ALEN) {
		pkt_ofs = eth_pkt_ofs + a1_ofs;

		if (size_add(eth_pkt_ofs, eth_pat_len) < ETH_ALEN) {
			memcpy(pat, eth_pat, eth_pat_len);
			memcpy(bytemask, eth_bytemask, eth_pat_len);

			pat_len = eth_pat_len;
		} else if (size_add(eth_pkt_ofs, eth_pat_len) < prot_ofs) {
			memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs);
			memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs);

			delta = eth_pkt_ofs + eth_pat_len - src_ofs;
			memcpy(pat + a3_ofs - pkt_ofs,
			       eth_pat + ETH_ALEN - eth_pkt_ofs,
			       delta);
			memcpy(bytemask + a3_ofs - pkt_ofs,
			       eth_bytemask + ETH_ALEN - eth_pkt_ofs,
			       delta);

			pat_len = a3_ofs - pkt_ofs + delta;
		} else {
			memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs);
			memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs);

			memcpy(pat + a3_ofs - pkt_ofs,
			       eth_pat + ETH_ALEN - eth_pkt_ofs,
			       ETH_ALEN);
			memcpy(bytemask + a3_ofs - pkt_ofs,
			       eth_bytemask + ETH_ALEN - eth_pkt_ofs,
			       ETH_ALEN);

			delta = eth_pkt_ofs + eth_pat_len - prot_ofs;
			memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
			       eth_pat + prot_ofs - eth_pkt_ofs,
			       delta);
			memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
			       eth_bytemask + prot_ofs - eth_pkt_ofs,
			       delta);

			pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta;
		}
	} else if (eth_pkt_ofs < prot_ofs) {
		pkt_ofs = eth_pkt_ofs - ETH_ALEN + a3_ofs;

		if (size_add(eth_pkt_ofs, eth_pat_len) < prot_ofs) {
			memcpy(pat, eth_pat, eth_pat_len);
			memcpy(bytemask, eth_bytemask, eth_pat_len);

			pat_len = eth_pat_len;
		} else {
			memcpy(pat, eth_pat, prot_ofs - eth_pkt_ofs);
			memcpy(bytemask, eth_bytemask, prot_ofs - eth_pkt_ofs);

			delta = eth_pkt_ofs + eth_pat_len - prot_ofs;
			memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
			       eth_pat + prot_ofs - eth_pkt_ofs,
			       delta);
			memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
			       eth_bytemask + prot_ofs - eth_pkt_ofs,
			       delta);

			pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta;
		}
	} else {
		pkt_ofs = eth_pkt_ofs - prot_ofs + i80211_hdr_len + r1042_eth_ofs;

		memcpy(pat, eth_pat, eth_pat_len);
		memcpy(bytemask, eth_bytemask, eth_pat_len);

		pat_len = eth_pat_len;
	}

	i80211_pattern->pattern_len = pat_len;
	i80211_pattern->pkt_offset = pkt_ofs;
}

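/* Validate a scheduled scan (net-detect) request and convert it into the WMI
 * PNO scan request format: match-set SSIDs, channel lists, scan plan periods
 * and optional MAC address randomization.
 */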
static int
ath12k_wow_pno_check_and_convert(struct ath12k *ar, u32 vdev_id,
				 const struct cfg80211_sched_scan_request *nd_config,
				 struct wmi_pno_scan_req_arg *pno)
{
	int i, j;
	u8 ssid_len;

	pno->enable = 1;
	pno->vdev_id = vdev_id;
	pno->uc_networks_count = nd_config->n_match_sets;

	if (!pno->uc_networks_count ||
	    pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
		return -EINVAL;

	if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
		return -EINVAL;

	/* Filling per profile params */
	for (i = 0; i < pno->uc_networks_count; i++) {
		ssid_len = nd_config->match_sets[i].ssid.ssid_len;

		if (ssid_len == 0 || ssid_len > 32)
			return -EINVAL;

		pno->a_networks[i].ssid.ssid_len = ssid_len;

		memcpy(pno->a_networks[i].ssid.ssid,
		       nd_config->match_sets[i].ssid.ssid,
		       ssid_len);
		pno->a_networks[i].authentication = 0;
		pno->a_networks[i].encryption     = 0;
		pno->a_networks[i].bcast_nw_type  = 0;

		/* Copying list of valid channel into request */
		pno->a_networks[i].channel_count = nd_config->n_channels;
		pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;

		for (j = 0; j < nd_config->n_channels; j++) {
			pno->a_networks[i].channels[j] =
					nd_config->channels[j]->center_freq;
		}
	}

	/* set scan to passive if no SSIDs are specified in the request */
	if (nd_config->n_ssids == 0)
		pno->do_passive_scan = true;
	else
		pno->do_passive_scan = false;

	for (i = 0; i < nd_config->n_ssids; i++) {
		for (j = 0; j < pno->uc_networks_count; j++) {
			if (pno->a_networks[j].ssid.ssid_len ==
				nd_config->ssids[i].ssid_len &&
			    !memcmp(pno->a_networks[j].ssid.ssid,
				    nd_config->ssids[i].ssid,
				    pno->a_networks[j].ssid.ssid_len)) {
				pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
				break;
			}
		}
	}

	if (nd_config->n_scan_plans == 2) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
		pno->slow_scan_period =
			nd_config->scan_plans[1].interval * MSEC_PER_SEC;
	} else if (nd_config->n_scan_plans == 1) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = 1;
		pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
	} else {
		ath12k_warn(ar->ab, "invalid number of PNO scan plans: %d\n",
			    nd_config->n_scan_plans);
	}

	if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
		/* enable mac randomization */
		pno->enable_pno_scan_randomization = 1;
		memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
		memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
	}

	pno->delay_start_time = nd_config->delay;

	/* Current FW does not support min-max range for dwell time */
	pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
	pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;

	return 0;
}

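/* Program the firmware wakeup triggers for one vdev: a default event set for
 * AP/IBSS vdevs, user-requested disconnect/magic-packet/net-detect triggers
 * for station vdevs, plus any packet patterns (converted to the 802.11 layout
 * when the firmware uses native WiFi RX decap).
 */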
static int ath12k_wow_vif_set_wakeups(struct ath12k_link_vif *arvif,
				      struct cfg80211_wowlan *wowlan)
{
	const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
	struct ath12k *ar = arvif->ar;
	unsigned long wow_mask = 0;
	int pattern_id = 0;
	int ret, i, j;

	/* Setup requested WOW features */
	switch (arvif->ahvif->vdev_type) {
	case WMI_VDEV_TYPE_IBSS:
		__set_bit(WOW_BEACON_EVENT, &wow_mask);
		fallthrough;
	case WMI_VDEV_TYPE_AP:
		__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
		__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
		__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
		__set_bit(WOW_HTT_EVENT, &wow_mask);
		__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
		break;
	case WMI_VDEV_TYPE_STA:
		if (wowlan->disconnect) {
			__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_BMISS_EVENT, &wow_mask);
			__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
		}

		if (wowlan->magic_pkt)
			__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);

		if (wowlan->nd_config) {
			struct wmi_pno_scan_req_arg *pno;
			int ret;

			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
			if (!pno)
				return -ENOMEM;

			ar->nlo_enabled = true;

			ret = ath12k_wow_pno_check_and_convert(ar, arvif->vdev_id,
							       wowlan->nd_config, pno);
			if (!ret) {
				ath12k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
				__set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
			}

			kfree(pno);
		}
		break;
	default:
		break;
	}

	for (i = 0; i < wowlan->n_patterns; i++) {
		const struct cfg80211_pkt_pattern *eth_pattern = &patterns[i];
		struct ath12k_pkt_pattern new_pattern = {};

		if (WARN_ON(eth_pattern->pattern_len > WOW_MAX_PATTERN_SIZE))
			return -EINVAL;

		if (ar->ab->wow.wmi_conf_rx_decap_mode ==
		    ATH12K_HW_TXRX_NATIVE_WIFI) {
			ath12k_wow_convert_8023_to_80211(ar, eth_pattern,
							 &new_pattern);

			if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
				return -EINVAL;
		} else {
			memcpy(new_pattern.pattern, eth_pattern->pattern,
			       eth_pattern->pattern_len);

			/* convert bitmask to bytemask */
			for (j = 0; j < eth_pattern->pattern_len; j++)
				if (eth_pattern->mask[j / 8] & BIT(j % 8))
					new_pattern.bytemask[j] = 0xff;

			new_pattern.pattern_len = eth_pattern->pattern_len;
			new_pattern.pkt_offset = eth_pattern->pkt_offset;
		}

		ret = ath12k_wmi_wow_add_pattern(ar, arvif->vdev_id,
						 pattern_id,
						 new_pattern.pattern,
						 new_pattern.bytemask,
						 new_pattern.pattern_len,
						 new_pattern.pkt_offset);
		if (ret) {
			ath12k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
				    pattern_id,
				    arvif->vdev_id, ret);
			return ret;
		}

		pattern_id++;
		__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
	}

	for (i = 0; i < WOW_EVENT_MAX; i++) {
		if (!test_bit(i, &wow_mask))
			continue;
		ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
		if (ret) {
			ath12k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
				    wow_wakeup_event(i), arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

static int ath12k_wow_set_wakeups(struct ath12k *ar,
				  struct cfg80211_wowlan *wowlan)
{
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (ath12k_wow_is_p2p_vdev(arvif->ahvif))
			continue;
		ret = ath12k_wow_vif_set_wakeups(arvif, wowlan);
		if (ret) {
			ath12k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

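/* Disable PNO (network list offload) in the firmware if it was enabled for the
 * preceding suspend, so normal scan behaviour is restored after resume.
 */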
static int ath12k_wow_vdev_clean_nlo(struct ath12k *ar, u32 vdev_id)
{
	struct wmi_pno_scan_req_arg *pno;
	int ret;

	if (!ar->nlo_enabled)
		return 0;

	pno = kzalloc(sizeof(*pno), GFP_KERNEL);
	if (!pno)
		return -ENOMEM;

	pno->enable = 0;
	ret = ath12k_wmi_wow_config_pno(ar, vdev_id, pno);
	if (ret) {
		ath12k_warn(ar->ab, "failed to disable PNO: %d\n", ret);
		goto out;
	}

	ar->nlo_enabled = false;

out:
	kfree(pno);
	return ret;
}

static int ath12k_wow_vif_clean_nlo(struct ath12k_link_vif *arvif)
{
	struct ath12k *ar = arvif->ar;

	switch (arvif->ahvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		return ath12k_wow_vdev_clean_nlo(ar, arvif->vdev_id);
	default:
		return 0;
	}
}

static int ath12k_wow_nlo_cleanup(struct ath12k *ar)
{
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (ath12k_wow_is_p2p_vdev(arvif->ahvif))
			continue;

		ret = ath12k_wow_vif_clean_nlo(arvif);
		if (ret) {
			ath12k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

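/* Install the hardware data filter on station vdevs for the duration of
 * suspend: WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC drops all multicast frames
 * other than ICMPv6.
 */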
static int ath12k_wow_set_hw_filter(struct ath12k *ar)
{
	struct wmi_hw_data_filter_arg arg;
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA)
			continue;

		arg.vdev_id = arvif->vdev_id;
		arg.enable = true;
		arg.hw_filter_bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC;
		ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg);
		if (ret) {
			ath12k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

static int ath12k_wow_clear_hw_filter(struct ath12k *ar)
{
	struct wmi_hw_data_filter_arg arg;
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA)
			continue;

		arg.vdev_id = arvif->vdev_id;
		arg.enable = false;
		arg.hw_filter_bitmap = 0;
		ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg);
		if (ret) {
			ath12k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

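/* Derive the IPv6 solicited-node multicast addresses (ff02::1:ffXX:XXXX, built
 * from the low 24 bits of each configured IPv6 address) that the NS offload in
 * firmware listens on while the host is asleep.
 */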
static void ath12k_wow_generate_ns_mc_addr(struct ath12k_base *ab,
					   struct wmi_arp_ns_offload_arg *offload)
{
	int i;

	for (i = 0; i < offload->ipv6_count; i++) {
		offload->self_ipv6_addr[i][0] = 0xff;
		offload->self_ipv6_addr[i][1] = 0x02;
		offload->self_ipv6_addr[i][11] = 0x01;
		offload->self_ipv6_addr[i][12] = 0xff;
		offload->self_ipv6_addr[i][13] =
					offload->ipv6_addr[i][13];
		offload->self_ipv6_addr[i][14] =
					offload->ipv6_addr[i][14];
		offload->self_ipv6_addr[i][15] =
					offload->ipv6_addr[i][15];
		ath12k_dbg(ab, ATH12K_DBG_WOW, "NS solicited addr %pI6\n",
			   offload->self_ipv6_addr[i]);
	}
}

static void ath12k_wow_prepare_ns_offload(struct ath12k_link_vif *arvif,
					  struct wmi_arp_ns_offload_arg *offload)
{
	struct net_device *ndev = ieee80211_vif_to_wdev(arvif->ahvif->vif)->netdev;
	struct ath12k_base *ab = arvif->ar->ab;
	struct inet6_ifaddr *ifa6;
	struct ifacaddr6 *ifaca6;
	struct inet6_dev *idev;
	u32 count = 0, scope;

	if (!ndev)
		return;

	idev = in6_dev_get(ndev);
	if (!idev)
		return;

	ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare ns offload\n");

	read_lock_bh(&idev->lock);

	/* get unicast address */
	list_for_each_entry(ifa6, &idev->addr_list, if_list) {
		if (count >= WMI_IPV6_MAX_COUNT)
			goto unlock;

		if (ifa6->flags & IFA_F_DADFAILED)
			continue;

		scope = ipv6_addr_src_scope(&ifa6->addr);
		if (scope != IPV6_ADDR_SCOPE_LINKLOCAL &&
		    scope != IPV6_ADDR_SCOPE_GLOBAL) {
			ath12k_dbg(ab, ATH12K_DBG_WOW,
				   "Unsupported ipv6 scope: %d\n", scope);
			continue;
		}

		memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr,
		       sizeof(ifa6->addr.s6_addr));
		offload->ipv6_type[count] = WMI_IPV6_UC_TYPE;
		ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 uc %pI6 scope %d\n",
			   count, offload->ipv6_addr[count],
			   scope);
		count++;
	}

	/* get anycast address */
	rcu_read_lock();

	for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6;
	     ifaca6 = rcu_dereference(ifaca6->aca_next)) {
		if (count >= WMI_IPV6_MAX_COUNT) {
			rcu_read_unlock();
			goto unlock;
		}

		scope = ipv6_addr_src_scope(&ifaca6->aca_addr);
		if (scope != IPV6_ADDR_SCOPE_LINKLOCAL &&
		    scope != IPV6_ADDR_SCOPE_GLOBAL) {
			ath12k_dbg(ab, ATH12K_DBG_WOW,
				   "Unsupported ipv6 scope: %d\n", scope);
			continue;
		}

		memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr,
		       sizeof(ifaca6->aca_addr));
		offload->ipv6_type[count] = WMI_IPV6_AC_TYPE;
		ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 ac %pI6 scope %d\n",
			   count, offload->ipv6_addr[count],
			   scope);
		count++;
	}

	rcu_read_unlock();

unlock:
	read_unlock_bh(&idev->lock);

	in6_dev_put(idev);

	offload->ipv6_count = count;
	ath12k_wow_generate_ns_mc_addr(ab, offload);
}

static void ath12k_wow_prepare_arp_offload(struct ath12k_link_vif *arvif,
					   struct wmi_arp_ns_offload_arg *offload)
{
	struct ieee80211_vif *vif = arvif->ahvif->vif;
	struct ieee80211_vif_cfg vif_cfg = vif->cfg;
	struct ath12k_base *ab = arvif->ar->ab;
	u32 ipv4_cnt;

	ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare arp offload\n");

	ipv4_cnt = min(vif_cfg.arp_addr_cnt, WMI_IPV4_MAX_COUNT);
	memcpy(offload->ipv4_addr, vif_cfg.arp_addr_list, ipv4_cnt * sizeof(u32));
	offload->ipv4_count = ipv4_cnt;

	ath12k_dbg(ab, ATH12K_DBG_WOW,
		   "wow arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n",
		   vif_cfg.arp_addr_cnt, vif->addr, offload->ipv4_addr);
}

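/* Push ARP and NS offload parameters (local IPv4/IPv6 addresses and the
 * solicited-node multicast addresses) to the firmware for every station vdev,
 * or clear them again when enable is false.
 */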
static int ath12k_wow_arp_ns_offload(struct ath12k *ar, bool enable)
{
	struct wmi_arp_ns_offload_arg *offload;
	struct ath12k_link_vif *arvif;
	struct ath12k_vif *ahvif;
	int ret = 0;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	offload = kmalloc(sizeof(*offload), GFP_KERNEL);
	if (!offload)
		return -ENOMEM;

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ahvif = arvif->ahvif;

		if (ahvif->vdev_type != WMI_VDEV_TYPE_STA)
			continue;

		memset(offload, 0, sizeof(*offload));

		memcpy(offload->mac_addr, ahvif->vif->addr, ETH_ALEN);
		ath12k_wow_prepare_ns_offload(arvif, offload);
		ath12k_wow_prepare_arp_offload(arvif, offload);

		ret = ath12k_wmi_arp_ns_offload(ar, arvif, offload, enable);
		if (ret) {
			ath12k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
				    arvif->vdev_id, enable, ret);
			break;
		}
	}

	/* free the offload buffer on both the success and error paths */
	kfree(offload);

	return ret;
}

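/* Enable or disable GTK rekey offload for associated station vdevs. Before
 * disabling, the current rekey information is requested from the firmware.
 */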
static int ath12k_gtk_rekey_offload(struct ath12k *ar, bool enable)
{
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA ||
		    !arvif->is_up ||
		    !arvif->rekey_data.enable_offload)
			continue;

		/* get rekey info before disabling rekey offload */
		if (!enable) {
			ret = ath12k_wmi_gtk_rekey_getinfo(ar, arvif);
			if (ret) {
				ath12k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
					    arvif->vdev_id, ret);
				return ret;
			}
		}

		ret = ath12k_wmi_gtk_rekey_offload(ar, arvif, enable);
		if (ret) {
			ath12k_warn(ar->ab, "failed to offload gtk rekey vdev %i: enable %d, ret %d\n",
				    arvif->vdev_id, enable, ret);
			return ret;
		}
	}

	return 0;
}

static int ath12k_wow_protocol_offload(struct ath12k *ar, bool enable)
{
	int ret;

	ret = ath12k_wow_arp_ns_offload(ar, enable);
	if (ret) {
		ath12k_warn(ar->ab, "failed to offload ARP and NS: enable %d, ret %d\n",
			    enable, ret);
		return ret;
	}

	ret = ath12k_gtk_rekey_offload(ar, enable);
	if (ret) {
		ath12k_warn(ar->ab, "failed to offload gtk rekey: enable %d, ret %d\n",
			    enable, ret);
		return ret;
	}

	return 0;
}

static int ath12k_wow_set_keepalive(struct ath12k *ar,
				    enum wmi_sta_keepalive_method method,
				    u32 interval)
{
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath12k_mac_vif_set_keepalive(arvif, method, interval);
		if (ret)
			return ret;
	}

	return 0;
}

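/* mac80211 suspend handler: clear any stale WoW state, program wakeup events,
 * enable protocol offloads, drain TX, install the hw data filter, switch on
 * NULL-frame keepalive, then put the firmware into WoW and suspend the HIF
 * layer. Any failure unwinds the WoW configuration and is reported back to
 * mac80211 as a non-zero return.
 */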
int ath12k_wow_op_suspend(struct ieee80211_hw *hw,
			  struct cfg80211_wowlan *wowlan)
{
	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
	struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
	int ret;

	lockdep_assert_wiphy(hw->wiphy);

	ret = ath12k_wow_cleanup(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
			    ret);
		goto exit;
	}

	ret = ath12k_wow_set_wakeups(ar, wowlan);
	if (ret) {
		ath12k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath12k_wow_protocol_offload(ar, true);
	if (ret) {
		ath12k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath12k_mac_wait_tx_complete(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
		goto cleanup;
	}

	ret = ath12k_wow_set_hw_filter(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to set hw filter: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath12k_wow_set_keepalive(ar,
				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
				       WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
	if (ret) {
		ath12k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
		goto cleanup;
	}

	ret = ath12k_wow_enable(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to start wow: %d\n", ret);
		goto cleanup;
	}

	ath12k_hif_irq_disable(ar->ab);
	ath12k_hif_ce_irq_disable(ar->ab);

	ret = ath12k_hif_suspend(ar->ab);
	if (ret) {
		ath12k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
		goto wakeup;
	}

	goto exit;

wakeup:
	ath12k_wow_wakeup(ar);

cleanup:
	ath12k_wow_cleanup(ar);

exit:
	return ret ? 1 : 0;
}

void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
	struct ath12k *ar = ath12k_ah_to_ar(ah, 0);

	lockdep_assert_wiphy(hw->wiphy);

	device_set_wakeup_enable(ar->ab->dev, enabled);
}

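/* mac80211 resume handler: resume the HIF layer, re-enable interrupts, wake
 * the firmware out of WoW, then undo the suspend-time configuration (PNO,
 * hw data filter, protocol offloads, keepalive). On failure the hardware is
 * marked for restart where possible.
 */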
int ath12k_wow_op_resume(struct ieee80211_hw *hw)
{
	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
	struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
	int ret;

	lockdep_assert_wiphy(hw->wiphy);

	ret = ath12k_hif_resume(ar->ab);
	if (ret) {
		ath12k_warn(ar->ab, "failed to resume hif: %d\n", ret);
		goto exit;
	}

	ath12k_hif_ce_irq_enable(ar->ab);
	ath12k_hif_irq_enable(ar->ab);

	ret = ath12k_wow_wakeup(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
		goto exit;
	}

	ret = ath12k_wow_nlo_cleanup(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
		goto exit;
	}

	ret = ath12k_wow_clear_hw_filter(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
		goto exit;
	}

	ret = ath12k_wow_protocol_offload(ar, false);
	if (ret) {
		ath12k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
			    ret);
		goto exit;
	}

	ret = ath12k_wow_set_keepalive(ar,
				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
				       WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
	if (ret) {
		ath12k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
		goto exit;
	}

exit:
	if (ret) {
		switch (ah->state) {
		case ATH12K_HW_STATE_ON:
			ah->state = ATH12K_HW_STATE_RESTARTING;
			ret = 1;
			break;
		case ATH12K_HW_STATE_OFF:
		case ATH12K_HW_STATE_RESTARTING:
		case ATH12K_HW_STATE_RESTARTED:
		case ATH12K_HW_STATE_WEDGED:
			ath12k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
				    ah->state);
			ret = -EIO;
			break;
		}
	}

	return ret;
}

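/* Advertise WoWLAN capabilities to cfg80211 when the firmware reports WoW
 * support: pattern matching limits (reduced by WOW_MAX_REDUCE in native WiFi
 * decap mode), optional net-detect, and the number of supported patterns.
 * Also marks the device as wakeup capable.
 */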
int ath12k_wow_init(struct ath12k *ar)
{
	if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
		return 0;

	ar->wow.wowlan_support = ath12k_wowlan_support;

	if (ar->ab->wow.wmi_conf_rx_decap_mode == ATH12K_HW_TXRX_NATIVE_WIFI) {
		ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
		ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
	}

	if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
		ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
		ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
	}

	ar->wow.max_num_patterns = ATH12K_WOW_PATTERNS;
	ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
	ar->ah->hw->wiphy->wowlan = &ar->wow.wowlan_support;

	device_set_wakeup_capable(ar->ab->dev, true);

	return 0;
}