1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2012-2014, 2021-2022, 2024 Intel Corporation
4 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
5 * Copyright (C) 2015 Intel Deutschland GmbH
8 #include <net/addrconf.h>
9 #include <linux/bitops.h>
/*
 * iwl_mvm_set_wowlan_qos_seq - fill the WoWLAN config with QoS sequence
 * counters
 *
 * Copies each TID's transmit sequence number from the AP station's
 * per-TID state into @cmd->qos_seq[] (little-endian) so the firmware can
 * continue the sequence-number space while the host is asleep.
 *
 * @mvm_ap_sta: station data holding tid_data[].seq_number for each TID
 * @cmd: WoWLAN configuration command (v6) whose qos_seq[] array is filled
 */
void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
				struct iwl_wowlan_config_cmd_v6 *cmd)
	/*
	 * For QoS counters, we store the one to use next, so subtract 0x10
	 * since the uCode will add 0x10 *before* using the value while we
	 * increment after using the value (i.e. store the next value to use).
	 */
	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_ap_sta->tid_data[i].seq_number;
		/* NOTE(review): the 0x10 subtraction described by the comment
		 * above is not visible in this excerpt (line elided) — confirm
		 * seq is adjusted before being stored here. */
		cmd->qos_seq[i] = cpu_to_le16(seq);
/*
 * iwl_mvm_send_proto_offload - program firmware protocol offloads for D3
 *
 * Builds and sends PROT_OFFLOAD_CONFIG_CMD so the firmware can answer
 * ARP requests (IPv4) and Neighbor Solicitations (IPv6 NDP) on the
 * host's behalf while suspended.  The command layout used (v1 / v2 /
 * v3-small / v4) is selected from the firmware capability flags; all
 * layouts share struct iwl_proto_offload_cmd_common.
 *
 * @mvm: the mvm device context
 * @vif: interface whose addresses are being offloaded
 * @disable_offloading: when true the command is still sent, but the
 *	"enabled" mask is left unset so no offloads are active
 *
 * Returns the result of iwl_mvm_send_cmd() (0 on success, negative
 * errno on failure).
 *
 * NOTE(review): this excerpt is gap-compressed — some parameters,
 * closing braces, "else" lines, local declarations (e.g. i, j, c,
 * n_nsc, n_addrs, num_skipped, offload_ns, sta_id, the cmd union and
 * the #endif) are elided.  Comments below describe only visible code.
 */
int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
			       struct ieee80211_vif *vif,
			       bool disable_offloading,
	/* one layout per firmware generation; only the selected one is
	 * filled (enclosing union/struct declaration elided in excerpt) */
	struct iwl_proto_offload_cmd_v1 v1;
	struct iwl_proto_offload_cmd_v2 v2;
	struct iwl_proto_offload_cmd_v3_small v3s;
	struct iwl_proto_offload_cmd_v4 v4;
	struct iwl_host_cmd hcmd = {
		.id = PROT_OFFLOAD_CONFIG_CMD,
		/* command payload is on the stack: ask the transport to
		 * duplicate it rather than keep a pointer */
		.dataflags[0] = IWL_HCMD_DFL_DUP,
	struct iwl_proto_offload_cmd_common *common;
	u32 enabled = 0, size;
	u32 capa_flags = mvm->fw->ucode_capa.flags;
	/* command version; presumably gates the v4 sta_id field below
	 * (the "ver >= N" check is elided in this excerpt) — confirm */
	int ver = iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 0);
#if IS_ENABLED(CONFIG_IPV6)
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	/*
	 * Skip tentative address when ns offload is enabled to avoid
	 * ... (line elided in this excerpt) ...
	 * Keep tentative address when ns offload is disabled so the NS packets
	 * will not be filtered out and will wake up the host.
	 */
	bool skip_tentative = offload_ns;
	if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
	    capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
		struct iwl_ns_config *nsc;
		struct iwl_targ_addr *addrs;
		/* point at the small (v3s) or large (v4) layout's NS-config
		 * and target-address tables and record their capacities */
		if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
			nsc = cmd.v3s.ns_config;
			n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
			addrs = cmd.v3s.targ_addrs;
			n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
			/* NOTE(review): the "} else {" separating the small
			 * and large branches is elided in this excerpt */
			nsc = cmd.v4.ns_config;
			n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
			addrs = cmd.v4.targ_addrs;
			n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
		/*
		 * For each address we have (and that will fit) fill a target
		 * address struct and combine for NS offload structs with the
		 * solicited node addresses.
		 * (loop initializer elided in this excerpt; i walks the
		 * target addresses, c counts distinct NS configs)
		 */
			i < mvmvif->num_target_ipv6_addrs &&
			i < n_addrs && c < n_nsc; i++) {
			struct in6_addr solicited_addr;
			/* skip addresses still tentative (DAD not complete)
			 * when NS offload is enabled */
			test_bit(i, mvmvif->tentative_addrs)) {
			/* derive the solicited-node multicast address for
			 * this target address (RFC 4291 ff02::1:ffXX:XXXX) */
			addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
			/* share one NS config entry between targets that map
			 * to the same solicited-node address */
			for (j = 0; j < c; j++)
				if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
						  &solicited_addr) == 0)
			addrs[i].addr = mvmvif->target_ipv6_addrs[i];
			addrs[i].config_num = cpu_to_le32(j);
			nsc[j].dest_ipv6_addr = solicited_addr;
			memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
		/* mark the IPv6 section valid only if at least one address
		 * survived the tentative-address filtering */
		if (mvmvif->num_target_ipv6_addrs - num_skipped)
			enabled |= IWL_D3_PROTO_IPV6_VALID;
		if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
			cmd.v3s.num_valid_ipv6_addrs =
				cpu_to_le32(i - num_skipped);
			/* NOTE(review): "else" elided in this excerpt */
			cmd.v4.num_valid_ipv6_addrs =
				cpu_to_le32(i - num_skipped);
	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
		/* legacy v2 layout: a flat list of raw IPv6 addresses,
		 * no per-address NS config entries */
		BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
			     sizeof(mvmvif->target_ipv6_addrs[0]));
		for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
				    IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++) {
			if (skip_tentative &&
			    test_bit(i, mvmvif->tentative_addrs))
			memcpy(cmd.v2.target_ipv6_addr[i],
			       &mvmvif->target_ipv6_addrs[i],
			       sizeof(cmd.v2.target_ipv6_addr[i]));
		/* NOTE(review): guard condition for the line below is elided */
		enabled |= IWL_D3_PROTO_IPV6_VALID;
		memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
		/* oldest v1 layout — same scheme with a smaller address list */
		BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
			     sizeof(mvmvif->target_ipv6_addrs[0]));
		for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
				    IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++) {
			if (skip_tentative &&
			    test_bit(i, mvmvif->tentative_addrs))
			memcpy(cmd.v1.target_ipv6_addr[i],
			       &mvmvif->target_ipv6_addrs[i],
			       sizeof(cmd.v1.target_ipv6_addr[i]));
		/* NOTE(review): guard condition for the line below is elided */
		enabled |= IWL_D3_PROTO_IPV6_VALID;
		memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
	/* advertise NS offload only if valid IPv6 state was programmed */
	if (offload_ns && (enabled & IWL_D3_PROTO_IPV6_VALID))
		enabled |= IWL_D3_PROTO_OFFLOAD_NS;
	/* select the common header and command size for the chosen layout */
	if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
		common = &cmd.v3s.common;
		size = sizeof(cmd.v3s);
	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
		common = &cmd.v4.common;
		size = sizeof(cmd.v4);
		/*
		 * This basically uses iwl_proto_offload_cmd_v3_large
		 * which doesn't have the sta_id parameter before the
		 * common part (surrounding version check elided in excerpt).
		 */
		size -= sizeof(cmd.v4.sta_id);
		hcmd.data[0] = common;
	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
		common = &cmd.v2.common;
		size = sizeof(cmd.v2);
		/* NOTE(review): "} else {" elided — fallback is v1 */
		common = &cmd.v1.common;
		size = sizeof(cmd.v1);
	/* IPv4/ARP offload: only the first configured ARP address is sent */
	if (vif->cfg.arp_addr_cnt) {
		enabled |= IWL_D3_PROTO_OFFLOAD_ARP | IWL_D3_PROTO_IPV4_VALID;
		common->host_ipv4_addr = vif->cfg.arp_addr_list[0];
		memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
	/* BTM (BSS Transition Management) offload, if firmware supports it */
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_OFFLOAD_BTM_SUPPORT))
		enabled |= IWL_D3_PROTO_OFFLOAD_BTM;
	/* when disabling, leave common->enabled unset (presumably the cmd
	 * buffer was zero-initialized — initializer elided in excerpt) */
	if (!disable_offloading)
		common->enabled = cpu_to_le32(enabled);
	/* NOTE(review): the guard for the sta_id assignment (likely a check
	 * on "ver" and the large-layout flag) is elided in this excerpt */
	cmd.v4.sta_id = cpu_to_le32(sta_id);
	return iwl_mvm_send_cmd(mvm, &hcmd);