/* SPDX-License-Identifier: ISC */
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;
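/* Per-ABI WMI op table.
 *
 * Each WMI flavour supported by ath10k (the non-TLV main/10.x families and
 * the TLV variant) fills in its own struct wmi_ops instance; ar->wmi.ops
 * points at the table for the firmware that is actually running.  An op left
 * NULL simply means "not supported by this WMI ABI".
 */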
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
	void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);

	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
				  struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
	int (*pull_mgmt_tx_bundle_compl)(
		struct ath10k *ar, struct sk_buff *skb,
		struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_echo_ev_arg *arg);
	int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
				  struct wmi_dfs_status_ev_arg *arg);
	int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_svc_avail_ev_arg *arg);
	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_base_macaddr)(struct ath10k *ar,
						     const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
						 u32 prob_req_oui);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_request_peer_stats_info)(struct ath10k *ar,
						       u32 vdev_id,
						       enum
						       wmi_peer_stats_info_request_type
						       type,
						       u8 *addr,
						       u32 reset);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
					    struct sk_buff *skb,
					    dma_addr_t paddr);
	int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_radar_found)
			(struct ath10k *ar,
			 const struct ath10k_radar_found_info *arg);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
							u8 enable,
							u32 detect_level,
							u32 detect_margin);
	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
					       enum wmi_host_platform_type type,
					       u32 fw_feature_bitmap);
	int (*get_vdev_subtype)(struct ath10k *ar,
				enum wmi_vdev_subtype subtype);
	struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar,
					      u32 vdev_id,
					      struct wmi_pno_scan_req *pno_scan);
	struct sk_buff *(*gen_pdev_bss_chan_info_req)
					(struct ath10k *ar,
					 enum wmi_bss_survey_req_type type);
	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
	struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
							u32 param);
	struct sk_buff *(*gen_bb_timing)
			(struct ath10k *ar,
			 const struct wmi_bb_timing_cfg_arg *arg);
};
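/* The ath10k_wmi_*() helpers below are thin wrappers around the op table.
 * Nearly all of them follow the same shape (sketch only; "gen_foo" and
 * "foo_cmdid" are placeholders, not real members):
 *
 *	if (!ar->wmi.ops->gen_foo)
 *		return -EOPNOTSUPP;
 *
 *	skb = ar->wmi.ops->gen_foo(ar, ...);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->foo_cmdid);
 *
 * i.e. bail out if the running WMI ABI does not implement the op, let the
 * ABI-specific gen_*() helper build the command buffer, then queue it with
 * the ABI-specific command id from ar->wmi.cmd.  The pull_*() wrappers do
 * the reverse: they parse a received event skb into a host-order *_ev_arg.
 */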
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
		       size_t len)
{
	if (!ar->wmi.ops->map_svc_ext)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc_ext(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_tx_compl)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb,
				     struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_tx_bundle_compl)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_tx_bundle_compl(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		       int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_svc_avail_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_avail)
		return -EOPNOTSUPP;
	return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_echo_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_echo_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_dfs_status_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_dfs_status_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
}
static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
	if (!ar->wmi.ops->get_txbf_conf_scheme)
		return WMI_TXBF_CONF_UNSUPPORTED;

	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}
static inline int
ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu)
{
	if (!ar->wmi.ops->cleanup_mgmt_tx_send)
		return -EOPNOTSUPP;

	return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu);
}

static inline int
ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
			dma_addr_t paddr)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_send_cmdid);
	if (ret)
		return ret;

	return 0;
}
static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either.
	 */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}
static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_set_base_macaddr(struct ath10k *ar, const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_base_macaddr)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_base_macaddr(ar, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_base_macaddr_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}
static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}
static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
			      const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
						    enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN],
			  const struct wmi_sta_uapsd_auto_trig_arg *args,
			  u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
					      num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
			 const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN],
		       enum wmi_peer_type peer_type)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}
static inline int
ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
{
	struct sk_buff *skb;
	u32 prob_req_oui;

	prob_req_oui = (((u32)mac_addr[0]) << 16) |
		       (((u32)mac_addr[1]) << 8) | mac_addr[2];

	if (!ar->wmi.ops->gen_scan_prob_req_oui)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->scan_prob_req_oui_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}
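/* Beacon submission below uses the no-wait command path: it is intended for
 * contexts that cannot sleep waiting for WMI tx credits, and on failure the
 * command buffer has to be freed here since no completion will consume it.
 * (Descriptive note; see ath10k_wmi_cmd_send_nowait() in wmi.c for the
 * underlying behaviour.)
 */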
static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
				  const void *bcn, size_t bcn_len,
				  u32 bcn_paddr, bool dtim_zero,
				  bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
					  dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}
static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_request_peer_stats_info(struct ath10k *ar,
				   u32 vdev_id,
				   enum wmi_peer_stats_info_request_type type,
				   u8 *addr,
				   u32 reset)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_peer_stats_info)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_peer_stats_info(ar,
						       vdev_id,
						       type,
						       addr,
						       reset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_peer_stats_info_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}
static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_temperature_cmdid);
}
static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
					  reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->delba_send_cmdid);
}
static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		    void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
					prb_caps, prb_erp, prb_ies,
					prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}
static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
			 const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_enable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
				enum wmi_wow_wakeup_event event,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
			   const u8 *pattern, const u8 *mask,
			   int pattern_len, int pattern_offset)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
					       pattern, mask, pattern_len,
					       pattern_offset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_del_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id,
			  struct wmi_pno_scan_req *pno_scan)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_config_pno)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
				enum wmi_tdls_state state)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_update_fw_tdls_state)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
			    const struct wmi_tdls_peer_update_cmd_arg *arg,
			    const struct wmi_tdls_peer_capab_arg *cap,
			    const struct wmi_channel_arg *chan)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_tdls_peer_update)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_adaptive_qcs)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}
static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline void
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
			 char *buf)
{
	if (!ar->wmi.ops->fw_stats_fill)
		return;

	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
}

static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
				    u32 detect_level, u32 detect_margin)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
							detect_level,
							detect_margin);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}
static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
			       enum wmi_host_platform_type type,
			       u32 fw_feature_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->ext_resource_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->ext_resource_config(ar, type,
					       fw_feature_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ext_resource_cfg_cmdid);
}

static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
	if (!ar->wmi.ops->get_vdev_subtype)
		return -EOPNOTSUPP;

	return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}

static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
				      enum wmi_bss_survey_req_type type)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_pdev_bss_chan_info_req)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   wmi->cmd->pdev_bss_chan_info_request_cmdid);
}
static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_echo)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_echo(ar, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_table_cmdid);
}

static inline int
ath10k_wmi_report_radar_found(struct ath10k *ar,
			      const struct ath10k_radar_found_info *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_radar_found)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_radar_found(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->radar_found_cmdid);
}

static inline int
ath10k_wmi_pdev_bb_timing(struct ath10k *ar,
			  const struct wmi_bb_timing_cfg_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bb_timing)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bb_timing(ar, arg);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->set_bb_timing_cmdid);
}

#endif