1 // SPDX-License-Identifier: ISC
3 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
7 #include <linux/kernel.h>
8 #include <linux/module.h>
10 #include <linux/of_device.h>
11 #include <linux/platform_device.h>
12 #include <linux/property.h>
13 #include <linux/regulator/consumer.h>
14 #include <linux/of_address.h>
15 #include <linux/iommu.h>
24 #define ATH10K_SNOC_RX_POST_RETRY_MS 50
25 #define CE_POLL_PIPE 4
26 #define ATH10K_SNOC_WAKE_IRQ 2
/* Human-readable IRQ names, one per copy engine (passed to request_irq()). */
static char *const ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};
/* Regulator supply names consumed by the WCN3990 SoC (bulk-enabled at power on). */
static const char * const ath10k_regulators[] = {
	"vdd-0.8-cx-mx",
	"vdd-1.8-xo",
	"vdd-1.3-rfa",
	"vdd-3.3-ch0",
};
/* Clock names consumed by the WCN3990 SoC (bulk prepare/enabled at power on). */
static const char * const ath10k_clocks[] = {
	"cxo_ref_clk_pin", "qdss",
};
54 static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
55 static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
56 static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
57 static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
58 static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
59 static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
61 static const struct ath10k_snoc_drv_priv drv_priv = {
62 .hw_rev = ATH10K_HW_WCN3990,
63 .dma_mask = DMA_BIT_MASK(35),
67 #define WCN3990_SRC_WR_IDX_OFFSET 0x3C
68 #define WCN3990_DST_WR_IDX_OFFSET 0x40
70 static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
72 .ce_id = __cpu_to_le16(0),
73 .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
77 .ce_id = __cpu_to_le16(3),
78 .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
82 .ce_id = __cpu_to_le16(4),
83 .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
87 .ce_id = __cpu_to_le16(5),
88 .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
92 .ce_id = __cpu_to_le16(7),
93 .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
97 .ce_id = __cpu_to_le16(1),
98 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
102 .ce_id = __cpu_to_le16(2),
103 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
107 .ce_id = __cpu_to_le16(7),
108 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
112 .ce_id = __cpu_to_le16(8),
113 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
117 .ce_id = __cpu_to_le16(9),
118 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
122 .ce_id = __cpu_to_le16(10),
123 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
127 .ce_id = __cpu_to_le16(11),
128 .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
132 static struct ce_attr host_ce_config_wlan[] = {
133 /* CE0: host->target HTC control streams */
135 .flags = CE_ATTR_FLAGS,
139 .send_cb = ath10k_snoc_htc_tx_cb,
142 /* CE1: target->host HTT + HTC control */
144 .flags = CE_ATTR_FLAGS,
147 .dest_nentries = 512,
148 .recv_cb = ath10k_snoc_htt_htc_rx_cb,
151 /* CE2: target->host WMI */
153 .flags = CE_ATTR_FLAGS,
157 .recv_cb = ath10k_snoc_htc_rx_cb,
160 /* CE3: host->target WMI */
162 .flags = CE_ATTR_FLAGS,
166 .send_cb = ath10k_snoc_htc_tx_cb,
169 /* CE4: host->target HTT */
171 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
172 .src_nentries = 2048,
175 .send_cb = ath10k_snoc_htt_tx_cb,
178 /* CE5: target->host HTT (ipa_uc->target ) */
180 .flags = CE_ATTR_FLAGS,
183 .dest_nentries = 512,
184 .recv_cb = ath10k_snoc_htt_rx_cb,
187 /* CE6: target autonomous hif_memcpy */
189 .flags = CE_ATTR_FLAGS,
195 /* CE7: ce_diag, the Diagnostic Window */
197 .flags = CE_ATTR_FLAGS,
203 /* CE8: Target to uMC */
205 .flags = CE_ATTR_FLAGS,
208 .dest_nentries = 128,
211 /* CE9 target->host HTT */
213 .flags = CE_ATTR_FLAGS,
216 .dest_nentries = 512,
217 .recv_cb = ath10k_snoc_htt_htc_rx_cb,
220 /* CE10: target->host HTT */
222 .flags = CE_ATTR_FLAGS,
225 .dest_nentries = 512,
226 .recv_cb = ath10k_snoc_htt_htc_rx_cb,
229 /* CE11: target -> host PKTLOG */
231 .flags = CE_ATTR_FLAGS,
234 .dest_nentries = 512,
235 .recv_cb = ath10k_snoc_pktlog_rx_cb,
239 static struct ce_pipe_config target_ce_config_wlan[] = {
240 /* CE0: host->target HTC control and raw streams */
242 .pipenum = __cpu_to_le32(0),
243 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
244 .nentries = __cpu_to_le32(32),
245 .nbytes_max = __cpu_to_le32(2048),
246 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
247 .reserved = __cpu_to_le32(0),
250 /* CE1: target->host HTT + HTC control */
252 .pipenum = __cpu_to_le32(1),
253 .pipedir = __cpu_to_le32(PIPEDIR_IN),
254 .nentries = __cpu_to_le32(32),
255 .nbytes_max = __cpu_to_le32(2048),
256 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
257 .reserved = __cpu_to_le32(0),
260 /* CE2: target->host WMI */
262 .pipenum = __cpu_to_le32(2),
263 .pipedir = __cpu_to_le32(PIPEDIR_IN),
264 .nentries = __cpu_to_le32(64),
265 .nbytes_max = __cpu_to_le32(2048),
266 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
267 .reserved = __cpu_to_le32(0),
270 /* CE3: host->target WMI */
272 .pipenum = __cpu_to_le32(3),
273 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
274 .nentries = __cpu_to_le32(32),
275 .nbytes_max = __cpu_to_le32(2048),
276 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
277 .reserved = __cpu_to_le32(0),
280 /* CE4: host->target HTT */
282 .pipenum = __cpu_to_le32(4),
283 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
284 .nentries = __cpu_to_le32(256),
285 .nbytes_max = __cpu_to_le32(256),
286 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
287 .reserved = __cpu_to_le32(0),
290 /* CE5: target->host HTT (HIF->HTT) */
292 .pipenum = __cpu_to_le32(5),
293 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
294 .nentries = __cpu_to_le32(1024),
295 .nbytes_max = __cpu_to_le32(64),
296 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
297 .reserved = __cpu_to_le32(0),
300 /* CE6: Reserved for target autonomous hif_memcpy */
302 .pipenum = __cpu_to_le32(6),
303 .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
304 .nentries = __cpu_to_le32(32),
305 .nbytes_max = __cpu_to_le32(16384),
306 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
307 .reserved = __cpu_to_le32(0),
310 /* CE7 used only by Host */
312 .pipenum = __cpu_to_le32(7),
313 .pipedir = __cpu_to_le32(4),
314 .nentries = __cpu_to_le32(0),
315 .nbytes_max = __cpu_to_le32(0),
316 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
317 .reserved = __cpu_to_le32(0),
320 /* CE8 Target to uMC */
322 .pipenum = __cpu_to_le32(8),
323 .pipedir = __cpu_to_le32(PIPEDIR_IN),
324 .nentries = __cpu_to_le32(32),
325 .nbytes_max = __cpu_to_le32(2048),
326 .flags = __cpu_to_le32(0),
327 .reserved = __cpu_to_le32(0),
330 /* CE9 target->host HTT */
332 .pipenum = __cpu_to_le32(9),
333 .pipedir = __cpu_to_le32(PIPEDIR_IN),
334 .nentries = __cpu_to_le32(32),
335 .nbytes_max = __cpu_to_le32(2048),
336 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
337 .reserved = __cpu_to_le32(0),
340 /* CE10 target->host HTT */
342 .pipenum = __cpu_to_le32(10),
343 .pipedir = __cpu_to_le32(PIPEDIR_IN),
344 .nentries = __cpu_to_le32(32),
345 .nbytes_max = __cpu_to_le32(2048),
346 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
347 .reserved = __cpu_to_le32(0),
350 /* CE11 target autonomous qcache memcpy */
352 .pipenum = __cpu_to_le32(11),
353 .pipedir = __cpu_to_le32(PIPEDIR_IN),
354 .nentries = __cpu_to_le32(32),
355 .nbytes_max = __cpu_to_le32(2048),
356 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
357 .reserved = __cpu_to_le32(0),
361 static struct ce_service_to_pipe target_service_to_ce_map_wlan[] = {
363 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
364 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
368 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
369 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
373 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
374 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
378 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
379 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
383 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
384 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
388 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
389 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
393 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
394 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
398 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
399 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
403 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
404 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
408 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
409 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
413 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
414 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
418 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
419 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
423 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
424 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
428 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
429 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
433 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
434 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
438 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
439 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
443 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
444 __cpu_to_le32(PIPEDIR_OUT),
447 { /* in = DL = target -> host */
448 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
449 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
452 { /* in = DL = target -> host */
453 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
454 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
457 { /* in = DL = target -> host pktlog */
458 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
459 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
462 /* (Additions here) */
471 static void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
473 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
475 iowrite32(value, ar_snoc->mem + offset);
478 static u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
480 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
483 val = ioread32(ar_snoc->mem + offset);
488 static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
490 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
491 struct ath10k *ar = pipe->hif_ce_state;
492 struct ath10k_ce *ce = ath10k_ce_priv(ar);
497 skb = dev_alloc_skb(pipe->buf_sz);
501 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
503 paddr = dma_map_single(ar->dev, skb->data,
504 skb->len + skb_tailroom(skb),
506 if (unlikely(dma_mapping_error(ar->dev, paddr))) {
507 ath10k_warn(ar, "failed to dma map snoc rx buf\n");
508 dev_kfree_skb_any(skb);
512 ATH10K_SKB_RXCB(skb)->paddr = paddr;
514 spin_lock_bh(&ce->ce_lock);
515 ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
516 spin_unlock_bh(&ce->ce_lock);
518 dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
520 dev_kfree_skb_any(skb);
527 static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
529 struct ath10k *ar = pipe->hif_ce_state;
530 struct ath10k_ce *ce = ath10k_ce_priv(ar);
531 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
532 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
535 if (pipe->buf_sz == 0)
538 if (!ce_pipe->dest_ring)
541 spin_lock_bh(&ce->ce_lock);
542 num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
543 spin_unlock_bh(&ce->ce_lock);
545 ret = __ath10k_snoc_rx_post_buf(pipe);
549 ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
550 mod_timer(&ar_snoc->rx_post_retry, jiffies +
551 ATH10K_SNOC_RX_POST_RETRY_MS);
557 static void ath10k_snoc_rx_post(struct ath10k *ar)
559 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
562 for (i = 0; i < CE_COUNT; i++)
563 ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
566 static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
567 void (*callback)(struct ath10k *ar,
568 struct sk_buff *skb))
570 struct ath10k *ar = ce_state->ar;
571 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
572 struct ath10k_snoc_pipe *pipe_info = &ar_snoc->pipe_info[ce_state->id];
574 struct sk_buff_head list;
575 void *transfer_context;
576 unsigned int nbytes, max_nbytes;
578 __skb_queue_head_init(&list);
579 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
581 skb = transfer_context;
582 max_nbytes = skb->len + skb_tailroom(skb);
583 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
584 max_nbytes, DMA_FROM_DEVICE);
586 if (unlikely(max_nbytes < nbytes)) {
587 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)\n",
589 dev_kfree_skb_any(skb);
593 skb_put(skb, nbytes);
594 __skb_queue_tail(&list, skb);
597 while ((skb = __skb_dequeue(&list))) {
598 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
599 ce_state->id, skb->len);
604 ath10k_snoc_rx_post_pipe(pipe_info);
607 static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
609 ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
612 static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
614 /* CE4 polling needs to be done whenever CE pipe which transports
615 * HTT Rx (target->host) is processed.
617 ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
619 ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
622 /* Called by lower (CE) layer when data is received from the Target.
623 * WCN3990 firmware uses separate CE(CE11) to transfer pktlog data.
625 static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
627 ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
630 static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
632 skb_pull(skb, sizeof(struct ath10k_htc_hdr));
633 ath10k_htt_t2h_msg_handler(ar, skb);
636 static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
638 ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
639 ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
642 static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
644 struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
645 struct ath10k *ar = ar_snoc->ar;
647 ath10k_snoc_rx_post(ar);
650 static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
652 struct ath10k *ar = ce_state->ar;
653 struct sk_buff_head list;
656 __skb_queue_head_init(&list);
657 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
661 __skb_queue_tail(&list, skb);
664 while ((skb = __skb_dequeue(&list)))
665 ath10k_htc_tx_completion_handler(ar, skb);
668 static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
670 struct ath10k *ar = ce_state->ar;
673 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
677 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
678 skb->len, DMA_TO_DEVICE);
679 ath10k_htt_hif_tx_complete(ar, skb);
683 static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
684 struct ath10k_hif_sg_item *items, int n_items)
686 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
687 struct ath10k_ce *ce = ath10k_ce_priv(ar);
688 struct ath10k_snoc_pipe *snoc_pipe;
689 struct ath10k_ce_pipe *ce_pipe;
692 snoc_pipe = &ar_snoc->pipe_info[pipe_id];
693 ce_pipe = snoc_pipe->ce_hdl;
694 spin_lock_bh(&ce->ce_lock);
696 for (i = 0; i < n_items - 1; i++) {
697 ath10k_dbg(ar, ATH10K_DBG_SNOC,
698 "snoc tx item %d paddr %pad len %d n_items %d\n",
699 i, &items[i].paddr, items[i].len, n_items);
701 err = ath10k_ce_send_nolock(ce_pipe,
702 items[i].transfer_context,
705 items[i].transfer_id,
706 CE_SEND_FLAG_GATHER);
711 ath10k_dbg(ar, ATH10K_DBG_SNOC,
712 "snoc tx item %d paddr %pad len %d n_items %d\n",
713 i, &items[i].paddr, items[i].len, n_items);
715 err = ath10k_ce_send_nolock(ce_pipe,
716 items[i].transfer_context,
719 items[i].transfer_id,
724 spin_unlock_bh(&ce->ce_lock);
730 __ath10k_ce_send_revert(ce_pipe);
732 spin_unlock_bh(&ce->ce_lock);
736 static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
737 struct bmi_target_info *target_info)
739 target_info->version = ATH10K_HW_WCN3990;
740 target_info->type = ATH10K_HW_WCN3990;
745 static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
747 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
749 ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");
751 return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
754 static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
759 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");
762 resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);
764 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
767 ath10k_ce_per_engine_service(ar, pipe);
770 static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
772 u8 *ul_pipe, u8 *dl_pipe)
774 const struct ce_service_to_pipe *entry;
775 bool ul_set = false, dl_set = false;
778 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");
780 for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
781 entry = &target_service_to_ce_map_wlan[i];
783 if (__le32_to_cpu(entry->service_id) != service_id)
786 switch (__le32_to_cpu(entry->pipedir)) {
791 *dl_pipe = __le32_to_cpu(entry->pipenum);
796 *ul_pipe = __le32_to_cpu(entry->pipenum);
802 *dl_pipe = __le32_to_cpu(entry->pipenum);
803 *ul_pipe = __le32_to_cpu(entry->pipenum);
810 if (!ul_set || !dl_set)
816 static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
817 u8 *ul_pipe, u8 *dl_pipe)
819 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");
821 (void)ath10k_snoc_hif_map_service_to_pipe(ar,
822 ATH10K_HTC_SVC_ID_RSVD_CTRL,
/* Mask all CE interrupts. */
static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
}
/* Unmask all CE interrupts. */
static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
}
836 static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
838 struct ath10k_ce_pipe *ce_pipe;
839 struct ath10k_ce_ring *ce_ring;
844 ar = snoc_pipe->hif_ce_state;
845 ce_pipe = snoc_pipe->ce_hdl;
846 ce_ring = ce_pipe->dest_ring;
851 if (!snoc_pipe->buf_sz)
854 for (i = 0; i < ce_ring->nentries; i++) {
855 skb = ce_ring->per_transfer_context[i];
859 ce_ring->per_transfer_context[i] = NULL;
861 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
862 skb->len + skb_tailroom(skb),
864 dev_kfree_skb_any(skb);
868 static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
870 struct ath10k_ce_pipe *ce_pipe;
871 struct ath10k_ce_ring *ce_ring;
876 ar = snoc_pipe->hif_ce_state;
877 ce_pipe = snoc_pipe->ce_hdl;
878 ce_ring = ce_pipe->src_ring;
883 if (!snoc_pipe->buf_sz)
886 for (i = 0; i < ce_ring->nentries; i++) {
887 skb = ce_ring->per_transfer_context[i];
891 ce_ring->per_transfer_context[i] = NULL;
893 ath10k_htc_tx_completion_handler(ar, skb);
897 static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
899 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
900 struct ath10k_snoc_pipe *pipe_info;
903 del_timer_sync(&ar_snoc->rx_post_retry);
904 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
905 pipe_info = &ar_snoc->pipe_info[pipe_num];
906 ath10k_snoc_rx_pipe_cleanup(pipe_info);
907 ath10k_snoc_tx_pipe_cleanup(pipe_info);
911 static void ath10k_snoc_hif_stop(struct ath10k *ar)
913 if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
914 ath10k_snoc_irq_disable(ar);
916 napi_synchronize(&ar->napi);
917 napi_disable(&ar->napi);
918 ath10k_snoc_buffer_cleanup(ar);
919 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
922 static int ath10k_snoc_hif_start(struct ath10k *ar)
924 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
926 napi_enable(&ar->napi);
927 ath10k_snoc_irq_enable(ar);
928 ath10k_snoc_rx_post(ar);
930 clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
932 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
937 static int ath10k_snoc_init_pipes(struct ath10k *ar)
941 for (i = 0; i < CE_COUNT; i++) {
942 ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
944 ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
953 static int ath10k_snoc_wlan_enable(struct ath10k *ar,
954 enum ath10k_firmware_mode fw_mode)
956 struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
957 struct ath10k_qmi_wlan_enable_cfg cfg;
958 enum wlfw_driver_mode_enum_v01 mode;
961 for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
962 tgt_cfg[pipe_num].pipe_num =
963 target_ce_config_wlan[pipe_num].pipenum;
964 tgt_cfg[pipe_num].pipe_dir =
965 target_ce_config_wlan[pipe_num].pipedir;
966 tgt_cfg[pipe_num].nentries =
967 target_ce_config_wlan[pipe_num].nentries;
968 tgt_cfg[pipe_num].nbytes_max =
969 target_ce_config_wlan[pipe_num].nbytes_max;
970 tgt_cfg[pipe_num].flags =
971 target_ce_config_wlan[pipe_num].flags;
972 tgt_cfg[pipe_num].reserved = 0;
975 cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
976 sizeof(struct ath10k_tgt_pipe_cfg);
977 cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *)
979 cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
980 sizeof(struct ath10k_svc_pipe_cfg);
981 cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
982 &target_service_to_ce_map_wlan;
983 cfg.num_shadow_reg_cfg = ARRAY_SIZE(target_shadow_reg_cfg_map);
984 cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
985 &target_shadow_reg_cfg_map;
988 case ATH10K_FIRMWARE_MODE_NORMAL:
989 mode = QMI_WLFW_MISSION_V01;
991 case ATH10K_FIRMWARE_MODE_UTF:
992 mode = QMI_WLFW_FTM_V01;
995 ath10k_err(ar, "invalid firmware mode %d\n", fw_mode);
999 return ath10k_qmi_wlan_enable(ar, &cfg, mode,
1003 static void ath10k_snoc_wlan_disable(struct ath10k *ar)
1005 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1007 /* If both ATH10K_FLAG_CRASH_FLUSH and ATH10K_SNOC_FLAG_RECOVERY
1008 * flags are not set, it means that the driver has restarted
1009 * due to a crash inject via debugfs. In this case, the driver
1010 * needs to restart the firmware and hence send qmi wlan disable,
1011 * during the driver restart sequence.
1013 if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags) ||
1014 !test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
1015 ath10k_qmi_wlan_disable(ar);
1018 static void ath10k_snoc_hif_power_down(struct ath10k *ar)
1020 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
1022 ath10k_snoc_wlan_disable(ar);
1023 ath10k_ce_free_rri(ar);
1026 static int ath10k_snoc_hif_power_up(struct ath10k *ar,
1027 enum ath10k_firmware_mode fw_mode)
1031 ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
1032 __func__, ar->state);
1034 ret = ath10k_snoc_wlan_enable(ar, fw_mode);
1036 ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
1040 ath10k_ce_alloc_rri(ar);
1042 ret = ath10k_snoc_init_pipes(ar);
1044 ath10k_err(ar, "failed to initialize CE: %d\n", ret);
1045 goto err_wlan_enable;
1051 ath10k_snoc_wlan_disable(ar);
1056 static int ath10k_snoc_hif_set_target_log_mode(struct ath10k *ar,
1062 fw_dbg_mode = ATH10K_ENABLE_FW_LOG_CE;
1064 fw_dbg_mode = ATH10K_ENABLE_FW_LOG_DIAG;
1066 return ath10k_qmi_set_fw_log_mode(ar, fw_dbg_mode);
1070 static int ath10k_snoc_hif_suspend(struct ath10k *ar)
1072 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1075 if (!device_may_wakeup(ar->dev))
1078 ret = enable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
1080 ath10k_err(ar, "failed to enable wakeup irq :%d\n", ret);
1084 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device suspended\n");
1089 static int ath10k_snoc_hif_resume(struct ath10k *ar)
1091 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1094 if (!device_may_wakeup(ar->dev))
1097 ret = disable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
1099 ath10k_err(ar, "failed to disable wakeup irq: %d\n", ret);
1103 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device resumed\n");
1109 static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
1110 .read32 = ath10k_snoc_read32,
1111 .write32 = ath10k_snoc_write32,
1112 .start = ath10k_snoc_hif_start,
1113 .stop = ath10k_snoc_hif_stop,
1114 .map_service_to_pipe = ath10k_snoc_hif_map_service_to_pipe,
1115 .get_default_pipe = ath10k_snoc_hif_get_default_pipe,
1116 .power_up = ath10k_snoc_hif_power_up,
1117 .power_down = ath10k_snoc_hif_power_down,
1118 .tx_sg = ath10k_snoc_hif_tx_sg,
1119 .send_complete_check = ath10k_snoc_hif_send_complete_check,
1120 .get_free_queue_number = ath10k_snoc_hif_get_free_queue_number,
1121 .get_target_info = ath10k_snoc_hif_get_target_info,
1122 .set_target_log_mode = ath10k_snoc_hif_set_target_log_mode,
1125 .suspend = ath10k_snoc_hif_suspend,
1126 .resume = ath10k_snoc_hif_resume,
1130 static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
1131 .read32 = ath10k_snoc_read32,
1132 .write32 = ath10k_snoc_write32,
1135 static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
1137 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1140 for (i = 0; i < CE_COUNT_MAX; i++) {
1141 if (ar_snoc->ce_irqs[i].irq_line == irq)
1144 ath10k_err(ar, "No matching CE id for irq %d\n", irq);
1149 static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
1151 struct ath10k *ar = arg;
1152 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1153 int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);
1155 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
1156 ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
1161 ath10k_snoc_irq_disable(ar);
1162 napi_schedule(&ar->napi);
1167 static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
1169 struct ath10k *ar = container_of(ctx, struct ath10k, napi);
1172 if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
1177 ath10k_ce_per_engine_service_any(ar);
1178 done = ath10k_htt_txrx_compl_task(ar, budget);
1180 if (done < budget) {
1182 ath10k_snoc_irq_enable(ar);
1188 static void ath10k_snoc_init_napi(struct ath10k *ar)
1190 netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
1191 ATH10K_NAPI_BUDGET);
1194 static int ath10k_snoc_request_irq(struct ath10k *ar)
1196 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1197 int irqflags = IRQF_TRIGGER_RISING;
1200 for (id = 0; id < CE_COUNT_MAX; id++) {
1201 ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
1202 ath10k_snoc_per_engine_handler,
1203 irqflags, ce_name[id], ar);
1206 "failed to register IRQ handler for CE %d: %d\n",
1215 for (id -= 1; id >= 0; id--)
1216 free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
1221 static void ath10k_snoc_free_irq(struct ath10k *ar)
1223 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1226 for (id = 0; id < CE_COUNT_MAX; id++)
1227 free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
1230 static int ath10k_snoc_resource_init(struct ath10k *ar)
1232 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1233 struct platform_device *pdev;
1234 struct resource *res;
1237 pdev = ar_snoc->dev;
1238 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
1240 ath10k_err(ar, "Memory base not found in DT\n");
1244 ar_snoc->mem_pa = res->start;
1245 ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
1246 resource_size(res));
1247 if (!ar_snoc->mem) {
1248 ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
1253 for (i = 0; i < CE_COUNT; i++) {
1254 res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
1256 ath10k_err(ar, "failed to get IRQ%d\n", i);
1260 ar_snoc->ce_irqs[i].irq_line = res->start;
1263 ret = device_property_read_u32(&pdev->dev, "qcom,xo-cal-data",
1264 &ar_snoc->xo_cal_data);
1265 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc xo-cal-data return %d\n", ret);
1267 ar_snoc->xo_cal_supported = true;
1268 ath10k_dbg(ar, ATH10K_DBG_SNOC, "xo cal data %x\n",
1269 ar_snoc->xo_cal_data);
1277 static void ath10k_snoc_quirks_init(struct ath10k *ar)
1279 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1280 struct device *dev = &ar_snoc->dev->dev;
1282 if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk"))
1283 set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags);
1286 int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
1288 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1289 struct ath10k_bus_params bus_params = {};
1292 if (test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
1296 case ATH10K_QMI_EVENT_FW_READY_IND:
1297 if (test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
1298 queue_work(ar->workqueue, &ar->restart_work);
1302 bus_params.dev_type = ATH10K_DEV_TYPE_LL;
1303 bus_params.chip_id = ar_snoc->target_info.soc_version;
1304 ret = ath10k_core_register(ar, &bus_params);
1306 ath10k_err(ar, "Failed to register driver core: %d\n",
1310 set_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags);
1312 case ATH10K_QMI_EVENT_FW_DOWN_IND:
1313 set_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
1314 set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
1317 ath10k_err(ar, "invalid fw indication: %llx\n", type);
1324 static int ath10k_snoc_setup_resource(struct ath10k *ar)
1326 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1327 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1328 struct ath10k_snoc_pipe *pipe;
1331 timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
1332 spin_lock_init(&ce->ce_lock);
1333 for (i = 0; i < CE_COUNT; i++) {
1334 pipe = &ar_snoc->pipe_info[i];
1335 pipe->ce_hdl = &ce->ce_states[i];
1337 pipe->hif_ce_state = ar;
1339 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
1341 ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
1346 pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
1348 ath10k_snoc_init_napi(ar);
1353 static void ath10k_snoc_release_resource(struct ath10k *ar)
1357 netif_napi_del(&ar->napi);
1358 for (i = 0; i < CE_COUNT; i++)
1359 ath10k_ce_free_pipe(ar, i);
1362 static int ath10k_hw_power_on(struct ath10k *ar)
1364 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1367 ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
1369 ret = regulator_bulk_enable(ar_snoc->num_vregs, ar_snoc->vregs);
1373 ret = clk_bulk_prepare_enable(ar_snoc->num_clks, ar_snoc->clks);
1380 regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
1384 static int ath10k_hw_power_off(struct ath10k *ar)
1386 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1388 ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
1390 clk_bulk_disable_unprepare(ar_snoc->num_clks, ar_snoc->clks);
1392 return regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
1395 static void ath10k_msa_dump_memory(struct ath10k *ar,
1396 struct ath10k_fw_crash_data *crash_data)
1398 const struct ath10k_hw_mem_layout *mem_layout;
1399 const struct ath10k_mem_region *current_region;
1400 struct ath10k_dump_ram_data_hdr *hdr;
1404 if (!crash_data || !crash_data->ramdump_buf)
1407 mem_layout = ath10k_coredump_get_mem_layout(ar);
1411 current_region = &mem_layout->region_table.regions[0];
1413 buf = crash_data->ramdump_buf;
1414 buf_len = crash_data->ramdump_buf_len;
1415 memset(buf, 0, buf_len);
1417 /* Reserve space for the header. */
1419 buf += sizeof(*hdr);
1420 buf_len -= sizeof(*hdr);
1422 hdr->region_type = cpu_to_le32(current_region->type);
1423 hdr->start = cpu_to_le32((unsigned long)ar->msa.vaddr);
1424 hdr->length = cpu_to_le32(ar->msa.mem_size);
1426 if (current_region->len < ar->msa.mem_size) {
1427 memcpy(buf, ar->msa.vaddr, current_region->len);
1428 ath10k_warn(ar, "msa dump length is less than msa size %x, %x\n",
1429 current_region->len, ar->msa.mem_size);
1431 memcpy(buf, ar->msa.vaddr, ar->msa.mem_size);
1435 void ath10k_snoc_fw_crashed_dump(struct ath10k *ar)
1437 struct ath10k_fw_crash_data *crash_data;
1438 char guid[UUID_STRING_LEN + 1];
1440 mutex_lock(&ar->dump_mutex);
1442 spin_lock_bh(&ar->data_lock);
1443 ar->stats.fw_crash_counter++;
1444 spin_unlock_bh(&ar->data_lock);
1446 crash_data = ath10k_coredump_new(ar);
1449 scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
1451 scnprintf(guid, sizeof(guid), "n/a");
1453 ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
1454 ath10k_print_driver_info(ar);
1455 ath10k_msa_dump_memory(ar, crash_data);
1456 mutex_unlock(&ar->dump_mutex);
/*
 * ath10k_setup_msa_resources() - reserve memory for the firmware MSA
 * region.
 *
 * Two paths: if the device-tree node carries a "memory-region" phandle,
 * resolve that fixed reserved region and map it with devm_memremap();
 * otherwise fall back to a DMA-coherent allocation of @msa_size bytes.
 * On success ar->msa.{paddr,vaddr,mem_size} are populated.
 *
 * Return: 0 on success, negative errno on failure.
 */
1459 static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
1461 struct device *dev = ar->dev;
1462 struct device_node *node;
/* Fixed-region path: DT supplies the physical MSA carveout. */
1466 node = of_parse_phandle(dev->of_node, "memory-region", 0);
1468 ret = of_address_to_resource(node, 0, &r);
1470 dev_err(dev, "failed to resolve msa fixed region\n");
1475 ar->msa.paddr = r.start;
1476 ar->msa.mem_size = resource_size(&r);
1477 ar->msa.vaddr = devm_memremap(dev, ar->msa.paddr,
1480 if (IS_ERR(ar->msa.vaddr)) {
1481 dev_err(dev, "failed to map memory region: %pa\n",
1483 return PTR_ERR(ar->msa.vaddr);
/* Fallback path: no DT carveout, allocate coherent DMA memory. */
1486 ar->msa.vaddr = dmam_alloc_coherent(dev, msa_size,
1489 if (!ar->msa.vaddr) {
1490 ath10k_err(ar, "failed to allocate dma memory for msa region\n");
1493 ar->msa.mem_size = msa_size;
1496 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa.paddr: %pad , msa.vaddr: 0x%p\n",
/*
 * ath10k_fw_init() - set up the firmware's view of memory for the
 * non-TrustZone case.
 *
 * If the host DT node has no "wifi-firmware" child, the platform uses
 * TrustZone for firmware memory management (ar_snoc->use_tz = true) and
 * no further setup is done here.  Otherwise this registers a child
 * platform device for that node, configures its DMA, allocates an
 * IOMMU domain, attaches the device to it, and maps the MSA region so
 * the firmware can access it at fw_start_addr.
 *
 * Return: 0 on success, negative errno on failure (with the goto chain
 * unwinding the IOMMU attach/domain and the platform device).
 */
1503 static int ath10k_fw_init(struct ath10k *ar)
1505 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1506 struct device *host_dev = &ar_snoc->dev->dev;
1507 struct platform_device_info info;
1508 struct iommu_domain *iommu_dom;
1509 struct platform_device *pdev;
1510 struct device_node *node;
/* No "wifi-firmware" child node => TrustZone manages firmware memory. */
1513 node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
1515 ar_snoc->use_tz = true;
/* Register a child platform device representing the firmware. */
1519 memset(&info, 0, sizeof(info));
1520 info.fwnode = &node->fwnode;
1521 info.parent = host_dev;
1522 info.name = node->name;
1523 info.dma_mask = DMA_BIT_MASK(32);
1525 pdev = platform_device_register_full(&info);
1528 return PTR_ERR(pdev);
1531 pdev->dev.of_node = node;
/* Apply DT dma-ranges/iommus configuration to the new device. */
1533 ret = of_dma_configure(&pdev->dev, node, true);
1535 ath10k_err(ar, "dma configure fail: %d\n", ret);
1536 goto err_unregister;
1539 ar_snoc->fw.dev = &pdev->dev;
/* Own IOMMU domain so the firmware mapping is fully driver-controlled. */
1541 iommu_dom = iommu_domain_alloc(&platform_bus_type);
1543 ath10k_err(ar, "failed to allocate iommu domain\n");
1545 goto err_unregister;
1548 ret = iommu_attach_device(iommu_dom, ar_snoc->fw.dev);
1550 ath10k_err(ar, "could not attach device: %d\n", ret);
1551 goto err_iommu_free;
1554 ar_snoc->fw.iommu_domain = iommu_dom;
1555 ar_snoc->fw.fw_start_addr = ar->msa.paddr;
/* Map the MSA region IOVA == paddr (fw_start_addr was just set to
 * msa.paddr above) with read/write permission for the firmware.
 */
1557 ret = iommu_map(iommu_dom, ar_snoc->fw.fw_start_addr,
1558 ar->msa.paddr, ar->msa.mem_size,
1559 IOMMU_READ | IOMMU_WRITE);
1561 ath10k_err(ar, "failed to map firmware region: %d\n", ret);
1562 goto err_iommu_detach;
/* Error unwinding: detach, free the domain, unregister the device. */
1570 iommu_detach_device(iommu_dom, ar_snoc->fw.dev);
1573 iommu_domain_free(iommu_dom);
1576 platform_device_unregister(pdev);
/*
 * ath10k_fw_deinit() - tear down what ath10k_fw_init() set up: unmap
 * the firmware MSA mapping, detach from and free the IOMMU domain, and
 * unregister the child platform device.  No-op under TrustZone
 * (ar_snoc->use_tz), where none of this was created.
 */
1582 static int ath10k_fw_deinit(struct ath10k *ar)
1584 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1585 const size_t mapped_size = ar_snoc->fw.mapped_mem_size;
1586 struct iommu_domain *iommu;
1587 size_t unmapped_size;
/* TrustZone path did no IOMMU/platform-device setup; nothing to undo. */
1589 if (ar_snoc->use_tz)
1592 iommu = ar_snoc->fw.iommu_domain;
/* iommu_unmap() returns the number of bytes actually unmapped; a
 * mismatch with what was mapped is logged but not fatal.
 */
1594 unmapped_size = iommu_unmap(iommu, ar_snoc->fw.fw_start_addr,
1596 if (unmapped_size != mapped_size)
1597 ath10k_err(ar, "failed to unmap firmware: %zu\n",
1600 iommu_detach_device(iommu, ar_snoc->fw.dev);
1601 iommu_domain_free(iommu);
1603 platform_device_unregister(to_platform_device(ar_snoc->fw.dev));
/* Device-tree match table: binds this driver to WCN3990 wifi nodes. */
1608 static const struct of_device_id ath10k_snoc_dt_match[] = {
1609 { .compatible = "qcom,wcn3990-wifi",
1614 MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
/*
 * ath10k_snoc_probe() - platform-driver probe for WCN3990 SNOC.
 *
 * Order of setup: match-data lookup, DMA mask, core allocation,
 * quirks/resources/IRQs, regulator and clock bulk descriptors, power
 * on, MSA memory, firmware (IOMMU) init, and finally QMI registration.
 * Failures unwind through the goto-cleanup chain at the bottom in
 * reverse order.
 *
 * Return: 0 on success, negative errno on failure.
 */
1616 static int ath10k_snoc_probe(struct platform_device *pdev)
1618 const struct ath10k_snoc_drv_priv *drv_data;
1619 struct ath10k_snoc *ar_snoc;
/* hw_rev/dma_mask/msa_size come from the OF match data (drv_priv). */
1627 drv_data = device_get_match_data(dev);
1629 dev_err(dev, "failed to find matching device tree id\n");
/* WCN3990 uses a 35-bit DMA mask (see drv_priv above). */
1633 ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
1635 dev_err(dev, "failed to set dma mask: %d\n", ret);
1639 ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
1640 drv_data->hw_rev, &ath10k_snoc_hif_ops);
1642 dev_err(dev, "failed to allocate core\n");
1646 ar_snoc = ath10k_snoc_priv(ar);
1647 ar_snoc->dev = pdev;
1648 platform_set_drvdata(pdev, ar);
1650 ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
1651 ar->ce_priv = &ar_snoc->ce;
1652 msa_size = drv_data->msa_size;
1654 ath10k_snoc_quirks_init(ar);
1656 ret = ath10k_snoc_resource_init(ar);
1658 ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
1659 goto err_core_destroy;
1662 ret = ath10k_snoc_setup_resource(ar);
1664 ath10k_warn(ar, "failed to setup resource: %d\n", ret);
1665 goto err_core_destroy;
1667 ret = ath10k_snoc_request_irq(ar);
1669 ath10k_warn(ar, "failed to request irqs: %d\n", ret);
1670 goto err_release_resource;
/* Build the regulator bulk descriptor array from ath10k_regulators[]
 * (devm-managed, so no explicit free on the error paths).
 */
1673 ar_snoc->num_vregs = ARRAY_SIZE(ath10k_regulators);
1674 ar_snoc->vregs = devm_kcalloc(&pdev->dev, ar_snoc->num_vregs,
1675 sizeof(*ar_snoc->vregs), GFP_KERNEL);
1676 if (!ar_snoc->vregs) {
1680 for (i = 0; i < ar_snoc->num_vregs; i++)
1681 ar_snoc->vregs[i].supply = ath10k_regulators[i];
1683 ret = devm_regulator_bulk_get(&pdev->dev, ar_snoc->num_vregs,
/* Same pattern for the clock bulk array from ath10k_clocks[]; the
 * clocks are optional (devm_clk_bulk_get_optional).
 */
1688 ar_snoc->num_clks = ARRAY_SIZE(ath10k_clocks);
1689 ar_snoc->clks = devm_kcalloc(&pdev->dev, ar_snoc->num_clks,
1690 sizeof(*ar_snoc->clks), GFP_KERNEL);
1691 if (!ar_snoc->clks) {
1696 for (i = 0; i < ar_snoc->num_clks; i++)
1697 ar_snoc->clks[i].id = ath10k_clocks[i];
1699 ret = devm_clk_bulk_get_optional(&pdev->dev, ar_snoc->num_clks,
1704 ret = ath10k_hw_power_on(ar);
1706 ath10k_err(ar, "failed to power on device: %d\n", ret);
1710 ret = ath10k_setup_msa_resources(ar, msa_size);
1712 ath10k_warn(ar, "failed to setup msa resources: %d\n", ret);
1716 ret = ath10k_fw_init(ar);
1718 ath10k_err(ar, "failed to initialize firmware: %d\n", ret);
1722 ret = ath10k_qmi_init(ar, msa_size);
1724 ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
1728 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
/* Error unwinding, in reverse order of acquisition. */
1733 ath10k_fw_deinit(ar);
1736 ath10k_hw_power_off(ar);
1739 ath10k_snoc_free_irq(ar);
1741 err_release_resource:
1742 ath10k_snoc_release_resource(ar);
1745 ath10k_core_destroy(ar);
/*
 * ath10k_snoc_remove() - platform-driver remove: wait out an in-flight
 * recovery (bounded at 3 s), mark the device as unregistering, then
 * tear everything down in reverse-probe order.
 */
1750 static int ath10k_snoc_remove(struct platform_device *pdev)
1752 struct ath10k *ar = platform_get_drvdata(pdev);
1753 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1755 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");
1757 reinit_completion(&ar->driver_recovery);
/* If recovery is in progress, give it up to 3 s to finish first. */
1759 if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
1760 wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ);
1762 set_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags);
/* Teardown mirrors probe in reverse. */
1764 ath10k_core_unregister(ar);
1765 ath10k_hw_power_off(ar);
1766 ath10k_fw_deinit(ar);
1767 ath10k_snoc_free_irq(ar);
1768 ath10k_snoc_release_resource(ar);
1769 ath10k_qmi_deinit(ar);
1770 ath10k_core_destroy(ar);
/* Platform driver registration and module metadata. */
1775 static struct platform_driver ath10k_snoc_driver = {
1776 .probe = ath10k_snoc_probe,
1777 .remove = ath10k_snoc_remove,
1779 .name = "ath10k_snoc",
1780 .of_match_table = ath10k_snoc_dt_match,
1783 module_platform_driver(ath10k_snoc_driver);
1785 MODULE_AUTHOR("Qualcomm");
1786 MODULE_LICENSE("Dual BSD/GPL");
1787 MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");