1 // SPDX-License-Identifier: BSD-3-Clause-Clear
3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
7 #include <linux/module.h>
8 #include <linux/platform_device.h>
9 #include <linux/property.h>
10 #include <linux/of_device.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/of_address.h>
14 #include <linux/iommu.h>
19 #include <linux/remoteproc.h>
21 #include <linux/soc/qcom/smem.h>
22 #include <linux/soc/qcom/smem_state.h>
24 static const struct of_device_id ath11k_ahb_of_match[] = {
25 /* TODO: Should we change the compatible string to something similar
26 * to one that ath10k uses?
28 { .compatible = "qcom,ipq8074-wifi",
29 .data = (void *)ATH11K_HW_IPQ8074,
31 { .compatible = "qcom,ipq6018-wifi",
32 .data = (void *)ATH11K_HW_IPQ6018_HW10,
34 { .compatible = "qcom,wcn6750-wifi",
35 .data = (void *)ATH11K_HW_WCN6750_HW10,
37 { .compatible = "qcom,ipq5018-wifi",
38 .data = (void *)ATH11K_HW_IPQ5018_HW10,
43 MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);
45 #define ATH11K_IRQ_CE0_OFFSET 4
47 static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
65 "host2reo-re-injection",
67 "host2rxdma-monitor-ring3",
68 "host2rxdma-monitor-ring2",
69 "host2rxdma-monitor-ring1",
71 "wbm2host-rx-release",
73 "reo2host-destination-ring4",
74 "reo2host-destination-ring3",
75 "reo2host-destination-ring2",
76 "reo2host-destination-ring1",
77 "rxdma2host-monitor-destination-mac3",
78 "rxdma2host-monitor-destination-mac2",
79 "rxdma2host-monitor-destination-mac1",
80 "ppdu-end-interrupts-mac3",
81 "ppdu-end-interrupts-mac2",
82 "ppdu-end-interrupts-mac1",
83 "rxdma2host-monitor-status-ring-mac3",
84 "rxdma2host-monitor-status-ring-mac2",
85 "rxdma2host-monitor-status-ring-mac1",
86 "host2rxdma-host-buf-ring-mac3",
87 "host2rxdma-host-buf-ring-mac2",
88 "host2rxdma-host-buf-ring-mac1",
89 "rxdma2host-destination-ring-mac3",
90 "rxdma2host-destination-ring-mac2",
91 "rxdma2host-destination-ring-mac1",
92 "host2tcl-input-ring4",
93 "host2tcl-input-ring3",
94 "host2tcl-input-ring2",
95 "host2tcl-input-ring1",
96 "wbm2host-tx-completions-ring3",
97 "wbm2host-tx-completions-ring2",
98 "wbm2host-tx-completions-ring1",
99 "tcl2host-status-ring",
/* enum ext_irq_num - irq numbers that can be used by external modules
 * like datapath.
 *
 * Values start at 16 so they index directly into irq_name[]; the "ringN"
 * entries are laid out highest-first so that "ring1 - j" arithmetic in
 * ath11k_ahb_config_ext_irq() picks the right entry.
 */
enum ext_irq_num {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
145 ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
147 return ab->pci.msi.irqs[vector];
151 ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset)
153 u32 window_start = 0;
155 /* If offset lies within DP register range, use 1st window */
156 if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
157 window_start = ATH11K_PCI_WINDOW_START;
158 /* If offset lies within CE register range, use 2nd window */
159 else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
160 ATH11K_PCI_WINDOW_RANGE_MASK)
161 window_start = 2 * ATH11K_PCI_WINDOW_START;
167 ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value)
171 /* WCN6750 uses static window based register access*/
172 window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
174 iowrite32(value, ab->mem + window_start +
175 (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
178 static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset)
183 /* WCN6750 uses static window based register access */
184 window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
186 val = ioread32(ab->mem + window_start +
187 (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
191 static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
194 .get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
195 .window_write32 = ath11k_ahb_window_write32_wcn6750,
196 .window_read32 = ath11k_ahb_window_read32_wcn6750,
199 static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
201 return ioread32(ab->mem + offset);
204 static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
206 iowrite32(value, ab->mem + offset);
209 static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
213 for (i = 0; i < ab->hw_params.ce_count; i++) {
214 struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
216 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
219 tasklet_kill(&ce_pipe->intr_tq);
223 static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
227 for (i = 0; i < irq_grp->num_irq; i++)
228 disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
231 static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
235 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
236 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
238 ath11k_ahb_ext_grp_disable(irq_grp);
240 if (irq_grp->napi_enabled) {
241 napi_synchronize(&irq_grp->napi);
242 napi_disable(&irq_grp->napi);
243 irq_grp->napi_enabled = false;
248 static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
252 for (i = 0; i < irq_grp->num_irq; i++)
253 enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
256 static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset)
260 val = ath11k_ahb_read32(ab, offset);
261 ath11k_ahb_write32(ab, offset, val | BIT(bit));
264 static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
268 val = ath11k_ahb_read32(ab, offset);
269 ath11k_ahb_write32(ab, offset, val & ~BIT(bit));
272 static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
274 const struct ce_attr *ce_attr;
275 const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
276 u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
278 ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
279 ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
280 ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
282 ce_attr = &ab->hw_params.host_ce_config[ce_id];
283 if (ce_attr->src_nentries)
284 ath11k_ahb_setbit32(ab, ce_id, ie1_reg_addr);
286 if (ce_attr->dest_nentries) {
287 ath11k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
288 ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
293 static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
295 const struct ce_attr *ce_attr;
296 const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
297 u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
299 ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
300 ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
301 ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
303 ce_attr = &ab->hw_params.host_ce_config[ce_id];
304 if (ce_attr->src_nentries)
305 ath11k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);
307 if (ce_attr->dest_nentries) {
308 ath11k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
309 ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
314 static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab)
319 for (i = 0; i < ab->hw_params.ce_count; i++) {
320 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
323 irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
324 synchronize_irq(ab->irq_num[irq_idx]);
328 static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab)
333 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
334 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
336 for (j = 0; j < irq_grp->num_irq; j++) {
337 irq_idx = irq_grp->irqs[j];
338 synchronize_irq(ab->irq_num[irq_idx]);
343 static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab)
347 for (i = 0; i < ab->hw_params.ce_count; i++) {
348 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
350 ath11k_ahb_ce_irq_enable(ab, i);
354 static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
358 for (i = 0; i < ab->hw_params.ce_count; i++) {
359 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
361 ath11k_ahb_ce_irq_disable(ab, i);
/* hif .start: arm CE interrupts and post initial RX buffers. */
static int ath11k_ahb_start(struct ath11k_base *ab)
{
	ath11k_ahb_ce_irqs_enable(ab);
	ath11k_ce_rx_post_buf(ab);

	return 0;
}
373 static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
377 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
378 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
380 if (!irq_grp->napi_enabled) {
381 napi_enable(&irq_grp->napi);
382 irq_grp->napi_enabled = true;
384 ath11k_ahb_ext_grp_enable(irq_grp);
/* hif .irq_disable: disable ext IRQs/NAPI, then wait for handlers. */
static void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
{
	__ath11k_ahb_ext_irq_disable(ab);
	ath11k_ahb_sync_ext_irqs(ab);
}
394 static void ath11k_ahb_stop(struct ath11k_base *ab)
396 if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
397 ath11k_ahb_ce_irqs_disable(ab);
398 ath11k_ahb_sync_ce_irqs(ab);
399 ath11k_ahb_kill_tasklets(ab);
400 del_timer_sync(&ab->rx_replenish_retry);
401 ath11k_ce_cleanup_pipes(ab);
404 static int ath11k_ahb_power_up(struct ath11k_base *ab)
406 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
409 ret = rproc_boot(ab_ahb->tgt_rproc);
411 ath11k_err(ab, "failed to boot the remote processor Q6\n");
416 static void ath11k_ahb_power_down(struct ath11k_base *ab)
418 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
420 rproc_shutdown(ab_ahb->tgt_rproc);
423 static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
425 struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
427 cfg->tgt_ce_len = ab->hw_params.target_ce_count;
428 cfg->tgt_ce = ab->hw_params.target_ce_config;
429 cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
430 cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
431 ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
434 static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
438 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
439 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
441 for (j = 0; j < irq_grp->num_irq; j++)
442 free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
444 netif_napi_del(&irq_grp->napi);
445 free_netdev(irq_grp->napi_ndev);
449 static void ath11k_ahb_free_irq(struct ath11k_base *ab)
454 if (ab->hw_params.hybrid_bus_type)
455 return ath11k_pcic_free_irq(ab);
457 for (i = 0; i < ab->hw_params.ce_count; i++) {
458 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
460 irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
461 free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
464 ath11k_ahb_free_ext_irq(ab);
467 static void ath11k_ahb_ce_tasklet(struct tasklet_struct *t)
469 struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
471 ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
473 ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
476 static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg)
478 struct ath11k_ce_pipe *ce_pipe = arg;
480 /* last interrupt received for this CE */
481 ce_pipe->timestamp = jiffies;
483 ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
485 tasklet_schedule(&ce_pipe->intr_tq);
490 static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
492 struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
493 struct ath11k_ext_irq_grp,
495 struct ath11k_base *ab = irq_grp->ab;
498 work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
499 if (work_done < budget) {
500 napi_complete_done(napi, work_done);
501 ath11k_ahb_ext_grp_enable(irq_grp);
504 if (work_done > budget)
510 static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
512 struct ath11k_ext_irq_grp *irq_grp = arg;
514 /* last interrupt received for this group */
515 irq_grp->timestamp = jiffies;
517 ath11k_ahb_ext_grp_disable(irq_grp);
519 napi_schedule(&irq_grp->napi);
524 static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
526 struct ath11k_hw_params *hw = &ab->hw_params;
531 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
532 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
538 irq_grp->napi_ndev = alloc_netdev_dummy(0);
539 if (!irq_grp->napi_ndev)
542 netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
543 ath11k_ahb_ext_grp_napi_poll);
545 for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
546 if (ab->hw_params.ring_mask->tx[i] & BIT(j)) {
547 irq_grp->irqs[num_irq++] =
548 wbm2host_tx_completions_ring1 - j;
551 if (ab->hw_params.ring_mask->rx[i] & BIT(j)) {
552 irq_grp->irqs[num_irq++] =
553 reo2host_destination_ring1 - j;
556 if (ab->hw_params.ring_mask->rx_err[i] & BIT(j))
557 irq_grp->irqs[num_irq++] = reo2host_exception;
559 if (ab->hw_params.ring_mask->rx_wbm_rel[i] & BIT(j))
560 irq_grp->irqs[num_irq++] = wbm2host_rx_release;
562 if (ab->hw_params.ring_mask->reo_status[i] & BIT(j))
563 irq_grp->irqs[num_irq++] = reo2host_status;
565 if (j < ab->hw_params.max_radios) {
566 if (ab->hw_params.ring_mask->rxdma2host[i] & BIT(j)) {
567 irq_grp->irqs[num_irq++] =
568 rxdma2host_destination_ring_mac1 -
569 ath11k_hw_get_mac_from_pdev_id(hw, j);
572 if (ab->hw_params.ring_mask->host2rxdma[i] & BIT(j)) {
573 irq_grp->irqs[num_irq++] =
574 host2rxdma_host_buf_ring_mac1 -
575 ath11k_hw_get_mac_from_pdev_id(hw, j);
578 if (ab->hw_params.ring_mask->rx_mon_status[i] & BIT(j)) {
579 irq_grp->irqs[num_irq++] =
580 ppdu_end_interrupts_mac1 -
581 ath11k_hw_get_mac_from_pdev_id(hw, j);
582 irq_grp->irqs[num_irq++] =
583 rxdma2host_monitor_status_ring_mac1 -
584 ath11k_hw_get_mac_from_pdev_id(hw, j);
588 irq_grp->num_irq = num_irq;
590 for (j = 0; j < irq_grp->num_irq; j++) {
591 int irq_idx = irq_grp->irqs[j];
593 irq = platform_get_irq_byname(ab->pdev,
595 ab->irq_num[irq_idx] = irq;
596 irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
597 ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
599 irq_name[irq_idx], irq_grp);
601 ath11k_err(ab, "failed request_irq for %d\n",
610 static int ath11k_ahb_config_irq(struct ath11k_base *ab)
615 if (ab->hw_params.hybrid_bus_type)
616 return ath11k_pcic_config_irq(ab);
618 /* Configure CE irqs */
619 for (i = 0; i < ab->hw_params.ce_count; i++) {
620 struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
622 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
625 irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
627 tasklet_setup(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet);
628 irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
629 ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler,
630 IRQF_TRIGGER_RISING, irq_name[irq_idx],
635 ab->irq_num[irq_idx] = irq;
638 /* Configure external interrupts */
639 ret = ath11k_ahb_config_ext_irq(ab);
644 static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
645 u8 *ul_pipe, u8 *dl_pipe)
647 const struct service_to_pipe *entry;
648 bool ul_set = false, dl_set = false;
651 for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
652 entry = &ab->hw_params.svc_to_ce_map[i];
654 if (__le32_to_cpu(entry->service_id) != service_id)
657 switch (__le32_to_cpu(entry->pipedir)) {
662 *dl_pipe = __le32_to_cpu(entry->pipenum);
667 *ul_pipe = __le32_to_cpu(entry->pipenum);
673 *dl_pipe = __le32_to_cpu(entry->pipenum);
674 *ul_pipe = __le32_to_cpu(entry->pipenum);
681 if (WARN_ON(!ul_set || !dl_set))
687 static int ath11k_ahb_hif_suspend(struct ath11k_base *ab)
689 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
694 if (!device_may_wakeup(ab->dev))
697 wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
699 ret = enable_irq_wake(wake_irq);
701 ath11k_err(ab, "failed to enable wakeup irq :%d\n", ret);
705 value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
706 ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
707 value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_ENTER,
708 ATH11K_AHB_SMP2P_SMEM_MSG);
710 ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
711 ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
713 ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
717 ath11k_dbg(ab, ATH11K_DBG_AHB, "device suspended\n");
722 static int ath11k_ahb_hif_resume(struct ath11k_base *ab)
724 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
729 if (!device_may_wakeup(ab->dev))
732 wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
734 ret = disable_irq_wake(wake_irq);
736 ath11k_err(ab, "failed to disable wakeup irq: %d\n", ret);
740 reinit_completion(&ab->wow.wakeup_completed);
742 value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
743 ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
744 value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_EXIT,
745 ATH11K_AHB_SMP2P_SMEM_MSG);
747 ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
748 ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
750 ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
754 ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
756 ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
760 ath11k_dbg(ab, ATH11K_DBG_AHB, "device resumed\n");
765 static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = {
766 .start = ath11k_ahb_start,
767 .stop = ath11k_ahb_stop,
768 .read32 = ath11k_ahb_read32,
769 .write32 = ath11k_ahb_write32,
771 .irq_enable = ath11k_ahb_ext_irq_enable,
772 .irq_disable = ath11k_ahb_ext_irq_disable,
773 .map_service_to_pipe = ath11k_ahb_map_service_to_pipe,
774 .power_down = ath11k_ahb_power_down,
775 .power_up = ath11k_ahb_power_up,
778 static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
779 .start = ath11k_pcic_start,
780 .stop = ath11k_pcic_stop,
781 .read32 = ath11k_pcic_read32,
782 .write32 = ath11k_pcic_write32,
784 .irq_enable = ath11k_pcic_ext_irq_enable,
785 .irq_disable = ath11k_pcic_ext_irq_disable,
786 .get_msi_address = ath11k_pcic_get_msi_address,
787 .get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
788 .map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
789 .power_down = ath11k_ahb_power_down,
790 .power_up = ath11k_ahb_power_up,
791 .suspend = ath11k_ahb_hif_suspend,
792 .resume = ath11k_ahb_hif_resume,
793 .ce_irq_enable = ath11k_pci_enable_ce_irqs_except_wake_irq,
794 .ce_irq_disable = ath11k_pci_disable_ce_irqs_except_wake_irq,
797 static int ath11k_core_get_rproc(struct ath11k_base *ab)
799 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
800 struct device *dev = ab->dev;
801 struct rproc *prproc;
802 phandle rproc_phandle;
804 if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
805 ath11k_err(ab, "failed to get q6_rproc handle\n");
809 prproc = rproc_get_by_phandle(rproc_phandle);
811 ath11k_dbg(ab, ATH11K_DBG_AHB, "failed to get rproc, deferring\n");
812 return -EPROBE_DEFER;
814 ab_ahb->tgt_rproc = prproc;
819 static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
821 struct platform_device *pdev = ab->pdev;
822 phys_addr_t msi_addr_pa;
823 dma_addr_t msi_addr_iova;
824 struct resource *res;
829 ret = ath11k_pcic_init_msi_config(ab);
831 ath11k_err(ab, "failed to init msi config: %d\n", ret);
835 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
837 ath11k_err(ab, "failed to fetch msi_addr\n");
841 msi_addr_pa = res->start;
842 msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE,
844 if (dma_mapping_error(ab->dev, msi_addr_iova))
847 ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova);
848 ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova);
850 ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop);
854 ab->pci.msi.ep_base_data = int_prop + 32;
856 for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
857 ret = platform_get_irq(pdev, i);
861 ab->pci.msi.irqs[i] = ret;
864 set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
869 static int ath11k_ahb_setup_smp2p_handle(struct ath11k_base *ab)
871 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
873 if (!ab->hw_params.smp2p_wow_exit)
876 ab_ahb->smp2p_info.smem_state = qcom_smem_state_get(ab->dev, "wlan-smp2p-out",
877 &ab_ahb->smp2p_info.smem_bit);
878 if (IS_ERR(ab_ahb->smp2p_info.smem_state)) {
879 ath11k_err(ab, "failed to fetch smem state: %ld\n",
880 PTR_ERR(ab_ahb->smp2p_info.smem_state));
881 return PTR_ERR(ab_ahb->smp2p_info.smem_state);
887 static void ath11k_ahb_release_smp2p_handle(struct ath11k_base *ab)
889 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
891 if (!ab->hw_params.smp2p_wow_exit)
894 qcom_smem_state_put(ab_ahb->smp2p_info.smem_state);
897 static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
899 struct platform_device *pdev = ab->pdev;
900 struct resource *mem_res;
903 if (ab->hw_params.hybrid_bus_type)
904 return ath11k_ahb_setup_msi_resources(ab);
906 mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
908 dev_err(&pdev->dev, "ioremap error\n");
913 ab->mem_len = resource_size(mem_res);
918 static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
920 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
921 struct device *dev = ab->dev;
922 struct device_node *node;
926 node = of_parse_phandle(dev->of_node, "memory-region", 0);
930 ret = of_address_to_resource(node, 0, &r);
933 dev_err(dev, "failed to resolve msa fixed region\n");
937 ab_ahb->fw.msa_paddr = r.start;
938 ab_ahb->fw.msa_size = resource_size(&r);
940 node = of_parse_phandle(dev->of_node, "memory-region", 1);
944 ret = of_address_to_resource(node, 0, &r);
947 dev_err(dev, "failed to resolve ce fixed region\n");
951 ab_ahb->fw.ce_paddr = r.start;
952 ab_ahb->fw.ce_size = resource_size(&r);
957 static int ath11k_ahb_ce_remap(struct ath11k_base *ab)
959 const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
960 struct platform_device *pdev = ab->pdev;
963 /* no separate CE register space */
964 ab->mem_ce = ab->mem;
968 /* ce register space is moved out of wcss unlike ipq8074 or ipq6018
969 * and the space is not contiguous, hence remapping the CE registers
970 * to a new space for accessing them.
972 ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
974 dev_err(&pdev->dev, "ce ioremap error\n");
981 static void ath11k_ahb_ce_unmap(struct ath11k_base *ab)
983 if (ab->hw_params.ce_remap)
987 static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
989 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
990 struct device *host_dev = ab->dev;
991 struct platform_device_info info = {0};
992 struct iommu_domain *iommu_dom;
993 struct platform_device *pdev;
994 struct device_node *node;
997 /* Chipsets not requiring MSA need not initialize
998 * MSA resources, return success in such cases.
1000 if (!ab->hw_params.fixed_fw_mem)
1003 node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
1005 ab_ahb->fw.use_tz = true;
1009 ret = ath11k_ahb_setup_msa_resources(ab);
1011 ath11k_err(ab, "failed to setup msa resources\n");
1015 info.fwnode = &node->fwnode;
1016 info.parent = host_dev;
1017 info.name = node->name;
1018 info.dma_mask = DMA_BIT_MASK(32);
1020 pdev = platform_device_register_full(&info);
1023 return PTR_ERR(pdev);
1026 ret = of_dma_configure(&pdev->dev, node, true);
1028 ath11k_err(ab, "dma configure fail: %d\n", ret);
1029 goto err_unregister;
1032 ab_ahb->fw.dev = &pdev->dev;
1034 iommu_dom = iommu_paging_domain_alloc(ab_ahb->fw.dev);
1035 if (IS_ERR(iommu_dom)) {
1036 ath11k_err(ab, "failed to allocate iommu domain\n");
1037 ret = PTR_ERR(iommu_dom);
1038 goto err_unregister;
1041 ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev);
1043 ath11k_err(ab, "could not attach device: %d\n", ret);
1044 goto err_iommu_free;
1047 ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
1048 ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
1049 IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
1051 ath11k_err(ab, "failed to map firmware region: %d\n", ret);
1052 goto err_iommu_detach;
1055 ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
1056 ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
1057 IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
1059 ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
1060 goto err_iommu_unmap;
1063 ab_ahb->fw.use_tz = false;
1064 ab_ahb->fw.iommu_domain = iommu_dom;
1070 iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
1073 iommu_detach_device(iommu_dom, ab_ahb->fw.dev);
1076 iommu_domain_free(iommu_dom);
1079 platform_device_unregister(pdev);
1085 static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
1087 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
1088 struct iommu_domain *iommu;
1089 size_t unmapped_size;
1091 /* Chipsets not requiring MSA would have not initialized
1092 * MSA resources, return success in such cases.
1094 if (!ab->hw_params.fixed_fw_mem)
1097 if (ab_ahb->fw.use_tz)
1100 iommu = ab_ahb->fw.iommu_domain;
1102 unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
1103 if (unmapped_size != ab_ahb->fw.msa_size)
1104 ath11k_err(ab, "failed to unmap firmware: %zu\n",
1107 unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size);
1108 if (unmapped_size != ab_ahb->fw.ce_size)
1109 ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n",
1112 iommu_detach_device(iommu, ab_ahb->fw.dev);
1113 iommu_domain_free(iommu);
1115 platform_device_unregister(to_platform_device(ab_ahb->fw.dev));
1120 static int ath11k_ahb_probe(struct platform_device *pdev)
1122 struct ath11k_base *ab;
1123 const struct ath11k_hif_ops *hif_ops;
1124 const struct ath11k_pci_ops *pci_ops;
1125 enum ath11k_hw_rev hw_rev;
1128 hw_rev = (uintptr_t)device_get_match_data(&pdev->dev);
1131 case ATH11K_HW_IPQ8074:
1132 case ATH11K_HW_IPQ6018_HW10:
1133 case ATH11K_HW_IPQ5018_HW10:
1134 hif_ops = &ath11k_ahb_hif_ops_ipq8074;
1137 case ATH11K_HW_WCN6750_HW10:
1138 hif_ops = &ath11k_ahb_hif_ops_wcn6750;
1139 pci_ops = &ath11k_ahb_pci_ops_wcn6750;
1142 dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev);
1146 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1148 dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n");
1152 ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb),
1155 dev_err(&pdev->dev, "failed to allocate ath11k base\n");
1159 ab->hif.ops = hif_ops;
1161 ab->hw_rev = hw_rev;
1162 ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
1163 platform_set_drvdata(pdev, ab);
1165 ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
1167 ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
1171 ret = ath11k_core_pre_init(ab);
1175 ret = ath11k_ahb_setup_resources(ab);
1179 ret = ath11k_ahb_ce_remap(ab);
1183 ret = ath11k_ahb_fw_resources_init(ab);
1187 ret = ath11k_ahb_setup_smp2p_handle(ab);
1191 ret = ath11k_hal_srng_init(ab);
1193 goto err_release_smp2p_handle;
1195 ret = ath11k_ce_alloc_pipes(ab);
1197 ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
1198 goto err_hal_srng_deinit;
1201 ath11k_ahb_init_qmi_ce_config(ab);
1203 ret = ath11k_core_get_rproc(ab);
1205 ath11k_err(ab, "failed to get rproc: %d\n", ret);
1209 ret = ath11k_core_init(ab);
1211 ath11k_err(ab, "failed to init core: %d\n", ret);
1215 ret = ath11k_ahb_config_irq(ab);
1217 ath11k_err(ab, "failed to configure irq: %d\n", ret);
1221 ath11k_qmi_fwreset_from_cold_boot(ab);
1226 ath11k_ce_free_pipes(ab);
1228 err_hal_srng_deinit:
1229 ath11k_hal_srng_deinit(ab);
1231 err_release_smp2p_handle:
1232 ath11k_ahb_release_smp2p_handle(ab);
1235 ath11k_ahb_fw_resource_deinit(ab);
1238 ath11k_ahb_ce_unmap(ab);
1241 ath11k_core_free(ab);
1242 platform_set_drvdata(pdev, NULL);
1247 static void ath11k_ahb_remove_prepare(struct ath11k_base *ab)
1251 if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
1252 left = wait_for_completion_timeout(&ab->driver_recovery,
1253 ATH11K_AHB_RECOVERY_TIMEOUT);
1255 ath11k_warn(ab, "failed to receive recovery response completion\n");
1258 set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
1259 cancel_work_sync(&ab->restart_work);
1260 cancel_work_sync(&ab->qmi.event_work);
1263 static void ath11k_ahb_free_resources(struct ath11k_base *ab)
1265 struct platform_device *pdev = ab->pdev;
1267 ath11k_ahb_free_irq(ab);
1268 ath11k_hal_srng_deinit(ab);
1269 ath11k_ahb_release_smp2p_handle(ab);
1270 ath11k_ahb_fw_resource_deinit(ab);
1271 ath11k_ce_free_pipes(ab);
1272 ath11k_ahb_ce_unmap(ab);
1274 ath11k_core_free(ab);
1275 platform_set_drvdata(pdev, NULL);
1278 static void ath11k_ahb_remove(struct platform_device *pdev)
1280 struct ath11k_base *ab = platform_get_drvdata(pdev);
1282 if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
1283 ath11k_ahb_power_down(ab);
1284 ath11k_debugfs_soc_destroy(ab);
1285 ath11k_qmi_deinit_service(ab);
1289 ath11k_ahb_remove_prepare(ab);
1290 ath11k_core_deinit(ab);
1293 ath11k_ahb_free_resources(ab);
1296 static void ath11k_ahb_shutdown(struct platform_device *pdev)
1298 struct ath11k_base *ab = platform_get_drvdata(pdev);
1300 /* platform shutdown() & remove() are mutually exclusive.
1301 * remove() is invoked during rmmod & shutdown() during
1302 * system reboot/shutdown.
1304 ath11k_ahb_remove_prepare(ab);
1306 if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)))
1307 goto free_resources;
1309 ath11k_core_deinit(ab);
1312 ath11k_ahb_free_resources(ab);
1315 static struct platform_driver ath11k_ahb_driver = {
1318 .of_match_table = ath11k_ahb_of_match,
1320 .probe = ath11k_ahb_probe,
1321 .remove = ath11k_ahb_remove,
1322 .shutdown = ath11k_ahb_shutdown,
1325 module_platform_driver(ath11k_ahb_driver);
1327 MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices");
1328 MODULE_LICENSE("Dual BSD/GPL");