1 // SPDX-License-Identifier: BSD-3-Clause-Clear
3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
10 const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
11 /* CE0: host->target HTC control and raw streams */
13 .flags = CE_ATTR_FLAGS,
19 /* CE1: target->host HTT + HTC control */
21 .flags = CE_ATTR_FLAGS,
25 .recv_cb = ath11k_htc_rx_completion_handler,
28 /* CE2: target->host WMI */
30 .flags = CE_ATTR_FLAGS,
34 .recv_cb = ath11k_htc_rx_completion_handler,
37 /* CE3: host->target WMI (mac0) */
39 .flags = CE_ATTR_FLAGS,
45 /* CE4: host->target HTT */
47 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
53 /* CE5: target->host pktlog */
55 .flags = CE_ATTR_FLAGS,
59 .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
62 /* CE6: target autonomous hif_memcpy */
64 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
70 /* CE7: host->target WMI (mac1) */
72 .flags = CE_ATTR_FLAGS,
78 /* CE8: target autonomous hif_memcpy */
80 .flags = CE_ATTR_FLAGS,
86 /* CE9: host->target WMI (mac2) */
88 .flags = CE_ATTR_FLAGS,
94 /* CE10: target->host HTT */
96 .flags = CE_ATTR_FLAGS,
100 .recv_cb = ath11k_htc_rx_completion_handler,
105 .flags = CE_ATTR_FLAGS,
112 const struct ce_attr ath11k_host_ce_config_qca6390[] = {
113 /* CE0: host->target HTC control and raw streams */
115 .flags = CE_ATTR_FLAGS,
121 /* CE1: target->host HTT + HTC control */
123 .flags = CE_ATTR_FLAGS,
126 .dest_nentries = 512,
127 .recv_cb = ath11k_htc_rx_completion_handler,
130 /* CE2: target->host WMI */
132 .flags = CE_ATTR_FLAGS,
135 .dest_nentries = 512,
136 .recv_cb = ath11k_htc_rx_completion_handler,
139 /* CE3: host->target WMI (mac0) */
141 .flags = CE_ATTR_FLAGS,
147 /* CE4: host->target HTT */
149 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
150 .src_nentries = 2048,
155 /* CE5: target->host pktlog */
157 .flags = CE_ATTR_FLAGS,
160 .dest_nentries = 512,
161 .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
164 /* CE6: target autonomous hif_memcpy */
166 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
172 /* CE7: host->target WMI (mac1) */
174 .flags = CE_ATTR_FLAGS,
180 /* CE8: target autonomous hif_memcpy */
182 .flags = CE_ATTR_FLAGS,
190 const struct ce_attr ath11k_host_ce_config_qcn9074[] = {
191 /* CE0: host->target HTC control and raw streams */
193 .flags = CE_ATTR_FLAGS,
199 /* CE1: target->host HTT + HTC control */
201 .flags = CE_ATTR_FLAGS,
204 .dest_nentries = 512,
205 .recv_cb = ath11k_htc_rx_completion_handler,
208 /* CE2: target->host WMI */
210 .flags = CE_ATTR_FLAGS,
214 .recv_cb = ath11k_htc_rx_completion_handler,
217 /* CE3: host->target WMI (mac0) */
219 .flags = CE_ATTR_FLAGS,
225 /* CE4: host->target HTT */
227 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
228 .src_nentries = 2048,
233 /* CE5: target->host pktlog */
235 .flags = CE_ATTR_FLAGS,
238 .dest_nentries = 512,
239 .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
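/* Reading aid for the three tables above: src_nentries sizes the
 * host->target ring of a pipe, dest_nentries sizes the target->host
 * ring, and recv_cb is invoked for each completed rx buffer.  The
 * hw_params for the running chip presumably point at one of these
 * arrays, and the rest of this file indexes it as
 * ab->hw_params.host_ce_config[ce_id].
 */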
243 static bool ath11k_ce_need_shadow_fix(int ce_id)
245 /* only CE4 needs the shadow workaround */
251 void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab)
255 if (!ab->hw_params.supports_shadow_regs)
258 for (i = 0; i < ab->hw_params.ce_count; i++)
259 if (ath11k_ce_need_shadow_fix(i))
260 ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
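/* Only CE4 needs the shadow workaround (see ath11k_ce_need_shadow_fix()
 * above): it is the interrupt-disabled host->target HTT pipe, and the
 * per-CE hp_timer presumably exists to make sure its head-pointer update
 * still reaches the target through the shadow register when no further
 * sends follow.
 */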
263 static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
264 struct sk_buff *skb, dma_addr_t paddr)
266 struct ath11k_base *ab = pipe->ab;
267 struct ath11k_ce_ring *ring = pipe->dest_ring;
268 struct hal_srng *srng;
269 unsigned int write_index;
270 unsigned int nentries_mask = ring->nentries_mask;
274 lockdep_assert_held(&ab->ce.ce_lock);
276 write_index = ring->write_index;
278 srng = &ab->hal.srng_list[ring->hal_ring_id];
280 spin_lock_bh(&srng->lock);
282 ath11k_hal_srng_access_begin(ab, srng);
284 if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
289 desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
295 ath11k_hal_ce_dst_set_desc(desc, paddr);
297 ring->skb[write_index] = skb;
298 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
299 ring->write_index = write_index;
301 pipe->rx_buf_needed--;
305 ath11k_hal_srng_access_end(ab, srng);
307 spin_unlock_bh(&srng->lock);
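/* CE_RING_IDX_INCR() above advances an index and wraps it with the
 * power-of-two mask, presumably ((idx + 1) & nentries_mask): for a
 * 512-entry ring the mask is 511, so index 511 + 1 wraps back to 0
 * without a modulo.
 */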
312 static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
314 struct ath11k_base *ab = pipe->ab;
319 if (!(pipe->dest_ring || pipe->status_ring))
322 spin_lock_bh(&ab->ce.ce_lock);
323 while (pipe->rx_buf_needed) {
324 skb = dev_alloc_skb(pipe->buf_sz);
330 WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
332 paddr = dma_map_single(ab->dev, skb->data,
333 skb->len + skb_tailroom(skb),
335 if (unlikely(dma_mapping_error(ab->dev, paddr))) {
336 ath11k_warn(ab, "failed to dma map ce rx buf\n");
337 dev_kfree_skb_any(skb);
342 ATH11K_SKB_RXCB(skb)->paddr = paddr;
344 ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
347 ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
348 dma_unmap_single(ab->dev, paddr,
349 skb->len + skb_tailroom(skb),
351 dev_kfree_skb_any(skb);
357 spin_unlock_bh(&ab->ce.ce_lock);
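/* Each rx buffer is mapped for its full skb->len + skb_tailroom(skb) so
 * the target can fill anything up to pipe->buf_sz (src_sz_max from the
 * ce_attr table); the WARN_ON_ONCE() above only sanity-checks the
 * 4-byte alignment the CE hardware presumably expects for skb->data.
 */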
361 static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
362 struct sk_buff **skb, int *nbytes)
364 struct ath11k_base *ab = pipe->ab;
365 struct hal_srng *srng;
366 unsigned int sw_index;
367 unsigned int nentries_mask;
371 spin_lock_bh(&ab->ce.ce_lock);
373 sw_index = pipe->dest_ring->sw_index;
374 nentries_mask = pipe->dest_ring->nentries_mask;
376 srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];
378 spin_lock_bh(&srng->lock);
380 ath11k_hal_srng_access_begin(ab, srng);
382 desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
388 *nbytes = ath11k_hal_ce_dst_status_get_length(desc);
394 *skb = pipe->dest_ring->skb[sw_index];
395 pipe->dest_ring->skb[sw_index] = NULL;
397 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
398 pipe->dest_ring->sw_index = sw_index;
400 pipe->rx_buf_needed++;
402 ath11k_hal_srng_access_end(ab, srng);
404 spin_unlock_bh(&srng->lock);
406 spin_unlock_bh(&ab->ce.ce_lock);
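/* Lock ordering here matches the enqueue path: ab->ce.ce_lock protects
 * the software state (sw_index, write_index, the skb arrays and
 * rx_buf_needed), while srng->lock serializes access to the HAL ring,
 * and both paths take ce_lock first, srng->lock second.
 */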
411 static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
413 struct ath11k_base *ab = pipe->ab;
415 struct sk_buff_head list;
416 unsigned int nbytes, max_nbytes;
419 __skb_queue_head_init(&list);
420 while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
421 max_nbytes = skb->len + skb_tailroom(skb);
422 dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
423 max_nbytes, DMA_FROM_DEVICE);
425 if (unlikely(max_nbytes < nbytes)) {
426 ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)\n",
428 dev_kfree_skb_any(skb);
432 skb_put(skb, nbytes);
433 __skb_queue_tail(&list, skb);
436 while ((skb = __skb_dequeue(&list))) {
437 ath11k_dbg(ab, ATH11K_DBG_AHB, "rx ce pipe %d len %d\n",
438 pipe->pipe_num, skb->len);
439 pipe->recv_cb(ab, skb);
442 ret = ath11k_ce_rx_post_pipe(pipe);
443 if (ret && ret != -ENOSPC) {
444 ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
445 pipe->pipe_num, ret);
446 mod_timer(&ab->rx_replenish_retry,
447 jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
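/* Completions are drained in two phases: ath11k_ce_completed_recv_next()
 * takes and drops the CE/srng locks per buffer while the skbs are
 * batched on a local list, and only afterwards is pipe->recv_cb() called
 * with no CE lock held, so HTC/HTT/WMI processing never runs under a
 * ring spinlock.
 */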
451 static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
453 struct ath11k_base *ab = pipe->ab;
454 struct hal_srng *srng;
455 unsigned int sw_index;
456 unsigned int nentries_mask;
460 spin_lock_bh(&ab->ce.ce_lock);
462 sw_index = pipe->src_ring->sw_index;
463 nentries_mask = pipe->src_ring->nentries_mask;
465 srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
467 spin_lock_bh(&srng->lock);
469 ath11k_hal_srng_access_begin(ab, srng);
471 desc = ath11k_hal_srng_src_reap_next(ab, srng);
477 skb = pipe->src_ring->skb[sw_index];
479 pipe->src_ring->skb[sw_index] = NULL;
481 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
482 pipe->src_ring->sw_index = sw_index;
485 spin_unlock_bh(&srng->lock);
487 spin_unlock_bh(&ab->ce.ce_lock);
492 static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe)
494 struct ath11k_base *ab = pipe->ab;
497 while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
501 dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
503 dev_kfree_skb_any(skb);
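/* The IS_ERR() loop above ends once ath11k_ce_completed_send_next()
 * finds no more reaped source-ring entries and returns an ERR_PTR;
 * every completed skb is unmapped here (DMA_TO_DEVICE, presumably) and
 * freed.
 */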
507 static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id,
508 struct hal_srng_params *ring_params)
511 u32 msi_data_count, msi_data_idx;
517 ret = ath11k_get_user_msi_vector(ab, "CE",
518 &msi_data_count, &msi_data_start,
524 ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
525 ath11k_get_ce_msi_idx(ab, ce_id, &msi_data_idx);
527 ring_params->msi_addr = addr_lo;
528 ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
529 ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
530 ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
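/* Illustrative example of the composition above (made-up values): with
 * addr_lo = 0xfee00000, addr_hi = 0, msi_data_start = 1,
 * msi_data_count = 5 and msi_data_idx = 7, the ring is programmed with
 * msi_addr = 0xfee00000 and msi_data = (7 % 5) + 1 = 3.
 */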
533 static int ath11k_ce_init_ring(struct ath11k_base *ab,
534 struct ath11k_ce_ring *ce_ring,
535 int ce_id, enum hal_ring_type type)
537 struct hal_srng_params params = { 0 };
540 params.ring_base_paddr = ce_ring->base_addr_ce_space;
541 params.ring_base_vaddr = ce_ring->base_addr_owner_space;
542 params.num_entries = ce_ring->nentries;
544 if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
545 ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);
549 if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
550 params.intr_batch_cntr_thres_entries = 1;
553 params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max;
554 if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
555 params.intr_timer_thres_us = 1024;
556 params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
557 params.low_threshold = ce_ring->nentries - 3;
560 case HAL_CE_DST_STATUS:
561 if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
562 params.intr_batch_cntr_thres_entries = 1;
563 params.intr_timer_thres_us = 0x1000;
567 ath11k_warn(ab, "Invalid CE ring type %d\n", type);
571 /* TODO: Init other params needed by HAL to init the ring */
573 ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
575 ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
580 ce_ring->hal_ring_id = ret;
582 if (ab->hw_params.supports_shadow_regs &&
583 ath11k_ce_need_shadow_fix(ce_id))
584 ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id],
585 ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
586 ce_ring->hal_ring_id);
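/* ath11k_hal_srng_setup() returns the global srng ring id on success;
 * it is stored in ce_ring->hal_ring_id and later used to look the ring
 * up again via ab->hal.srng_list[] in the enqueue/dequeue paths above.
 */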
591 static struct ath11k_ce_ring *
592 ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
594 struct ath11k_ce_ring *ce_ring;
595 dma_addr_t base_addr;
597 ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
599 return ERR_PTR(-ENOMEM);
601 ce_ring->nentries = nentries;
602 ce_ring->nentries_mask = nentries - 1;
604 /* Legacy platforms that do not support cache
605 * coherent DMA are unsupported
607 ce_ring->base_addr_owner_space_unaligned =
608 dma_alloc_coherent(ab->dev,
609 nentries * desc_sz + CE_DESC_RING_ALIGN,
610 &base_addr, GFP_KERNEL);
611 if (!ce_ring->base_addr_owner_space_unaligned) {
613 return ERR_PTR(-ENOMEM);
616 ce_ring->base_addr_ce_space_unaligned = base_addr;
618 ce_ring->base_addr_owner_space = PTR_ALIGN(
619 ce_ring->base_addr_owner_space_unaligned,
621 ce_ring->base_addr_ce_space = ALIGN(
622 ce_ring->base_addr_ce_space_unaligned,
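/* The extra CE_DESC_RING_ALIGN bytes allocated above give PTR_ALIGN()
 * and ALIGN() room to round both the CPU and the device view of the
 * descriptor base up to the alignment boundary while still leaving
 * space for nentries * desc_sz descriptors; e.g. assuming an alignment
 * of 8 and a coherent buffer ending in ...1004, the aligned base
 * becomes ...1008.
 */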
628 static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
630 struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
631 const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
632 struct ath11k_ce_ring *ring;
636 pipe->attr_flags = attr->flags;
638 if (attr->src_nentries) {
639 pipe->send_cb = ath11k_ce_send_done_cb;
640 nentries = roundup_pow_of_two(attr->src_nentries);
641 desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
642 ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
644 return PTR_ERR(ring);
645 pipe->src_ring = ring;
648 if (attr->dest_nentries) {
649 pipe->recv_cb = attr->recv_cb;
650 nentries = roundup_pow_of_two(attr->dest_nentries);
651 desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
652 ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
654 return PTR_ERR(ring);
655 pipe->dest_ring = ring;
657 desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
658 ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
660 return PTR_ERR(ring);
661 pipe->status_ring = ring;
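/* roundup_pow_of_two() keeps nentries a power of two so that
 * nentries_mask = nentries - 1 (set in ath11k_ce_alloc_ring()) works as
 * a wrap mask; the depths in the ce_attr tables (e.g. 512, 2048) are
 * already powers of two, while an odd value such as 100 would become
 * 128.
 */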
667 void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
669 struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
675 ath11k_ce_recv_process_cb(pipe);
678 void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
680 struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
682 if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
685 EXPORT_SYMBOL(ath11k_ce_per_engine_service);
687 int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
690 struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
691 struct hal_srng *srng;
693 unsigned int write_index, sw_index;
694 unsigned int nentries_mask;
696 u8 byte_swap_data = 0;
699 /* Check if some entries could be regained by handling tx completion if
700 * the CE has interrupts disabled and the number of used entries exceeds the
701 * defined usage threshold.
703 if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
704 spin_lock_bh(&ab->ce.ce_lock);
705 write_index = pipe->src_ring->write_index;
707 sw_index = pipe->src_ring->sw_index;
709 if (write_index >= sw_index)
710 num_used = write_index - sw_index;
712 num_used = pipe->src_ring->nentries - sw_index +
715 spin_unlock_bh(&ab->ce.ce_lock);
717 if (num_used > ATH11K_CE_USAGE_THRESHOLD)
718 ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
721 if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
724 spin_lock_bh(&ab->ce.ce_lock);
726 write_index = pipe->src_ring->write_index;
727 nentries_mask = pipe->src_ring->nentries_mask;
729 srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
731 spin_lock_bh(&srng->lock);
733 ath11k_hal_srng_access_begin(ab, srng);
735 if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
736 ath11k_hal_srng_access_end(ab, srng);
741 desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
743 ath11k_hal_srng_access_end(ab, srng);
748 if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
751 ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
752 skb->len, transfer_id, byte_swap_data);
754 pipe->src_ring->skb[write_index] = skb;
755 pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
758 ath11k_hal_srng_access_end(ab, srng);
760 if (ath11k_ce_need_shadow_fix(pipe_id))
761 ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]);
763 spin_unlock_bh(&srng->lock);
765 spin_unlock_bh(&ab->ce.ce_lock);
770 spin_unlock_bh(&srng->lock);
772 spin_unlock_bh(&ab->ce.ce_lock);
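/* Worked example for the occupancy check at the top of this function:
 * with a 1024-entry source ring, sw_index = 1020 and a wrapped
 * write_index of 5, num_used = 1024 - 1020 + write_index = 9 entries in
 * flight; once that exceeds ATH11K_CE_USAGE_THRESHOLD the
 * interrupt-less pipe reaps its own completions via
 * ath11k_ce_poll_send_completed().
 */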
777 static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
779 struct ath11k_base *ab = pipe->ab;
780 struct ath11k_ce_ring *ring = pipe->dest_ring;
784 if (!(ring && pipe->buf_sz))
787 for (i = 0; i < ring->nentries; i++) {
793 dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
794 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
795 dev_kfree_skb_any(skb);
799 static void ath11k_ce_shadow_config(struct ath11k_base *ab)
803 for (i = 0; i < ab->hw_params.ce_count; i++) {
804 if (ab->hw_params.host_ce_config[i].src_nentries)
805 ath11k_hal_srng_update_shadow_config(ab,
808 if (ab->hw_params.host_ce_config[i].dest_nentries) {
809 ath11k_hal_srng_update_shadow_config(ab,
812 ath11k_hal_srng_update_shadow_config(ab,
813 HAL_CE_DST_STATUS, i);
818 void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
819 u32 **shadow_cfg, u32 *shadow_cfg_len)
821 if (!ab->hw_params.supports_shadow_regs)
824 ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
826 /* shadow is already configured */
830 /* shadow isn't configured yet, configure now.
831 * non-CE srngs are configured first, then
834 ath11k_hal_srng_shadow_config(ab);
835 ath11k_ce_shadow_config(ab);
837 /* get the shadow configuration */
838 ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
840 EXPORT_SYMBOL(ath11k_ce_get_shadow_config);
842 void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
844 struct ath11k_ce_pipe *pipe;
847 ath11k_ce_stop_shadow_timers(ab);
849 for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) {
850 pipe = &ab->ce.ce_pipe[pipe_num];
851 ath11k_ce_rx_pipe_cleanup(pipe);
853 /* Clean up any src CEs that have interrupts disabled */
854 ath11k_ce_poll_send_completed(ab, pipe_num);
856 /* NOTE: Should we also clean up the tx buffers in all pipes? */
859 EXPORT_SYMBOL(ath11k_ce_cleanup_pipes);
861 void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
863 struct ath11k_ce_pipe *pipe;
867 for (i = 0; i < ab->hw_params.ce_count; i++) {
868 pipe = &ab->ce.ce_pipe[i];
869 ret = ath11k_ce_rx_post_pipe(pipe);
874 ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
876 mod_timer(&ab->rx_replenish_retry,
877 jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
883 EXPORT_SYMBOL(ath11k_ce_rx_post_buf);
885 void ath11k_ce_rx_replenish_retry(struct timer_list *t)
887 struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);
889 ath11k_ce_rx_post_buf(ab);
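/* ath11k_ce_rx_replenish_retry() is the callback behind the mod_timer()
 * calls in this file; it is presumably registered once at init time with
 * something like
 *
 *	timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
 *
 * so a failed rx buffer post is simply retried a few jiffies later.
 */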
892 int ath11k_ce_init_pipes(struct ath11k_base *ab)
894 struct ath11k_ce_pipe *pipe;
898 ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2,
899 &ab->qmi.ce_cfg.shadow_reg_v2_len);
901 for (i = 0; i < ab->hw_params.ce_count; i++) {
902 pipe = &ab->ce.ce_pipe[i];
904 if (pipe->src_ring) {
905 ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
908 ath11k_warn(ab, "failed to init src ring: %d\n",
910 /* Should we clear any partial init */
914 pipe->src_ring->write_index = 0;
915 pipe->src_ring->sw_index = 0;
918 if (pipe->dest_ring) {
919 ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
922 ath11k_warn(ab, "failed to init dest ring: %d\n",
924 /* Should we clear any partial init */
928 pipe->rx_buf_needed = pipe->dest_ring->nentries ?
929 pipe->dest_ring->nentries - 2 : 0;
931 pipe->dest_ring->write_index = 0;
932 pipe->dest_ring->sw_index = 0;
935 if (pipe->status_ring) {
936 ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
939 ath11k_warn(ab, "failed to init dest status ring: %d\n",
941 /* Should we clear any partial init */
945 pipe->status_ring->write_index = 0;
946 pipe->status_ring->sw_index = 0;
953 void ath11k_ce_free_pipes(struct ath11k_base *ab)
955 struct ath11k_ce_pipe *pipe;
959 for (i = 0; i < ab->hw_params.ce_count; i++) {
960 pipe = &ab->ce.ce_pipe[i];
962 if (ath11k_ce_need_shadow_fix(i))
963 ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
965 if (pipe->src_ring) {
966 desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
967 dma_free_coherent(ab->dev,
968 pipe->src_ring->nentries * desc_sz +
970 pipe->src_ring->base_addr_owner_space,
971 pipe->src_ring->base_addr_ce_space);
972 kfree(pipe->src_ring);
973 pipe->src_ring = NULL;
976 if (pipe->dest_ring) {
977 desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
978 dma_free_coherent(ab->dev,
979 pipe->dest_ring->nentries * desc_sz +
981 pipe->dest_ring->base_addr_owner_space,
982 pipe->dest_ring->base_addr_ce_space);
983 kfree(pipe->dest_ring);
984 pipe->dest_ring = NULL;
987 if (pipe->status_ring) {
989 ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
990 dma_free_coherent(ab->dev,
991 pipe->status_ring->nentries * desc_sz +
993 pipe->status_ring->base_addr_owner_space,
994 pipe->status_ring->base_addr_ce_space);
995 kfree(pipe->status_ring);
996 pipe->status_ring = NULL;
1000 EXPORT_SYMBOL(ath11k_ce_free_pipes);
1002 int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
1004 struct ath11k_ce_pipe *pipe;
1007 const struct ce_attr *attr;
1009 spin_lock_init(&ab->ce.ce_lock);
1011 for (i = 0; i < ab->hw_params.ce_count; i++) {
1012 attr = &ab->hw_params.host_ce_config[i];
1013 pipe = &ab->ce.ce_pipe[i];
1016 pipe->buf_sz = attr->src_sz_max;
1018 ret = ath11k_ce_alloc_pipe(ab, i);
1020 /* Free any partially successful allocation */
1021 ath11k_ce_free_pipes(ab);
1028 EXPORT_SYMBOL(ath11k_ce_alloc_pipes);
1030 /* For big endian hosts, copy engine byte_swap is enabled.
1031 * When the copy engine does byte_swap, the host needs to byte swap again
1032 * to get/put the buffer content in the correct byte order.
1034 void ath11k_ce_byte_swap(void *mem, u32 len)
1038 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
1042 for (i = 0; i < (len / 4); i++) {
1043 *(u32 *)mem = swab32(*(u32 *)mem);
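/* swab32() reverses the byte order of one 32-bit word, e.g.
 * swab32(0x11223344) == 0x44332211, so the loop above undoes the copy
 * engine's swap a word at a time; on little endian hosts the
 * IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) check lets the compiler drop the
 * body entirely.
 */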
1049 int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id)
1051 if (ce_id >= ab->hw_params.ce_count)
1054 return ab->hw_params.host_ce_config[ce_id].flags;
1056 EXPORT_SYMBOL(ath11k_ce_get_attr_flags);