1 // SPDX-License-Identifier: GPL-2.0+
3 * TI K3 AM65x NAVSS Ring accelerator Manager (RA) subsystem driver
5 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
11 #include <asm/cache.h>
14 #include <asm/bitops.h>
16 #include <dm/device_compat.h>
17 #include <dm/devres.h>
19 #include <dm/uclass.h>
20 #include <linux/bitops.h>
21 #include <linux/compat.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/err.h>
24 #include <linux/soc/ti/k3-navss-ringacc.h>
25 #include <linux/soc/ti/ti_sci_protocol.h>
/*
 * Linux-compat shims: map the Linux-style bitmap/DMA helper names used in
 * this (Linux-derived) driver onto their U-Boot equivalents.
 */
#define set_bit(bit, bitmap)	__set_bit(bit, bitmap)
#define clear_bit(bit, bitmap)	__clear_bit(bit, bitmap)
/* U-Boot's dma_free_coherent() takes only the CPU virtual address */
#define dma_free_coherent(dev, size, cpu_addr, dma_handle) \
	dma_free_coherent(cpu_addr)
/*
 * Allocate DMA-coherent memory and zero it; evaluates to the virtual
 * address or NULL on failure (memset is guarded so a failed allocation
 * is not dereferenced).
 */
#define dma_zalloc_coherent(dev, size, dma_handle, flag) \
({ \
	void	*ring_mem_virt; \
	ring_mem_virt = dma_alloc_coherent((size), \
					   (unsigned long *)(dma_handle)); \
	if (ring_mem_virt) \
		memset(ring_mem_virt, 0, (size)); \
	ring_mem_virt; \
})

/* All ringacc instances probed in the system */
static LIST_HEAD(k3_nav_ringacc_list);
43 static void ringacc_writel(u32 v, void __iomem *reg)
45 pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", v, reg);
49 static u32 ringacc_readl(void __iomem *reg)
54 pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, reg);
58 #define KNAV_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0)
61 * struct k3_nav_ring_rt_regs - The RA Control/Status Registers region
63 struct k3_nav_ring_rt_regs {
65 u32 db; /* RT Ring N Doorbell Register */
67 u32 occ; /* RT Ring N Occupancy Register */
68 u32 indx; /* RT Ring N Current Index Register */
69 u32 hwocc; /* RT Ring N Hardware Occupancy Register */
70 u32 hwindx; /* RT Ring N Current Index Register */
73 #define KNAV_RINGACC_RT_REGS_STEP 0x1000
76 * struct k3_nav_ring_fifo_regs - The Ring Accelerator Queues Registers region
78 struct k3_nav_ring_fifo_regs {
79 u32 head_data[128]; /* Ring Head Entry Data Registers */
80 u32 tail_data[128]; /* Ring Tail Entry Data Registers */
81 u32 peek_head_data[128]; /* Ring Peek Head Entry Data Regs */
82 u32 peek_tail_data[128]; /* Ring Peek Tail Entry Data Regs */
86 * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
88 struct k3_ringacc_proxy_gcfg_regs {
89 u32 revision; /* Revision Register */
90 u32 config; /* Config Register */
93 #define K3_RINGACC_PROXY_CFG_THREADS_MASK GENMASK(15, 0)
96 * struct k3_ringacc_proxy_target_regs - RA Proxy Datapath MMIO Region
98 struct k3_ringacc_proxy_target_regs {
99 u32 control; /* Proxy Control Register */
100 u32 status; /* Proxy Status Register */
102 u32 data[128]; /* Proxy Data Register */
105 #define K3_RINGACC_PROXY_TARGET_STEP 0x1000
106 #define K3_RINGACC_PROXY_NOT_USED (-1)
108 enum k3_ringacc_proxy_access_mode {
109 PROXY_ACCESS_MODE_HEAD = 0,
110 PROXY_ACCESS_MODE_TAIL = 1,
111 PROXY_ACCESS_MODE_PEEK_HEAD = 2,
112 PROXY_ACCESS_MODE_PEEK_TAIL = 3,
115 #define KNAV_RINGACC_FIFO_WINDOW_SIZE_BYTES (512U)
116 #define KNAV_RINGACC_FIFO_REGS_STEP 0x1000
117 #define KNAV_RINGACC_MAX_DB_RING_CNT (127U)
120 * struct k3_nav_ring_ops - Ring operations
122 struct k3_nav_ring_ops {
123 int (*push_tail)(struct k3_nav_ring *ring, void *elm);
124 int (*push_head)(struct k3_nav_ring *ring, void *elm);
125 int (*pop_tail)(struct k3_nav_ring *ring, void *elm);
126 int (*pop_head)(struct k3_nav_ring *ring, void *elm);
130 * struct k3_nav_ring_state - Internal state tracking structure
132 * @free: Number of free entries
134 * @windex: Write index
135 * @rindex: Read index
137 struct k3_nav_ring_state {
142 u32 tdown_complete:1;
146 * struct k3_nav_ring - RA Ring descriptor
148 * @rt - Ring control/status registers
149 * @fifos - Ring queues registers
150 * @proxy - Ring Proxy Datapath registers
151 * @ring_mem_dma - Ring buffer dma address
152 * @ring_mem_virt - Ring buffer virt address
153 * @ops - Ring operations
154 * @size - Ring size in elements
155 * @elm_size - Size of the ring element
159 * @parent - Pointer on struct @k3_nav_ringacc
160 * @use_count - Use count for shared rings
161 * @proxy_id - RA Ring Proxy Id (only if @K3_NAV_RINGACC_RING_USE_PROXY)
164 struct k3_nav_ring_rt_regs __iomem *rt;
165 struct k3_nav_ring_fifo_regs __iomem *fifos;
166 struct k3_ringacc_proxy_target_regs __iomem *proxy;
167 dma_addr_t ring_mem_dma;
169 struct k3_nav_ring_ops *ops;
171 enum k3_nav_ring_size elm_size;
172 enum k3_nav_ring_mode mode;
174 #define KNAV_RING_FLAG_BUSY BIT(1)
175 #define K3_NAV_RING_FLAG_SHARED BIT(2)
176 struct k3_nav_ring_state state;
178 struct k3_nav_ringacc *parent;
183 struct k3_nav_ringacc_ops {
184 int (*init)(struct udevice *dev, struct k3_nav_ringacc *ringacc);
188 * struct k3_nav_ringacc - Rings accelerator descriptor
190 * @dev - pointer on RA device
191 * @proxy_gcfg - RA proxy global config registers
192 * @proxy_target_base - RA proxy datapath region
193 * @num_rings - number of ring in RA
194 * @rm_gp_range - general purpose rings range from tisci
195 * @dma_ring_reset_quirk - DMA reset w/a enable
196 * @num_proxies - number of RA proxies
197 * @rings - array of rings descriptors (struct @k3_nav_ring)
198 * @list - list of RAs in the system
199 * @tisci - pointer ti-sci handle
200 * @tisci_ring_ops - ti-sci rings ops
201 * @tisci_dev_id - ti-sci device id
202 * @ops: SoC specific ringacc operation
204 struct k3_nav_ringacc {
206 struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
207 void __iomem *proxy_target_base;
208 u32 num_rings; /* number of rings in Ringacc module */
209 unsigned long *rings_inuse;
210 struct ti_sci_resource *rm_gp_range;
211 bool dma_ring_reset_quirk;
213 unsigned long *proxy_inuse;
215 struct k3_nav_ring *rings;
216 struct list_head list;
218 const struct ti_sci_handle *tisci;
219 const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
222 const struct k3_nav_ringacc_ops *ops;
225 static long k3_nav_ringacc_ring_get_fifo_pos(struct k3_nav_ring *ring)
227 return KNAV_RINGACC_FIFO_WINDOW_SIZE_BYTES -
228 (4 << ring->elm_size);
231 static void *k3_nav_ringacc_get_elm_addr(struct k3_nav_ring *ring, u32 idx)
233 return (idx * (4 << ring->elm_size) + ring->ring_mem_virt);
236 static int k3_nav_ringacc_ring_push_mem(struct k3_nav_ring *ring, void *elem);
237 static int k3_nav_ringacc_ring_pop_mem(struct k3_nav_ring *ring, void *elem);
239 static struct k3_nav_ring_ops k3_nav_mode_ring_ops = {
240 .push_tail = k3_nav_ringacc_ring_push_mem,
241 .pop_head = k3_nav_ringacc_ring_pop_mem,
244 static int k3_nav_ringacc_ring_push_io(struct k3_nav_ring *ring, void *elem);
245 static int k3_nav_ringacc_ring_pop_io(struct k3_nav_ring *ring, void *elem);
246 static int k3_nav_ringacc_ring_push_head_io(struct k3_nav_ring *ring,
248 static int k3_nav_ringacc_ring_pop_tail_io(struct k3_nav_ring *ring,
251 static struct k3_nav_ring_ops k3_nav_mode_msg_ops = {
252 .push_tail = k3_nav_ringacc_ring_push_io,
253 .push_head = k3_nav_ringacc_ring_push_head_io,
254 .pop_tail = k3_nav_ringacc_ring_pop_tail_io,
255 .pop_head = k3_nav_ringacc_ring_pop_io,
258 static int k3_ringacc_ring_push_head_proxy(struct k3_nav_ring *ring,
260 static int k3_ringacc_ring_push_tail_proxy(struct k3_nav_ring *ring,
262 static int k3_ringacc_ring_pop_head_proxy(struct k3_nav_ring *ring, void *elem);
263 static int k3_ringacc_ring_pop_tail_proxy(struct k3_nav_ring *ring, void *elem);
265 static struct k3_nav_ring_ops k3_nav_mode_proxy_ops = {
266 .push_tail = k3_ringacc_ring_push_tail_proxy,
267 .push_head = k3_ringacc_ring_push_head_proxy,
268 .pop_tail = k3_ringacc_ring_pop_tail_proxy,
269 .pop_head = k3_ringacc_ring_pop_head_proxy,
272 struct udevice *k3_nav_ringacc_get_dev(struct k3_nav_ringacc *ringacc)
277 struct k3_nav_ring *k3_nav_ringacc_request_ring(struct k3_nav_ringacc *ringacc,
280 int proxy_id = K3_RINGACC_PROXY_NOT_USED;
282 if (id == K3_NAV_RINGACC_RING_ID_ANY) {
283 /* Request for any general purpose ring */
284 struct ti_sci_resource_desc *gp_rings =
285 &ringacc->rm_gp_range->desc[0];
288 size = gp_rings->start + gp_rings->num;
289 id = find_next_zero_bit(ringacc->rings_inuse,
290 size, gp_rings->start);
297 if (test_bit(id, ringacc->rings_inuse) &&
298 !(ringacc->rings[id].flags & K3_NAV_RING_FLAG_SHARED))
300 else if (ringacc->rings[id].flags & K3_NAV_RING_FLAG_SHARED)
303 if (flags & K3_NAV_RINGACC_RING_USE_PROXY) {
304 proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
305 ringacc->num_proxies, 0);
306 if (proxy_id == ringacc->num_proxies)
310 if (!try_module_get(ringacc->dev->driver->owner))
313 if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
314 set_bit(proxy_id, ringacc->proxy_inuse);
315 ringacc->rings[id].proxy_id = proxy_id;
316 pr_debug("Giving ring#%d proxy#%d\n",
319 pr_debug("Giving ring#%d\n", id);
322 set_bit(id, ringacc->rings_inuse);
324 ringacc->rings[id].use_count++;
325 return &ringacc->rings[id];
331 int k3_nav_ringacc_request_rings_pair(struct k3_nav_ringacc *ringacc,
332 int fwd_id, int compl_id,
333 struct k3_nav_ring **fwd_ring,
334 struct k3_nav_ring **compl_ring)
338 if (!fwd_ring || !compl_ring)
341 *fwd_ring = k3_nav_ringacc_request_ring(ringacc, fwd_id, 0);
345 *compl_ring = k3_nav_ringacc_request_ring(ringacc, compl_id, 0);
346 if (!(*compl_ring)) {
347 k3_nav_ringacc_ring_free(*fwd_ring);
354 static void k3_ringacc_ring_reset_sci(struct k3_nav_ring *ring)
356 struct k3_nav_ringacc *ringacc = ring->parent;
359 ret = ringacc->tisci_ring_ops->config(
361 TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
362 ringacc->tisci_dev_id,
371 dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
375 void k3_nav_ringacc_ring_reset(struct k3_nav_ring *ring)
377 if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
380 memset(&ring->state, 0, sizeof(ring->state));
382 k3_ringacc_ring_reset_sci(ring);
385 static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_nav_ring *ring,
386 enum k3_nav_ring_mode mode)
388 struct k3_nav_ringacc *ringacc = ring->parent;
391 ret = ringacc->tisci_ring_ops->config(
393 TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
394 ringacc->tisci_dev_id,
403 dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
407 void k3_nav_ringacc_ring_reset_dma(struct k3_nav_ring *ring, u32 occ)
409 if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
412 if (!ring->parent->dma_ring_reset_quirk) {
413 k3_nav_ringacc_ring_reset(ring);
418 occ = ringacc_readl(&ring->rt->occ);
421 u32 db_ring_cnt, db_ring_cnt_cur;
423 pr_debug("%s %u occ: %u\n", __func__,
425 /* 2. Reset the ring */
426 k3_ringacc_ring_reset_sci(ring);
429 * 3. Setup the ring in ring/doorbell mode
430 * (if not already in this mode)
432 if (ring->mode != K3_NAV_RINGACC_RING_MODE_RING)
433 k3_ringacc_ring_reconfig_qmode_sci(
434 ring, K3_NAV_RINGACC_RING_MODE_RING);
436 * 4. Ring the doorbell 2**22 – ringOcc times.
437 * This will wrap the internal UDMAP ring state occupancy
438 * counter (which is 21-bits wide) to 0.
440 db_ring_cnt = (1U << 22) - occ;
442 while (db_ring_cnt != 0) {
444 * Ring the doorbell with the maximum count each
445 * iteration if possible to minimize the total
448 if (db_ring_cnt > KNAV_RINGACC_MAX_DB_RING_CNT)
449 db_ring_cnt_cur = KNAV_RINGACC_MAX_DB_RING_CNT;
451 db_ring_cnt_cur = db_ring_cnt;
453 writel(db_ring_cnt_cur, &ring->rt->db);
454 db_ring_cnt -= db_ring_cnt_cur;
457 /* 5. Restore the original ring mode (if not ring mode) */
458 if (ring->mode != K3_NAV_RINGACC_RING_MODE_RING)
459 k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
462 /* 2. Reset the ring */
463 k3_nav_ringacc_ring_reset(ring);
466 static void k3_ringacc_ring_free_sci(struct k3_nav_ring *ring)
468 struct k3_nav_ringacc *ringacc = ring->parent;
471 ret = ringacc->tisci_ring_ops->config(
473 TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
474 ringacc->tisci_dev_id,
483 dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
487 int k3_nav_ringacc_ring_free(struct k3_nav_ring *ring)
489 struct k3_nav_ringacc *ringacc;
494 ringacc = ring->parent;
496 pr_debug("%s flags: 0x%08x\n", __func__, ring->flags);
498 if (!test_bit(ring->ring_id, ringacc->rings_inuse))
501 if (--ring->use_count)
504 if (!(ring->flags & KNAV_RING_FLAG_BUSY))
507 k3_ringacc_ring_free_sci(ring);
509 dma_free_coherent(ringacc->dev,
510 ring->size * (4 << ring->elm_size),
511 ring->ring_mem_virt, ring->ring_mem_dma);
512 ring->flags &= ~KNAV_RING_FLAG_BUSY;
514 if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
515 clear_bit(ring->proxy_id, ringacc->proxy_inuse);
517 ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
521 clear_bit(ring->ring_id, ringacc->rings_inuse);
523 module_put(ringacc->dev->driver->owner);
529 u32 k3_nav_ringacc_get_ring_id(struct k3_nav_ring *ring)
534 return ring->ring_id;
537 static int k3_nav_ringacc_ring_cfg_sci(struct k3_nav_ring *ring)
539 struct k3_nav_ringacc *ringacc = ring->parent;
546 ring_idx = ring->ring_id;
547 ret = ringacc->tisci_ring_ops->config(
549 TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
550 ringacc->tisci_dev_id,
552 lower_32_bits(ring->ring_mem_dma),
553 upper_32_bits(ring->ring_mem_dma),
559 dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
565 int k3_nav_ringacc_ring_cfg(struct k3_nav_ring *ring,
566 struct k3_nav_ring_cfg *cfg)
568 struct k3_nav_ringacc *ringacc = ring->parent;
573 if (cfg->elm_size > K3_NAV_RINGACC_RING_ELSIZE_256 ||
574 cfg->mode > K3_NAV_RINGACC_RING_MODE_QM ||
575 cfg->size & ~KNAV_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
576 !test_bit(ring->ring_id, ringacc->rings_inuse))
579 if (ring->use_count != 1)
582 ring->size = cfg->size;
583 ring->elm_size = cfg->elm_size;
584 ring->mode = cfg->mode;
585 memset(&ring->state, 0, sizeof(ring->state));
587 if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
588 ring->proxy = ringacc->proxy_target_base +
589 ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;
591 switch (ring->mode) {
592 case K3_NAV_RINGACC_RING_MODE_RING:
593 ring->ops = &k3_nav_mode_ring_ops;
595 case K3_NAV_RINGACC_RING_MODE_QM:
597 * In Queue mode elm_size can be 8 only and each operation
598 * uses 2 element slots
600 if (cfg->elm_size != K3_NAV_RINGACC_RING_ELSIZE_8 ||
603 case K3_NAV_RINGACC_RING_MODE_MESSAGE:
605 ring->ops = &k3_nav_mode_proxy_ops;
607 ring->ops = &k3_nav_mode_msg_ops;
615 ring->ring_mem_virt =
616 dma_zalloc_coherent(ringacc->dev,
617 ring->size * (4 << ring->elm_size),
618 &ring->ring_mem_dma, GFP_KERNEL);
619 if (!ring->ring_mem_virt) {
620 dev_err(ringacc->dev, "Failed to alloc ring mem\n");
625 ret = k3_nav_ringacc_ring_cfg_sci(ring);
630 ring->flags |= KNAV_RING_FLAG_BUSY;
631 ring->flags |= (cfg->flags & K3_NAV_RINGACC_RING_SHARED) ?
632 K3_NAV_RING_FLAG_SHARED : 0;
637 dma_free_coherent(ringacc->dev,
638 ring->size * (4 << ring->elm_size),
648 u32 k3_nav_ringacc_ring_get_size(struct k3_nav_ring *ring)
650 if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
656 u32 k3_nav_ringacc_ring_get_free(struct k3_nav_ring *ring)
658 if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
661 if (!ring->state.free)
662 ring->state.free = ring->size - ringacc_readl(&ring->rt->occ);
664 return ring->state.free;
667 u32 k3_nav_ringacc_ring_get_occ(struct k3_nav_ring *ring)
669 if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
672 return ringacc_readl(&ring->rt->occ);
675 u32 k3_nav_ringacc_ring_is_full(struct k3_nav_ring *ring)
677 return !k3_nav_ringacc_ring_get_free(ring);
680 enum k3_ringacc_access_mode {
681 K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
682 K3_RINGACC_ACCESS_MODE_POP_HEAD,
683 K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
684 K3_RINGACC_ACCESS_MODE_POP_TAIL,
685 K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
686 K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
689 static int k3_ringacc_ring_cfg_proxy(struct k3_nav_ring *ring,
690 enum k3_ringacc_proxy_access_mode mode)
696 val |= ring->elm_size << 24;
697 ringacc_writel(val, &ring->proxy->control);
701 static int k3_nav_ringacc_ring_access_proxy(
702 struct k3_nav_ring *ring, void *elem,
703 enum k3_ringacc_access_mode access_mode)
707 ptr = (void __iomem *)&ring->proxy->data;
709 switch (access_mode) {
710 case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
711 case K3_RINGACC_ACCESS_MODE_POP_HEAD:
712 k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
714 case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
715 case K3_RINGACC_ACCESS_MODE_POP_TAIL:
716 k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);
722 ptr += k3_nav_ringacc_ring_get_fifo_pos(ring);
724 switch (access_mode) {
725 case K3_RINGACC_ACCESS_MODE_POP_HEAD:
726 case K3_RINGACC_ACCESS_MODE_POP_TAIL:
727 pr_debug("proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n",
729 memcpy_fromio(elem, ptr, (4 << ring->elm_size));
732 case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
733 case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
734 pr_debug("proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n",
736 memcpy_toio(ptr, elem, (4 << ring->elm_size));
743 pr_debug("proxy: free%d occ%d\n",
744 ring->state.free, ring->state.occ);
748 static int k3_ringacc_ring_push_head_proxy(struct k3_nav_ring *ring, void *elem)
750 return k3_nav_ringacc_ring_access_proxy(
751 ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
754 static int k3_ringacc_ring_push_tail_proxy(struct k3_nav_ring *ring, void *elem)
756 return k3_nav_ringacc_ring_access_proxy(
757 ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
760 static int k3_ringacc_ring_pop_head_proxy(struct k3_nav_ring *ring, void *elem)
762 return k3_nav_ringacc_ring_access_proxy(
763 ring, elem, K3_RINGACC_ACCESS_MODE_POP_HEAD);
766 static int k3_ringacc_ring_pop_tail_proxy(struct k3_nav_ring *ring, void *elem)
768 return k3_nav_ringacc_ring_access_proxy(
769 ring, elem, K3_RINGACC_ACCESS_MODE_POP_HEAD);
772 static int k3_nav_ringacc_ring_access_io(
773 struct k3_nav_ring *ring, void *elem,
774 enum k3_ringacc_access_mode access_mode)
778 switch (access_mode) {
779 case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
780 case K3_RINGACC_ACCESS_MODE_POP_HEAD:
781 ptr = (void __iomem *)&ring->fifos->head_data;
783 case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
784 case K3_RINGACC_ACCESS_MODE_POP_TAIL:
785 ptr = (void __iomem *)&ring->fifos->tail_data;
791 ptr += k3_nav_ringacc_ring_get_fifo_pos(ring);
793 switch (access_mode) {
794 case K3_RINGACC_ACCESS_MODE_POP_HEAD:
795 case K3_RINGACC_ACCESS_MODE_POP_TAIL:
796 pr_debug("memcpy_fromio(x): --> ptr(%p), mode:%d\n",
798 memcpy_fromio(elem, ptr, (4 << ring->elm_size));
801 case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
802 case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
803 pr_debug("memcpy_toio(x): --> ptr(%p), mode:%d\n",
805 memcpy_toio(ptr, elem, (4 << ring->elm_size));
812 pr_debug("free%d index%d occ%d index%d\n",
813 ring->state.free, ring->state.windex, ring->state.occ, ring->state.rindex);
817 static int k3_nav_ringacc_ring_push_head_io(struct k3_nav_ring *ring,
820 return k3_nav_ringacc_ring_access_io(
821 ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
824 static int k3_nav_ringacc_ring_push_io(struct k3_nav_ring *ring, void *elem)
826 return k3_nav_ringacc_ring_access_io(
827 ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
830 static int k3_nav_ringacc_ring_pop_io(struct k3_nav_ring *ring, void *elem)
832 return k3_nav_ringacc_ring_access_io(
833 ring, elem, K3_RINGACC_ACCESS_MODE_POP_HEAD);
836 static int k3_nav_ringacc_ring_pop_tail_io(struct k3_nav_ring *ring, void *elem)
838 return k3_nav_ringacc_ring_access_io(
839 ring, elem, K3_RINGACC_ACCESS_MODE_POP_HEAD);
842 static int k3_nav_ringacc_ring_push_mem(struct k3_nav_ring *ring, void *elem)
846 elem_ptr = k3_nav_ringacc_get_elm_addr(ring, ring->state.windex);
848 memcpy(elem_ptr, elem, (4 << ring->elm_size));
850 flush_dcache_range((unsigned long)ring->ring_mem_virt,
851 ALIGN((unsigned long)ring->ring_mem_virt +
852 ring->size * (4 << ring->elm_size),
855 ring->state.windex = (ring->state.windex + 1) % ring->size;
857 ringacc_writel(1, &ring->rt->db);
859 pr_debug("ring_push_mem: free%d index%d\n",
860 ring->state.free, ring->state.windex);
865 static int k3_nav_ringacc_ring_pop_mem(struct k3_nav_ring *ring, void *elem)
869 elem_ptr = k3_nav_ringacc_get_elm_addr(ring, ring->state.rindex);
871 invalidate_dcache_range((unsigned long)ring->ring_mem_virt,
872 ALIGN((unsigned long)ring->ring_mem_virt +
873 ring->size * (4 << ring->elm_size),
876 memcpy(elem, elem_ptr, (4 << ring->elm_size));
878 ring->state.rindex = (ring->state.rindex + 1) % ring->size;
880 ringacc_writel(-1, &ring->rt->db);
882 pr_debug("ring_pop_mem: occ%d index%d pos_ptr%p\n",
883 ring->state.occ, ring->state.rindex, elem_ptr);
887 int k3_nav_ringacc_ring_push(struct k3_nav_ring *ring, void *elem)
889 int ret = -EOPNOTSUPP;
891 if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
894 pr_debug("ring_push%d: free%d index%d\n",
895 ring->ring_id, ring->state.free, ring->state.windex);
897 if (k3_nav_ringacc_ring_is_full(ring))
900 if (ring->ops && ring->ops->push_tail)
901 ret = ring->ops->push_tail(ring, elem);
906 int k3_nav_ringacc_ring_push_head(struct k3_nav_ring *ring, void *elem)
908 int ret = -EOPNOTSUPP;
910 if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
913 pr_debug("ring_push_head: free%d index%d\n",
914 ring->state.free, ring->state.windex);
916 if (k3_nav_ringacc_ring_is_full(ring))
919 if (ring->ops && ring->ops->push_head)
920 ret = ring->ops->push_head(ring, elem);
925 int k3_nav_ringacc_ring_pop(struct k3_nav_ring *ring, void *elem)
927 int ret = -EOPNOTSUPP;
929 if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
932 if (!ring->state.occ)
933 ring->state.occ = k3_nav_ringacc_ring_get_occ(ring);
935 pr_debug("ring_pop%d: occ%d index%d\n",
936 ring->ring_id, ring->state.occ, ring->state.rindex);
938 if (!ring->state.occ && !ring->state.tdown_complete)
941 if (ring->ops && ring->ops->pop_head)
942 ret = ring->ops->pop_head(ring, elem);
947 int k3_nav_ringacc_ring_pop_tail(struct k3_nav_ring *ring, void *elem)
949 int ret = -EOPNOTSUPP;
951 if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
954 if (!ring->state.occ)
955 ring->state.occ = k3_nav_ringacc_ring_get_occ(ring);
957 pr_debug("ring_pop_tail: occ%d index%d\n",
958 ring->state.occ, ring->state.rindex);
960 if (!ring->state.occ)
963 if (ring->ops && ring->ops->pop_tail)
964 ret = ring->ops->pop_tail(ring, elem);
969 static int k3_nav_ringacc_probe_dt(struct k3_nav_ringacc *ringacc)
971 struct udevice *dev = ringacc->dev;
972 struct udevice *tisci_dev = NULL;
975 ringacc->num_rings = dev_read_u32_default(dev, "ti,num-rings", 0);
976 if (!ringacc->num_rings) {
977 dev_err(dev, "ti,num-rings read failure %d\n", ret);
981 ringacc->dma_ring_reset_quirk =
982 dev_read_bool(dev, "ti,dma-ring-reset-quirk");
984 ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
985 "ti,sci", &tisci_dev);
987 pr_debug("TISCI RA RM get failed (%d)\n", ret);
988 ringacc->tisci = NULL;
991 ringacc->tisci = (struct ti_sci_handle *)
992 (ti_sci_get_handle_from_sysfw(tisci_dev));
994 ret = dev_read_u32_default(dev, "ti,sci", 0);
996 dev_err(dev, "TISCI RA RM disabled\n");
997 ringacc->tisci = NULL;
1001 ret = dev_read_u32(dev, "ti,sci-dev-id", &ringacc->tisci_dev_id);
1003 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
1004 ringacc->tisci = NULL;
1008 ringacc->rm_gp_range = devm_ti_sci_get_of_resource(
1009 ringacc->tisci, dev,
1010 ringacc->tisci_dev_id,
1011 "ti,sci-rm-range-gp-rings");
1012 if (IS_ERR(ringacc->rm_gp_range))
1013 ret = PTR_ERR(ringacc->rm_gp_range);
1018 static int k3_nav_ringacc_init(struct udevice *dev, struct k3_nav_ringacc *ringacc)
1020 void __iomem *base_fifo, *base_rt;
1023 ret = k3_nav_ringacc_probe_dt(ringacc);
1027 base_rt = (uint32_t *)devfdt_get_addr_name(dev, "rt");
1028 pr_debug("rt %p\n", base_rt);
1029 if (IS_ERR(base_rt))
1030 return PTR_ERR(base_rt);
1032 base_fifo = (uint32_t *)devfdt_get_addr_name(dev, "fifos");
1033 pr_debug("fifos %p\n", base_fifo);
1034 if (IS_ERR(base_fifo))
1035 return PTR_ERR(base_fifo);
1037 ringacc->proxy_gcfg = (struct k3_ringacc_proxy_gcfg_regs __iomem *)
1038 devfdt_get_addr_name(dev, "proxy_gcfg");
1039 if (IS_ERR(ringacc->proxy_gcfg))
1040 return PTR_ERR(ringacc->proxy_gcfg);
1041 ringacc->proxy_target_base =
1042 (struct k3_ringacc_proxy_gcfg_regs __iomem *)
1043 devfdt_get_addr_name(dev, "proxy_target");
1044 if (IS_ERR(ringacc->proxy_target_base))
1045 return PTR_ERR(ringacc->proxy_target_base);
1047 ringacc->num_proxies = ringacc_readl(&ringacc->proxy_gcfg->config) &
1048 K3_RINGACC_PROXY_CFG_THREADS_MASK;
1050 ringacc->rings = devm_kzalloc(dev,
1051 sizeof(*ringacc->rings) *
1054 ringacc->rings_inuse = devm_kcalloc(dev,
1055 BITS_TO_LONGS(ringacc->num_rings),
1056 sizeof(unsigned long), GFP_KERNEL);
1057 ringacc->proxy_inuse = devm_kcalloc(dev,
1058 BITS_TO_LONGS(ringacc->num_proxies),
1059 sizeof(unsigned long), GFP_KERNEL);
1061 if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
1064 for (i = 0; i < ringacc->num_rings; i++) {
1065 ringacc->rings[i].rt = base_rt +
1066 KNAV_RINGACC_RT_REGS_STEP * i;
1067 ringacc->rings[i].fifos = base_fifo +
1068 KNAV_RINGACC_FIFO_REGS_STEP * i;
1069 ringacc->rings[i].parent = ringacc;
1070 ringacc->rings[i].ring_id = i;
1071 ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
1073 dev_set_drvdata(dev, ringacc);
1075 ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
1077 list_add_tail(&ringacc->list, &k3_nav_ringacc_list);
1079 dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
1081 ringacc->rm_gp_range->desc[0].start,
1082 ringacc->rm_gp_range->desc[0].num,
1083 ringacc->tisci_dev_id);
1084 dev_info(dev, "dma-ring-reset-quirk: %s\n",
1085 ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
1086 dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
1087 ringacc_readl(&ringacc->proxy_gcfg->revision),
1088 ringacc->num_proxies);
1092 struct ringacc_match_data {
1093 struct k3_nav_ringacc_ops ops;
1096 static struct ringacc_match_data k3_nav_ringacc_data = {
1098 .init = k3_nav_ringacc_init,
1102 static const struct udevice_id knav_ringacc_ids[] = {
1103 { .compatible = "ti,am654-navss-ringacc", .data = (ulong)&k3_nav_ringacc_data, },
1107 static int k3_nav_ringacc_probe(struct udevice *dev)
1109 struct k3_nav_ringacc *ringacc;
1111 const struct ringacc_match_data *match_data;
1113 match_data = (struct ringacc_match_data *)dev_get_driver_data(dev);
1115 ringacc = dev_get_priv(dev);
1120 ringacc->ops = &match_data->ops;
1121 ret = ringacc->ops->init(dev, ringacc);
1128 U_BOOT_DRIVER(k3_navss_ringacc) = {
1129 .name = "k3-navss-ringacc",
1131 .of_match = knav_ringacc_ids,
1132 .probe = k3_nav_ringacc_probe,
1133 .priv_auto = sizeof(struct k3_nav_ringacc),