/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * License terms: GNU General Public License (GPL) version 2
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"
#define D40_NAME "dma40"

#define D40_PHY_CHAN -1
/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
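/*
 * Worked example (illustrative, added for clarity; not from the original
 * source): each physical channel pair shares one configuration register,
 * so channels 4 and 5 both get D40_CHAN_POS(chan) = 2 * (chan / 2) = 4,
 * i.e. their 2-bit field sits at bits 5:4 and D40_CHAN_POS_MASK() yields
 * 0x3 << 4 == 0x30.
 */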
/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500
/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000
/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
/* Attempts made before giving up on trying to get aligned pages */
#define MAX_LCLA_ALLOC_ATTEMPTS 256
/* Bit markings for allocation map */
#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0
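/*
 * Illustration (added for clarity; not in the original source): an
 * allocation word is either D40_ALLOC_FREE, D40_ALLOC_PHY, or a set of
 * event-line bits on top of D40_ALLOC_LOG_FREE. E.g. a physical channel
 * whose src side carries logical event lines 2 and 5 would have
 * allocated_src == (1 << 2) | (1 << 5) == 0x24.
 */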
/* Hardware designer of the block */
#define D40_HW_DESIGNER 0x8
/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};
/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};
/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len == 1.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used among other things for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @is_hw_linked: true if this job will automatically be continued for
 * the previous one.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	struct d40_phy_lli_bidir	 lli_phy;
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_current;
	int				 lcla_alloc;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	bool				 is_in_client_list;
	bool				 is_hw_linked;
};
/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Big map of which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void		 *base;
	void		 *base_unaligned;
	int		  pages;
	spinlock_t	  lock;
	struct d40_desc	**alloc_map;
};
/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event lines (0-31).
 */
struct d40_phy_res {
	spinlock_t lock;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
};
/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @completed: Starts with 1, after first interrupt it is set to the dma
 * engine's current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	/* ID of the most recent completed transfer */
	int				 completed;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 active;
	struct list_head		 queue;
	struct stedma40_chan_cfg	 dma_cfg;
	bool				 configured;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_log_lli_full		*lcpa;
	/* Runtime reconfiguration */
	dma_addr_t			runtime_addr;
	enum dma_data_direction		runtime_direction;
};
/**
 * struct d40_base - The big global struct, one for each probed instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's registers.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * dependent data.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
	spinlock_t			  interrupt_lock;
	spinlock_t			  execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	u8				  rev;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
	struct kmem_cache		 *desc_slab;
};
/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};
static int d40_pool_lli_alloc(struct d40_desc *d40d,
			      int lli_len, bool is_log)
{
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
					      align);
		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
					      align);
	} else {
		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
					      align);
		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
					      align);
	}

	return 0;
}
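/*
 * Sizing example (illustrative only): for a logical transfer with
 * lli_len == 4, lli_pool.size becomes ALIGN(4 * 2 * sizeof(struct
 * d40_log_lli), sizeof(struct d40_log_lli)), i.e. room for 4 src and
 * 4 dst LLIs, and kmalloc() is given one extra "align" so PTR_ALIGN()
 * can round the base up without overrunning the buffer.
 */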
static void d40_pool_lli_free(struct d40_desc *d40d)
{
	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}
static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;
	int p;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;

	/*
	 * Allocate both src and dst at the same time; therefore the half
	 * starts at 1, since 0 can't be used as zero is the end marker.
	 */
	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (!d40c->base->lcla_pool.alloc_map[p + i]) {
			d40c->base->lcla_pool.alloc_map[p + i] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}
static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (d40c->log_num == D40_PHY_CHAN)
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
		    D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
			    D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}
static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}
static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

	if (!list_empty(&d40c->client)) {
		struct d40_desc *d;
		struct d40_desc *_d;

		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_pool_lli_free(d);
				d40_desc_remove(d);
				desc = d;
				memset(desc, 0, sizeof(*desc));
				break;
			}
	}

	if (desc == NULL)
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

	if (desc != NULL)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}
static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}
static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}
static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	int curr_lcla = -EINVAL, next_lcla;

	if (d40c->log_num == D40_PHY_CHAN) {
		d40_phy_lli_write(d40c->base->virtbase,
				  d40c->phy_chan->num,
				  d40d->lli_phy.dst,
				  d40d->lli_phy.src);
		d40d->lli_current = d40d->lli_len;
	} else {
		if ((d40d->lli_len - d40d->lli_current) > 1)
			curr_lcla = d40_lcla_alloc_one(d40c, d40d);

		d40_log_lli_lcpa_write(d40c->lcpa,
				       &d40d->lli_log.dst[d40d->lli_current],
				       &d40d->lli_log.src[d40d->lli_current],
				       curr_lcla);

		d40d->lli_current++;
		for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
			struct d40_log_lli *lcla;

			if (d40d->lli_current + 1 < d40d->lli_len)
				next_lcla = d40_lcla_alloc_one(d40c, d40d);
			else
				next_lcla = -EINVAL;

			lcla = d40c->base->lcla_pool.base +
				d40c->phy_chan->num * 1024 +
				8 * curr_lcla * 2;

			d40_log_lli_lcla_write(lcla,
					       &d40d->lli_log.dst[d40d->lli_current],
					       &d40d->lli_log.src[d40d->lli_current],
					       next_lcla);

			(void) dma_map_single(d40c->base->dev, lcla,
					      2 * sizeof(struct d40_log_lli),
					      DMA_TO_DEVICE);

			curr_lcla = next_lcla;

			if (curr_lcla == -EINVAL) {
				d40d->lli_current++;
				break;
			}
		}
	}
}
static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->queue);
}
static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}
static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	list_for_each_entry(d, &d40c->queue, node)
		if (list_is_last(&d->node, &d40c->queue))
			break;
	return d;
}
static int d40_psize_2_burst_size(bool is_log, int psize)
{
	if (is_log) {
		if (psize == STEDMA40_PSIZE_LOG_1)
			return 1;
	} else {
		if (psize == STEDMA40_PSIZE_PHY_1)
			return 1;
	}

	return 2 << psize;
}
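/*
 * Illustration (not from the original source): apart from the _1 cases,
 * the burst size is 2 << psize, so assuming the usual encoding in which
 * STEDMA40_PSIZE_PHY_2/_4/_8/_16 enumerate 0/1/2/3, the returned burst
 * sizes are 2, 4, 8 and 16 elements respectively.
 */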
/*
 * The dma only supports transmitting packets up to
 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
 * dma elements required to send the entire sg list.
 */
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
	int dmalen;
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= (1 << max_w);

	if (!IS_ALIGNED(size, 1 << max_w))
		return -EINVAL;

	if (size <= seg_max)
		dmalen = 1;
	else {
		dmalen = size / seg_max;
		if (dmalen * seg_max < size)
			dmalen++;
	}
	return dmalen;
}
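/*
 * Worked example (illustrative; assumes STEDMA40_MAX_SEG_SIZE is the
 * 16-bit element-count limit 0xffff): with a byte-wide source and a
 * word-wide destination (data widths 0 and 2), seg_max = ALIGN(0xffff,
 * 4) = 0x10000, which overshoots and is trimmed back to 0xfffc. A
 * 0x40000 byte buffer then needs dmalen = 5 elements, since
 * 4 * 0xfffc < 0x40000.
 */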
static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
			   u32 data_width1, u32 data_width2)
{
	struct scatterlist *sg;
	int i;
	int len = 0;
	int ret;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = d40_size_2_dmalen(sg_dma_len(sg),
					data_width1, data_width2);
		if (ret < 0)
			return ret;
		len += ret;
	}
	return len;
}
/* Support functions for logical channels */

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			dev_err(&d40c->chan.dev->device,
				"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
				__func__, d40c->phy_chan->num, d40c->log_num,
				status);
			ret = -EBUSY;
		}
	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}
static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	d40c->pending_tx = 0;
	d40c->busy = false;
}
static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	u32 val;
	unsigned long flags;

	/* Note that disable requires the physical channel to be stopped */
	if (do_enable)
		val = D40_ACTIVATE_EVENTLINE;
	else
		val = D40_DEACTIVATE_EVENTLINE;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
	}
	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}
static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	u32 val;

	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
		    d40c->phy_chan->num * D40_DREG_PCDELTA +
		    D40_CHAN_REG_SSLNK);

	val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
		     d40c->phy_chan->num * D40_DREG_PCDELTA +
		     D40_CHAN_REG_SDLNK);
	return val;
}
static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};

	if (d40c->log_num == D40_PHY_CHAN)
		return phy_map[d40c->dma_cfg.mode_opt];
	else
		return log_map[d40c->dma_cfg.mode_opt];
}
static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
		       D40_SREG_ELEM_LOG_LIDX_MASK,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDELT);

		writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
		       D40_SREG_ELEM_LOG_LIDX_MASK,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSELT);
	}
}
static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (d40c->log_num != D40_PHY_CHAN)
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else
		num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
				 d40c->phy_chan->num * D40_DREG_PCDELTA +
				 D40_CHAN_REG_SDELT) &
			   D40_SREG_ELEM_PHY_ECNT_MASK) >>
			D40_SREG_ELEM_PHY_ECNT_POS;
	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}
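/*
 * Example (illustrative, added for clarity): ECNT counts remaining
 * elements, so with 512 elements left and a word-wide destination
 * (data_width == 2) the residue is 512 * (1 << 2) = 2048 bytes.
 */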
static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (d40c->log_num != D40_PHY_CHAN)
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
				d40c->phy_chan->num * D40_DREG_PCDELTA +
				D40_CHAN_REG_SDLNK) &
			D40_SREG_LNK_PHYS_LNK_MASK;

	return is_link;
}
static int d40_pause(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	if (d40c->phy_chan == NULL)
		return -EINVAL;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res == 0) {
		if (d40c->log_num != D40_PHY_CHAN) {
			d40_config_set_event(d40c, false);
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c))
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
		}
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}
static int d40_resume(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	if (d40c->phy_chan == NULL)
		return -EINVAL;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->base->rev == 0)
		if (d40c->log_num != D40_PHY_CHAN) {
			res = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			goto no_suspend;
		}

	/* If there are bytes left to transfer or a linked tx, resume the job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {

		if (d40c->log_num != D40_PHY_CHAN)
			d40_config_set_event(d40c, true);

		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
	}

no_suspend:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}
static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d)
{
}
static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
{
	struct d40_desc *d40d_prev = NULL;
	int i;
	u32 val;

	if (!list_empty(&d40c->queue))
		d40d_prev = d40_last_queued(d40c);
	else if (!list_empty(&d40c->active))
		d40d_prev = d40_first_active_get(d40c);

	if (!d40d_prev)
		return;

	/* Here we try to join this job with previous jobs */
	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
		    d40c->phy_chan->num * D40_DREG_PCDELTA +
		    D40_CHAN_REG_SSLNK);

	/* Figure out which link we're currently transmitting */
	for (i = 0; i < d40d_prev->lli_len; i++)
		if (val == d40d_prev->lli_phy.src[i].reg_lnk)
			break;

	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
		    d40c->phy_chan->num * D40_DREG_PCDELTA +
		    D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;

	if (i == (d40d_prev->lli_len - 1) && val > 0) {
		/* Change the current one */
		writel(virt_to_phys(d40d->lli_phy.src),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
		writel(virt_to_phys(d40d->lli_phy.dst),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);

		d40d->is_hw_linked = true;

	} else if (i < d40d_prev->lli_len) {
		(void) dma_unmap_single(d40c->base->dev,
					virt_to_phys(d40d_prev->lli_phy.src),
					d40d_prev->lli_pool.size,
					DMA_TO_DEVICE);

		/* Keep the settings */
		val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk &
			~D40_SREG_LNK_PHYS_LNK_MASK;
		d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
			val | virt_to_phys(d40d->lli_phy.src);

		val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk &
			~D40_SREG_LNK_PHYS_LNK_MASK;
		d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk =
			val | virt_to_phys(d40d->lli_phy.dst);

		(void) dma_map_single(d40c->base->dev,
				      d40d_prev->lli_phy.src,
				      d40d_prev->lli_pool.size,
				      DMA_TO_DEVICE);
		d40d->is_hw_linked = true;
	}
}
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	(void) d40_pause(&d40c->chan);

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->chan.cookie++;

	if (d40c->chan.cookie < 0)
		d40c->chan.cookie = 1;

	d40d->txd.cookie = d40c->chan.cookie;

	if (d40c->log_num == D40_PHY_CHAN)
		d40_tx_submit_phy(d40c, d40d);
	else
		d40_tx_submit_log(d40c, d40d);

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	(void) d40_resume(&d40c->chan);

	return tx->cookie;
}
static int d40_start(struct d40_chan *d40c)
{
	if (d40c->base->rev == 0) {
		int err;

		if (d40c->log_num != D40_PHY_CHAN) {
			err = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			if (err)
				return err;
		}
	}

	if (d40c->log_num != D40_PHY_CHAN)
		d40_config_set_event(d40c, true);

	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}
static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		d40c->busy = true;

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/*
		 * If this job is already linked in hw,
		 * do not submit it.
		 */
		if (!d40d->is_hw_linked) {
			/* Initiate DMA job */
			d40_desc_load(d40c, d40d);

			/* Start dma job */
			err = d40_start(d40c);

			if (err)
				return NULL;
		}
	}

	return d40d;
}
/* Called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	d40_lcla_free_all(d40c, d40d);

	if (d40d->lli_current < d40d->lli_len) {
		d40_desc_load(d40c, d40d);
		/* Start dma job */
		(void) d40_start(d40c);
		return;
	}

	if (d40_queue_start(d40c) == NULL)
		d40c->busy = false;

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}
static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		goto err;

	d40c->completed = d40d->txd.cookie;

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs from returning to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (async_tx_test_ack(&d40d->txd)) {
		d40_pool_lli_free(d40d);
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	} else {
		if (!d40d->is_in_client_list) {
			d40_desc_remove(d40d);
			d40_lcla_free_all(d40c, d40d);
			list_add_tail(&d40d->node, &d40c->client);
			d40d->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

err:
	/* Rescue manoeuvre if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			dev_err(base->dev,
				"[%s] IRQ chan: %ld offset %d idx %d\n",
				__func__, chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}
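/*
 * Illustration (not in the original source): regs[] is scanned as one
 * contiguous bitmap, so on a 32-bit build a set bit at position
 * chan == 70 maps to row = 70 / 32 = 2 (the D40_DREG_LCTIS2 word,
 * offset 64) and idx = 70 % 32 = 6, i.e. logical channel 64 + 6 = 70.
 */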
static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;

	if (!conf->dir) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
			__func__);
		res = -EINVAL;
	}

	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {

		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid TX channel address (%d)\n",
			__func__, conf->dst_dev_type);
		res = -EINVAL;
	}

	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid RX channel address (%d)\n",
			__func__, conf->src_dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
			__func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
			__func__);
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		dev_err(&d40c->chan.dev->device,
			"[%s] No event line\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid event group\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		dev_err(&d40c->chan.dev->device,
			"[%s] periph to periph not supported\n",
			__func__);
		res = -EINVAL;
	}

	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
	    (1 << conf->src_info.data_width) !=
	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
	    (1 << conf->dst_info.data_width)) {
		/*
		 * The DMAC hardware only supports
		 * src (burst x width) == dst (burst x width)
		 */
		dev_err(&d40c->chan.dev->device,
			"[%s] src (burst x width) != dst (burst x width)\n",
			__func__);
		res = -EINVAL;
	}

	return res;
}
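/*
 * Example (illustrative): a source doing 8-beat bursts of halfwords
 * moves 8 * 2 = 16 bytes per burst, so the destination could e.g. use
 * 4-beat bursts of words (4 * 4 = 16); 8-beat word bursts (32 bytes)
 * would be rejected by the check above.
 */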
static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;
	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
}
static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}
static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;

	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {

				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channel.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:
	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}
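/*
 * Illustration (not from the original source): each device type owns a
 * src/dst pair of logical channels, e.g. dev_type 11 yields log_num 22
 * when used as source and 23 as destination. Its event group (dev_type
 * divided by 16, assuming the usual D40_TYPE_TO_GROUP encoding) selects
 * the physical channel pair that the search above starts from.
 */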
static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}
static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;
	struct d40_desc *d;
	struct d40_desc *_d;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			d40_pool_lli_free(d);
			d40_desc_remove(d);
			d40_desc_free(d40c, d);
		}

	if (phy == NULL) {
		dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
			__func__);
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
			__func__);
		return -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		is_src = true;
	} else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		return -EINVAL;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
			__func__);
		return res;
	}

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Release logical channel, deactivate the event line */

		d40_config_set_event(d40c, false);
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;

		/*
		 * Check if there are more logical allocations
		 * on this phy channel.
		 */
		if (!d40_alloc_mask_free(phy, is_src, event)) {
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c)) {
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
				if (res) {
					dev_err(&d40c->chan.dev->device,
						"[%s] Executing RUN command\n",
						__func__);
					return res;
				}
			}
			return 0;
		}
	} else {
		(void) d40_alloc_mask_free(phy, is_src, 0);
	}

	/* Release physical channel */
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to stop channel\n", __func__);
		return res;
	}
	d40c->phy_chan = NULL;
	d40c->configured = false;
	d40c->base->lookup_phy_chans[phy->num] = NULL;

	return 0;
}
static bool d40_is_paused(struct d40_chan *d40c)
{
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num == D40_PHY_CHAN) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			       d40c->phy_chan->num * D40_DREG_PCDELTA +
			       D40_CHAN_REG_SDLNK);
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			       d40c->phy_chan->num * D40_DREG_PCDELTA +
			       D40_CHAN_REG_SSLNK);
	} else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		goto _exit;
	}

	status = (status & D40_EVENTLINE_MASK(event)) >>
		 D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}
static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}
struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
						   struct scatterlist *sgl_dst,
						   struct scatterlist *sgl_src,
						   unsigned int sgl_len,
						   unsigned long dma_flags)
{
	int res;
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unallocated channel.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL)
		goto err;

	d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.dst_info.data_width);
	if (d40d->lli_len < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unaligned size\n", __func__);
		goto err;
	}

	d40d->lli_current = 0;
	d40d->txd.flags = dma_flags;

	if (d40c->log_num != D40_PHY_CHAN) {

		if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		(void) d40_log_sg_to_lli(sgl_src,
					 sgl_len,
					 d40d->lli_log.src,
					 d40c->log_def.lcsp1,
					 d40c->dma_cfg.src_info.data_width,
					 d40c->dma_cfg.dst_info.data_width);

		(void) d40_log_sg_to_lli(sgl_dst,
					 sgl_len,
					 d40d->lli_log.dst,
					 d40c->log_def.lcsp3,
					 d40c->dma_cfg.dst_info.data_width,
					 d40c->dma_cfg.src_info.data_width);
	} else {
		if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		res = d40_phy_sg_to_lli(sgl_src,
					sgl_len,
					0,
					d40d->lli_phy.src,
					virt_to_phys(d40d->lli_phy.src),
					d40c->src_def_cfg,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.dst_info.data_width,
					d40c->dma_cfg.src_info.psize);

		if (res < 0)
			goto err;

		res = d40_phy_sg_to_lli(sgl_dst,
					sgl_len,
					0,
					d40d->lli_phy.dst,
					virt_to_phys(d40d->lli_phy.dst),
					d40c->dst_def_cfg,
					d40c->dma_cfg.dst_info.data_width,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.dst_info.psize);

		if (res < 0)
			goto err;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	spin_unlock_irqrestore(&d40c->lock, flags);

	return &d40d->txd;
err:
	if (d40d)
		d40_desc_free(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}
EXPORT_SYMBOL(stedma40_memcpy_sg);
bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	if (!err)
		d40c->configured = true;

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);
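/*
 * Client-side usage sketch (hypothetical, not part of this file): the
 * filter is meant to be handed to dma_request_channel() together with a
 * channel configuration. The device-type value below is illustrative.
 *
 *	struct stedma40_chan_cfg cfg = {
 *		.dir = STEDMA40_PERIPH_TO_MEM,
 *		.src_dev_type = DB8500_DMA_DEV1_SD_MMC0_RX,
 *		.dst_dev_type = STEDMA40_DEV_DST_MEMORY,
 *		.mode = STEDMA40_MODE_LOGICAL,
 *	};
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 */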
/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;
	spin_lock_irqsave(&d40c->lock, flags);

	d40c->completed = chan->cookie = 1;

	/* If no dma configuration is set use default configuration (memcpy) */
	if (!d40c->configured) {
		err = d40_config_memcpy(d40c);
		if (err) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Failed to configure memcpy channel\n",
				__func__);
			goto fail;
		}
	}
	is_free_phy = (d40c->phy_chan == NULL);

	err = d40_allocate_channel(d40c);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to allocate channel\n", __func__);
		goto fail;
	}

	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dst_dev_type *
				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
	}

	/*
	 * Only write channel configuration to the DMA if the physical
	 * resource is free. In case of multiple logical channels
	 * on the same physical resource, only the first write is necessary.
	 */
	if (is_free_phy)
		d40_config_write(d40c);
fail:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}
static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Cannot free unallocated channel\n", __func__);
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);

	if (err)
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to free channel\n", __func__);
	spin_unlock_irqrestore(&d40c->lock, flags);
}
static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Descriptor is NULL\n", __func__);
		goto err;
	}

	d40d->txd.flags = dma_flags;
	d40d->lli_len = d40_size_2_dmalen(size,
					  d40c->dma_cfg.src_info.data_width,
					  d40c->dma_cfg.dst_info.data_width);
	if (d40d->lli_len < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unaligned size\n", __func__);
		goto err;
	}

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	if (d40c->log_num != D40_PHY_CHAN) {

		if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}
		d40d->lli_current = 0;

		if (d40_log_buf_to_lli(d40d->lli_log.src,
				       src,
				       size,
				       d40c->log_def.lcsp1,
				       d40c->dma_cfg.src_info.data_width,
				       d40c->dma_cfg.dst_info.data_width,
				       true) == NULL)
			goto err;

		if (d40_log_buf_to_lli(d40d->lli_log.dst,
				       dst,
				       size,
				       d40c->log_def.lcsp3,
				       d40c->dma_cfg.dst_info.data_width,
				       d40c->dma_cfg.src_info.data_width,
				       true) == NULL)
			goto err;

	} else {

		if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		if (d40_phy_buf_to_lli(d40d->lli_phy.src,
				       src,
				       size,
				       d40c->dma_cfg.src_info.psize,
				       0,
				       d40c->src_def_cfg,
				       true,
				       d40c->dma_cfg.src_info.data_width,
				       d40c->dma_cfg.dst_info.data_width,
				       false) == NULL)
			goto err;

		if (d40_phy_buf_to_lli(d40d->lli_phy.dst,
				       dst,
				       size,
				       d40c->dma_cfg.dst_info.psize,
				       0,
				       d40c->dst_def_cfg,
				       true,
				       d40c->dma_cfg.dst_info.data_width,
				       d40c->dma_cfg.src_info.data_width,
				       false) == NULL)
			goto err;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return &d40d->txd;

err:
	if (d40d)
		d40_desc_free(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}
static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *chan,
	    struct scatterlist *dst_sg, unsigned int dst_nents,
	    struct scatterlist *src_sg, unsigned int src_nents,
	    unsigned long dma_flags)
{
	if (dst_nents != src_nents)
		return NULL;

	return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags);
}
static int d40_prep_slave_sg_log(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sg_len,
				 enum dma_data_direction direction,
				 unsigned long dma_flags)
{
	dma_addr_t dev_addr = 0;
	int total_size;

	d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.dst_info.data_width);
	if (d40d->lli_len < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unaligned size\n", __func__);
		return -EINVAL;
	}

	if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_current = 0;

	if (direction == DMA_FROM_DEVICE)
		if (d40c->runtime_addr)
			dev_addr = d40c->runtime_addr;
		else
			dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
	else if (direction == DMA_TO_DEVICE)
		if (d40c->runtime_addr)
			dev_addr = d40c->runtime_addr;
		else
			dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
	else
		return -EINVAL;

	total_size = d40_log_sg_to_dev(sgl, sg_len,
				       &d40d->lli_log,
				       &d40c->log_def,
				       d40c->dma_cfg.src_info.data_width,
				       d40c->dma_cfg.dst_info.data_width,
				       direction,
				       dev_addr);

	if (total_size < 0)
		return -EINVAL;

	return 0;
}
static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sgl_len,
				 enum dma_data_direction direction,
				 unsigned long dma_flags)
{
	dma_addr_t src_dev_addr;
	dma_addr_t dst_dev_addr;
	int res;

	d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.dst_info.data_width);
	if (d40d->lli_len < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unaligned size\n", __func__);
		return -EINVAL;
	}

	if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_current = 0;

	if (direction == DMA_FROM_DEVICE) {
		dst_dev_addr = 0;
		if (d40c->runtime_addr)
			src_dev_addr = d40c->runtime_addr;
		else
			src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
	} else if (direction == DMA_TO_DEVICE) {
		src_dev_addr = 0;
		if (d40c->runtime_addr)
			dst_dev_addr = d40c->runtime_addr;
		else
			dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
	} else
		return -EINVAL;

	res = d40_phy_sg_to_lli(sgl,
				sgl_len,
				src_dev_addr,
				d40d->lli_phy.src,
				virt_to_phys(d40d->lli_phy.src),
				d40c->src_def_cfg,
				d40c->dma_cfg.src_info.data_width,
				d40c->dma_cfg.dst_info.data_width,
				d40c->dma_cfg.src_info.psize);
	if (res < 0)
		return res;

	res = d40_phy_sg_to_lli(sgl,
				sgl_len,
				dst_dev_addr,
				d40d->lli_phy.dst,
				virt_to_phys(d40d->lli_phy.dst),
				d40c->dst_def_cfg,
				d40c->dma_cfg.dst_info.data_width,
				d40c->dma_cfg.src_info.data_width,
				d40c->dma_cfg.dst_info.psize);
	if (res < 0)
		return res;

	(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
			      d40d->lli_pool.size, DMA_TO_DEVICE);
	return 0;
}
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
							 struct scatterlist *sgl,
							 unsigned int sg_len,
							 enum dma_data_direction direction,
							 unsigned long dma_flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;
	int err;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Cannot prepare unallocated channel\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL)
		goto err;

	if (d40c->log_num != D40_PHY_CHAN)
		err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
					    direction, dma_flags);
	else
		err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
					    direction, dma_flags);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to prepare %s slave sg job: %d\n",
			__func__,
			d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
		goto err;
	}

	d40d->txd.flags = dma_flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	spin_unlock_irqrestore(&d40c->lock, flags);
	return &d40d->txd;

err:
	if (d40d)
		d40_desc_free(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}
static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Cannot read status of unallocated channel\n",
			__func__);
		return -EINVAL;
	}

	last_complete = d40c->completed;
	last_used = chan->cookie;

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;
	else
		ret = dma_async_is_complete(cookie, last_complete, last_used);

	dma_set_tx_state(txstate, last_complete, last_used,
			 stedma40_residue(chan));

	return ret;
}
static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated!\n", __func__);
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	/* Busy means that pending jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}
/* Runtime reconfiguration extension */
static void d40_set_runtime_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
	enum dma_slave_buswidth config_addr_width;
	dma_addr_t config_addr;
	u32 config_maxburst;
	enum stedma40_periph_data_width addr_width;
	int psize;

	if (config->direction == DMA_FROM_DEVICE) {
		dma_addr_t dev_addr_rx =
			d40c->base->plat_data->dev_rx[cfg->src_dev_type];

		config_addr = config->src_addr;
		if (dev_addr_rx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired RX address %08x "
				"overriding with %08x\n",
				dev_addr_rx, config_addr);
		if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
			dev_dbg(d40c->base->dev,
				"channel was not configured for peripheral "
				"to memory transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_PERIPH_TO_MEM;

		config_addr_width = config->src_addr_width;
		config_maxburst = config->src_maxburst;

	} else if (config->direction == DMA_TO_DEVICE) {
		dma_addr_t dev_addr_tx =
			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];

		config_addr = config->dst_addr;
		if (dev_addr_tx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired TX address %08x "
				"overriding with %08x\n",
				dev_addr_tx, config_addr);
		if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
			dev_dbg(d40c->base->dev,
				"channel was not configured for memory "
				"to peripheral transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_MEM_TO_PERIPH;

		config_addr_width = config->dst_addr_width;
		config_maxburst = config->dst_maxburst;

	} else {
		dev_err(d40c->base->dev,
			"unrecognized channel direction %d\n",
			config->direction);
		return;
	}
	switch (config_addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		addr_width = STEDMA40_BYTE_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		addr_width = STEDMA40_HALFWORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		addr_width = STEDMA40_WORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		addr_width = STEDMA40_DOUBLEWORD_WIDTH;
		break;
	default:
		dev_err(d40c->base->dev,
			"illegal peripheral address width "
			"requested (%d)\n",
			config->src_addr_width);
		return;
	}
	if (d40c->log_num != D40_PHY_CHAN) {
		if (config_maxburst >= 16)
			psize = STEDMA40_PSIZE_LOG_16;
		else if (config_maxburst >= 8)
			psize = STEDMA40_PSIZE_LOG_8;
		else if (config_maxburst >= 4)
			psize = STEDMA40_PSIZE_LOG_4;
		else
			psize = STEDMA40_PSIZE_LOG_1;
	} else {
		if (config_maxburst >= 16)
			psize = STEDMA40_PSIZE_PHY_16;
		else if (config_maxburst >= 8)
			psize = STEDMA40_PSIZE_PHY_8;
		else if (config_maxburst >= 4)
			psize = STEDMA40_PSIZE_PHY_4;
		else if (config_maxburst >= 2)
			psize = STEDMA40_PSIZE_PHY_2;
		else
			psize = STEDMA40_PSIZE_PHY_1;
	}
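	/*
	 * Example (illustrative): a client asking for src_maxburst = 8 on
	 * a logical channel gets STEDMA40_PSIZE_LOG_8, i.e. 8-element
	 * bursts; values in between round down, so maxburst = 6 yields
	 * STEDMA40_PSIZE_LOG_4.
	 */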
	/* Set up all the endpoint configs */
	cfg->src_info.data_width = addr_width;
	cfg->src_info.psize = psize;
	cfg->src_info.big_endian = false;
	cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
	cfg->dst_info.data_width = addr_width;
	cfg->dst_info.psize = psize;
	cfg->dst_info.big_endian = false;
	cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;

	/* Fill in register values */
	if (d40c->log_num != D40_PHY_CHAN)
		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
	else
		d40_phy_cfg(cfg, &d40c->src_def_cfg,
			    &d40c->dst_def_cfg, false);

	/* These settings will take precedence later */
	d40c->runtime_addr = config_addr;
	d40c->runtime_direction = config->direction;
	dev_dbg(d40c->base->dev,
		"configured channel %s for %s, data width %d, "
		"maxburst %d bytes, LE, no flow control\n",
		dma_chan_name(chan),
		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
		config_addr_width,
		config_maxburst);
}
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	unsigned long flags;
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated!\n", __func__);
		return -EINVAL;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&d40c->lock, flags);
		d40_term_all(d40c);
		spin_unlock_irqrestore(&d40c->lock, flags);
		return 0;
	case DMA_PAUSE:
		return d40_pause(chan);
	case DMA_RESUME:
		return d40_resume(chan);
	case DMA_SLAVE_CONFIG:
		d40_set_runtime_config(chan,
			(struct dma_slave_config *) arg);
		return 0;
	default:
		break;
	}

	/* Other commands are unimplemented */
	return -ENXIO;
}
/* Initialization functions */

static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	int i = 0;
	struct d40_chan *d40c;

	INIT_LIST_HEAD(&dma->channels);

	for (i = offset; i < offset + num_chans; i++) {
		d40c = &chans[i];
		d40c->base = base;
		d40c->chan.device = dma;

		spin_lock_init(&d40c->lock);

		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->client);

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
	}
}
2459 static int __init d40_dmaengine_init(struct d40_base *base,
2460 int num_reserved_chans)
2464 d40_chan_init(base, &base->dma_slave, base->log_chans,
2465 0, base->num_log_chans);
2467 dma_cap_zero(base->dma_slave.cap_mask);
2468 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2470 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2471 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2472 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2473 base->dma_slave.device_prep_dma_sg = d40_prep_sg;
2474 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2475 base->dma_slave.device_tx_status = d40_tx_status;
2476 base->dma_slave.device_issue_pending = d40_issue_pending;
2477 base->dma_slave.device_control = d40_control;
2478 base->dma_slave.dev = base->dev;
2480 err = dma_async_device_register(&base->dma_slave);
2484 "[%s] Failed to register slave channels\n",
	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, base->plat_data->memcpy_len);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);

	base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
	base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_memcpy.device_prep_dma_sg = d40_prep_sg;
	base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_memcpy.device_tx_status = d40_tx_status;
	base->dma_memcpy.device_issue_pending = d40_issue_pending;
	base->dma_memcpy.device_control = d40_control;
	base->dma_memcpy.dev = base->dev;
	/*
	 * This controller can only access addresses at even
	 * 32-bit boundaries, i.e. 2^2.
	 */
	base->dma_memcpy.copy_align = 2;
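	/*
	 * copy_align is a power-of-two shift: a value of 2 tells the
	 * dmaengine core that memcpy buffers must be aligned to
	 * 1 << 2 = 4 bytes.
	 */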
	err = dma_async_device_register(&base->dma_memcpy);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register memcpy only channels\n",
			__func__);
		goto failure2;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
	dma_cap_set(DMA_SG, base->dma_both.cap_mask);

	base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_both.device_free_chan_resources = d40_free_chan_resources;
	base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_both.device_prep_dma_sg = d40_prep_sg;
	base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_both.device_tx_status = d40_tx_status;
	base->dma_both.device_issue_pending = d40_issue_pending;
	base->dma_both.device_control = d40_control;
	base->dma_both.dev = base->dev;
	base->dma_both.copy_align = 2;
	err = dma_async_device_register(&base->dma_both);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register logical and physical capable channels\n",
			__func__);
		goto failure3;
	}
	return 0;
failure3:
	dma_async_device_unregister(&base->dma_memcpy);
failure2:
	dma_async_device_unregister(&base->dma_slave);
failure1:
	return err;
}
/* Initialization functions. */
static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;

	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);
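	/*
	 * Each physical channel has a 2-bit mode field (the value 1 marks a
	 * secure channel): even numbered channels are reported in PRSME and
	 * odd numbered ones in PRSMO, which is why the loop below alternates
	 * between val[0] and val[1].
	 */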
	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}
	/* Mark disabled channels as occupied */
	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
		int chan = base->plat_data->disabled_channels[i];

		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
		num_phy_chans_avail--;
	}

	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);
	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	return num_phy_chans_avail;
}
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	static const struct d40_reg_val dma_id_regs[] = {
		/* Peripheral Id */
		{ .reg = D40_DREG_PERIPHID0, .val = 0x0040},
		{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
		/*
		 * D40_DREG_PERIPHID2 depends on HW revision:
		 * MOP500/HREF ED has 0x0008,
		 * HREF V1 has 0x0028
		 */
		{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},
		/* PCell Id */
		{ .reg = D40_DREG_CELLID0, .val = 0x000d},
		{ .reg = D40_DREG_CELLID1, .val = 0x00f0},
		{ .reg = D40_DREG_CELLID2, .val = 0x0005},
		{ .reg = D40_DREG_CELLID3, .val = 0x00b1}
	};
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int i;
	u32 val;
	u32 rev;
	clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "[%s] No matching clock found\n",
			__func__);
		goto failure;
	}

	clk_enable(clk);

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;
	/* HW version check */
	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
		if (dma_id_regs[i].val !=
		    readl(virtbase + dma_id_regs[i].reg)) {
			dev_err(&pdev->dev,
				"[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
				__func__,
				dma_id_regs[i].val,
				dma_id_regs[i].reg,
				readl(virtbase + dma_id_regs[i].reg));
			goto failure;
		}
	}
	/* Get silicon revision and designer */
	val = readl(virtbase + D40_DREG_PERIPHID2);

	if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
	    D40_HW_DESIGNER) {
		dev_err(&pdev->dev,
			"[%s] Unknown designer! Got %x wanted %x\n",
			__func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
			D40_HW_DESIGNER);
		goto failure;
	}

	rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
		D40_DREG_PERIPHID2_REV_POS;

	/* The number of physical channels on this HW */
	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
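	/*
	 * ICFG[2:0] encodes the channel count in steps of four, so this
	 * works out to between 4 and 32 physical channels.
	 */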
	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
		 rev, res->start);
	plat_data = pdev->dev.platform_data;

	/* Count the number of logical channels in use */
	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_rx[i] != 0)
			num_log_chans++;

	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_tx[i] != 0)
			num_log_chans++;
	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
		goto failure;
	}

	base->rev = rev;
	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];
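	/*
	 * phy_chans and log_chans are carved out of the single allocation
	 * made above: the physical channel array starts just past the
	 * aligned struct d40_base and the logical channel array directly
	 * after the physical one.
	 */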
	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	if (num_log_chans + plat_data->memcpy_len) {
		/*
		 * The max number of logical channels is the number of event
		 * lines for all src and dst devices combined.
		 */
		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
						 sizeof(struct d40_chan *),
						 GFP_KERNEL);
		if (!base->lookup_log_chans)
			goto failure;
	}

	base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
					    sizeof(struct d40_desc *) *
					    D40_LCLA_LINK_PER_EVENT_GRP,
					    GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (base->desc_slab == NULL)
		goto failure;

	return base;
failure:
	if (!IS_ERR(clk)) {
		clk_disable(clk);
		clk_put(clk);
	}
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));

	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}
static void __init d40_hw_init(struct d40_base *base)
{
	static const struct d40_reg_val dma_init_reg[] = {
		/* Clock every part of the DMA block from start */
		{ .reg = D40_DREG_GCC, .val = 0x0000ff01},

		/* Interrupts on all logical channels */
		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
	};
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;

	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);
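	/*
	 * The loop below visits the channels from highest to lowest
	 * (num_phy_chans - i - 1) while shifting the accumulated values two
	 * bits left per iteration, so channel 0 ends up in the least
	 * significant bits of each register value.
	 */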
	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}
	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);
}
static int __init d40_lcla_allocate(struct d40_base *base)
{
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned.
	 * To fulfil this hardware requirement without wasting 256 KB we
	 * allocate pages until we get an aligned one.
	 */
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);

	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Calculate how many pages are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
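	/*
	 * 1 KiB of LCLA space is set aside per physical channel; with
	 * D40_LCLA_LINK_PER_EVENT_GRP (128) links per event group that
	 * leaves 8 bytes for each link.
	 */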
	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {

			dev_err(base->dev,
				"[%s] Failed to allocate %d pages.\n",
				__func__, base->lcla_pool.pages);

			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);
	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts and no success with finding the correct
		 * alignment, try with allocating a big buffer.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);

failure:
	kfree(page_list);
	return ret;
}
static int __init d40_probe(struct platform_device *pdev)
{
	int err;
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);

	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);
	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		dev_err(&pdev->dev,
			"[%s] No \"lcpa\" memory resource\n",
			__func__);
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		dev_err(&pdev->dev,
			"[%s] Failed to request LCPA region 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}
	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);
	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev,
			"[%s] Failed to ioremap LCPA region\n",
			__func__);
		goto failure;
	}

	ret = d40_lcla_allocate(base);
	if (ret) {
		dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
			__func__);
		goto failure;
	}

	spin_lock_init(&base->lcla_pool.lock);
	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);

	if (ret) {
		dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
		goto failure;
	}

	err = d40_dmaengine_init(base, num_reserved_chans);
	if (err)
		goto failure;

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;
failure:
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);
		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);

		kfree(base->lcla_pool.base_unaligned);

		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);

		if (base->clk) {
			clk_disable(base->clk);
			clk_put(base->clk);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
	return ret;
}
static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
	},
};

int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
arch_initcall(stedma40_init);