// SPDX-License-Identifier: GPL-2.0+
//
// Actions Semi Owl SoCs DMA driver
//
// Copyright (c) 2014 Actions Semi Inc.
// Author: David Liu <[email protected]>
//
// Copyright (c) 2018 Linaro Ltd.
// Author: Manivannan Sadhasivam <[email protected]>

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/slab.h>
#include "virt-dma.h"

#define OWL_DMA_FRAME_MAX_LENGTH		0xfffff

/* Global DMA Controller Registers */
#define OWL_DMA_IRQ_PD0				0x00
#define OWL_DMA_IRQ_PD1				0x04
#define OWL_DMA_IRQ_PD2				0x08
#define OWL_DMA_IRQ_PD3				0x0C
#define OWL_DMA_IRQ_EN0				0x10
#define OWL_DMA_IRQ_EN1				0x14
#define OWL_DMA_IRQ_EN2				0x18
#define OWL_DMA_IRQ_EN3				0x1C
#define OWL_DMA_SECURE_ACCESS_CTL		0x20
#define OWL_DMA_NIC_QOS				0x24
#define OWL_DMA_DBGSEL				0x28
#define OWL_DMA_IDLE_STAT			0x2C

/* Channel Registers */
#define OWL_DMA_CHAN_BASE(i)			(0x100 + (i) * 0x100)
#define OWL_DMAX_MODE				0x00
#define OWL_DMAX_SOURCE				0x04
#define OWL_DMAX_DESTINATION			0x08
#define OWL_DMAX_FRAME_LEN			0x0C
#define OWL_DMAX_FRAME_CNT			0x10
#define OWL_DMAX_REMAIN_FRAME_CNT		0x14
#define OWL_DMAX_REMAIN_CNT			0x18
#define OWL_DMAX_SOURCE_STRIDE			0x1C
#define OWL_DMAX_DESTINATION_STRIDE		0x20
#define OWL_DMAX_START				0x24
#define OWL_DMAX_PAUSE				0x28
#define OWL_DMAX_CHAINED_CTL			0x2C
#define OWL_DMAX_CONSTANT			0x30
#define OWL_DMAX_LINKLIST_CTL			0x34
#define OWL_DMAX_NEXT_DESCRIPTOR		0x38
#define OWL_DMAX_CURRENT_DESCRIPTOR_NUM		0x3C
#define OWL_DMAX_INT_CTL			0x40
#define OWL_DMAX_INT_STATUS			0x44
#define OWL_DMAX_CURRENT_SOURCE_POINTER		0x48
#define OWL_DMAX_CURRENT_DESTINATION_POINTER	0x4C

/* OWL_DMAX_MODE Bits */
#define OWL_DMA_MODE_TS(x)			(((x) & GENMASK(5, 0)) << 0)
#define OWL_DMA_MODE_ST(x)			(((x) & GENMASK(1, 0)) << 8)
#define	OWL_DMA_MODE_ST_DEV			OWL_DMA_MODE_ST(0)
#define	OWL_DMA_MODE_ST_DCU			OWL_DMA_MODE_ST(2)
#define	OWL_DMA_MODE_ST_SRAM			OWL_DMA_MODE_ST(3)
#define OWL_DMA_MODE_DT(x)			(((x) & GENMASK(1, 0)) << 10)
#define	OWL_DMA_MODE_DT_DEV			OWL_DMA_MODE_DT(0)
#define	OWL_DMA_MODE_DT_DCU			OWL_DMA_MODE_DT(2)
#define	OWL_DMA_MODE_DT_SRAM			OWL_DMA_MODE_DT(3)
#define OWL_DMA_MODE_SAM(x)			(((x) & GENMASK(1, 0)) << 16)
#define	OWL_DMA_MODE_SAM_CONST			OWL_DMA_MODE_SAM(0)
#define	OWL_DMA_MODE_SAM_INC			OWL_DMA_MODE_SAM(1)
#define	OWL_DMA_MODE_SAM_STRIDE			OWL_DMA_MODE_SAM(2)
#define OWL_DMA_MODE_DAM(x)			(((x) & GENMASK(1, 0)) << 18)
#define	OWL_DMA_MODE_DAM_CONST			OWL_DMA_MODE_DAM(0)
#define	OWL_DMA_MODE_DAM_INC			OWL_DMA_MODE_DAM(1)
#define	OWL_DMA_MODE_DAM_STRIDE			OWL_DMA_MODE_DAM(2)
#define OWL_DMA_MODE_PW(x)			(((x) & GENMASK(2, 0)) << 20)
#define OWL_DMA_MODE_CB				BIT(23)
#define OWL_DMA_MODE_NDDBW(x)			(((x) & 0x1) << 28)
#define	OWL_DMA_MODE_NDDBW_32BIT		OWL_DMA_MODE_NDDBW(0)
#define	OWL_DMA_MODE_NDDBW_8BIT			OWL_DMA_MODE_NDDBW(1)
#define OWL_DMA_MODE_CFE			BIT(29)
#define OWL_DMA_MODE_LME			BIT(30)
#define OWL_DMA_MODE_CME			BIT(31)

/* OWL_DMAX_LINKLIST_CTL Bits */
#define OWL_DMA_LLC_SAV(x)			(((x) & GENMASK(1, 0)) << 8)
#define	OWL_DMA_LLC_SAV_INC			OWL_DMA_LLC_SAV(0)
#define	OWL_DMA_LLC_SAV_LOAD_NEXT		OWL_DMA_LLC_SAV(1)
#define	OWL_DMA_LLC_SAV_LOAD_PREV		OWL_DMA_LLC_SAV(2)
#define OWL_DMA_LLC_DAV(x)			(((x) & GENMASK(1, 0)) << 10)
#define	OWL_DMA_LLC_DAV_INC			OWL_DMA_LLC_DAV(0)
#define	OWL_DMA_LLC_DAV_LOAD_NEXT		OWL_DMA_LLC_DAV(1)
#define	OWL_DMA_LLC_DAV_LOAD_PREV		OWL_DMA_LLC_DAV(2)
#define OWL_DMA_LLC_SUSPEND			BIT(16)

/* OWL_DMAX_INT_CTL Bits */
#define OWL_DMA_INTCTL_BLOCK			BIT(0)
#define OWL_DMA_INTCTL_SUPER_BLOCK		BIT(1)
#define OWL_DMA_INTCTL_FRAME			BIT(2)
#define OWL_DMA_INTCTL_HALF_FRAME		BIT(3)
#define OWL_DMA_INTCTL_LAST_FRAME		BIT(4)

/* OWL_DMAX_INT_STATUS Bits */
#define OWL_DMA_INTSTAT_BLOCK			BIT(0)
#define OWL_DMA_INTSTAT_SUPER_BLOCK		BIT(1)
#define OWL_DMA_INTSTAT_FRAME			BIT(2)
#define OWL_DMA_INTSTAT_HALF_FRAME		BIT(3)
#define OWL_DMA_INTSTAT_LAST_FRAME		BIT(4)

/* Pack shift and newshift in a single word */
#define BIT_FIELD(val, width, shift, newshift)	\
		((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift))
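/*
 * For example, BIT_FIELD(mode, 8, 16, 20) extracts bits [23:16] of 'mode'
 * and places them at bits [27:20] of the result; llc_hw_ctrla() below relies
 * on this to repack register-layout fields into the descriptor words.
 */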

/* Frame count value is fixed as 1 */
#define FCNT_VAL				0x1

/**
 * enum owl_dmadesc_offsets - Describe DMA descriptor, hardware link
 * list for dma transfer
 * @OWL_DMADESC_NEXT_LLI: physical address of the next link list
 * @OWL_DMADESC_SADDR: source physical address
 * @OWL_DMADESC_DADDR: destination physical address
 * @OWL_DMADESC_FLEN: frame length
 * @OWL_DMADESC_SRC_STRIDE: source stride
 * @OWL_DMADESC_DST_STRIDE: destination stride
 * @OWL_DMADESC_CTRLA: dma_mode and linklist ctrl config
 * @OWL_DMADESC_CTRLB: interrupt config
 * @OWL_DMADESC_CONST_NUM: data for constant fill
 */
enum owl_dmadesc_offsets {
	OWL_DMADESC_NEXT_LLI = 0,
	OWL_DMADESC_SADDR,
	OWL_DMADESC_DADDR,
	OWL_DMADESC_FLEN,
	OWL_DMADESC_SRC_STRIDE,
	OWL_DMADESC_DST_STRIDE,
	OWL_DMADESC_CTRLA,
	OWL_DMADESC_CTRLB,
	OWL_DMADESC_CONST_NUM,
	OWL_DMADESC_SIZE
};

/**
 * struct owl_dma_lli - Link list for dma transfer
 * @hw: hardware link list
 * @phys: physical address of hardware link list
 * @node: node for txd's lli_list
 */
struct owl_dma_lli {
	u32			hw[OWL_DMADESC_SIZE];
	dma_addr_t		phys;
	struct list_head	node;
};

/**
 * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @lli_list: link list of lli nodes
 * @cyclic: flag to indicate cyclic transfers
 */
struct owl_dma_txd {
	struct virt_dma_desc	vd;
	struct list_head	lli_list;
	bool			cyclic;
};

/**
 * struct owl_dma_pchan - Holder for the physical channels
 * @id: physical index to this channel
 * @base: virtual memory base for the dma channel
 * @vchan: the virtual channel currently being served by this physical channel
 */
struct owl_dma_pchan {
	u32			id;
	void __iomem		*base;
	struct owl_dma_vchan	*vchan;
};

/**
 * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @pchan: the physical channel utilized by this channel
 * @txd: active transaction on this channel
 * @cfg: slave configuration for this channel
 * @drq: physical DMA request ID for this channel
 */
struct owl_dma_vchan {
	struct virt_dma_chan	vc;
	struct owl_dma_pchan	*pchan;
	struct owl_dma_txd	*txd;
	struct dma_slave_config cfg;
	u8			drq;
};

/**
 * struct owl_dma - Holder for the Owl DMA controller
 * @dma: dma engine for this instance
 * @base: virtual memory base for the DMA controller
 * @clk: clock for the DMA controller
 * @lock: a lock to use when changing DMA controller global registers
 * @lli_pool: a pool for the LLI descriptors
 * @irq: interrupt ID for the DMA controller
 * @nr_pchans: the number of physical channels
 * @pchans: array of data for the physical channels
 * @nr_vchans: the number of virtual channels
 * @vchans: array of data for the virtual channels
 */
struct owl_dma {
	struct dma_device	dma;
	void __iomem		*base;
	struct clk		*clk;
	spinlock_t		lock;
	struct dma_pool		*lli_pool;
	int			irq;

	unsigned int		nr_pchans;
	struct owl_dma_pchan	*pchans;

	unsigned int		nr_vchans;
	struct owl_dma_vchan	*vchans;
};

static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
			 u32 val, bool state)
{
	u32 regval;

	regval = readl(pchan->base + reg);

	if (state)
		regval |= val;
	else
		regval &= ~val;

	writel(val, pchan->base + reg);
}

static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
{
	writel(data, pchan->base + reg);
}

static u32 pchan_readl(struct owl_dma_pchan *pchan, u32 reg)
{
	return readl(pchan->base + reg);
}

static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
{
	u32 regval;

	regval = readl(od->base + reg);

	if (state)
		regval |= val;
	else
		regval &= ~val;

	writel(regval, od->base + reg);
}

static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
{
	writel(data, od->base + reg);
}

static u32 dma_readl(struct owl_dma *od, u32 reg)
{
	return readl(od->base + reg);
}

static inline struct owl_dma *to_owl_dma(struct dma_device *dd)
{
	return container_of(dd, struct owl_dma, dma);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct owl_dma_vchan *to_owl_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct owl_dma_vchan, vc.chan);
}

static inline struct owl_dma_txd *to_owl_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct owl_dma_txd, vd.tx);
}

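/*
 * Repack the OWL_DMAX_MODE and OWL_DMAX_LINKLIST_CTL register fields into
 * the bit layout expected by the CTRLA word of a hardware link-list
 * descriptor.
 */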
static inline u32 llc_hw_ctrla(u32 mode, u32 llc_ctl)
{
	u32 ctl;

	ctl = BIT_FIELD(mode, 4, 28, 28) |
	      BIT_FIELD(mode, 8, 16, 20) |
	      BIT_FIELD(mode, 4, 8, 16) |
	      BIT_FIELD(mode, 6, 0, 10) |
	      BIT_FIELD(llc_ctl, 2, 10, 8) |
	      BIT_FIELD(llc_ctl, 2, 8, 6);

	return ctl;
}

static inline u32 llc_hw_ctrlb(u32 int_ctl)
{
	u32 ctl;

	ctl = BIT_FIELD(int_ctl, 7, 0, 18);

	return ctl;
}

static u32 llc_hw_flen(struct owl_dma_lli *lli)
{
	return lli->hw[OWL_DMADESC_FLEN] & GENMASK(19, 0);
}

static void owl_dma_free_lli(struct owl_dma *od,
			     struct owl_dma_lli *lli)
{
	list_del(&lli->node);
	dma_pool_free(od->lli_pool, lli, lli->phys);
}

static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
{
	struct owl_dma_lli *lli;
	dma_addr_t phys;

	lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys);
	if (!lli)
		return NULL;

	INIT_LIST_HEAD(&lli->node);
	lli->phys = phys;

	return lli;
}

static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
					   struct owl_dma_lli *prev,
					   struct owl_dma_lli *next,
					   bool is_cyclic)
{
	if (!is_cyclic)
		list_add_tail(&next->node, &txd->lli_list);

	if (prev) {
		prev->hw[OWL_DMADESC_NEXT_LLI] = next->phys;
		prev->hw[OWL_DMADESC_CTRLA] |=
					llc_hw_ctrla(OWL_DMA_MODE_LME, 0);
	}

	return next;
}

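/*
 * Fill in one hardware link-list descriptor for a single frame of at most
 * OWL_DMA_FRAME_MAX_LENGTH bytes, using the transfer direction and the
 * slave configuration to choose source/destination types and address modes.
 */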
static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
				  struct owl_dma_lli *lli,
				  dma_addr_t src, dma_addr_t dst,
				  u32 len, enum dma_transfer_direction dir,
				  struct dma_slave_config *sconfig,
				  bool is_cyclic)
{
	u32 mode, ctrlb;

	mode = OWL_DMA_MODE_PW(0);

	switch (dir) {
	case DMA_MEM_TO_MEM:
		mode |= OWL_DMA_MODE_TS(0) | OWL_DMA_MODE_ST_DCU |
			OWL_DMA_MODE_DT_DCU | OWL_DMA_MODE_SAM_INC |
			OWL_DMA_MODE_DAM_INC;

		break;
	case DMA_MEM_TO_DEV:
		mode |= OWL_DMA_MODE_TS(vchan->drq)
			| OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DEV
			| OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_CONST;

		/*
		 * Hardware only supports 32bit and 8bit buswidth. Since the
		 * default is 32bit, select 8bit only when requested.
		 */
		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			mode |= OWL_DMA_MODE_NDDBW_8BIT;

		break;
	case DMA_DEV_TO_MEM:
		mode |= OWL_DMA_MODE_TS(vchan->drq)
			| OWL_DMA_MODE_ST_DEV | OWL_DMA_MODE_DT_DCU
			| OWL_DMA_MODE_SAM_CONST | OWL_DMA_MODE_DAM_INC;

		/*
		 * Hardware only supports 32bit and 8bit buswidth. Since the
		 * default is 32bit, select 8bit only when requested.
		 */
		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			mode |= OWL_DMA_MODE_NDDBW_8BIT;

		break;
	default:
		return -EINVAL;
	}

	lli->hw[OWL_DMADESC_CTRLA] = llc_hw_ctrla(mode,
						  OWL_DMA_LLC_SAV_LOAD_NEXT |
						  OWL_DMA_LLC_DAV_LOAD_NEXT);

	if (is_cyclic)
		ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_BLOCK);
	else
		ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);

	lli->hw[OWL_DMADESC_NEXT_LLI] = 0; /* One link list by default */
	lli->hw[OWL_DMADESC_SADDR] = src;
	lli->hw[OWL_DMADESC_DADDR] = dst;
	lli->hw[OWL_DMADESC_SRC_STRIDE] = 0;
	lli->hw[OWL_DMADESC_DST_STRIDE] = 0;
	/*
	 * The word at offset 0xC is shared between frame length (max frame
	 * length is 1MB) and frame count: the first 20 bits hold the frame
	 * length and the remaining 12 bits hold the frame count.
	 */
	lli->hw[OWL_DMADESC_FLEN] = len | FCNT_VAL << 20;
	lli->hw[OWL_DMADESC_CTRLB] = ctrlb;

	return 0;
}

static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
					       struct owl_dma_vchan *vchan)
{
	struct owl_dma_pchan *pchan = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < od->nr_pchans; i++) {
		pchan = &od->pchans[i];

		spin_lock_irqsave(&od->lock, flags);
		if (!pchan->vchan) {
			pchan->vchan = vchan;
			spin_unlock_irqrestore(&od->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&od->lock, flags);
	}

	return pchan;
}

static int owl_dma_pchan_busy(struct owl_dma *od, struct owl_dma_pchan *pchan)
{
	unsigned int val;

	val = dma_readl(od, OWL_DMA_IDLE_STAT);

	return !(val & (1 << pchan->id));
}

static void owl_dma_terminate_pchan(struct owl_dma *od,
				    struct owl_dma_pchan *pchan)
{
	unsigned long flags;
	u32 irq_pd;

	pchan_writel(pchan, OWL_DMAX_START, 0);
	pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);

	spin_lock_irqsave(&od->lock, flags);
	dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false);

	irq_pd = dma_readl(od, OWL_DMA_IRQ_PD0);
	if (irq_pd & (1 << pchan->id)) {
		dev_warn(od->dma.dev,
			 "terminating pchan %d that still has pending irq\n",
			 pchan->id);
		dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id));
	}

	pchan->vchan = NULL;

	spin_unlock_irqrestore(&od->lock, flags);
}

static void owl_dma_pause_pchan(struct owl_dma_pchan *pchan)
{
	pchan_writel(pchan, OWL_DMAX_PAUSE, 0x1);
}

static void owl_dma_resume_pchan(struct owl_dma_pchan *pchan)
{
	pchan_writel(pchan, OWL_DMAX_PAUSE, 0x0);
}

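/*
 * Take the next queued descriptor off the virtual channel and program its
 * first link-list node onto the physical channel that has already been
 * claimed for this vchan, then kick off the transfer.
 */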
static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
{
	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&vchan->vc);
	struct owl_dma_pchan *pchan = vchan->pchan;
	struct owl_dma_txd *txd = to_owl_txd(&vd->tx);
	struct owl_dma_lli *lli;
	unsigned long flags;
	u32 int_ctl;

	list_del(&vd->node);

	vchan->txd = txd;

	/* Wait for channel inactive */
	while (owl_dma_pchan_busy(od, pchan))
		cpu_relax();

	lli = list_first_entry(&txd->lli_list,
			       struct owl_dma_lli, node);

	if (txd->cyclic)
		int_ctl = OWL_DMA_INTCTL_BLOCK;
	else
		int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;

	pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
	pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
		     OWL_DMA_LLC_SAV_LOAD_NEXT | OWL_DMA_LLC_DAV_LOAD_NEXT);
	pchan_writel(pchan, OWL_DMAX_NEXT_DESCRIPTOR, lli->phys);
	pchan_writel(pchan, OWL_DMAX_INT_CTL, int_ctl);

	/* Clear IRQ status for this pchan */
	pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);

	spin_lock_irqsave(&od->lock, flags);

	dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true);

	spin_unlock_irqrestore(&od->lock, flags);

	dev_dbg(chan2dev(&vchan->vc.chan), "starting pchan %d\n", pchan->id);

	/* Start DMA transfer for this pchan */
	pchan_writel(pchan, OWL_DMAX_START, 0x1);

	return 0;
}

static void owl_dma_phy_free(struct owl_dma *od, struct owl_dma_vchan *vchan)
{
	/* Ensure that the physical channel is stopped */
	owl_dma_terminate_pchan(od, vchan->pchan);

	vchan->pchan = NULL;
}

static irqreturn_t owl_dma_interrupt(int irq, void *dev_id)
{
	struct owl_dma *od = dev_id;
	struct owl_dma_vchan *vchan;
	struct owl_dma_pchan *pchan;
	unsigned long pending;
	int i;
	unsigned int global_irq_pending, chan_irq_pending;

	spin_lock(&od->lock);

	pending = dma_readl(od, OWL_DMA_IRQ_PD0);

	/* Clear IRQ status for each pchan */
	for_each_set_bit(i, &pending, od->nr_pchans) {
		pchan = &od->pchans[i];
		pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
	}

	/* Clear pending IRQ */
	dma_writel(od, OWL_DMA_IRQ_PD0, pending);

	/* Check missed pending IRQ */
	for (i = 0; i < od->nr_pchans; i++) {
		pchan = &od->pchans[i];
		chan_irq_pending = pchan_readl(pchan, OWL_DMAX_INT_CTL) &
				   pchan_readl(pchan, OWL_DMAX_INT_STATUS);

		/* Dummy read to ensure OWL_DMA_IRQ_PD0 value is updated */
		dma_readl(od, OWL_DMA_IRQ_PD0);

		global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0);

		if (chan_irq_pending && !(global_irq_pending & BIT(i))) {
			dev_dbg(od->dma.dev,
				"global and channel IRQ pending match err\n");

			/* Clear IRQ status for this pchan */
			pchan_update(pchan, OWL_DMAX_INT_STATUS,
				     0xff, false);

			/* Update global IRQ pending */
			pending |= BIT(i);
		}
	}

	spin_unlock(&od->lock);

	for_each_set_bit(i, &pending, od->nr_pchans) {
		struct owl_dma_txd *txd;

		pchan = &od->pchans[i];

		vchan = pchan->vchan;
		if (!vchan) {
			dev_warn(od->dma.dev, "no vchan attached on pchan %d\n",
				 pchan->id);
			continue;
		}

		spin_lock(&vchan->vc.lock);

		txd = vchan->txd;
		if (txd) {
			vchan->txd = NULL;

			vchan_cookie_complete(&txd->vd);

			/*
			 * Start the next descriptor (if any),
			 * otherwise free this channel.
			 */
			if (vchan_next_desc(&vchan->vc))
				owl_dma_start_next_txd(vchan);
			else
				owl_dma_phy_free(od, vchan);
		}

		spin_unlock(&vchan->vc.lock);
	}

	return IRQ_HANDLED;
}

static void owl_dma_free_txd(struct owl_dma *od, struct owl_dma_txd *txd)
{
	struct owl_dma_lli *lli, *_lli;

	if (unlikely(!txd))
		return;

	list_for_each_entry_safe(lli, _lli, &txd->lli_list, node)
		owl_dma_free_lli(od, lli);

	kfree(txd);
}

static void owl_dma_desc_free(struct virt_dma_desc *vd)
{
	struct owl_dma *od = to_owl_dma(vd->tx.chan->device);
	struct owl_dma_txd *txd = to_owl_txd(&vd->tx);

	owl_dma_free_txd(od, txd);
}

static int owl_dma_terminate_all(struct dma_chan *chan)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (vchan->pchan)
		owl_dma_phy_free(od, vchan);

	if (vchan->txd) {
		owl_dma_desc_free(&vchan->txd->vd);
		vchan->txd = NULL;
	}

	vchan_get_all_descriptors(&vchan->vc, &head);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	vchan_dma_desc_free_list(&vchan->vc, &head);

	return 0;
}

static int owl_dma_config(struct dma_chan *chan,
			  struct dma_slave_config *config)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config));

	return 0;
}

static int owl_dma_pause(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	if (!vchan->pchan && !vchan->txd)
		return 0;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	owl_dma_pause_pchan(vchan->pchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}

static int owl_dma_resume(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	if (!vchan->pchan && !vchan->txd)
		return 0;

	dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	owl_dma_resume_pchan(vchan->pchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}

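/*
 * Return the number of bytes still to be transferred by the transaction
 * currently active on this vchan's physical channel: the remain count of
 * the running node plus the frame lengths of the nodes not yet started.
 */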
static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan)
{
	struct owl_dma_pchan *pchan;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli;
	unsigned int next_lli_phy;
	size_t bytes;

	pchan = vchan->pchan;
	txd = vchan->txd;

	if (!pchan || !txd)
		return 0;

	/* Get remain count of current node in link list */
	bytes = pchan_readl(pchan, OWL_DMAX_REMAIN_CNT);

	/* Loop through the preceding nodes to get total remaining bytes */
	if (pchan_readl(pchan, OWL_DMAX_MODE) & OWL_DMA_MODE_LME) {
		next_lli_phy = pchan_readl(pchan, OWL_DMAX_NEXT_DESCRIPTOR);
		list_for_each_entry(lli, &txd->lli_list, node) {
			/* Start from the next active node */
			if (lli->phys == next_lli_phy) {
				list_for_each_entry(lli, &txd->lli_list, node)
					bytes += llc_hw_flen(lli);
				break;
			}
		}
	}

	return bytes;
}

static enum dma_status owl_dma_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *state)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct owl_dma_lli *lli;
	struct virt_dma_desc *vd;
	struct owl_dma_txd *txd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE || !state)
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	vd = vchan_find_desc(&vchan->vc, cookie);
	if (vd) {
		txd = to_owl_txd(&vd->tx);
		list_for_each_entry(lli, &txd->lli_list, node)
			bytes += llc_hw_flen(lli);
	} else {
		bytes = owl_dma_getbytes_chan(vchan);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	dma_set_residue(state, bytes);

	return ret;
}

static void owl_dma_phy_alloc_and_start(struct owl_dma_vchan *vchan)
{
	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
	struct owl_dma_pchan *pchan;

	pchan = owl_dma_get_pchan(od, vchan);
	if (!pchan)
		return;

	dev_dbg(od->dma.dev, "allocated pchan %d\n", pchan->id);

	vchan->pchan = pchan;
	owl_dma_start_next_txd(vchan);
}

static void owl_dma_issue_pending(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	if (vchan_issue_pending(&vchan->vc)) {
		if (!vchan->pchan)
			owl_dma_phy_alloc_and_start(vchan);
	}
	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}

static struct dma_async_tx_descriptor
		*owl_dma_prep_memcpy(struct dma_chan *chan,
				     dma_addr_t dst, dma_addr_t src,
				     size_t len, unsigned long flags)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli, *prev = NULL;
	size_t offset, bytes;
	int ret;

	if (!len)
		return NULL;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	INIT_LIST_HEAD(&txd->lli_list);

	/* Process the transfer as frame by frame */
	for (offset = 0; offset < len; offset += bytes) {
		lli = owl_dma_alloc_lli(od);
		if (!lli) {
			dev_warn(chan2dev(chan), "failed to allocate lli\n");
			goto err_txd_free;
		}

		bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);

		ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
				      bytes, DMA_MEM_TO_MEM,
				      &vchan->cfg, txd->cyclic);
		if (ret) {
			dev_warn(chan2dev(chan), "failed to config lli\n");
			goto err_txd_free;
		}

		prev = owl_dma_add_lli(txd, prev, lli, false);
	}

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	owl_dma_free_txd(od, txd);
	return NULL;
}

static struct dma_async_tx_descriptor
		*owl_dma_prep_slave_sg(struct dma_chan *chan,
				       struct scatterlist *sgl,
				       unsigned int sg_len,
				       enum dma_transfer_direction dir,
				       unsigned long flags, void *context)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli, *prev = NULL;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	size_t len;
	int ret, i;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	INIT_LIST_HEAD(&txd->lli_list);

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if (len > OWL_DMA_FRAME_MAX_LENGTH) {
			dev_err(od->dma.dev,
				"frame length exceeds max supported length");
			goto err_txd_free;
		}

		lli = owl_dma_alloc_lli(od);
		if (!lli) {
			dev_err(chan2dev(chan), "failed to allocate lli");
			goto err_txd_free;
		}

		if (dir == DMA_MEM_TO_DEV) {
			src = addr;
			dst = sconfig->dst_addr;
		} else {
			src = sconfig->src_addr;
			dst = addr;
		}

		ret = owl_dma_cfg_lli(vchan, lli, src, dst, len, dir, sconfig,
				      txd->cyclic);
		if (ret) {
			dev_warn(chan2dev(chan), "failed to config lli");
			goto err_txd_free;
		}

		prev = owl_dma_add_lli(txd, prev, lli, false);
	}

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	owl_dma_free_txd(od, txd);

	return NULL;
}

static struct dma_async_tx_descriptor
		*owl_prep_dma_cyclic(struct dma_chan *chan,
				     dma_addr_t buf_addr, size_t buf_len,
				     size_t period_len,
				     enum dma_transfer_direction dir,
				     unsigned long flags)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli, *prev = NULL, *first = NULL;
	dma_addr_t src = 0, dst = 0;
	unsigned int periods = buf_len / period_len;
	int ret, i;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	INIT_LIST_HEAD(&txd->lli_list);
	txd->cyclic = true;

	for (i = 0; i < periods; i++) {
		lli = owl_dma_alloc_lli(od);
		if (!lli) {
			dev_warn(chan2dev(chan), "failed to allocate lli");
			goto err_txd_free;
		}

		if (dir == DMA_MEM_TO_DEV) {
			src = buf_addr + (period_len * i);
			dst = sconfig->dst_addr;
		} else if (dir == DMA_DEV_TO_MEM) {
			src = sconfig->src_addr;
			dst = buf_addr + (period_len * i);
		}

		ret = owl_dma_cfg_lli(vchan, lli, src, dst, period_len,
				      dir, sconfig, txd->cyclic);
		if (ret) {
			dev_warn(chan2dev(chan), "failed to config lli");
			goto err_txd_free;
		}

		if (!first)
			first = lli;

		prev = owl_dma_add_lli(txd, prev, lli, false);
	}

	/* close the cyclic list */
	owl_dma_add_lli(txd, prev, first, true);

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	owl_dma_free_txd(od, txd);

	return NULL;
}

static void owl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);

	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(&vchan->vc);
}

static inline void owl_dma_free(struct owl_dma *od)
{
	struct owl_dma_vchan *vchan = NULL;
	struct owl_dma_vchan *next;

	list_for_each_entry_safe(vchan,
				 next, &od->dma.channels, vc.chan.device_node) {
		list_del(&vchan->vc.chan.device_node);
		tasklet_kill(&vchan->vc.task);
	}
}

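/*
 * Translate a DT DMA specifier into a channel. The single specifier cell is
 * taken as the DRQ (DMA request line) number for the slave, so a consumer
 * node would reference the controller along the lines of (values here are
 * only illustrative):
 *
 *	dmas = <&dma 2>;
 *	dma-names = "tx";
 */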
static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct owl_dma *od = ofdma->of_dma_data;
	struct owl_dma_vchan *vchan;
	struct dma_chan *chan;
	u8 drq = dma_spec->args[0];

	if (drq > od->nr_vchans)
		return NULL;

	chan = dma_get_any_slave_channel(&od->dma);
	if (!chan)
		return NULL;

	vchan = to_owl_vchan(chan);
	vchan->drq = drq;

	return chan;
}

static int owl_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct owl_dma *od;
	int ret, i, nr_channels, nr_requests;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	od->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);

	ret = of_property_read_u32(np, "dma-channels", &nr_channels);
	if (ret) {
		dev_err(&pdev->dev, "can't get dma-channels\n");
		return ret;
	}

	ret = of_property_read_u32(np, "dma-requests", &nr_requests);
	if (ret) {
		dev_err(&pdev->dev, "can't get dma-requests\n");
		return ret;
	}

	dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
		 nr_channels, nr_requests);

	od->nr_pchans = nr_channels;
	od->nr_vchans = nr_requests;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

	platform_set_drvdata(pdev, od);
	spin_lock_init(&od->lock);

	dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);

	od->dma.dev = &pdev->dev;
	od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
	od->dma.device_tx_status = owl_dma_tx_status;
	od->dma.device_issue_pending = owl_dma_issue_pending;
	od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
	od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
	od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
	od->dma.device_config = owl_dma_config;
	od->dma.device_pause = owl_dma_pause;
	od->dma.device_resume = owl_dma_resume;
	od->dma.device_terminate_all = owl_dma_terminate_all;
	od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->dma.directions = BIT(DMA_MEM_TO_MEM);
	od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	INIT_LIST_HEAD(&od->dma.channels);

	od->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(od->clk)) {
		dev_err(&pdev->dev, "unable to get clock\n");
		return PTR_ERR(od->clk);
	}

	/*
	 * Even though the DMA controller is capable of generating 4
	 * IRQs for DMA priority feature, we only use 1 IRQ for
	 * simplification.
	 */
	od->irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0,
			       dev_name(&pdev->dev), od);
	if (ret) {
		dev_err(&pdev->dev, "unable to request IRQ\n");
		return ret;
	}

	/* Init physical channel */
	od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans,
				  sizeof(struct owl_dma_pchan), GFP_KERNEL);
	if (!od->pchans)
		return -ENOMEM;

	for (i = 0; i < od->nr_pchans; i++) {
		struct owl_dma_pchan *pchan = &od->pchans[i];

		pchan->id = i;
		pchan->base = od->base + OWL_DMA_CHAN_BASE(i);
	}

	/* Init virtual channel */
	od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans,
				  sizeof(struct owl_dma_vchan), GFP_KERNEL);
	if (!od->vchans)
		return -ENOMEM;

	for (i = 0; i < od->nr_vchans; i++) {
		struct owl_dma_vchan *vchan = &od->vchans[i];

		vchan->vc.desc_free = owl_dma_desc_free;
		vchan_init(&vchan->vc, &od->dma);
	}

	/* Create a pool of consistent memory blocks for hardware descriptors */
	od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev,
				       sizeof(struct owl_dma_lli),
				       __alignof__(struct owl_dma_lli),
				       0);
	if (!od->lli_pool) {
		dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n");
		return -ENOMEM;
	}

	clk_prepare_enable(od->clk);

	ret = dma_async_device_register(&od->dma);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DMA engine device\n");
		goto err_pool_free;
	}

	/* Device-tree DMA controller registration */
	ret = of_dma_controller_register(pdev->dev.of_node,
					 owl_dma_of_xlate, od);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&od->dma);
err_pool_free:
	clk_disable_unprepare(od->clk);
	dma_pool_destroy(od->lli_pool);

	return ret;
}

static int owl_dma_remove(struct platform_device *pdev)
{
	struct owl_dma *od = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&od->dma);

	/* Mask all interrupts for this execution environment */
	dma_writel(od, OWL_DMA_IRQ_EN0, 0x0);

	/* Make sure we won't have any further interrupts */
	devm_free_irq(od->dma.dev, od->irq, od);

	owl_dma_free(od);

	clk_disable_unprepare(od->clk);

	return 0;
}

static const struct of_device_id owl_dma_match[] = {
	{ .compatible = "actions,s900-dma", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, owl_dma_match);

static struct platform_driver owl_dma_driver = {
	.probe	= owl_dma_probe,
	.remove	= owl_dma_remove,
	.driver = {
		.name = "dma-owl",
		.of_match_table = of_match_ptr(owl_dma_match),
	},
};

static int owl_dma_init(void)
{
	return platform_driver_register(&owl_dma_driver);
}
subsys_initcall(owl_dma_init);

static void __exit owl_dma_exit(void)
{
	platform_driver_unregister(&owl_dma_driver);
}
module_exit(owl_dma_exit);

MODULE_AUTHOR("David Liu <[email protected]>");
MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>");
MODULE_DESCRIPTION("Actions Semi Owl SoCs DMA driver");
MODULE_LICENSE("GPL");