1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | // | |
3 | // Actions Semi Owl SoCs DMA driver | |
4 | // | |
5 | // Copyright (c) 2014 Actions Semi Inc. | |
6 | // Author: David Liu <[email protected]> | |
7 | // | |
8 | // Copyright (c) 2018 Linaro Ltd. | |
9 | // Author: Manivannan Sadhasivam <[email protected]> | |
10 | ||
11 | #include <linux/bitops.h> | |
12 | #include <linux/clk.h> | |
13 | #include <linux/delay.h> | |
14 | #include <linux/dmaengine.h> | |
15 | #include <linux/dma-mapping.h> | |
16 | #include <linux/dmapool.h> | |
17 | #include <linux/err.h> | |
18 | #include <linux/init.h> | |
19 | #include <linux/interrupt.h> | |
20 | #include <linux/io.h> | |
21 | #include <linux/mm.h> | |
22 | #include <linux/module.h> | |
23 | #include <linux/of_device.h> | |
24 | #include <linux/of_dma.h> | |
25 | #include <linux/slab.h> |
26 | #include "virt-dma.h" | |
27 | ||
28 | #define OWL_DMA_FRAME_MAX_LENGTH 0xfffff | |
29 | ||
30 | /* Global DMA Controller Registers */ | |
31 | #define OWL_DMA_IRQ_PD0 0x00 | |
32 | #define OWL_DMA_IRQ_PD1 0x04 | |
33 | #define OWL_DMA_IRQ_PD2 0x08 | |
34 | #define OWL_DMA_IRQ_PD3 0x0C | |
35 | #define OWL_DMA_IRQ_EN0 0x10 | |
36 | #define OWL_DMA_IRQ_EN1 0x14 | |
37 | #define OWL_DMA_IRQ_EN2 0x18 | |
38 | #define OWL_DMA_IRQ_EN3 0x1C | |
39 | #define OWL_DMA_SECURE_ACCESS_CTL 0x20 | |
40 | #define OWL_DMA_NIC_QOS 0x24 | |
41 | #define OWL_DMA_DBGSEL 0x28 | |
42 | #define OWL_DMA_IDLE_STAT 0x2C | |
43 | ||
44 | /* Channel Registers */ | |
45 | #define OWL_DMA_CHAN_BASE(i) (0x100 + (i) * 0x100) | |
46 | #define OWL_DMAX_MODE 0x00 | |
47 | #define OWL_DMAX_SOURCE 0x04 | |
48 | #define OWL_DMAX_DESTINATION 0x08 | |
49 | #define OWL_DMAX_FRAME_LEN 0x0C | |
50 | #define OWL_DMAX_FRAME_CNT 0x10 | |
51 | #define OWL_DMAX_REMAIN_FRAME_CNT 0x14 | |
52 | #define OWL_DMAX_REMAIN_CNT 0x18 | |
53 | #define OWL_DMAX_SOURCE_STRIDE 0x1C | |
54 | #define OWL_DMAX_DESTINATION_STRIDE 0x20 | |
55 | #define OWL_DMAX_START 0x24 | |
56 | #define OWL_DMAX_PAUSE 0x28 | |
57 | #define OWL_DMAX_CHAINED_CTL 0x2C | |
58 | #define OWL_DMAX_CONSTANT 0x30 | |
59 | #define OWL_DMAX_LINKLIST_CTL 0x34 | |
60 | #define OWL_DMAX_NEXT_DESCRIPTOR 0x38 | |
61 | #define OWL_DMAX_CURRENT_DESCRIPTOR_NUM 0x3C | |
62 | #define OWL_DMAX_INT_CTL 0x40 | |
63 | #define OWL_DMAX_INT_STATUS 0x44 | |
64 | #define OWL_DMAX_CURRENT_SOURCE_POINTER 0x48 | |
65 | #define OWL_DMAX_CURRENT_DESTINATION_POINTER 0x4C | |
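/*
 * Each physical channel exposes the register block above at
 * OWL_DMA_CHAN_BASE(i), i.e. 0x100 + (i) * 0x100 from the controller
 * base: channel 0 at 0x100, channel 1 at 0x200, and so on.
 */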
66 | ||
67 | /* OWL_DMAX_MODE Bits */ | |
68 | #define OWL_DMA_MODE_TS(x) (((x) & GENMASK(5, 0)) << 0) | |
69 | #define OWL_DMA_MODE_ST(x) (((x) & GENMASK(1, 0)) << 8) | |
70 | #define OWL_DMA_MODE_ST_DEV OWL_DMA_MODE_ST(0) | |
71 | #define OWL_DMA_MODE_ST_DCU OWL_DMA_MODE_ST(2) | |
72 | #define OWL_DMA_MODE_ST_SRAM OWL_DMA_MODE_ST(3) | |
73 | #define OWL_DMA_MODE_DT(x) (((x) & GENMASK(1, 0)) << 10) | |
74 | #define OWL_DMA_MODE_DT_DEV OWL_DMA_MODE_DT(0) | |
75 | #define OWL_DMA_MODE_DT_DCU OWL_DMA_MODE_DT(2) | |
76 | #define OWL_DMA_MODE_DT_SRAM OWL_DMA_MODE_DT(3) | |
77 | #define OWL_DMA_MODE_SAM(x) (((x) & GENMASK(1, 0)) << 16) | |
78 | #define OWL_DMA_MODE_SAM_CONST OWL_DMA_MODE_SAM(0) | |
79 | #define OWL_DMA_MODE_SAM_INC OWL_DMA_MODE_SAM(1) | |
80 | #define OWL_DMA_MODE_SAM_STRIDE OWL_DMA_MODE_SAM(2) | |
81 | #define OWL_DMA_MODE_DAM(x) (((x) & GENMASK(1, 0)) << 18) | |
82 | #define OWL_DMA_MODE_DAM_CONST OWL_DMA_MODE_DAM(0) | |
83 | #define OWL_DMA_MODE_DAM_INC OWL_DMA_MODE_DAM(1) | |
84 | #define OWL_DMA_MODE_DAM_STRIDE OWL_DMA_MODE_DAM(2) | |
85 | #define OWL_DMA_MODE_PW(x) (((x) & GENMASK(2, 0)) << 20) | |
86 | #define OWL_DMA_MODE_CB BIT(23) | |
87 | #define OWL_DMA_MODE_NDDBW(x) (((x) & 0x1) << 28) | |
88 | #define OWL_DMA_MODE_NDDBW_32BIT OWL_DMA_MODE_NDDBW(0) | |
89 | #define OWL_DMA_MODE_NDDBW_8BIT OWL_DMA_MODE_NDDBW(1) | |
90 | #define OWL_DMA_MODE_CFE BIT(29) | |
91 | #define OWL_DMA_MODE_LME BIT(30) | |
92 | #define OWL_DMA_MODE_CME BIT(31) | |
93 | ||
94 | /* OWL_DMAX_LINKLIST_CTL Bits */ | |
95 | #define OWL_DMA_LLC_SAV(x) (((x) & GENMASK(1, 0)) << 8) | |
96 | #define OWL_DMA_LLC_SAV_INC OWL_DMA_LLC_SAV(0) | |
97 | #define OWL_DMA_LLC_SAV_LOAD_NEXT OWL_DMA_LLC_SAV(1) | |
98 | #define OWL_DMA_LLC_SAV_LOAD_PREV OWL_DMA_LLC_SAV(2) | |
99 | #define OWL_DMA_LLC_DAV(x) (((x) & GENMASK(1, 0)) << 10) | |
100 | #define OWL_DMA_LLC_DAV_INC OWL_DMA_LLC_DAV(0) | |
101 | #define OWL_DMA_LLC_DAV_LOAD_NEXT OWL_DMA_LLC_DAV(1) | |
102 | #define OWL_DMA_LLC_DAV_LOAD_PREV OWL_DMA_LLC_DAV(2) | |
103 | #define OWL_DMA_LLC_SUSPEND BIT(16) | |
104 | ||
105 | /* OWL_DMAX_INT_CTL Bits */ | |
106 | #define OWL_DMA_INTCTL_BLOCK BIT(0) | |
107 | #define OWL_DMA_INTCTL_SUPER_BLOCK BIT(1) | |
108 | #define OWL_DMA_INTCTL_FRAME BIT(2) | |
109 | #define OWL_DMA_INTCTL_HALF_FRAME BIT(3) | |
110 | #define OWL_DMA_INTCTL_LAST_FRAME BIT(4) | |
111 | ||
112 | /* OWL_DMAX_INT_STATUS Bits */ | |
113 | #define OWL_DMA_INTSTAT_BLOCK BIT(0) | |
114 | #define OWL_DMA_INTSTAT_SUPER_BLOCK BIT(1) | |
115 | #define OWL_DMA_INTSTAT_FRAME BIT(2) | |
116 | #define OWL_DMA_INTSTAT_HALF_FRAME BIT(3) | |
117 | #define OWL_DMA_INTSTAT_LAST_FRAME BIT(4) | |
118 | ||
119 | /* Extract a 'width'-bit field at 'shift' from 'val' and place it at 'newshift' */ | |
120 | #define BIT_FIELD(val, width, shift, newshift) \ | |
121 | ((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift)) | |
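/*
 * Worked example: BIT_FIELD(0x00300000, 4, 20, 4) extracts the 4-bit
 * field 0x3 located at bit 20 of the input and re-positions it at
 * bit 4, yielding 0x30. The helpers below use this to repack the
 * register-layout MODE/LINKLIST_CTL/INT_CTL fields into the layout
 * expected by the in-memory link-list descriptor.
 */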
122 | ||
123 | /* Frame count value is fixed as 1 */ |
124 | #define FCNT_VAL 0x1 | |
125 | ||
126 | /** | |
127 | * enum owl_dmadesc_offsets - Describe DMA descriptor, hardware link | |
128 | * list for dma transfer |
129 | * @OWL_DMADESC_NEXT_LLI: physical address of the next link list | |
130 | * @OWL_DMADESC_SADDR: source physical address | |
131 | * @OWL_DMADESC_DADDR: destination physical address | |
132 | * @OWL_DMADESC_FLEN: frame length | |
133 | * @OWL_DMADESC_SRC_STRIDE: source stride | |
134 | * @OWL_DMADESC_DST_STRIDE: destination stride | |
135 | * @OWL_DMADESC_CTRLA: dma_mode and linklist ctrl config | |
136 | * @OWL_DMADESC_CTRLB: interrupt config | |
137 | * @OWL_DMADESC_CONST_NUM: data for constant fill | |
138 | * @OWL_DMADESC_SIZE: max size of this enum | |
139 | */ | |
140 | enum owl_dmadesc_offsets { |
141 | OWL_DMADESC_NEXT_LLI = 0, | |
142 | OWL_DMADESC_SADDR, | |
143 | OWL_DMADESC_DADDR, | |
144 | OWL_DMADESC_FLEN, | |
145 | OWL_DMADESC_SRC_STRIDE, | |
146 | OWL_DMADESC_DST_STRIDE, | |
147 | OWL_DMADESC_CTRLA, | |
148 | OWL_DMADESC_CTRLB, | |
149 | OWL_DMADESC_CONST_NUM, | |
150 | OWL_DMADESC_SIZE | |
151 | }; |
152 | ||
153 | enum owl_dma_id { |
154 | S900_DMA, | |
155 | S700_DMA, | |
156 | }; | |
157 | ||
158 | /** |
159 | * struct owl_dma_lli - Link list for dma transfer | |
160 | * @hw: hardware link list | |
161 | * @phys: physical address of hardware link list | |
162 | * @node: node for txd's lli_list | |
163 | */ | |
164 | struct owl_dma_lli { | |
165 | u32 hw[OWL_DMADESC_SIZE]; | |
166 | dma_addr_t phys; |
167 | struct list_head node; | |
168 | }; | |
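/*
 * The hw[] words above form the in-memory descriptor image the
 * controller fetches for each link-list node when OWL_DMA_MODE_LME is
 * enabled; they are filled by owl_dma_cfg_lli() and chained together
 * by owl_dma_add_lli().
 */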
169 | ||
170 | /** | |
171 | * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor | |
172 | * @vd: virtual DMA descriptor | |
173 | * @lli_list: link list of lli nodes | |
174 | * @cyclic: flag to indicate cyclic transfers | |
175 | */ |
176 | struct owl_dma_txd { | |
177 | struct virt_dma_desc vd; | |
178 | struct list_head lli_list; | |
179 | bool cyclic; | |
180 | }; |
181 | ||
182 | /** | |
183 | * struct owl_dma_pchan - Holder for the physical channels | |
184 | * @id: physical index to this channel | |
185 | * @base: virtual memory base for the dma channel | |
186 | * @vchan: the virtual channel currently being served by this physical channel | |
187 | */ |
188 | struct owl_dma_pchan { | |
189 | u32 id; | |
190 | void __iomem *base; | |
191 | struct owl_dma_vchan *vchan; | |
192 | }; |
193 | ||
194 | /** | |
195 | * struct owl_dma_vchan - Wrapper for DMA ENGINE channel | |
196 | * @vc: wrapped virtual channel | |
197 | * @pchan: the physical channel utilized by this channel | |
198 | * @txd: active transaction on this channel | |
199 | * @cfg: slave configuration for this channel |
200 | * @drq: physical DMA request ID for this channel | |
47e20577 MS |
201 | */ |
202 | struct owl_dma_vchan { | |
203 | struct virt_dma_chan vc; | |
204 | struct owl_dma_pchan *pchan; | |
205 | struct owl_dma_txd *txd; | |
206 | struct dma_slave_config cfg; |
207 | u8 drq; | |
208 | }; |
209 | ||
210 | /** | |
211 | * struct owl_dma - Holder for the Owl DMA controller | |
212 | * @dma: dma engine for this instance | |
213 | * @base: virtual memory base for the DMA controller | |
214 | * @clk: clock for the DMA controller | |
215 | * @lock: a lock to use when changing DMA controller global registers | |
216 | * @lli_pool: a pool for the LLI descriptors | |
217 | * @irq: interrupt ID for the DMA controller | |
218 | * @nr_pchans: the number of physical channels |
219 | * @pchans: array of data for the physical channels | |
220 | * @nr_vchans: the number of virtual channels | |
221 | * @vchans: array of data for the virtual channels | |
222 | * @devid: device id based on OWL SoC | |
223 | */ |
224 | struct owl_dma { | |
225 | struct dma_device dma; | |
226 | void __iomem *base; | |
227 | struct clk *clk; | |
228 | spinlock_t lock; | |
229 | struct dma_pool *lli_pool; | |
230 | int irq; | |
231 | ||
232 | unsigned int nr_pchans; | |
233 | struct owl_dma_pchan *pchans; | |
234 | ||
235 | unsigned int nr_vchans; | |
236 | struct owl_dma_vchan *vchans; | |
237 | enum owl_dma_id devid; | |
238 | }; |
239 | ||
240 | static void pchan_update(struct owl_dma_pchan *pchan, u32 reg, | |
241 | u32 val, bool state) | |
242 | { | |
243 | u32 regval; | |
244 | ||
245 | regval = readl(pchan->base + reg); | |
246 | ||
247 | if (state) | |
248 | regval |= val; | |
249 | else | |
250 | regval &= ~val; | |
251 | ||
252 | writel(val, pchan->base + reg); | |
253 | } | |
254 | ||
255 | static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data) | |
256 | { | |
257 | writel(data, pchan->base + reg); | |
258 | } | |
259 | ||
260 | static u32 pchan_readl(struct owl_dma_pchan *pchan, u32 reg) | |
261 | { | |
262 | return readl(pchan->base + reg); | |
263 | } | |
264 | ||
265 | static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state) | |
266 | { | |
267 | u32 regval; | |
268 | ||
269 | regval = readl(od->base + reg); | |
270 | ||
271 | if (state) | |
272 | regval |= val; | |
273 | else | |
274 | regval &= ~val; | |
275 | ||
276 | writel(val, od->base + reg); | |
277 | } | |
278 | ||
279 | static void dma_writel(struct owl_dma *od, u32 reg, u32 data) | |
280 | { | |
281 | writel(data, od->base + reg); | |
282 | } | |
283 | ||
284 | static u32 dma_readl(struct owl_dma *od, u32 reg) | |
285 | { | |
286 | return readl(od->base + reg); | |
287 | } | |
288 | ||
289 | static inline struct owl_dma *to_owl_dma(struct dma_device *dd) | |
290 | { | |
291 | return container_of(dd, struct owl_dma, dma); | |
292 | } | |
293 | ||
294 | static struct device *chan2dev(struct dma_chan *chan) | |
295 | { | |
296 | return &chan->dev->device; | |
297 | } | |
298 | ||
299 | static inline struct owl_dma_vchan *to_owl_vchan(struct dma_chan *chan) | |
300 | { | |
301 | return container_of(chan, struct owl_dma_vchan, vc.chan); | |
302 | } | |
303 | ||
304 | static inline struct owl_dma_txd *to_owl_txd(struct dma_async_tx_descriptor *tx) | |
305 | { | |
306 | return container_of(tx, struct owl_dma_txd, vd.tx); | |
307 | } | |
308 | ||
309 | static inline u32 llc_hw_ctrla(u32 mode, u32 llc_ctl) | |
310 | { | |
311 | u32 ctl; | |
312 | ||
313 | ctl = BIT_FIELD(mode, 4, 28, 28) | | |
314 | BIT_FIELD(mode, 8, 16, 20) | | |
315 | BIT_FIELD(mode, 4, 8, 16) | | |
316 | BIT_FIELD(mode, 6, 0, 10) | | |
317 | BIT_FIELD(llc_ctl, 2, 10, 8) | | |
318 | BIT_FIELD(llc_ctl, 2, 8, 6); | |
319 | ||
320 | return ctl; | |
321 | } | |
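/*
 * llc_hw_ctrla() folds the relevant OWL_DMAX_MODE and
 * OWL_DMAX_LINKLIST_CTL fields into the single CTRLA word of the
 * in-memory descriptor; the descriptor packs these fields at different
 * bit positions than the MMIO registers, hence the BIT_FIELD() remap.
 */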
322 | ||
323 | static inline u32 llc_hw_ctrlb(u32 int_ctl) | |
324 | { | |
325 | u32 ctl; | |
326 | ||
327 | /* |
328 | * Irrespective of the SoC, ctrlb value starts filling from | |
329 | * bit 18. | |
330 | */ | |
331 | ctl = BIT_FIELD(int_ctl, 7, 0, 18); |
332 | ||
333 | return ctl; | |
334 | } | |
335 | ||
336 | static u32 llc_hw_flen(struct owl_dma_lli *lli) |
337 | { | |
338 | return lli->hw[OWL_DMADESC_FLEN] & GENMASK(19, 0); | |
339 | } | |
340 | ||
341 | static void owl_dma_free_lli(struct owl_dma *od, |
342 | struct owl_dma_lli *lli) | |
343 | { | |
344 | list_del(&lli->node); | |
345 | dma_pool_free(od->lli_pool, lli, lli->phys); | |
346 | } | |
347 | ||
348 | static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od) | |
349 | { | |
350 | struct owl_dma_lli *lli; | |
351 | dma_addr_t phys; | |
352 | ||
353 | lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys); | |
354 | if (!lli) | |
355 | return NULL; | |
356 | ||
357 | INIT_LIST_HEAD(&lli->node); | |
358 | lli->phys = phys; | |
359 | ||
360 | return lli; | |
361 | } | |
362 | ||
363 | static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd, | |
364 | struct owl_dma_lli *prev, | |
365 | struct owl_dma_lli *next, |
366 | bool is_cyclic) | |
367 | { | |
368 | if (!is_cyclic) |
369 | list_add_tail(&next->node, &txd->lli_list); | |
370 | |
371 | if (prev) { | |
372 | prev->hw[OWL_DMADESC_NEXT_LLI] = next->phys; |
373 | prev->hw[OWL_DMADESC_CTRLA] |= | |
374 | llc_hw_ctrla(OWL_DMA_MODE_LME, 0); | |
375 | } |
376 | ||
377 | return next; | |
378 | } | |
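/*
 * For cyclic transfers the ring is closed by calling owl_dma_add_lli()
 * one more time with the first node and is_cyclic set: the node is not
 * re-added to lli_list, only the last descriptor's NEXT_LLI pointer is
 * made to point back at the head (see owl_prep_dma_cyclic()).
 */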
379 | ||
380 | static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan, | |
381 | struct owl_dma_lli *lli, | |
382 | dma_addr_t src, dma_addr_t dst, | |
383 | u32 len, enum dma_transfer_direction dir, |
384 | struct dma_slave_config *sconfig, | |
385 | bool is_cyclic) | |
386 | { | |
387 | struct owl_dma *od = to_owl_dma(vchan->vc.chan.device); | |
388 | u32 mode, ctrlb; | |
389 | |
390 | mode = OWL_DMA_MODE_PW(0); | |
391 | ||
392 | switch (dir) { | |
393 | case DMA_MEM_TO_MEM: | |
394 | mode |= OWL_DMA_MODE_TS(0) | OWL_DMA_MODE_ST_DCU | | |
395 | OWL_DMA_MODE_DT_DCU | OWL_DMA_MODE_SAM_INC | | |
396 | OWL_DMA_MODE_DAM_INC; | |
397 | ||
398 | break; |
399 | case DMA_MEM_TO_DEV: | |
400 | mode |= OWL_DMA_MODE_TS(vchan->drq) | |
401 | | OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DEV | |
402 | | OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_CONST; | |
403 | ||
404 | /* | |
405 | * Hardware only supports 32bit and 8bit buswidth. Since the | |
406 | * default is 32bit, select 8bit only when requested. | |
407 | */ | |
408 | if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE) | |
409 | mode |= OWL_DMA_MODE_NDDBW_8BIT; | |
410 | ||
411 | break; | |
412 | case DMA_DEV_TO_MEM: | |
413 | mode |= OWL_DMA_MODE_TS(vchan->drq) | |
414 | | OWL_DMA_MODE_ST_DEV | OWL_DMA_MODE_DT_DCU | |
415 | | OWL_DMA_MODE_SAM_CONST | OWL_DMA_MODE_DAM_INC; | |
416 | ||
417 | /* | |
418 | * Hardware only supports 32bit and 8bit buswidth. Since the | |
419 | * default is 32bit, select 8bit only when requested. | |
420 | */ | |
421 | if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE) | |
422 | mode |= OWL_DMA_MODE_NDDBW_8BIT; | |
423 | ||
424 | break; |
425 | default: | |
426 | return -EINVAL; | |
427 | } | |
428 | ||
429 | lli->hw[OWL_DMADESC_CTRLA] = llc_hw_ctrla(mode, |
430 | OWL_DMA_LLC_SAV_LOAD_NEXT | | |
431 | OWL_DMA_LLC_DAV_LOAD_NEXT); | |
432 ||
433 | if (is_cyclic) | |
434 | ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_BLOCK); | |
435 | else | |
436 | ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK); |
437 | ||
438 | lli->hw[OWL_DMADESC_NEXT_LLI] = 0; /* One link list by default */ | |
439 | lli->hw[OWL_DMADESC_SADDR] = src; | |
440 | lli->hw[OWL_DMADESC_DADDR] = dst; | |
441 | lli->hw[OWL_DMADESC_SRC_STRIDE] = 0; | |
442 | lli->hw[OWL_DMADESC_DST_STRIDE] = 0; | |
443 | |
444 | if (od->devid == S700_DMA) { | |
445 | /* Max frame length is 1MB */ | |
446 | lli->hw[OWL_DMADESC_FLEN] = len; | |
447 | /* | |
448 | * On S700, word starts from offset 0x1C is shared between | |
449 | * frame count and ctrlb, where first 12 bits are for frame | |
450 | * count and rest of 20 bits are for ctrlb. | |
451 | */ | |
452 | lli->hw[OWL_DMADESC_CTRLB] = FCNT_VAL | ctrlb; | |
453 | } else { | |
454 | /* | |
455 | * On S900, word starts from offset 0xC is shared between | |
456 | * frame length (max frame length is 1MB) and frame count, | |
457 | * where first 20 bits are for frame length and rest of | |
458 | * 12 bits are for frame count. | |
459 | */ | |
460 | lli->hw[OWL_DMADESC_FLEN] = len | FCNT_VAL << 20; | |
461 | lli->hw[OWL_DMADESC_CTRLB] = ctrlb; | |
462 | } | |
463 | |
464 | return 0; | |
465 | } | |
466 | ||
467 | static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od, | |
468 | struct owl_dma_vchan *vchan) | |
469 | { | |
470 | struct owl_dma_pchan *pchan = NULL; | |
471 | unsigned long flags; | |
472 | int i; | |
473 | ||
474 | for (i = 0; i < od->nr_pchans; i++) { | |
475 | pchan = &od->pchans[i]; | |
476 | ||
477 | spin_lock_irqsave(&od->lock, flags); | |
478 | if (!pchan->vchan) { |
479 | pchan->vchan = vchan; | |
480 | spin_unlock_irqrestore(&od->lock, flags); | |
481 | break; |
482 | } | |
483 | ||
484 | spin_unlock_irqrestore(&od->lock, flags); | |
485 | } |
486 | ||
487 | return pchan; | |
488 | } | |
489 | ||
490 | static int owl_dma_pchan_busy(struct owl_dma *od, struct owl_dma_pchan *pchan) | |
491 | { | |
492 | unsigned int val; | |
493 | ||
494 | val = dma_readl(od, OWL_DMA_IDLE_STAT); | |
495 | ||
496 | return !(val & (1 << pchan->id)); | |
497 | } | |
498 | ||
499 | static void owl_dma_terminate_pchan(struct owl_dma *od, | |
500 | struct owl_dma_pchan *pchan) | |
501 | { | |
502 | unsigned long flags; | |
503 | u32 irq_pd; | |
504 | ||
505 | pchan_writel(pchan, OWL_DMAX_START, 0); | |
506 | pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false); | |
507 | ||
508 | spin_lock_irqsave(&od->lock, flags); | |
509 | dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false); | |
510 | ||
511 | irq_pd = dma_readl(od, OWL_DMA_IRQ_PD0); | |
512 | if (irq_pd & (1 << pchan->id)) { | |
513 | dev_warn(od->dma.dev, | |
514 | "terminating pchan %d that still has pending irq\n", | |
515 | pchan->id); | |
516 | dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id)); | |
517 | } | |
518 | ||
519 | pchan->vchan = NULL; | |
520 | ||
521 | spin_unlock_irqrestore(&od->lock, flags); | |
522 | } | |
523 | ||
524 | static void owl_dma_pause_pchan(struct owl_dma_pchan *pchan) |
525 | { | |
526 | pchan_writel(pchan, OWL_DMAX_PAUSE, 1); | |
527 | } | |
528 | ||
529 | static void owl_dma_resume_pchan(struct owl_dma_pchan *pchan) | |
530 | { | |
531 | pchan_writel(pchan, OWL_DMAX_PAUSE, 0); | |
532 | } | |
533 | ||
534 | static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan) |
535 | { | |
536 | struct owl_dma *od = to_owl_dma(vchan->vc.chan.device); | |
537 | struct virt_dma_desc *vd = vchan_next_desc(&vchan->vc); | |
538 | struct owl_dma_pchan *pchan = vchan->pchan; | |
539 | struct owl_dma_txd *txd = to_owl_txd(&vd->tx); | |
540 | struct owl_dma_lli *lli; | |
541 | unsigned long flags; | |
542 | u32 int_ctl; | |
543 | ||
544 | list_del(&vd->node); | |
545 | ||
546 | vchan->txd = txd; | |
547 | ||
548 | /* Wait for channel inactive */ | |
549 | while (owl_dma_pchan_busy(od, pchan)) | |
550 | cpu_relax(); | |
551 | ||
552 | lli = list_first_entry(&txd->lli_list, | |
553 | struct owl_dma_lli, node); | |
554 | ||
555 | if (txd->cyclic) |
556 | int_ctl = OWL_DMA_INTCTL_BLOCK; | |
557 | else | |
558 | int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK; | |
559 | |
560 | pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME); | |
561 | pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL, | |
562 | OWL_DMA_LLC_SAV_LOAD_NEXT | OWL_DMA_LLC_DAV_LOAD_NEXT); | |
563 | pchan_writel(pchan, OWL_DMAX_NEXT_DESCRIPTOR, lli->phys); | |
564 | pchan_writel(pchan, OWL_DMAX_INT_CTL, int_ctl); | |
565 | ||
566 | /* Clear IRQ status for this pchan */ | |
567 | pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false); | |
568 | ||
569 | spin_lock_irqsave(&od->lock, flags); | |
570 | ||
571 | dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true); | |
572 | ||
573 | spin_unlock_irqrestore(&od->lock, flags); | |
574 | ||
575 | dev_dbg(chan2dev(&vchan->vc.chan), "starting pchan %d\n", pchan->id); | |
576 | ||
577 | /* Start DMA transfer for this pchan */ | |
578 | pchan_writel(pchan, OWL_DMAX_START, 0x1); | |
579 | ||
580 | return 0; | |
581 | } | |
582 | ||
583 | static void owl_dma_phy_free(struct owl_dma *od, struct owl_dma_vchan *vchan) | |
584 | { | |
585 | /* Ensure that the physical channel is stopped */ | |
586 | owl_dma_terminate_pchan(od, vchan->pchan); | |
587 | ||
588 | vchan->pchan = NULL; | |
589 | } | |
590 | ||
591 | static irqreturn_t owl_dma_interrupt(int irq, void *dev_id) | |
592 | { | |
593 | struct owl_dma *od = dev_id; | |
594 | struct owl_dma_vchan *vchan; | |
595 | struct owl_dma_pchan *pchan; | |
596 | unsigned long pending; | |
597 | int i; | |
598 | unsigned int global_irq_pending, chan_irq_pending; | |
599 | ||
600 | spin_lock(&od->lock); | |
601 | ||
602 | pending = dma_readl(od, OWL_DMA_IRQ_PD0); | |
603 | ||
604 | /* Clear IRQ status for each pchan */ | |
605 | for_each_set_bit(i, &pending, od->nr_pchans) { | |
606 | pchan = &od->pchans[i]; | |
607 | pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false); | |
608 | } | |
609 | ||
610 | /* Clear pending IRQ */ | |
611 | dma_writel(od, OWL_DMA_IRQ_PD0, pending); | |
612 | ||
613 | /* Check missed pending IRQ */ | |
614 | for (i = 0; i < od->nr_pchans; i++) { | |
615 | pchan = &od->pchans[i]; | |
616 | chan_irq_pending = pchan_readl(pchan, OWL_DMAX_INT_CTL) & | |
617 | pchan_readl(pchan, OWL_DMAX_INT_STATUS); | |
618 | ||
619 | /* Dummy read to ensure OWL_DMA_IRQ_PD0 value is updated */ | |
620 | dma_readl(od, OWL_DMA_IRQ_PD0); | |
621 | ||
622 | global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0); | |
623 | ||
624 | if (chan_irq_pending && !(global_irq_pending & BIT(i))) { | |
625 | dev_dbg(od->dma.dev, |
626 | "global and channel IRQ pending match err\n"); | |
627 | ||
628 | /* Clear IRQ status for this pchan */ | |
629 | pchan_update(pchan, OWL_DMAX_INT_STATUS, | |
630 | 0xff, false); | |
631 | ||
632 | /* Update global IRQ pending */ | |
633 | pending |= BIT(i); | |
634 | } | |
635 | } | |
636 | ||
637 | spin_unlock(&od->lock); | |
638 | ||
639 | for_each_set_bit(i, &pending, od->nr_pchans) { | |
640 | struct owl_dma_txd *txd; | |
641 | ||
642 | pchan = &od->pchans[i]; | |
643 | ||
644 | vchan = pchan->vchan; | |
645 | if (!vchan) { | |
646 | dev_warn(od->dma.dev, "no vchan attached on pchan %d\n", | |
647 | pchan->id); | |
648 | continue; | |
649 | } | |
650 | ||
651 | spin_lock(&vchan->vc.lock); | |
652 | ||
653 | txd = vchan->txd; | |
654 | if (txd) { | |
655 | vchan->txd = NULL; | |
656 | ||
657 | vchan_cookie_complete(&txd->vd); | |
658 | ||
659 | /* | |
660 | * Start the next descriptor (if any), | |
661 | * otherwise free this channel. | |
662 | */ | |
663 | if (vchan_next_desc(&vchan->vc)) | |
664 | owl_dma_start_next_txd(vchan); | |
665 | else | |
666 | owl_dma_phy_free(od, vchan); | |
667 | } | |
668 | ||
669 | spin_unlock(&vchan->vc.lock); | |
670 | } | |
671 | ||
672 | return IRQ_HANDLED; | |
673 | } | |
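/*
 * In short, the handler acknowledges the per-channel INT_STATUS and the
 * global IRQ_PD0 bits (re-checking for mismatches between the two),
 * then completes the active descriptor on every signalled channel and
 * either starts the next queued descriptor or releases the physical
 * channel.
 */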
674 | ||
675 | static void owl_dma_free_txd(struct owl_dma *od, struct owl_dma_txd *txd) | |
676 | { | |
677 | struct owl_dma_lli *lli, *_lli; | |
678 | ||
679 | if (unlikely(!txd)) | |
680 | return; | |
681 | ||
682 | list_for_each_entry_safe(lli, _lli, &txd->lli_list, node) | |
683 | owl_dma_free_lli(od, lli); | |
684 | ||
685 | kfree(txd); | |
686 | } | |
687 | ||
688 | static void owl_dma_desc_free(struct virt_dma_desc *vd) | |
689 | { | |
690 | struct owl_dma *od = to_owl_dma(vd->tx.chan->device); | |
691 | struct owl_dma_txd *txd = to_owl_txd(&vd->tx); | |
692 | ||
693 | owl_dma_free_txd(od, txd); | |
694 | } | |
695 | ||
696 | static int owl_dma_terminate_all(struct dma_chan *chan) | |
697 | { | |
698 | struct owl_dma *od = to_owl_dma(chan->device); | |
699 | struct owl_dma_vchan *vchan = to_owl_vchan(chan); | |
700 | unsigned long flags; | |
701 | LIST_HEAD(head); | |
702 | ||
703 | spin_lock_irqsave(&vchan->vc.lock, flags); | |
704 | ||
705 | if (vchan->pchan) | |
706 | owl_dma_phy_free(od, vchan); | |
707 | ||
708 | if (vchan->txd) { | |
709 | owl_dma_desc_free(&vchan->txd->vd); | |
710 | vchan->txd = NULL; | |
711 | } | |
712 | ||
713 | vchan_get_all_descriptors(&vchan->vc, &head); | |
714 | |
715 | spin_unlock_irqrestore(&vchan->vc.lock, flags); | |
716 | ||
717 | vchan_dma_desc_free_list(&vchan->vc, &head); |
718 | ||
719 | return 0; |
720 | } | |
721 | ||
722 | static int owl_dma_config(struct dma_chan *chan, |
723 | struct dma_slave_config *config) | |
724 | { | |
725 | struct owl_dma_vchan *vchan = to_owl_vchan(chan); | |
726 | ||
727 | /* Reject definitely invalid configurations */ | |
728 | if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || | |
729 | config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) | |
730 | return -EINVAL; | |
731 | ||
732 | memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config)); | |
733 | ||
734 | return 0; | |
735 | } | |
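/*
 * Consumer-side sketch (not part of this driver) of how a client would
 * drive a slave transfer through the generic dmaengine API; the channel
 * name, addresses and length below are made up for illustration:
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	struct dma_chan *chan = dma_request_chan(dev, "tx");
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					   DMA_MEM_TO_DEV,
 *					   DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */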
736 | ||
737 | static int owl_dma_pause(struct dma_chan *chan) | |
738 | { | |
739 | struct owl_dma_vchan *vchan = to_owl_vchan(chan); | |
740 | unsigned long flags; | |
741 | ||
742 | spin_lock_irqsave(&vchan->vc.lock, flags); | |
743 | ||
744 | owl_dma_pause_pchan(vchan->pchan); | |
745 | ||
746 | spin_unlock_irqrestore(&vchan->vc.lock, flags); | |
747 | ||
748 | return 0; | |
749 | } | |
750 | ||
751 | static int owl_dma_resume(struct dma_chan *chan) | |
752 | { | |
753 | struct owl_dma_vchan *vchan = to_owl_vchan(chan); | |
754 | unsigned long flags; | |
755 | ||
756 | if (!vchan->pchan && !vchan->txd) | |
757 | return 0; | |
758 | ||
759 | dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc); | |
760 | ||
761 | spin_lock_irqsave(&vchan->vc.lock, flags); | |
762 | ||
763 | owl_dma_resume_pchan(vchan->pchan); | |
764 | ||
765 | spin_unlock_irqrestore(&vchan->vc.lock, flags); | |
766 | ||
767 | return 0; | |
768 | } | |
769 | ||
770 | static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan) |
771 | { | |
772 | struct owl_dma_pchan *pchan; | |
773 | struct owl_dma_txd *txd; | |
774 | struct owl_dma_lli *lli; | |
775 | unsigned int next_lli_phy; | |
776 | size_t bytes; | |
777 | ||
778 | pchan = vchan->pchan; | |
779 | txd = vchan->txd; | |
780 | ||
781 | if (!pchan || !txd) | |
782 | return 0; | |
783 | ||
784 | /* Get remain count of current node in link list */ | |
785 | bytes = pchan_readl(pchan, OWL_DMAX_REMAIN_CNT); | |
786 | ||
787 | /* Loop through the preceding nodes to get total remaining bytes */ | |
788 | if (pchan_readl(pchan, OWL_DMAX_MODE) & OWL_DMA_MODE_LME) { | |
789 | next_lli_phy = pchan_readl(pchan, OWL_DMAX_NEXT_DESCRIPTOR); | |
790 | list_for_each_entry(lli, &txd->lli_list, node) { | |
791 | /* Start from the next active node */ | |
792 | if (lli->phys == next_lli_phy) { | |
793 | list_for_each_entry(lli, &txd->lli_list, node) | |
794 | bytes += llc_hw_flen(lli); | |
795 | break; |
796 | } | |
797 | } | |
798 | } | |
799 | ||
800 | return bytes; | |
801 | } | |
802 | ||
803 | static enum dma_status owl_dma_tx_status(struct dma_chan *chan, | |
804 | dma_cookie_t cookie, | |
805 | struct dma_tx_state *state) | |
806 | { | |
807 | struct owl_dma_vchan *vchan = to_owl_vchan(chan); | |
808 | struct owl_dma_lli *lli; | |
809 | struct virt_dma_desc *vd; | |
810 | struct owl_dma_txd *txd; | |
811 | enum dma_status ret; | |
812 | unsigned long flags; | |
813 | size_t bytes = 0; | |
814 | ||
815 | ret = dma_cookie_status(chan, cookie, state); | |
816 | if (ret == DMA_COMPLETE || !state) | |
817 | return ret; | |
818 | ||
819 | spin_lock_irqsave(&vchan->vc.lock, flags); | |
820 | ||
821 | vd = vchan_find_desc(&vchan->vc, cookie); | |
822 | if (vd) { | |
823 | txd = to_owl_txd(&vd->tx); | |
824 | list_for_each_entry(lli, &txd->lli_list, node) | |
825 | bytes += llc_hw_flen(lli); | |
826 | } else { |
827 | bytes = owl_dma_getbytes_chan(vchan); | |
828 | } | |
829 | ||
830 | spin_unlock_irqrestore(&vchan->vc.lock, flags); | |
831 | ||
832 | dma_set_residue(state, bytes); | |
833 | ||
834 | return ret; | |
835 | } | |
836 | ||
837 | static void owl_dma_phy_alloc_and_start(struct owl_dma_vchan *vchan) | |
838 | { | |
839 | struct owl_dma *od = to_owl_dma(vchan->vc.chan.device); | |
840 | struct owl_dma_pchan *pchan; | |
841 | ||
842 | pchan = owl_dma_get_pchan(od, vchan); | |
843 | if (!pchan) | |
844 | return; | |
845 | ||
846 | dev_dbg(od->dma.dev, "allocated pchan %d\n", pchan->id); | |
847 | ||
848 | vchan->pchan = pchan; | |
849 | owl_dma_start_next_txd(vchan); | |
850 | } | |
851 | ||
852 | static void owl_dma_issue_pending(struct dma_chan *chan) | |
853 | { | |
854 | struct owl_dma_vchan *vchan = to_owl_vchan(chan); | |
855 | unsigned long flags; | |
856 | ||
857 | spin_lock_irqsave(&vchan->vc.lock, flags); | |
858 | if (vchan_issue_pending(&vchan->vc)) { | |
859 | if (!vchan->pchan) | |
860 | owl_dma_phy_alloc_and_start(vchan); | |
861 | } | |
862 | spin_unlock_irqrestore(&vchan->vc.lock, flags); | |
863 | } | |
864 | ||
865 | static struct dma_async_tx_descriptor | |
866 | *owl_dma_prep_memcpy(struct dma_chan *chan, | |
867 | dma_addr_t dst, dma_addr_t src, | |
868 | size_t len, unsigned long flags) | |
869 | { | |
870 | struct owl_dma *od = to_owl_dma(chan->device); | |
871 | struct owl_dma_vchan *vchan = to_owl_vchan(chan); | |
872 | struct owl_dma_txd *txd; | |
873 | struct owl_dma_lli *lli, *prev = NULL; | |
874 | size_t offset, bytes; | |
875 | int ret; | |
876 | ||
877 | if (!len) | |
878 | return NULL; | |
879 | ||
880 | txd = kzalloc(sizeof(*txd), GFP_NOWAIT); | |
881 | if (!txd) | |
882 | return NULL; | |
883 | ||
884 | INIT_LIST_HEAD(&txd->lli_list); | |
885 | ||
886 | /* Process the transfer as frame by frame */ | |
887 | for (offset = 0; offset < len; offset += bytes) { | |
888 | lli = owl_dma_alloc_lli(od); | |
889 | if (!lli) { | |
890 | dev_warn(chan2dev(chan), "failed to allocate lli\n"); | |
891 | goto err_txd_free; | |
892 | } | |
893 | ||
894 | bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH); | |
895 | ||
896 | ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset, | |
897 | bytes, DMA_MEM_TO_MEM, |
898 | &vchan->cfg, txd->cyclic); | |
899 | if (ret) { |
900 | dev_warn(chan2dev(chan), "failed to config lli\n"); | |
901 | goto err_txd_free; | |
902 | } | |
903 | ||
904 | prev = owl_dma_add_lli(txd, prev, lli, false); | |
905 | } |
906 | ||
907 | return vchan_tx_prep(&vchan->vc, &txd->vd, flags); | |
908 | ||
909 | err_txd_free: | |
910 | owl_dma_free_txd(od, txd); | |
911 | return NULL; | |
912 | } | |
913 | ||
914 | static struct dma_async_tx_descriptor |
915 | *owl_dma_prep_slave_sg(struct dma_chan *chan, | |
916 | struct scatterlist *sgl, | |
917 | unsigned int sg_len, | |
918 | enum dma_transfer_direction dir, | |
919 | unsigned long flags, void *context) | |
920 | { | |
921 | struct owl_dma *od = to_owl_dma(chan->device); | |
922 | struct owl_dma_vchan *vchan = to_owl_vchan(chan); | |
923 | struct dma_slave_config *sconfig = &vchan->cfg; | |
924 | struct owl_dma_txd *txd; | |
925 | struct owl_dma_lli *lli, *prev = NULL; | |
926 | struct scatterlist *sg; | |
927 | dma_addr_t addr, src = 0, dst = 0; | |
928 | size_t len; | |
929 | int ret, i; | |
930 | ||
931 | txd = kzalloc(sizeof(*txd), GFP_NOWAIT); | |
932 | if (!txd) | |
933 | return NULL; | |
934 | ||
935 | INIT_LIST_HEAD(&txd->lli_list); | |
936 | ||
937 | for_each_sg(sgl, sg, sg_len, i) { | |
938 | addr = sg_dma_address(sg); | |
939 | len = sg_dma_len(sg); | |
940 | ||
941 | if (len > OWL_DMA_FRAME_MAX_LENGTH) { | |
942 | dev_err(od->dma.dev, | |
943 | "frame length exceeds max supported length"); | |
944 | goto err_txd_free; | |
945 | } | |
946 | ||
947 | lli = owl_dma_alloc_lli(od); | |
948 | if (!lli) { | |
949 | dev_err(chan2dev(chan), "failed to allocate lli"); | |
950 | goto err_txd_free; | |
951 | } | |
952 | ||
953 | if (dir == DMA_MEM_TO_DEV) { | |
954 | src = addr; | |
955 | dst = sconfig->dst_addr; | |
956 | } else { | |
957 | src = sconfig->src_addr; | |
958 | dst = addr; | |
959 | } | |
960 | ||
961 | ret = owl_dma_cfg_lli(vchan, lli, src, dst, len, dir, sconfig, | |
962 | txd->cyclic); | |
963 | if (ret) { | |
964 | dev_warn(chan2dev(chan), "failed to config lli"); | |
965 | goto err_txd_free; | |
966 | } | |
967 | ||
968 | prev = owl_dma_add_lli(txd, prev, lli, false); | |
969 | } | |
970 | ||
971 | return vchan_tx_prep(&vchan->vc, &txd->vd, flags); | |
972 | ||
973 | err_txd_free: | |
974 | owl_dma_free_txd(od, txd); | |
975 | ||
976 | return NULL; | |
977 | } | |
978 | ||
979 | static struct dma_async_tx_descriptor | |
980 | *owl_prep_dma_cyclic(struct dma_chan *chan, | |
981 | dma_addr_t buf_addr, size_t buf_len, | |
982 | size_t period_len, | |
983 | enum dma_transfer_direction dir, | |
984 | unsigned long flags) | |
985 | { | |
986 | struct owl_dma *od = to_owl_dma(chan->device); | |
987 | struct owl_dma_vchan *vchan = to_owl_vchan(chan); | |
988 | struct dma_slave_config *sconfig = &vchan->cfg; | |
989 | struct owl_dma_txd *txd; | |
990 | struct owl_dma_lli *lli, *prev = NULL, *first = NULL; | |
991 | dma_addr_t src = 0, dst = 0; | |
992 | unsigned int periods = buf_len / period_len; | |
993 | int ret, i; | |
994 | ||
995 | txd = kzalloc(sizeof(*txd), GFP_NOWAIT); | |
996 | if (!txd) | |
997 | return NULL; | |
998 | ||
999 | INIT_LIST_HEAD(&txd->lli_list); | |
1000 | txd->cyclic = true; | |
1001 | ||
1002 | for (i = 0; i < periods; i++) { | |
1003 | lli = owl_dma_alloc_lli(od); | |
1004 | if (!lli) { | |
1005 | dev_warn(chan2dev(chan), "failed to allocate lli"); | |
1006 | goto err_txd_free; | |
1007 | } | |
1008 | ||
1009 | if (dir == DMA_MEM_TO_DEV) { | |
1010 | src = buf_addr + (period_len * i); | |
1011 | dst = sconfig->dst_addr; | |
1012 | } else if (dir == DMA_DEV_TO_MEM) { | |
1013 | src = sconfig->src_addr; | |
1014 | dst = buf_addr + (period_len * i); | |
1015 | } | |
1016 | ||
1017 | ret = owl_dma_cfg_lli(vchan, lli, src, dst, period_len, | |
1018 | dir, sconfig, txd->cyclic); | |
1019 | if (ret) { | |
1020 | dev_warn(chan2dev(chan), "failed to config lli"); | |
1021 | goto err_txd_free; | |
1022 | } | |
1023 | ||
1024 | if (!first) | |
1025 | first = lli; | |
1026 | ||
1027 | prev = owl_dma_add_lli(txd, prev, lli, false); | |
1028 | } | |
1029 | ||
1030 | /* close the cyclic list */ | |
1031 | owl_dma_add_lli(txd, prev, first, true); | |
1032 | ||
1033 | return vchan_tx_prep(&vchan->vc, &txd->vd, flags); | |
1034 | ||
1035 | err_txd_free: | |
1036 | owl_dma_free_txd(od, txd); | |
1037 | ||
1038 | return NULL; | |
1039 | } | |
1040 | ||
1041 | static void owl_dma_free_chan_resources(struct dma_chan *chan) |
1042 | { | |
1043 | struct owl_dma_vchan *vchan = to_owl_vchan(chan); | |
1044 | ||
1045 | /* Ensure all queued descriptors are freed */ | |
1046 | vchan_free_chan_resources(&vchan->vc); | |
1047 | } | |
1048 | ||
1049 | static inline void owl_dma_free(struct owl_dma *od) | |
1050 | { | |
1051 | struct owl_dma_vchan *vchan = NULL; | |
1052 | struct owl_dma_vchan *next; | |
1053 | ||
1054 | list_for_each_entry_safe(vchan, | |
1055 | next, &od->dma.channels, vc.chan.device_node) { | |
1056 | list_del(&vchan->vc.chan.device_node); | |
1057 | tasklet_kill(&vchan->vc.task); | |
1058 | } | |
1059 | } | |
1060 | ||
1061 | static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec, |
1062 | struct of_dma *ofdma) | |
1063 | { | |
1064 | struct owl_dma *od = ofdma->of_dma_data; | |
1065 | struct owl_dma_vchan *vchan; | |
1066 | struct dma_chan *chan; | |
1067 | u8 drq = dma_spec->args[0]; | |
1068 | ||
1069 | if (drq > od->nr_vchans) | |
1070 | return NULL; | |
1071 | ||
1072 | chan = dma_get_any_slave_channel(&od->dma); | |
1073 | if (!chan) | |
1074 | return NULL; | |
1075 | ||
1076 | vchan = to_owl_vchan(chan); | |
1077 | vchan->drq = drq; | |
1078 | ||
1079 | return chan; | |
1080 | } | |
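/*
 * The translation above assumes the standard one-cell binding, the cell
 * being the peripheral DRQ line. A hypothetical client node (names,
 * addresses and DRQ numbers are only illustrative) could look like:
 *
 *	dma: dma-controller@e0260000 {
 *		compatible = "actions,s900-dma";
 *		#dma-cells = <1>;
 *	};
 *
 *	uart5: serial@e012a000 {
 *		dmas = <&dma 29>, <&dma 30>;
 *		dma-names = "tx", "rx";
 *	};
 */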
1081 | ||
1082 | static const struct of_device_id owl_dma_match[] = { |
1083 | { .compatible = "actions,s900-dma", .data = (void *)S900_DMA,}, | |
1084 | { .compatible = "actions,s700-dma", .data = (void *)S700_DMA,}, | |
1085 | { /* sentinel */ }, | |
1086 | }; | |
1087 | MODULE_DEVICE_TABLE(of, owl_dma_match); | |
1088 | ||
1089 | static int owl_dma_probe(struct platform_device *pdev) |
1090 | { | |
1091 | struct device_node *np = pdev->dev.of_node; | |
1092 | struct owl_dma *od; | |
1093 | int ret, i, nr_channels, nr_requests; |
1094 | ||
1095 | od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); | |
1096 | if (!od) | |
1097 | return -ENOMEM; | |
1098 | ||
1099 | od->base = devm_platform_ioremap_resource(pdev, 0); | |
1100 | if (IS_ERR(od->base)) |
1101 | return PTR_ERR(od->base); | |
1102 | ||
1103 | ret = of_property_read_u32(np, "dma-channels", &nr_channels); | |
1104 | if (ret) { | |
1105 | dev_err(&pdev->dev, "can't get dma-channels\n"); | |
1106 | return ret; | |
1107 | } | |
1108 | ||
1109 | ret = of_property_read_u32(np, "dma-requests", &nr_requests); | |
1110 | if (ret) { | |
1111 | dev_err(&pdev->dev, "can't get dma-requests\n"); | |
1112 | return ret; | |
1113 | } | |
1114 | ||
1115 | dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n", | |
1116 | nr_channels, nr_requests); | |
1117 | ||
1118 | od->devid = (enum owl_dma_id)of_device_get_match_data(&pdev->dev); |
1119 | ||
1120 | od->nr_pchans = nr_channels; |
1121 | od->nr_vchans = nr_requests; | |
1122 | ||
1123 | pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); | |
1124 | ||
1125 | platform_set_drvdata(pdev, od); | |
1126 | spin_lock_init(&od->lock); | |
1127 | ||
1128 | dma_cap_set(DMA_MEMCPY, od->dma.cap_mask); | |
1129 | dma_cap_set(DMA_SLAVE, od->dma.cap_mask); |
1130 | dma_cap_set(DMA_CYCLIC, od->dma.cap_mask); | |
1131 | |
1132 | od->dma.dev = &pdev->dev; | |
1133 | od->dma.device_free_chan_resources = owl_dma_free_chan_resources; | |
1134 | od->dma.device_tx_status = owl_dma_tx_status; | |
1135 | od->dma.device_issue_pending = owl_dma_issue_pending; | |
1136 | od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy; | |
1137 | od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg; |
1138 | od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic; | |
1139 | od->dma.device_config = owl_dma_config; | |
1140 | od->dma.device_pause = owl_dma_pause; | |
1141 | od->dma.device_resume = owl_dma_resume; | |
1142 | od->dma.device_terminate_all = owl_dma_terminate_all; |
1143 | od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | |
1144 | od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | |
1145 | od->dma.directions = BIT(DMA_MEM_TO_MEM); | |
1146 | od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | |
1147 | ||
1148 | INIT_LIST_HEAD(&od->dma.channels); | |
1149 | ||
1150 | od->clk = devm_clk_get(&pdev->dev, NULL); | |
1151 | if (IS_ERR(od->clk)) { | |
1152 | dev_err(&pdev->dev, "unable to get clock\n"); | |
1153 | return PTR_ERR(od->clk); | |
1154 | } | |
1155 | ||
1156 | /* | |
1157 | * Even though the DMA controller is capable of generating 4 | |
1158 | * IRQs for the DMA priority feature, we only use 1 IRQ for | |
1159 | * simplification. | |
1160 | */ | |
1161 | od->irq = platform_get_irq(pdev, 0); | |
1162 | ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0, | |
1163 | dev_name(&pdev->dev), od); | |
1164 | if (ret) { | |
1165 | dev_err(&pdev->dev, "unable to request IRQ\n"); | |
1166 | return ret; | |
1167 | } | |
1168 | ||
1169 | /* Init physical channel */ | |
1170 | od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans, | |
1171 | sizeof(struct owl_dma_pchan), GFP_KERNEL); | |
1172 | if (!od->pchans) | |
1173 | return -ENOMEM; | |
1174 | ||
1175 | for (i = 0; i < od->nr_pchans; i++) { | |
1176 | struct owl_dma_pchan *pchan = &od->pchans[i]; | |
1177 | ||
1178 | pchan->id = i; | |
1179 | pchan->base = od->base + OWL_DMA_CHAN_BASE(i); | |
1180 | } | |
1181 | ||
1182 | /* Init virtual channel */ | |
1183 | od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans, | |
1184 | sizeof(struct owl_dma_vchan), GFP_KERNEL); | |
1185 | if (!od->vchans) | |
1186 | return -ENOMEM; | |
1187 | ||
1188 | for (i = 0; i < od->nr_vchans; i++) { | |
1189 | struct owl_dma_vchan *vchan = &od->vchans[i]; | |
1190 | ||
1191 | vchan->vc.desc_free = owl_dma_desc_free; | |
1192 | vchan_init(&vchan->vc, &od->dma); | |
1193 | } | |
1194 | ||
1195 | /* Create a pool of consistent memory blocks for hardware descriptors */ | |
1196 | od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev, | |
1197 | sizeof(struct owl_dma_lli), | |
1198 | __alignof__(struct owl_dma_lli), | |
1199 | 0); | |
1200 | if (!od->lli_pool) { | |
1201 | dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n"); | |
1202 | return -ENOMEM; | |
1203 | } | |
1204 | ||
1205 | clk_prepare_enable(od->clk); | |
1206 | ||
1207 | ret = dma_async_device_register(&od->dma); | |
1208 | if (ret) { | |
1209 | dev_err(&pdev->dev, "failed to register DMA engine device\n"); | |
1210 | goto err_pool_free; | |
1211 | } | |
1212 | ||
1213 | /* Device-tree DMA controller registration */ |
1214 | ret = of_dma_controller_register(pdev->dev.of_node, | |
1215 | owl_dma_of_xlate, od); | |
1216 | if (ret) { | |
1217 | dev_err(&pdev->dev, "of_dma_controller_register failed\n"); | |
1218 | goto err_dma_unregister; | |
1219 | } | |
1220 | ||
1221 | return 0; |
1222 | ||
1223 | err_dma_unregister: |
1224 | dma_async_device_unregister(&od->dma); | |
1225 | err_pool_free: |
1226 | clk_disable_unprepare(od->clk); | |
1227 | dma_pool_destroy(od->lli_pool); | |
1228 | ||
1229 | return ret; | |
1230 | } | |
1231 | ||
1232 | static int owl_dma_remove(struct platform_device *pdev) | |
1233 | { | |
1234 | struct owl_dma *od = platform_get_drvdata(pdev); | |
1235 | ||
1236 | of_dma_controller_free(pdev->dev.of_node); | |
1237 | dma_async_device_unregister(&od->dma); |
1238 | ||
1239 | /* Mask all interrupts for this execution environment */ | |
1240 | dma_writel(od, OWL_DMA_IRQ_EN0, 0x0); | |
1241 | ||
1242 | /* Make sure we won't have any further interrupts */ | |
1243 | devm_free_irq(od->dma.dev, od->irq, od); | |
1244 | ||
1245 | owl_dma_free(od); | |
1246 | ||
1247 | clk_disable_unprepare(od->clk); | |
1248 | ||
1249 | return 0; | |
1250 | } | |
1251 | ||
1252 | static struct platform_driver owl_dma_driver = { |
1253 | .probe = owl_dma_probe, | |
1254 | .remove = owl_dma_remove, | |
1255 | .driver = { | |
1256 | .name = "dma-owl", | |
1257 | .of_match_table = of_match_ptr(owl_dma_match), | |
1258 | }, | |
1259 | }; | |
1260 | ||
1261 | static int owl_dma_init(void) | |
1262 | { | |
1263 | return platform_driver_register(&owl_dma_driver); | |
1264 | } | |
1265 | subsys_initcall(owl_dma_init); | |
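/*
 * Registering at subsys_initcall time rather than module_init is the
 * usual way a DMA provider is made available before the client drivers
 * that depend on it start probing.
 */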
1266 | ||
1267 | static void __exit owl_dma_exit(void) | |
1268 | { | |
1269 | platform_driver_unregister(&owl_dma_driver); | |
1270 | } | |
1271 | module_exit(owl_dma_exit); | |
1272 | ||
1273 | MODULE_AUTHOR("David Liu <[email protected]>"); | |
1274 | MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>"); | |
1275 | MODULE_DESCRIPTION("Actions Semi Owl SoCs DMA driver"); | |
1276 | MODULE_LICENSE("GPL"); |