// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <[email protected]>

#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "fsl-edma-common.h"

#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

#define EDMA64_ERQH		0x08
#define EDMA64_EEIH		0x10
#define EDMA64_SERQ		0x18
#define EDMA64_CERQ		0x19
#define EDMA64_SEEI		0x1a
#define EDMA64_CEEI		0x1b
#define EDMA64_CINT		0x1c
#define EDMA64_CERR		0x1d
#define EDMA64_SSRT		0x1e
#define EDMA64_CDNE		0x1f
#define EDMA64_INTH		0x20
#define EDMA64_INTL		0x24
#define EDMA64_ERRH		0x28
#define EDMA64_ERRL		0x2c

#define EDMA_TCD		0x1000

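/*
 * Enable the hardware request line and the error interrupt for @fsl_chan.
 * v1 controllers go through the endian-aware edma_writeb() helper; the
 * ColdFire variant is natively big endian and is written directly.
 */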
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->drvdata->version == v1) {
		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
		edma_writeb(fsl_chan->edma, ch, regs->serq);
	} else {
		/*
		 * ColdFire is big endian and natively accesses
		 * big endian I/O peripherals.
		 */
		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
		iowrite8(ch, regs->serq);
	}
}

void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->drvdata->version == v1) {
		edma_writeb(fsl_chan->edma, ch, regs->cerq);
		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
	} else {
		/*
		 * ColdFire is big endian and natively accesses
		 * big endian I/O peripherals.
		 */
		iowrite8(ch, regs->cerq);
		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
	}
}
EXPORT_SYMBOL_GPL(fsl_edma_disable_request);

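/*
 * DMAMUX slot programming helpers: most controllers use one 8-bit
 * configuration register per channel, v3 controllers use a 32-bit
 * register with the enable bit in the top byte.
 */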
static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			   u32 off, u32 slot, bool enable)
{
	u8 val8;

	if (enable)
		val8 = EDMAMUX_CHCFG_ENBL | slot;
	else
		val8 = EDMAMUX_CHCFG_DIS;

	iowrite8(val8, addr + off);
}

static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			    u32 off, u32 slot, bool enable)
{
	u32 val;

	if (enable)
		val = EDMAMUX_CHCFG_ENBL << 24 | slot;
	else
		val = EDMAMUX_CHCFG_DIS;

	iowrite32(val, addr + off * 4);
}

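/*
 * Route request source @slot to this channel through the DMAMUX, or
 * detach it when @enable is false.  On mux_swap hardware the per-mux
 * channel offset is additionally byte-swapped within each 32-bit group.
 */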
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned int chans_per_mux, ch_off;
	int endian_diff[4] = {3, 1, -1, -3};
	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;

	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;

	if (fsl_chan->edma->drvdata->mux_swap)
		ch_off += endian_diff[ch_off % 4];

	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
	slot = EDMAMUX_CHCFG_SOURCE(slot);

	if (fsl_chan->edma->drvdata->version == v3)
		mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
	else
		mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);

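/*
 * Translate a bus width in bytes into the TCD ATTR source/destination
 * size encoding, e.g. a 4-byte width yields the 32-bit SSIZE/DSIZE
 * fields.  Unsupported widths fall back to 32-bit.
 */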
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	switch (addr_width) {
	case 1:
		return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
	case 2:
		return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
	case 4:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	case 8:
		return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
	default:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	}
}

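/* Release a descriptor: return each hardware TCD to the pool, then free it. */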
void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}
EXPORT_SYMBOL_GPL(fsl_edma_free_desc);

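/*
 * dmaengine device_terminate_all callback: stop the request line, drop
 * the active descriptor and free everything that was queued or issued.
 */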
int fsl_edma_terminate_all(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_chan->edesc = NULL;
	fsl_chan->idle = true;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);

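/*
 * Pause and resume only gate the channel's hardware request line; the
 * TCD context stays in the engine, so a resumed transfer continues from
 * where it was stopped.
 */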
int fsl_edma_pause(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->status = DMA_PAUSED;
		fsl_chan->idle = true;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_pause);

int fsl_edma_resume(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_enable_request(fsl_chan);
		fsl_chan->status = DMA_IN_PROGRESS;
		fsl_chan->idle = false;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_resume);

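/*
 * The slave FIFO is mapped with dma_map_resource() so that the address
 * programmed into the TCDs is a proper DMA address.  The mapping is
 * cached in the channel and redone only when the transfer direction or
 * the channel configuration changes.
 */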
static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
	if (fsl_chan->dma_dir != DMA_NONE)
		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
				   fsl_chan->dma_dev_addr,
				   fsl_chan->dma_dev_size,
				   fsl_chan->dma_dir, 0);
	fsl_chan->dma_dir = DMA_NONE;
}

static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
				    enum dma_transfer_direction dir)
{
	struct device *dev = fsl_chan->vchan.chan.device->dev;
	enum dma_data_direction dma_dir;
	phys_addr_t addr = 0;
	u32 size = 0;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma_dir = DMA_FROM_DEVICE;
		addr = fsl_chan->cfg.dst_addr;
		size = fsl_chan->cfg.dst_maxburst;
		break;
	case DMA_DEV_TO_MEM:
		dma_dir = DMA_TO_DEVICE;
		addr = fsl_chan->cfg.src_addr;
		size = fsl_chan->cfg.src_maxburst;
		break;
	default:
		dma_dir = DMA_NONE;
		break;
	}

	/* Already mapped for this config? */
	if (fsl_chan->dma_dir == dma_dir)
		return true;

	fsl_edma_unprep_slave_dma(fsl_chan);

	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
		return false;
	fsl_chan->dma_dev_size = size;
	fsl_chan->dma_dir = dma_dir;

	return true;
}

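/*
 * dmaengine device_config callback: the configuration is only cached
 * here and takes effect when the next transfer is prepared; any cached
 * FIFO mapping is dropped since the slave addresses may have changed.
 */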
int fsl_edma_slave_config(struct dma_chan *chan,
			  struct dma_slave_config *cfg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
	fsl_edma_unprep_slave_dma(fsl_chan);

	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_slave_config);

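/*
 * Bytes left to transfer for @vdesc: total nbytes * biter over all TCDs;
 * for an in-flight descriptor, the engine's current source (MEM_TO_DEV)
 * or destination (DEV_TO_MEM) address then locates the active TCD, and
 * only the untransferred tail of that TCD is counted.  Runs under
 * vchan.lock.
 */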
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
				    struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;
	enum dma_transfer_direction dir = edesc->dirn;
	dma_addr_t cur_addr, dma_addr;
	size_t len, size;
	int i;

	/* calculate the total size in this desc */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
		len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);

	if (!in_progress)
		return len;

	if (dir == DMA_MEM_TO_DEV)
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
	else
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);

	/* figure out the finished and calculate the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
		if (dir == DMA_MEM_TO_DEV)
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
		else
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

		len -= size;
		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}

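/*
 * dmaengine device_tx_status callback: completed cookies are reported
 * as-is; otherwise the residue is computed for the running or the
 * still-queued descriptor matching @cookie.
 */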
enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}
EXPORT_SYMBOL_GPL(fsl_edma_tx_status);

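/*
 * Load a software TCD into channel @ch's memory-mapped TCD registers.
 * CSR is cleared first and written last, presumably so the channel
 * cannot trigger from a half-programmed TCD.
 */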
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
				  struct fsl_edma_hw_tcd *tcd)
{
	struct fsl_edma_engine *edma = fsl_chan->edma;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	/*
	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
	 * endian format.  However, the TCD registers must be loaded in
	 * big- or little-endian according to the eDMA engine model's
	 * endianness, which the edma_write* helpers take care of.
	 */
	edma_writew(edma, 0, &regs->tcd[ch].csr);

	edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr);
	edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr);

	edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr);
	edma_writew(edma, tcd->soff, &regs->tcd[ch].soff);

	edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes);
	edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast);

	edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer);
	edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);
	edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff);

	edma_writel(edma, (s32)tcd->dlast_sga,
			&regs->tcd[ch].dlast_sga);

	edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
}

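/*
 * Fill one in-memory TCD for a transfer chunk.  @major_int requests an
 * interrupt on major loop completion, @disable_req clears the request
 * line when the major loop finishes (used for the last chunk) and
 * @enable_sg chains to the next TCD at @dlast_sga.
 */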
static inline
void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
		       bool disable_req, bool enable_sg)
{
	u16 csr = 0;

	/*
	 * eDMA hardware SGs require the TCDs to be stored in little
	 * endian format irrespective of the register endian model.
	 * So we put the value in little endian in memory, waiting
	 * for fsl_edma_set_tcd_regs doing the swap.
	 */
	tcd->saddr = cpu_to_le32(src);
	tcd->daddr = cpu_to_le32(dst);

	tcd->attr = cpu_to_le16(attr);

	tcd->soff = cpu_to_le16(soff);

	tcd->nbytes = cpu_to_le32(nbytes);
	tcd->slast = cpu_to_le32(slast);

	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
	tcd->doff = cpu_to_le16(doff);

	tcd->dlast_sga = cpu_to_le32(dlast_sga);

	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	tcd->csr = cpu_to_le16(csr);
}

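/*
 * Allocate a descriptor together with @sg_len hardware TCDs from the
 * channel's dma_pool; a partial allocation failure is fully unwound.
 */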
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
						 int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
				fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}

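/*
 * dmaengine device_prep_dma_cyclic callback: one TCD per period, each
 * raising a major loop interrupt and scatter-gather chaining to the
 * next, with the last TCD linking back to the first to close the ring.
 */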
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	int sg_len, i;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	sg_len = buf_len / period_len;
	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;
	fsl_desc->dirn = direction;

	dma_buf_next = dma_addr;
	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = dma_buf_next;
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
				  fsl_chan->attr, soff, nbytes, 0, iter,
				  iter, doff, last_sg, true, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);

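/*
 * dmaengine device_prep_slave_sg callback: intermediate TCDs chain to
 * the next entry without interrupting; only the final TCD raises the
 * completion interrupt and drops the request line.
 */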
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;
	fsl_desc->dirn = direction;

	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		iter = sg_dma_len(sg) / nbytes;
		if (i < sg_len - 1) {
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  false, false, true);
		} else {
			last_sg = 0;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);

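/* Start the next issued descriptor, if any.  vchan.lock must be held. */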
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct virt_dma_desc *vdesc;

	lockdep_assert_held(&fsl_chan->vchan.lock);

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
	fsl_chan->idle = false;
}
EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);

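/*
 * dmaengine device_issue_pending callback: kick the engine if it is
 * idle; nothing is submitted while the controller is suspended.
 */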
void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (unlikely(fsl_chan->pm_state != RUNNING)) {
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		/* cannot submit due to suspend */
		return;
	}

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);

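/* Create the channel's pool of 32-byte aligned hardware TCDs. */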
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
				sizeof(struct fsl_edma_hw_tcd),
				32, 0);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);

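/*
 * Tear the channel down: stop it, detach it from the DMAMUX where one
 * is present, free all descriptors, drop the FIFO mapping and destroy
 * the TCD pool.
 */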
void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_engine *edma = fsl_chan->edma;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	if (edma->drvdata->dmamuxs)
		fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	fsl_edma_unprep_slave_dma(fsl_chan);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
}
EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);

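/* Detach every channel from @dmadev and kill its virt-dma tasklet. */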
void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_edma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				&dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}
EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);

/*
 * On the 32-channel Vybrid/mpc577x eDMA version (here called "v1"),
 * register offsets differ from those of the 64-channel ColdFire
 * mcf5441x eDMA (here called "v2").
 *
 * This function sets up the register offsets for the declared version,
 * so it must be called from xxx_edma_probe() just after the edma
 * "version" and "membase" fields have been set.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
	edma->regs.cr = edma->membase + EDMA_CR;
	edma->regs.es = edma->membase + EDMA_ES;
	edma->regs.erql = edma->membase + EDMA_ERQ;
	edma->regs.eeil = edma->membase + EDMA_EEI;

	edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_SERQ : EDMA_SERQ);
	edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CERQ : EDMA_CERQ);
	edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_SEEI : EDMA_SEEI);
	edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CEEI : EDMA_CEEI);
	edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CINT : EDMA_CINT);
	edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CERR : EDMA_CERR);
	edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_SSRT : EDMA_SSRT);
	edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CDNE : EDMA_CDNE);
	edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_INTL : EDMA_INTR);
	edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_ERRL : EDMA_ERR);

	if (edma->drvdata->version == v2) {
		edma->regs.erqh = edma->membase + EDMA64_ERQH;
		edma->regs.eeih = edma->membase + EDMA64_EEIH;
		edma->regs.errh = edma->membase + EDMA64_ERRH;
		edma->regs.inth = edma->membase + EDMA64_INTH;
	}

	edma->regs.tcd = edma->membase + EDMA_TCD;
}
EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);

MODULE_LICENSE("GPL v2");