// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <[email protected]>

#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "fsl-edma-common.h"

#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

#define EDMA64_ERQH		0x08
#define EDMA64_EEIH		0x10
#define EDMA64_SERQ		0x18
#define EDMA64_CERQ		0x19
#define EDMA64_SEEI		0x1a
#define EDMA64_CEEI		0x1b
#define EDMA64_CINT		0x1c
#define EDMA64_CERR		0x1d
#define EDMA64_SSRT		0x1e
#define EDMA64_CDNE		0x1f
#define EDMA64_INTH		0x20
#define EDMA64_INTL		0x24
#define EDMA64_ERRH		0x28
#define EDMA64_ERRL		0x2c

#define EDMA_TCD		0x1000

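/*
 * Two register maps are covered here: the EDMA_* offsets are for the
 * v1 (Vybrid-style, 32-channel) block and the EDMA64_* offsets for the
 * v2 (ColdFire mcf5441x, 64-channel) block.  The single-byte set/clear
 * registers (SERQ..CEEI, CINT..CDNE) appear in opposite byte order in
 * the two maps, which looks consistent with the differing endianness of
 * the two integrations; fsl_edma_setup_regs() below picks the right
 * set at probe time.
 */
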
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->drvdata->version == v1) {
		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
		edma_writeb(fsl_chan->edma, ch, regs->serq);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
		iowrite8(ch, regs->serq);
	}
}

void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->drvdata->version == v1) {
		edma_writeb(fsl_chan->edma, ch, regs->cerq);
		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(ch, regs->cerq);
		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
	}
}
EXPORT_SYMBOL_GPL(fsl_edma_disable_request);

static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			   u32 off, u32 slot, bool enable)
{
	u8 val8;

	if (enable)
		val8 = EDMAMUX_CHCFG_ENBL | slot;
	else
		val8 = EDMAMUX_CHCFG_DIS;

	iowrite8(val8, addr + off);
}

static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			    u32 off, u32 slot, bool enable)
{
	u32 val;

	if (enable)
		val = EDMAMUX_CHCFG_ENBL << 24 | slot;
	else
		val = EDMAMUX_CHCFG_DIS;

	iowrite32(val, addr + off * 4);
}

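/*
 * The two helpers above differ only in register width: older DMAMUXes
 * expose one byte-wide CHCFG register per channel, while the v3 mux
 * uses 32-bit registers at a 4-byte stride (hence "off * 4") with the
 * enable bit shifted into the top byte.
 */
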
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned int chans_per_mux, ch_off;
	int endian_diff[4] = {3, 1, -1, -3};
	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;

	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;

	if (fsl_chan->edma->drvdata->mux_swap)
		ch_off += endian_diff[ch_off % 4];

	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
	slot = EDMAMUX_CHCFG_SOURCE(slot);

	if (fsl_chan->edma->drvdata->version == v3)
		mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
	else
		mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);

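/*
 * Worked example with illustrative numbers: with n_chans = 32 and two
 * muxes (dmamux_nr = 2), chans_per_mux is 16, so channel 18 programs
 * slot register offset 2 within muxbase[1].  When mux_swap is set,
 * endian_diff[] reverses the byte order within each 32-bit group,
 * mapping offsets 0,1,2,3 to 3,2,1,0 to hit the right byte lane on
 * swapped-endian mux blocks.
 */
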
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	switch (addr_width) {
	case 1:
		return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
	case 2:
		return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
	case 4:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	case 8:
		return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
	default:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	}
}

void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}
EXPORT_SYMBOL_GPL(fsl_edma_free_desc);

int fsl_edma_terminate_all(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_chan->edesc = NULL;
	fsl_chan->idle = true;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);

int fsl_edma_pause(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->status = DMA_PAUSED;
		fsl_chan->idle = true;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_pause);

int fsl_edma_resume(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_enable_request(fsl_chan);
		fsl_chan->status = DMA_IN_PROGRESS;
		fsl_chan->idle = false;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_resume);

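/*
 * Pause and resume only gate the hardware request line: the in-flight
 * descriptor stays in fsl_chan->edesc and the TCD registers keep their
 * state, so fsl_edma_resume() continues from where the engine stopped
 * simply by re-enabling the request.
 */
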
static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
	if (fsl_chan->dma_dir != DMA_NONE)
		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
				   fsl_chan->dma_dev_addr,
				   fsl_chan->dma_dev_size,
				   fsl_chan->dma_dir, 0);
	fsl_chan->dma_dir = DMA_NONE;
}

static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
				    enum dma_transfer_direction dir)
{
	struct device *dev = fsl_chan->vchan.chan.device->dev;
	enum dma_data_direction dma_dir;
	phys_addr_t addr = 0;
	u32 size = 0;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma_dir = DMA_FROM_DEVICE;
		addr = fsl_chan->cfg.dst_addr;
		size = fsl_chan->cfg.dst_maxburst;
		break;
	case DMA_DEV_TO_MEM:
		dma_dir = DMA_TO_DEVICE;
		addr = fsl_chan->cfg.src_addr;
		size = fsl_chan->cfg.src_maxburst;
		break;
	default:
		dma_dir = DMA_NONE;
		break;
	}

	/* Already mapped for this config? */
	if (fsl_chan->dma_dir == dma_dir)
		return true;

	fsl_edma_unprep_slave_dma(fsl_chan);

	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
		return false;
	fsl_chan->dma_dev_size = size;
	fsl_chan->dma_dir = dma_dir;

	return true;
}

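/*
 * The slave FIFO address is mapped with dma_map_resource() once per
 * direction and cached in the channel: the early return above keeps
 * the mapping across back-to-back transfers in the same direction,
 * and fsl_edma_unprep_slave_dma() drops it when the direction or the
 * slave config changes.
 */
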
int fsl_edma_slave_config(struct dma_chan *chan,
			  struct dma_slave_config *cfg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
	fsl_edma_unprep_slave_dma(fsl_chan);

	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_slave_config);

static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
				    struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;
	enum dma_transfer_direction dir = edesc->dirn;
	dma_addr_t cur_addr, dma_addr;
	size_t len, size;
	int i;

	/* calculate the total size in this desc */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
		len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);

	if (!in_progress)
		return len;

	if (dir == DMA_MEM_TO_DEV)
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
	else
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);

	/* figure out which TCDs have finished and calculate the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
		if (dir == DMA_MEM_TO_DEV)
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
		else
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

		len -= size;
		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}

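/*
 * Worked example (illustrative numbers): a descriptor with two TCDs of
 * nbytes * biter = 0x100 bytes each starts with len = 0x200.  If the
 * engine is executing the second TCD and its running saddr/daddr is
 * 0x40 bytes into that TCD's range, the loop subtracts both sizes and
 * then adds back the 0xc0 bytes still to go, returning 0xc0.
 */
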
enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}
EXPORT_SYMBOL_GPL(fsl_edma_tx_status);

static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
				  struct fsl_edma_hw_tcd *tcd)
{
	struct fsl_edma_engine *edma = fsl_chan->edma;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;
	u16 csr = 0;

	/*
	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
	 * endian format.  However, the TCD registers must be loaded in
	 * big- or little-endian format according to the eDMA engine's
	 * endian model; the edma_write helpers take care of any swap.
	 */
	edma_writew(edma, 0, &regs->tcd[ch].csr);

	edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr);
	edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr);

	edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr);
	edma_writew(edma, tcd->soff, &regs->tcd[ch].soff);

	edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes);
	edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast);

	edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer);
	edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);
	edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff);

	edma_writel(edma, (s32)tcd->dlast_sga, &regs->tcd[ch].dlast_sga);

	if (fsl_chan->is_sw) {
		csr = le16_to_cpu(tcd->csr);
		csr |= EDMA_TCD_CSR_START;
		tcd->csr = cpu_to_le16(csr);
	}

	edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
}

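/*
 * Note the write ordering above: CSR is cleared first so the channel
 * cannot start or follow a stale scatter-gather link while the other
 * TCD fields are being reloaded, and it is written last.  For software
 * (memcpy) channels there is no hardware request line, so the START
 * bit is set in CSR to kick the transfer immediately.
 */
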
static inline
void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
		       bool disable_req, bool enable_sg)
{
	u16 csr = 0;

	/*
	 * eDMA hardware SGs require the TCDs to be stored in little
	 * endian format irrespective of the register endian model.
	 * So the values are put in little endian in memory, and
	 * fsl_edma_set_tcd_regs() does any swap when loading them.
	 */
	tcd->saddr = cpu_to_le32(src);
	tcd->daddr = cpu_to_le32(dst);

	tcd->attr = cpu_to_le16(attr);

	tcd->soff = cpu_to_le16(soff);

	tcd->nbytes = cpu_to_le32(nbytes);
	tcd->slast = cpu_to_le32(slast);

	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
	tcd->doff = cpu_to_le16(doff);

	tcd->dlast_sga = cpu_to_le32(dlast_sga);

	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	tcd->csr = cpu_to_le16(csr);
}

static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
						 int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}

struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	int sg_len, i;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	sg_len = buf_len / period_len;
	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;
	fsl_desc->dirn = direction;

	dma_buf_next = dma_addr;
	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = dma_buf_next;
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
				  fsl_chan->attr, soff, nbytes, 0, iter,
				  iter, doff, last_sg, true, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);

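/*
 * In the cyclic case each TCD's dlast_sga links to the next TCD, and
 * the last one links back to the first ("(i + 1) % sg_len" above), so
 * the engine loops over the periods indefinitely, raising a major-loop
 * interrupt (major_int = true) at every period boundary until the
 * channel is terminated.
 */
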
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;
	fsl_desc->dirn = direction;

	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		iter = sg_dma_len(sg) / nbytes;
		if (i < sg_len - 1) {
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  false, false, true);
		} else {
			last_sg = 0;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);

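/*
 * Note the asymmetry between list entries: intermediate TCDs enable
 * scatter-gather chaining with no interrupt, while the last TCD sets
 * major_int and disable_req (EDMA_TCD_CSR_D_REQ) so the hardware
 * request is cleared and an interrupt is raised once the whole list
 * has completed.
 */
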
struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
						     dma_addr_t dma_dst, dma_addr_t dma_src,
						     size_t len, unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;

	fsl_chan->is_sw = true;

	/* copy_align and max_seg_size are matched to this, so one TCD is enough */
	fsl_edma_fill_tcd(fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
			  EDMA_TCD_ATTR_SSIZE_32BYTE | EDMA_TCD_ATTR_DSIZE_32BYTE,
			  32, len, 0, 1, 1, 32, 0, true, true, false);

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_memcpy);

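/*
 * The memcpy TCD runs a single major iteration (citer = biter = 1)
 * that moves all "len" bytes as 32-byte reads and writes
 * (soff = doff = 32), which is why, as the comment above notes, the
 * wrapper drivers are expected to advertise a matching copy_align and
 * max_seg_size.
 */
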
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct virt_dma_desc *vdesc;

	lockdep_assert_held(&fsl_chan->vchan.lock);

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
	fsl_chan->idle = false;
}
EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);

void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (unlikely(fsl_chan->pm_state != RUNNING)) {
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		/* cannot submit due to suspend */
		return;
	}

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);

int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
				sizeof(struct fsl_edma_hw_tcd),
				32, 0);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);

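/*
 * The 32-byte pool alignment is deliberate: the eDMA engine fetches
 * hardware TCDs as 32-byte aligned blocks, so every vtcd handed out by
 * this pool can be linked into a scatter-gather chain directly via its
 * ptcd address.
 */
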
void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_engine *edma = fsl_chan->edma;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	if (edma->drvdata->dmamuxs)
		fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	fsl_edma_unprep_slave_dma(fsl_chan);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
	fsl_chan->is_sw = false;
}
EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);

void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_edma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}
EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);

/*
 * On the 32-channel Vybrid/mpc577x eDMA version (here called "v1"),
 * register offsets differ from the 64-channel ColdFire mcf5441x eDMA
 * (here called "v2").
 *
 * This function sets up the register offsets as per the declared
 * version, so it must be called in xxx_edma_probe() just after setting
 * the edma "version" and "membase" appropriately.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
	edma->regs.cr = edma->membase + EDMA_CR;
	edma->regs.es = edma->membase + EDMA_ES;
	edma->regs.erql = edma->membase + EDMA_ERQ;
	edma->regs.eeil = edma->membase + EDMA_EEI;

	edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_SERQ : EDMA_SERQ);
	edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CERQ : EDMA_CERQ);
	edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_SEEI : EDMA_SEEI);
	edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CEEI : EDMA_CEEI);
	edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CINT : EDMA_CINT);
	edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CERR : EDMA_CERR);
	edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_SSRT : EDMA_SSRT);
	edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CDNE : EDMA_CDNE);
	edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_INTL : EDMA_INTR);
	edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_ERRL : EDMA_ERR);

	if (edma->drvdata->version == v2) {
		edma->regs.erqh = edma->membase + EDMA64_ERQH;
		edma->regs.eeih = edma->membase + EDMA64_EEIH;
		edma->regs.errh = edma->membase + EDMA64_ERRH;
		edma->regs.inth = edma->membase + EDMA64_INTH;
	}

	edma->regs.tcd = edma->membase + EDMA_TCD;
}
EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);

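/*
 * Minimal usage sketch from a hypothetical platform probe (the variable
 * names and devm_* calls below are illustrative, not lifted from a
 * specific fsl-edma platform driver):
 *
 *	fsl_edma->membase = devm_platform_ioremap_resource(pdev, 0);
 *	fsl_edma->drvdata = of_device_get_match_data(&pdev->dev);
 *	fsl_edma_setup_regs(fsl_edma);
 *	regs = &fsl_edma->regs;
 */
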
9d831528 | 748 | MODULE_LICENSE("GPL v2"); |