/*
 * Copyright 2015 Linaro.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define DRIVER_NAME "zx-dma"
#define DMA_ALIGN 4
#define DMA_MAX_SIZE (0x10000 - 512)
#define LLI_BLOCK_SIZE (4 * PAGE_SIZE)

#define REG_ZX_SRC_ADDR 0x00
#define REG_ZX_DST_ADDR 0x04
#define REG_ZX_TX_X_COUNT 0x08
#define REG_ZX_TX_ZY_COUNT 0x0c
#define REG_ZX_SRC_ZY_STEP 0x10
#define REG_ZX_DST_ZY_STEP 0x14
#define REG_ZX_LLI_ADDR 0x1c
#define REG_ZX_CTRL 0x20
#define REG_ZX_TC_IRQ 0x800
#define REG_ZX_SRC_ERR_IRQ 0x804
#define REG_ZX_DST_ERR_IRQ 0x808
#define REG_ZX_CFG_ERR_IRQ 0x80c
#define REG_ZX_TC_IRQ_RAW 0x810
#define REG_ZX_SRC_ERR_IRQ_RAW 0x814
#define REG_ZX_DST_ERR_IRQ_RAW 0x818
#define REG_ZX_CFG_ERR_IRQ_RAW 0x81c
#define REG_ZX_STATUS 0x820
#define REG_ZX_DMA_GRP_PRIO 0x824
#define REG_ZX_DMA_ARB 0x828

#define ZX_FORCE_CLOSE BIT(31)
#define ZX_DST_BURST_WIDTH(x) (((x) & 0x7) << 13)
#define ZX_MAX_BURST_LEN 16
#define ZX_SRC_BURST_LEN(x) (((x) & 0xf) << 9)
#define ZX_SRC_BURST_WIDTH(x) (((x) & 0x7) << 6)
#define ZX_IRQ_ENABLE_ALL (3 << 4)
#define ZX_DST_FIFO_MODE BIT(3)
#define ZX_SRC_FIFO_MODE BIT(2)
#define ZX_SOFT_REQ BIT(1)
#define ZX_CH_ENABLE BIT(0)

#define ZX_DMA_BUSWIDTHS \
        (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
        BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
        BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
        BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
        BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

enum zx_dma_burst_width {
        ZX_DMA_WIDTH_8BIT = 0,
        ZX_DMA_WIDTH_16BIT = 1,
        ZX_DMA_WIDTH_32BIT = 2,
        ZX_DMA_WIDTH_64BIT = 3,
};

struct zx_desc_hw {
        u32 saddr;
        u32 daddr;
        u32 src_x;
        u32 src_zy;
        u32 src_zy_step;
        u32 dst_zy_step;
        u32 reserved1;
        u32 lli;
        u32 ctr;
        u32 reserved[7]; /* pad to the size of the hardware register region */
} __aligned(32);

struct zx_dma_desc_sw {
        struct virt_dma_desc vd;
        dma_addr_t desc_hw_lli;
        size_t desc_num;
        size_t size;
        struct zx_desc_hw *desc_hw;
};

struct zx_dma_phy;

struct zx_dma_chan {
        struct dma_slave_config slave_cfg;
        int id; /* Request phy chan id */
        u32 ccfg;
        u32 cyclic;
        struct virt_dma_chan vc;
        struct zx_dma_phy *phy;
        struct list_head node;
        dma_addr_t dev_addr;
        enum dma_status status;
};

struct zx_dma_phy {
        u32 idx;
        void __iomem *base;
        struct zx_dma_chan *vchan;
        struct zx_dma_desc_sw *ds_run;
        struct zx_dma_desc_sw *ds_done;
};

struct zx_dma_dev {
        struct dma_device slave;
        void __iomem *base;
        spinlock_t lock; /* lock for ch and phy */
        struct list_head chan_pending;
        struct zx_dma_phy *phy;
        struct zx_dma_chan *chans;
        struct clk *clk;
        struct dma_pool *pool;
        u32 dma_channels;
        u32 dma_requests;
        int irq;
};

#define to_zx_dma(dmadev) container_of(dmadev, struct zx_dma_dev, slave)

static struct zx_dma_chan *to_zx_chan(struct dma_chan *chan)
{
        return container_of(chan, struct zx_dma_chan, vc.chan);
}

static void zx_dma_terminate_chan(struct zx_dma_phy *phy, struct zx_dma_dev *d)
{
        u32 val = 0;

        val = readl_relaxed(phy->base + REG_ZX_CTRL);
        val &= ~ZX_CH_ENABLE;
        val |= ZX_FORCE_CLOSE;
        writel_relaxed(val, phy->base + REG_ZX_CTRL);

        val = 0x1 << phy->idx;
        writel_relaxed(val, d->base + REG_ZX_TC_IRQ_RAW);
        writel_relaxed(val, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
        writel_relaxed(val, d->base + REG_ZX_DST_ERR_IRQ_RAW);
        writel_relaxed(val, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
}

static void zx_dma_set_desc(struct zx_dma_phy *phy, struct zx_desc_hw *hw)
{
        writel_relaxed(hw->saddr, phy->base + REG_ZX_SRC_ADDR);
        writel_relaxed(hw->daddr, phy->base + REG_ZX_DST_ADDR);
        writel_relaxed(hw->src_x, phy->base + REG_ZX_TX_X_COUNT);
        writel_relaxed(0, phy->base + REG_ZX_TX_ZY_COUNT);
        writel_relaxed(0, phy->base + REG_ZX_SRC_ZY_STEP);
        writel_relaxed(0, phy->base + REG_ZX_DST_ZY_STEP);
        writel_relaxed(hw->lli, phy->base + REG_ZX_LLI_ADDR);
        writel_relaxed(hw->ctr, phy->base + REG_ZX_CTRL);
}

static u32 zx_dma_get_curr_lli(struct zx_dma_phy *phy)
{
        return readl_relaxed(phy->base + REG_ZX_LLI_ADDR);
}

static u32 zx_dma_get_chan_stat(struct zx_dma_dev *d)
{
        return readl_relaxed(d->base + REG_ZX_STATUS);
}

static void zx_dma_init_state(struct zx_dma_dev *d)
{
        /* set same priority */
        writel_relaxed(0x0, d->base + REG_ZX_DMA_ARB);
        /* clear all irq */
        writel_relaxed(0xffffffff, d->base + REG_ZX_TC_IRQ_RAW);
        writel_relaxed(0xffffffff, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
        writel_relaxed(0xffffffff, d->base + REG_ZX_DST_ERR_IRQ_RAW);
        writel_relaxed(0xffffffff, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
}

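/*
 * Program the next issued descriptor of a virtual channel onto its
 * physical channel.  Returns -EAGAIN when no physical channel is bound
 * yet or when the hardware channel is still busy.
 */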
static int zx_dma_start_txd(struct zx_dma_chan *c)
{
        struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device);
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

        if (!c->phy)
                return -EAGAIN;

        if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d))
                return -EAGAIN;

        if (vd) {
                struct zx_dma_desc_sw *ds =
                        container_of(vd, struct zx_dma_desc_sw, vd);
                /*
                 * fetch and remove request from vc->desc_issued
                 * so vc->desc_issued only contains desc pending
                 */
                list_del(&ds->vd.node);
                c->phy->ds_run = ds;
                c->phy->ds_done = NULL;
                /* start dma */
                zx_dma_set_desc(c->phy, ds->desc_hw);
                return 0;
        }
        c->phy->ds_done = NULL;
        c->phy->ds_run = NULL;
        return -EAGAIN;
}

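/*
 * Channel scheduler: release physical channels whose descriptor has
 * completed, hand free physical channels to virtual channels waiting on
 * d->chan_pending, then start the next descriptor on every channel that
 * was just allocated.
 */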
static void zx_dma_task(struct zx_dma_dev *d)
{
        struct zx_dma_phy *p;
        struct zx_dma_chan *c, *cn;
        unsigned pch, pch_alloc = 0;
        unsigned long flags;

        /* check new dma request of running channel in vc->desc_issued */
        list_for_each_entry_safe(c, cn, &d->slave.channels,
                                 vc.chan.device_node) {
                spin_lock_irqsave(&c->vc.lock, flags);
                p = c->phy;
                if (p && p->ds_done && zx_dma_start_txd(c)) {
                        /* No current txd associated with this channel */
                        dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
                        /* Mark this channel free */
                        c->phy = NULL;
                        p->vchan = NULL;
                }
                spin_unlock_irqrestore(&c->vc.lock, flags);
        }

        /* check new channel request in d->chan_pending */
        spin_lock_irqsave(&d->lock, flags);
        while (!list_empty(&d->chan_pending)) {
                c = list_first_entry(&d->chan_pending,
                                     struct zx_dma_chan, node);
                p = &d->phy[c->id];
                if (!p->vchan) {
                        /* remove from d->chan_pending */
                        list_del_init(&c->node);
                        pch_alloc |= 1 << c->id;
                        /* Mark this channel allocated */
                        p->vchan = c;
                        c->phy = p;
                } else {
                        dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id);
                }
        }
        spin_unlock_irqrestore(&d->lock, flags);

        for (pch = 0; pch < d->dma_channels; pch++) {
                if (pch_alloc & (1 << pch)) {
                        p = &d->phy[pch];
                        c = p->vchan;
                        if (c) {
                                spin_lock_irqsave(&c->vc.lock, flags);
                                zx_dma_start_txd(c);
                                spin_unlock_irqrestore(&c->vc.lock, flags);
                        }
                }
        }
}

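/*
 * Interrupt handler: for every physical channel that raised a transfer
 * complete interrupt, either run the cyclic callback or complete the
 * cookie, then acknowledge the raw IRQ status and reschedule if a
 * non-cyclic descriptor finished.
 */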
static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
{
        struct zx_dma_dev *d = (struct zx_dma_dev *)dev_id;
        struct zx_dma_phy *p;
        struct zx_dma_chan *c;
        u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ);
        u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ);
        u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ);
        u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ);
        u32 i, irq_chan = 0, task = 0;

        while (tc) {
                i = __ffs(tc);
                tc &= ~BIT(i);
                p = &d->phy[i];
                c = p->vchan;
                if (c) {
                        unsigned long flags;

                        spin_lock_irqsave(&c->vc.lock, flags);
                        if (c->cyclic) {
                                vchan_cyclic_callback(&p->ds_run->vd);
                        } else {
                                vchan_cookie_complete(&p->ds_run->vd);
                                p->ds_done = p->ds_run;
                                task = 1;
                        }
                        spin_unlock_irqrestore(&c->vc.lock, flags);
                        irq_chan |= BIT(i);
                }
        }

        if (serr || derr || cfg)
                dev_warn(d->slave.dev, "DMA ERR src 0x%x, dst 0x%x, cfg 0x%x\n",
                         serr, derr, cfg);

        writel_relaxed(irq_chan, d->base + REG_ZX_TC_IRQ_RAW);
        writel_relaxed(serr, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
        writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW);
        writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW);

        if (task)
                zx_dma_task(d);
        return IRQ_HANDLED;
}

static void zx_dma_free_chan_resources(struct dma_chan *chan)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        struct zx_dma_dev *d = to_zx_dma(chan->device);
        unsigned long flags;

        spin_lock_irqsave(&d->lock, flags);
        list_del_init(&c->node);
        spin_unlock_irqrestore(&d->lock, flags);

        vchan_free_chan_resources(&c->vc);
        c->ccfg = 0;
}

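/*
 * Report transfer status.  The residue is either the full descriptor
 * size (cookie still on the issued list) or the sum of the src_x byte
 * counts of the LLI entries not yet consumed, starting from the current
 * LLI address read back from the hardware.
 */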
static enum dma_status zx_dma_tx_status(struct dma_chan *chan,
                                        dma_cookie_t cookie,
                                        struct dma_tx_state *state)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        struct zx_dma_phy *p;
        struct virt_dma_desc *vd;
        unsigned long flags;
        enum dma_status ret;
        size_t bytes = 0;

        ret = dma_cookie_status(&c->vc.chan, cookie, state);
        if (ret == DMA_COMPLETE || !state)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        p = c->phy;
        ret = c->status;

        /*
         * If the cookie is on our issue queue, then the residue is
         * its total size.
         */
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                bytes = container_of(vd, struct zx_dma_desc_sw, vd)->size;
        } else if ((!p) || (!p->ds_run)) {
                bytes = 0;
        } else {
                struct zx_dma_desc_sw *ds = p->ds_run;
                u32 clli = 0, index = 0;

                bytes = 0;
                clli = zx_dma_get_curr_lli(p);
                index = (clli - ds->desc_hw_lli) /
                        sizeof(struct zx_desc_hw) + 1;
                for (; index < ds->desc_num; index++) {
                        bytes += ds->desc_hw[index].src_x;
                        /* end of lli */
                        if (!ds->desc_hw[index].lli)
                                break;
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
        dma_set_residue(state, bytes);
        return ret;
}

static void zx_dma_issue_pending(struct dma_chan *chan)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        struct zx_dma_dev *d = to_zx_dma(chan->device);
        unsigned long flags;
        int issue = 0;

        spin_lock_irqsave(&c->vc.lock, flags);
        /* add request to vc->desc_issued */
        if (vchan_issue_pending(&c->vc)) {
                spin_lock(&d->lock);
                if (!c->phy && list_empty(&c->node)) {
                        /* if new channel, add chan_pending */
                        list_add_tail(&c->node, &d->chan_pending);
                        issue = 1;
                        dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
                }
                spin_unlock(&d->lock);
        } else {
                dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        if (issue)
                zx_dma_task(d);
}

static void zx_dma_fill_desc(struct zx_dma_desc_sw *ds, dma_addr_t dst,
                             dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
        if ((num + 1) < ds->desc_num)
                ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
                        sizeof(struct zx_desc_hw);
        ds->desc_hw[num].saddr = src;
        ds->desc_hw[num].daddr = dst;
        ds->desc_hw[num].src_x = len;
        ds->desc_hw[num].ctr = ccfg;
}

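/*
 * Allocate a software descriptor plus its hardware LLI chain from the
 * per-device DMA pool.  The pool block size (LLI_BLOCK_SIZE) caps the
 * number of LLI entries a single descriptor may carry.
 */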
static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
                                                     struct dma_chan *chan)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        struct zx_dma_desc_sw *ds;
        struct zx_dma_dev *d = to_zx_dma(chan->device);
        int lli_limit = LLI_BLOCK_SIZE / sizeof(struct zx_desc_hw);

        if (num > lli_limit) {
                dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
                        &c->vc, num, lli_limit);
                return NULL;
        }

        ds = kzalloc(sizeof(*ds), GFP_ATOMIC);
        if (!ds)
                return NULL;

        ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
        if (!ds->desc_hw) {
                dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
                kfree(ds);
                return NULL;
        }
        ds->desc_num = num;
        return ds;
}

static enum zx_dma_burst_width zx_dma_burst_width(enum dma_slave_buswidth width)
{
        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
        case DMA_SLAVE_BUSWIDTH_8_BYTES:
                return ffs(width) - 1;
        default:
                return ZX_DMA_WIDTH_32BIT;
        }
}

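/*
 * Build the per-transfer control word (c->ccfg) from the direction and
 * the cached slave configuration: FIFO mode, burst length and burst
 * width for the source and destination sides.
 */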
static int zx_pre_config(struct zx_dma_chan *c, enum dma_transfer_direction dir)
{
        struct dma_slave_config *cfg = &c->slave_cfg;
        enum zx_dma_burst_width src_width;
        enum zx_dma_burst_width dst_width;
        u32 maxburst = 0;

        switch (dir) {
        case DMA_MEM_TO_MEM:
                c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ
                        | ZX_SRC_BURST_LEN(ZX_MAX_BURST_LEN - 1)
                        | ZX_SRC_BURST_WIDTH(ZX_DMA_WIDTH_32BIT)
                        | ZX_DST_BURST_WIDTH(ZX_DMA_WIDTH_32BIT);
                break;
        case DMA_MEM_TO_DEV:
                c->dev_addr = cfg->dst_addr;
                /*
                 * dst len is calculated from src width, len and dst width.
                 * We need to make sure dst len does not exceed MAX LEN.
                 * A trailing single transaction that does not fill a full
                 * burst also requires identical src/dst data width.
                 */
                dst_width = zx_dma_burst_width(cfg->dst_addr_width);
                maxburst = cfg->dst_maxburst;
                maxburst = maxburst < ZX_MAX_BURST_LEN ?
                                maxburst : ZX_MAX_BURST_LEN;
                c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE
                        | ZX_SRC_BURST_LEN(maxburst - 1)
                        | ZX_SRC_BURST_WIDTH(dst_width)
                        | ZX_DST_BURST_WIDTH(dst_width);
                break;
        case DMA_DEV_TO_MEM:
                c->dev_addr = cfg->src_addr;
                src_width = zx_dma_burst_width(cfg->src_addr_width);
                maxburst = cfg->src_maxburst;
                maxburst = maxburst < ZX_MAX_BURST_LEN ?
                                maxburst : ZX_MAX_BURST_LEN;
                c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE
                        | ZX_SRC_BURST_LEN(maxburst - 1)
                        | ZX_SRC_BURST_WIDTH(src_width)
                        | ZX_DST_BURST_WIDTH(src_width);
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

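/*
 * Memory-to-memory transfers are split into LLI entries of at most
 * DMA_MAX_SIZE bytes each; only the last entry terminates the link and
 * raises the completion interrupt.
 */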
static struct dma_async_tx_descriptor *zx_dma_prep_memcpy(
        struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
        size_t len, unsigned long flags)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        struct zx_dma_desc_sw *ds;
        size_t copy = 0;
        int num = 0;

        if (!len)
                return NULL;

        if (zx_pre_config(c, DMA_MEM_TO_MEM))
                return NULL;

        num = DIV_ROUND_UP(len, DMA_MAX_SIZE);

        ds = zx_alloc_desc_resource(num, chan);
        if (!ds)
                return NULL;

        ds->size = len;
        num = 0;

        do {
                copy = min_t(size_t, len, DMA_MAX_SIZE);
                zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

                src += copy;
                dst += copy;
                len -= copy;
        } while (len);

        c->cyclic = 0;
        ds->desc_hw[num - 1].lli = 0;   /* end of link */
        ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
        return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static struct dma_async_tx_descriptor *zx_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
        enum dma_transfer_direction dir, unsigned long flags, void *context)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        struct zx_dma_desc_sw *ds;
        size_t len, avail, total = 0;
        struct scatterlist *sg;
        dma_addr_t addr, src = 0, dst = 0;
        int num = sglen, i;

        if (!sgl)
                return NULL;

        if (zx_pre_config(c, dir))
                return NULL;

        for_each_sg(sgl, sg, sglen, i) {
                avail = sg_dma_len(sg);
                if (avail > DMA_MAX_SIZE)
                        num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
        }

        ds = zx_alloc_desc_resource(num, chan);
        if (!ds)
                return NULL;

        c->cyclic = 0;
        num = 0;
        for_each_sg(sgl, sg, sglen, i) {
                addr = sg_dma_address(sg);
                avail = sg_dma_len(sg);
                total += avail;

                do {
                        len = min_t(size_t, avail, DMA_MAX_SIZE);

                        if (dir == DMA_MEM_TO_DEV) {
                                src = addr;
                                dst = c->dev_addr;
                        } else if (dir == DMA_DEV_TO_MEM) {
                                src = c->dev_addr;
                                dst = addr;
                        }

                        zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

                        addr += len;
                        avail -= len;
                } while (avail);
        }

        ds->desc_hw[num - 1].lli = 0;   /* end of link */
        ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
        ds->size = total;
        return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

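/*
 * Cyclic transfers use one LLI entry per period, each with the
 * completion interrupt enabled, and the last entry links back to the
 * first so the hardware loops over the buffer until terminated.
 */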
static struct dma_async_tx_descriptor *zx_dma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction dir,
        unsigned long flags)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        struct zx_dma_desc_sw *ds;
        dma_addr_t src = 0, dst = 0;
        int num_periods = buf_len / period_len;
        int buf = 0, num = 0;

        if (period_len > DMA_MAX_SIZE) {
                dev_err(chan->device->dev, "maximum period size exceeded\n");
                return NULL;
        }

        if (zx_pre_config(c, dir))
                return NULL;

        ds = zx_alloc_desc_resource(num_periods, chan);
        if (!ds)
                return NULL;
        c->cyclic = 1;

        while (buf < buf_len) {
                if (dir == DMA_MEM_TO_DEV) {
                        src = dma_addr;
                        dst = c->dev_addr;
                } else if (dir == DMA_DEV_TO_MEM) {
                        src = c->dev_addr;
                        dst = dma_addr;
                }
                zx_dma_fill_desc(ds, dst, src, period_len, num++,
                                 c->ccfg | ZX_IRQ_ENABLE_ALL);
                dma_addr += period_len;
                buf += period_len;
        }

        ds->desc_hw[num - 1].lli = ds->desc_hw_lli;
        ds->size = buf_len;
        return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

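/*
 * Slave parameters (device address, bus width, maxburst) are only
 * cached here and applied when a transfer is prepared.  Clients are
 * expected to call dmaengine_slave_config() before preparing a slave
 * or cyclic transfer, e.g. (illustrative sketch only, fifo_phys_addr
 * is a placeholder for the peripheral FIFO address):
 *
 *      struct dma_slave_config cfg = {
 *              .direction = DMA_MEM_TO_DEV,
 *              .dst_addr = fifo_phys_addr,
 *              .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *              .dst_maxburst = 8,
 *      };
 *      dmaengine_slave_config(chan, &cfg);
 */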
static int zx_dma_config(struct dma_chan *chan,
                         struct dma_slave_config *cfg)
{
        struct zx_dma_chan *c = to_zx_chan(chan);

        if (!cfg)
                return -EINVAL;

        memcpy(&c->slave_cfg, cfg, sizeof(*cfg));

        return 0;
}

static int zx_dma_terminate_all(struct dma_chan *chan)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        struct zx_dma_dev *d = to_zx_dma(chan->device);
        struct zx_dma_phy *p = c->phy;
        unsigned long flags;
        LIST_HEAD(head);

        dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

        /* Prevent this channel being scheduled */
        spin_lock(&d->lock);
        list_del_init(&c->node);
        spin_unlock(&d->lock);

        /* Clear the tx descriptor lists */
        spin_lock_irqsave(&c->vc.lock, flags);
        vchan_get_all_descriptors(&c->vc, &head);
        if (p) {
                /* vchan is assigned to a pchan - stop the channel */
                zx_dma_terminate_chan(p, d);
                c->phy = NULL;
                p->vchan = NULL;
                p->ds_run = NULL;
                p->ds_done = NULL;
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

static int zx_dma_transfer_pause(struct dma_chan *chan)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        u32 val = 0;

        val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
        val &= ~ZX_CH_ENABLE;
        writel_relaxed(val, c->phy->base + REG_ZX_CTRL);

        return 0;
}

static int zx_dma_transfer_resume(struct dma_chan *chan)
{
        struct zx_dma_chan *c = to_zx_chan(chan);
        u32 val = 0;

        val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
        val |= ZX_CH_ENABLE;
        writel_relaxed(val, c->phy->base + REG_ZX_CTRL);

        return 0;
}

static void zx_dma_free_desc(struct virt_dma_desc *vd)
{
        struct zx_dma_desc_sw *ds =
                container_of(vd, struct zx_dma_desc_sw, vd);
        struct zx_dma_dev *d = to_zx_dma(vd->tx.chan->device);

        dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
        kfree(ds);
}

static const struct of_device_id zx6702_dma_dt_ids[] = {
        { .compatible = "zte,zx296702-dma", },
        {}
};
MODULE_DEVICE_TABLE(of, zx6702_dma_dt_ids);

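/*
 * Device tree translation: the first cell of the client's DMA specifier
 * selects the request line (checked against "dma-requests"), and this
 * driver also uses it as the physical channel index for the allocated
 * virtual channel.
 */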
static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
                                               struct of_dma *ofdma)
{
        struct zx_dma_dev *d = ofdma->of_dma_data;
        unsigned int request = dma_spec->args[0];
        struct dma_chan *chan;
        struct zx_dma_chan *c;

        if (request >= d->dma_requests)
                return NULL;

        chan = dma_get_any_slave_channel(&d->slave);
        if (!chan) {
                dev_err(d->slave.dev, "get channel fail in %s.\n", __func__);
                return NULL;
        }
        c = to_zx_chan(chan);
        c->id = request;
        dev_info(d->slave.dev, "zx_dma: pchan %u: alloc vchan %p\n",
                 c->id, &c->vc);
        return chan;
}

static int zx_dma_probe(struct platform_device *op)
{
        struct zx_dma_dev *d;
        struct resource *iores;
        int i, ret = 0;

        iores = platform_get_resource(op, IORESOURCE_MEM, 0);
        if (!iores)
                return -EINVAL;

        d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        d->base = devm_ioremap_resource(&op->dev, iores);
        if (IS_ERR(d->base))
                return PTR_ERR(d->base);

        of_property_read_u32((&op->dev)->of_node,
                             "dma-channels", &d->dma_channels);
        of_property_read_u32((&op->dev)->of_node,
                             "dma-requests", &d->dma_requests);
        if (!d->dma_requests || !d->dma_channels)
                return -EINVAL;

        d->clk = devm_clk_get(&op->dev, NULL);
        if (IS_ERR(d->clk)) {
                dev_err(&op->dev, "no dma clk\n");
                return PTR_ERR(d->clk);
        }

        d->irq = platform_get_irq(op, 0);
        ret = devm_request_irq(&op->dev, d->irq, zx_dma_int_handler,
                               0, DRIVER_NAME, d);
        if (ret)
                return ret;

        /* A DMA memory pool for LLIs, align on 32-byte boundary */
        d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
                                   LLI_BLOCK_SIZE, 32, 0);
        if (!d->pool)
                return -ENOMEM;

        /* init phy channel */
        d->phy = devm_kzalloc(&op->dev,
                d->dma_channels * sizeof(struct zx_dma_phy), GFP_KERNEL);
        if (!d->phy)
                return -ENOMEM;

        for (i = 0; i < d->dma_channels; i++) {
                struct zx_dma_phy *p = &d->phy[i];

                p->idx = i;
                p->base = d->base + i * 0x40;
        }

        INIT_LIST_HEAD(&d->slave.channels);
        dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
        dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
        dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
        dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
        d->slave.dev = &op->dev;
        d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
        d->slave.device_tx_status = zx_dma_tx_status;
        d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy;
        d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg;
        d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic;
        d->slave.device_issue_pending = zx_dma_issue_pending;
        d->slave.device_config = zx_dma_config;
        d->slave.device_terminate_all = zx_dma_terminate_all;
        d->slave.device_pause = zx_dma_transfer_pause;
        d->slave.device_resume = zx_dma_transfer_resume;
        d->slave.copy_align = DMA_ALIGN;
        d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS;
        d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS;
        d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV)
                        | BIT(DMA_DEV_TO_MEM);
        d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

        /* init virtual channel */
        d->chans = devm_kzalloc(&op->dev,
                d->dma_requests * sizeof(struct zx_dma_chan), GFP_KERNEL);
        if (!d->chans)
                return -ENOMEM;

        for (i = 0; i < d->dma_requests; i++) {
                struct zx_dma_chan *c = &d->chans[i];

                c->status = DMA_IN_PROGRESS;
                INIT_LIST_HEAD(&c->node);
                c->vc.desc_free = zx_dma_free_desc;
                vchan_init(&c->vc, &d->slave);
        }

        /* Enable clock before accessing registers */
        ret = clk_prepare_enable(d->clk);
        if (ret < 0) {
                dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
                goto zx_dma_out;
        }

        zx_dma_init_state(d);

        spin_lock_init(&d->lock);
        INIT_LIST_HEAD(&d->chan_pending);
        platform_set_drvdata(op, d);

        ret = dma_async_device_register(&d->slave);
        if (ret)
                goto clk_dis;

        ret = of_dma_controller_register((&op->dev)->of_node,
                                         zx_of_dma_simple_xlate, d);
        if (ret)
                goto of_dma_register_fail;

        dev_info(&op->dev, "initialized\n");
        return 0;

of_dma_register_fail:
        dma_async_device_unregister(&d->slave);
clk_dis:
        clk_disable_unprepare(d->clk);
zx_dma_out:
        return ret;
}

static int zx_dma_remove(struct platform_device *op)
{
        struct zx_dma_chan *c, *cn;
        struct zx_dma_dev *d = platform_get_drvdata(op);

        /* explicitly free the irq */
        devm_free_irq(&op->dev, d->irq, d);

        dma_async_device_unregister(&d->slave);
        of_dma_controller_free((&op->dev)->of_node);

        list_for_each_entry_safe(c, cn, &d->slave.channels,
                                 vc.chan.device_node) {
                list_del(&c->vc.chan.device_node);
        }
        clk_disable_unprepare(d->clk);
        dmam_pool_destroy(d->pool);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int zx_dma_suspend_dev(struct device *dev)
{
        struct zx_dma_dev *d = dev_get_drvdata(dev);
        u32 stat = 0;

        stat = zx_dma_get_chan_stat(d);
        if (stat) {
                dev_warn(d->slave.dev,
                         "chan %d is running, fail to suspend\n", stat);
                return -1;
        }
        clk_disable_unprepare(d->clk);
        return 0;
}

static int zx_dma_resume_dev(struct device *dev)
{
        struct zx_dma_dev *d = dev_get_drvdata(dev);
        int ret = 0;

        ret = clk_prepare_enable(d->clk);
        if (ret < 0) {
                dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
                return ret;
        }
        zx_dma_init_state(d);
        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(zx_dma_pmops, zx_dma_suspend_dev, zx_dma_resume_dev);

static struct platform_driver zx_pdma_driver = {
        .driver = {
                .name = DRIVER_NAME,
                .pm = &zx_dma_pmops,
                .of_match_table = zx6702_dma_dt_ids,
        },
        .probe = zx_dma_probe,
        .remove = zx_dma_remove,
};

module_platform_driver(zx_pdma_driver);

MODULE_DESCRIPTION("ZTE ZX296702 DMA Driver");
MODULE_AUTHOR("Jun Nie [email protected]");
MODULE_LICENSE("GPL v2");