/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2015 Analog Devices Inc.
 * Author: Lars-Peter Clausen <[email protected]>
 *
 * Licensed under the GPL-2.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected
 * to a dedicated bus which is directly connected to a data port on a
 * peripheral. Since these are configuration options of the core that are
 * selected when it is instantiated, they can not be changed by software at
 * runtime. By extension this means that each channel is uni-directional. It
 * can either be device to memory or memory to device, but not both. Also,
 * since the device side is a dedicated data bus only connected to a single
 * peripheral, there is no address that can or needs to be configured for the
 * device side.
 */
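
/*
 * For example (see axi_dmac_parse_chan_dt() below): a channel whose source
 * interface is of type AXI_DMAC_BUS_TYPE_AXI_MM and whose destination
 * interface is not connected to the memory interconnect can only perform
 * DMA_MEM_TO_DEV transfers, while the inverse configuration is
 * DMA_DEV_TO_MEM only.
 */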

#define AXI_DMAC_REG_IRQ_MASK		0x80
#define AXI_DMAC_REG_IRQ_PENDING	0x84
#define AXI_DMAC_REG_IRQ_SOURCE		0x88

#define AXI_DMAC_REG_CTRL		0x400
#define AXI_DMAC_REG_TRANSFER_ID	0x404
#define AXI_DMAC_REG_START_TRANSFER	0x408
#define AXI_DMAC_REG_FLAGS		0x40c
#define AXI_DMAC_REG_DEST_ADDRESS	0x410
#define AXI_DMAC_REG_SRC_ADDRESS	0x414
#define AXI_DMAC_REG_X_LENGTH		0x418
#define AXI_DMAC_REG_Y_LENGTH		0x41c
#define AXI_DMAC_REG_DEST_STRIDE	0x420
#define AXI_DMAC_REG_SRC_STRIDE		0x424
#define AXI_DMAC_REG_TRANSFER_DONE	0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID	0x42c
#define AXI_DMAC_REG_STATUS		0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438

#define AXI_DMAC_CTRL_ENABLE		BIT(0)
#define AXI_DMAC_CTRL_PAUSE		BIT(1)

#define AXI_DMAC_IRQ_SOT		BIT(0)
#define AXI_DMAC_IRQ_EOT		BIT(1)

#define AXI_DMAC_FLAG_CYCLIC		BIT(0)

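/*
 * The length registers hold the programmed size minus one: a register value
 * of 0 means a length of 1. See axi_dmac_start_transfer(), which writes
 * sg->x_len - 1 and sg->y_len - 1.
 */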
struct axi_dmac_sg {
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	unsigned int x_len;		/* Row length in bytes */
	unsigned int y_len;		/* Number of rows, 1 for 1D transfers */
	unsigned int dest_stride;	/* Bytes between rows, memory side */
	unsigned int src_stride;
	unsigned int id;		/* Transfer ID assigned by the hardware */
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[];
};

struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int align_mask;

	bool hw_cyclic;
	bool hw_2d;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;

	struct device_dma_parameters dma_parms;
};

static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0 || len > chan->max_length)
		return false;
	if ((len & chan->align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

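/*
 * The core has a small hardware queue of transfers. A non-zero read of
 * AXI_DMAC_REG_START_TRANSFER means the queue is full and submission is
 * deferred until the next start-of-transfer (SOT) interrupt signals a free
 * slot. AXI_DMAC_REG_TRANSFER_ID returns the ID the hardware assigns to the
 * transfer being queued; it is saved so that axi_dmac_transfer_done() can
 * match completions to segments.
 */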
static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs)
		chan->next_desc = NULL;
	else
		chan->next_desc = desc;

	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback to
	 * call, enable hw cyclic mode to avoid unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}

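/*
 * AXI_DMAC_REG_TRANSFER_DONE is read in the interrupt handler as a bitmap of
 * completed transfer IDs. E.g. if the segments that were queued with IDs 1
 * and 2 have finished, the register reads 0x6 and BIT(sg->id) is set for
 * both. Segments complete strictly in submission order, so the loop below
 * stops at the first segment whose bit is not yet set.
 */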
static void axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return;

	if (active->cyclic) {
		vchan_cyclic_callback(&active->vdesc);
	} else {
		do {
			sg = &active->sg[active->num_completed];
			if (!(BIT(sg->id) & completed_transfers))
				break;
			active->num_completed++;
			if (active->num_completed == active->num_sgs) {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		} while (active);
	}
}

static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if (pending & AXI_DMAC_IRQ_SOT)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}

static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
	struct axi_dmac_desc *desc;

	desc = kzalloc(sizeof(struct axi_dmac_desc) +
		sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->num_sgs = num_sgs;

	return desc;
}


static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	desc = axi_dmac_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			kfree(desc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			desc->sg[i].dest_addr = sg_dma_address(sg);
		else
			desc->sg[i].src_addr = sg_dma_address(sg);
		desc->sg[i].x_len = sg_dma_len(sg);
		desc->sg[i].y_len = 1;
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

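/*
 * A sketch of how a client peripheral driver might use this channel for a
 * cyclic capture via the standard dmaengine API (the chan, buf_phys and
 * my_period_done names are placeholders):
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	desc->callback = my_period_done;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * If no callback is set and the core was instantiated with cyclic support,
 * axi_dmac_start_transfer() enables hardware cyclic mode and no further
 * interrupts are generated for the transfer.
 */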
static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, i;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;

	desc = axi_dmac_alloc_desc(num_periods);
	if (!desc)
		return NULL;

	for (i = 0; i < num_periods; i++) {
		if (direction == DMA_DEV_TO_MEM)
			desc->sg[i].dest_addr = buf_addr;
		else
			desc->sg[i].src_addr = buf_addr;
		desc->sg[i].x_len = period_len;
		desc->sg[i].y_len = 1;
		buf_addr += period_len;
	}

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

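/*
 * Only single-frame interleaved templates are accepted (xt->frame_size == 1).
 * A hypothetical 2D example: numf = 480 rows of size = 1920 bytes each, with
 * an inter-chunk gap so that the memory-side stride is size + icg bytes. On
 * cores instantiated without 2D support the template is still accepted when
 * there are no gaps, by flattening it into a single size * numf transfer.
 */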
static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    !axi_dmac_check_len(chan, xt->numf))
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].src_addr = xt->src_start;
		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].dest_addr = xt->dst_start;
		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].x_len = xt->sgl[0].size;
		desc->sg[0].y_len = xt->numf;
	} else {
		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
		desc->sg[0].y_len = 1;
	}

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
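/*
 * A hypothetical devicetree fragment matching what is parsed below (the unit
 * address, interrupt and clock specifiers are made up; the bus-type constants
 * come from dt-bindings/dma/axi-dmac.h):
 *
 *	dma-controller@44a30000 {
 *		compatible = "adi,axi-dmac-1.00.a";
 *		reg = <0x44a30000 0x1000>;
 *		interrupts = <0 57 4>;
 *		clocks = <&clkc 15>;
 *		#dma-cells = <1>;
 *
 *		adi,channels {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			dma-channel@0 {
 *				reg = <0>;
 *				adi,source-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *				adi,source-bus-width = <64>;
 *				adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_FIFO>;
 *				adi,destination-bus-width = <64>;
 *				adi,length-width = <24>;
 *				adi,cyclic;
 *			};
 *		};
 *	};
 */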
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,length-width", &val);
	if (ret)
		return ret;

	if (val >= 32)
		chan->max_length = UINT_MAX;
	else
		chan->max_length = (1ULL << val) - 1;

	/* Widths are in bytes, e.g. a 64-bit wide bus yields a mask of 0x7 */
	chan->align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;

	chan->hw_cyclic = of_property_read_bool(of_chan, "adi,cyclic");
	chan->hw_2d = of_property_read_bool(of_chan, "adi,2d");

	return 0;
}

static int axi_dmac_probe(struct platform_device *pdev)
{
	struct device_node *of_channels, *of_chan;
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct resource *res;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq <= 0)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	pdev->dev.dma_parms = &dmac->dma_parms;
	dma_set_max_seg_size(&pdev->dev, dmac->chan.max_length);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->dev = &pdev->dev;
	dma_dev->chancnt = 1;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, 0,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	return 0;

err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}

static int axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);

	return 0;
}

static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <[email protected]>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");