/*
 * BCM2835 DMA engine support
 *
 * This driver only supports cyclic DMA transfers
 * as needed for the I2S module.
 *
 * Author: Florian Meier <[email protected]>
 *	   Copyright 2013
 *
 * Based on
 *	OMAP DMAengine support by Russell King
 *
 *	BCM2708 DMA Driver
 *	Copyright (C) 2010 Broadcom
 *
 *	Raspberry Pi PCM I2S ALSA Driver
 *	Copyright (c) by Phil Poole 2013
 *
 *	MARVELL MMP Peripheral DMA Driver
 *	Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

struct bcm2835_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	void __iomem *base;
	struct device_dma_parameters dma_parms;
};

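/*
 * In-memory DMA control block, in the layout the controller fetches from
 * coherent memory: transfer info, source and destination bus addresses,
 * transfer length, 2D stride and the bus address of the next control block.
 * The two pad words keep the block at the 32-byte size the hardware expects.
 */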
struct bcm2835_dma_cb {
	uint32_t info;
	uint32_t src;
	uint32_t dst;
	uint32_t length;
	uint32_t stride;
	uint32_t next;
	uint32_t pad[2];
};

struct bcm2835_chan {
	struct virt_dma_chan vc;
	struct list_head node;

	struct dma_slave_config cfg;
	bool cyclic;
	unsigned int dreq;

	int ch;
	struct bcm2835_desc *desc;

	void __iomem *chan_base;
	int irq_number;
};

struct bcm2835_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;

	unsigned int control_block_size;
	struct bcm2835_dma_cb *control_block_base;
	dma_addr_t control_block_base_phys;

	unsigned int frames;
	size_t size;
};

#define BCM2835_DMA_CS		0x00
#define BCM2835_DMA_ADDR	0x04
#define BCM2835_DMA_SOURCE_AD	0x0c
#define BCM2835_DMA_DEST_AD	0x10
#define BCM2835_DMA_NEXTCB	0x1C

/* DMA CS Control and Status bits */
#define BCM2835_DMA_ACTIVE	BIT(0)
#define BCM2835_DMA_INT		BIT(2)
#define BCM2835_DMA_ISPAUSED	BIT(4)  /* Pause requested or not active */
#define BCM2835_DMA_ISHELD	BIT(5)  /* Is held by DREQ flow control */
#define BCM2835_DMA_ERR		BIT(8)
#define BCM2835_DMA_ABORT	BIT(30) /* Stop current CB, go to next, WO */
#define BCM2835_DMA_RESET	BIT(31) /* WO, self clearing */

#define BCM2835_DMA_INT_EN	BIT(0)
#define BCM2835_DMA_D_INC	BIT(4)
#define BCM2835_DMA_D_DREQ	BIT(6)
#define BCM2835_DMA_S_INC	BIT(8)
#define BCM2835_DMA_S_DREQ	BIT(10)

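/* Peripheral (DREQ) number, shifted into the PERMAP field of the transfer info word */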
#define BCM2835_DMA_PER_MAP(x)	((x) << 16)

#define BCM2835_DMA_DATA_TYPE_S8	1
#define BCM2835_DMA_DATA_TYPE_S16	2
#define BCM2835_DMA_DATA_TYPE_S32	4
#define BCM2835_DMA_DATA_TYPE_S128	16

#define BCM2835_DMA_BULK_MASK	BIT(0)
#define BCM2835_DMA_FIQ_MASK	(BIT(2) | BIT(3))

/* Valid only for channels 0 - 14, 15 has its own base address */
#define BCM2835_DMA_CHAN(n)	((n) << 8) /* Base address */
#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))

static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
{
	return container_of(d, struct bcm2835_dmadev, ddev);
}

static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct bcm2835_chan, vc.chan);
}

static inline struct bcm2835_desc *to_bcm2835_dma_desc(
		struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct bcm2835_desc, vd.tx);
}

static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
{
	struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);

	dma_free_coherent(desc->vd.tx.chan->device->dev,
			desc->control_block_size,
			desc->control_block_base,
			desc->control_block_base_phys);
	kfree(desc);
}

static int bcm2835_dma_abort(void __iomem *chan_base)
{
	unsigned long cs;
	long int timeout = 10000;

	cs = readl(chan_base + BCM2835_DMA_CS);
	if (!(cs & BCM2835_DMA_ACTIVE))
		return 0;

	/* Write 0 to the active bit - Pause the DMA */
	writel(0, chan_base + BCM2835_DMA_CS);

	/* Wait for any current AXI transfer to complete */
	while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
		cpu_relax();
		cs = readl(chan_base + BCM2835_DMA_CS);
	}

	/* We'll un-pause when we set off our next DMA */
	if (!timeout)
		return -ETIMEDOUT;

	if (!(cs & BCM2835_DMA_ACTIVE))
		return 0;

	/* Terminate the control block chain */
	writel(0, chan_base + BCM2835_DMA_NEXTCB);

	/* Abort the whole DMA */
	writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
	       chan_base + BCM2835_DMA_CS);

	return 0;
}

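/* Take the next queued descriptor, program its first control block and start the channel */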
static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct bcm2835_desc *d;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_bcm2835_dma_desc(&vd->tx);

	writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR);
	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
}

static irqreturn_t bcm2835_dma_callback(int irq, void *data)
{
	struct bcm2835_chan *c = data;
	struct bcm2835_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Acknowledge interrupt */
	writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);

	d = c->desc;

	if (d) {
		/* TODO Only works for cyclic DMA */
		vchan_cyclic_callback(&d->vd);
	}

	/* Keep the DMA engine running */
	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return IRQ_HANDLED;
}

static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	dev_dbg(c->vc.chan.device->dev,
			"Allocating DMA channel %d\n", c->ch);

	return request_irq(c->irq_number,
			bcm2835_dma_callback, 0, "DMA IRQ", c);
}

static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	free_irq(c->irq_number, c);

	dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
}

static size_t bcm2835_dma_desc_size(struct bcm2835_desc *d)
{
	return d->size;
}

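/*
 * Return the number of bytes still to be transferred for a descriptor,
 * given the current hardware position: the remainder of the frame that
 * contains @addr plus all frames after it.
 */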
static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
{
	unsigned int i;
	size_t size;

	for (size = i = 0; i < d->frames; i++) {
		struct bcm2835_dma_cb *control_block =
			&d->control_block_base[i];
		size_t this_size = control_block->length;
		dma_addr_t dma;

		if (d->dir == DMA_DEV_TO_MEM)
			dma = control_block->dst;
		else
			dma = control_block->src;

		if (size)
			size += this_size;
		else if (addr >= dma && addr < dma + this_size)
			size += dma + this_size - addr;
	}

	return size;
}

static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue =
			bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct bcm2835_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = readl(c->chan_base + BCM2835_DMA_DEST_AD);
		else
			pos = 0;

		txstate->residue = bcm2835_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void bcm2835_dma_issue_pending(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	unsigned long flags;

	c->cyclic = true; /* Nothing else is implemented */

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc)
		bcm2835_dma_start_desc(c);

	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct bcm2835_desc *d;
	dma_addr_t dev_addr;
	unsigned int es, sync_type;
	unsigned int frame;

	/* Grab configuration */
	if (!is_slave_direction(direction)) {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (direction == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		sync_type = BCM2835_DMA_S_DREQ;
	} else {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		sync_type = BCM2835_DMA_D_DREQ;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = BCM2835_DMA_DATA_TYPE_S32;
		break;
	default:
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->dir = direction;
	d->frames = buf_len / period_len;

	/* Allocate memory for control blocks */
	d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb);
	d->control_block_base = dma_zalloc_coherent(chan->device->dev,
			d->control_block_size, &d->control_block_base_phys,
			GFP_NOWAIT);

	if (!d->control_block_base) {
		kfree(d);
		return NULL;
	}

	/*
	 * Iterate over all frames, create a control block
	 * for each frame and link them together.
	 */
	for (frame = 0; frame < d->frames; frame++) {
		struct bcm2835_dma_cb *control_block =
			&d->control_block_base[frame];

		/* Setup addresses */
		if (d->dir == DMA_DEV_TO_MEM) {
			control_block->info = BCM2835_DMA_D_INC;
			control_block->src = dev_addr;
			control_block->dst = buf_addr + frame * period_len;
		} else {
			control_block->info = BCM2835_DMA_S_INC;
			control_block->src = buf_addr + frame * period_len;
			control_block->dst = dev_addr;
		}

		/* Enable interrupt */
		control_block->info |= BCM2835_DMA_INT_EN;

		/* Setup synchronization */
		if (sync_type != 0)
			control_block->info |= sync_type;

		/* Setup DREQ channel */
		if (c->dreq != 0)
			control_block->info |=
				BCM2835_DMA_PER_MAP(c->dreq);

		/* Length of a frame */
		control_block->length = period_len;
		d->size += control_block->length;

		/*
		 * Next block is the next frame.
		 * This DMA engine driver currently only supports cyclic DMA.
		 * Therefore, wrap around at number of frames.
		 */
		control_block->next = d->control_block_base_phys +
			sizeof(struct bcm2835_dma_cb)
			* ((frame + 1) % d->frames);
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

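/*
 * Rough client-side usage sketch (hypothetical, not part of this driver):
 * a consumer such as the I2S/PCM driver would typically do
 *
 *	chan = dma_request_slave_channel(dev, "tx");
 *	dmaengine_slave_config(chan, &cfg);   (cfg: 4-byte bus width, FIFO address)
 *	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */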
static int bcm2835_dma_slave_config(struct dma_chan *chan,
				    struct dma_slave_config *cfg)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	if ((cfg->direction == DMA_DEV_TO_MEM &&
	     cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
	    (cfg->direction == DMA_MEM_TO_DEV &&
	     cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
	    !is_slave_direction(cfg->direction)) {
		return -EINVAL;
	}

	c->cfg = *cfg;

	return 0;
}

static int bcm2835_dma_terminate_all(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
	unsigned long flags;
	int timeout = 10000;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after bcm2835_dma_abort() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		bcm2835_dma_desc_free(&c->desc->vd);
		c->desc = NULL;
		bcm2835_dma_abort(c->chan_base);

		/* Wait for stopping */
		while (--timeout) {
			if (!(readl(c->chan_base + BCM2835_DMA_CS) &
						BCM2835_DMA_ACTIVE))
				break;

			cpu_relax();
		}

		if (!timeout)
			dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
{
	struct bcm2835_chan *c;

	c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->vc.desc_free = bcm2835_dma_desc_free;
	vchan_init(&c->vc, &d->ddev);
	INIT_LIST_HEAD(&c->node);

	c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
	c->ch = chan_id;
	c->irq_number = irq;

	return 0;
}

static void bcm2835_dma_free(struct bcm2835_dmadev *od)
{
	struct bcm2835_chan *c, *next;

	list_for_each_entry_safe(c, next, &od->ddev.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}

static const struct of_device_id bcm2835_dma_of_match[] = {
	{ .compatible = "brcm,bcm2835-dma", },
	{},
};
MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match);

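/*
 * Device-tree translation: the single cell in a client's "dmas" phandle
 * argument is the DREQ (peripheral) number that will pace the transfers.
 */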
static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
					   struct of_dma *ofdma)
{
	struct bcm2835_dmadev *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->ddev);
	if (!chan)
		return NULL;

	/* Set DREQ from param */
	to_bcm2835_dma_chan(chan)->dreq = spec->args[0];

	return chan;
}

static int bcm2835_dma_probe(struct platform_device *pdev)
{
	struct bcm2835_dmadev *od;
	struct resource *res;
	void __iomem *base;
	int rc;
	int i;
	int irq;
	uint32_t chans_available;

	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	pdev->dev.dma_parms = &od->dma_parms;
	dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	od->base = base;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
	od->ddev.device_tx_status = bcm2835_dma_tx_status;
	od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
	od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
	od->ddev.device_config = bcm2835_dma_slave_config;
	od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
	od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	spin_lock_init(&od->lock);

	platform_set_drvdata(pdev, od);

	/* Request DMA channel mask from device tree */
	if (of_property_read_u32(pdev->dev.of_node,
			"brcm,dma-channel-mask",
			&chans_available)) {
		dev_err(&pdev->dev, "Failed to get channel mask\n");
		rc = -EINVAL;
		goto err_no_dma;
	}

	/*
	 * Do not use the FIQ and BULK channels,
	 * because they are used by the GPU.
	 */
	chans_available &= ~(BCM2835_DMA_FIQ_MASK | BCM2835_DMA_BULK_MASK);

	for (i = 0; i < pdev->num_resources; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			break;

		if (chans_available & (1 << i)) {
			rc = bcm2835_dma_chan_init(od, i, irq);
			if (rc)
				goto err_no_dma;
		}
	}

	dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i);

	/* Device-tree DMA controller registration */
	rc = of_dma_controller_register(pdev->dev.of_node,
			bcm2835_dma_xlate, od);
	if (rc) {
		dev_err(&pdev->dev, "Failed to register DMA controller\n");
		goto err_no_dma;
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to register slave DMA engine device: %d\n", rc);
		goto err_no_dma;
	}

	dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n");

	return 0;

err_no_dma:
	bcm2835_dma_free(od);
	return rc;
}

static int bcm2835_dma_remove(struct platform_device *pdev)
{
	struct bcm2835_dmadev *od = platform_get_drvdata(pdev);

	dma_async_device_unregister(&od->ddev);
	bcm2835_dma_free(od);

	return 0;
}

static struct platform_driver bcm2835_dma_driver = {
	.probe	= bcm2835_dma_probe,
	.remove	= bcm2835_dma_remove,
	.driver = {
		.name = "bcm2835-dma",
		.of_match_table = of_match_ptr(bcm2835_dma_of_match),
	},
};

module_platform_driver(bcm2835_dma_driver);

MODULE_ALIAS("platform:bcm2835-dma");
MODULE_DESCRIPTION("BCM2835 DMA engine driver");
MODULE_AUTHOR("Florian Meier <[email protected]>");
MODULE_LICENSE("GPL v2");