/*
 * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
 * Author: Guennadi Liakhovetski <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <dt-bindings/dma/nbpfaxi.h>

#include "dmaengine.h"

#define NBPF_REG_CHAN_OFFSET	0
#define NBPF_REG_CHAN_SIZE	0x40

/* Channel Current Transaction Byte register */
#define NBPF_CHAN_CUR_TR_BYTE	0x20

/* Channel Status register */
#define NBPF_CHAN_STAT		0x24
#define NBPF_CHAN_STAT_EN	1
#define NBPF_CHAN_STAT_TACT	4
#define NBPF_CHAN_STAT_ERR	0x10
#define NBPF_CHAN_STAT_END	0x20
#define NBPF_CHAN_STAT_TC	0x40
#define NBPF_CHAN_STAT_DER	0x400

/* Channel Control register */
#define NBPF_CHAN_CTRL		0x28
#define NBPF_CHAN_CTRL_SETEN	1
#define NBPF_CHAN_CTRL_CLREN	2
#define NBPF_CHAN_CTRL_STG	4
#define NBPF_CHAN_CTRL_SWRST	8
#define NBPF_CHAN_CTRL_CLRRQ	0x10
#define NBPF_CHAN_CTRL_CLREND	0x20
#define NBPF_CHAN_CTRL_CLRTC	0x40
#define NBPF_CHAN_CTRL_SETSUS	0x100
#define NBPF_CHAN_CTRL_CLRSUS	0x200

/* Channel Configuration register */
#define NBPF_CHAN_CFG		0x2c
#define NBPF_CHAN_CFG_SEL	7		/* terminal SELect: 0..7 */
#define NBPF_CHAN_CFG_REQD	8		/* REQuest Direction: DMAREQ is 0: input, 1: output */
#define NBPF_CHAN_CFG_LOEN	0x10		/* LOw ENable: low DMA request line is: 0: inactive, 1: active */
#define NBPF_CHAN_CFG_HIEN	0x20		/* HIgh ENable: high DMA request line is: 0: inactive, 1: active */
#define NBPF_CHAN_CFG_LVL	0x40		/* LeVeL: DMA request line is sensed as 0: edge, 1: level */
#define NBPF_CHAN_CFG_AM	0x700		/* ACK Mode: 0: Pulse mode, 1: Level mode, b'1x: Bus Cycle */
#define NBPF_CHAN_CFG_SDS	0xf000		/* Source Data Size: 0: 8 bits,... , 7: 1024 bits */
#define NBPF_CHAN_CFG_DDS	0xf0000		/* Destination Data Size: as above */
#define NBPF_CHAN_CFG_SAD	0x100000	/* Source ADdress counting: 0: increment, 1: fixed */
#define NBPF_CHAN_CFG_DAD	0x200000	/* Destination ADdress counting: 0: increment, 1: fixed */
#define NBPF_CHAN_CFG_TM	0x400000	/* Transfer Mode: 0: single, 1: block TM */
#define NBPF_CHAN_CFG_DEM	0x1000000	/* DMAEND interrupt Mask */
#define NBPF_CHAN_CFG_TCM	0x2000000	/* DMATCO interrupt Mask */
#define NBPF_CHAN_CFG_SBE	0x8000000	/* Sweep Buffer Enable */
#define NBPF_CHAN_CFG_RSEL	0x10000000	/* RM: Register Set sELect */
#define NBPF_CHAN_CFG_RSW	0x20000000	/* RM: Register Select sWitch */
#define NBPF_CHAN_CFG_REN	0x40000000	/* RM: Register Set Enable */
#define NBPF_CHAN_CFG_DMS	0x80000000	/* 0: register mode (RM), 1: link mode (LM) */

#define NBPF_CHAN_NXLA	0x38
#define NBPF_CHAN_CRLA	0x3c

/* Link Header field */
#define NBPF_HEADER_LV	1
#define NBPF_HEADER_LE	2
#define NBPF_HEADER_WBD	4
#define NBPF_HEADER_DIM	8

#define NBPF_CTRL	0x300
#define NBPF_CTRL_PR	1		/* 0: fixed priority, 1: round robin */
#define NBPF_CTRL_LVINT	2		/* DMAEND and DMAERR signalling: 0: pulse, 1: level */

#define NBPF_DSTAT_ER	0x314
#define NBPF_DSTAT_END	0x318

#define NBPF_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

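/**
 * struct nbpf_config - model-specific DMAC configuration
 * @num_channels: number of channels on this controller variant
 * @buffer_size: internal channel buffer size, as encoded in the compatible
 *	string; used by nbpf_xfer_ds() to cap the maximum transfer unit
 */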
struct nbpf_config {
	int num_channels;
	int buffer_size;
};

/*
 * We've got 3 types of objects, used to describe DMA transfers:
 * 1. high-level descriptor, containing a struct dma_async_tx_descriptor object
 *    in it, used to communicate with the user
 * 2. hardware DMA link descriptors, that we pass to DMAC for DMA transfer
 *    queuing, these must be DMAable, using either the streaming DMA API or
 *    allocated from coherent memory - one per SG segment
 * 3. one per SG segment descriptors, used to manage HW link descriptors from
 *    (2). They do not have to be DMAable. They can either be (a) allocated
 *    together with link descriptors as mixed (DMA / CPU) objects, or (b)
 *    separately. Even if allocated separately it would be best to link them
 *    to link descriptors once during channel resource allocation and always
 *    use them as a single object.
 * Therefore for both cases (a) and (b) at run-time objects (2) and (3) shall be
 * treated as a single SG segment descriptor.
 */

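/*
 * struct nbpf_link_reg - one hardware link descriptor, as fetched by the DMAC
 * in link mode (LM). The layout must match what the controller expects to read
 * from memory, hence __packed; instances are mapped with the streaming DMA API
 * and chained via the "next" field.
 */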
struct nbpf_link_reg {
	u32 header;
	u32 src_addr;
	u32 dst_addr;
	u32 transaction_size;
	u32 config;
	u32 interval;
	u32 extension;
	u32 next;
} __packed;

struct nbpf_device;
struct nbpf_channel;
struct nbpf_desc;

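/**
 * struct nbpf_link_desc - CPU-side handle for one hardware link descriptor
 * @hwdesc: the DMA-able struct nbpf_link_reg managed by this handle
 * @hwdesc_dma_addr: DMA address of @hwdesc, programmed into NXLA or into the
 *	previous segment's "next" pointer
 * @desc: transfer descriptor this segment currently belongs to
 * @node: member in the channel's free_links list or in a descriptor's sg list
 */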
struct nbpf_link_desc {
	struct nbpf_link_reg *hwdesc;
	dma_addr_t hwdesc_dma_addr;
	struct nbpf_desc *desc;
	struct list_head node;
};

/**
 * struct nbpf_desc - DMA transfer descriptor
 * @async_tx: dmaengine object
 * @user_wait: waiting for a user ack
 * @length: total transfer length
 * @chan: associated DMAC channel
 * @sg: list of hardware descriptors, represented by struct nbpf_link_desc
 * @node: member in channel descriptor lists
 */
struct nbpf_desc {
	struct dma_async_tx_descriptor async_tx;
	bool user_wait;
	size_t length;
	struct nbpf_channel *chan;
	struct list_head sg;
	struct list_head node;
};

/* Take a wild guess: allocate 4 segments per descriptor */
#define NBPF_SEGMENTS_PER_DESC 4
#define NBPF_DESCS_PER_PAGE ((PAGE_SIZE - sizeof(struct list_head)) /	\
	(sizeof(struct nbpf_desc) +					\
	 NBPF_SEGMENTS_PER_DESC *					\
	 (sizeof(struct nbpf_link_desc) + sizeof(struct nbpf_link_reg))))
#define NBPF_SEGMENTS_PER_PAGE (NBPF_SEGMENTS_PER_DESC * NBPF_DESCS_PER_PAGE)
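/*
 * Rough sizing example (illustrative only, the real numbers depend on the
 * architecture and on structure padding): struct nbpf_link_reg is 32 bytes
 * packed, and with struct nbpf_desc / nbpf_link_desc in the order of a hundred
 * and a few tens of bytes respectively, one descriptor plus its 4 segments
 * costs a few hundred bytes. A 4 KiB page therefore carries roughly a dozen
 * descriptors and four times as many segments. The BUILD_BUG_ON() in
 * nbpf_probe() guarantees that struct nbpf_desc_page still fits in PAGE_SIZE.
 */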

struct nbpf_desc_page {
	struct list_head node;
	struct nbpf_desc desc[NBPF_DESCS_PER_PAGE];
	struct nbpf_link_desc ldesc[NBPF_SEGMENTS_PER_PAGE];
	struct nbpf_link_reg hwdesc[NBPF_SEGMENTS_PER_PAGE];
};

/**
 * struct nbpf_channel - one DMAC channel
 * @dma_chan: standard dmaengine channel object
 * @tasklet: channel-specific tasklet, running descriptor callbacks
 * @base: register address base
 * @nbpf: DMAC
 * @name: IRQ name
 * @irq: IRQ number
 * @slave_src_addr: source address for slave DMA
 * @slave_src_width: source slave data size in bytes
 * @slave_src_burst: maximum source slave burst size in bytes
 * @slave_dst_addr: destination address for slave DMA
 * @slave_dst_width: destination slave data size in bytes
 * @slave_dst_burst: maximum destination slave burst size in bytes
 * @terminal: DMA terminal, assigned to this channel
 * @dmarq_cfg: DMA request line configuration - high / low, edge / level for NBPF_CHAN_CFG
 * @flags: configuration flags from DT
 * @lock: protect descriptor lists
 * @free_links: list of free link descriptors
 * @free: list of free descriptors
 * @queued: list of queued descriptors
 * @active: list of descriptors, scheduled for processing
 * @done: list of completed descriptors, waiting post-processing
 * @desc_page: list of additionally allocated descriptor pages - if any
 * @running: currently running descriptor
 * @paused: is the channel paused?
 */
struct nbpf_channel {
	struct dma_chan dma_chan;
	struct tasklet_struct tasklet;
	void __iomem *base;
	struct nbpf_device *nbpf;
	char name[16];
	int irq;
	dma_addr_t slave_src_addr;
	size_t slave_src_width;
	size_t slave_src_burst;
	dma_addr_t slave_dst_addr;
	size_t slave_dst_width;
	size_t slave_dst_burst;
	unsigned int terminal;
	u32 dmarq_cfg;
	unsigned long flags;
	spinlock_t lock;
	struct list_head free_links;
	struct list_head free;
	struct list_head queued;
	struct list_head active;
	struct list_head done;
	struct list_head desc_page;
	struct nbpf_desc *running;
	bool paused;
};

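/**
 * struct nbpf_device - one DMAC instance
 * @dma_dev: dmaengine device object
 * @base: register address base
 * @clk: the DMAC clock
 * @config: model-specific configuration, selected via the compatible string
 * @eirq: error IRQ number
 * @chan: per-channel data, allocated together with this structure
 */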
struct nbpf_device {
	struct dma_device dma_dev;
	void __iomem *base;
	struct clk *clk;
	const struct nbpf_config *config;
	unsigned int eirq;
	struct nbpf_channel chan[];
};

enum nbpf_model {
	NBPF1B4,
	NBPF1B8,
	NBPF1B16,
	NBPF4B4,
	NBPF4B8,
	NBPF4B16,
	NBPF8B4,
	NBPF8B8,
	NBPF8B16,
};

static struct nbpf_config nbpf_cfg[] = {
	[NBPF1B4] = {
		.num_channels = 1,
		.buffer_size = 4,
	},
	[NBPF1B8] = {
		.num_channels = 1,
		.buffer_size = 8,
	},
	[NBPF1B16] = {
		.num_channels = 1,
		.buffer_size = 16,
	},
	[NBPF4B4] = {
		.num_channels = 4,
		.buffer_size = 4,
	},
	[NBPF4B8] = {
		.num_channels = 4,
		.buffer_size = 8,
	},
	[NBPF4B16] = {
		.num_channels = 4,
		.buffer_size = 16,
	},
	[NBPF8B4] = {
		.num_channels = 8,
		.buffer_size = 4,
	},
	[NBPF8B8] = {
		.num_channels = 8,
		.buffer_size = 8,
	},
	[NBPF8B16] = {
		.num_channels = 8,
		.buffer_size = 16,
	},
};

#define nbpf_to_chan(d) container_of(d, struct nbpf_channel, dma_chan)

/*
 * dmaengine drivers seem to have a lot in common and instead of sharing more
 * code, they reimplement those common algorithms independently. In this driver
 * we try to separate the hardware-specific part from the (largely) generic
 * part. This improves code readability and makes it possible in the future to
 * reuse the generic code in form of a helper library. That generic code should
 * be suitable for various DMA controllers, using transfer descriptors in RAM
 * and pushing one SG list at a time to the DMA controller.
 */

/* Hardware-specific part */

static inline u32 nbpf_chan_read(struct nbpf_channel *chan,
				 unsigned int offset)
{
	u32 data = ioread32(chan->base + offset);
	dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, chan->base, offset, data);
	return data;
}

static inline void nbpf_chan_write(struct nbpf_channel *chan,
				   unsigned int offset, u32 data)
{
	iowrite32(data, chan->base + offset);
	dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, chan->base, offset, data);
}

static inline u32 nbpf_read(struct nbpf_device *nbpf,
			    unsigned int offset)
{
	u32 data = ioread32(nbpf->base + offset);
	dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, nbpf->base, offset, data);
	return data;
}

static inline void nbpf_write(struct nbpf_device *nbpf,
			      unsigned int offset, u32 data)
{
	iowrite32(data, nbpf->base + offset);
	dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, nbpf->base, offset, data);
}

static void nbpf_chan_halt(struct nbpf_channel *chan)
{
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
}

static bool nbpf_status_get(struct nbpf_channel *chan)
{
	u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END);

	return status & BIT(chan - chan->nbpf->chan);
}

static void nbpf_status_ack(struct nbpf_channel *chan)
{
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREND);
}

static u32 nbpf_error_get(struct nbpf_device *nbpf)
{
	return nbpf_read(nbpf, NBPF_DSTAT_ER);
}

static struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf, u32 error)
{
	return nbpf->chan + __ffs(error);
}

static void nbpf_error_clear(struct nbpf_channel *chan)
{
	u32 status;
	int i;

	/* Stop the channel, make sure DMA has been aborted */
	nbpf_chan_halt(chan);

	for (i = 1000; i; i--) {
		status = nbpf_chan_read(chan, NBPF_CHAN_STAT);
		if (!(status & NBPF_CHAN_STAT_TACT))
			break;
		cpu_relax();
	}

	if (!i)
		dev_err(chan->dma_chan.device->dev,
			"%s(): abort timeout, channel status 0x%x\n", __func__, status);

	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SWRST);
}

static int nbpf_start(struct nbpf_desc *desc)
{
	struct nbpf_channel *chan = desc->chan;
	struct nbpf_link_desc *ldesc = list_first_entry(&desc->sg, struct nbpf_link_desc, node);

	nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr);
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETEN | NBPF_CHAN_CTRL_CLRSUS);
	chan->paused = false;

	/* Software trigger MEMCPY - only MEMCPY uses the block mode */
	if (ldesc->hwdesc->config & NBPF_CHAN_CFG_TM)
		nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_STG);

	dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__,
		nbpf_chan_read(chan, NBPF_CHAN_NXLA), nbpf_chan_read(chan, NBPF_CHAN_CRLA));

	return 0;
}

static void nbpf_chan_prepare(struct nbpf_channel *chan)
{
	chan->dmarq_cfg = (chan->flags & NBPF_SLAVE_RQ_HIGH ? NBPF_CHAN_CFG_HIEN : 0) |
		(chan->flags & NBPF_SLAVE_RQ_LOW ? NBPF_CHAN_CFG_LOEN : 0) |
		(chan->flags & NBPF_SLAVE_RQ_LEVEL ?
		 NBPF_CHAN_CFG_LVL | (NBPF_CHAN_CFG_AM & 0x200) : 0) |
		chan->terminal;
}

static void nbpf_chan_prepare_default(struct nbpf_channel *chan)
{
	/* Don't output DMAACK */
	chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400;
	chan->terminal = 0;
	chan->flags = 0;
}

static void nbpf_chan_configure(struct nbpf_channel *chan)
{
	/*
	 * We assume that only the link mode and DMA request line configuration
	 * have to be set in the configuration register manually. Dynamic
	 * per-transfer configuration will be loaded from transfer descriptors.
	 */
	nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg);
}

static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size)
{
	/* Maximum supported bursts depend on the buffer size */
	return min_t(int, __ffs(size), ilog2(nbpf->config->buffer_size * 8));
}
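/*
 * Example of the encoding above, as implied by the NBPF_CHAN_CFG_SDS comment:
 * the SDS / DDS field value is log2 of the transfer unit size in bytes
 * (0: 8 bits = 1 byte, ..., 7: 1024 bits = 128 bytes). For a 16-byte aligned
 * size, __ffs(16) = 4 selects 128-bit units; a variant with buffer_size = 16
 * caps the value at ilog2(16 * 8) = 7, i.e. 1024-bit units.
 */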

static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
			     enum dma_slave_buswidth width, u32 burst)
{
	size_t size;

	if (!burst)
		burst = 1;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		size = 8 * burst;
		break;

	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		size = 4 * burst;
		break;

	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		size = 2 * burst;
		break;

	default:
		pr_warn("%s(): invalid bus width %u\n", __func__, width);
		/* fall through */
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		size = burst;
	}

	return nbpf_xfer_ds(nbpf, size);
}

/*
 * We need a way to recognise slaves whose data is sent "raw" over the bus,
 * i.e. where it isn't known in advance how many bytes will be received. The
 * slave driver then has to provide a "large enough" buffer and either read it
 * when it is full, or detect that some data has arrived and, if no more data
 * arrives before a timeout, read out what is already there. We want to handle
 * such slaves in a special way to allow an optimised mode for other users, for
 * whom the amount of data is known in advance. So far there's no way to
 * recognise such slaves, so we use a data-width check to distinguish between
 * the SD host and the PL011 UART.
 */

static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
			 enum dma_transfer_direction direction,
			 dma_addr_t src, dma_addr_t dst, size_t size, bool last)
{
	struct nbpf_link_reg *hwdesc = ldesc->hwdesc;
	struct nbpf_desc *desc = ldesc->desc;
	struct nbpf_channel *chan = desc->chan;
	struct device *dev = chan->dma_chan.device->dev;
	size_t mem_xfer, slave_xfer;
	bool can_burst;

	hwdesc->header = NBPF_HEADER_WBD | NBPF_HEADER_LV |
		(last ? NBPF_HEADER_LE : 0);

	hwdesc->src_addr = src;
	hwdesc->dst_addr = dst;
	hwdesc->transaction_size = size;

	/*
	 * set config: SAD, DAD, DDS, SDS, etc.
	 * Note on transfer sizes: the DMAC can perform unaligned DMA transfers,
	 * but it is important to have transaction size a multiple of both
	 * receiver and transmitter transfer sizes. It is also possible to use
	 * different RAM and device transfer sizes, and it does work well with
	 * some devices, e.g. with V08R07S01E SD host controllers, which can use
	 * 128 byte transfers. But this doesn't work with other devices,
	 * especially when the transaction size is unknown. This is the case,
	 * e.g. with serial drivers like amba-pl011.c. For reception it sets up
	 * the transaction size of 4K and if fewer bytes are received, it
	 * pauses DMA and reads out data received via DMA as well as those left
	 * in the Rx FIFO. For this to work with the RAM side using burst
	 * transfers we enable the SBE bit and terminate the transfer in our
	 * .device_pause handler.
	 */
	mem_xfer = nbpf_xfer_ds(chan->nbpf, size);

	switch (direction) {
	case DMA_DEV_TO_MEM:
		can_burst = chan->slave_src_width >= 3;
		slave_xfer = min(mem_xfer, can_burst ?
				 chan->slave_src_burst : chan->slave_src_width);
		/*
		 * Is the slave narrower than 64 bits, i.e. isn't using the full
		 * bus width and cannot use bursts?
		 */
		if (mem_xfer > chan->slave_src_burst && !can_burst)
			mem_xfer = chan->slave_src_burst;
		/* Device-to-RAM DMA is unreliable without REQD set */
		hwdesc->config = NBPF_CHAN_CFG_SAD | (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)) |
			(NBPF_CHAN_CFG_SDS & (slave_xfer << 12)) | NBPF_CHAN_CFG_REQD |
			NBPF_CHAN_CFG_SBE;
		break;

	case DMA_MEM_TO_DEV:
		slave_xfer = min(mem_xfer, chan->slave_dst_width >= 3 ?
				 chan->slave_dst_burst : chan->slave_dst_width);
		hwdesc->config = NBPF_CHAN_CFG_DAD | (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) |
			(NBPF_CHAN_CFG_DDS & (slave_xfer << 16)) | NBPF_CHAN_CFG_REQD;
		break;

	case DMA_MEM_TO_MEM:
		hwdesc->config = NBPF_CHAN_CFG_TCM | NBPF_CHAN_CFG_TM |
			(NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) |
			(NBPF_CHAN_CFG_DDS & (mem_xfer << 16));
		break;

	default:
		return -EINVAL;
	}

	hwdesc->config |= chan->dmarq_cfg | (last ? 0 : NBPF_CHAN_CFG_DEM) |
		NBPF_CHAN_CFG_DMS;

	dev_dbg(dev, "%s(): desc @ %pad: hdr 0x%x, cfg 0x%x, %zu @ %pad -> %pad\n",
		__func__, &ldesc->hwdesc_dma_addr, hwdesc->header,
		hwdesc->config, size, &src, &dst);

	dma_sync_single_for_device(dev, ldesc->hwdesc_dma_addr, sizeof(*hwdesc),
				   DMA_TO_DEVICE);

	return 0;
}

static size_t nbpf_bytes_left(struct nbpf_channel *chan)
{
	return nbpf_chan_read(chan, NBPF_CHAN_CUR_TR_BYTE);
}

static void nbpf_configure(struct nbpf_device *nbpf)
{
	nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT);
}

/* Generic part */

/* DMA ENGINE functions */
static void nbpf_issue_pending(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	unsigned long flags;

	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);
	if (list_empty(&chan->queued))
		goto unlock;

	list_splice_tail_init(&chan->queued, &chan->active);

	if (!chan->running) {
		struct nbpf_desc *desc = list_first_entry(&chan->active,
						struct nbpf_desc, node);
		if (!nbpf_start(desc))
			chan->running = desc;
	}

unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}

static enum dma_status nbpf_tx_status(struct dma_chan *dchan,
		dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	enum dma_status status = dma_cookie_status(dchan, cookie, state);

	if (state) {
		dma_cookie_t running;
		unsigned long flags;

		spin_lock_irqsave(&chan->lock, flags);
		running = chan->running ? chan->running->async_tx.cookie : -EINVAL;

		if (cookie == running) {
			state->residue = nbpf_bytes_left(chan);
			dev_dbg(dchan->device->dev, "%s(): residue %u\n", __func__,
				state->residue);
		} else if (status == DMA_IN_PROGRESS) {
			struct nbpf_desc *desc;
			bool found = false;

			list_for_each_entry(desc, &chan->active, node)
				if (desc->async_tx.cookie == cookie) {
					found = true;
					break;
				}

			if (!found)
				list_for_each_entry(desc, &chan->queued, node)
					if (desc->async_tx.cookie == cookie) {
						found = true;
						break;
					}

			state->residue = found ? desc->length : 0;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

	if (chan->paused)
		status = DMA_PAUSED;

	return status;
}

static dma_cookie_t nbpf_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx);
	struct nbpf_channel *chan = desc->chan;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, flags);
	cookie = dma_cookie_assign(tx);
	list_add_tail(&desc->node, &chan->queued);
	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->dma_chan.device->dev, "Entry %s(%d)\n", __func__, cookie);

	return cookie;
}

static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
{
	struct dma_chan *dchan = &chan->dma_chan;
	struct nbpf_desc_page *dpage = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	struct nbpf_link_desc *ldesc;
	struct nbpf_link_reg *hwdesc;
	struct nbpf_desc *desc;
	LIST_HEAD(head);
	LIST_HEAD(lhead);
	int i;
	struct device *dev = dchan->device->dev;

	if (!dpage)
		return -ENOMEM;

	dev_dbg(dev, "%s(): alloc %lu descriptors, %lu segments, total alloc %zu\n",
		__func__, NBPF_DESCS_PER_PAGE, NBPF_SEGMENTS_PER_PAGE, sizeof(*dpage));

	for (i = 0, ldesc = dpage->ldesc, hwdesc = dpage->hwdesc;
	     i < ARRAY_SIZE(dpage->ldesc);
	     i++, ldesc++, hwdesc++) {
		ldesc->hwdesc = hwdesc;
		list_add_tail(&ldesc->node, &lhead);
		ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev,
					hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE);

		dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__,
			hwdesc, &ldesc->hwdesc_dma_addr);
	}

	for (i = 0, desc = dpage->desc;
	     i < ARRAY_SIZE(dpage->desc);
	     i++, desc++) {
		dma_async_tx_descriptor_init(&desc->async_tx, dchan);
		desc->async_tx.tx_submit = nbpf_tx_submit;
		desc->chan = chan;
		INIT_LIST_HEAD(&desc->sg);
		list_add_tail(&desc->node, &head);
	}

	/*
	 * This function cannot be called from interrupt context, so, no need to
	 * save flags
	 */
	spin_lock_irq(&chan->lock);
	list_splice_tail(&lhead, &chan->free_links);
	list_splice_tail(&head, &chan->free);
	list_add(&dpage->node, &chan->desc_page);
	spin_unlock_irq(&chan->lock);

	return ARRAY_SIZE(dpage->desc);
}

static void nbpf_desc_put(struct nbpf_desc *desc)
{
	struct nbpf_channel *chan = desc->chan;
	struct nbpf_link_desc *ldesc, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	list_for_each_entry_safe(ldesc, tmp, &desc->sg, node)
		list_move(&ldesc->node, &chan->free_links);

	list_add(&desc->node, &chan->free);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static void nbpf_scan_acked(struct nbpf_channel *chan)
{
	struct nbpf_desc *desc, *tmp;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->lock, flags);
	list_for_each_entry_safe(desc, tmp, &chan->done, node)
		if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) {
			list_move(&desc->node, &head);
			desc->user_wait = false;
		}
	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, tmp, &head, node) {
		list_del(&desc->node);
		nbpf_desc_put(desc);
	}
}

/*
 * We have to allocate descriptors with the channel lock dropped. This means
 * that the freshly allocated descriptors can already be taken by others before
 * we re-acquire the lock, so we have to re-check after re-acquiring it and
 * possibly retry, if they are gone again.
 */
static struct nbpf_desc *nbpf_desc_get(struct nbpf_channel *chan, size_t len)
{
	struct nbpf_desc *desc = NULL;
	struct nbpf_link_desc *ldesc, *prev = NULL;

	nbpf_scan_acked(chan);

	spin_lock_irq(&chan->lock);

	do {
		int i = 0, ret;

		if (list_empty(&chan->free)) {
			/* No more free descriptors */
			spin_unlock_irq(&chan->lock);
			ret = nbpf_desc_page_alloc(chan);
			if (ret < 0)
				return NULL;
			spin_lock_irq(&chan->lock);
			continue;
		}
		desc = list_first_entry(&chan->free, struct nbpf_desc, node);
		list_del(&desc->node);

		do {
			if (list_empty(&chan->free_links)) {
				/* No more free link descriptors */
				spin_unlock_irq(&chan->lock);
				ret = nbpf_desc_page_alloc(chan);
				if (ret < 0) {
					nbpf_desc_put(desc);
					return NULL;
				}
				spin_lock_irq(&chan->lock);
				continue;
			}

			ldesc = list_first_entry(&chan->free_links,
					struct nbpf_link_desc, node);
			ldesc->desc = desc;
			if (prev)
				prev->hwdesc->next = (u32)ldesc->hwdesc_dma_addr;

			prev = ldesc;
			list_move_tail(&ldesc->node, &desc->sg);

			i++;
		} while (i < len);
	} while (!desc);

	prev->hwdesc->next = 0;

	spin_unlock_irq(&chan->lock);

	return desc;
}

static void nbpf_chan_idle(struct nbpf_channel *chan)
{
	struct nbpf_desc *desc, *tmp;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->lock, flags);

	list_splice_init(&chan->done, &head);
	list_splice_init(&chan->active, &head);
	list_splice_init(&chan->queued, &head);

	chan->running = NULL;

	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, tmp, &head, node) {
		dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n",
			__func__, desc, desc->async_tx.cookie);
		list_del(&desc->node);
		nbpf_desc_put(desc);
	}
}

static int nbpf_pause(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);

	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);

	chan->paused = true;
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);
	/* See comment in nbpf_prep_one() */
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);

	return 0;
}

static int nbpf_terminate_all(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);

	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
	dev_dbg(dchan->device->dev, "Terminating\n");

	nbpf_chan_halt(chan);
	nbpf_chan_idle(chan);

	return 0;
}

static int nbpf_config(struct dma_chan *dchan,
		       struct dma_slave_config *config)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);

	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);

	/*
	 * We could check config->slave_id to match chan->terminal here,
	 * but with DT they would be coming from the same source, so
	 * such a check would be superfluous
	 */

	chan->slave_dst_addr = config->dst_addr;
	chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
					       config->dst_addr_width, 1);
	chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
					       config->dst_addr_width,
					       config->dst_maxburst);
	chan->slave_src_addr = config->src_addr;
	chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
					       config->src_addr_width, 1);
	chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
					       config->src_addr_width,
					       config->src_maxburst);

	return 0;
}

static struct dma_async_tx_descriptor *nbpf_prep_sg(struct nbpf_channel *chan,
		struct scatterlist *src_sg, struct scatterlist *dst_sg,
		size_t len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct nbpf_link_desc *ldesc;
	struct scatterlist *mem_sg;
	struct nbpf_desc *desc;
	bool inc_src, inc_dst;
	size_t data_len = 0;
	int i = 0;

	switch (direction) {
	case DMA_DEV_TO_MEM:
		mem_sg = dst_sg;
		inc_src = false;
		inc_dst = true;
		break;

	case DMA_MEM_TO_DEV:
		mem_sg = src_sg;
		inc_src = true;
		inc_dst = false;
		break;

	default:
	case DMA_MEM_TO_MEM:
		mem_sg = src_sg;
		inc_src = true;
		inc_dst = true;
	}

	desc = nbpf_desc_get(chan, len);
	if (!desc)
		return NULL;

	desc->async_tx.flags = flags;
	desc->async_tx.cookie = -EBUSY;
	desc->user_wait = false;

	/*
	 * This is a private descriptor list, and we own the descriptor. No need
	 * to lock.
	 */
	list_for_each_entry(ldesc, &desc->sg, node) {
		int ret = nbpf_prep_one(ldesc, direction,
					sg_dma_address(src_sg),
					sg_dma_address(dst_sg),
					sg_dma_len(mem_sg),
					i == len - 1);
		if (ret < 0) {
			nbpf_desc_put(desc);
			return NULL;
		}
		data_len += sg_dma_len(mem_sg);
		if (inc_src)
			src_sg = sg_next(src_sg);
		if (inc_dst)
			dst_sg = sg_next(dst_sg);
		mem_sg = direction == DMA_DEV_TO_MEM ? dst_sg : src_sg;
		i++;
	}

	desc->length = data_len;

	/* The user has to return the descriptor to us ASAP via .tx_submit() */
	return &desc->async_tx;
}

static struct dma_async_tx_descriptor *nbpf_prep_memcpy(
	struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	struct scatterlist dst_sg;
	struct scatterlist src_sg;

	sg_init_table(&dst_sg, 1);
	sg_init_table(&src_sg, 1);

	sg_dma_address(&dst_sg) = dst;
	sg_dma_address(&src_sg) = src;

	sg_dma_len(&dst_sg) = len;
	sg_dma_len(&src_sg) = len;

	dev_dbg(dchan->device->dev, "%s(): %zu @ %pad -> %pad\n",
		__func__, len, &src, &dst);

	return nbpf_prep_sg(chan, &src_sg, &dst_sg, 1,
			    DMA_MEM_TO_MEM, flags);
}

static struct dma_async_tx_descriptor *nbpf_prep_memcpy_sg(
	struct dma_chan *dchan,
	struct scatterlist *dst_sg, unsigned int dst_nents,
	struct scatterlist *src_sg, unsigned int src_nents,
	unsigned long flags)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);

	if (dst_nents != src_nents)
		return NULL;

	return nbpf_prep_sg(chan, src_sg, dst_sg, src_nents,
			    DMA_MEM_TO_MEM, flags);
}

static struct dma_async_tx_descriptor *nbpf_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	struct scatterlist slave_sg;

	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);

	sg_init_table(&slave_sg, 1);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		sg_dma_address(&slave_sg) = chan->slave_dst_addr;
		return nbpf_prep_sg(chan, sgl, &slave_sg, sg_len,
				    direction, flags);

	case DMA_DEV_TO_MEM:
		sg_dma_address(&slave_sg) = chan->slave_src_addr;
		return nbpf_prep_sg(chan, &slave_sg, sgl, sg_len,
				    direction, flags);

	default:
		return NULL;
	}
}

static int nbpf_alloc_chan_resources(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	int ret;

	INIT_LIST_HEAD(&chan->free);
	INIT_LIST_HEAD(&chan->free_links);
	INIT_LIST_HEAD(&chan->queued);
	INIT_LIST_HEAD(&chan->active);
	INIT_LIST_HEAD(&chan->done);

	ret = nbpf_desc_page_alloc(chan);
	if (ret < 0)
		return ret;

	dev_dbg(dchan->device->dev, "Entry %s(): terminal %u\n", __func__,
		chan->terminal);

	nbpf_chan_configure(chan);

	return ret;
}

static void nbpf_free_chan_resources(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	struct nbpf_desc_page *dpage, *tmp;

	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);

	nbpf_chan_halt(chan);
	nbpf_chan_idle(chan);
	/* Clean up in case a channel is re-used for MEMCPY after slave DMA */
	nbpf_chan_prepare_default(chan);

	list_for_each_entry_safe(dpage, tmp, &chan->desc_page, node) {
		struct nbpf_link_desc *ldesc;
		int i;
		list_del(&dpage->node);
		for (i = 0, ldesc = dpage->ldesc;
		     i < ARRAY_SIZE(dpage->ldesc);
		     i++, ldesc++)
			dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr,
					 sizeof(*ldesc->hwdesc), DMA_TO_DEVICE);
		free_page((unsigned long)dpage);
	}
}

static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct nbpf_device *nbpf = ofdma->of_dma_data;
	struct dma_chan *dchan;
	struct nbpf_channel *chan;

	if (dma_spec->args_count != 2)
		return NULL;

	dchan = dma_get_any_slave_channel(&nbpf->dma_dev);
	if (!dchan)
		return NULL;

	dev_dbg(dchan->device->dev, "Entry %s(%s)\n", __func__,
		dma_spec->np->name);

	chan = nbpf_to_chan(dchan);

	chan->terminal = dma_spec->args[0];
	chan->flags = dma_spec->args[1];

	nbpf_chan_prepare(chan);
	nbpf_chan_configure(chan);

	return dchan;
}
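/*
 * Illustrative DT client usage (the client node and the "dmac" label are made
 * up; only the two-cell specifier format is taken from the xlate above and
 * from <dt-bindings/dma/nbpfaxi.h>): the first cell selects the DMA terminal,
 * the second carries NBPF_SLAVE_RQ_* flags, e.g.
 *
 *	dmas = <&dmac 6 (NBPF_SLAVE_RQ_HIGH | NBPF_SLAVE_RQ_LEVEL)>,
 *	       <&dmac 7 (NBPF_SLAVE_RQ_HIGH | NBPF_SLAVE_RQ_LEVEL)>;
 *	dma-names = "tx", "rx";
 */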

static void nbpf_chan_tasklet(unsigned long data)
{
	struct nbpf_channel *chan = (struct nbpf_channel *)data;
	struct nbpf_desc *desc, *tmp;
	struct dmaengine_desc_callback cb;

	while (!list_empty(&chan->done)) {
		bool found = false, must_put, recycling = false;

		spin_lock_irq(&chan->lock);

		list_for_each_entry_safe(desc, tmp, &chan->done, node) {
			if (!desc->user_wait) {
				/* Newly completed descriptor, have to process */
				found = true;
				break;
			} else if (async_tx_test_ack(&desc->async_tx)) {
				/*
				 * This descriptor was waiting for a user ACK,
				 * it can be recycled now.
				 */
				list_del(&desc->node);
				spin_unlock_irq(&chan->lock);
				nbpf_desc_put(desc);
				recycling = true;
				break;
			}
		}

		if (recycling)
			continue;

		if (!found) {
			/* This can happen if TERMINATE_ALL has been called */
			spin_unlock_irq(&chan->lock);
			break;
		}

		dma_cookie_complete(&desc->async_tx);

		/*
		 * With released lock we cannot dereference desc, maybe it's
		 * still on the "done" list
		 */
		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
			must_put = true;
		} else {
			desc->user_wait = true;
			must_put = false;
		}

		dmaengine_desc_get_callback(&desc->async_tx, &cb);

		/* ack and callback completed descriptor */
		spin_unlock_irq(&chan->lock);

		dmaengine_desc_callback_invoke(&cb, NULL);

		if (must_put)
			nbpf_desc_put(desc);
	}
}

static irqreturn_t nbpf_chan_irq(int irq, void *dev)
{
	struct nbpf_channel *chan = dev;
	bool done = nbpf_status_get(chan);
	struct nbpf_desc *desc;
	irqreturn_t ret;
	bool bh = false;

	if (!done)
		return IRQ_NONE;

	nbpf_status_ack(chan);

	dev_dbg(&chan->dma_chan.dev->device, "%s()\n", __func__);

	spin_lock(&chan->lock);
	desc = chan->running;
	if (WARN_ON(!desc)) {
		ret = IRQ_NONE;
		goto unlock;
	} else {
		ret = IRQ_HANDLED;
		bh = true;
	}

	list_move_tail(&desc->node, &chan->done);
	chan->running = NULL;

	if (!list_empty(&chan->active)) {
		desc = list_first_entry(&chan->active,
					struct nbpf_desc, node);
		if (!nbpf_start(desc))
			chan->running = desc;
	}

unlock:
	spin_unlock(&chan->lock);

	if (bh)
		tasklet_schedule(&chan->tasklet);

	return ret;
}

static irqreturn_t nbpf_err_irq(int irq, void *dev)
{
	struct nbpf_device *nbpf = dev;
	u32 error = nbpf_error_get(nbpf);

	dev_warn(nbpf->dma_dev.dev, "DMA error IRQ %u\n", irq);

	if (!error)
		return IRQ_NONE;

	do {
		struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error);
		/* On error: abort all queued transfers, no callback */
		nbpf_error_clear(chan);
		nbpf_chan_idle(chan);
		error = nbpf_error_get(nbpf);
	} while (error);

	return IRQ_HANDLED;
}

static int nbpf_chan_probe(struct nbpf_device *nbpf, int n)
{
	struct dma_device *dma_dev = &nbpf->dma_dev;
	struct nbpf_channel *chan = nbpf->chan + n;
	int ret;

	chan->nbpf = nbpf;
	chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n;
	INIT_LIST_HEAD(&chan->desc_page);
	spin_lock_init(&chan->lock);
	chan->dma_chan.device = dma_dev;
	dma_cookie_init(&chan->dma_chan);
	nbpf_chan_prepare_default(chan);

	dev_dbg(dma_dev->dev, "%s(): channel %d: -> %p\n", __func__, n, chan->base);

	snprintf(chan->name, sizeof(chan->name), "nbpf %d", n);

	tasklet_init(&chan->tasklet, nbpf_chan_tasklet, (unsigned long)chan);
	ret = devm_request_irq(dma_dev->dev, chan->irq,
			nbpf_chan_irq, IRQF_SHARED,
			chan->name, chan);
	if (ret < 0)
		return ret;

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->dma_chan.device_node,
		      &dma_dev->channels);

	return 0;
}

static const struct of_device_id nbpf_match[] = {
	{.compatible = "renesas,nbpfaxi64dmac1b4",	.data = &nbpf_cfg[NBPF1B4]},
	{.compatible = "renesas,nbpfaxi64dmac1b8",	.data = &nbpf_cfg[NBPF1B8]},
	{.compatible = "renesas,nbpfaxi64dmac1b16",	.data = &nbpf_cfg[NBPF1B16]},
	{.compatible = "renesas,nbpfaxi64dmac4b4",	.data = &nbpf_cfg[NBPF4B4]},
	{.compatible = "renesas,nbpfaxi64dmac4b8",	.data = &nbpf_cfg[NBPF4B8]},
	{.compatible = "renesas,nbpfaxi64dmac4b16",	.data = &nbpf_cfg[NBPF4B16]},
	{.compatible = "renesas,nbpfaxi64dmac8b4",	.data = &nbpf_cfg[NBPF8B4]},
	{.compatible = "renesas,nbpfaxi64dmac8b8",	.data = &nbpf_cfg[NBPF8B8]},
	{.compatible = "renesas,nbpfaxi64dmac8b16",	.data = &nbpf_cfg[NBPF8B16]},
	{}
};
MODULE_DEVICE_TABLE(of, nbpf_match);

static int nbpf_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *of_id = of_match_device(nbpf_match, dev);
	struct device_node *np = dev->of_node;
	struct nbpf_device *nbpf;
	struct dma_device *dma_dev;
	struct resource *iomem, *irq_res;
	const struct nbpf_config *cfg;
	int num_channels;
	int ret, irq, eirq, i;
	int irqbuf[9] /* maximum 8 channels + error IRQ */;
	unsigned int irqs = 0;

	BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE);

	/* DT only */
	if (!np || !of_id || !of_id->data)
		return -ENODEV;

	cfg = of_id->data;
	num_channels = cfg->num_channels;

	nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels *
			    sizeof(nbpf->chan[0]), GFP_KERNEL);
	if (!nbpf)
		return -ENOMEM;

	dma_dev = &nbpf->dma_dev;
	dma_dev->dev = dev;

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nbpf->base = devm_ioremap_resource(dev, iomem);
	if (IS_ERR(nbpf->base))
		return PTR_ERR(nbpf->base);

	nbpf->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(nbpf->clk))
		return PTR_ERR(nbpf->clk);

	nbpf->config = cfg;

	for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
		irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!irq_res)
			break;

		for (irq = irq_res->start; irq <= irq_res->end;
		     irq++, irqs++)
			irqbuf[irqs] = irq;
	}

	/*
	 * 3 IRQ resource schemes are supported:
	 * 1. 1 shared IRQ for error and all channels
	 * 2. 2 IRQs: one for error and one shared for all channels
	 * 3. 1 IRQ for error and an own IRQ for each channel
	 */
	if (irqs != 1 && irqs != 2 && irqs != num_channels + 1)
		return -ENXIO;

	if (irqs == 1) {
		eirq = irqbuf[0];

		for (i = 0; i <= num_channels; i++)
			nbpf->chan[i].irq = irqbuf[0];
	} else {
		eirq = platform_get_irq_byname(pdev, "error");
		if (eirq < 0)
			return eirq;

		if (irqs == num_channels + 1) {
			struct nbpf_channel *chan;

			for (i = 0, chan = nbpf->chan; i <= num_channels;
			     i++, chan++) {
				/* Skip the error IRQ */
				if (irqbuf[i] == eirq)
					i++;
				chan->irq = irqbuf[i];
			}

			if (chan != nbpf->chan + num_channels)
				return -EINVAL;
		} else {
			/* 2 IRQs and more than one channel */
			if (irqbuf[0] == eirq)
				irq = irqbuf[1];
			else
				irq = irqbuf[0];

			for (i = 0; i <= num_channels; i++)
				nbpf->chan[i].irq = irq;
		}
	}

	ret = devm_request_irq(dev, eirq, nbpf_err_irq,
			       IRQF_SHARED, "dma error", nbpf);
	if (ret < 0)
		return ret;
	nbpf->eirq = eirq;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Create DMA Channel */
	for (i = 0; i < num_channels; i++) {
		ret = nbpf_chan_probe(nbpf, i);
		if (ret < 0)
			return ret;
	}

	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
	dma_cap_set(DMA_SG, dma_dev->cap_mask);

	/* Common and MEMCPY operations */
	dma_dev->device_alloc_chan_resources
		= nbpf_alloc_chan_resources;
	dma_dev->device_free_chan_resources = nbpf_free_chan_resources;
	dma_dev->device_prep_dma_sg = nbpf_prep_memcpy_sg;
	dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
	dma_dev->device_tx_status = nbpf_tx_status;
	dma_dev->device_issue_pending = nbpf_issue_pending;

	/*
	 * If we drop support for unaligned MEMCPY buffer addresses and / or
	 * lengths by setting
	 * dma_dev->copy_align = 4;
	 * then we can set transfer length to 4 bytes in nbpf_prep_one() for
	 * DMA_MEM_TO_MEM
	 */

	/* Compulsory for DMA_SLAVE fields */
	dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg;
	dma_dev->device_config = nbpf_config;
	dma_dev->device_pause = nbpf_pause;
	dma_dev->device_terminate_all = nbpf_terminate_all;

	dma_dev->src_addr_widths = NBPF_DMA_BUSWIDTHS;
	dma_dev->dst_addr_widths = NBPF_DMA_BUSWIDTHS;
	dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	platform_set_drvdata(pdev, nbpf);

	ret = clk_prepare_enable(nbpf->clk);
	if (ret < 0)
		return ret;

	nbpf_configure(nbpf);

	ret = dma_async_device_register(dma_dev);
	if (ret < 0)
		goto e_clk_off;

	ret = of_dma_controller_register(np, nbpf_of_xlate, nbpf);
	if (ret < 0)
		goto e_dma_dev_unreg;

	return 0;

e_dma_dev_unreg:
	dma_async_device_unregister(dma_dev);
e_clk_off:
	clk_disable_unprepare(nbpf->clk);

	return ret;
}

static int nbpf_remove(struct platform_device *pdev)
{
	struct nbpf_device *nbpf = platform_get_drvdata(pdev);
	int i;

	devm_free_irq(&pdev->dev, nbpf->eirq, nbpf);

	for (i = 0; i < nbpf->config->num_channels; i++) {
		struct nbpf_channel *chan = nbpf->chan + i;

		devm_free_irq(&pdev->dev, chan->irq, chan);

		tasklet_kill(&chan->tasklet);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&nbpf->dma_dev);
	clk_disable_unprepare(nbpf->clk);

	return 0;
}

static const struct platform_device_id nbpf_ids[] = {
	{"nbpfaxi64dmac1b4",	(kernel_ulong_t)&nbpf_cfg[NBPF1B4]},
	{"nbpfaxi64dmac1b8",	(kernel_ulong_t)&nbpf_cfg[NBPF1B8]},
	{"nbpfaxi64dmac1b16",	(kernel_ulong_t)&nbpf_cfg[NBPF1B16]},
	{"nbpfaxi64dmac4b4",	(kernel_ulong_t)&nbpf_cfg[NBPF4B4]},
	{"nbpfaxi64dmac4b8",	(kernel_ulong_t)&nbpf_cfg[NBPF4B8]},
	{"nbpfaxi64dmac4b16",	(kernel_ulong_t)&nbpf_cfg[NBPF4B16]},
	{"nbpfaxi64dmac8b4",	(kernel_ulong_t)&nbpf_cfg[NBPF8B4]},
	{"nbpfaxi64dmac8b8",	(kernel_ulong_t)&nbpf_cfg[NBPF8B8]},
	{"nbpfaxi64dmac8b16",	(kernel_ulong_t)&nbpf_cfg[NBPF8B16]},
	{},
};
MODULE_DEVICE_TABLE(platform, nbpf_ids);

#ifdef CONFIG_PM
static int nbpf_runtime_suspend(struct device *dev)
{
	struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
	clk_disable_unprepare(nbpf->clk);
	return 0;
}

static int nbpf_runtime_resume(struct device *dev)
{
	struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
	return clk_prepare_enable(nbpf->clk);
}
#endif

static const struct dev_pm_ops nbpf_pm_ops = {
	SET_RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL)
};

static struct platform_driver nbpf_driver = {
	.driver = {
		.name = "dma-nbpf",
		.of_match_table = nbpf_match,
		.pm = &nbpf_pm_ops,
	},
	.id_table = nbpf_ids,
	.probe = nbpf_probe,
	.remove = nbpf_remove,
};

module_platform_driver(nbpf_driver);

MODULE_AUTHOR("Guennadi Liakhovetski <[email protected]>");
MODULE_DESCRIPTION("dmaengine driver for NBPFAXI64* DMACs");
MODULE_LICENSE("GPL v2");