/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <[email protected]> for ST-Ericsson
 * Author: Jonas Aaberg <[email protected]> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "dmaengine.h"
#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
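
/*
 * Worked example (illustration only): each physical channel gets a
 * two-bit command/status field, and an even/odd channel pair shares
 * the same bit position (the even channel lives in D40_DREG_ACTIVE,
 * the odd one in D40_DREG_ACTIVO, see __d40_execute_command_phy()
 * below). For chan = 5, D40_CHAN_POS(5) = 2 * (5 / 2) = 4 and
 * D40_CHAN_POS_MASK(5) = 0x3 << 4 = 0x30.
 */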

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Milliseconds */
#define DMA40_AUTOSUSPEND_DELAY	100

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Max number of logical channels per physical channel */
#define D40_MAX_LOG_CHAN_PER_PHY 32

/* Attempts before giving up on trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		BIT(31)
#define D40_ALLOC_PHY		BIT(30)
#define D40_ALLOC_LOG_FREE	0

#define D40_MEMCPY_MAX_CHANS	8

/* Reserved event lines for memcpy only. */
#define DB8500_DMA_MEMCPY_EV_0	51
#define DB8500_DMA_MEMCPY_EV_1	56
#define DB8500_DMA_MEMCPY_EV_2	57
#define DB8500_DMA_MEMCPY_EV_3	58
#define DB8500_DMA_MEMCPY_EV_4	59
#define DB8500_DMA_MEMCPY_EV_5	60

static int dma40_memcpy_channels[] = {
	DB8500_DMA_MEMCPY_EV_0,
	DB8500_DMA_MEMCPY_EV_1,
	DB8500_DMA_MEMCPY_EV_2,
	DB8500_DMA_MEMCPY_EV_3,
	DB8500_DMA_MEMCPY_EV_4,
	DB8500_DMA_MEMCPY_EV_5,
};

/* Default configuration for physical memcpy */
static struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
	.mode = STEDMA40_MODE_PHYSICAL,
	.dir = DMA_MEM_TO_MEM,

	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.src_info.psize = STEDMA40_PSIZE_PHY_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.dst_info.psize = STEDMA40_PSIZE_PHY_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/* Default configuration for logical memcpy */
static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
	.mode = STEDMA40_MODE_LOGICAL,
	.dir = DMA_MEM_TO_MEM,

	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.src_info.psize = STEDMA40_PSIZE_LOG_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP = 0,
	D40_DMA_RUN = 1,
	D40_DMA_SUSPEND_REQ = 2,
	D40_DMA_SUSPENDED = 3
};

/*
 * enum d40_events - The different Event Enables for the event lines.
 *
 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
 * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line.
 * @D40_ROUND_EVENTLINE: Status check for event line.
 */
enum d40_events {
	D40_DEACTIVATE_EVENTLINE = 0,
	D40_ACTIVATE_EVENTLINE = 1,
	D40_SUSPEND_REQ_EVENTLINE = 2,
	D40_ROUND_EVENTLINE = 3
};

/*
 * These are the registers that have to be saved and later restored
 * when the DMA hw is powered off.
 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
 */
static u32 d40_backup_regs[] = {
	D40_DREG_LCPA,
	D40_DREG_LCLA,
	D40_DREG_PRMSE,
	D40_DREG_PRMSO,
	D40_DREG_PRMOE,
	D40_DREG_PRMOO,
};

#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)

/*
 * Since 9540 and 8540 have the same HW revision, use v4a for 9540 or
 * earlier and v4b for 8540 or later.
 * HW revision:
 * DB8500ed has revision 0
 * DB8500v1 has revision 2
 * DB8500v2 has revision 3
 * AP9540v1 has revision 4
 * DB8540v1 has revision 4
 * TODO: Check if all these registers have to be saved/restored on dma40 v4a
 */
static u32 d40_backup_regs_v4a[] = {
	D40_DREG_PSEG1,
	D40_DREG_PSEG2,
	D40_DREG_PSEG3,
	D40_DREG_PSEG4,
	D40_DREG_PCEG1,
	D40_DREG_PCEG2,
	D40_DREG_PCEG3,
	D40_DREG_PCEG4,
	D40_DREG_RSEG1,
	D40_DREG_RSEG2,
	D40_DREG_RSEG3,
	D40_DREG_RSEG4,
	D40_DREG_RCEG1,
	D40_DREG_RCEG2,
	D40_DREG_RCEG3,
	D40_DREG_RCEG4,
};

#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)

static u32 d40_backup_regs_v4b[] = {
	D40_DREG_CPSEG1,
	D40_DREG_CPSEG2,
	D40_DREG_CPSEG3,
	D40_DREG_CPSEG4,
	D40_DREG_CPSEG5,
	D40_DREG_CPCEG1,
	D40_DREG_CPCEG2,
	D40_DREG_CPCEG3,
	D40_DREG_CPCEG4,
	D40_DREG_CPCEG5,
	D40_DREG_CRSEG1,
	D40_DREG_CRSEG2,
	D40_DREG_CRSEG3,
	D40_DREG_CRSEG4,
	D40_DREG_CRSEG5,
	D40_DREG_CRCEG1,
	D40_DREG_CRCEG2,
	D40_DREG_CRCEG3,
	D40_DREG_CRCEG4,
	D40_DREG_CRCEG5,
};

#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)

static u32 d40_backup_regs_chan[] = {
	D40_CHAN_REG_SSCFG,
	D40_CHAN_REG_SSELT,
	D40_CHAN_REG_SSPTR,
	D40_CHAN_REG_SSLNK,
	D40_CHAN_REG_SDCFG,
	D40_CHAN_REG_SDELT,
	D40_CHAN_REG_SDPTR,
	D40_CHAN_REG_SDLNK,
};

#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
			     BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};


static struct d40_interrupt_lookup il_v4a[] = {
	{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
	{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
	{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
	{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
	{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
	{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
	{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
	{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
	{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
	{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
};

static struct d40_interrupt_lookup il_v4b[] = {
	{D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false,   0},
	{D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false,  32},
	{D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false,  64},
	{D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false,  96},
	{D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
	{D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true,    0},
	{D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true,   32},
	{D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true,   64},
	{D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true,   96},
	{D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true,  128},
	{D40_DREG_CPCTIS,  D40_DREG_CPCICR,  false, D40_PHY_CHAN},
	{D40_DREG_CPCEIS,  D40_DREG_CPCICR,  true,  D40_PHY_CHAN},
};
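
/*
 * Worked example (illustration only): a set bit n in one of the
 * terminal-count status registers maps to
 * lookup_log_chans[il->offset + n], so bit 3 set in D40_DREG_LCTIS2
 * (offset 64) belongs to logical channel 67. Rows with
 * offset == D40_PHY_CHAN are resolved through lookup_phy_chans
 * instead.
 */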

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
};
static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC,     .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void *base;
	int size;
	dma_addr_t dma_addr;
	/* Space for dst and src, plus an extra for padding */
	u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used for among other things for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir lli_log;

	struct d40_lli_pool lli_pool;
	int lli_len;
	int lli_current;
	int lcla_alloc;

	struct dma_async_tx_descriptor txd;
	struct list_head node;

	bool is_in_client_list;
	bool cyclic;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address of the LCLA area, if mapped.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: map of which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void *base;
	dma_addr_t dma_addr;
	void *base_unaligned;
	int pages;
	spinlock_t lock;
	struct d40_desc **alloc_map;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @reserved: True if used by secure world or otherwise.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 * @use_soft_lli: To mark if the linked lists of channel are managed by SW.
 */
struct d40_phy_res {
	spinlock_t lock;
	bool reserved;
	int num;
	u32 allocated_src;
	u32 allocated_dst;
	bool use_soft_lli;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending()
 * @active: Active descriptor.
 * @done: Completed jobs
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t lock;
	int log_num;
	int pending_tx;
	bool busy;
	struct d40_phy_res *phy_chan;
	struct dma_chan chan;
	struct tasklet_struct tasklet;
	struct list_head client;
	struct list_head pending_queue;
	struct list_head active;
	struct list_head done;
	struct list_head queue;
	struct list_head prepare_queue;
	struct stedma40_chan_cfg dma_cfg;
	bool configured;
	struct d40_base *base;
	/* Default register configurations */
	u32 src_def_cfg;
	u32 dst_def_cfg;
	struct d40_def_lcsp log_def;
	struct d40_log_lli_full *lcpa;
	/* Runtime reconfiguration */
	dma_addr_t runtime_addr;
	enum dma_transfer_direction runtime_direction;
};

/**
 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
 * controller
 *
 * @backup: the pointer to the registers address array for backup
 * @backup_size: the size of the registers address array for backup
 * @realtime_en: the realtime enable register
 * @realtime_clear: the realtime clear register
 * @high_prio_en: the high priority enable register
 * @high_prio_clear: the high priority clear register
 * @interrupt_en: the interrupt enable register
 * @interrupt_clear: the interrupt clear register
 * @il: the pointer to struct d40_interrupt_lookup
 * @il_size: the size of d40_interrupt_lookup array
 * @init_reg: the pointer to the struct d40_reg_val
 * @init_reg_size: the size of d40_reg_val array
 */
struct d40_gen_dmac {
	u32 *backup;
	u32 backup_size;
	u32 realtime_en;
	u32 realtime_clear;
	u32 high_prio_en;
	u32 high_prio_clear;
	u32 interrupt_en;
	u32 interrupt_clear;
	struct d40_interrupt_lookup *il;
	u32 il_size;
	struct d40_reg_val *init_reg;
	u32 init_reg_size;
};

/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
 * transfers).
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_parms: DMA parameters for the device.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 * @reg_val_backup: Here the values of some hardware registers are stored
 * before the DMA is powered off. They are restored when the power is back on.
 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
 * later.
 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
 * @gen_dmac: the struct for generic registers values to represent u8500/8540
 * DMA controller
 */
struct d40_base {
	spinlock_t interrupt_lock;
	spinlock_t execmd_lock;
	struct device *dev;
	void __iomem *virtbase;
	u8 rev:4;
	struct clk *clk;
	phys_addr_t phy_start;
	resource_size_t phy_size;
	int irq;
	int num_memcpy_chans;
	int num_phy_chans;
	int num_log_chans;
	struct device_dma_parameters dma_parms;
	struct dma_device dma_both;
	struct dma_device dma_slave;
	struct dma_device dma_memcpy;
	struct d40_chan *phy_chans;
	struct d40_chan *log_chans;
	struct d40_chan **lookup_log_chans;
	struct d40_chan **lookup_phy_chans;
	struct stedma40_platform_data *plat_data;
	struct regulator *lcpa_regulator;
	/* Physical half channels */
	struct d40_phy_res *phy_res;
	struct d40_lcla_pool lcla_pool;
	void *lcpa_base;
	dma_addr_t phy_lcpa;
	resource_size_t lcpa_size;
	struct kmem_cache *desc_slab;
	u32 reg_val_backup[BACKUP_REGS_SZ];
	u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
	u32 *reg_val_backup_chan;
	u16 gcc_pwr_off_mask;
	struct d40_gen_dmac gen_dmac;
};

static struct device *chan2dev(struct d40_chan *d40c)
{
	return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
	return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
	return !chan_is_physical(chan);
}

static void __iomem *chan_base(struct d40_chan *chan)
{
	return chan->base->virtbase + D40_DREG_PCBASE +
	       chan->phy_chan->num * D40_DREG_PCDELTA;
}

#define d40_err(dev, format, arg...)		\
	dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...)		\
	d40_err(chan2dev(d40c), format, ## arg)

static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
			      int lli_len)
{
	bool is_log = chan_is_logical(d40c);
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = lli_len * 2 * align;

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN(base, align);
		d40d->lli_log.dst = d40d->lli_log.src + lli_len;

		d40d->lli_pool.dma_addr = 0;
	} else {
		d40d->lli_phy.src = PTR_ALIGN(base, align);
		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
							 d40d->lli_phy.src,
							 d40d->lli_pool.size,
							 DMA_TO_DEVICE);

		if (dma_mapping_error(d40c->base->dev,
				      d40d->lli_pool.dma_addr)) {
			kfree(d40d->lli_pool.base);
			d40d->lli_pool.base = NULL;
			d40d->lli_pool.dma_addr = 0;
			return -ENOMEM;
		}
	}

	return 0;
}
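
/*
 * Worked example (illustration only): a logical transfer with
 * lli_len = 4 kmallocs 4 * 2 * sizeof(struct d40_log_lli) bytes plus
 * one alignment's worth of slack, and lli_log.dst starts lli_len
 * entries after lli_log.src. A single-link job (lli_len == 1) skips
 * the allocation and uses the descriptor's embedded pre_alloc_lli
 * buffer instead.
 */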

static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_pool.dma_addr)
		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
				 d40d->lli_pool.size, DMA_TO_DEVICE);

	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}

static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	/*
	 * Allocate both src and dst at the same time; therefore the half
	 * starts at 1, since 0 can't be used as zero is the end marker.
	 */
	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (!d40c->base->lcla_pool.alloc_map[idx]) {
			d40c->base->lcla_pool.alloc_map[idx] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}
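
/*
 * Worked example (illustration only): with
 * D40_LCLA_LINK_PER_EVENT_GRP = 128, physical channel 2 owns
 * alloc_map slots 256..383. Each allocated index i stands for one
 * src/dst pair of links, so at most 63 pairs (i = 1..63) can be
 * outstanding per channel.
 */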

static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (chan_is_physical(d40c))
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
			d40c->base->lcla_pool.alloc_map[idx] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

	if (!list_empty(&d40c->client)) {
		struct d40_desc *d;
		struct d40_desc *_d;

		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			if (async_tx_test_ack(&d->txd)) {
				d40_desc_remove(d);
				desc = d;
				memset(desc, 0, sizeof(*desc));
				break;
			}
		}
	}

	if (!desc)
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

	if (desc)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	d40_pool_lli_free(d40c, d40d);
	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
	struct d40_phy_lli *lli_src = desc->lli_phy.src;
	void __iomem *base = chan_base(chan);

	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}

static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->done);
}

static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
	struct d40_log_lli_bidir *lli = &desc->lli_log;
	int lli_current = desc->lli_current;
	int lli_len = desc->lli_len;
	bool cyclic = desc->cyclic;
	int curr_lcla = -EINVAL;
	int first_lcla = 0;
	bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
	bool linkback;

	/*
	 * We may have partially running cyclic transfers, in case we didn't
	 * get enough LCLA entries.
	 */
	linkback = cyclic && lli_current == 0;

	/*
	 * For linkback, we need one LCLA even with only one link, because we
	 * can't link back to the one in LCPA space.
	 */
	if (linkback || (lli_len - lli_current > 1)) {
		/*
		 * If the channel is expected to use only soft_lli don't
		 * allocate a lcla. This is to avoid a HW issue that exists
		 * in some controller during a peripheral to memory transfer
		 * that uses linked lists.
		 */
		if (!(chan->phy_chan->use_soft_lli &&
		      chan->dma_cfg.dir == DMA_DEV_TO_MEM))
			curr_lcla = d40_lcla_alloc_one(chan, desc);

		first_lcla = curr_lcla;
	}

	/*
	 * For linkback, we normally load the LCPA in the loop since we need to
	 * link it to the second LCLA and not the first. However, if we
	 * couldn't even get a first LCLA, then we have to run in LCPA and
	 * reload manually.
	 */
	if (!linkback || curr_lcla == -EINVAL) {
		unsigned int flags = 0;

		if (curr_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		d40_log_lli_lcpa_write(chan->lcpa,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       curr_lcla,
				       flags);
		lli_current++;
	}

	if (curr_lcla < 0)
		goto out;

	for (; lli_current < lli_len; lli_current++) {
		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
					   8 * curr_lcla * 2;
		struct d40_log_lli *lcla = pool->base + lcla_offset;
		unsigned int flags = 0;
		int next_lcla;

		if (lli_current + 1 < lli_len)
			next_lcla = d40_lcla_alloc_one(chan, desc);
		else
			next_lcla = linkback ? first_lcla : -EINVAL;

		if (cyclic || next_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		if (linkback && curr_lcla == first_lcla) {
			/* First link goes in both LCPA and LCLA */
			d40_log_lli_lcpa_write(chan->lcpa,
					       &lli->dst[lli_current],
					       &lli->src[lli_current],
					       next_lcla, flags);
		}

		/*
		 * One unused LCLA in the cyclic case if the very first
		 * next_lcla fails...
		 */
		d40_log_lli_lcla_write(lcla,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       next_lcla, flags);

		/*
		 * Cache maintenance is not needed if lcla is
		 * mapped in esram
		 */
		if (!use_esram_lcla) {
			dma_sync_single_range_for_device(chan->base->dev,
						pool->dma_addr, lcla_offset,
						2 * sizeof(struct d40_log_lli),
						DMA_TO_DEVICE);
		}
		curr_lcla = next_lcla;

		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
			lli_current++;
			break;
		}
	}

out:
	desc->lli_current = lli_current;
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (chan_is_physical(d40c)) {
		d40_phy_lli_load(d40c, d40d);
		d40d->lli_current = d40d->lli_len;
	} else
		d40_log_lli_to_lcxa(d40c, d40d);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	d40_desc_remove(desc);
	desc->is_in_client_list = false;
	list_add_tail(&desc->node, &d40c->pending_queue);
}

static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->pending_queue))
		return NULL;

	d = list_first_entry(&d40c->pending_queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_first_done(struct d40_chan *d40c)
{
	if (list_empty(&d40c->done))
		return NULL;

	return list_first_entry(&d40c->done, struct d40_desc, node);
}

static int d40_psize_2_burst_size(bool is_log, int psize)
{
	if (is_log) {
		if (psize == STEDMA40_PSIZE_LOG_1)
			return 1;
	} else {
		if (psize == STEDMA40_PSIZE_PHY_1)
			return 1;
	}

	return 2 << psize;
}
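
/*
 * Worked example (illustration only): apart from the special-cased
 * *_PSIZE_*_1 values, the burst size is 2 << psize, so an encoded
 * psize of 1 yields a burst of 4 elements and an encoded psize of 3
 * yields a burst of 16 elements.
 */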

/*
 * The dma only supports transmitting segments up to
 * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in bytes.
 *
 * Calculate the total number of dma elements required to send the entire
 * sg list.
 */
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
	int dmalen;
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= max_w;

	if (!IS_ALIGNED(size, max_w))
		return -EINVAL;

	if (size <= seg_max)
		dmalen = 1;
	else {
		dmalen = size / seg_max;
		if (dmalen * seg_max < size)
			dmalen++;
	}
	return dmalen;
}
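
/*
 * Worked example (illustration only, assuming STEDMA40_MAX_SEG_SIZE is
 * the 16-bit element-count limit 0xFFFF): with data widths of 4 and 2
 * bytes, min_w = 2 and max_w = 4, so seg_max = ALIGN(0xFFFF * 2, 4) - 4
 * = 131068 bytes. A 4-byte-aligned transfer of 1048576 bytes then needs
 * 1048576 / 131068 = 8 full segments plus a remainder, i.e. dmalen = 9.
 */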

static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
			   u32 data_width1, u32 data_width2)
{
	struct scatterlist *sg;
	int i;
	int len = 0;
	int ret;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = d40_size_2_dmalen(sg_dma_len(sg),
					data_width1, data_width2);
		if (ret < 0)
			return ret;
		len += ret;
	}
	return len;
}

static int __d40_execute_command_phy(struct d40_chan *d40c,
				     enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	if (command == D40_DMA_STOP) {
		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to suspend the chl %d (log: %d) status %x\n",
				 d40c->phy_chan->num, d40c->log_num,
				 status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}

static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	struct d40_desc *_d;

	/* Release completed descriptors */
	while ((d40d = d40_first_done(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release pending descriptors */
	while ((d40d = d40_first_pending(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	/* Release descriptors in prepare queue */
	if (!list_empty(&d40c->prepare_queue))
		list_for_each_entry_safe(d40d, _d,
					 &d40c->prepare_queue, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	d40c->pending_tx = 0;
}

static void __d40_config_set_event(struct d40_chan *d40c,
				   enum d40_events event_type, u32 event,
				   int reg)
{
	void __iomem *addr = chan_base(d40c) + reg;
	int tries;
	u32 status;

	switch (event_type) {

	case D40_DEACTIVATE_EVENTLINE:

		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);
		break;

	case D40_SUSPEND_REQ_EVENTLINE:
		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
			  D40_EVENTLINE_POS(event);

		if (status == D40_DEACTIVATE_EVENTLINE ||
		    status == D40_SUSPEND_REQ_EVENTLINE)
			break;

		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);

		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {

			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
				  D40_EVENTLINE_POS(event);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DEACTIVATE_EVENTLINE)
				break;
		}

		if (tries == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to stop the event_line chl %d (log: %d) status %x\n",
				 d40c->phy_chan->num, d40c->log_num, status);
		}
		break;

	case D40_ACTIVATE_EVENTLINE:
		/*
		 * The hardware sometimes doesn't register the enable when src
		 * and dst event lines are active on the same logical channel.
		 * Retry to ensure it does. Usually only one retry is
		 * sufficient.
		 */
		tries = 100;
		while (--tries) {
			writel((D40_ACTIVATE_EVENTLINE <<
				D40_EVENTLINE_POS(event)) |
			       ~D40_EVENTLINE_MASK(event), addr);

			if (readl(addr) & D40_EVENTLINE_MASK(event))
				break;
		}

		if (tries != 99)
			dev_dbg(chan2dev(d40c),
				"[%s] workaround enable S%cLNK (%d tries)\n",
				__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
				100 - tries);

		WARN_ON(!tries);
		break;

	case D40_ROUND_EVENTLINE:
		BUG();
		break;

	}
}

static void d40_config_set_event(struct d40_chan *d40c,
				 enum d40_events event_type)
{
	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
	    (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SSLNK);

	if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SDLNK);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	u32 val;

	val = readl(chanbase + D40_CHAN_REG_SSLNK);
	val |= readl(chanbase + D40_CHAN_REG_SDLNK);

	return val;
}

static int
__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
{
	unsigned long flags;
	int ret = 0;
	u32 active_status;
	void __iomem *active_reg;

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	switch (command) {
	case D40_DMA_STOP:
	case D40_DMA_SUSPEND_REQ:

		active_status = (readl(active_reg) &
				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				 D40_CHAN_POS(d40c->phy_chan->num);

		if (active_status == D40_DMA_RUN)
			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
		else
			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);

		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
			ret = __d40_execute_command_phy(d40c, command);

		break;

	case D40_DMA_RUN:

		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
		ret = __d40_execute_command_phy(d40c, command);
		break;

	case D40_DMA_SUSPENDED:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
	return ret;
}

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	if (chan_is_logical(d40c))
		return __d40_execute_command_log(d40c, command);
	else
		return __d40_execute_command_phy(d40c, command);
}

static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};

	if (chan_is_physical(d40c))
		return phy_map[d40c->dma_cfg.mode_opt];
	else
		return log_map[d40c->dma_cfg.mode_opt];
}

static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(chan_is_logical(d40c)) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (chan_is_logical(d40c)) {
		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
			   & D40_SREG_ELEM_LOG_LIDX_MASK;
		void __iomem *chanbase = chan_base(d40c);

		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
		writel(lidx, chanbase + D40_CHAN_REG_SDELT);

		/* Clear LNK which will be used by d40_chan_has_events() */
		writel(0, chanbase + D40_CHAN_REG_SSLNK);
		writel(0, chanbase + D40_CHAN_REG_SDLNK);
	}
}

static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (chan_is_logical(d40c))
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else {
		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
			  >> D40_SREG_ELEM_PHY_ECNT_POS;
	}

	return num_elt * d40c->dma_cfg.dst_info.data_width;
}
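
/*
 * Worked example (illustration only): the residue is the remaining
 * element count scaled by the destination data width, so 512 elements
 * left at a 4-byte destination width reports 2048 bytes outstanding.
 */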
1418 | ||
1419 | static bool d40_tx_is_linked(struct d40_chan *d40c) | |
1420 | { | |
1421 | bool is_link; | |
1422 | ||
724a8577 | 1423 | if (chan_is_logical(d40c)) |
aa182ae2 JA |
1424 | is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; |
1425 | else | |
8ca84687 RV |
1426 | is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK) |
1427 | & D40_SREG_LNK_PHYS_LNK_MASK; | |
1428 | ||
aa182ae2 JA |
1429 | return is_link; |
1430 | } | |
1431 | ||
6f5bad03 | 1432 | static int d40_pause(struct dma_chan *chan) |
aa182ae2 | 1433 | { |
6f5bad03 | 1434 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
aa182ae2 JA |
1435 | int res = 0; |
1436 | unsigned long flags; | |
1437 | ||
6f5bad03 MR |
1438 | if (d40c->phy_chan == NULL) { |
1439 | chan_err(d40c, "Channel is not allocated!\n"); | |
1440 | return -EINVAL; | |
1441 | } | |
1442 | ||
3ac012af JA |
1443 | if (!d40c->busy) |
1444 | return 0; | |
1445 | ||
aa182ae2 | 1446 | spin_lock_irqsave(&d40c->lock, flags); |
80245216 | 1447 | pm_runtime_get_sync(d40c->base->dev); |
aa182ae2 JA |
1448 | |
1449 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | |
1bdae6f4 | 1450 | |
7fb3e75e N |
1451 | pm_runtime_mark_last_busy(d40c->base->dev); |
1452 | pm_runtime_put_autosuspend(d40c->base->dev); | |
aa182ae2 JA |
1453 | spin_unlock_irqrestore(&d40c->lock, flags); |
1454 | return res; | |
1455 | } | |
1456 | ||
6f5bad03 | 1457 | static int d40_resume(struct dma_chan *chan) |
aa182ae2 | 1458 | { |
6f5bad03 | 1459 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
aa182ae2 JA |
1460 | int res = 0; |
1461 | unsigned long flags; | |
1462 | ||
6f5bad03 MR |
1463 | if (d40c->phy_chan == NULL) { |
1464 | chan_err(d40c, "Channel is not allocated!\n"); | |
1465 | return -EINVAL; | |
1466 | } | |
1467 | ||
3ac012af JA |
1468 | if (!d40c->busy) |
1469 | return 0; | |
1470 | ||
aa182ae2 | 1471 | spin_lock_irqsave(&d40c->lock, flags); |
7fb3e75e | 1472 | pm_runtime_get_sync(d40c->base->dev); |
aa182ae2 JA |
1473 | |
1474 | /* If there are bytes left to transfer or a linked tx, resume the job */ |
1bdae6f4 | 1475 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) |
aa182ae2 | 1476 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); |
aa182ae2 | 1477 | |
7fb3e75e N |
1478 | pm_runtime_mark_last_busy(d40c->base->dev); |
1479 | pm_runtime_put_autosuspend(d40c->base->dev); | |
aa182ae2 JA |
1480 | spin_unlock_irqrestore(&d40c->lock, flags); |
1481 | return res; | |
1482 | } | |
1483 | ||
8d318a50 LW |
1484 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) |
1485 | { | |
1486 | struct d40_chan *d40c = container_of(tx->chan, | |
1487 | struct d40_chan, | |
1488 | chan); | |
1489 | struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); | |
1490 | unsigned long flags; | |
884485e1 | 1491 | dma_cookie_t cookie; |
8d318a50 LW |
1492 | |
1493 | spin_lock_irqsave(&d40c->lock, flags); | |
884485e1 | 1494 | cookie = dma_cookie_assign(tx); |
8d318a50 | 1495 | d40_desc_queue(d40c, d40d); |
8d318a50 LW |
1496 | spin_unlock_irqrestore(&d40c->lock, flags); |
1497 | ||
884485e1 | 1498 | return cookie; |
8d318a50 LW |
1499 | } |
1500 | ||
1501 | static int d40_start(struct d40_chan *d40c) | |
1502 | { | |
0c32269d | 1503 | return d40_channel_execute_command(d40c, D40_DMA_RUN); |
8d318a50 LW |
1504 | } |
1505 | ||
1506 | static struct d40_desc *d40_queue_start(struct d40_chan *d40c) | |
1507 | { | |
1508 | struct d40_desc *d40d; | |
1509 | int err; | |
1510 | ||
1511 | /* Start queued jobs, if any */ | |
1512 | d40d = d40_first_queued(d40c); | |
1513 | ||
1514 | if (d40d != NULL) { | |
1bdae6f4 | 1515 | if (!d40c->busy) { |
7fb3e75e | 1516 | d40c->busy = true; |
1bdae6f4 N |
1517 | pm_runtime_get_sync(d40c->base->dev); |
1518 | } | |
8d318a50 LW |
1519 | |
1520 | /* Remove from queue */ | |
1521 | d40_desc_remove(d40d); | |
1522 | ||
1523 | /* Add to active queue */ | |
1524 | d40_desc_submit(d40c, d40d); | |
1525 | ||
7d83a854 RV |
1526 | /* Initiate DMA job */ |
1527 | d40_desc_load(d40c, d40d); | |
8d318a50 | 1528 | |
7d83a854 RV |
1529 | /* Start dma job */ |
1530 | err = d40_start(d40c); | |
8d318a50 | 1531 | |
7d83a854 RV |
1532 | if (err) |
1533 | return NULL; | |
8d318a50 LW |
1534 | } |
1535 | ||
1536 | return d40d; | |
1537 | } | |
1538 | ||
1539 | /* called from interrupt context */ | |
1540 | static void dma_tc_handle(struct d40_chan *d40c) | |
1541 | { | |
1542 | struct d40_desc *d40d; | |
1543 | ||
8d318a50 LW |
1544 | /* Get first active entry from list */ |
1545 | d40d = d40_first_active_get(d40c); | |
1546 | ||
1547 | if (d40d == NULL) | |
1548 | return; | |
1549 | ||
0c842b55 RV |
1550 | if (d40d->cyclic) { |
1551 | /* | |
1552 | * If this was a partially loaded list, we need to reload |
1553 | * it, and only when the list is complete. We need to check |
1554 | * for done because the interrupt will hit for every link, and | |
1555 | * not just the last one. | |
1556 | */ | |
1557 | if (d40d->lli_current < d40d->lli_len | |
1558 | && !d40_tx_is_linked(d40c) | |
1559 | && !d40_residue(d40c)) { | |
1560 | d40_lcla_free_all(d40c, d40d); | |
1561 | d40_desc_load(d40c, d40d); | |
1562 | (void) d40_start(d40c); | |
8d318a50 | 1563 | |
0c842b55 RV |
1564 | if (d40d->lli_current == d40d->lli_len) |
1565 | d40d->lli_current = 0; | |
1566 | } | |
1567 | } else { | |
1568 | d40_lcla_free_all(d40c, d40d); | |
8d318a50 | 1569 | |
0c842b55 RV |
1570 | if (d40d->lli_current < d40d->lli_len) { |
1571 | d40_desc_load(d40c, d40d); | |
1572 | /* Start dma job */ | |
1573 | (void) d40_start(d40c); | |
1574 | return; | |
1575 | } | |
1576 | ||
9ecb41bd | 1577 | if (d40_queue_start(d40c) == NULL) { |
0c842b55 | 1578 | d40c->busy = false; |
9ecb41bd RV |
1579 | |
1580 | pm_runtime_mark_last_busy(d40c->base->dev); | |
1581 | pm_runtime_put_autosuspend(d40c->base->dev); | |
1582 | } | |
8d318a50 | 1583 | |
7dd14525 FB |
1584 | d40_desc_remove(d40d); |
1585 | d40_desc_done(d40c, d40d); | |
1586 | } | |
4226dd86 | 1587 | |
8d318a50 LW |
1588 | d40c->pending_tx++; |
1589 | tasklet_schedule(&d40c->tasklet); | |
1590 | ||
1591 | } | |
1592 | ||
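/*
 * A sketch of the cyclic reload condition used in dma_tc_handle() above
 * (the helper name is hypothetical): a partially loaded cyclic
 * descriptor is reloaded only once every link has been consumed and the
 * channel has gone idle, because the TC interrupt fires for every link
 * and not just the last one.
 */
static inline bool example_cyclic_needs_reload(int lli_current, int lli_len,
					       bool tx_linked, u32 residue)
{
	return lli_current < lli_len && !tx_linked && residue == 0;
}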
1593 | static void dma_tasklet(unsigned long data) | |
1594 | { | |
1595 | struct d40_chan *d40c = (struct d40_chan *) data; | |
767a9675 | 1596 | struct d40_desc *d40d; |
8d318a50 | 1597 | unsigned long flags; |
e9baa9d9 | 1598 | bool callback_active; |
8d318a50 LW |
1599 | dma_async_tx_callback callback; |
1600 | void *callback_param; | |
1601 | ||
1602 | spin_lock_irqsave(&d40c->lock, flags); | |
1603 | ||
4226dd86 FB |
1604 | /* Get first entry from the done list */ |
1605 | d40d = d40_first_done(d40c); | |
1606 | if (d40d == NULL) { | |
1607 | /* Check if we have reached here for a cyclic job */ |
1608 | d40d = d40_first_active_get(d40c); | |
1609 | if (d40d == NULL || !d40d->cyclic) | |
1610 | goto err; | |
1611 | } | |
8d318a50 | 1612 | |
0c842b55 | 1613 | if (!d40d->cyclic) |
f7fbce07 | 1614 | dma_cookie_complete(&d40d->txd); |
8d318a50 LW |
1615 | |
1616 | /* | |
1617 | * If a channel is being terminated, pending_tx is set to zero. |
1618 | * This prevents any finished active jobs from returning to the client. |
1619 | */ | |
1620 | if (d40c->pending_tx == 0) { | |
1621 | spin_unlock_irqrestore(&d40c->lock, flags); | |
1622 | return; | |
1623 | } | |
1624 | ||
1625 | /* Callback to client */ | |
e9baa9d9 | 1626 | callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT); |
767a9675 JA |
1627 | callback = d40d->txd.callback; |
1628 | callback_param = d40d->txd.callback_param; | |
1629 | ||
0c842b55 RV |
1630 | if (!d40d->cyclic) { |
1631 | if (async_tx_test_ack(&d40d->txd)) { | |
767a9675 | 1632 | d40_desc_remove(d40d); |
0c842b55 | 1633 | d40_desc_free(d40c, d40d); |
f26e03ad FB |
1634 | } else if (!d40d->is_in_client_list) { |
1635 | d40_desc_remove(d40d); | |
1636 | d40_lcla_free_all(d40c, d40d); | |
1637 | list_add_tail(&d40d->node, &d40c->client); | |
1638 | d40d->is_in_client_list = true; | |
8d318a50 LW |
1639 | } |
1640 | } | |
1641 | ||
1642 | d40c->pending_tx--; | |
1643 | ||
1644 | if (d40c->pending_tx) | |
1645 | tasklet_schedule(&d40c->tasklet); | |
1646 | ||
1647 | spin_unlock_irqrestore(&d40c->lock, flags); | |
1648 | ||
e9baa9d9 | 1649 | if (callback_active && callback) |
8d318a50 LW |
1650 | callback(callback_param); |
1651 | ||
1652 | return; | |
1653 | ||
1bdae6f4 N |
1654 | err: |
1655 | /* Rescue maneuver if receiving double interrupts */ |
8d318a50 LW |
1656 | if (d40c->pending_tx > 0) |
1657 | d40c->pending_tx--; | |
1658 | spin_unlock_irqrestore(&d40c->lock, flags); | |
1659 | } | |
1660 | ||
1661 | static irqreturn_t d40_handle_interrupt(int irq, void *data) | |
1662 | { | |
8d318a50 | 1663 | int i; |
8d318a50 LW |
1664 | u32 idx; |
1665 | u32 row; | |
1666 | long chan = -1; | |
1667 | struct d40_chan *d40c; | |
1668 | unsigned long flags; | |
1669 | struct d40_base *base = data; | |
3cb645dc TL |
1670 | u32 regs[base->gen_dmac.il_size]; |
1671 | struct d40_interrupt_lookup *il = base->gen_dmac.il; | |
1672 | u32 il_size = base->gen_dmac.il_size; | |
8d318a50 LW |
1673 | |
1674 | spin_lock_irqsave(&base->interrupt_lock, flags); | |
1675 | ||
1676 | /* Read interrupt status of both logical and physical channels */ | |
3cb645dc | 1677 | for (i = 0; i < il_size; i++) |
8d318a50 LW |
1678 | regs[i] = readl(base->virtbase + il[i].src); |
1679 | ||
1680 | for (;;) { | |
1681 | ||
1682 | chan = find_next_bit((unsigned long *)regs, | |
3cb645dc | 1683 | BITS_PER_LONG * il_size, chan + 1); |
8d318a50 LW |
1684 | |
1685 | /* No more set bits found? */ | |
3cb645dc | 1686 | if (chan == BITS_PER_LONG * il_size) |
8d318a50 LW |
1687 | break; |
1688 | ||
1689 | row = chan / BITS_PER_LONG; | |
1690 | idx = chan & (BITS_PER_LONG - 1); | |
1691 | ||
8d318a50 LW |
1692 | if (il[row].offset == D40_PHY_CHAN) |
1693 | d40c = base->lookup_phy_chans[idx]; | |
1694 | else | |
1695 | d40c = base->lookup_log_chans[il[row].offset + idx]; | |
53d6d68f FB |
1696 | |
1697 | if (!d40c) { | |
1698 | /* | |
1699 | * No error because this can happen if something else | |
1700 | * in the system is using the channel. | |
1701 | */ | |
1702 | continue; | |
1703 | } | |
1704 | ||
1705 | /* ACK interrupt */ | |
8a3b6e14 | 1706 | writel(BIT(idx), base->virtbase + il[row].clr); |
53d6d68f | 1707 | |
8d318a50 LW |
1708 | spin_lock(&d40c->lock); |
1709 | ||
1710 | if (!il[row].is_error) | |
1711 | dma_tc_handle(d40c); | |
1712 | else | |
6db5a8ba RV |
1713 | d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n", |
1714 | chan, il[row].offset, idx); | |
8d318a50 LW |
1715 | |
1716 | spin_unlock(&d40c->lock); | |
1717 | } | |
1718 | ||
1719 | spin_unlock_irqrestore(&base->interrupt_lock, flags); | |
1720 | ||
1721 | return IRQ_HANDLED; | |
1722 | } | |
1723 | ||
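/*
 * A sketch of the interrupt decode above (the helper is illustrative,
 * not driver API): the per-source status words are scanned as one long
 * bitmap, and each set bit index is split into the status register row
 * and the channel bit inside that row.
 */
static inline void example_decode_irq_bit(long chan, u32 *row, u32 *idx)
{
	*row = chan / BITS_PER_LONG;	   /* which status word */
	*idx = chan & (BITS_PER_LONG - 1); /* bit within that word */
}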
8d318a50 LW |
1724 | static int d40_validate_conf(struct d40_chan *d40c, |
1725 | struct stedma40_chan_cfg *conf) | |
1726 | { | |
1727 | int res = 0; | |
38bdbf02 | 1728 | bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; |
8d318a50 | 1729 | |
0747c7ba | 1730 | if (!conf->dir) { |
6db5a8ba | 1731 | chan_err(d40c, "Invalid direction.\n"); |
0747c7ba LW |
1732 | res = -EINVAL; |
1733 | } | |
1734 | ||
26955c07 LJ |
1735 | if ((is_log && conf->dev_type > d40c->base->num_log_chans) || |
1736 | (!is_log && conf->dev_type > d40c->base->num_phy_chans) || | |
1737 | (conf->dev_type < 0)) { | |
1738 | chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type); | |
0747c7ba LW |
1739 | res = -EINVAL; |
1740 | } | |
1741 | ||
2c2b62d5 | 1742 | if (conf->dir == DMA_DEV_TO_DEV) { |
8d318a50 LW |
1743 | /* |
1744 | * The DMAC HW supports it. Support will be added to this |
1745 | * driver in case any dma client requires it. |
1746 | */ | |
6db5a8ba | 1747 | chan_err(d40c, "periph to periph not supported\n"); |
8d318a50 LW |
1748 | res = -EINVAL; |
1749 | } | |
1750 | ||
d49278e3 | 1751 | if (d40_psize_2_burst_size(is_log, conf->src_info.psize) * |
43f2e1a3 | 1752 | conf->src_info.data_width != |
d49278e3 | 1753 | d40_psize_2_burst_size(is_log, conf->dst_info.psize) * |
43f2e1a3 | 1754 | conf->dst_info.data_width) { |
d49278e3 PF |
1755 | /* |
1756 | * The DMAC hardware only supports | |
1757 | * src (burst x width) == dst (burst x width) | |
1758 | */ | |
1759 | ||
6db5a8ba | 1760 | chan_err(d40c, "src (burst x width) != dst (burst x width)\n"); |
d49278e3 PF |
1761 | res = -EINVAL; |
1762 | } | |
1763 | ||
8d318a50 LW |
1764 | return res; |
1765 | } | |
1766 | ||
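/*
 * Worked example of the invariant checked above (numbers are
 * illustrative): a source doing 8-element bursts of 4-byte words moves
 * 32 bytes per burst, which matches a destination doing 16-element
 * bursts of 2-byte words (also 32 bytes per burst); a mismatch such as
 * 8 x 4 on one side and 4 x 2 on the other would be rejected.
 */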
5cd326fd N |
1767 | static bool d40_alloc_mask_set(struct d40_phy_res *phy, |
1768 | bool is_src, int log_event_line, bool is_log, | |
1769 | bool *first_user) | |
8d318a50 LW |
1770 | { |
1771 | unsigned long flags; | |
1772 | spin_lock_irqsave(&phy->lock, flags); | |
5cd326fd N |
1773 | |
1774 | *first_user = ((phy->allocated_src | phy->allocated_dst) | |
1775 | == D40_ALLOC_FREE); | |
1776 | ||
4aed79b2 | 1777 | if (!is_log) { |
8d318a50 LW |
1778 | /* Physical interrupts are masked per physical full channel */ |
1779 | if (phy->allocated_src == D40_ALLOC_FREE && | |
1780 | phy->allocated_dst == D40_ALLOC_FREE) { | |
1781 | phy->allocated_dst = D40_ALLOC_PHY; | |
1782 | phy->allocated_src = D40_ALLOC_PHY; | |
1783 | goto found; | |
1784 | } else | |
1785 | goto not_found; | |
1786 | } | |
1787 | ||
1788 | /* Logical channel */ | |
1789 | if (is_src) { | |
1790 | if (phy->allocated_src == D40_ALLOC_PHY) | |
1791 | goto not_found; | |
1792 | ||
1793 | if (phy->allocated_src == D40_ALLOC_FREE) | |
1794 | phy->allocated_src = D40_ALLOC_LOG_FREE; | |
1795 | ||
8a3b6e14 LJ |
1796 | if (!(phy->allocated_src & BIT(log_event_line))) { |
1797 | phy->allocated_src |= BIT(log_event_line); | |
8d318a50 LW |
1798 | goto found; |
1799 | } else | |
1800 | goto not_found; | |
1801 | } else { | |
1802 | if (phy->allocated_dst == D40_ALLOC_PHY) | |
1803 | goto not_found; | |
1804 | ||
1805 | if (phy->allocated_dst == D40_ALLOC_FREE) | |
1806 | phy->allocated_dst = D40_ALLOC_LOG_FREE; | |
1807 | ||
8a3b6e14 LJ |
1808 | if (!(phy->allocated_dst & BIT(log_event_line))) { |
1809 | phy->allocated_dst |= BIT(log_event_line); | |
8d318a50 LW |
1810 | goto found; |
1811 | } else | |
1812 | goto not_found; | |
1813 | } | |
1814 | ||
1815 | not_found: | |
1816 | spin_unlock_irqrestore(&phy->lock, flags); | |
1817 | return false; | |
1818 | found: | |
1819 | spin_unlock_irqrestore(&phy->lock, flags); | |
1820 | return true; | |
1821 | } | |
1822 | ||
1823 | static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, | |
1824 | int log_event_line) | |
1825 | { | |
1826 | unsigned long flags; | |
1827 | bool is_free = false; | |
1828 | ||
1829 | spin_lock_irqsave(&phy->lock, flags); | |
1830 | if (!log_event_line) { | |
8d318a50 LW |
1831 | phy->allocated_dst = D40_ALLOC_FREE; |
1832 | phy->allocated_src = D40_ALLOC_FREE; | |
1833 | is_free = true; | |
1834 | goto out; | |
1835 | } | |
1836 | ||
1837 | /* Logical channel */ | |
1838 | if (is_src) { | |
8a3b6e14 | 1839 | phy->allocated_src &= ~BIT(log_event_line); |
8d318a50 LW |
1840 | if (phy->allocated_src == D40_ALLOC_LOG_FREE) |
1841 | phy->allocated_src = D40_ALLOC_FREE; | |
1842 | } else { | |
8a3b6e14 | 1843 | phy->allocated_dst &= ~BIT(log_event_line); |
8d318a50 LW |
1844 | if (phy->allocated_dst == D40_ALLOC_LOG_FREE) |
1845 | phy->allocated_dst = D40_ALLOC_FREE; | |
1846 | } | |
1847 | ||
1848 | is_free = ((phy->allocated_src | phy->allocated_dst) == | |
1849 | D40_ALLOC_FREE); | |
1850 | ||
1851 | out: | |
1852 | spin_unlock_irqrestore(&phy->lock, flags); | |
1853 | ||
1854 | return is_free; | |
1855 | } | |
1856 | ||
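/*
 * A sketch of the allocation encoding manipulated above (the constants
 * are from this file, the helper itself is illustrative): each half
 * channel is either entirely free (D40_ALLOC_FREE), claimed by a
 * physical channel (D40_ALLOC_PHY), or acts as a bitmap of logical
 * event lines with one bit per line.
 */
static inline bool example_log_line_is_free(u32 allocated, int log_event_line)
{
	if (allocated == D40_ALLOC_PHY)
		return false;	/* whole channel taken physically */
	if (allocated == D40_ALLOC_FREE)
		return true;	/* nothing allocated yet */
	return !(allocated & BIT(log_event_line));
}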
5cd326fd | 1857 | static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) |
8d318a50 | 1858 | { |
26955c07 | 1859 | int dev_type = d40c->dma_cfg.dev_type; |
8d318a50 LW |
1860 | int event_group; |
1861 | int event_line; | |
1862 | struct d40_phy_res *phys; | |
1863 | int i; | |
1864 | int j; | |
1865 | int log_num; | |
f000df8c | 1866 | int num_phy_chans; |
8d318a50 | 1867 | bool is_src; |
38bdbf02 | 1868 | bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL; |
8d318a50 LW |
1869 | |
1870 | phys = d40c->base->phy_res; | |
f000df8c | 1871 | num_phy_chans = d40c->base->num_phy_chans; |
8d318a50 | 1872 | |
2c2b62d5 | 1873 | if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) { |
8d318a50 LW |
1874 | log_num = 2 * dev_type; |
1875 | is_src = true; | |
2c2b62d5 LJ |
1876 | } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || |
1877 | d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { | |
8d318a50 | 1878 | /* dst event lines are used for logical memcpy */ |
8d318a50 LW |
1879 | log_num = 2 * dev_type + 1; |
1880 | is_src = false; | |
1881 | } else | |
1882 | return -EINVAL; | |
1883 | ||
1884 | event_group = D40_TYPE_TO_GROUP(dev_type); | |
1885 | event_line = D40_TYPE_TO_EVENT(dev_type); | |
1886 | ||
1887 | if (!is_log) { | |
2c2b62d5 | 1888 | if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { |
8d318a50 | 1889 | /* Find physical half channel */ |
f000df8c GB |
1890 | if (d40c->dma_cfg.use_fixed_channel) { |
1891 | i = d40c->dma_cfg.phy_channel; | |
4aed79b2 | 1892 | if (d40_alloc_mask_set(&phys[i], is_src, |
5cd326fd N |
1893 | 0, is_log, |
1894 | first_phy_user)) | |
8d318a50 | 1895 | goto found_phy; |
f000df8c GB |
1896 | } else { |
1897 | for (i = 0; i < num_phy_chans; i++) { | |
1898 | if (d40_alloc_mask_set(&phys[i], is_src, | |
1899 | 0, is_log, | |
1900 | first_phy_user)) | |
1901 | goto found_phy; | |
1902 | } | |
8d318a50 LW |
1903 | } |
1904 | } else | |
1905 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { | |
1906 | int phy_num = j + event_group * 2; | |
1907 | for (i = phy_num; i < phy_num + 2; i++) { | |
508849ad LW |
1908 | if (d40_alloc_mask_set(&phys[i], |
1909 | is_src, | |
1910 | 0, | |
5cd326fd N |
1911 | is_log, |
1912 | first_phy_user)) | |
8d318a50 LW |
1913 | goto found_phy; |
1914 | } | |
1915 | } | |
1916 | return -EINVAL; | |
1917 | found_phy: | |
1918 | d40c->phy_chan = &phys[i]; | |
1919 | d40c->log_num = D40_PHY_CHAN; | |
1920 | goto out; | |
1921 | } | |
1922 | if (dev_type == -1) | |
1923 | return -EINVAL; | |
1924 | ||
1925 | /* Find logical channel */ | |
1926 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { | |
1927 | int phy_num = j + event_group * 2; | |
5cd326fd N |
1928 | |
1929 | if (d40c->dma_cfg.use_fixed_channel) { | |
1930 | i = d40c->dma_cfg.phy_channel; | |
1931 | ||
1932 | if ((i != phy_num) && (i != phy_num + 1)) { | |
1933 | dev_err(chan2dev(d40c), | |
1934 | "invalid fixed phy channel %d\n", i); | |
1935 | return -EINVAL; | |
1936 | } | |
1937 | ||
1938 | if (d40_alloc_mask_set(&phys[i], is_src, event_line, | |
1939 | is_log, first_phy_user)) | |
1940 | goto found_log; | |
1941 | ||
1942 | dev_err(chan2dev(d40c), | |
1943 | "could not allocate fixed phy channel %d\n", i); | |
1944 | return -EINVAL; | |
1945 | } | |
1946 | ||
8d318a50 LW |
1947 | /* |
1948 | * Spread logical channels across all available physical |
1949 | * channels rather than packing every logical channel onto |
1950 | * the first available physical channel. |
1951 | */ | |
1952 | if (is_src) { | |
1953 | for (i = phy_num; i < phy_num + 2; i++) { | |
1954 | if (d40_alloc_mask_set(&phys[i], is_src, | |
5cd326fd N |
1955 | event_line, is_log, |
1956 | first_phy_user)) | |
8d318a50 LW |
1957 | goto found_log; |
1958 | } | |
1959 | } else { | |
1960 | for (i = phy_num + 1; i >= phy_num; i--) { | |
1961 | if (d40_alloc_mask_set(&phys[i], is_src, | |
5cd326fd N |
1962 | event_line, is_log, |
1963 | first_phy_user)) | |
8d318a50 LW |
1964 | goto found_log; |
1965 | } | |
1966 | } | |
1967 | } | |
1968 | return -EINVAL; | |
1969 | ||
1970 | found_log: | |
1971 | d40c->phy_chan = &phys[i]; | |
1972 | d40c->log_num = log_num; | |
1973 | out: | |
1974 | ||
1975 | if (is_log) | |
1976 | d40c->base->lookup_log_chans[d40c->log_num] = d40c; | |
1977 | else | |
1978 | d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c; | |
1979 | ||
1980 | return 0; | |
1981 | ||
1982 | } | |
1983 | ||
8d318a50 LW |
1984 | static int d40_config_memcpy(struct d40_chan *d40c) |
1985 | { | |
1986 | dma_cap_mask_t cap = d40c->chan.device->cap_mask; | |
1987 | ||
1988 | if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { | |
29027a1e | 1989 | d40c->dma_cfg = dma40_memcpy_conf_log; |
26955c07 | 1990 | d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id]; |
8d318a50 | 1991 | |
9b233f9b LJ |
1992 | d40_log_cfg(&d40c->dma_cfg, |
1993 | &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); | |
1994 | ||
8d318a50 LW |
1995 | } else if (dma_has_cap(DMA_MEMCPY, cap) && |
1996 | dma_has_cap(DMA_SLAVE, cap)) { | |
29027a1e | 1997 | d40c->dma_cfg = dma40_memcpy_conf_phy; |
57e65ad7 LJ |
1998 | |
1999 | /* Generate interrupt at end of transfer or relink. */ |
2000 | d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS); | |
2001 | ||
2002 | /* Generate interrupt on error. */ | |
2003 | d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); | |
2004 | d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); | |
2005 | ||
8d318a50 | 2006 | } else { |
6db5a8ba | 2007 | chan_err(d40c, "No memcpy\n"); |
8d318a50 LW |
2008 | return -EINVAL; |
2009 | } | |
2010 | ||
2011 | return 0; | |
2012 | } | |
2013 | ||
8d318a50 LW |
2014 | static int d40_free_dma(struct d40_chan *d40c) |
2015 | { | |
2016 | ||
2017 | int res = 0; | |
26955c07 | 2018 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); |
8d318a50 LW |
2019 | struct d40_phy_res *phy = d40c->phy_chan; |
2020 | bool is_src; | |
2021 | ||
2022 | /* Terminate all queued and active transfers */ | |
2023 | d40_term_all(d40c); | |
2024 | ||
2025 | if (phy == NULL) { | |
6db5a8ba | 2026 | chan_err(d40c, "phy == null\n"); |
8d318a50 LW |
2027 | return -EINVAL; |
2028 | } | |
2029 | ||
2030 | if (phy->allocated_src == D40_ALLOC_FREE && | |
2031 | phy->allocated_dst == D40_ALLOC_FREE) { | |
6db5a8ba | 2032 | chan_err(d40c, "channel already free\n"); |
8d318a50 LW |
2033 | return -EINVAL; |
2034 | } | |
2035 | ||
2c2b62d5 LJ |
2036 | if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || |
2037 | d40c->dma_cfg.dir == DMA_MEM_TO_MEM) | |
8d318a50 | 2038 | is_src = false; |
2c2b62d5 | 2039 | else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) |
8d318a50 | 2040 | is_src = true; |
26955c07 | 2041 | else { |
6db5a8ba | 2042 | chan_err(d40c, "Unknown direction\n"); |
8d318a50 LW |
2043 | return -EINVAL; |
2044 | } | |
2045 | ||
7fb3e75e | 2046 | pm_runtime_get_sync(d40c->base->dev); |
1bdae6f4 | 2047 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); |
d181b3a8 | 2048 | if (res) { |
1bdae6f4 | 2049 | chan_err(d40c, "stop failed\n"); |
7fb3e75e | 2050 | goto out; |
d181b3a8 JA |
2051 | } |
2052 | ||
1bdae6f4 | 2053 | d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0); |
8d318a50 | 2054 | |
1bdae6f4 | 2055 | if (chan_is_logical(d40c)) |
8d318a50 | 2056 | d40c->base->lookup_log_chans[d40c->log_num] = NULL; |
1bdae6f4 N |
2057 | else |
2058 | d40c->base->lookup_phy_chans[phy->num] = NULL; | |
7fb3e75e N |
2059 | |
2060 | if (d40c->busy) { | |
2061 | pm_runtime_mark_last_busy(d40c->base->dev); | |
2062 | pm_runtime_put_autosuspend(d40c->base->dev); | |
2063 | } | |
2064 | ||
2065 | d40c->busy = false; | |
8d318a50 | 2066 | d40c->phy_chan = NULL; |
ce2ca125 | 2067 | d40c->configured = false; |
7fb3e75e | 2068 | out: |
8d318a50 | 2069 | |
7fb3e75e N |
2070 | pm_runtime_mark_last_busy(d40c->base->dev); |
2071 | pm_runtime_put_autosuspend(d40c->base->dev); | |
2072 | return res; | |
8d318a50 LW |
2073 | } |
2074 | ||
a5ebca47 JA |
2075 | static bool d40_is_paused(struct d40_chan *d40c) |
2076 | { | |
8ca84687 | 2077 | void __iomem *chanbase = chan_base(d40c); |
a5ebca47 JA |
2078 | bool is_paused = false; |
2079 | unsigned long flags; | |
2080 | void __iomem *active_reg; | |
2081 | u32 status; | |
26955c07 | 2082 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); |
a5ebca47 JA |
2083 | |
2084 | spin_lock_irqsave(&d40c->lock, flags); | |
2085 | ||
724a8577 | 2086 | if (chan_is_physical(d40c)) { |
a5ebca47 JA |
2087 | if (d40c->phy_chan->num % 2 == 0) |
2088 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; | |
2089 | else | |
2090 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; | |
2091 | ||
2092 | status = (readl(active_reg) & | |
2093 | D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> | |
2094 | D40_CHAN_POS(d40c->phy_chan->num); | |
2095 | if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) | |
2096 | is_paused = true; | |
2097 | ||
2098 | goto _exit; | |
2099 | } | |
2100 | ||
2c2b62d5 LJ |
2101 | if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || |
2102 | d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { | |
8ca84687 | 2103 | status = readl(chanbase + D40_CHAN_REG_SDLNK); |
2c2b62d5 | 2104 | } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) { |
8ca84687 | 2105 | status = readl(chanbase + D40_CHAN_REG_SSLNK); |
9dbfbd35 | 2106 | } else { |
6db5a8ba | 2107 | chan_err(d40c, "Unknown direction\n"); |
a5ebca47 JA |
2108 | goto _exit; |
2109 | } | |
9dbfbd35 | 2110 | |
a5ebca47 JA |
2111 | status = (status & D40_EVENTLINE_MASK(event)) >> |
2112 | D40_EVENTLINE_POS(event); | |
2113 | ||
2114 | if (status != D40_DMA_RUN) | |
2115 | is_paused = true; | |
a5ebca47 JA |
2116 | _exit: |
2117 | spin_unlock_irqrestore(&d40c->lock, flags); | |
2118 | return is_paused; | |
2119 | ||
2120 | } | |
2121 | ||
8d318a50 LW |
2122 | static u32 stedma40_residue(struct dma_chan *chan) |
2123 | { | |
2124 | struct d40_chan *d40c = | |
2125 | container_of(chan, struct d40_chan, chan); | |
2126 | u32 bytes_left; | |
2127 | unsigned long flags; | |
2128 | ||
2129 | spin_lock_irqsave(&d40c->lock, flags); | |
2130 | bytes_left = d40_residue(d40c); | |
2131 | spin_unlock_irqrestore(&d40c->lock, flags); | |
2132 | ||
2133 | return bytes_left; | |
2134 | } | |
2135 | ||
3e3a0763 RV |
2136 | static int |
2137 | d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, | |
2138 | struct scatterlist *sg_src, struct scatterlist *sg_dst, | |
822c5676 RV |
2139 | unsigned int sg_len, dma_addr_t src_dev_addr, |
2140 | dma_addr_t dst_dev_addr) | |
3e3a0763 RV |
2141 | { |
2142 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | |
2143 | struct stedma40_half_channel_info *src_info = &cfg->src_info; | |
2144 | struct stedma40_half_channel_info *dst_info = &cfg->dst_info; | |
5ed04b85 | 2145 | int ret; |
3e3a0763 | 2146 | |
5ed04b85 RV |
2147 | ret = d40_log_sg_to_lli(sg_src, sg_len, |
2148 | src_dev_addr, | |
2149 | desc->lli_log.src, | |
2150 | chan->log_def.lcsp1, | |
2151 | src_info->data_width, | |
2152 | dst_info->data_width); | |
2153 | ||
2154 | ret = d40_log_sg_to_lli(sg_dst, sg_len, | |
2155 | dst_dev_addr, | |
2156 | desc->lli_log.dst, | |
2157 | chan->log_def.lcsp3, | |
2158 | dst_info->data_width, | |
2159 | src_info->data_width); | |
2160 | ||
2161 | return ret < 0 ? ret : 0; | |
3e3a0763 RV |
2162 | } |
2163 | ||
2164 | static int | |
2165 | d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, | |
2166 | struct scatterlist *sg_src, struct scatterlist *sg_dst, | |
822c5676 RV |
2167 | unsigned int sg_len, dma_addr_t src_dev_addr, |
2168 | dma_addr_t dst_dev_addr) | |
3e3a0763 | 2169 | { |
3e3a0763 RV |
2170 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; |
2171 | struct stedma40_half_channel_info *src_info = &cfg->src_info; | |
2172 | struct stedma40_half_channel_info *dst_info = &cfg->dst_info; | |
0c842b55 | 2173 | unsigned long flags = 0; |
3e3a0763 RV |
2174 | int ret; |
2175 | ||
0c842b55 RV |
2176 | if (desc->cyclic) |
2177 | flags |= LLI_CYCLIC | LLI_TERM_INT; | |
2178 | ||
3e3a0763 RV |
2179 | ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr, |
2180 | desc->lli_phy.src, | |
2181 | virt_to_phys(desc->lli_phy.src), | |
2182 | chan->src_def_cfg, | |
0c842b55 | 2183 | src_info, dst_info, flags); |
3e3a0763 RV |
2184 | |
2185 | ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr, | |
2186 | desc->lli_phy.dst, | |
2187 | virt_to_phys(desc->lli_phy.dst), | |
2188 | chan->dst_def_cfg, | |
0c842b55 | 2189 | dst_info, src_info, flags); |
3e3a0763 RV |
2190 | |
2191 | dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr, | |
2192 | desc->lli_pool.size, DMA_TO_DEVICE); | |
2193 | ||
2194 | return ret < 0 ? ret : 0; | |
2195 | } | |
2196 | ||
5f81158f RV |
2197 | static struct d40_desc * |
2198 | d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, | |
2199 | unsigned int sg_len, unsigned long dma_flags) | |
2200 | { | |
2201 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | |
2202 | struct d40_desc *desc; | |
dbd88788 | 2203 | int ret; |
5f81158f RV |
2204 | |
2205 | desc = d40_desc_get(chan); | |
2206 | if (!desc) | |
2207 | return NULL; | |
2208 | ||
2209 | desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width, | |
2210 | cfg->dst_info.data_width); | |
2211 | if (desc->lli_len < 0) { | |
2212 | chan_err(chan, "Unaligned size\n"); | |
dbd88788 RV |
2213 | goto err; |
2214 | } | |
5f81158f | 2215 | |
dbd88788 RV |
2216 | ret = d40_pool_lli_alloc(chan, desc, desc->lli_len); |
2217 | if (ret < 0) { | |
2218 | chan_err(chan, "Could not allocate lli\n"); | |
2219 | goto err; | |
5f81158f RV |
2220 | } |
2221 | ||
2222 | desc->lli_current = 0; | |
2223 | desc->txd.flags = dma_flags; | |
2224 | desc->txd.tx_submit = d40_tx_submit; | |
2225 | ||
2226 | dma_async_tx_descriptor_init(&desc->txd, &chan->chan); | |
2227 | ||
2228 | return desc; | |
dbd88788 RV |
2229 | |
2230 | err: | |
2231 | d40_desc_free(chan, desc); | |
2232 | return NULL; | |
5f81158f RV |
2233 | } |
2234 | ||
cade1d30 RV |
2235 | static struct dma_async_tx_descriptor * |
2236 | d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, | |
2237 | struct scatterlist *sg_dst, unsigned int sg_len, | |
db8196df | 2238 | enum dma_transfer_direction direction, unsigned long dma_flags) |
cade1d30 RV |
2239 | { |
2240 | struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); | |
822c5676 RV |
2241 | dma_addr_t src_dev_addr = 0; |
2242 | dma_addr_t dst_dev_addr = 0; | |
cade1d30 | 2243 | struct d40_desc *desc; |
2a614340 | 2244 | unsigned long flags; |
cade1d30 | 2245 | int ret; |
8d318a50 | 2246 | |
cade1d30 RV |
2247 | if (!chan->phy_chan) { |
2248 | chan_err(chan, "Cannot prepare unallocated channel\n"); | |
2249 | return NULL; | |
0d0f6b8b JA |
2250 | } |
2251 | ||
cade1d30 | 2252 | spin_lock_irqsave(&chan->lock, flags); |
8d318a50 | 2253 | |
cade1d30 RV |
2254 | desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); |
2255 | if (desc == NULL) | |
8d318a50 LW |
2256 | goto err; |
2257 | ||
0c842b55 RV |
2258 | if (sg_next(&sg_src[sg_len - 1]) == sg_src) |
2259 | desc->cyclic = true; | |
2260 | ||
ef9c89b3 LJ |
2261 | if (direction == DMA_DEV_TO_MEM) |
2262 | src_dev_addr = chan->runtime_addr; | |
2263 | else if (direction == DMA_MEM_TO_DEV) | |
2264 | dst_dev_addr = chan->runtime_addr; | |
cade1d30 RV |
2265 | |
2266 | if (chan_is_logical(chan)) | |
2267 | ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, | |
822c5676 | 2268 | sg_len, src_dev_addr, dst_dev_addr); |
cade1d30 RV |
2269 | else |
2270 | ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst, | |
822c5676 | 2271 | sg_len, src_dev_addr, dst_dev_addr); |
cade1d30 RV |
2272 | |
2273 | if (ret) { | |
2274 | chan_err(chan, "Failed to prepare %s sg job: %d\n", | |
2275 | chan_is_logical(chan) ? "log" : "phy", ret); | |
2276 | goto err; | |
8d318a50 LW |
2277 | } |
2278 | ||
82babbb3 PF |
2279 | /* |
2280 | * Add the descriptor to the prepare queue so that it can be |
2281 | * freed later in terminate_all. |
2282 | */ | |
2283 | list_add_tail(&desc->node, &chan->prepare_queue); | |
2284 | ||
cade1d30 RV |
2285 | spin_unlock_irqrestore(&chan->lock, flags); |
2286 | ||
2287 | return &desc->txd; | |
8d318a50 | 2288 | |
8d318a50 | 2289 | err: |
cade1d30 RV |
2290 | if (desc) |
2291 | d40_desc_free(chan, desc); | |
2292 | spin_unlock_irqrestore(&chan->lock, flags); | |
8d318a50 LW |
2293 | return NULL; |
2294 | } | |
8d318a50 LW |
2295 | |
2296 | bool stedma40_filter(struct dma_chan *chan, void *data) | |
2297 | { | |
2298 | struct stedma40_chan_cfg *info = data; | |
2299 | struct d40_chan *d40c = | |
2300 | container_of(chan, struct d40_chan, chan); | |
2301 | int err; | |
2302 | ||
2303 | if (data) { | |
2304 | err = d40_validate_conf(d40c, info); | |
2305 | if (!err) | |
2306 | d40c->dma_cfg = *info; | |
2307 | } else | |
2308 | err = d40_config_memcpy(d40c); | |
2309 | ||
ce2ca125 RV |
2310 | if (!err) |
2311 | d40c->configured = true; | |
2312 | ||
8d318a50 LW |
2313 | return err == 0; |
2314 | } | |
2315 | EXPORT_SYMBOL(stedma40_filter); | |
2316 | ||
ac2c0a38 RV |
2317 | static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) |
2318 | { | |
2319 | bool realtime = d40c->dma_cfg.realtime; | |
2320 | bool highprio = d40c->dma_cfg.high_priority; | |
3cb645dc | 2321 | u32 rtreg; |
ac2c0a38 RV |
2322 | u32 event = D40_TYPE_TO_EVENT(dev_type); |
2323 | u32 group = D40_TYPE_TO_GROUP(dev_type); | |
8a3b6e14 | 2324 | u32 bit = BIT(event); |
ccc3d697 | 2325 | u32 prioreg; |
3cb645dc | 2326 | struct d40_gen_dmac *dmac = &d40c->base->gen_dmac; |
ccc3d697 | 2327 | |
3cb645dc | 2328 | rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear; |
ccc3d697 RV |
2329 | /* |
2330 | * Due to a hardware bug, in some cases a logical channel triggered by | |
2331 | * a high priority destination event line can generate extra packet | |
2332 | * transactions. | |
2333 | * | |
2334 | * The workaround is to not set the high priority level for the | |
2335 | * destination event lines that trigger logical channels. | |
2336 | */ | |
2337 | if (!src && chan_is_logical(d40c)) | |
2338 | highprio = false; | |
2339 | ||
3cb645dc | 2340 | prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear; |
ac2c0a38 RV |
2341 | |
2342 | /* Destination event lines are stored in the upper halfword */ | |
2343 | if (!src) | |
2344 | bit <<= 16; | |
2345 | ||
2346 | writel(bit, d40c->base->virtbase + prioreg + group * 4); | |
2347 | writel(bit, d40c->base->virtbase + rtreg + group * 4); | |
2348 | } | |
2349 | ||
2350 | static void d40_set_prio_realtime(struct d40_chan *d40c) | |
2351 | { | |
2352 | if (d40c->base->rev < 3) | |
2353 | return; | |
2354 | ||
2c2b62d5 LJ |
2355 | if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) || |
2356 | (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) | |
26955c07 | 2357 | __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true); |
ac2c0a38 | 2358 | |
2c2b62d5 LJ |
2359 | if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) || |
2360 | (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) | |
26955c07 | 2361 | __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false); |
ac2c0a38 RV |
2362 | } |
2363 | ||
fa332de5 LJ |
2364 | #define D40_DT_FLAGS_MODE(flags) ((flags >> 0) & 0x1) |
2365 | #define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1) | |
2366 | #define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1) | |
2367 | #define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1) | |
bddd5a2b | 2368 | #define D40_DT_FLAGS_HIGH_PRIO(flags) ((flags >> 4) & 0x1) |
fa332de5 LJ |
2369 | |
2370 | static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec, | |
2371 | struct of_dma *ofdma) | |
2372 | { | |
2373 | struct stedma40_chan_cfg cfg; | |
2374 | dma_cap_mask_t cap; | |
2375 | u32 flags; | |
2376 | ||
2377 | memset(&cfg, 0, sizeof(struct stedma40_chan_cfg)); | |
2378 | ||
2379 | dma_cap_zero(cap); | |
2380 | dma_cap_set(DMA_SLAVE, cap); | |
2381 | ||
2382 | cfg.dev_type = dma_spec->args[0]; | |
2383 | flags = dma_spec->args[2]; | |
2384 | ||
2385 | switch (D40_DT_FLAGS_MODE(flags)) { | |
2386 | case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break; | |
2387 | case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break; | |
2388 | } | |
2389 | ||
2390 | switch (D40_DT_FLAGS_DIR(flags)) { | |
2391 | case 0: | |
2c2b62d5 | 2392 | cfg.dir = DMA_MEM_TO_DEV; |
fa332de5 LJ |
2393 | cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags); |
2394 | break; | |
2395 | case 1: | |
2c2b62d5 | 2396 | cfg.dir = DMA_DEV_TO_MEM; |
fa332de5 LJ |
2397 | cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags); |
2398 | break; | |
2399 | } | |
2400 | ||
2401 | if (D40_DT_FLAGS_FIXED_CHAN(flags)) { | |
2402 | cfg.phy_channel = dma_spec->args[1]; | |
2403 | cfg.use_fixed_channel = true; | |
2404 | } | |
2405 | ||
bddd5a2b LJ |
2406 | if (D40_DT_FLAGS_HIGH_PRIO(flags)) |
2407 | cfg.high_priority = true; | |
2408 | ||
fa332de5 LJ |
2409 | return dma_request_channel(cap, stedma40_filter, &cfg); |
2410 | } | |
2411 | ||
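/*
 * Hypothetical device tree usage of the xlate above; the specifier
 * cells are <dev_type phy_channel flags>, with the flag bits decoded by
 * the D40_DT_FLAGS_* macros (the device and event line numbers below
 * are made up for illustration):
 *
 *	uart0: serial@80120000 {
 *		dmas = <&dma 13 0 0x2>,	// DEV_TO_MEM (bit 1), logical
 *		       <&dma 13 0 0x0>;	// MEM_TO_DEV, logical
 *		dma-names = "rx", "tx";
 *	};
 */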
8d318a50 LW |
2412 | /* DMA ENGINE functions */ |
2413 | static int d40_alloc_chan_resources(struct dma_chan *chan) | |
2414 | { | |
2415 | int err; | |
2416 | unsigned long flags; | |
2417 | struct d40_chan *d40c = | |
2418 | container_of(chan, struct d40_chan, chan); | |
ef1872ec | 2419 | bool is_free_phy; |
8d318a50 LW |
2420 | spin_lock_irqsave(&d40c->lock, flags); |
2421 | ||
d3ee98cd | 2422 | dma_cookie_init(chan); |
8d318a50 | 2423 | |
ce2ca125 RV |
2424 | /* If no dma configuration is set, use the default configuration (memcpy) */ |
2425 | if (!d40c->configured) { | |
8d318a50 | 2426 | err = d40_config_memcpy(d40c); |
ff0b12ba | 2427 | if (err) { |
6db5a8ba | 2428 | chan_err(d40c, "Failed to configure memcpy channel\n"); |
ff0b12ba JA |
2429 | goto fail; |
2430 | } | |
8d318a50 LW |
2431 | } |
2432 | ||
5cd326fd | 2433 | err = d40_allocate_channel(d40c, &is_free_phy); |
8d318a50 | 2434 | if (err) { |
6db5a8ba | 2435 | chan_err(d40c, "Failed to allocate channel\n"); |
7fb3e75e | 2436 | d40c->configured = false; |
ff0b12ba | 2437 | goto fail; |
8d318a50 LW |
2438 | } |
2439 | ||
7fb3e75e | 2440 | pm_runtime_get_sync(d40c->base->dev); |
ef1872ec | 2441 | |
ac2c0a38 RV |
2442 | d40_set_prio_realtime(d40c); |
2443 | ||
724a8577 | 2444 | if (chan_is_logical(d40c)) { |
2c2b62d5 | 2445 | if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) |
ef1872ec | 2446 | d40c->lcpa = d40c->base->lcpa_base + |
26955c07 | 2447 | d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE; |
ef1872ec LW |
2448 | else |
2449 | d40c->lcpa = d40c->base->lcpa_base + | |
26955c07 | 2450 | d40c->dma_cfg.dev_type * |
f26e03ad | 2451 | D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; |
9778256b LJ |
2452 | |
2453 | /* Unmask the Global Interrupt Mask. */ | |
2454 | d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); | |
2455 | d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); | |
ef1872ec LW |
2456 | } |
2457 | ||
5cd326fd N |
2458 | dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", |
2459 | chan_is_logical(d40c) ? "logical" : "physical", | |
2460 | d40c->phy_chan->num, | |
2461 | d40c->dma_cfg.use_fixed_channel ? ", fixed" : ""); | |
2462 | ||
2463 | ||
ef1872ec LW |
2464 | /* |
2465 | * Only write channel configuration to the DMA if the physical | |
2466 | * resource is free. In case of multiple logical channels | |
2467 | * on the same physical resource, only the first write is necessary. | |
2468 | */ | |
b55912c6 JA |
2469 | if (is_free_phy) |
2470 | d40_config_write(d40c); | |
ff0b12ba | 2471 | fail: |
7fb3e75e N |
2472 | pm_runtime_mark_last_busy(d40c->base->dev); |
2473 | pm_runtime_put_autosuspend(d40c->base->dev); | |
8d318a50 | 2474 | spin_unlock_irqrestore(&d40c->lock, flags); |
ff0b12ba | 2475 | return err; |
8d318a50 LW |
2476 | } |
2477 | ||
2478 | static void d40_free_chan_resources(struct dma_chan *chan) | |
2479 | { | |
2480 | struct d40_chan *d40c = | |
2481 | container_of(chan, struct d40_chan, chan); | |
2482 | int err; | |
2483 | unsigned long flags; | |
2484 | ||
0d0f6b8b | 2485 | if (d40c->phy_chan == NULL) { |
6db5a8ba | 2486 | chan_err(d40c, "Cannot free unallocated channel\n"); |
0d0f6b8b JA |
2487 | return; |
2488 | } | |
2489 | ||
8d318a50 LW |
2490 | spin_lock_irqsave(&d40c->lock, flags); |
2491 | ||
2492 | err = d40_free_dma(d40c); | |
2493 | ||
2494 | if (err) | |
6db5a8ba | 2495 | chan_err(d40c, "Failed to free channel\n"); |
8d318a50 LW |
2496 | spin_unlock_irqrestore(&d40c->lock, flags); |
2497 | } | |
2498 | ||
2499 | static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |
2500 | dma_addr_t dst, | |
2501 | dma_addr_t src, | |
2502 | size_t size, | |
2a614340 | 2503 | unsigned long dma_flags) |
8d318a50 | 2504 | { |
95944c6e RV |
2505 | struct scatterlist dst_sg; |
2506 | struct scatterlist src_sg; | |
8d318a50 | 2507 | |
95944c6e RV |
2508 | sg_init_table(&dst_sg, 1); |
2509 | sg_init_table(&src_sg, 1); | |
8d318a50 | 2510 | |
95944c6e RV |
2511 | sg_dma_address(&dst_sg) = dst; |
2512 | sg_dma_address(&src_sg) = src; | |
8d318a50 | 2513 | |
95944c6e RV |
2514 | sg_dma_len(&dst_sg) = size; |
2515 | sg_dma_len(&src_sg) = size; | |
8d318a50 | 2516 | |
de6b641e SA |
2517 | return d40_prep_sg(chan, &src_sg, &dst_sg, 1, |
2518 | DMA_MEM_TO_MEM, dma_flags); | |
8d318a50 LW |
2519 | } |
2520 | ||
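/*
 * A minimal, hypothetical client-side sketch of driving the memcpy path
 * above through the generic dmaengine API; "chan" is assumed to come
 * from dma_request_channel() and error handling is trimmed.
 */
static int example_issue_memcpy(struct dma_chan *chan, dma_addr_t dst,
				dma_addr_t src, size_t size)
{
	struct dma_async_tx_descriptor *txd;

	/* Ends up in d40_prep_memcpy() via the device_prep_dma_memcpy hook */
	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, size,
						   DMA_PREP_INTERRUPT);
	if (!txd)
		return -EBUSY;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}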
0d688662 | 2521 | static struct dma_async_tx_descriptor * |
cade1d30 RV |
2522 | d40_prep_memcpy_sg(struct dma_chan *chan, |
2523 | struct scatterlist *dst_sg, unsigned int dst_nents, | |
2524 | struct scatterlist *src_sg, unsigned int src_nents, | |
2525 | unsigned long dma_flags) | |
0d688662 IS |
2526 | { |
2527 | if (dst_nents != src_nents) | |
2528 | return NULL; | |
2529 | ||
de6b641e SA |
2530 | return d40_prep_sg(chan, src_sg, dst_sg, src_nents, |
2531 | DMA_MEM_TO_MEM, dma_flags); | |
00ac0341 RV |
2532 | } |
2533 | ||
f26e03ad FB |
2534 | static struct dma_async_tx_descriptor * |
2535 | d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |
2536 | unsigned int sg_len, enum dma_transfer_direction direction, | |
2537 | unsigned long dma_flags, void *context) | |
8d318a50 | 2538 | { |
a725dcc0 | 2539 | if (!is_slave_direction(direction)) |
00ac0341 RV |
2540 | return NULL; |
2541 | ||
cade1d30 | 2542 | return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); |
8d318a50 LW |
2543 | } |
2544 | ||
0c842b55 RV |
2545 | static struct dma_async_tx_descriptor * |
2546 | dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | |
2547 | size_t buf_len, size_t period_len, | |
31c1e5a1 | 2548 | enum dma_transfer_direction direction, unsigned long flags) |
0c842b55 RV |
2549 | { |
2550 | unsigned int periods = buf_len / period_len; | |
2551 | struct dma_async_tx_descriptor *txd; | |
2552 | struct scatterlist *sg; | |
2553 | int i; | |
2554 | ||
79ca7ec3 | 2555 | sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT); |
2ec7e2e7 SK |
2556 | if (!sg) |
2557 | return NULL; | |
2558 | ||
0c842b55 RV |
2559 | for (i = 0; i < periods; i++) { |
2560 | sg_dma_address(&sg[i]) = dma_addr; | |
2561 | sg_dma_len(&sg[i]) = period_len; | |
2562 | dma_addr += period_len; | |
2563 | } | |
2564 | ||
2565 | sg[periods].offset = 0; | |
fdaf9c4b | 2566 | sg_dma_len(&sg[periods]) = 0; |
0c842b55 RV |
2567 | sg[periods].page_link = |
2568 | ((unsigned long)sg | 0x01) & ~0x02; | |
2569 | ||
2570 | txd = d40_prep_sg(chan, sg, sg, periods, direction, | |
2571 | DMA_PREP_INTERRUPT); | |
2572 | ||
2573 | kfree(sg); | |
2574 | ||
2575 | return txd; | |
2576 | } | |
2577 | ||
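/*
 * A sketch of why the extra scatterlist entry above makes the list
 * circular (the helper is illustrative): the terminator's page_link is
 * chained (bit 0 set) back to the first entry, so walking past the last
 * real entry returns the head again, which is exactly the condition
 * d40_prep_sg() uses to mark a descriptor cyclic.
 */
static inline bool example_sg_is_cyclic(struct scatterlist *sg,
					unsigned int len)
{
	return sg_next(&sg[len - 1]) == sg;
}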
8d318a50 LW |
2578 | static enum dma_status d40_tx_status(struct dma_chan *chan, |
2579 | dma_cookie_t cookie, | |
2580 | struct dma_tx_state *txstate) | |
2581 | { | |
2582 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | |
96a2af41 | 2583 | enum dma_status ret; |
8d318a50 | 2584 | |
0d0f6b8b | 2585 | if (d40c->phy_chan == NULL) { |
6db5a8ba | 2586 | chan_err(d40c, "Cannot read status of unallocated channel\n"); |
0d0f6b8b JA |
2587 | return -EINVAL; |
2588 | } | |
2589 | ||
96a2af41 | 2590 | ret = dma_cookie_status(chan, cookie, txstate); |
e2360adb | 2591 | if (ret != DMA_COMPLETE) |
96a2af41 | 2592 | dma_set_residue(txstate, stedma40_residue(chan)); |
8d318a50 | 2593 | |
a5ebca47 JA |
2594 | if (d40_is_paused(d40c)) |
2595 | ret = DMA_PAUSED; | |
8d318a50 LW |
2596 | |
2597 | return ret; | |
2598 | } | |
2599 | ||
2600 | static void d40_issue_pending(struct dma_chan *chan) | |
2601 | { | |
2602 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | |
2603 | unsigned long flags; | |
2604 | ||
0d0f6b8b | 2605 | if (d40c->phy_chan == NULL) { |
6db5a8ba | 2606 | chan_err(d40c, "Channel is not allocated!\n"); |
0d0f6b8b JA |
2607 | return; |
2608 | } | |
2609 | ||
8d318a50 LW |
2610 | spin_lock_irqsave(&d40c->lock, flags); |
2611 | ||
a8f3067b PF |
2612 | list_splice_tail_init(&d40c->pending_queue, &d40c->queue); |
2613 | ||
2614 | /* Busy means that queued jobs are already being processed */ | |
8d318a50 LW |
2615 | if (!d40c->busy) |
2616 | (void) d40_queue_start(d40c); | |
2617 | ||
2618 | spin_unlock_irqrestore(&d40c->lock, flags); | |
2619 | } | |
2620 | ||
35e639d1 | 2621 | static int d40_terminate_all(struct dma_chan *chan) |
1bdae6f4 N |
2622 | { |
2623 | unsigned long flags; | |
2624 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | |
2625 | int ret; | |
2626 | ||
6f5bad03 MR |
2627 | if (d40c->phy_chan == NULL) { |
2628 | chan_err(d40c, "Channel is not allocated!\n"); | |
2629 | return -EINVAL; | |
2630 | } | |
2631 | ||
1bdae6f4 N |
2632 | spin_lock_irqsave(&d40c->lock, flags); |
2633 | ||
2634 | pm_runtime_get_sync(d40c->base->dev); | |
2635 | ret = d40_channel_execute_command(d40c, D40_DMA_STOP); | |
2636 | if (ret) | |
2637 | chan_err(d40c, "Failed to stop channel\n"); | |
2638 | ||
2639 | d40_term_all(d40c); | |
2640 | pm_runtime_mark_last_busy(d40c->base->dev); | |
2641 | pm_runtime_put_autosuspend(d40c->base->dev); | |
2642 | if (d40c->busy) { | |
2643 | pm_runtime_mark_last_busy(d40c->base->dev); | |
2644 | pm_runtime_put_autosuspend(d40c->base->dev); | |
2645 | } | |
2646 | d40c->busy = false; | |
2647 | ||
2648 | spin_unlock_irqrestore(&d40c->lock, flags); | |
35e639d1 | 2649 | return 0; |
1bdae6f4 N |
2650 | } |
2651 | ||
98ca5289 RV |
2652 | static int |
2653 | dma40_config_to_halfchannel(struct d40_chan *d40c, | |
2654 | struct stedma40_half_channel_info *info, | |
98ca5289 RV |
2655 | u32 maxburst) |
2656 | { | |
98ca5289 RV |
2657 | int psize; |
2658 | ||
98ca5289 RV |
2659 | if (chan_is_logical(d40c)) { |
2660 | if (maxburst >= 16) | |
2661 | psize = STEDMA40_PSIZE_LOG_16; | |
2662 | else if (maxburst >= 8) | |
2663 | psize = STEDMA40_PSIZE_LOG_8; | |
2664 | else if (maxburst >= 4) | |
2665 | psize = STEDMA40_PSIZE_LOG_4; | |
2666 | else | |
2667 | psize = STEDMA40_PSIZE_LOG_1; | |
2668 | } else { | |
2669 | if (maxburst >= 16) | |
2670 | psize = STEDMA40_PSIZE_PHY_16; | |
2671 | else if (maxburst >= 8) | |
2672 | psize = STEDMA40_PSIZE_PHY_8; | |
2673 | else if (maxburst >= 4) | |
2674 | psize = STEDMA40_PSIZE_PHY_4; | |
2675 | else | |
2676 | psize = STEDMA40_PSIZE_PHY_1; | |
2677 | } | |
2678 | ||
98ca5289 RV |
2679 | info->psize = psize; |
2680 | info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; | |
2681 | ||
2682 | return 0; | |
2683 | } | |
2684 | ||
95e1400f | 2685 | /* Runtime reconfiguration extension */ |
98ca5289 RV |
2686 | static int d40_set_runtime_config(struct dma_chan *chan, |
2687 | struct dma_slave_config *config) | |
95e1400f LW |
2688 | { |
2689 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | |
2690 | struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; | |
98ca5289 | 2691 | enum dma_slave_buswidth src_addr_width, dst_addr_width; |
95e1400f | 2692 | dma_addr_t config_addr; |
98ca5289 RV |
2693 | u32 src_maxburst, dst_maxburst; |
2694 | int ret; | |
2695 | ||
6f5bad03 MR |
2696 | if (d40c->phy_chan == NULL) { |
2697 | chan_err(d40c, "Channel is not allocated!\n"); | |
2698 | return -EINVAL; | |
2699 | } | |
2700 | ||
98ca5289 RV |
2701 | src_addr_width = config->src_addr_width; |
2702 | src_maxburst = config->src_maxburst; | |
2703 | dst_addr_width = config->dst_addr_width; | |
2704 | dst_maxburst = config->dst_maxburst; | |
95e1400f | 2705 | |
db8196df | 2706 | if (config->direction == DMA_DEV_TO_MEM) { |
95e1400f | 2707 | config_addr = config->src_addr; |
ef9c89b3 | 2708 | |
2c2b62d5 | 2709 | if (cfg->dir != DMA_DEV_TO_MEM) |
95e1400f LW |
2710 | dev_dbg(d40c->base->dev, |
2711 | "channel was not configured for peripheral " | |
2712 | "to memory transfer (%d) overriding\n", | |
2713 | cfg->dir); | |
2c2b62d5 | 2714 | cfg->dir = DMA_DEV_TO_MEM; |
95e1400f | 2715 | |
98ca5289 RV |
2716 | /* Configure the memory side */ |
2717 | if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) | |
2718 | dst_addr_width = src_addr_width; | |
2719 | if (dst_maxburst == 0) | |
2720 | dst_maxburst = src_maxburst; | |
95e1400f | 2721 | |
db8196df | 2722 | } else if (config->direction == DMA_MEM_TO_DEV) { |
95e1400f | 2723 | config_addr = config->dst_addr; |
ef9c89b3 | 2724 | |
2c2b62d5 | 2725 | if (cfg->dir != DMA_MEM_TO_DEV) |
95e1400f LW |
2726 | dev_dbg(d40c->base->dev, |
2727 | "channel was not configured for memory " | |
2728 | "to peripheral transfer (%d) overriding\n", | |
2729 | cfg->dir); | |
2c2b62d5 | 2730 | cfg->dir = DMA_MEM_TO_DEV; |
95e1400f | 2731 | |
98ca5289 RV |
2732 | /* Configure the memory side */ |
2733 | if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) | |
2734 | src_addr_width = dst_addr_width; | |
2735 | if (src_maxburst == 0) | |
2736 | src_maxburst = dst_maxburst; | |
95e1400f LW |
2737 | } else { |
2738 | dev_err(d40c->base->dev, | |
2739 | "unrecognized channel direction %d\n", | |
2740 | config->direction); | |
98ca5289 | 2741 | return -EINVAL; |
95e1400f LW |
2742 | } |
2743 | ||
ef9c89b3 LJ |
2744 | if (config_addr <= 0) { |
2745 | dev_err(d40c->base->dev, "no address supplied\n"); | |
2746 | return -EINVAL; | |
2747 | } | |
2748 | ||
98ca5289 | 2749 | if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) { |
95e1400f | 2750 | dev_err(d40c->base->dev, |
98ca5289 RV |
2751 | "src/dst width/maxburst mismatch: %d*%d != %d*%d\n", |
2752 | src_maxburst, | |
2753 | src_addr_width, | |
2754 | dst_maxburst, | |
2755 | dst_addr_width); | |
2756 | return -EINVAL; | |
95e1400f LW |
2757 | } |
2758 | ||
92bb6cdb PF |
2759 | if (src_maxburst > 16) { |
2760 | src_maxburst = 16; | |
2761 | dst_maxburst = src_maxburst * src_addr_width / dst_addr_width; | |
2762 | } else if (dst_maxburst > 16) { | |
2763 | dst_maxburst = 16; | |
2764 | src_maxburst = dst_maxburst * dst_addr_width / src_addr_width; | |
2765 | } | |
2766 | ||
43f2e1a3 LJ |
2767 | /* The only valid widths are 1, 2, 4 and 8. */ |
2768 | if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || | |
2769 | src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || | |
2770 | dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || | |
2771 | dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || | |
c95905a6 GL |
2772 | !is_power_of_2(src_addr_width) || |
2773 | !is_power_of_2(dst_addr_width)) | |
43f2e1a3 LJ |
2774 | return -EINVAL; |
2775 | ||
2776 | cfg->src_info.data_width = src_addr_width; | |
2777 | cfg->dst_info.data_width = dst_addr_width; | |
2778 | ||
98ca5289 | 2779 | ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, |
98ca5289 RV |
2780 | src_maxburst); |
2781 | if (ret) | |
2782 | return ret; | |
95e1400f | 2783 | |
98ca5289 | 2784 | ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info, |
98ca5289 RV |
2785 | dst_maxburst); |
2786 | if (ret) | |
2787 | return ret; | |
95e1400f | 2788 | |
a59670a4 | 2789 | /* Fill in register values */ |
724a8577 | 2790 | if (chan_is_logical(d40c)) |
a59670a4 PF |
2791 | d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); |
2792 | else | |
57e65ad7 | 2793 | d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg); |
a59670a4 | 2794 | |
95e1400f LW |
2795 | /* These settings will take precedence later */ |
2796 | d40c->runtime_addr = config_addr; | |
2797 | d40c->runtime_direction = config->direction; | |
2798 | dev_dbg(d40c->base->dev, | |
98ca5289 RV |
2799 | "configured channel %s for %s, data width %d/%d, " |
2800 | "maxburst %d/%d elements, LE, no flow control\n", | |
95e1400f | 2801 | dma_chan_name(chan), |
db8196df | 2802 | (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX", |
98ca5289 RV |
2803 | src_addr_width, dst_addr_width, |
2804 | src_maxburst, dst_maxburst); | |
2805 | ||
2806 | return 0; | |
95e1400f LW |
2807 | } |
2808 | ||
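/*
 * A hypothetical client-side sketch of the runtime configuration
 * handled above; the channel and FIFO address are assumptions for
 * illustration.
 */
static int example_configure_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,	/* maps to STEDMA40_PSIZE_*_8 */
	};

	/* Reaches d40_set_runtime_config() through the device_config hook */
	return dmaengine_slave_config(chan, &cfg);
}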
8d318a50 LW |
2809 | /* Initialization functions */ |
2810 | ||
2811 | static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, | |
2812 | struct d40_chan *chans, int offset, | |
2813 | int num_chans) | |
2814 | { | |
2815 | int i = 0; | |
2816 | struct d40_chan *d40c; | |
2817 | ||
2818 | INIT_LIST_HEAD(&dma->channels); | |
2819 | ||
2820 | for (i = offset; i < offset + num_chans; i++) { | |
2821 | d40c = &chans[i]; | |
2822 | d40c->base = base; | |
2823 | d40c->chan.device = dma; | |
2824 | ||
8d318a50 LW |
2825 | spin_lock_init(&d40c->lock); |
2826 | ||
2827 | d40c->log_num = D40_PHY_CHAN; | |
2828 | ||
4226dd86 | 2829 | INIT_LIST_HEAD(&d40c->done); |
8d318a50 LW |
2830 | INIT_LIST_HEAD(&d40c->active); |
2831 | INIT_LIST_HEAD(&d40c->queue); | |
a8f3067b | 2832 | INIT_LIST_HEAD(&d40c->pending_queue); |
8d318a50 | 2833 | INIT_LIST_HEAD(&d40c->client); |
82babbb3 | 2834 | INIT_LIST_HEAD(&d40c->prepare_queue); |
8d318a50 | 2835 | |
8d318a50 LW |
2836 | tasklet_init(&d40c->tasklet, dma_tasklet, |
2837 | (unsigned long) d40c); | |
2838 | ||
2839 | list_add_tail(&d40c->chan.device_node, | |
2840 | &dma->channels); | |
2841 | } | |
2842 | } | |
2843 | ||
7ad74a7c RV |
2844 | static void d40_ops_init(struct d40_base *base, struct dma_device *dev) |
2845 | { | |
2846 | if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) | |
2847 | dev->device_prep_slave_sg = d40_prep_slave_sg; | |
2848 | ||
2849 | if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) { | |
2850 | dev->device_prep_dma_memcpy = d40_prep_memcpy; | |
2851 | ||
2852 | /* | |
2853 | * This controller can only access addresses at even |
2854 | * 32-bit boundaries, i.e. 2^2. |
2855 | */ | |
77a68e56 | 2856 | dev->copy_align = DMAENGINE_ALIGN_4_BYTES; |
7ad74a7c RV |
2857 | } |
2858 | ||
2859 | if (dma_has_cap(DMA_SG, dev->cap_mask)) | |
2860 | dev->device_prep_dma_sg = d40_prep_memcpy_sg; | |
2861 | ||
0c842b55 RV |
2862 | if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) |
2863 | dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; | |
2864 | ||
7ad74a7c RV |
2865 | dev->device_alloc_chan_resources = d40_alloc_chan_resources; |
2866 | dev->device_free_chan_resources = d40_free_chan_resources; | |
2867 | dev->device_issue_pending = d40_issue_pending; | |
2868 | dev->device_tx_status = d40_tx_status; | |
6f5bad03 MR |
2869 | dev->device_config = d40_set_runtime_config; |
2870 | dev->device_pause = d40_pause; | |
2871 | dev->device_resume = d40_resume; | |
2872 | dev->device_terminate_all = d40_terminate_all; | |
7ad74a7c RV |
2873 | dev->dev = base->dev; |
2874 | } | |
2875 | ||
8d318a50 LW |
2876 | static int __init d40_dmaengine_init(struct d40_base *base, |
2877 | int num_reserved_chans) | |
2878 | { | |
2879 | int err; |
2880 | ||
2881 | d40_chan_init(base, &base->dma_slave, base->log_chans, | |
2882 | 0, base->num_log_chans); | |
2883 | ||
2884 | dma_cap_zero(base->dma_slave.cap_mask); | |
2885 | dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); | |
0c842b55 | 2886 | dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); |
8d318a50 | 2887 | |
7ad74a7c | 2888 | d40_ops_init(base, &base->dma_slave); |
8d318a50 LW |
2889 | |
2890 | err = dma_async_device_register(&base->dma_slave); | |
2891 | ||
2892 | if (err) { | |
6db5a8ba | 2893 | d40_err(base->dev, "Failed to register slave channels\n"); |
8d318a50 LW |
2894 | goto failure1; |
2895 | } | |
2896 | ||
2897 | d40_chan_init(base, &base->dma_memcpy, base->log_chans, | |
a7dacb68 | 2898 | base->num_log_chans, base->num_memcpy_chans); |
8d318a50 LW |
2899 | |
2900 | dma_cap_zero(base->dma_memcpy.cap_mask); | |
2901 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); | |
7ad74a7c RV |
2902 | dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask); |
2903 | ||
2904 | d40_ops_init(base, &base->dma_memcpy); | |
8d318a50 LW |
2905 | |
2906 | err = dma_async_device_register(&base->dma_memcpy); | |
2907 | ||
2908 | if (err) { | |
6db5a8ba | 2909 | d40_err(base->dev, |
52984aab | 2910 | "Failed to register memcpy only channels\n"); |
8d318a50 LW |
2911 | goto failure2; |
2912 | } | |
2913 | ||
2914 | d40_chan_init(base, &base->dma_both, base->phy_chans, | |
2915 | 0, num_reserved_chans); | |
2916 | ||
2917 | dma_cap_zero(base->dma_both.cap_mask); | |
2918 | dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); | |
2919 | dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); | |
7ad74a7c | 2920 | dma_cap_set(DMA_SG, base->dma_both.cap_mask); |
0c842b55 | 2921 | dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask); |
7ad74a7c RV |
2922 | |
2923 | d40_ops_init(base, &base->dma_both); | |
8d318a50 LW |
2924 | err = dma_async_device_register(&base->dma_both); |
2925 | ||
2926 | if (err) { | |
6db5a8ba RV |
2927 | d40_err(base->dev, |
2928 | "Failed to register logical and physical capable channels\n"); | |
8d318a50 LW |
2929 | goto failure3; |
2930 | } | |
2931 | return 0; | |
2932 | failure3: | |
2933 | dma_async_device_unregister(&base->dma_memcpy); | |
2934 | failure2: | |
2935 | dma_async_device_unregister(&base->dma_slave); | |
2936 | failure1: | |
2937 | return err; | |
2938 | } | |
2939 | ||
7fb3e75e | 2940 | /* Suspend resume functionality */ |
123e4ca1 UH |
2941 | #ifdef CONFIG_PM_SLEEP |
2942 | static int dma40_suspend(struct device *dev) | |
7fb3e75e | 2943 | { |
28c7a19d N |
2944 | struct platform_device *pdev = to_platform_device(dev); |
2945 | struct d40_base *base = platform_get_drvdata(pdev); | |
c906a3ec UH |
2946 | int ret; |
2947 | ||
2948 | ret = pm_runtime_force_suspend(dev); | |
2949 | if (ret) | |
2950 | return ret; | |
7fb3e75e | 2951 | |
28c7a19d N |
2952 | if (base->lcpa_regulator) |
2953 | ret = regulator_disable(base->lcpa_regulator); | |
2954 | return ret; | |
7fb3e75e N |
2955 | } |
2956 | ||
123e4ca1 UH |
2957 | static int dma40_resume(struct device *dev) |
2958 | { | |
2959 | struct platform_device *pdev = to_platform_device(dev); | |
2960 | struct d40_base *base = platform_get_drvdata(pdev); | |
2961 | int ret = 0; | |
2962 | ||
c906a3ec | 2963 | if (base->lcpa_regulator) { |
123e4ca1 | 2964 | ret = regulator_enable(base->lcpa_regulator); |
c906a3ec UH |
2965 | if (ret) |
2966 | return ret; | |
2967 | } | |
123e4ca1 | 2968 | |
c906a3ec | 2969 | return pm_runtime_force_resume(dev); |
123e4ca1 UH |
2970 | } |
2971 | #endif | |
2972 | ||
2973 | #ifdef CONFIG_PM | |
2974 | static void dma40_backup(void __iomem *baseaddr, u32 *backup, | |
2975 | u32 *regaddr, int num, bool save) | |
2976 | { | |
2977 | int i; | |
2978 | ||
2979 | for (i = 0; i < num; i++) { | |
2980 | void __iomem *addr = baseaddr + regaddr[i]; | |
2981 | ||
2982 | if (save) | |
2983 | backup[i] = readl_relaxed(addr); | |
2984 | else | |
2985 | writel_relaxed(backup[i], addr); | |
2986 | } | |
2987 | } | |
2988 | ||
2989 | static void d40_save_restore_registers(struct d40_base *base, bool save) | |
2990 | { | |
2991 | int i; | |
2992 | ||
2993 | /* Save/Restore channel-specific registers */ |
2994 | for (i = 0; i < base->num_phy_chans; i++) { | |
2995 | void __iomem *addr; | |
2996 | int idx; | |
2997 | ||
2998 | if (base->phy_res[i].reserved) | |
2999 | continue; | |
3000 | ||
3001 | addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA; | |
3002 | idx = i * ARRAY_SIZE(d40_backup_regs_chan); | |
3003 | ||
3004 | dma40_backup(addr, &base->reg_val_backup_chan[idx], | |
3005 | d40_backup_regs_chan, | |
3006 | ARRAY_SIZE(d40_backup_regs_chan), | |
3007 | save); | |
3008 | } | |
3009 | ||
3010 | /* Save/Restore global registers */ | |
3011 | dma40_backup(base->virtbase, base->reg_val_backup, | |
3012 | d40_backup_regs, ARRAY_SIZE(d40_backup_regs), | |
3013 | save); | |
3014 | ||
3015 | /* Save/Restore registers that only exist on DMA40 v3 and later */ | |
3016 | if (base->gen_dmac.backup) | |
3017 | dma40_backup(base->virtbase, base->reg_val_backup_v4, | |
3018 | base->gen_dmac.backup, | |
3019 | base->gen_dmac.backup_size, | |
3020 | save); | |
3021 | } | |
3022 | ||
7fb3e75e N |
3023 | static int dma40_runtime_suspend(struct device *dev) |
3024 | { | |
3025 | struct platform_device *pdev = to_platform_device(dev); | |
3026 | struct d40_base *base = platform_get_drvdata(pdev); | |
3027 | ||
3028 | d40_save_restore_registers(base, true); | |
3029 | ||
3030 | /* Don't disable/enable clocks for v1 due to HW bugs */ | |
3031 | if (base->rev != 1) | |
3032 | writel_relaxed(base->gcc_pwr_off_mask, | |
3033 | base->virtbase + D40_DREG_GCC); | |
3034 | ||
3035 | return 0; | |
3036 | } | |
3037 | ||
3038 | static int dma40_runtime_resume(struct device *dev) | |
3039 | { | |
3040 | struct platform_device *pdev = to_platform_device(dev); | |
3041 | struct d40_base *base = platform_get_drvdata(pdev); | |
3042 | ||
2dafca17 | 3043 | d40_save_restore_registers(base, false); |
7fb3e75e N |
3044 | |
3045 | writel_relaxed(D40_DREG_GCC_ENABLE_ALL, | |
3046 | base->virtbase + D40_DREG_GCC); | |
3047 | return 0; | |
3048 | } | |
123e4ca1 | 3049 | #endif |
7fb3e75e N |
3050 | |
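/*
 * Both PM flavours are wired into one dev_pm_ops: the late system-sleep
 * hooks run after ordinary device suspend callbacks (presumably so that
 * DMA client drivers have already quiesced), while the runtime hooks do
 * the actual register save/restore and clock gating.
 */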
3051 | static const struct dev_pm_ops dma40_pm_ops = { | |
673d3773 | 3052 | SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume) |
6ed23b80 | 3053 | SET_RUNTIME_PM_OPS(dma40_runtime_suspend, |
123e4ca1 UH |
3054 | dma40_runtime_resume, |
3055 | NULL) | |
7fb3e75e | 3056 | }; |
7fb3e75e | 3057 | |
8d318a50 LW |
3058 | /* Initialization functions. */ |
3059 | ||
3060 | static int __init d40_phy_res_init(struct d40_base *base) | |
3061 | { | |
3062 | int i; | |
3063 | int num_phy_chans_avail = 0; | |
3064 | u32 val[2]; | |
3065 | int odd_even_bit = -2; | |
7fb3e75e | 3066 | int gcc = D40_DREG_GCC_ENA; |
8d318a50 LW |
3067 | |
3068 | val[0] = readl(base->virtbase + D40_DREG_PRSME); | |
3069 | val[1] = readl(base->virtbase + D40_DREG_PRSMO); | |
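/*
 * PRSME/PRSMO hold the secure-mode settings two bits per channel: even
 * channels in PRSME (val[0]), odd channels in PRSMO (val[1]), channel i
 * at bit position 2 * (i / 2). E.g. channel 5 sits in PRSMO bits [5:4];
 * a field value of 1 marks the channel as reserved for the secure side.
 */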
3070 | ||
3071 | for (i = 0; i < base->num_phy_chans; i++) { | |
3072 | base->phy_res[i].num = i; | |
3073 | odd_even_bit += 2 * ((i % 2) == 0); | |
3074 | if (((val[i % 2] >> odd_even_bit) & 3) == 1) { | |
3075 | /* Mark security-only channels as occupied */ | |
3076 | base->phy_res[i].allocated_src = D40_ALLOC_PHY; | |
3077 | base->phy_res[i].allocated_dst = D40_ALLOC_PHY; | |
7fb3e75e N |
3078 | base->phy_res[i].reserved = true; |
3079 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), | |
3080 | D40_DREG_GCC_SRC); | |
3081 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), | |
3082 | D40_DREG_GCC_DST); | |
3083 | ||
3084 | ||
8d318a50 LW |
3085 | } else { |
3086 | base->phy_res[i].allocated_src = D40_ALLOC_FREE; | |
3087 | base->phy_res[i].allocated_dst = D40_ALLOC_FREE; | |
7fb3e75e | 3088 | base->phy_res[i].reserved = false; |
8d318a50 LW |
3089 | num_phy_chans_avail++; |
3090 | } | |
3091 | spin_lock_init(&base->phy_res[i].lock); | |
3092 | } | |
6b7acd84 JA |
3093 | |
3094 | /* Mark disabled channels as occupied */ | |
3095 | for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { | |
f57b407c RV |
3096 | int chan = base->plat_data->disabled_channels[i]; |
3097 | ||
3098 | base->phy_res[chan].allocated_src = D40_ALLOC_PHY; | |
3099 | base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; | |
7fb3e75e N |
3100 | base->phy_res[chan].reserved = true; |
3101 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), | |
3102 | D40_DREG_GCC_SRC); | |
3103 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), | |
3104 | D40_DREG_GCC_DST); | |
f57b407c | 3105 | num_phy_chans_avail--; |
6b7acd84 JA |
3106 | } |
3107 | ||
7407048b FB |
3108 | /* Mark soft_lli channels */ |
3109 | for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) { | |
3110 | int chan = base->plat_data->soft_lli_chans[i]; | |
3111 | ||
3112 | base->phy_res[chan].use_soft_lli = true; | |
3113 | } | |
3114 | ||
8d318a50 LW |
3115 | dev_info(base->dev, "%d of %d physical DMA channels available\n", |
3116 | num_phy_chans_avail, base->num_phy_chans); | |
3117 | ||
3118 | /* Verify settings extended vs standard */ | |
3119 | val[0] = readl(base->virtbase + D40_DREG_PRTYP); | |
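/*
 * PRTYP also packs two bits per channel, consumed LSB first below; a
 * value of 1 means the channel is in the standard (not extended) mode,
 * so any free channel reporting something else is flagged.
 */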
3120 | ||
3121 | for (i = 0; i < base->num_phy_chans; i++) { | |
3122 | ||
3123 | if (base->phy_res[i].allocated_src == D40_ALLOC_FREE && | |
3124 | (val[0] & 0x3) != 1) | |
3125 | dev_info(base->dev, | |
3126 | "[%s] INFO: channel %d is misconfigured (%d)\n", | |
3127 | __func__, i, val[0] & 0x3); | |
3128 | ||
3129 | val[0] = val[0] >> 2; | |
3130 | } | |
3131 | ||
7fb3e75e N |
3132 | /* |
3133 | * To keep things simple, enable all clocks initially. | |
3134 | * The clocks will be managed later, after channel allocation. | |
3135 | * The clocks for the event lines on which reserved channels exist | |
3136 | * are not managed here. | |
3137 | */ | |
3138 | writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); | |
3139 | base->gcc_pwr_off_mask = gcc; | |
3140 | ||
8d318a50 LW |
3141 | return num_phy_chans_avail; |
3142 | } | |
3143 | ||
3144 | static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |
3145 | { | |
d4adcc01 | 3146 | struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev); |
8d318a50 LW |
3147 | struct clk *clk = NULL; |
3148 | void __iomem *virtbase = NULL; | |
3149 | struct resource *res = NULL; | |
3150 | struct d40_base *base = NULL; | |
3151 | int num_log_chans = 0; | |
3152 | int num_phy_chans; | |
a7dacb68 | 3153 | int num_memcpy_chans; |
b707c658 | 3154 | int clk_ret = -EINVAL; |
8d318a50 | 3155 | int i; |
f4b89764 LW |
3156 | u32 pid; |
3157 | u32 cid; | |
3158 | u8 rev; | |
8d318a50 LW |
3159 | |
3160 | clk = clk_get(&pdev->dev, NULL); | |
8d318a50 | 3161 | if (IS_ERR(clk)) { |
6db5a8ba | 3162 | d40_err(&pdev->dev, "No matching clock found\n"); |
8d318a50 LW |
3163 | goto failure; |
3164 | } | |
3165 | ||
b707c658 UH |
3166 | clk_ret = clk_prepare_enable(clk); |
3167 | if (clk_ret) { | |
3168 | d40_err(&pdev->dev, "Failed to prepare/enable clock\n"); | |
3169 | goto failure; | |
3170 | } | |
8d318a50 LW |
3171 | |
3172 | /* Get IO for DMAC base address */ | |
3173 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); | |
3174 | if (!res) | |
3175 | goto failure; | |
3176 | ||
3177 | if (request_mem_region(res->start, resource_size(res), | |
3178 | D40_NAME " I/O base") == NULL) | |
3179 | goto failure; | |
3180 | ||
3181 | virtbase = ioremap(res->start, resource_size(res)); | |
3182 | if (!virtbase) | |
3183 | goto failure; | |
3184 | ||
f4b89764 LW |
3185 | /* This is just a regular AMBA PrimeCell ID actually */ |
3186 | for (pid = 0, i = 0; i < 4; i++) | |
3187 | pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i) | |
3188 | & 255) << (i * 8); | |
3189 | for (cid = 0, i = 0; i < 4; i++) | |
3190 | cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i) | |
3191 | & 255) << (i * 8); | |
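/*
 * Each PIDn/CIDn register carries one byte in its low 8 bits, at offsets
 * size - 0x20..0x14 (PID) and size - 0x10..0x04 (CID) from the base; the
 * loops above assemble them LSB first. A matching part yields the AMBA
 * CID 0xB105F00D, i.e. bytes 0x0D, 0xF0, 0x05, 0xB1 in that order.
 */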
8d318a50 | 3192 | |
f4b89764 LW |
3193 | if (cid != AMBA_CID) { |
3194 | d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n"); | |
3195 | goto failure; | |
3196 | } | |
3197 | if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) { | |
6db5a8ba | 3198 | d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", |
f4b89764 LW |
3199 | AMBA_MANF_BITS(pid), |
3200 | AMBA_VENDOR_ST); | |
8d318a50 LW |
3201 | goto failure; |
3202 | } | |
f4b89764 LW |
3203 | /* |
3204 | * HW revision: | |
3205 | * DB8500ed has revision 0 | |
3206 | * ? has revision 1 | |
3207 | * DB8500v1 has revision 2 | |
3208 | * DB8500v2 has revision 3 | |
47db92f4 GB |
3209 | * AP9540v1 has revision 4 |
3210 | * DB8540v1 has revision 4 | |
f4b89764 LW |
3211 | */ |
3212 | rev = AMBA_REV_BITS(pid); | |
8b2fe9b6 LJ |
3213 | if (rev < 2) { |
3214 | d40_err(&pdev->dev, "hardware revision: %d is not supported\n", rev); | |
3215 | goto failure; | |
3216 | } | |
3ae0267f | 3217 | |
8d318a50 | 3218 | /* The number of physical channels on this HW */ |
47db92f4 GB |
3219 | if (plat_data->num_of_phy_chans) |
3220 | num_phy_chans = plat_data->num_of_phy_chans; | |
3221 | else | |
3222 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; | |
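/*
 * The low three bits of ICFG encode the channel count as 4 * n + 4, so a
 * field value of 1 means 8 physical channels (the DB8500 case) and the
 * encoding tops out at 4 * 7 + 4 = 32.
 */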
8d318a50 | 3223 | |
a7dacb68 LJ |
3224 | /* The number of channels used for memcpy */ |
3225 | if (plat_data->num_of_memcpy_chans) | |
3226 | num_memcpy_chans = plat_data->num_of_memcpy_chans; | |
3227 | else | |
3228 | num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels); | |
3229 | ||
db72da92 LJ |
3230 | num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY; |
3231 | ||
b2abb249 | 3232 | dev_info(&pdev->dev, |
3a919d5b FE |
3233 | "hardware rev: %d @ %pa with %d physical and %d logical channels\n", |
3234 | rev, &res->start, num_phy_chans, num_log_chans); | |
8d318a50 | 3235 | |
8d318a50 | 3236 | base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + |
a7dacb68 | 3237 | (num_phy_chans + num_log_chans + num_memcpy_chans) * |
8d318a50 LW |
3238 | sizeof(struct d40_chan), GFP_KERNEL); |
3239 | ||
3240 | if (base == NULL) { | |
6db5a8ba | 3241 | d40_err(&pdev->dev, "Out of memory\n"); |
8d318a50 LW |
3242 | goto failure; |
3243 | } | |
3244 | ||
3ae0267f | 3245 | base->rev = rev; |
8d318a50 | 3246 | base->clk = clk; |
a7dacb68 | 3247 | base->num_memcpy_chans = num_memcpy_chans; |
8d318a50 LW |
3248 | base->num_phy_chans = num_phy_chans; |
3249 | base->num_log_chans = num_log_chans; | |
3250 | base->phy_start = res->start; | |
3251 | base->phy_size = resource_size(res); | |
3252 | base->virtbase = virtbase; | |
3253 | base->plat_data = plat_data; | |
3254 | base->dev = &pdev->dev; | |
3255 | base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); | |
3256 | base->log_chans = &base->phy_chans[num_phy_chans]; | |
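/*
 * The single allocation above is laid out as:
 * [d40_base][phy_chans[num_phy]][log_chans[num_log]][memcpy chans],
 * with phy_chans starting at the first 4-byte-aligned address past the
 * base struct and log_chans immediately after the physical ones.
 */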
3257 | ||
3cb645dc TL |
3258 | if (base->plat_data->num_of_phy_chans == 14) { |
3259 | base->gen_dmac.backup = d40_backup_regs_v4b; | |
3260 | base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B; | |
3261 | base->gen_dmac.interrupt_en = D40_DREG_CPCMIS; | |
3262 | base->gen_dmac.interrupt_clear = D40_DREG_CPCICR; | |
3263 | base->gen_dmac.realtime_en = D40_DREG_CRSEG1; | |
3264 | base->gen_dmac.realtime_clear = D40_DREG_CRCEG1; | |
3265 | base->gen_dmac.high_prio_en = D40_DREG_CPSEG1; | |
3266 | base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1; | |
3267 | base->gen_dmac.il = il_v4b; | |
3268 | base->gen_dmac.il_size = ARRAY_SIZE(il_v4b); | |
3269 | base->gen_dmac.init_reg = dma_init_reg_v4b; | |
3270 | base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b); | |
3271 | } else { | |
3272 | if (base->rev >= 3) { | |
3273 | base->gen_dmac.backup = d40_backup_regs_v4a; | |
3274 | base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A; | |
3275 | } | |
3276 | base->gen_dmac.interrupt_en = D40_DREG_PCMIS; | |
3277 | base->gen_dmac.interrupt_clear = D40_DREG_PCICR; | |
3278 | base->gen_dmac.realtime_en = D40_DREG_RSEG1; | |
3279 | base->gen_dmac.realtime_clear = D40_DREG_RCEG1; | |
3280 | base->gen_dmac.high_prio_en = D40_DREG_PSEG1; | |
3281 | base->gen_dmac.high_prio_clear = D40_DREG_PCEG1; | |
3282 | base->gen_dmac.il = il_v4a; | |
3283 | base->gen_dmac.il_size = ARRAY_SIZE(il_v4a); | |
3284 | base->gen_dmac.init_reg = dma_init_reg_v4a; | |
3285 | base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a); | |
3286 | } | |
3287 | ||
8d318a50 LW |
3288 | base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), |
3289 | GFP_KERNEL); | |
3290 | if (!base->phy_res) | |
3291 | goto failure; | |
3292 | ||
3293 | base->lookup_phy_chans = kzalloc(num_phy_chans * | |
3294 | sizeof(struct d40_chan *), | |
3295 | GFP_KERNEL); | |
3296 | if (!base->lookup_phy_chans) | |
3297 | goto failure; | |
3298 | ||
8a59fed3 LJ |
3299 | base->lookup_log_chans = kzalloc(num_log_chans * |
3300 | sizeof(struct d40_chan *), | |
3301 | GFP_KERNEL); | |
3302 | if (!base->lookup_log_chans) | |
3303 | goto failure; | |
698e4732 | 3304 | |
7fb3e75e N |
3305 | base->reg_val_backup_chan = kmalloc(base->num_phy_chans * |
3306 | sizeof(d40_backup_regs_chan), | |
8d318a50 | 3307 | GFP_KERNEL); |
7fb3e75e N |
3308 | if (!base->reg_val_backup_chan) |
3309 | goto failure; | |
3310 | ||
3311 | base->lcla_pool.alloc_map = | |
3312 | kzalloc(num_phy_chans * sizeof(struct d40_desc *) | |
3313 | * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL); | |
8d318a50 LW |
3314 | if (!base->lcla_pool.alloc_map) |
3315 | goto failure; | |
3316 | ||
c675b1b4 JA |
3317 | base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), |
3318 | 0, SLAB_HWCACHE_ALIGN, | |
3319 | NULL); | |
3320 | if (base->desc_slab == NULL) | |
3321 | goto failure; | |
3322 | ||
8d318a50 LW |
3323 | return base; |
3324 | ||
3325 | failure: | |
b707c658 UH |
3326 | if (!clk_ret) |
3327 | clk_disable_unprepare(clk); | |
3328 | if (!IS_ERR(clk)) | |
8d318a50 | 3329 | clk_put(clk); |
8d318a50 LW |
3330 | if (virtbase) |
3331 | iounmap(virtbase); | |
3332 | if (res) | |
3333 | release_mem_region(res->start, | |
3334 | resource_size(res)); | |
3337 | ||
3338 | if (base) { | |
3339 | kfree(base->lcla_pool.alloc_map); | |
1bdae6f4 | 3340 | kfree(base->reg_val_backup_chan); |
8d318a50 LW |
3341 | kfree(base->lookup_log_chans); |
3342 | kfree(base->lookup_phy_chans); | |
3343 | kfree(base->phy_res); | |
3344 | kfree(base); | |
3345 | } | |
3346 | ||
3347 | return NULL; | |
3348 | } | |
3349 | ||
3350 | static void __init d40_hw_init(struct d40_base *base) | |
3351 | { | |
3352 | ||
8d318a50 LW |
3353 | int i; |
3354 | u32 prmseo[2] = {0, 0}; | |
3355 | u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; | |
3356 | u32 pcmis = 0; | |
3357 | u32 pcicr = 0; | |
3cb645dc TL |
3358 | struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg; |
3359 | u32 reg_size = base->gen_dmac.init_reg_size; | |
8d318a50 | 3360 | |
3cb645dc | 3361 | for (i = 0; i < reg_size; i++) |
8d318a50 LW |
3362 | writel(dma_init_reg[i].val, |
3363 | base->virtbase + dma_init_reg[i].reg); | |
3364 | ||
3365 | /* Configure all our dma channels to default settings */ | |
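/*
 * The loop below walks channels from highest to lowest, left-shifting
 * two bits per visit so that channel 0 ends up in bits [1:0]. With an
 * even channel count, index 1 of prmseo/activeo accumulates the even
 * channels (written to the *E registers) and index 0 the odd ones
 * (written to the *O registers).
 */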
3366 | for (i = 0; i < base->num_phy_chans; i++) { | |
3367 | ||
3368 | activeo[i % 2] = activeo[i % 2] << 2; | |
3369 | ||
3370 | if (base->phy_res[base->num_phy_chans - i - 1].allocated_src | |
3371 | == D40_ALLOC_PHY) { | |
3372 | activeo[i % 2] |= 3; | |
3373 | continue; | |
3374 | } | |
3375 | ||
3376 | /* Enable interrupt # */ | |
3377 | pcmis = (pcmis << 1) | 1; | |
3378 | ||
3379 | /* Clear interrupt # */ | |
3380 | pcicr = (pcicr << 1) | 1; | |
3381 | ||
3382 | /* Set channel to physical mode */ | |
3383 | prmseo[i % 2] = prmseo[i % 2] << 2; | |
3384 | prmseo[i % 2] |= 1; | |
3385 | ||
3386 | } | |
3387 | ||
3388 | writel(prmseo[1], base->virtbase + D40_DREG_PRMSE); | |
3389 | writel(prmseo[0], base->virtbase + D40_DREG_PRMSO); | |
3390 | writel(activeo[1], base->virtbase + D40_DREG_ACTIVE); | |
3391 | writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); | |
3392 | ||
3393 | /* Write which interrupt to enable */ | |
3cb645dc | 3394 | writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en); |
8d318a50 LW |
3395 | |
3396 | /* Write which interrupt to clear */ | |
3cb645dc | 3397 | writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear); |
8d318a50 | 3398 | |
3cb645dc TL |
3399 | /* These are __initdata and cannot be accessed after init */ |
3400 | base->gen_dmac.init_reg = NULL; | |
3401 | base->gen_dmac.init_reg_size = 0; | |
8d318a50 LW |
3402 | } |
3403 | ||
508849ad LW |
3404 | static int __init d40_lcla_allocate(struct d40_base *base) |
3405 | { | |
026cbc42 | 3406 | struct d40_lcla_pool *pool = &base->lcla_pool; |
508849ad LW |
3407 | unsigned long *page_list; |
3408 | int i, j; | |
3409 | int ret = 0; | |
3410 | ||
3411 | /* | |
3412 | * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned. | |
3413 | * To fulfill this hardware requirement without wasting 256 KiB, | |
3414 | * we allocate pages until we get an aligned one. | |
3415 | */ | |
3416 | page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS, | |
3417 | GFP_KERNEL); | |
3418 | ||
3419 | if (!page_list) { | |
3420 | ret = -ENOMEM; | |
3421 | goto failure; | |
3422 | } | |
3423 | ||
3424 | /* Calculate how many pages are required */ | |
3425 | base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE; | |
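/*
 * Note that 'pages' is passed to __get_free_pages() below as the
 * allocation *order*, not a page count: with 8 physical channels and
 * 4 KiB pages this is 8192 / 4096 = 2, i.e. an order-2 (16 KiB) block
 * for the 8 KiB that are actually needed.
 */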
3426 | ||
3427 | for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) { | |
3428 | page_list[i] = __get_free_pages(GFP_KERNEL, | |
3429 | base->lcla_pool.pages); | |
3430 | if (!page_list[i]) { | |
3431 | ||
6db5a8ba RV |
3432 | d40_err(base->dev, "Failed to allocate %d pages.\n", |
3433 | base->lcla_pool.pages); | |
39375334 | 3434 | ret = -ENOMEM; |
508849ad LW |
3435 | |
3436 | for (j = 0; j < i; j++) | |
3437 | free_pages(page_list[j], base->lcla_pool.pages); | |
3438 | goto failure; | |
3439 | } | |
3440 | ||
3441 | if ((virt_to_phys((void *)page_list[i]) & | |
3442 | (LCLA_ALIGNMENT - 1)) == 0) | |
3443 | break; | |
3444 | } | |
3445 | ||
3446 | for (j = 0; j < i; j++) | |
3447 | free_pages(page_list[j], base->lcla_pool.pages); | |
3448 | ||
3449 | if (i < MAX_LCLA_ALLOC_ATTEMPTS) { | |
3450 | base->lcla_pool.base = (void *)page_list[i]; | |
3451 | } else { | |
767a9675 JA |
3452 | /* |
3453 | * After many attempts with no success finding the correct | |
3454 | * alignment, fall back to allocating a big buffer. | |
3455 | */ | |
508849ad LW |
3456 | dev_warn(base->dev, |
3457 | "[%s] Failed to get %d pages @ 18 bit align.\n", | |
3458 | __func__, base->lcla_pool.pages); | |
3459 | base->lcla_pool.base_unaligned = kmalloc(SZ_1K * | |
3460 | base->num_phy_chans + | |
3461 | LCLA_ALIGNMENT, | |
3462 | GFP_KERNEL); | |
3463 | if (!base->lcla_pool.base_unaligned) { | |
3464 | ret = -ENOMEM; | |
3465 | goto failure; | |
3466 | } | |
3467 | ||
3468 | base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned, | |
3469 | LCLA_ALIGNMENT); | |
3470 | } | |
3471 | ||
026cbc42 RV |
3472 | pool->dma_addr = dma_map_single(base->dev, pool->base, |
3473 | SZ_1K * base->num_phy_chans, | |
3474 | DMA_TO_DEVICE); | |
3475 | if (dma_mapping_error(base->dev, pool->dma_addr)) { | |
3476 | pool->dma_addr = 0; | |
3477 | ret = -ENOMEM; | |
3478 | goto failure; | |
3479 | } | |
3480 | ||
508849ad LW |
3481 | writel(virt_to_phys(base->lcla_pool.base), |
3482 | base->virtbase + D40_DREG_LCLA); | |
3483 | failure: | |
3484 | kfree(page_list); | |
3485 | return ret; | |
3486 | } | |
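/*
 * The allocate-until-aligned trick above, reduced to a minimal sketch for
 * reference. This is illustrative only (hence #if 0): the helper name and
 * the fixed-size attempt array are inventions of this sketch, not driver
 * code, and the array is kept on the stack purely for brevity.
 */
#if 0
static unsigned long alloc_phys_aligned(unsigned int order, unsigned long align)
{
	unsigned long tries[MAX_LCLA_ALLOC_ATTEMPTS];
	unsigned long aligned = 0;
	int i, j;

	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		tries[i] = __get_free_pages(GFP_KERNEL, order);
		if (!tries[i])
			break;		/* out of memory, give up */
		if (!(virt_to_phys((void *)tries[i]) & (align - 1))) {
			aligned = tries[i];	/* keep this one */
			break;
		}
	}
	/* Free every unaligned attempt; the kept block (if any) survives. */
	for (j = 0; j < i; j++)
		free_pages(tries[j], order);

	return aligned;	/* 0 if no aligned block was found */
}
#endif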
3487 | ||
1814a170 LJ |
3488 | static int __init d40_of_probe(struct platform_device *pdev, |
3489 | struct device_node *np) | |
3490 | { | |
3491 | struct stedma40_platform_data *pdata; | |
499c2bc3 | 3492 | int num_phy = 0, num_memcpy = 0, num_disabled = 0; |
cbbe13ea | 3493 | const __be32 *list; |
1814a170 LJ |
3494 | |
3495 | pdata = devm_kzalloc(&pdev->dev, | |
3496 | sizeof(struct stedma40_platform_data), | |
3497 | GFP_KERNEL); | |
3498 | if (!pdata) | |
3499 | return -ENOMEM; | |
3500 | ||
fd59f9e6 LJ |
3501 | /* If absent this value will be obtained from h/w. */ |
3502 | of_property_read_u32(np, "dma-channels", &num_phy); | |
3503 | if (num_phy > 0) | |
3504 | pdata->num_of_phy_chans = num_phy; | |
3505 | ||
a7dacb68 LJ |
3506 | list = of_get_property(np, "memcpy-channels", &num_memcpy); |
3507 | num_memcpy /= sizeof(*list); | |
3508 | ||
3509 | if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) { | |
3510 | d40_err(&pdev->dev, | |
3511 | "Invalid number of memcpy channels specified (%d)\n", | |
3512 | num_memcpy); | |
3513 | return -EINVAL; | |
3514 | } | |
3515 | pdata->num_of_memcpy_chans = num_memcpy; | |
3516 | ||
3517 | of_property_read_u32_array(np, "memcpy-channels", | |
3518 | dma40_memcpy_channels, | |
3519 | num_memcpy); | |
3520 | ||
499c2bc3 LJ |
3521 | list = of_get_property(np, "disabled-channels", &num_disabled); |
3522 | num_disabled /= sizeof(*list); | |
3523 | ||
5be2190a | 3524 | if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) { |
499c2bc3 LJ |
3525 | d40_err(&pdev->dev, |
3526 | "Invalid number of disabled channels specified (%d)\n", | |
3527 | num_disabled); | |
3528 | return -EINVAL; | |
3529 | } | |
3530 | ||
3531 | of_property_read_u32_array(np, "disabled-channels", | |
3532 | pdata->disabled_channels, | |
3533 | num_disabled); | |
3534 | pdata->disabled_channels[num_disabled] = -1; | |
3535 | ||
1814a170 LJ |
3536 | pdev->dev.platform_data = pdata; |
3537 | ||
3538 | return 0; | |
3539 | } | |
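/*
 * For reference, a device tree node as consumed by d40_of_probe() above
 * (the property values here are illustrative, not board data):
 *
 *	dma: dma-controller@801c0000 {
 *		compatible = "stericsson,dma40";
 *		reg = <0x801c0000 0x1000>, <0x40010000 0x800>;
 *		reg-names = "base", "lcpa";
 *		dma-channels = <8>;
 *		memcpy-channels = <56 57 58 59 60>;
 *		disabled-channels = <12>;
 *	};
 */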
3540 | ||
8d318a50 LW |
3541 | static int __init d40_probe(struct platform_device *pdev) |
3542 | { | |
d4adcc01 | 3543 | struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev); |
1814a170 | 3544 | struct device_node *np = pdev->dev.of_node; |
8d318a50 | 3545 | int ret = -ENOENT; |
a9bae06d | 3546 | struct d40_base *base; |
aeb8974a | 3547 | struct resource *res; |
8d318a50 LW |
3548 | int num_reserved_chans; |
3549 | u32 val; | |
3550 | ||
1814a170 LJ |
3551 | if (!plat_data) { |
3552 | if (np) { | |
fe146473 | 3553 | if (d40_of_probe(pdev, np)) { |
1814a170 | 3554 | ret = -ENOMEM; |
a9bae06d | 3555 | goto report_failure; |
1814a170 LJ |
3556 | } |
3557 | } else { | |
3558 | d40_err(&pdev->dev, "No pdata or Device Tree provided\n"); | |
a9bae06d | 3559 | goto report_failure; |
1814a170 LJ |
3560 | } |
3561 | } | |
8d318a50 | 3562 | |
1814a170 | 3563 | base = d40_hw_detect_init(pdev); |
8d318a50 | 3564 | if (!base) |
a9bae06d | 3565 | goto report_failure; |
8d318a50 LW |
3566 | |
3567 | num_reserved_chans = d40_phy_res_init(base); | |
3568 | ||
3569 | platform_set_drvdata(pdev, base); | |
3570 | ||
3571 | spin_lock_init(&base->interrupt_lock); | |
3572 | spin_lock_init(&base->execmd_lock); | |
3573 | ||
3574 | /* Get IO for logical channel parameter address */ | |
3575 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); | |
3576 | if (!res) { | |
3577 | ret = -ENOENT; | |
6db5a8ba | 3578 | d40_err(&pdev->dev, "No \"lcpa\" memory resource\n"); |
8d318a50 LW |
3579 | goto failure; |
3580 | } | |
3581 | base->lcpa_size = resource_size(res); | |
3582 | base->phy_lcpa = res->start; | |
3583 | ||
3584 | if (request_mem_region(res->start, resource_size(res), | |
3585 | D40_NAME " I/O lcpa") == NULL) { | |
3586 | ret = -EBUSY; | |
3a919d5b | 3587 | d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res); |
8d318a50 LW |
3588 | goto failure; |
3589 | } | |
3590 | ||
3591 | /* We make use of ESRAM memory for this. */ | |
3592 | val = readl(base->virtbase + D40_DREG_LCPA); | |
3593 | if (res->start != val && val != 0) { | |
3594 | dev_warn(&pdev->dev, | |
3a919d5b FE |
3595 | "[%s] Mismatch LCPA dma 0x%x, def %pa\n", |
3596 | __func__, val, &res->start); | |
8d318a50 LW |
3597 | } else |
3598 | writel(res->start, base->virtbase + D40_DREG_LCPA); | |
3599 | ||
3600 | base->lcpa_base = ioremap(res->start, resource_size(res)); | |
3601 | if (!base->lcpa_base) { | |
3602 | ret = -ENOMEM; | |
6db5a8ba | 3603 | d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); |
8d318a50 LW |
3604 | goto failure; |
3605 | } | |
28c7a19d N |
3606 | /* If LCLA is to be located in ESRAM, we don't need to allocate it here */ | |
3607 | if (base->plat_data->use_esram_lcla) { | |
3608 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | |
3609 | "lcla_esram"); | |
3610 | if (!res) { | |
3611 | ret = -ENOENT; | |
3612 | d40_err(&pdev->dev, | |
3613 | "No \"lcla_esram\" memory resource\n"); | |
3614 | goto failure; | |
3615 | } | |
3616 | base->lcla_pool.base = ioremap(res->start, | |
3617 | resource_size(res)); | |
3618 | if (!base->lcla_pool.base) { | |
3619 | ret = -ENOMEM; | |
3620 | d40_err(&pdev->dev, "Failed to ioremap LCLA region\n"); | |
3621 | goto failure; | |
3622 | } | |
3623 | writel(res->start, base->virtbase + D40_DREG_LCLA); | |
8d318a50 | 3624 | |
28c7a19d N |
3625 | } else { |
3626 | ret = d40_lcla_allocate(base); | |
3627 | if (ret) { | |
3628 | d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); | |
3629 | goto failure; | |
3630 | } | |
8d318a50 LW |
3631 | } |
3632 | ||
3633 | spin_lock_init(&base->lcla_pool.lock); | |
3634 | ||
8d318a50 LW |
3635 | base->irq = platform_get_irq(pdev, 0); |
3636 | ||
3637 | ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); | |
8d318a50 | 3638 | if (ret) { |
6db5a8ba | 3639 | d40_err(&pdev->dev, "No IRQ defined\n"); |
8d318a50 LW |
3640 | goto failure; |
3641 | } | |
3642 | ||
28c7a19d N |
3643 | if (base->plat_data->use_esram_lcla) { |
3644 | ||
3645 | base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); | |
3646 | if (IS_ERR(base->lcpa_regulator)) { | |
3647 | d40_err(&pdev->dev, "Failed to get lcpa_regulator\n"); | |
8581bbcd | 3648 | ret = PTR_ERR(base->lcpa_regulator); |
28c7a19d N |
3649 | base->lcpa_regulator = NULL; |
3650 | goto failure; | |
3651 | } | |
3652 | ||
3653 | ret = regulator_enable(base->lcpa_regulator); | |
3654 | if (ret) { | |
3655 | d40_err(&pdev->dev, | |
3656 | "Failed to enable lcpa_regulator\n"); | |
3657 | regulator_put(base->lcpa_regulator); | |
3658 | base->lcpa_regulator = NULL; | |
3659 | goto failure; | |
3660 | } | |
3661 | } | |
3662 | ||
2dafca17 UH |
3663 | writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); |
3664 | ||
3665 | pm_runtime_irq_safe(base->dev); | |
3666 | pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY); | |
3667 | pm_runtime_use_autosuspend(base->dev); | |
3668 | pm_runtime_mark_last_busy(base->dev); | |
3669 | pm_runtime_set_active(base->dev); | |
3670 | pm_runtime_enable(base->dev); | |
3671 | ||
8581bbcd WY |
3672 | ret = d40_dmaengine_init(base, num_reserved_chans); |
3673 | if (ret) | |
8d318a50 LW |
3674 | goto failure; |
3675 | ||
b96710e5 | 3676 | base->dev->dma_parms = &base->dma_parms; |
8581bbcd WY |
3677 | ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE); |
3678 | if (ret) { | |
b96710e5 PF |
3679 | d40_err(&pdev->dev, "Failed to set dma max seg size\n"); |
3680 | goto failure; | |
3681 | } | |
3682 | ||
8d318a50 LW |
3683 | d40_hw_init(base); |
3684 | ||
fa332de5 | 3685 | if (np) { |
8581bbcd WY |
3686 | ret = of_dma_controller_register(np, d40_xlate, NULL); |
3687 | if (ret) | |
fa332de5 LJ |
3688 | dev_err(&pdev->dev, |
3689 | "could not register of_dma_controller\n"); | |
3690 | } | |
3691 | ||
8d318a50 LW |
3692 | dev_info(base->dev, "initialized\n"); |
3693 | return 0; | |
3694 | ||
3695 | failure: | |
a9bae06d ME |
3696 | kmem_cache_destroy(base->desc_slab); |
3697 | if (base->virtbase) | |
3698 | iounmap(base->virtbase); | |
026cbc42 | 3699 | |
a9bae06d ME |
3700 | if (base->lcla_pool.base && base->plat_data->use_esram_lcla) { |
3701 | iounmap(base->lcla_pool.base); | |
3702 | base->lcla_pool.base = NULL; | |
3703 | } | |
28c7a19d | 3704 | |
a9bae06d ME |
3705 | if (base->lcla_pool.dma_addr) |
3706 | dma_unmap_single(base->dev, base->lcla_pool.dma_addr, | |
3707 | SZ_1K * base->num_phy_chans, | |
3708 | DMA_TO_DEVICE); | |
8d318a50 | 3709 | |
a9bae06d ME |
3710 | if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) |
3711 | free_pages((unsigned long)base->lcla_pool.base, | |
3712 | base->lcla_pool.pages); | |
28c7a19d | 3713 | |
a9bae06d ME |
3714 | kfree(base->lcla_pool.base_unaligned); |
3715 | ||
3716 | if (base->phy_lcpa) | |
3717 | release_mem_region(base->phy_lcpa, | |
3718 | base->lcpa_size); | |
3719 | if (base->phy_start) | |
3720 | release_mem_region(base->phy_start, | |
3721 | base->phy_size); | |
3722 | if (base->clk) { | |
3723 | clk_disable_unprepare(base->clk); | |
3724 | clk_put(base->clk); | |
3725 | } | |
3726 | ||
3727 | if (base->lcpa_regulator) { | |
3728 | regulator_disable(base->lcpa_regulator); | |
3729 | regulator_put(base->lcpa_regulator); | |
8d318a50 LW |
3730 | } |
3731 | ||
a9bae06d ME |
3732 | kfree(base->lcla_pool.alloc_map); |
3733 | kfree(base->lookup_log_chans); | |
3734 | kfree(base->lookup_phy_chans); | |
3735 | kfree(base->phy_res); | |
3736 | kfree(base); | |
3737 | report_failure: | |
6db5a8ba | 3738 | d40_err(&pdev->dev, "probe failed\n"); |
8d318a50 LW |
3739 | return ret; |
3740 | } | |
3741 | ||
1814a170 LJ |
3742 | static const struct of_device_id d40_match[] = { |
3743 | { .compatible = "stericsson,dma40", }, | |
3744 | {} | |
3745 | }; | |
3746 | ||
8d318a50 LW |
3747 | static struct platform_driver d40_driver = { |
3748 | .driver = { | |
8d318a50 | 3749 | .name = D40_NAME, |
123e4ca1 | 3750 | .pm = &dma40_pm_ops, |
1814a170 | 3751 | .of_match_table = d40_match, |
8d318a50 LW |
3752 | }, |
3753 | }; | |
3754 | ||
cb9ab2d8 | 3755 | static int __init stedma40_init(void) |
8d318a50 LW |
3756 | { |
3757 | return platform_driver_probe(&d40_driver, d40_probe); | |
3758 | } | |
a0eb221a | 3759 | subsys_initcall(stedma40_init); |