// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Applied Micro X-Gene SoC DMA engine Driver
 *
 * Copyright (c) 2015, Applied Micro Circuits Corporation
 * Authors: Rameshwar Prasad Sahu <[email protected]>
 *	    Loc Ho <[email protected]>
 *
 * NOTE: PM support is currently not available.
 */

#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of_device.h>

#include "dmaengine.h"

/* X-Gene DMA ring csr registers and bit definitions */
#define XGENE_DMA_RING_CONFIG			0x04
#define XGENE_DMA_RING_ENABLE			BIT(31)
#define XGENE_DMA_RING_ID			0x08
#define XGENE_DMA_RING_ID_SETUP(v)		((v) | BIT(31))
#define XGENE_DMA_RING_ID_BUF			0x0C
#define XGENE_DMA_RING_ID_BUF_SETUP(v)		(((v) << 9) | BIT(21))
#define XGENE_DMA_RING_THRESLD0_SET1		0x30
#define XGENE_DMA_RING_THRESLD0_SET1_VAL	0X64
#define XGENE_DMA_RING_THRESLD1_SET1		0x34
#define XGENE_DMA_RING_THRESLD1_SET1_VAL	0xC8
#define XGENE_DMA_RING_HYSTERESIS		0x68
#define XGENE_DMA_RING_HYSTERESIS_VAL		0xFFFFFFFF
#define XGENE_DMA_RING_STATE			0x6C
#define XGENE_DMA_RING_STATE_WR_BASE		0x70
#define XGENE_DMA_RING_NE_INT_MODE		0x017C
#define XGENE_DMA_RING_NE_INT_MODE_SET(m, v)	\
	((m) = ((m) & ~BIT(31 - (v))) | BIT(31 - (v)))
#define XGENE_DMA_RING_NE_INT_MODE_RESET(m, v)	\
	((m) &= (~BIT(31 - (v))))
#define XGENE_DMA_RING_CLKEN			0xC208
#define XGENE_DMA_RING_SRST			0xC200
#define XGENE_DMA_RING_MEM_RAM_SHUTDOWN		0xD070
#define XGENE_DMA_RING_BLK_MEM_RDY		0xD074
#define XGENE_DMA_RING_BLK_MEM_RDY_VAL		0xFFFFFFFF
#define XGENE_DMA_RING_ID_GET(owner, num)	(((owner) << 6) | (num))
#define XGENE_DMA_RING_DST_ID(v)		((1 << 10) | (v))
#define XGENE_DMA_RING_CMD_OFFSET		0x2C
#define XGENE_DMA_RING_CMD_BASE_OFFSET(v)	((v) << 6)
#define XGENE_DMA_RING_COHERENT_SET(m)		\
	(((u32 *)(m))[2] |= BIT(4))
#define XGENE_DMA_RING_ADDRL_SET(m, v)		\
	(((u32 *)(m))[2] |= (((v) >> 8) << 5))
#define XGENE_DMA_RING_ADDRH_SET(m, v)		\
	(((u32 *)(m))[3] |= ((v) >> 35))
#define XGENE_DMA_RING_ACCEPTLERR_SET(m)	\
	(((u32 *)(m))[3] |= BIT(19))
#define XGENE_DMA_RING_SIZE_SET(m, v)		\
	(((u32 *)(m))[3] |= ((v) << 23))
#define XGENE_DMA_RING_RECOMBBUF_SET(m)		\
	(((u32 *)(m))[3] |= BIT(27))
#define XGENE_DMA_RING_RECOMTIMEOUTL_SET(m)	\
	(((u32 *)(m))[3] |= (0x7 << 28))
#define XGENE_DMA_RING_RECOMTIMEOUTH_SET(m)	\
	(((u32 *)(m))[4] |= 0x3)
#define XGENE_DMA_RING_SELTHRSH_SET(m)		\
	(((u32 *)(m))[4] |= BIT(3))
#define XGENE_DMA_RING_TYPE_SET(m, v)		\
	(((u32 *)(m))[4] |= ((v) << 19))

/* X-Gene DMA device csr registers and bit definitions */
#define XGENE_DMA_IPBRR				0x0
#define XGENE_DMA_DEV_ID_RD(v)			((v) & 0x00000FFF)
#define XGENE_DMA_BUS_ID_RD(v)			(((v) >> 12) & 3)
#define XGENE_DMA_REV_NO_RD(v)			(((v) >> 14) & 3)
#define XGENE_DMA_GCR				0x10
#define XGENE_DMA_CH_SETUP(v)			\
	((v) = ((v) & ~0x000FFFFF) | 0x000AAFFF)
#define XGENE_DMA_ENABLE(v)			((v) |= BIT(31))
#define XGENE_DMA_DISABLE(v)			((v) &= ~BIT(31))
#define XGENE_DMA_RAID6_CONT			0x14
#define XGENE_DMA_RAID6_MULTI_CTRL(v)		((v) << 24)
#define XGENE_DMA_INT				0x70
#define XGENE_DMA_INT_MASK			0x74
#define XGENE_DMA_INT_ALL_MASK			0xFFFFFFFF
#define XGENE_DMA_INT_ALL_UNMASK		0x0
#define XGENE_DMA_INT_MASK_SHIFT		0x14
#define XGENE_DMA_RING_INT0_MASK		0x90A0
#define XGENE_DMA_RING_INT1_MASK		0x90A8
#define XGENE_DMA_RING_INT2_MASK		0x90B0
#define XGENE_DMA_RING_INT3_MASK		0x90B8
#define XGENE_DMA_RING_INT4_MASK		0x90C0
#define XGENE_DMA_CFG_RING_WQ_ASSOC		0x90E0
#define XGENE_DMA_ASSOC_RING_MNGR1		0xFFFFFFFF
#define XGENE_DMA_MEM_RAM_SHUTDOWN		0xD070
#define XGENE_DMA_BLK_MEM_RDY			0xD074
#define XGENE_DMA_BLK_MEM_RDY_VAL		0xFFFFFFFF
#define XGENE_DMA_RING_CMD_SM_OFFSET		0x8000

/* X-Gene SoC EFUSE csr register and bit definition */
#define XGENE_SOC_JTAG1_SHADOW			0x18
#define XGENE_DMA_PQ_DISABLE_MASK		BIT(13)

/* X-Gene DMA Descriptor format */
#define XGENE_DMA_DESC_NV_BIT			BIT_ULL(50)
#define XGENE_DMA_DESC_IN_BIT			BIT_ULL(55)
#define XGENE_DMA_DESC_C_BIT			BIT_ULL(63)
#define XGENE_DMA_DESC_DR_BIT			BIT_ULL(61)
#define XGENE_DMA_DESC_ELERR_POS		46
#define XGENE_DMA_DESC_RTYPE_POS		56
#define XGENE_DMA_DESC_LERR_POS			60
#define XGENE_DMA_DESC_BUFLEN_POS		48
#define XGENE_DMA_DESC_HOENQ_NUM_POS		48
#define XGENE_DMA_DESC_ELERR_RD(m)		\
	(((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3)
#define XGENE_DMA_DESC_LERR_RD(m)		\
	(((m) >> XGENE_DMA_DESC_LERR_POS) & 0x7)
#define XGENE_DMA_DESC_STATUS(elerr, lerr)	\
	(((elerr) << 4) | (lerr))
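
/*
 * The combined status value ((elerr << 4) | lerr) is used directly as an
 * index into xgene_dma_desc_err[] during completion cleanup, e.g. an
 * extended error of 1 and a local error of 1 give 0x11 (ERR_CRC_ERR).
 */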

/* X-Gene DMA descriptor empty s/w signature */
#define XGENE_DMA_DESC_EMPTY_SIGNATURE		~0ULL

/* X-Gene DMA configurable parameters defines */
#define XGENE_DMA_RING_NUM		512
#define XGENE_DMA_BUFNUM		0x0
#define XGENE_DMA_CPU_BUFNUM		0x18
#define XGENE_DMA_RING_OWNER_DMA	0x03
#define XGENE_DMA_RING_OWNER_CPU	0x0F
#define XGENE_DMA_RING_TYPE_REGULAR	0x01
#define XGENE_DMA_RING_WQ_DESC_SIZE	32	/* 32 Bytes */
#define XGENE_DMA_RING_NUM_CONFIG	5
#define XGENE_DMA_MAX_CHANNEL		4
#define XGENE_DMA_XOR_CHANNEL		0
#define XGENE_DMA_PQ_CHANNEL		1
#define XGENE_DMA_MAX_BYTE_CNT		0x4000	/* 16 KB */
#define XGENE_DMA_MAX_64B_DESC_BYTE_CNT	0x14000	/* 80 KB */
#define XGENE_DMA_MAX_XOR_SRC		5
#define XGENE_DMA_16K_BUFFER_LEN_CODE	0x0
#define XGENE_DMA_INVALID_LEN_CODE	0x7800000000000000ULL

/* X-Gene DMA descriptor error codes */
#define ERR_DESC_AXI		0x01
#define ERR_BAD_DESC		0x02
#define ERR_READ_DATA_AXI	0x03
#define ERR_WRITE_DATA_AXI	0x04
#define ERR_FBP_TIMEOUT		0x05
#define ERR_ECC			0x06
#define ERR_DIFF_SIZE		0x08
#define ERR_SCT_GAT_LEN		0x09
#define ERR_CRC_ERR		0x11
#define ERR_CHKSUM		0x12
#define ERR_DIF			0x13

/* X-Gene DMA error interrupt codes */
#define ERR_DIF_SIZE_INT	0x0
#define ERR_GS_ERR_INT		0x1
#define ERR_FPB_TIMEO_INT	0x2
#define ERR_WFIFO_OVF_INT	0x3
#define ERR_RFIFO_OVF_INT	0x4
#define ERR_WR_TIMEO_INT	0x5
#define ERR_RD_TIMEO_INT	0x6
#define ERR_WR_ERR_INT		0x7
#define ERR_RD_ERR_INT		0x8
#define ERR_BAD_DESC_INT	0x9
#define ERR_DESC_DST_INT	0xA
#define ERR_DESC_SRC_INT	0xB

/* X-Gene DMA flyby operation code */
#define FLYBY_2SRC_XOR		0x80
#define FLYBY_3SRC_XOR		0x90
#define FLYBY_4SRC_XOR		0xA0
#define FLYBY_5SRC_XOR		0xB0

/* X-Gene DMA SW descriptor flags */
#define XGENE_DMA_FLAG_64B_DESC	BIT(0)

/* Define to dump X-Gene DMA descriptor */
#define XGENE_DMA_DESC_DUMP(desc, m)	\
	print_hex_dump(KERN_ERR, (m),	\
		       DUMP_PREFIX_ADDRESS, 16, 8, (desc), 32, 0)

#define to_dma_desc_sw(tx)		\
	container_of(tx, struct xgene_dma_desc_sw, tx)
#define to_dma_chan(dchan)		\
	container_of(dchan, struct xgene_dma_chan, dma_chan)

#define chan_dbg(chan, fmt, arg...)	\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)	\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

struct xgene_dma_desc_hw {
	__le64 m0;
	__le64 m1;
	__le64 m2;
	__le64 m3;
};

enum xgene_dma_ring_cfgsize {
	XGENE_DMA_RING_CFG_SIZE_512B,
	XGENE_DMA_RING_CFG_SIZE_2KB,
	XGENE_DMA_RING_CFG_SIZE_16KB,
	XGENE_DMA_RING_CFG_SIZE_64KB,
	XGENE_DMA_RING_CFG_SIZE_512KB,
	XGENE_DMA_RING_CFG_SIZE_INVALID
};

struct xgene_dma_ring {
	struct xgene_dma *pdma;
	u8 buf_num;
	u16 id;
	u16 num;
	u16 head;
	u16 owner;
	u16 slots;
	u16 dst_ring_num;
	u32 size;
	void __iomem *cmd;
	void __iomem *cmd_base;
	dma_addr_t desc_paddr;
	u32 state[XGENE_DMA_RING_NUM_CONFIG];
	enum xgene_dma_ring_cfgsize cfgsize;
	union {
		void *desc_vaddr;
		struct xgene_dma_desc_hw *desc_hw;
	};
};

struct xgene_dma_desc_sw {
	struct xgene_dma_desc_hw desc1;
	struct xgene_dma_desc_hw desc2;
	u32 flags;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor tx;
};

/**
 * struct xgene_dma_chan - internal representation of an X-Gene DMA channel
 * @dma_chan: dmaengine channel object member
 * @pdma: X-Gene DMA device structure reference
 * @dev: struct device reference for dma mapping api
 * @id: raw id of this channel
 * @rx_irq: channel IRQ
 * @name: name of X-Gene DMA channel
 * @lock: serializes enqueue/dequeue operations to the descriptor pool
 * @pending: number of transaction requests pushed to the DMA controller for
 *	execution, but still waiting for completion
 * @max_outstanding: max number of outstanding requests we can push to channel
 * @ld_pending: descriptors which are queued to run, but have not yet been
 *	submitted to the hardware for execution
 * @ld_running: descriptors which are currently being executed by the hardware
 * @ld_completed: descriptors which have finished execution by the hardware.
 *	These descriptors have already had their cleanup actions run. They
 *	are waiting for the ACK bit to be set by the async tx API.
 * @desc_pool: descriptor pool for DMA operations
 * @tasklet: bottom half where all completed descriptors are cleaned up
 * @tx_ring: transmit ring descriptor that we use to prepare actual
 *	descriptors for further executions
 * @rx_ring: receive ring descriptor that we use to get completed DMA
 *	descriptors during cleanup time
 */
struct xgene_dma_chan {
	struct dma_chan dma_chan;
	struct xgene_dma *pdma;
	struct device *dev;
	int id;
	int rx_irq;
	char name[10];
	spinlock_t lock;
	int pending;
	int max_outstanding;
	struct list_head ld_pending;
	struct list_head ld_running;
	struct list_head ld_completed;
	struct dma_pool *desc_pool;
	struct tasklet_struct tasklet;
	struct xgene_dma_ring tx_ring;
	struct xgene_dma_ring rx_ring;
};

/**
 * struct xgene_dma - internal representation of an X-Gene DMA device
 * @dev: reference to this device's struct device
 * @clk: reference to this device's clock
 * @err_irq: DMA error irq number
 * @ring_num: start id number for DMA ring
 * @csr_dma: base for DMA register access
 * @csr_ring: base for DMA ring register access
 * @csr_ring_cmd: base for DMA ring command register access
 * @csr_efuse: base for efuse register access
 * @dma_dev: embedded struct dma_device
 * @chan: reference to X-Gene DMA channels
 */
struct xgene_dma {
	struct device *dev;
	struct clk *clk;
	int err_irq;
	int ring_num;
	void __iomem *csr_dma;
	void __iomem *csr_ring;
	void __iomem *csr_ring_cmd;
	void __iomem *csr_efuse;
	struct dma_device dma_dev[XGENE_DMA_MAX_CHANNEL];
	struct xgene_dma_chan chan[XGENE_DMA_MAX_CHANNEL];
};

static const char * const xgene_dma_desc_err[] = {
	[ERR_DESC_AXI] = "AXI error when reading src/dst link list",
	[ERR_BAD_DESC] = "ERR or El_ERR fields not set to zero in desc",
	[ERR_READ_DATA_AXI] = "AXI error when reading data",
	[ERR_WRITE_DATA_AXI] = "AXI error when writing data",
	[ERR_FBP_TIMEOUT] = "Timeout on bufpool fetch",
	[ERR_ECC] = "ECC double bit error",
	[ERR_DIFF_SIZE] = "Bufpool too small to hold all the DIF result",
	[ERR_SCT_GAT_LEN] = "Gather and scatter data length not same",
	[ERR_CRC_ERR] = "CRC error",
	[ERR_CHKSUM] = "Checksum error",
	[ERR_DIF] = "DIF error",
};

static const char * const xgene_dma_err[] = {
	[ERR_DIF_SIZE_INT] = "DIF size error",
	[ERR_GS_ERR_INT] = "Gather scatter not same size error",
	[ERR_FPB_TIMEO_INT] = "Free pool time out error",
	[ERR_WFIFO_OVF_INT] = "Write FIFO over flow error",
	[ERR_RFIFO_OVF_INT] = "Read FIFO over flow error",
	[ERR_WR_TIMEO_INT] = "Write time out error",
	[ERR_RD_TIMEO_INT] = "Read time out error",
	[ERR_WR_ERR_INT] = "HBF bus write error",
	[ERR_RD_ERR_INT] = "HBF bus read error",
	[ERR_BAD_DESC_INT] = "Ring descriptor HE0 not set error",
	[ERR_DESC_DST_INT] = "HFB reading dst link address error",
	[ERR_DESC_SRC_INT] = "HFB reading src link address error",
};

static bool is_pq_enabled(struct xgene_dma *pdma)
{
	u32 val;

	val = ioread32(pdma->csr_efuse + XGENE_SOC_JTAG1_SHADOW);
	return !(val & XGENE_DMA_PQ_DISABLE_MASK);
}

static u64 xgene_dma_encode_len(size_t len)
{
	return (len < XGENE_DMA_MAX_BYTE_CNT) ?
		((u64)len << XGENE_DMA_DESC_BUFLEN_POS) :
		XGENE_DMA_16K_BUFFER_LEN_CODE;
}
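
/*
 * Callers clamp each buffer to at most XGENE_DMA_MAX_BYTE_CNT (16 KB), so a
 * length below 16 KB is encoded literally at XGENE_DMA_DESC_BUFLEN_POS while
 * an exact 16 KB chunk uses the special XGENE_DMA_16K_BUFFER_LEN_CODE (0x0).
 */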

static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
{
	static u8 flyby_type[] = {
		FLYBY_2SRC_XOR, /* Dummy */
		FLYBY_2SRC_XOR, /* Dummy */
		FLYBY_2SRC_XOR,
		FLYBY_3SRC_XOR,
		FLYBY_4SRC_XOR,
		FLYBY_5SRC_XOR
	};

	return flyby_type[src_cnt];
}

static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
				     dma_addr_t *paddr)
{
	size_t nbytes = (*len < XGENE_DMA_MAX_BYTE_CNT) ?
			*len : XGENE_DMA_MAX_BYTE_CNT;

	*ext8 |= cpu_to_le64(*paddr);
	*ext8 |= cpu_to_le64(xgene_dma_encode_len(nbytes));
	*len -= nbytes;
	*paddr += nbytes;
}
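
/*
 * Each call describes at most one 16 KB chunk of a source: the buffer
 * address is ORed into the 64-bit descriptor word and the encoded length
 * lands in the BUFLEN field, then *len and *paddr are advanced so the
 * prep routines can loop until the whole request has been covered.
 */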

static __le64 *xgene_dma_lookup_ext8(struct xgene_dma_desc_hw *desc, int idx)
{
	switch (idx) {
	case 0:
		return &desc->m1;
	case 1:
		return &desc->m0;
	case 2:
		return &desc->m3;
	case 3:
		return &desc->m2;
	default:
		pr_err("Invalid dma descriptor index\n");
	}

	return NULL;
}

static void xgene_dma_init_desc(struct xgene_dma_desc_hw *desc,
				u16 dst_ring_num)
{
	desc->m0 |= cpu_to_le64(XGENE_DMA_DESC_IN_BIT);
	desc->m0 |= cpu_to_le64((u64)XGENE_DMA_RING_OWNER_DMA <<
				XGENE_DMA_DESC_RTYPE_POS);
	desc->m1 |= cpu_to_le64(XGENE_DMA_DESC_C_BIT);
	desc->m3 |= cpu_to_le64((u64)dst_ring_num <<
				XGENE_DMA_DESC_HOENQ_NUM_POS);
}

static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
				    struct xgene_dma_desc_sw *desc_sw,
				    dma_addr_t *dst, dma_addr_t *src,
				    u32 src_cnt, size_t *nbytes,
				    const u8 *scf)
{
	struct xgene_dma_desc_hw *desc1, *desc2;
	size_t len = *nbytes;
	int i;

	desc1 = &desc_sw->desc1;
	desc2 = &desc_sw->desc2;

	/* Initialize DMA descriptor */
	xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);

	/* Set destination address */
	desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
	desc1->m3 |= cpu_to_le64(*dst);

	/* We have multiple source addresses, so we need to set the NV bit */
	desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);

	/* Set flyby opcode */
	desc1->m2 |= cpu_to_le64(xgene_dma_encode_xor_flyby(src_cnt));

	/* Set 1st to 5th source addresses */
	for (i = 0; i < src_cnt; i++) {
		len = *nbytes;
		xgene_dma_set_src_buffer((i == 0) ? &desc1->m1 :
					 xgene_dma_lookup_ext8(desc2, i - 1),
					 &len, &src[i]);
		desc1->m2 |= cpu_to_le64((scf[i] << ((i + 1) * 8)));
	}

	/* Update meta data */
	*nbytes = len;
	*dst += XGENE_DMA_MAX_BYTE_CNT;

	/* We always need a 64B descriptor to perform xor or pq operations */
	desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
}
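
/*
 * One call handles at most XGENE_DMA_MAX_BYTE_CNT (16 KB) per source and
 * advances every source address plus the destination by that amount; the
 * prep_xor/prep_pq callers below keep allocating descriptors in a loop until
 * *nbytes reaches zero, so larger requests become a chain of 16 KB operations.
 */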

static dma_cookie_t xgene_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xgene_dma_desc_sw *desc;
	struct xgene_dma_chan *chan;
	dma_cookie_t cookie;

	if (unlikely(!tx))
		return -EINVAL;

	chan = to_dma_chan(tx->chan);
	desc = to_dma_desc_sw(tx);

	spin_lock_bh(&chan->lock);

	cookie = dma_cookie_assign(tx);

	/* Add this transaction list onto the tail of the pending queue */
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);

	spin_unlock_bh(&chan->lock);

	return cookie;
}

static void xgene_dma_clean_descriptor(struct xgene_dma_chan *chan,
				       struct xgene_dma_desc_sw *desc)
{
	list_del(&desc->node);
	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
}

static struct xgene_dma_desc_sw *xgene_dma_alloc_descriptor(
				 struct xgene_dma_chan *chan)
{
	struct xgene_dma_desc_sw *desc;
	dma_addr_t phys;

	desc = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
	if (!desc) {
		chan_err(chan, "Failed to allocate LDs\n");
		return NULL;
	}

	INIT_LIST_HEAD(&desc->tx_list);
	desc->tx.phys = phys;
	desc->tx.tx_submit = xgene_dma_tx_submit;
	dma_async_tx_descriptor_init(&desc->tx, &chan->dma_chan);

	chan_dbg(chan, "LD %p allocated\n", desc);

	return desc;
}

/**
 * xgene_dma_clean_completed_descriptor - free all descriptors which
 * have been completed and acked
 * @chan: X-Gene DMA channel
 *
 * This function is used on all completed and acked descriptors.
 */
static void xgene_dma_clean_completed_descriptor(struct xgene_dma_chan *chan)
{
	struct xgene_dma_desc_sw *desc, *_desc;

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) {
		if (async_tx_test_ack(&desc->tx))
			xgene_dma_clean_descriptor(chan, desc);
	}
}

/**
 * xgene_dma_run_tx_complete_actions - cleanup a single link descriptor
 * @chan: X-Gene DMA channel
 * @desc: descriptor to cleanup and free
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies.
 */
static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan,
					      struct xgene_dma_desc_sw *desc)
{
	struct dma_async_tx_descriptor *tx = &desc->tx;

	/*
	 * If this is not the last transaction in the group,
	 * then no need to complete cookie and run any callback as
	 * this is not the tx_descriptor which had been sent to caller
	 * of this DMA request
	 */

	if (tx->cookie == 0)
		return;

	dma_cookie_complete(tx);
	dma_descriptor_unmap(tx);

	/* Run the link descriptor callback function */
	dmaengine_desc_get_callback_invoke(tx, NULL);

	/* Run any dependencies */
	dma_run_dependencies(tx);
}

/**
 * xgene_dma_clean_running_descriptor - move the completed descriptor from
 * ld_running to ld_completed
 * @chan: X-Gene DMA channel
 * @desc: the descriptor which is completed
 *
 * Free the descriptor directly if acked by async_tx api,
 * else move it to queue ld_completed.
 */
static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
					       struct xgene_dma_desc_sw *desc)
{
	/* Remove from the list of running transactions */
	list_del(&desc->node);

	/*
	 * the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->tx)) {
		/*
		 * Move this descriptor to the list of descriptors which is
		 * completed, but still awaiting the 'ack' bit to be set.
		 */
		list_add_tail(&desc->node, &chan->ld_completed);
		return;
	}

	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
}

static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
				    struct xgene_dma_desc_sw *desc_sw)
{
	struct xgene_dma_ring *ring = &chan->tx_ring;
	struct xgene_dma_desc_hw *desc_hw;

	/* Get hw descriptor from DMA tx ring */
	desc_hw = &ring->desc_hw[ring->head];

	/*
	 * Increment the head count to point next
	 * descriptor for next time
	 */
	if (++ring->head == ring->slots)
		ring->head = 0;

	/* Copy prepared sw descriptor data to hw descriptor */
	memcpy(desc_hw, &desc_sw->desc1, sizeof(*desc_hw));

	/*
	 * Check if we have prepared 64B descriptor,
	 * in this case we need one more hw descriptor
	 */
	if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) {
		desc_hw = &ring->desc_hw[ring->head];

		if (++ring->head == ring->slots)
			ring->head = 0;

		memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
	}

	/* Increment the pending transaction count */
	chan->pending += ((desc_sw->flags &
			  XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);

	/* Notify the hw that we have descriptor ready for execution */
	iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
		  2 : 1, ring->cmd);
}
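
/*
 * A 64B (XOR/PQ) operation occupies two 32-byte ring slots, which is why
 * both the pending count and the value written to the ring command register
 * are 2 in that case and 1 for a plain 32B descriptor.
 */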

/**
 * xgene_chan_xfer_ld_pending - push any pending transactions to hw
 * @chan: X-Gene DMA channel
 *
 * LOCKING: must hold chan->lock
 */
static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
{
	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "No pending LDs\n");
		return;
	}

	/*
	 * Move elements from the queue of pending transactions onto the list
	 * of running transactions and push it to hw for further executions
	 */
	list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_pending, node) {
		/*
		 * Check if we have pushed as many transactions to hw as it
		 * can take; if so, stop here and push the remaining elements
		 * from the pending ld queue after completing some of the
		 * descriptors that we have already pushed
		 */
		if (chan->pending >= chan->max_outstanding)
			return;

		xgene_chan_xfer_request(chan, desc_sw);

		/*
		 * Delete this element from ld pending queue and append it to
		 * ld running queue
		 */
		list_move_tail(&desc_sw->node, &chan->ld_running);
	}
}

/**
 * xgene_dma_cleanup_descriptors - cleanup link descriptors which are completed
 * and move them to ld_completed to free until flag 'ack' is set
 * @chan: X-Gene DMA channel
 *
 * This function is used on descriptors which have been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, then
 * free these descriptors if flag 'ack' is set.
 */
static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
{
	struct xgene_dma_ring *ring = &chan->rx_ring;
	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
	struct xgene_dma_desc_hw *desc_hw;
	struct list_head ld_completed;
	u8 status;

	INIT_LIST_HEAD(&ld_completed);

	spin_lock(&chan->lock);

	/* Clean already completed and acked descriptors */
	xgene_dma_clean_completed_descriptor(chan);

	/* Move all completed descriptors to ld completed queue, in order */
	list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
		/* Get subsequent hw descriptor from DMA rx ring */
		desc_hw = &ring->desc_hw[ring->head];

		/* Check if this descriptor has been completed */
		if (unlikely(le64_to_cpu(desc_hw->m0) ==
			     XGENE_DMA_DESC_EMPTY_SIGNATURE))
			break;

		if (++ring->head == ring->slots)
			ring->head = 0;

		/* Check if we have any error with DMA transactions */
		status = XGENE_DMA_DESC_STATUS(
				XGENE_DMA_DESC_ELERR_RD(le64_to_cpu(
					desc_hw->m0)),
				XGENE_DMA_DESC_LERR_RD(le64_to_cpu(
					desc_hw->m0)));
		if (status) {
			/* Print the DMA error type */
			chan_err(chan, "%s\n", xgene_dma_desc_err[status]);

			/*
			 * We have a DMA transaction error here. Dump the DMA
			 * Tx and Rx descriptors for this request.
			 */
			XGENE_DMA_DESC_DUMP(&desc_sw->desc1,
					    "X-Gene DMA TX DESC1: ");

			if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC)
				XGENE_DMA_DESC_DUMP(&desc_sw->desc2,
						    "X-Gene DMA TX DESC2: ");

			XGENE_DMA_DESC_DUMP(desc_hw,
					    "X-Gene DMA RX ERR DESC: ");
		}

		/* Notify the hw about this completed descriptor */
		iowrite32(-1, ring->cmd);

		/* Mark this hw descriptor as processed */
		desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);

		/*
		 * Decrement the pending transaction count
		 * as we have processed one
		 */
		chan->pending -= ((desc_sw->flags &
				  XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);

		/*
		 * Delete this node from ld running queue and append it to
		 * ld completed queue for further processing
		 */
		list_move_tail(&desc_sw->node, &ld_completed);
	}

	/*
	 * Start any pending transactions automatically.
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	xgene_chan_xfer_ld_pending(chan);

	spin_unlock(&chan->lock);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
		xgene_dma_run_tx_complete_actions(chan, desc_sw);
		xgene_dma_clean_running_descriptor(chan, desc_sw);
	}
}

static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xgene_dma_chan *chan = to_dma_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct xgene_dma_desc_sw),
					  0, 0);
	if (!chan->desc_pool) {
		chan_err(chan, "Failed to allocate descriptor pool\n");
		return -ENOMEM;
	}

	chan_dbg(chan, "Allocate descriptor pool\n");

	return 1;
}

/**
 * xgene_dma_free_desc_list - Free all descriptors in a queue
 * @chan: X-Gene DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->lock
 */
static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan,
				     struct list_head *list)
{
	struct xgene_dma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node)
		xgene_dma_clean_descriptor(chan, desc);
}

static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xgene_dma_chan *chan = to_dma_chan(dchan);

	chan_dbg(chan, "Free all resources\n");

	if (!chan->desc_pool)
		return;

	/* Process all running descriptors */
	xgene_dma_cleanup_descriptors(chan);

	spin_lock_bh(&chan->lock);

	/* Clean all link descriptor queues */
	xgene_dma_free_desc_list(chan, &chan->ld_pending);
	xgene_dma_free_desc_list(chan, &chan->ld_running);
	xgene_dma_free_desc_list(chan, &chan->ld_completed);

	spin_unlock_bh(&chan->lock);

	/* Delete this channel DMA pool */
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *xgene_dma_prep_xor(
	struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
	u32 src_cnt, size_t len, unsigned long flags)
{
	struct xgene_dma_desc_sw *first = NULL, *new;
	struct xgene_dma_chan *chan;
	static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {
				0x01, 0x01, 0x01, 0x01, 0x01};

	if (unlikely(!dchan || !len))
		return NULL;

	chan = to_dma_chan(dchan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = xgene_dma_alloc_descriptor(chan);
		if (!new)
			goto fail;

		/* Prepare xor DMA descriptor */
		xgene_dma_prep_xor_desc(chan, new, &dst, src,
					src_cnt, &len, multi);

		if (!first)
			first = new;

		new->tx.cookie = 0;
		async_tx_ack(&new->tx);

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->tx.flags = flags; /* client is in control of this ack */
	new->tx.cookie = -EBUSY;
	list_splice(&first->tx_list, &new->tx_list);

	return &new->tx;

fail:
	if (!first)
		return NULL;

	xgene_dma_free_desc_list(chan, &first->tx_list);
	return NULL;
}
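
/*
 * A minimal sketch of how a dmaengine client would drive this prep routine
 * (the chan/dst/src/src_cnt/len identifiers below stand for the caller's own
 * data; in practice XOR/PQ offload is usually reached through the async_tx
 * layer, e.g. async_xor(), rather than by calling the prep hook directly):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_dma_xor(chan, dst, src, src_cnt,
 *						len, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */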

static struct dma_async_tx_descriptor *xgene_dma_prep_pq(
	struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
	u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
{
	struct xgene_dma_desc_sw *first = NULL, *new;
	struct xgene_dma_chan *chan;
	size_t _len = len;
	dma_addr_t _src[XGENE_DMA_MAX_XOR_SRC];
	static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {0x01, 0x01, 0x01, 0x01, 0x01};

	if (unlikely(!dchan || !len))
		return NULL;

	chan = to_dma_chan(dchan);

	/*
	 * Save the source addresses in a local array, since we may have to
	 * prepare two descriptors to generate P and Q if both are enabled
	 * in the flags by the client
	 */
	memcpy(_src, src, sizeof(*src) * src_cnt);

	if (flags & DMA_PREP_PQ_DISABLE_P)
		len = 0;

	if (flags & DMA_PREP_PQ_DISABLE_Q)
		_len = 0;

	do {
		/* Allocate the link descriptor from DMA pool */
		new = xgene_dma_alloc_descriptor(chan);
		if (!new)
			goto fail;

		if (!first)
			first = new;

		new->tx.cookie = 0;
		async_tx_ack(&new->tx);

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/*
		 * Prepare DMA descriptor to generate P,
		 * if DMA_PREP_PQ_DISABLE_P flag is not set
		 */
		if (len) {
			xgene_dma_prep_xor_desc(chan, new, &dst[0], src,
						src_cnt, &len, multi);
			continue;
		}

		/*
		 * Prepare DMA descriptor to generate Q,
		 * if DMA_PREP_PQ_DISABLE_Q flag is not set
		 */
		if (_len) {
			xgene_dma_prep_xor_desc(chan, new, &dst[1], _src,
						src_cnt, &_len, scf);
		}
	} while (len || _len);

	new->tx.flags = flags; /* client is in control of this ack */
	new->tx.cookie = -EBUSY;
	list_splice(&first->tx_list, &new->tx_list);

	return &new->tx;

fail:
	if (!first)
		return NULL;

	xgene_dma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static void xgene_dma_issue_pending(struct dma_chan *dchan)
{
	struct xgene_dma_chan *chan = to_dma_chan(dchan);

	spin_lock_bh(&chan->lock);
	xgene_chan_xfer_ld_pending(chan);
	spin_unlock_bh(&chan->lock);
}

static enum dma_status xgene_dma_tx_status(struct dma_chan *dchan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

static void xgene_dma_tasklet_cb(struct tasklet_struct *t)
{
	struct xgene_dma_chan *chan = from_tasklet(chan, t, tasklet);

	/* Run all cleanup for descriptors which have been completed */
	xgene_dma_cleanup_descriptors(chan);

	/* Re-enable DMA channel IRQ */
	enable_irq(chan->rx_irq);
}

static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)
{
	struct xgene_dma_chan *chan = (struct xgene_dma_chan *)id;

	BUG_ON(!chan);

	/*
	 * Disable DMA channel IRQ until we process completed
	 * descriptors
	 */
	disable_irq_nosync(chan->rx_irq);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t xgene_dma_err_isr(int irq, void *id)
{
	struct xgene_dma *pdma = (struct xgene_dma *)id;
	unsigned long int_mask;
	u32 val, i;

	val = ioread32(pdma->csr_dma + XGENE_DMA_INT);

	/* Clear DMA interrupts */
	iowrite32(val, pdma->csr_dma + XGENE_DMA_INT);

	/* Print DMA error info */
	int_mask = val >> XGENE_DMA_INT_MASK_SHIFT;
	for_each_set_bit(i, &int_mask, ARRAY_SIZE(xgene_dma_err))
		dev_err(pdma->dev,
			"Interrupt status 0x%08X %s\n", val, xgene_dma_err[i]);

	return IRQ_HANDLED;
}

static void xgene_dma_wr_ring_state(struct xgene_dma_ring *ring)
{
	int i;

	iowrite32(ring->num, ring->pdma->csr_ring + XGENE_DMA_RING_STATE);

	for (i = 0; i < XGENE_DMA_RING_NUM_CONFIG; i++)
		iowrite32(ring->state[i], ring->pdma->csr_ring +
			  XGENE_DMA_RING_STATE_WR_BASE + (i * 4));
}

static void xgene_dma_clr_ring_state(struct xgene_dma_ring *ring)
{
	memset(ring->state, 0, sizeof(u32) * XGENE_DMA_RING_NUM_CONFIG);
	xgene_dma_wr_ring_state(ring);
}

static void xgene_dma_setup_ring(struct xgene_dma_ring *ring)
{
	void *ring_cfg = ring->state;
	u64 addr = ring->desc_paddr;
	u32 i, val;

	ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE;

	/* Clear DMA ring state */
	xgene_dma_clr_ring_state(ring);

	/* Set DMA ring type */
	XGENE_DMA_RING_TYPE_SET(ring_cfg, XGENE_DMA_RING_TYPE_REGULAR);

	if (ring->owner == XGENE_DMA_RING_OWNER_DMA) {
		/* Set recombination buffer and timeout */
		XGENE_DMA_RING_RECOMBBUF_SET(ring_cfg);
		XGENE_DMA_RING_RECOMTIMEOUTL_SET(ring_cfg);
		XGENE_DMA_RING_RECOMTIMEOUTH_SET(ring_cfg);
	}

	/* Initialize DMA ring state */
	XGENE_DMA_RING_SELTHRSH_SET(ring_cfg);
	XGENE_DMA_RING_ACCEPTLERR_SET(ring_cfg);
	XGENE_DMA_RING_COHERENT_SET(ring_cfg);
	XGENE_DMA_RING_ADDRL_SET(ring_cfg, addr);
	XGENE_DMA_RING_ADDRH_SET(ring_cfg, addr);
	XGENE_DMA_RING_SIZE_SET(ring_cfg, ring->cfgsize);

	/* Write DMA ring configurations */
	xgene_dma_wr_ring_state(ring);

	/* Set DMA ring id */
	iowrite32(XGENE_DMA_RING_ID_SETUP(ring->id),
		  ring->pdma->csr_ring + XGENE_DMA_RING_ID);

	/* Set DMA ring buffer */
	iowrite32(XGENE_DMA_RING_ID_BUF_SETUP(ring->num),
		  ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);

	if (ring->owner != XGENE_DMA_RING_OWNER_CPU)
		return;

	/* Set empty signature to DMA Rx ring descriptors */
	for (i = 0; i < ring->slots; i++) {
		struct xgene_dma_desc_hw *desc;

		desc = &ring->desc_hw[i];
		desc->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
	}

	/* Enable DMA Rx ring interrupt */
	val = ioread32(ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
	XGENE_DMA_RING_NE_INT_MODE_SET(val, ring->buf_num);
	iowrite32(val, ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
}
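
/*
 * Pre-filling every CPU-owned (Rx) slot with the empty signature is what lets
 * xgene_dma_cleanup_descriptors() tell completed work apart from untouched
 * slots: the hardware overwrites m0 on completion, and the cleanup path
 * restores the signature once a slot has been processed.
 */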

static void xgene_dma_clear_ring(struct xgene_dma_ring *ring)
{
	u32 ring_id, val;

	if (ring->owner == XGENE_DMA_RING_OWNER_CPU) {
		/* Disable DMA Rx ring interrupt */
		val = ioread32(ring->pdma->csr_ring +
			       XGENE_DMA_RING_NE_INT_MODE);
		XGENE_DMA_RING_NE_INT_MODE_RESET(val, ring->buf_num);
		iowrite32(val, ring->pdma->csr_ring +
			  XGENE_DMA_RING_NE_INT_MODE);
	}

	/* Clear DMA ring state */
	ring_id = XGENE_DMA_RING_ID_SETUP(ring->id);
	iowrite32(ring_id, ring->pdma->csr_ring + XGENE_DMA_RING_ID);

	iowrite32(0, ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
	xgene_dma_clr_ring_state(ring);
}

static void xgene_dma_set_ring_cmd(struct xgene_dma_ring *ring)
{
	ring->cmd_base = ring->pdma->csr_ring_cmd +
				XGENE_DMA_RING_CMD_BASE_OFFSET((ring->num -
							XGENE_DMA_RING_NUM));

	ring->cmd = ring->cmd_base + XGENE_DMA_RING_CMD_OFFSET;
}

static int xgene_dma_get_ring_size(struct xgene_dma_chan *chan,
				   enum xgene_dma_ring_cfgsize cfgsize)
{
	int size;

	switch (cfgsize) {
	case XGENE_DMA_RING_CFG_SIZE_512B:
		size = 0x200;
		break;
	case XGENE_DMA_RING_CFG_SIZE_2KB:
		size = 0x800;
		break;
	case XGENE_DMA_RING_CFG_SIZE_16KB:
		size = 0x4000;
		break;
	case XGENE_DMA_RING_CFG_SIZE_64KB:
		size = 0x10000;
		break;
	case XGENE_DMA_RING_CFG_SIZE_512KB:
		size = 0x80000;
		break;
	default:
		chan_err(chan, "Unsupported cfg ring size %d\n", cfgsize);
		return -EINVAL;
	}

	return size;
}
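
/*
 * The byte size chosen here, divided by XGENE_DMA_RING_WQ_DESC_SIZE (32 bytes)
 * in xgene_dma_setup_ring(), gives the number of ring slots; the 64 KB
 * configuration used by xgene_dma_create_chan_rings() therefore yields 2048
 * descriptor slots per ring.
 */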

static void xgene_dma_delete_ring_one(struct xgene_dma_ring *ring)
{
	/* Clear DMA ring configurations */
	xgene_dma_clear_ring(ring);

	/* De-allocate DMA ring descriptor */
	if (ring->desc_vaddr) {
		dma_free_coherent(ring->pdma->dev, ring->size,
				  ring->desc_vaddr, ring->desc_paddr);
		ring->desc_vaddr = NULL;
	}
}

static void xgene_dma_delete_chan_rings(struct xgene_dma_chan *chan)
{
	xgene_dma_delete_ring_one(&chan->rx_ring);
	xgene_dma_delete_ring_one(&chan->tx_ring);
}

static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
				     struct xgene_dma_ring *ring,
				     enum xgene_dma_ring_cfgsize cfgsize)
{
	int ret;

	/* Setup DMA ring descriptor variables */
	ring->pdma = chan->pdma;
	ring->cfgsize = cfgsize;
	ring->num = chan->pdma->ring_num++;
	ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);

	ret = xgene_dma_get_ring_size(chan, cfgsize);
	if (ret <= 0)
		return ret;
	ring->size = ret;

	/* Allocate memory for DMA ring descriptor */
	ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size,
					      &ring->desc_paddr, GFP_KERNEL);
	if (!ring->desc_vaddr) {
		chan_err(chan, "Failed to allocate ring desc\n");
		return -ENOMEM;
	}

	/* Configure and enable DMA ring */
	xgene_dma_set_ring_cmd(ring);
	xgene_dma_setup_ring(ring);

	return 0;
}

static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
{
	struct xgene_dma_ring *rx_ring = &chan->rx_ring;
	struct xgene_dma_ring *tx_ring = &chan->tx_ring;
	int ret;

	/* Create DMA Rx ring descriptor */
	rx_ring->owner = XGENE_DMA_RING_OWNER_CPU;
	rx_ring->buf_num = XGENE_DMA_CPU_BUFNUM + chan->id;

	ret = xgene_dma_create_ring_one(chan, rx_ring,
					XGENE_DMA_RING_CFG_SIZE_64KB);
	if (ret)
		return ret;

	chan_dbg(chan, "Rx ring id 0x%X num %d desc 0x%p\n",
		 rx_ring->id, rx_ring->num, rx_ring->desc_vaddr);

	/* Create DMA Tx ring descriptor */
	tx_ring->owner = XGENE_DMA_RING_OWNER_DMA;
	tx_ring->buf_num = XGENE_DMA_BUFNUM + chan->id;

	ret = xgene_dma_create_ring_one(chan, tx_ring,
					XGENE_DMA_RING_CFG_SIZE_64KB);
	if (ret) {
		xgene_dma_delete_ring_one(rx_ring);
		return ret;
	}

	tx_ring->dst_ring_num = XGENE_DMA_RING_DST_ID(rx_ring->num);

	chan_dbg(chan,
		 "Tx ring id 0x%X num %d desc 0x%p\n",
		 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);

	/* Set the max outstanding request possible to this channel */
	chan->max_outstanding = tx_ring->slots;

	return ret;
}

static int xgene_dma_init_rings(struct xgene_dma *pdma)
{
	int ret, i, j;

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		ret = xgene_dma_create_chan_rings(&pdma->chan[i]);
		if (ret) {
			for (j = 0; j < i; j++)
				xgene_dma_delete_chan_rings(&pdma->chan[j]);
			return ret;
		}
	}

	return ret;
}

static void xgene_dma_enable(struct xgene_dma *pdma)
{
	u32 val;

	/* Configure and enable DMA engine */
	val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
	XGENE_DMA_CH_SETUP(val);
	XGENE_DMA_ENABLE(val);
	iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
}

static void xgene_dma_disable(struct xgene_dma *pdma)
{
	u32 val;

	val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
	XGENE_DMA_DISABLE(val);
	iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
}

static void xgene_dma_mask_interrupts(struct xgene_dma *pdma)
{
	/*
	 * Mask DMA ring overflow, underflow and
	 * AXI write/read error interrupts
	 */
	iowrite32(XGENE_DMA_INT_ALL_MASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
	iowrite32(XGENE_DMA_INT_ALL_MASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
	iowrite32(XGENE_DMA_INT_ALL_MASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
	iowrite32(XGENE_DMA_INT_ALL_MASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
	iowrite32(XGENE_DMA_INT_ALL_MASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);

	/* Mask DMA error interrupts */
	iowrite32(XGENE_DMA_INT_ALL_MASK, pdma->csr_dma + XGENE_DMA_INT_MASK);
}

static void xgene_dma_unmask_interrupts(struct xgene_dma *pdma)
{
	/*
	 * Unmask DMA ring overflow, underflow and
	 * AXI write/read error interrupts
	 */
	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);

	/* Unmask DMA error interrupts */
	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
		  pdma->csr_dma + XGENE_DMA_INT_MASK);
}

static void xgene_dma_init_hw(struct xgene_dma *pdma)
{
	u32 val;

	/* Associate DMA ring to corresponding ring HW */
	iowrite32(XGENE_DMA_ASSOC_RING_MNGR1,
		  pdma->csr_dma + XGENE_DMA_CFG_RING_WQ_ASSOC);

	/* Configure RAID6 polynomial control setting */
	if (is_pq_enabled(pdma))
		iowrite32(XGENE_DMA_RAID6_MULTI_CTRL(0x1D),
			  pdma->csr_dma + XGENE_DMA_RAID6_CONT);
	else
		dev_info(pdma->dev, "PQ is disabled in HW\n");

	xgene_dma_enable(pdma);
	xgene_dma_unmask_interrupts(pdma);

	/* Get DMA id and version info */
	val = ioread32(pdma->csr_dma + XGENE_DMA_IPBRR);

	/* DMA device info */
	dev_info(pdma->dev,
		 "X-Gene DMA v%d.%02d.%02d driver registered %d channels",
		 XGENE_DMA_REV_NO_RD(val), XGENE_DMA_BUS_ID_RD(val),
		 XGENE_DMA_DEV_ID_RD(val), XGENE_DMA_MAX_CHANNEL);
}

static int xgene_dma_init_ring_mngr(struct xgene_dma *pdma)
{
	if (ioread32(pdma->csr_ring + XGENE_DMA_RING_CLKEN) &&
	    (!ioread32(pdma->csr_ring + XGENE_DMA_RING_SRST)))
		return 0;

	iowrite32(0x3, pdma->csr_ring + XGENE_DMA_RING_CLKEN);
	iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_SRST);

	/* Bring up memory */
	iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);

	/* Force a barrier */
	ioread32(pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);

	/* reset may take up to 1ms */
	usleep_range(1000, 1100);

	if (ioread32(pdma->csr_ring + XGENE_DMA_RING_BLK_MEM_RDY)
		!= XGENE_DMA_RING_BLK_MEM_RDY_VAL) {
		dev_err(pdma->dev,
			"Failed to release ring mngr memory from shutdown\n");
		return -ENODEV;
	}

	/* program threshold set 1 and all hysteresis */
	iowrite32(XGENE_DMA_RING_THRESLD0_SET1_VAL,
		  pdma->csr_ring + XGENE_DMA_RING_THRESLD0_SET1);
	iowrite32(XGENE_DMA_RING_THRESLD1_SET1_VAL,
		  pdma->csr_ring + XGENE_DMA_RING_THRESLD1_SET1);
	iowrite32(XGENE_DMA_RING_HYSTERESIS_VAL,
		  pdma->csr_ring + XGENE_DMA_RING_HYSTERESIS);

	/* Enable QPcore and assign error queue */
	iowrite32(XGENE_DMA_RING_ENABLE,
		  pdma->csr_ring + XGENE_DMA_RING_CONFIG);

	return 0;
}

static int xgene_dma_init_mem(struct xgene_dma *pdma)
{
	int ret;

	ret = xgene_dma_init_ring_mngr(pdma);
	if (ret)
		return ret;

	/* Bring up memory */
	iowrite32(0x0, pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);

	/* Force a barrier */
	ioread32(pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);

	/* reset may take up to 1ms */
	usleep_range(1000, 1100);

	if (ioread32(pdma->csr_dma + XGENE_DMA_BLK_MEM_RDY)
		!= XGENE_DMA_BLK_MEM_RDY_VAL) {
		dev_err(pdma->dev,
			"Failed to release DMA memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}

static int xgene_dma_request_irqs(struct xgene_dma *pdma)
{
	struct xgene_dma_chan *chan;
	int ret, i, j;

	/* Register DMA error irq */
	ret = devm_request_irq(pdma->dev, pdma->err_irq, xgene_dma_err_isr,
			       0, "dma_error", pdma);
	if (ret) {
		dev_err(pdma->dev,
			"Failed to register error IRQ %d\n", pdma->err_irq);
		return ret;
	}

	/* Register DMA channel rx irq */
	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		chan = &pdma->chan[i];
		irq_set_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(chan->dev, chan->rx_irq,
				       xgene_dma_chan_ring_isr,
				       0, chan->name, chan);
		if (ret) {
			chan_err(chan, "Failed to register Rx IRQ %d\n",
				 chan->rx_irq);
			devm_free_irq(pdma->dev, pdma->err_irq, pdma);

			for (j = 0; j < i; j++) {
				chan = &pdma->chan[j];
				irq_clear_status_flags(chan->rx_irq,
						       IRQ_DISABLE_UNLAZY);
				devm_free_irq(chan->dev, chan->rx_irq, chan);
			}

			return ret;
		}
	}

	return 0;
}

static void xgene_dma_free_irqs(struct xgene_dma *pdma)
{
	struct xgene_dma_chan *chan;
	int i;

	/* Free DMA device error irq */
	devm_free_irq(pdma->dev, pdma->err_irq, pdma);

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		chan = &pdma->chan[i];
		irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(chan->dev, chan->rx_irq, chan);
	}
}
1484 | static void xgene_dma_set_caps(struct xgene_dma_chan *chan, | |
1485 | struct dma_device *dma_dev) | |
1486 | { | |
1487 | /* Initialize DMA device capability mask */ | |
1488 | dma_cap_zero(dma_dev->cap_mask); | |
1489 | ||
1490 | /* Set DMA device capability */ | |
9f2fd0df RPS |
1491 | |
1492 | /* Basically here, the X-Gene SoC DMA engine channel 0 supports XOR | |
1493 | * and channel 1 supports XOR, PQ both. First thing here is we have | |
1494 | * mechanism in hw to enable/disable PQ/XOR supports on channel 1, | |
1495 | * we can make sure this by reading SoC Efuse register. | |
1496 | * Second thing, we have hw errata that if we run channel 0 and | |
1497 | * channel 1 simultaneously with executing XOR and PQ request, | |
1498 | * suddenly DMA engine hangs, So here we enable XOR on channel 0 only | |
1499 | * if XOR and PQ supports on channel 1 is disabled. | |
1500 | */ | |
1501 | if ((chan->id == XGENE_DMA_PQ_CHANNEL) && | |
1502 | is_pq_enabled(chan->pdma)) { | |
1503 | dma_cap_set(DMA_PQ, dma_dev->cap_mask); | |
1504 | dma_cap_set(DMA_XOR, dma_dev->cap_mask); | |
1505 | } else if ((chan->id == XGENE_DMA_XOR_CHANNEL) && | |
1506 | !is_pq_enabled(chan->pdma)) { | |
1507 | dma_cap_set(DMA_XOR, dma_dev->cap_mask); | |
1508 | } | |
1509 | ||
1510 | /* Set base and prep routines */ | |
1511 | dma_dev->dev = chan->dev; | |
1512 | dma_dev->device_alloc_chan_resources = xgene_dma_alloc_chan_resources; | |
1513 | dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources; | |
1514 | dma_dev->device_issue_pending = xgene_dma_issue_pending; | |
1515 | dma_dev->device_tx_status = xgene_dma_tx_status; | |
9f2fd0df RPS |
1516 | |
1517 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { | |
1518 | dma_dev->device_prep_dma_xor = xgene_dma_prep_xor; | |
1519 | dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC; | |
77a68e56 | 1520 | dma_dev->xor_align = DMAENGINE_ALIGN_64_BYTES; |
9f2fd0df RPS |
1521 | } |
1522 | ||
1523 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { | |
1524 | dma_dev->device_prep_dma_pq = xgene_dma_prep_pq; | |
1525 | dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC; | |
77a68e56 | 1526 | dma_dev->pq_align = DMAENGINE_ALIGN_64_BYTES; |
9f2fd0df RPS |
1527 | } |
1528 | } | |
1529 | ||
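/*
 * Initialize one channel's software state (lock, descriptor lists,
 * tasklet, cookie) and register it as a separate dma_device with the
 * dmaengine core.
 */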
1530 | static int xgene_dma_async_register(struct xgene_dma *pdma, int id) | |
1531 | { | |
1532 | struct xgene_dma_chan *chan = &pdma->chan[id]; | |
1533 | struct dma_device *dma_dev = &pdma->dma_dev[id]; | |
1534 | int ret; | |
1535 | ||
1536 | chan->dma_chan.device = dma_dev; | |
1537 | ||
1538 | spin_lock_init(&chan->lock); | |
1539 | INIT_LIST_HEAD(&chan->ld_pending); | |
1540 | INIT_LIST_HEAD(&chan->ld_running); | |
1541 | INIT_LIST_HEAD(&chan->ld_completed); | |
0e71d9b9 | 1542 | tasklet_setup(&chan->tasklet, xgene_dma_tasklet_cb); |
9f2fd0df RPS |
1543 | |
1544 | chan->pending = 0; | |
1545 | chan->desc_pool = NULL; | |
1546 | dma_cookie_init(&chan->dma_chan); | |
1547 | ||
1548 | /* Setup dma device capabilities and prep routines */ | |
1549 | xgene_dma_set_caps(chan, dma_dev); | |
1550 | ||
1551 | /* Initialize DMA device list head */ | |
1552 | INIT_LIST_HEAD(&dma_dev->channels); | |
1553 | list_add_tail(&chan->dma_chan.device_node, &dma_dev->channels); | |
1554 | ||
1555 | /* Register with Linux async DMA framework */ | |
1556 | ret = dma_async_device_register(dma_dev); | |
1557 | if (ret) { | |
1558 | chan_err(chan, "Failed to register async device %d\n", ret); | |
1559 | tasklet_kill(&chan->tasklet); | |
1560 | ||
1561 | return ret; | |
1562 | } | |
1563 | ||
1564 | /* DMA capability info */ | |
1565 | dev_info(pdma->dev, | |
c678fa66 | 1566 | "%s: CAPABILITY ( %s%s)\n", dma_chan_name(&chan->dma_chan), |
9f2fd0df RPS |
1567 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "", |
1568 | dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : ""); | |
1569 | ||
1570 | return 0; | |
1571 | } | |
1572 | ||
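/*
 * Register all channels with the async DMA framework; on failure,
 * unregister and kill the tasklets of the channels registered so far.
 */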
1573 | static int xgene_dma_init_async(struct xgene_dma *pdma) | |
1574 | { | |
1575 | int ret, i, j; | |
1576 | ||
1577 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { | |
1578 | ret = xgene_dma_async_register(pdma, i); | |
1579 | if (ret) { | |
1580 | for (j = 0; j < i; j++) { | |
1581 | dma_async_device_unregister(&pdma->dma_dev[j]); | |
1582 | tasklet_kill(&pdma->chan[j].tasklet); | |
1583 | } | |
1584 | ||
1585 | return ret; | |
1586 | } | |
1587 | } | |
1588 | ||
1589 | return 0; | |
1590 | } | |
1591 | ||
1592 | static void xgene_dma_async_unregister(struct xgene_dma *pdma) | |
1593 | { | |
1594 | int i; | |
1595 | ||
1596 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) | |
1597 | dma_async_device_unregister(&pdma->dma_dev[i]); | |
1598 | } | |
1599 | ||
1600 | static void xgene_dma_init_channels(struct xgene_dma *pdma) | |
1601 | { | |
1602 | struct xgene_dma_chan *chan; | |
1603 | int i; | |
1604 | ||
1605 | pdma->ring_num = XGENE_DMA_RING_NUM; | |
1606 | ||
1607 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { | |
1608 | chan = &pdma->chan[i]; | |
1609 | chan->dev = pdma->dev; | |
1610 | chan->pdma = pdma; | |
1611 | chan->id = i; | |
ed1f0418 | 1612 | snprintf(chan->name, sizeof(chan->name), "dmachan%d", chan->id); |
9f2fd0df RPS |
1613 | } |
1614 | } | |
1615 | ||
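/*
 * Map the four CSR regions (DMA, ring, ring command and efuse) and
 * fetch the error interrupt plus one Rx ring interrupt per channel
 * from the platform device.
 */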
1616 | static int xgene_dma_get_resources(struct platform_device *pdev, | |
1617 | struct xgene_dma *pdma) | |
1618 | { | |
1619 | struct resource *res; | |
1620 | int irq, i; | |
1621 | ||
1622 | /* Get DMA csr region */ | |
1623 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1624 | if (!res) { | |
1625 | dev_err(&pdev->dev, "Failed to get csr region\n"); | |
1626 | return -ENXIO; | |
1627 | } | |
1628 | ||
1629 | pdma->csr_dma = devm_ioremap(&pdev->dev, res->start, | |
1630 | resource_size(res)); | |
9c361b1a | 1631 | if (!pdma->csr_dma) { |
9f2fd0df | 1632 | dev_err(&pdev->dev, "Failed to ioremap csr region"); |
9c361b1a | 1633 | return -ENOMEM; |
9f2fd0df RPS |
1634 | } |
1635 | ||
1636 | /* Get DMA ring csr region */ | |
1637 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | |
1638 | if (!res) { | |
1639 | dev_err(&pdev->dev, "Failed to get ring csr region\n"); | |
1640 | return -ENXIO; | |
1641 | } | |
1642 | ||
1643 | pdma->csr_ring = devm_ioremap(&pdev->dev, res->start, | |
1644 | resource_size(res)); | |
9c361b1a | 1645 | if (!pdma->csr_ring) { |
9f2fd0df | 1646 | dev_err(&pdev->dev, "Failed to ioremap ring csr region"); |
9c361b1a | 1647 | return -ENOMEM; |
9f2fd0df RPS |
1648 | } |
1649 | ||
1650 | /* Get DMA ring cmd csr region */ | |
1651 | res = platform_get_resource(pdev, IORESOURCE_MEM, 2); | |
1652 | if (!res) { | |
1653 | dev_err(&pdev->dev, "Failed to get ring cmd csr region\n"); | |
1654 | return -ENXIO; | |
1655 | } | |
1656 | ||
1657 | pdma->csr_ring_cmd = devm_ioremap(&pdev->dev, res->start, | |
1658 | resource_size(res)); | |
9c361b1a | 1659 | if (!pdma->csr_ring_cmd) { |
9f2fd0df | 1660 | dev_err(&pdev->dev, "Failed to ioremap ring cmd csr region"); |
9c361b1a | 1661 | return -ENOMEM; |
9f2fd0df RPS |
1662 | } |
1663 | ||
cda8e937 RPS |
1664 | pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET; |
1665 | ||
9f2fd0df RPS |
1666 | /* Get efuse csr region */ |
1667 | res = platform_get_resource(pdev, IORESOURCE_MEM, 3); | |
1668 | if (!res) { | |
1669 | dev_err(&pdev->dev, "Failed to get efuse csr region\n"); | |
1670 | return -ENXIO; | |
1671 | } | |
1672 | ||
1673 | pdma->csr_efuse = devm_ioremap(&pdev->dev, res->start, | |
1674 | resource_size(res)); | |
9c361b1a | 1675 | if (!pdma->csr_efuse) { |
9f2fd0df | 1676 | dev_err(&pdev->dev, "Failed to ioremap efuse csr region"); |
9c361b1a | 1677 | return -ENOMEM; |
9f2fd0df RPS |
1678 | } |
1679 | ||
1680 | /* Get DMA error interrupt */ | |
1681 | irq = platform_get_irq(pdev, 0); | |
e17be6e1 | 1682 | if (irq <= 0) |
9f2fd0df | 1683 | return -ENXIO; |
9f2fd0df RPS |
1684 | |
1685 | pdma->err_irq = irq; | |
1686 | ||
1687 | /* Get DMA Rx ring descriptor interrupts for all DMA channels */ | |
1688 | for (i = 1; i <= XGENE_DMA_MAX_CHANNEL; i++) { | |
1689 | irq = platform_get_irq(pdev, i); | |
e17be6e1 | 1690 | if (irq <= 0) |
9f2fd0df | 1691 | return -ENXIO; |
9f2fd0df RPS |
1692 | |
1693 | pdma->chan[i - 1].rx_irq = irq; | |
1694 | } | |
1695 | ||
1696 | return 0; | |
1697 | } | |
1698 | ||
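/*
 * Probe: map resources, enable the clock (optional on ACPI systems),
 * bring the DMA RAM out of shutdown, set the 42-bit DMA mask, set up
 * rings and interrupts, enable the engine and register the channels
 * with the async DMA framework.
 */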
1699 | static int xgene_dma_probe(struct platform_device *pdev) | |
1700 | { | |
1701 | struct xgene_dma *pdma; | |
1702 | int ret, i; | |
1703 | ||
1704 | pdma = devm_kzalloc(&pdev->dev, sizeof(*pdma), GFP_KERNEL); | |
1705 | if (!pdma) | |
1706 | return -ENOMEM; | |
1707 | ||
1708 | pdma->dev = &pdev->dev; | |
1709 | platform_set_drvdata(pdev, pdma); | |
1710 | ||
1711 | ret = xgene_dma_get_resources(pdev, pdma); | |
1712 | if (ret) | |
1713 | return ret; | |
1714 | ||
1715 | pdma->clk = devm_clk_get(&pdev->dev, NULL); | |
89079493 | 1716 | if (IS_ERR(pdma->clk) && !ACPI_COMPANION(&pdev->dev)) { |
9f2fd0df RPS |
1717 | dev_err(&pdev->dev, "Failed to get clk\n"); |
1718 | return PTR_ERR(pdma->clk); | |
1719 | } | |
1720 | ||
1721 | /* Enable clk before accessing registers */ | |
89079493 RPS |
1722 | if (!IS_ERR(pdma->clk)) { |
1723 | ret = clk_prepare_enable(pdma->clk); | |
1724 | if (ret) { | |
1725 | dev_err(&pdev->dev, "Failed to enable clk %d\n", ret); | |
1726 | return ret; | |
1727 | } | |
9f2fd0df RPS |
1728 | } |
1729 | ||
1730 | /* Bring DMA RAM out of shutdown */ | |
1731 | ret = xgene_dma_init_mem(pdma); | |
1732 | if (ret) | |
1733 | goto err_clk_enable; | |
1734 | ||
1735 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(42)); | |
1736 | if (ret) { | |
1737 | dev_err(&pdev->dev, "No usable DMA configuration\n"); | |
1738 | goto err_dma_mask; | |
1739 | } | |
1740 | ||
1741 | /* Initialize DMA channels software state */ | |
1742 | xgene_dma_init_channels(pdma); | |
1743 | ||
1744 | /* Configure DMA rings */ | |
1745 | ret = xgene_dma_init_rings(pdma); | |
1746 | if (ret) | |
1747 | goto err_clk_enable; | |
1748 | ||
1749 | ret = xgene_dma_request_irqs(pdma); | |
1750 | if (ret) | |
1751 | goto err_request_irq; | |
1752 | ||
1753 | /* Configure and enable DMA engine */ | |
1754 | xgene_dma_init_hw(pdma); | |
1755 | ||
1756 | /* Register DMA device with the Linux async framework */ | |
1757 | ret = xgene_dma_init_async(pdma); | |
1758 | if (ret) | |
1759 | goto err_async_init; | |
1760 | ||
1761 | return 0; | |
1762 | ||
1763 | err_async_init: | |
1764 | xgene_dma_free_irqs(pdma); | |
1765 | ||
1766 | err_request_irq: | |
1767 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) | |
1768 | xgene_dma_delete_chan_rings(&pdma->chan[i]); | |
1769 | ||
1770 | err_dma_mask: | |
1771 | err_clk_enable: | |
89079493 RPS |
1772 | if (!IS_ERR(pdma->clk)) |
1773 | clk_disable_unprepare(pdma->clk); | |
9f2fd0df RPS |
1774 | |
1775 | return ret; | |
1776 | } | |
1777 | ||
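/*
 * Remove: unregister the channels, mask interrupts, disable the engine,
 * free interrupts, kill tasklets, delete rings and disable the clock.
 */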
1778 | static int xgene_dma_remove(struct platform_device *pdev) | |
1779 | { | |
1780 | struct xgene_dma *pdma = platform_get_drvdata(pdev); | |
1781 | struct xgene_dma_chan *chan; | |
1782 | int i; | |
1783 | ||
1784 | xgene_dma_async_unregister(pdma); | |
1785 | ||
1786 | /* Mask interrupts and disable DMA engine */ | |
1787 | xgene_dma_mask_interrupts(pdma); | |
1788 | xgene_dma_disable(pdma); | |
1789 | xgene_dma_free_irqs(pdma); | |
1790 | ||
1791 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { | |
1792 | chan = &pdma->chan[i]; | |
1793 | tasklet_kill(&chan->tasklet); | |
1794 | xgene_dma_delete_chan_rings(chan); | |
1795 | } | |
1796 | ||
89079493 RPS |
1797 | if (!IS_ERR(pdma->clk)) |
1798 | clk_disable_unprepare(pdma->clk); | |
9f2fd0df RPS |
1799 | |
1800 | return 0; | |
1801 | } | |
1802 | ||
89079493 RPS |
1803 | #ifdef CONFIG_ACPI |
1804 | static const struct acpi_device_id xgene_dma_acpi_match_ptr[] = { | |
1805 | {"APMC0D43", 0}, | |
1806 | {}, | |
1807 | }; | |
1808 | MODULE_DEVICE_TABLE(acpi, xgene_dma_acpi_match_ptr); | |
1809 | #endif | |
1810 | ||
9f2fd0df RPS |
1811 | static const struct of_device_id xgene_dma_of_match_ptr[] = { |
1812 | {.compatible = "apm,xgene-storm-dma",}, | |
1813 | {}, | |
1814 | }; | |
1815 | MODULE_DEVICE_TABLE(of, xgene_dma_of_match_ptr); | |
1816 | ||
1817 | static struct platform_driver xgene_dma_driver = { | |
1818 | .probe = xgene_dma_probe, | |
1819 | .remove = xgene_dma_remove, | |
1820 | .driver = { | |
1821 | .name = "X-Gene-DMA", | |
9f2fd0df | 1822 | .of_match_table = xgene_dma_of_match_ptr, |
89079493 | 1823 | .acpi_match_table = ACPI_PTR(xgene_dma_acpi_match_ptr), |
9f2fd0df RPS |
1824 | }, |
1825 | }; | |
1826 | ||
1827 | module_platform_driver(xgene_dma_driver); | |
1828 | ||
1829 | MODULE_DESCRIPTION("APM X-Gene SoC DMA driver"); | |
1830 | MODULE_AUTHOR("Rameshwar Prasad Sahu <[email protected]>"); | |
1831 | MODULE_AUTHOR("Loc Ho <[email protected]>"); | |
1832 | MODULE_LICENSE("GPL"); | |
1833 | MODULE_VERSION("1.0"); |