// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.

#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_opp.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#define QSPI_NUM_CS			2
#define QSPI_BYTES_PER_WORD		4

#define MSTR_CONFIG			0x0000
#define FULL_CYCLE_MODE			BIT(3)
#define FB_CLK_EN			BIT(4)
#define PIN_HOLDN			BIT(6)
#define PIN_WPN				BIT(7)
#define DMA_ENABLE			BIT(8)
#define BIG_ENDIAN_MODE			BIT(9)
#define SPI_MODE_MSK			0xc00
#define SPI_MODE_SHFT			10
#define CHIP_SELECT_NUM			BIT(12)
#define SBL_EN				BIT(13)
#define LPA_BASE_MSK			0x3c000
#define LPA_BASE_SHFT			14
#define TX_DATA_DELAY_MSK		0xc0000
#define TX_DATA_DELAY_SHFT		18
#define TX_CLK_DELAY_MSK		0x300000
#define TX_CLK_DELAY_SHFT		20
#define TX_CS_N_DELAY_MSK		0xc00000
#define TX_CS_N_DELAY_SHFT		22
#define TX_DATA_OE_DELAY_MSK		0x3000000
#define TX_DATA_OE_DELAY_SHFT		24

#define AHB_MASTER_CFG			0x0004
#define HMEM_TYPE_START_MID_TRANS_MSK	0x7
#define HMEM_TYPE_START_MID_TRANS_SHFT	0
#define HMEM_TYPE_LAST_TRANS_MSK	0x38
#define HMEM_TYPE_LAST_TRANS_SHFT	3
#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_MSK	0xc0
#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_SHFT	6
#define HMEMTYPE_READ_TRANS_MSK		0x700
#define HMEMTYPE_READ_TRANS_SHFT	8
#define HSHARED				BIT(11)
#define HINNERSHARED			BIT(12)

#define MSTR_INT_EN			0x000C
#define MSTR_INT_STATUS			0x0010
#define RESP_FIFO_UNDERRUN		BIT(0)
#define RESP_FIFO_NOT_EMPTY		BIT(1)
#define RESP_FIFO_RDY			BIT(2)
#define HRESP_FROM_NOC_ERR		BIT(3)
#define WR_FIFO_EMPTY			BIT(9)
#define WR_FIFO_FULL			BIT(10)
#define WR_FIFO_OVERRUN			BIT(11)
#define TRANSACTION_DONE		BIT(16)
#define DMA_CHAIN_DONE			BIT(31)
#define QSPI_ERR_IRQS			(RESP_FIFO_UNDERRUN | HRESP_FROM_NOC_ERR | \
					 WR_FIFO_OVERRUN)
#define QSPI_ALL_IRQS			(QSPI_ERR_IRQS | RESP_FIFO_RDY | \
					 WR_FIFO_EMPTY | WR_FIFO_FULL | \
					 TRANSACTION_DONE | DMA_CHAIN_DONE)

#define PIO_XFER_CTRL			0x0014
#define REQUEST_COUNT_MSK		0xffff

#define PIO_XFER_CFG			0x0018
#define TRANSFER_DIRECTION		BIT(0)
#define MULTI_IO_MODE_MSK		0xe
#define MULTI_IO_MODE_SHFT		1
#define TRANSFER_FRAGMENT		BIT(8)
#define SDR_1BIT			1
#define SDR_2BIT			2
#define SDR_4BIT			3
#define DDR_1BIT			5
#define DDR_2BIT			6
#define DDR_4BIT			7
#define DMA_DESC_SINGLE_SPI		1
#define DMA_DESC_DUAL_SPI		2
#define DMA_DESC_QUAD_SPI		3

#define PIO_XFER_STATUS			0x001c
#define WR_FIFO_BYTES_MSK		0xffff0000
#define WR_FIFO_BYTES_SHFT		16

#define PIO_DATAOUT_1B			0x0020
#define PIO_DATAOUT_4B			0x0024

#define RD_FIFO_CFG			0x0028
#define CONTINUOUS_MODE			BIT(0)

#define RD_FIFO_STATUS			0x002c
#define FIFO_EMPTY			BIT(11)
#define WR_CNTS_MSK			0x7f0
#define WR_CNTS_SHFT			4
#define RDY_64BYTE			BIT(3)
#define RDY_32BYTE			BIT(2)
#define RDY_16BYTE			BIT(1)
#define FIFO_RDY			BIT(0)

#define RD_FIFO_RESET			0x0030
#define RESET_FIFO			BIT(0)

#define NEXT_DMA_DESC_ADDR		0x0040
#define CURRENT_DMA_DESC_ADDR		0x0044
#define CURRENT_MEM_ADDR		0x0048

#define CUR_MEM_ADDR			0x0048
#define HW_VERSION			0x004c
#define RD_FIFO				0x0050
#define SAMPLING_CLK_CFG		0x0090
#define SAMPLING_CLK_STATUS		0x0094

#define QSPI_ALIGN_REQ			32

enum qspi_dir {
	QSPI_READ,
	QSPI_WRITE,
};

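/*
 * In-memory layout of one hardware DMA command descriptor. The controller
 * walks a chain of these: data_address points at the payload buffer,
 * next_descriptor at the following descriptor (descriptors are allocated
 * zeroed, so an untouched next_descriptor of 0 ends the chain), and the
 * bitfields select the direction, multi-I/O mode, fragment flag and
 * per-descriptor byte length.
 */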
struct qspi_cmd_desc {
	u32 data_address;
	u32 next_descriptor;
	u32 direction:1;
	u32 multi_io_mode:3;
	u32 reserved1:4;
	u32 fragment:1;
	u32 reserved2:7;
	u32 length:16;
};

struct qspi_xfer {
	union {
		const void *tx_buf;
		void *rx_buf;
	};
	unsigned int rem_bytes;
	unsigned int buswidth;
	enum qspi_dir dir;
	bool is_last;
};

enum qspi_clocks {
	QSPI_CLK_CORE,
	QSPI_CLK_IFACE,
	QSPI_NUM_CLKS
};

/*
 * Number of entries in the sgt returned from the SPI framework that we
 * will support. Can be modified as required. In practice, given that
 * max_dma_len is 64KB, the number of entries is not expected to exceed 1.
 */
#define QSPI_MAX_SG 5

struct qcom_qspi {
	void __iomem *base;
	struct device *dev;
	struct clk_bulk_data *clks;
	struct qspi_xfer xfer;
	struct dma_pool *dma_cmd_pool;
	dma_addr_t dma_cmd_desc[QSPI_MAX_SG];
	void *virt_cmd_desc[QSPI_MAX_SG];
	unsigned int n_cmd_desc;
	struct icc_path *icc_path_cpu_to_qspi;
	unsigned long last_speed;
	/* Lock to protect data accessed by IRQs */
	spinlock_t lock;
};

static u32 qspi_buswidth_to_iomode(struct qcom_qspi *ctrl,
				   unsigned int buswidth)
{
	switch (buswidth) {
	case 1:
		return SDR_1BIT;
	case 2:
		return SDR_2BIT;
	case 4:
		return SDR_4BIT;
	default:
		dev_warn_once(ctrl->dev,
			      "Unexpected bus width: %u\n", buswidth);
		return SDR_1BIT;
	}
}

static void qcom_qspi_pio_xfer_cfg(struct qcom_qspi *ctrl)
{
	u32 pio_xfer_cfg;
	u32 iomode;
	const struct qspi_xfer *xfer;

	xfer = &ctrl->xfer;
	pio_xfer_cfg = readl(ctrl->base + PIO_XFER_CFG);
	pio_xfer_cfg &= ~TRANSFER_DIRECTION;
	pio_xfer_cfg |= xfer->dir;
	if (xfer->is_last)
		pio_xfer_cfg &= ~TRANSFER_FRAGMENT;
	else
		pio_xfer_cfg |= TRANSFER_FRAGMENT;
	pio_xfer_cfg &= ~MULTI_IO_MODE_MSK;
	iomode = qspi_buswidth_to_iomode(ctrl, xfer->buswidth);
	pio_xfer_cfg |= iomode << MULTI_IO_MODE_SHFT;

	writel(pio_xfer_cfg, ctrl->base + PIO_XFER_CFG);
}

static void qcom_qspi_pio_xfer_ctrl(struct qcom_qspi *ctrl)
{
	u32 pio_xfer_ctrl;

	pio_xfer_ctrl = readl(ctrl->base + PIO_XFER_CTRL);
	pio_xfer_ctrl &= ~REQUEST_COUNT_MSK;
	pio_xfer_ctrl |= ctrl->xfer.rem_bytes;
	writel(pio_xfer_ctrl, ctrl->base + PIO_XFER_CTRL);
}

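/*
 * Start a PIO transfer. Writes are driven by WR_FIFO_EMPTY (the output
 * FIFO is refilled each time it drains) while reads are driven by
 * RESP_FIFO_RDY (the response FIFO is drained as data arrives); the rest
 * of the transfer is serviced from the IRQ handler.
 */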
static void qcom_qspi_pio_xfer(struct qcom_qspi *ctrl)
{
	u32 ints;

	qcom_qspi_pio_xfer_cfg(ctrl);

	/* Ack any previous interrupts that might be hanging around */
	writel(QSPI_ALL_IRQS, ctrl->base + MSTR_INT_STATUS);

	/* Setup new interrupts */
	if (ctrl->xfer.dir == QSPI_WRITE)
		ints = QSPI_ERR_IRQS | WR_FIFO_EMPTY;
	else
		ints = QSPI_ERR_IRQS | RESP_FIFO_RDY;
	writel(ints, ctrl->base + MSTR_INT_EN);

	/* Kick off the transfer */
	qcom_qspi_pio_xfer_ctrl(ctrl);
}

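/*
 * Error path invoked by the SPI core: mask and ack all interrupts, drop
 * the remaining byte count so the IRQ handler won't touch the buffer, and
 * release any DMA command descriptors left over from a failed transfer.
 */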
static void qcom_qspi_handle_err(struct spi_controller *host,
				 struct spi_message *msg)
{
	u32 int_status;
	struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctrl->lock, flags);
	writel(0, ctrl->base + MSTR_INT_EN);
	int_status = readl(ctrl->base + MSTR_INT_STATUS);
	writel(int_status, ctrl->base + MSTR_INT_STATUS);
	ctrl->xfer.rem_bytes = 0;

	/* free cmd descriptors if they are around (DMA mode) */
	for (i = 0; i < ctrl->n_cmd_desc; i++)
		dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
			      ctrl->dma_cmd_desc[i]);
	ctrl->n_cmd_desc = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
}

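/*
 * Apply a new bus speed: scale the core clock through the OPP table (the
 * core must run at 4x the transfer clock when SBL_EN is set) and vote a
 * matching interconnect bandwidth. last_speed caches the result so
 * back-to-back transfers at the same rate skip all of this.
 */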
static int qcom_qspi_set_speed(struct qcom_qspi *ctrl, unsigned long speed_hz)
{
	int ret;
	unsigned int avg_bw_cpu;

	if (speed_hz == ctrl->last_speed)
		return 0;

	/* In regular operation (SBL_EN=1) core must be 4x transfer clock */
	ret = dev_pm_opp_set_rate(ctrl->dev, speed_hz * 4);
	if (ret) {
		dev_err(ctrl->dev, "Failed to set core clk %d\n", ret);
		return ret;
	}

	/*
	 * Set BW quota for CPU.
	 * We don't have explicit peak requirement so keep it equal to avg_bw.
	 */
	avg_bw_cpu = Bps_to_icc(speed_hz);
	ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, avg_bw_cpu, avg_bw_cpu);
	if (ret) {
		dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
			__func__, ret);
		return ret;
	}

	ctrl->last_speed = speed_hz;

	return 0;
}

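/*
 * Allocate one command descriptor from the DMA pool for n_bytes of
 * payload at dma_ptr and record it in the ctrl bookkeeping arrays. When
 * this is not the first descriptor, the previous one is linked to it and
 * flagged as a fragment (i.e. not the end of the transfer).
 */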
static int qcom_qspi_alloc_desc(struct qcom_qspi *ctrl, dma_addr_t dma_ptr,
				uint32_t n_bytes)
{
	struct qspi_cmd_desc *virt_cmd_desc, *prev;
	dma_addr_t dma_cmd_desc;

	/* allocate for dma cmd descriptor */
	virt_cmd_desc = dma_pool_alloc(ctrl->dma_cmd_pool, GFP_ATOMIC | __GFP_ZERO, &dma_cmd_desc);
	if (!virt_cmd_desc) {
		dev_warn_once(ctrl->dev, "Couldn't find memory for descriptor\n");
		return -EAGAIN;
	}

	ctrl->virt_cmd_desc[ctrl->n_cmd_desc] = virt_cmd_desc;
	ctrl->dma_cmd_desc[ctrl->n_cmd_desc] = dma_cmd_desc;
	ctrl->n_cmd_desc++;

	/* setup cmd descriptor */
	virt_cmd_desc->data_address = dma_ptr;
	virt_cmd_desc->direction = ctrl->xfer.dir;
	virt_cmd_desc->multi_io_mode = qspi_buswidth_to_iomode(ctrl, ctrl->xfer.buswidth);
	virt_cmd_desc->fragment = !ctrl->xfer.is_last;
	virt_cmd_desc->length = n_bytes;

	/* update previous descriptor */
	if (ctrl->n_cmd_desc >= 2) {
		prev = (ctrl->virt_cmd_desc)[ctrl->n_cmd_desc - 2];
		prev->next_descriptor = dma_cmd_desc;
		prev->fragment = 1;
	}

	return 0;
}

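/*
 * Build the descriptor chain for one spi_transfer. Every scatterlist
 * entry is validated up front (address alignment, and read lengths must
 * be a multiple of 4 bytes) so we can still fall back to PIO with
 * -EAGAIN before any descriptor has been handed to the hardware.
 */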
static int qcom_qspi_setup_dma_desc(struct qcom_qspi *ctrl,
				    struct spi_transfer *xfer)
{
	int ret;
	struct sg_table *sgt;
	dma_addr_t dma_ptr_sg;
	unsigned int dma_len_sg;
	int i;

	if (ctrl->n_cmd_desc) {
		dev_err(ctrl->dev, "Remnant dma buffers n_cmd_desc-%d\n", ctrl->n_cmd_desc);
		return -EIO;
	}

	sgt = (ctrl->xfer.dir == QSPI_READ) ? &xfer->rx_sg : &xfer->tx_sg;
	if (!sgt->nents || sgt->nents > QSPI_MAX_SG) {
		dev_warn_once(ctrl->dev, "Cannot handle %d entries in scatter list\n", sgt->nents);
		return -EAGAIN;
	}

	for (i = 0; i < sgt->nents; i++) {
		dma_ptr_sg = sg_dma_address(sgt->sgl + i);
		dma_len_sg = sg_dma_len(sgt->sgl + i);
		if (!IS_ALIGNED(dma_ptr_sg, QSPI_ALIGN_REQ)) {
			dev_warn_once(ctrl->dev, "dma_address not aligned to %d\n", QSPI_ALIGN_REQ);
			return -EAGAIN;
		}
		/*
		 * When reading with DMA the controller writes to memory one
		 * word at a time. If the length isn't a multiple of 4 bytes
		 * then the controller can clobber whatever lies after the
		 * buffer in memory. Fall back to PIO to be safe.
		 */
		if (ctrl->xfer.dir == QSPI_READ && (dma_len_sg & 0x03)) {
			dev_warn_once(ctrl->dev, "fallback to PIO for read of size %#010x\n",
				      dma_len_sg);
			return -EAGAIN;
		}
	}

	for (i = 0; i < sgt->nents; i++) {
		dma_ptr_sg = sg_dma_address(sgt->sgl + i);
		dma_len_sg = sg_dma_len(sgt->sgl + i);

		ret = qcom_qspi_alloc_desc(ctrl, dma_ptr_sg, dma_len_sg);
		if (ret)
			goto cleanup;
	}
	return 0;

cleanup:
	for (i = 0; i < ctrl->n_cmd_desc; i++)
		dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
			      ctrl->dma_cmd_desc[i]);
	ctrl->n_cmd_desc = 0;
	return ret;
}

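/*
 * Hand the head of the prepared descriptor chain to the controller;
 * completion of the whole chain is signalled by DMA_CHAIN_DONE.
 */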
static void qcom_qspi_dma_xfer(struct qcom_qspi *ctrl)
{
	/* Setup new interrupts */
	writel(DMA_CHAIN_DONE, ctrl->base + MSTR_INT_EN);

	/* kick off transfer */
	writel((u32)((ctrl->dma_cmd_desc)[0]), ctrl->base + NEXT_DMA_DESC_ADDR);
}

/* Switch to DMA if transfer length exceeds this */
#define QSPI_MAX_BYTES_FIFO 64

static bool qcom_qspi_can_dma(struct spi_controller *ctlr,
			      struct spi_device *slv, struct spi_transfer *xfer)
{
	return xfer->len > QSPI_MAX_BYTES_FIFO;
}

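/*
 * Execute one transfer. If the SPI core mapped scatterlists for us (see
 * qcom_qspi_can_dma()) the DMA path is tried first, falling back to PIO
 * on -EAGAIN. Returning 1 tells the core that completion is asynchronous,
 * via spi_finalize_current_transfer() from the IRQ handler.
 */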
static int qcom_qspi_transfer_one(struct spi_controller *host,
				  struct spi_device *slv,
				  struct spi_transfer *xfer)
{
	struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
	int ret;
	unsigned long speed_hz;
	unsigned long flags;
	u32 mstr_cfg;

	speed_hz = slv->max_speed_hz;
	if (xfer->speed_hz)
		speed_hz = xfer->speed_hz;

	ret = qcom_qspi_set_speed(ctrl, speed_hz);
	if (ret)
		return ret;

	spin_lock_irqsave(&ctrl->lock, flags);
	mstr_cfg = readl(ctrl->base + MSTR_CONFIG);

	/* We are half duplex, so either rx or tx will be set */
	if (xfer->rx_buf) {
		ctrl->xfer.dir = QSPI_READ;
		ctrl->xfer.buswidth = xfer->rx_nbits;
		ctrl->xfer.rx_buf = xfer->rx_buf;
	} else {
		ctrl->xfer.dir = QSPI_WRITE;
		ctrl->xfer.buswidth = xfer->tx_nbits;
		ctrl->xfer.tx_buf = xfer->tx_buf;
	}
	ctrl->xfer.is_last = list_is_last(&xfer->transfer_list,
					  &host->cur_msg->transfers);
	ctrl->xfer.rem_bytes = xfer->len;

	if (xfer->rx_sg.nents || xfer->tx_sg.nents) {
		/* do DMA transfer */
		if (!(mstr_cfg & DMA_ENABLE)) {
			mstr_cfg |= DMA_ENABLE;
			writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
		}

		ret = qcom_qspi_setup_dma_desc(ctrl, xfer);
		if (ret != -EAGAIN) {
			if (!ret) {
				dma_wmb();
				qcom_qspi_dma_xfer(ctrl);
			}
			goto exit;
		}
		dev_warn_once(ctrl->dev, "DMA failure, falling back to PIO\n");
		ret = 0; /* We'll retry w/ PIO */
	}

	if (mstr_cfg & DMA_ENABLE) {
		mstr_cfg &= ~DMA_ENABLE;
		writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
	}
	qcom_qspi_pio_xfer(ctrl);

exit:
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (ret)
		return ret;

	/* We'll call spi_finalize_current_transfer() when done */
	return 1;
}

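/*
 * Per-message controller setup: select the chip select, SPI mode and TX
 * delays in MSTR_CONFIG. DMA_ENABLE is cleared here and only set
 * per-transfer when the DMA path is actually taken.
 */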
static int qcom_qspi_prepare_message(struct spi_controller *host,
				     struct spi_message *message)
{
	u32 mstr_cfg;
	struct qcom_qspi *ctrl;
	int tx_data_oe_delay = 1;
	int tx_data_delay = 1;
	unsigned long flags;

	ctrl = spi_controller_get_devdata(host);
	spin_lock_irqsave(&ctrl->lock, flags);

	mstr_cfg = readl(ctrl->base + MSTR_CONFIG);
	mstr_cfg &= ~CHIP_SELECT_NUM;
	if (spi_get_chipselect(message->spi, 0))
		mstr_cfg |= CHIP_SELECT_NUM;

	mstr_cfg |= FB_CLK_EN | PIN_WPN | PIN_HOLDN | SBL_EN | FULL_CYCLE_MODE;
	mstr_cfg &= ~(SPI_MODE_MSK | TX_DATA_OE_DELAY_MSK | TX_DATA_DELAY_MSK);
	mstr_cfg |= message->spi->mode << SPI_MODE_SHFT;
	mstr_cfg |= tx_data_oe_delay << TX_DATA_OE_DELAY_SHFT;
	mstr_cfg |= tx_data_delay << TX_DATA_DELAY_SHFT;
	mstr_cfg &= ~DMA_ENABLE;

	writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return 0;
}

static int qcom_qspi_alloc_dma(struct qcom_qspi *ctrl)
{
	ctrl->dma_cmd_pool = dmam_pool_create("qspi cmd desc pool",
		ctrl->dev, sizeof(struct qspi_cmd_desc), 0, 0);
	if (!ctrl->dma_cmd_pool)
		return -ENOMEM;

	return 0;
}

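/*
 * Drain the read FIFO: pull out as many whole words as are available,
 * then mop up any 1-3 trailing bytes from a final partial word.
 */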
static irqreturn_t pio_read(struct qcom_qspi *ctrl)
{
	u32 rd_fifo_status;
	u32 rd_fifo;
	unsigned int wr_cnts;
	unsigned int bytes_to_read;
	unsigned int words_to_read;
	u32 *word_buf;
	u8 *byte_buf;
	int i;

	rd_fifo_status = readl(ctrl->base + RD_FIFO_STATUS);

	if (!(rd_fifo_status & FIFO_RDY)) {
		dev_dbg(ctrl->dev, "Spurious IRQ %#x\n", rd_fifo_status);
		return IRQ_NONE;
	}

	wr_cnts = (rd_fifo_status & WR_CNTS_MSK) >> WR_CNTS_SHFT;
	wr_cnts = min(wr_cnts, ctrl->xfer.rem_bytes);

	words_to_read = wr_cnts / QSPI_BYTES_PER_WORD;
	bytes_to_read = wr_cnts % QSPI_BYTES_PER_WORD;

	if (words_to_read) {
		word_buf = ctrl->xfer.rx_buf;
		ctrl->xfer.rem_bytes -= words_to_read * QSPI_BYTES_PER_WORD;
		ioread32_rep(ctrl->base + RD_FIFO, word_buf, words_to_read);
		ctrl->xfer.rx_buf = word_buf + words_to_read;
	}

	if (bytes_to_read) {
		byte_buf = ctrl->xfer.rx_buf;
		rd_fifo = readl(ctrl->base + RD_FIFO);
		ctrl->xfer.rem_bytes -= bytes_to_read;
		for (i = 0; i < bytes_to_read; i++)
			*byte_buf++ = rd_fifo >> (i * BITS_PER_BYTE);
		ctrl->xfer.rx_buf = byte_buf;
	}

	return IRQ_HANDLED;
}

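/*
 * Refill the write FIFO: push whole words while both FIFO space and data
 * remain, switching to byte writes only for the final 1-3 bytes.
 */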
static irqreturn_t pio_write(struct qcom_qspi *ctrl)
{
	const void *xfer_buf = ctrl->xfer.tx_buf;
	const int *word_buf;
	const char *byte_buf;
	unsigned int wr_fifo_bytes;
	unsigned int wr_fifo_words;
	unsigned int wr_size;
	unsigned int rem_words;

	wr_fifo_bytes = readl(ctrl->base + PIO_XFER_STATUS);
	wr_fifo_bytes >>= WR_FIFO_BYTES_SHFT;

	if (ctrl->xfer.rem_bytes < QSPI_BYTES_PER_WORD) {
		/* Process the last 1-3 bytes */
		wr_size = min(wr_fifo_bytes, ctrl->xfer.rem_bytes);
		ctrl->xfer.rem_bytes -= wr_size;

		byte_buf = xfer_buf;
		while (wr_size--)
			writel(*byte_buf++,
			       ctrl->base + PIO_DATAOUT_1B);
		ctrl->xfer.tx_buf = byte_buf;
	} else {
		/*
		 * Process all the whole words; to keep things simple we'll
		 * just wait for the next interrupt to handle the last 1-3
		 * bytes if the byte count isn't a whole number of words.
		 */
		rem_words = ctrl->xfer.rem_bytes / QSPI_BYTES_PER_WORD;
		wr_fifo_words = wr_fifo_bytes / QSPI_BYTES_PER_WORD;

		wr_size = min(rem_words, wr_fifo_words);
		ctrl->xfer.rem_bytes -= wr_size * QSPI_BYTES_PER_WORD;

		word_buf = xfer_buf;
		iowrite32_rep(ctrl->base + PIO_DATAOUT_4B, word_buf, wr_size);
		ctrl->xfer.tx_buf = word_buf + wr_size;
	}

	return IRQ_HANDLED;
}

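/*
 * Single IRQ handler for both modes: ack the raw status and filter it
 * against the enabled mask, service PIO FIFO events or DMA chain
 * completion, and finalize the current transfer once no bytes remain.
 */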
static irqreturn_t qcom_qspi_irq(int irq, void *dev_id)
{
	u32 int_status;
	struct qcom_qspi *ctrl = dev_id;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&ctrl->lock);

	int_status = readl(ctrl->base + MSTR_INT_STATUS);
	writel(int_status, ctrl->base + MSTR_INT_STATUS);

	/* Ignore disabled interrupts */
	int_status &= readl(ctrl->base + MSTR_INT_EN);

	/* PIO mode handling */
	if (ctrl->xfer.dir == QSPI_WRITE) {
		if (int_status & WR_FIFO_EMPTY)
			ret = pio_write(ctrl);
	} else {
		if (int_status & RESP_FIFO_RDY)
			ret = pio_read(ctrl);
	}

	if (int_status & QSPI_ERR_IRQS) {
		if (int_status & RESP_FIFO_UNDERRUN)
			dev_err(ctrl->dev, "IRQ error: FIFO underrun\n");
		if (int_status & WR_FIFO_OVERRUN)
			dev_err(ctrl->dev, "IRQ error: FIFO overrun\n");
		if (int_status & HRESP_FROM_NOC_ERR)
			dev_err(ctrl->dev, "IRQ error: NOC response error\n");
		ret = IRQ_HANDLED;
	}

	if (!ctrl->xfer.rem_bytes) {
		writel(0, ctrl->base + MSTR_INT_EN);
		spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
	}

	/* DMA mode handling */
	if (int_status & DMA_CHAIN_DONE) {
		int i;

		writel(0, ctrl->base + MSTR_INT_EN);
		ctrl->xfer.rem_bytes = 0;

		for (i = 0; i < ctrl->n_cmd_desc; i++)
			dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
				      ctrl->dma_cmd_desc[i]);
		ctrl->n_cmd_desc = 0;

		ret = IRQ_HANDLED;
		spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
	}

	spin_unlock(&ctrl->lock);
	return ret;
}

static int qcom_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	/*
	 * If qcom_qspi_can_dma() is going to return false we don't need to
	 * adjust anything.
	 */
	if (op->data.nbytes <= QSPI_MAX_BYTES_FIFO)
		return 0;

	/*
	 * When reading, the transfer needs to be a multiple of 4 bytes so
	 * shrink the transfer if that's not true. The caller will then do a
	 * second transfer to finish things up.
	 */
	if (op->data.dir == SPI_MEM_DATA_IN && (op->data.nbytes & 0x3))
		op->data.nbytes &= ~0x3;

	return 0;
}

static const struct spi_controller_mem_ops qcom_qspi_mem_ops = {
	.adjust_op_size = qcom_qspi_adjust_op_size,
};

static int qcom_qspi_probe(struct platform_device *pdev)
{
	int ret;
	struct device *dev;
	struct spi_controller *host;
	struct qcom_qspi *ctrl;

	dev = &pdev->dev;

	host = devm_spi_alloc_host(dev, sizeof(*ctrl));
	if (!host)
		return -ENOMEM;

	platform_set_drvdata(pdev, host);

	ctrl = spi_controller_get_devdata(host);

	spin_lock_init(&ctrl->lock);
	ctrl->dev = dev;
	ctrl->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ctrl->base))
		return PTR_ERR(ctrl->base);

	ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS,
				  sizeof(*ctrl->clks), GFP_KERNEL);
	if (!ctrl->clks)
		return -ENOMEM;

	ctrl->clks[QSPI_CLK_CORE].id = "core";
	ctrl->clks[QSPI_CLK_IFACE].id = "iface";
	ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
	if (ret)
		return ret;

	ctrl->icc_path_cpu_to_qspi = devm_of_icc_get(dev, "qspi-config");
	if (IS_ERR(ctrl->icc_path_cpu_to_qspi))
		return dev_err_probe(dev, PTR_ERR(ctrl->icc_path_cpu_to_qspi),
				     "Failed to get cpu path\n");

	/* Set BW vote for register access */
	ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, Bps_to_icc(1000),
			 Bps_to_icc(1000));
	if (ret) {
		dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
			__func__, ret);
		return ret;
	}

	ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
	if (ret) {
		dev_err(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
			__func__, ret);
		return ret;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	ret = devm_request_irq(dev, ret, qcom_qspi_irq, 0, dev_name(dev), ctrl);
	if (ret) {
		dev_err(dev, "Failed to request irq %d\n", ret);
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return dev_err_probe(dev, ret, "could not set DMA mask\n");

	host->max_speed_hz = 300000000;
	host->max_dma_len = 65536; /* as per HPG */
	host->dma_alignment = QSPI_ALIGN_REQ;
	host->num_chipselect = QSPI_NUM_CS;
	host->bus_num = -1;
	host->dev.of_node = pdev->dev.of_node;
	host->mode_bits = SPI_MODE_0 |
			  SPI_TX_DUAL | SPI_RX_DUAL |
			  SPI_TX_QUAD | SPI_RX_QUAD;
	host->flags = SPI_CONTROLLER_HALF_DUPLEX;
	host->prepare_message = qcom_qspi_prepare_message;
	host->transfer_one = qcom_qspi_transfer_one;
	host->handle_err = qcom_qspi_handle_err;
	if (of_property_read_bool(pdev->dev.of_node, "iommus"))
		host->can_dma = qcom_qspi_can_dma;
	host->auto_runtime_pm = true;
	host->mem_ops = &qcom_qspi_mem_ops;

	ret = devm_pm_opp_set_clkname(&pdev->dev, "core");
	if (ret)
		return ret;
	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(&pdev->dev);
	if (ret && ret != -ENODEV) {
		dev_err(&pdev->dev, "invalid OPP table in device tree\n");
		return ret;
	}

	ret = qcom_qspi_alloc_dma(ctrl);
	if (ret)
		return ret;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 250);
	pm_runtime_enable(dev);

	ret = spi_register_controller(host);
	if (!ret)
		return 0;

	pm_runtime_disable(dev);

	return ret;
}

static void qcom_qspi_remove(struct platform_device *pdev)
{
	struct spi_controller *host = platform_get_drvdata(pdev);

	/* Unregister _before_ disabling pm_runtime() so we stop transfers */
	spi_unregister_controller(host);

	pm_runtime_disable(&pdev->dev);
}

static int __maybe_unused qcom_qspi_runtime_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
	int ret;

	/* Drop the performance state vote */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(QSPI_NUM_CLKS, ctrl->clks);

	ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
	if (ret) {
		dev_err_ratelimited(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
				    __func__, ret);
		return ret;
	}

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int __maybe_unused qcom_qspi_runtime_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
	int ret;

	pinctrl_pm_select_default_state(dev);

	ret = icc_enable(ctrl->icc_path_cpu_to_qspi);
	if (ret) {
		dev_err_ratelimited(ctrl->dev, "%s: ICC enable failed for cpu: %d\n",
				    __func__, ret);
		return ret;
	}

	ret = clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
	if (ret)
		return ret;

	return dev_pm_opp_set_rate(dev, ctrl->last_speed * 4);
}

static int __maybe_unused qcom_qspi_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	int ret;

	ret = spi_controller_suspend(host);
	if (ret)
		return ret;

	ret = pm_runtime_force_suspend(dev);
	if (ret)
		spi_controller_resume(host);

	return ret;
}

static int __maybe_unused qcom_qspi_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	ret = spi_controller_resume(host);
	if (ret)
		pm_runtime_force_suspend(dev);

	return ret;
}

static const struct dev_pm_ops qcom_qspi_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(qcom_qspi_runtime_suspend,
			   qcom_qspi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(qcom_qspi_suspend, qcom_qspi_resume)
};

static const struct of_device_id qcom_qspi_dt_match[] = {
	{ .compatible = "qcom,qspi-v1", },
	{ }
};
MODULE_DEVICE_TABLE(of, qcom_qspi_dt_match);

static struct platform_driver qcom_qspi_driver = {
	.driver = {
		.name		= "qcom_qspi",
		.pm		= &qcom_qspi_dev_pm_ops,
		.of_match_table = qcom_qspi_dt_match,
	},
	.probe = qcom_qspi_probe,
	.remove_new = qcom_qspi_remove,
};
module_platform_driver(qcom_qspi_driver);

MODULE_DESCRIPTION("SPI driver for QSPI cores");
MODULE_LICENSE("GPL v2");