// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.

#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_opp.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#define QSPI_NUM_CS             2
#define QSPI_BYTES_PER_WORD     4
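/*
 * PIO moves data through the FIFOs one 32-bit word at a time where
 * possible; pio_read()/pio_write() fall back to single-byte FIFO
 * accesses for the trailing 1-3 bytes of a transfer.
 */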
#define MSTR_CONFIG             0x0000
#define FULL_CYCLE_MODE         BIT(3)
#define FB_CLK_EN               BIT(4)
#define PIN_HOLDN               BIT(6)
#define PIN_WPN                 BIT(7)
#define DMA_ENABLE              BIT(8)
#define BIG_ENDIAN_MODE         BIT(9)
#define SPI_MODE_MSK            0xc00
#define SPI_MODE_SHFT           10
#define CHIP_SELECT_NUM         BIT(12)
#define SBL_EN                  BIT(13)
#define LPA_BASE_MSK            0x3c000
#define LPA_BASE_SHFT           14
#define TX_DATA_DELAY_MSK       0xc0000
#define TX_DATA_DELAY_SHFT      18
#define TX_CLK_DELAY_MSK        0x300000
#define TX_CLK_DELAY_SHFT       20
#define TX_CS_N_DELAY_MSK       0xc00000
#define TX_CS_N_DELAY_SHFT      22
#define TX_DATA_OE_DELAY_MSK    0x3000000
#define TX_DATA_OE_DELAY_SHFT   24

#define AHB_MASTER_CFG                          0x0004
#define HMEM_TYPE_START_MID_TRANS_MSK           0x7
#define HMEM_TYPE_START_MID_TRANS_SHFT          0
#define HMEM_TYPE_LAST_TRANS_MSK                0x38
#define HMEM_TYPE_LAST_TRANS_SHFT               3
#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_MSK  0xc0
#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_SHFT 6
#define HMEMTYPE_READ_TRANS_MSK                 0x700
#define HMEMTYPE_READ_TRANS_SHFT                8
#define HSHARED                                 BIT(11)
#define HINNERSHARED                            BIT(12)

#define MSTR_INT_EN             0x000C
#define MSTR_INT_STATUS         0x0010
#define RESP_FIFO_UNDERRUN      BIT(0)
#define RESP_FIFO_NOT_EMPTY     BIT(1)
#define RESP_FIFO_RDY           BIT(2)
#define HRESP_FROM_NOC_ERR      BIT(3)
#define WR_FIFO_EMPTY           BIT(9)
#define WR_FIFO_FULL            BIT(10)
#define WR_FIFO_OVERRUN         BIT(11)
#define TRANSACTION_DONE        BIT(16)
#define DMA_CHAIN_DONE          BIT(31)
#define QSPI_ERR_IRQS           (RESP_FIFO_UNDERRUN | HRESP_FROM_NOC_ERR | \
                                 WR_FIFO_OVERRUN)
#define QSPI_ALL_IRQS           (QSPI_ERR_IRQS | RESP_FIFO_RDY | \
                                 WR_FIFO_EMPTY | WR_FIFO_FULL | \
                                 TRANSACTION_DONE | DMA_CHAIN_DONE)

#define PIO_XFER_CTRL           0x0014
#define REQUEST_COUNT_MSK       0xffff

#define PIO_XFER_CFG            0x0018
#define TRANSFER_DIRECTION      BIT(0)
#define MULTI_IO_MODE_MSK       0xe
#define MULTI_IO_MODE_SHFT      1
#define TRANSFER_FRAGMENT       BIT(8)
#define SDR_1BIT                1
#define SDR_2BIT                2
#define SDR_4BIT                3
#define DDR_1BIT                5
#define DDR_2BIT                6
#define DDR_4BIT                7
#define DMA_DESC_SINGLE_SPI     1
#define DMA_DESC_DUAL_SPI       2
#define DMA_DESC_QUAD_SPI       3

#define PIO_XFER_STATUS         0x001c
#define WR_FIFO_BYTES_MSK       0xffff0000
#define WR_FIFO_BYTES_SHFT      16

#define PIO_DATAOUT_1B          0x0020
#define PIO_DATAOUT_4B          0x0024

#define RD_FIFO_CFG             0x0028
#define CONTINUOUS_MODE         BIT(0)

#define RD_FIFO_STATUS          0x002c
#define FIFO_EMPTY              BIT(11)
#define WR_CNTS_MSK             0x7f0
#define WR_CNTS_SHFT            4
#define RDY_64BYTE              BIT(3)
#define RDY_32BYTE              BIT(2)
#define RDY_16BYTE              BIT(1)
#define FIFO_RDY                BIT(0)

#define RD_FIFO_RESET           0x0030
#define RESET_FIFO              BIT(0)

#define NEXT_DMA_DESC_ADDR      0x0040
#define CURRENT_DMA_DESC_ADDR   0x0044
#define CURRENT_MEM_ADDR        0x0048
#define HW_VERSION              0x004c
#define RD_FIFO                 0x0050
#define SAMPLING_CLK_CFG        0x0090
#define SAMPLING_CLK_STATUS     0x0094

#define QSPI_ALIGN_REQ          32
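/*
 * DMA data buffers must start on a 32-byte boundary;
 * qcom_qspi_setup_dma_desc() rejects unaligned scatter-gather entries
 * with -EAGAIN so the caller can fall back to PIO.
 */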
enum qspi_dir {
        QSPI_READ,
        QSPI_WRITE,
};

struct qspi_cmd_desc {
        u32 data_address;
        u32 next_descriptor;
        u32 direction:1;
        u32 multi_io_mode:3;
        u32 reserved1:4;
        u32 fragment:1;
        u32 reserved2:7;
        u32 length:16;
};

struct qspi_xfer {
        union {
                const void *tx_buf;
                void *rx_buf;
        };
        unsigned int rem_bytes;
        unsigned int buswidth;
        enum qspi_dir dir;
        bool is_last;
};

enum qspi_clocks {
        QSPI_CLK_CORE,
        QSPI_CLK_IFACE,
        QSPI_NUM_CLKS
};

/*
 * Number of entries in the sgt returned from the spi framework that
 * we will support. Can be modified as required. In practice, given
 * that max_dma_len is 64KB, the number of entries is not expected to
 * exceed 1.
 */
#define QSPI_MAX_SG 5

struct qcom_qspi {
        void __iomem *base;
        struct device *dev;
        struct clk_bulk_data *clks;
        struct qspi_xfer xfer;
        struct dma_pool *dma_cmd_pool;
        dma_addr_t dma_cmd_desc[QSPI_MAX_SG];
        void *virt_cmd_desc[QSPI_MAX_SG];
        unsigned int n_cmd_desc;
        struct icc_path *icc_path_cpu_to_qspi;
        unsigned long last_speed;
        /* Lock to protect data accessed by IRQs */
        spinlock_t lock;
};
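/*
 * Map a transfer's bus width (1, 2 or 4 data lines) onto the
 * controller's multi-IO mode encoding; unknown widths are reported
 * once and treated as single-bit SDR.
 */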
static u32 qspi_buswidth_to_iomode(struct qcom_qspi *ctrl,
                                   unsigned int buswidth)
{
        switch (buswidth) {
        case 1:
                return SDR_1BIT;
        case 2:
                return SDR_2BIT;
        case 4:
                return SDR_4BIT;
        default:
                dev_warn_once(ctrl->dev,
                              "Unexpected bus width: %u\n", buswidth);
                return SDR_1BIT;
        }
}
static void qcom_qspi_pio_xfer_cfg(struct qcom_qspi *ctrl)
{
        u32 pio_xfer_cfg;
        u32 iomode;
        const struct qspi_xfer *xfer;

        xfer = &ctrl->xfer;
        pio_xfer_cfg = readl(ctrl->base + PIO_XFER_CFG);
        pio_xfer_cfg &= ~TRANSFER_DIRECTION;
        pio_xfer_cfg |= xfer->dir;
        if (xfer->is_last)
                pio_xfer_cfg &= ~TRANSFER_FRAGMENT;
        else
                pio_xfer_cfg |= TRANSFER_FRAGMENT;
        pio_xfer_cfg &= ~MULTI_IO_MODE_MSK;
        iomode = qspi_buswidth_to_iomode(ctrl, xfer->buswidth);
        pio_xfer_cfg |= iomode << MULTI_IO_MODE_SHFT;

        writel(pio_xfer_cfg, ctrl->base + PIO_XFER_CFG);
}
static void qcom_qspi_pio_xfer_ctrl(struct qcom_qspi *ctrl)
{
        u32 pio_xfer_ctrl;

        pio_xfer_ctrl = readl(ctrl->base + PIO_XFER_CTRL);
        pio_xfer_ctrl &= ~REQUEST_COUNT_MSK;
        pio_xfer_ctrl |= ctrl->xfer.rem_bytes;
        writel(pio_xfer_ctrl, ctrl->base + PIO_XFER_CTRL);
}
static void qcom_qspi_pio_xfer(struct qcom_qspi *ctrl)
{
        u32 ints;

        qcom_qspi_pio_xfer_cfg(ctrl);

        /* Ack any previous interrupts that might be hanging around */
        writel(QSPI_ALL_IRQS, ctrl->base + MSTR_INT_STATUS);

        /* Setup new interrupts */
        if (ctrl->xfer.dir == QSPI_WRITE)
                ints = QSPI_ERR_IRQS | WR_FIFO_EMPTY;
        else
                ints = QSPI_ERR_IRQS | RESP_FIFO_RDY;
        writel(ints, ctrl->base + MSTR_INT_EN);

        /* Kick off the transfer */
        qcom_qspi_pio_xfer_ctrl(ctrl);
}
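/*
 * Error path from the SPI core: mask and ack all interrupts, drop the
 * remaining byte count, and release any command descriptors still
 * queued from a DMA transfer.
 */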
static void qcom_qspi_handle_err(struct spi_master *master,
                                 struct spi_message *msg)
{
        u32 int_status;
        struct qcom_qspi *ctrl = spi_master_get_devdata(master);
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctrl->lock, flags);
        writel(0, ctrl->base + MSTR_INT_EN);
        int_status = readl(ctrl->base + MSTR_INT_STATUS);
        writel(int_status, ctrl->base + MSTR_INT_STATUS);
        ctrl->xfer.rem_bytes = 0;

        /* free cmd descriptors if they are around (DMA mode) */
        for (i = 0; i < ctrl->n_cmd_desc; i++)
                dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
                              ctrl->dma_cmd_desc[i]);
        ctrl->n_cmd_desc = 0;
        spin_unlock_irqrestore(&ctrl->lock, flags);
}
static int qcom_qspi_set_speed(struct qcom_qspi *ctrl, unsigned long speed_hz)
{
        int ret;
        unsigned int avg_bw_cpu;

        if (speed_hz == ctrl->last_speed)
                return 0;

        /* In regular operation (SBL_EN=1) core must be 4x transfer clock */
        ret = dev_pm_opp_set_rate(ctrl->dev, speed_hz * 4);
        if (ret) {
                dev_err(ctrl->dev, "Failed to set core clk %d\n", ret);
                return ret;
        }

        /*
         * Set BW quota for CPU.
         * We don't have explicit peak requirement so keep it equal to avg_bw.
         */
        avg_bw_cpu = Bps_to_icc(speed_hz);
        ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, avg_bw_cpu, avg_bw_cpu);
        if (ret) {
                dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
                        __func__, ret);
                return ret;
        }

        ctrl->last_speed = speed_hz;

        return 0;
}
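/*
 * Allocate one command descriptor from the DMA pool, point it at the
 * data buffer, and chain it behind the previous descriptor. Called
 * under ctrl->lock, hence the atomic allocation below.
 */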
static int qcom_qspi_alloc_desc(struct qcom_qspi *ctrl, dma_addr_t dma_ptr,
                                u32 n_bytes)
{
        struct qspi_cmd_desc *virt_cmd_desc, *prev;
        dma_addr_t dma_cmd_desc;

        /* allocate for dma cmd descriptor; atomic, as we hold ctrl->lock */
        virt_cmd_desc = dma_pool_alloc(ctrl->dma_cmd_pool,
                                       GFP_ATOMIC | __GFP_ZERO, &dma_cmd_desc);
        if (!virt_cmd_desc) {
                dev_warn_once(ctrl->dev, "Couldn't find memory for descriptor\n");
                return -EAGAIN;
        }

        ctrl->virt_cmd_desc[ctrl->n_cmd_desc] = virt_cmd_desc;
        ctrl->dma_cmd_desc[ctrl->n_cmd_desc] = dma_cmd_desc;
        ctrl->n_cmd_desc++;

        /* setup cmd descriptor */
        virt_cmd_desc->data_address = dma_ptr;
        virt_cmd_desc->direction = ctrl->xfer.dir;
        virt_cmd_desc->multi_io_mode = qspi_buswidth_to_iomode(ctrl, ctrl->xfer.buswidth);
        virt_cmd_desc->fragment = !ctrl->xfer.is_last;
        virt_cmd_desc->length = n_bytes;

        /* update previous descriptor */
        if (ctrl->n_cmd_desc >= 2) {
                prev = (ctrl->virt_cmd_desc)[ctrl->n_cmd_desc - 2];
                prev->next_descriptor = dma_cmd_desc;
                prev->fragment = 1;
        }

        return 0;
}
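/*
 * Build a descriptor chain for one transfer. A scatter list with too
 * many entries or with addresses that miss the 32-byte alignment
 * requirement is rejected with -EAGAIN so the caller can fall back
 * to PIO.
 */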
static int qcom_qspi_setup_dma_desc(struct qcom_qspi *ctrl,
                                    struct spi_transfer *xfer)
{
        int ret;
        struct sg_table *sgt;
        dma_addr_t dma_ptr_sg;
        unsigned int dma_len_sg;
        int i;

        if (ctrl->n_cmd_desc) {
                dev_err(ctrl->dev, "Remnant DMA buffers, n_cmd_desc = %d\n",
                        ctrl->n_cmd_desc);
                return -EIO;
        }

        sgt = (ctrl->xfer.dir == QSPI_READ) ? &xfer->rx_sg : &xfer->tx_sg;
        if (!sgt->nents || sgt->nents > QSPI_MAX_SG) {
                dev_warn_once(ctrl->dev, "Cannot handle %d entries in scatter list\n",
                              sgt->nents);
                return -EAGAIN;
        }

        for (i = 0; i < sgt->nents; i++) {
                dma_ptr_sg = sg_dma_address(sgt->sgl + i);
                if (!IS_ALIGNED(dma_ptr_sg, QSPI_ALIGN_REQ)) {
                        dev_warn_once(ctrl->dev, "dma_address not aligned to %d\n",
                                      QSPI_ALIGN_REQ);
                        return -EAGAIN;
                }
        }

        for (i = 0; i < sgt->nents; i++) {
                dma_ptr_sg = sg_dma_address(sgt->sgl + i);
                dma_len_sg = sg_dma_len(sgt->sgl + i);

                ret = qcom_qspi_alloc_desc(ctrl, dma_ptr_sg, dma_len_sg);
                if (ret)
                        goto cleanup;
        }
        return 0;

cleanup:
        for (i = 0; i < ctrl->n_cmd_desc; i++)
                dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
                              ctrl->dma_cmd_desc[i]);
        ctrl->n_cmd_desc = 0;
        return ret;
}
static void qcom_qspi_dma_xfer(struct qcom_qspi *ctrl)
{
        /* Setup new interrupts */
        writel(DMA_CHAIN_DONE, ctrl->base + MSTR_INT_EN);

        /* kick off transfer */
        writel((u32)((ctrl->dma_cmd_desc)[0]), ctrl->base + NEXT_DMA_DESC_ADDR);
}
/* Switch to DMA if transfer length exceeds this */
#define QSPI_MAX_BYTES_FIFO 64

static bool qcom_qspi_can_dma(struct spi_controller *ctlr,
                              struct spi_device *slv, struct spi_transfer *xfer)
{
        return xfer->len > QSPI_MAX_BYTES_FIFO;
}
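/*
 * One transfer per call: use DMA when the SPI core mapped a scatter
 * list for us (see qcom_qspi_can_dma()), otherwise PIO. Returning 1
 * tells the core the transfer is still in flight; the IRQ handler
 * finalizes it.
 */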
static int qcom_qspi_transfer_one(struct spi_master *master,
                                  struct spi_device *slv,
                                  struct spi_transfer *xfer)
{
        struct qcom_qspi *ctrl = spi_master_get_devdata(master);
        int ret;
        unsigned long speed_hz;
        unsigned long flags;
        u32 mstr_cfg;

        speed_hz = slv->max_speed_hz;
        if (xfer->speed_hz)
                speed_hz = xfer->speed_hz;

        ret = qcom_qspi_set_speed(ctrl, speed_hz);
        if (ret)
                return ret;

        spin_lock_irqsave(&ctrl->lock, flags);
        mstr_cfg = readl(ctrl->base + MSTR_CONFIG);

        /* We are half duplex, so either rx or tx will be set */
        if (xfer->rx_buf) {
                ctrl->xfer.dir = QSPI_READ;
                ctrl->xfer.buswidth = xfer->rx_nbits;
                ctrl->xfer.rx_buf = xfer->rx_buf;
        } else {
                ctrl->xfer.dir = QSPI_WRITE;
                ctrl->xfer.buswidth = xfer->tx_nbits;
                ctrl->xfer.tx_buf = xfer->tx_buf;
        }
        ctrl->xfer.is_last = list_is_last(&xfer->transfer_list,
                                          &master->cur_msg->transfers);
        ctrl->xfer.rem_bytes = xfer->len;

        if (xfer->rx_sg.nents || xfer->tx_sg.nents) {
                /* do DMA transfer */
                if (!(mstr_cfg & DMA_ENABLE)) {
                        mstr_cfg |= DMA_ENABLE;
                        writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
                }

                ret = qcom_qspi_setup_dma_desc(ctrl, xfer);
                if (ret != -EAGAIN) {
                        if (!ret)
                                qcom_qspi_dma_xfer(ctrl);
                        goto exit;
                }
                dev_warn_once(ctrl->dev, "DMA failure, falling back to PIO\n");
                ret = 0; /* We'll retry w/ PIO */
        }

        if (mstr_cfg & DMA_ENABLE) {
                mstr_cfg &= ~DMA_ENABLE;
                writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
        }
        qcom_qspi_pio_xfer(ctrl);

exit:
        spin_unlock_irqrestore(&ctrl->lock, flags);

        if (ret)
                return ret;

        /* We'll call spi_finalize_current_transfer() when done */
        return 1;
}
static int qcom_qspi_prepare_message(struct spi_master *master,
                                     struct spi_message *message)
{
        u32 mstr_cfg;
        struct qcom_qspi *ctrl;
        int tx_data_oe_delay = 1;
        int tx_data_delay = 1;
        unsigned long flags;

        ctrl = spi_master_get_devdata(master);
        spin_lock_irqsave(&ctrl->lock, flags);

        mstr_cfg = readl(ctrl->base + MSTR_CONFIG);
        mstr_cfg &= ~CHIP_SELECT_NUM;
        if (spi_get_chipselect(message->spi, 0))
                mstr_cfg |= CHIP_SELECT_NUM;

        mstr_cfg |= FB_CLK_EN | PIN_WPN | PIN_HOLDN | SBL_EN | FULL_CYCLE_MODE;
        mstr_cfg &= ~(SPI_MODE_MSK | TX_DATA_OE_DELAY_MSK | TX_DATA_DELAY_MSK);
        mstr_cfg |= message->spi->mode << SPI_MODE_SHFT;
        mstr_cfg |= tx_data_oe_delay << TX_DATA_OE_DELAY_SHFT;
        mstr_cfg |= tx_data_delay << TX_DATA_DELAY_SHFT;
        mstr_cfg &= ~DMA_ENABLE;

        writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
        spin_unlock_irqrestore(&ctrl->lock, flags);

        return 0;
}
static int qcom_qspi_alloc_dma(struct qcom_qspi *ctrl)
{
        ctrl->dma_cmd_pool = dmam_pool_create("qspi cmd desc pool",
                ctrl->dev, sizeof(struct qspi_cmd_desc), 0, 0);
        if (!ctrl->dma_cmd_pool)
                return -ENOMEM;

        return 0;
}
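/*
 * Drain the read FIFO: WR_CNTS reports how many bytes are waiting.
 * Whole words are pulled with ioread32_rep(); a 1-3 byte tail is
 * unpacked from one final FIFO word.
 */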
static irqreturn_t pio_read(struct qcom_qspi *ctrl)
{
        u32 rd_fifo_status;
        u32 rd_fifo;
        unsigned int wr_cnts;
        unsigned int bytes_to_read;
        unsigned int words_to_read;
        u32 *word_buf;
        u8 *byte_buf;
        int i;

        rd_fifo_status = readl(ctrl->base + RD_FIFO_STATUS);

        if (!(rd_fifo_status & FIFO_RDY)) {
                dev_dbg(ctrl->dev, "Spurious IRQ %#x\n", rd_fifo_status);
                return IRQ_NONE;
        }

        wr_cnts = (rd_fifo_status & WR_CNTS_MSK) >> WR_CNTS_SHFT;
        wr_cnts = min(wr_cnts, ctrl->xfer.rem_bytes);

        words_to_read = wr_cnts / QSPI_BYTES_PER_WORD;
        bytes_to_read = wr_cnts % QSPI_BYTES_PER_WORD;

        if (words_to_read) {
                word_buf = ctrl->xfer.rx_buf;
                ctrl->xfer.rem_bytes -= words_to_read * QSPI_BYTES_PER_WORD;
                ioread32_rep(ctrl->base + RD_FIFO, word_buf, words_to_read);
                ctrl->xfer.rx_buf = word_buf + words_to_read;
        }

        if (bytes_to_read) {
                byte_buf = ctrl->xfer.rx_buf;
                rd_fifo = readl(ctrl->base + RD_FIFO);
                ctrl->xfer.rem_bytes -= bytes_to_read;
                for (i = 0; i < bytes_to_read; i++)
                        *byte_buf++ = rd_fifo >> (i * BITS_PER_BYTE);
                ctrl->xfer.rx_buf = byte_buf;
        }

        return IRQ_HANDLED;
}
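/*
 * Fill the write FIFO: PIO_XFER_STATUS reports the available space.
 * Whole words go out through the 4-byte register; once fewer than
 * 4 bytes remain they are pushed through the 1-byte register.
 */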
static irqreturn_t pio_write(struct qcom_qspi *ctrl)
{
        const void *xfer_buf = ctrl->xfer.tx_buf;
        const u32 *word_buf;
        const u8 *byte_buf;
        unsigned int wr_fifo_bytes;
        unsigned int wr_fifo_words;
        unsigned int wr_size;
        unsigned int rem_words;

        wr_fifo_bytes = readl(ctrl->base + PIO_XFER_STATUS);
        wr_fifo_bytes >>= WR_FIFO_BYTES_SHFT;

        if (ctrl->xfer.rem_bytes < QSPI_BYTES_PER_WORD) {
                /* Process the last 1-3 bytes */
                wr_size = min(wr_fifo_bytes, ctrl->xfer.rem_bytes);
                ctrl->xfer.rem_bytes -= wr_size;

                byte_buf = xfer_buf;
                while (wr_size--)
                        writel(*byte_buf++,
                               ctrl->base + PIO_DATAOUT_1B);
                ctrl->xfer.tx_buf = byte_buf;
        } else {
                /*
                 * Process all the whole words; to keep things simple we'll
                 * just wait for the next interrupt to handle the last 1-3
                 * bytes if we don't have an even number of words.
                 */
                rem_words = ctrl->xfer.rem_bytes / QSPI_BYTES_PER_WORD;
                wr_fifo_words = wr_fifo_bytes / QSPI_BYTES_PER_WORD;

                wr_size = min(rem_words, wr_fifo_words);
                ctrl->xfer.rem_bytes -= wr_size * QSPI_BYTES_PER_WORD;

                word_buf = xfer_buf;
                iowrite32_rep(ctrl->base + PIO_DATAOUT_4B, word_buf, wr_size);
                ctrl->xfer.tx_buf = word_buf + wr_size;
        }

        return IRQ_HANDLED;
}
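/*
 * Single handler for both modes: service the PIO FIFOs, report
 * errors, and tear down the descriptor chain on DMA completion.
 * Status bits are acked up front by writing them back.
 */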
static irqreturn_t qcom_qspi_irq(int irq, void *dev_id)
{
        u32 int_status;
        struct qcom_qspi *ctrl = dev_id;
        irqreturn_t ret = IRQ_NONE;

        spin_lock(&ctrl->lock);

        int_status = readl(ctrl->base + MSTR_INT_STATUS);
        writel(int_status, ctrl->base + MSTR_INT_STATUS);

        /* PIO mode handling */
        if (ctrl->xfer.dir == QSPI_WRITE) {
                if (int_status & WR_FIFO_EMPTY)
                        ret = pio_write(ctrl);
        } else {
                if (int_status & RESP_FIFO_RDY)
                        ret = pio_read(ctrl);
        }

        if (int_status & QSPI_ERR_IRQS) {
                if (int_status & RESP_FIFO_UNDERRUN)
                        dev_err(ctrl->dev, "IRQ error: FIFO underrun\n");
                if (int_status & WR_FIFO_OVERRUN)
                        dev_err(ctrl->dev, "IRQ error: FIFO overrun\n");
                if (int_status & HRESP_FROM_NOC_ERR)
                        dev_err(ctrl->dev, "IRQ error: NOC response error\n");
                ret = IRQ_HANDLED;
        }

        if (!ctrl->xfer.rem_bytes) {
                writel(0, ctrl->base + MSTR_INT_EN);
                spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
        }

        /* DMA mode handling */
        if (int_status & DMA_CHAIN_DONE) {
                int i;

                writel(0, ctrl->base + MSTR_INT_EN);
                ctrl->xfer.rem_bytes = 0;

                for (i = 0; i < ctrl->n_cmd_desc; i++)
                        dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
                                      ctrl->dma_cmd_desc[i]);
                ctrl->n_cmd_desc = 0;

                ret = IRQ_HANDLED;
                spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
        }

        spin_unlock(&ctrl->lock);
        return ret;
}
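/*
 * Note the interconnect sequence below: a small bandwidth vote is
 * cast for register access and the path is then disabled, so the
 * icc_enable()/icc_disable() pairing in runtime PM starts from a
 * known-off state.
 */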
static int qcom_qspi_probe(struct platform_device *pdev)
{
        int ret;
        struct device *dev;
        struct spi_master *master;
        struct qcom_qspi *ctrl;

        dev = &pdev->dev;

        master = devm_spi_alloc_master(dev, sizeof(*ctrl));
        if (!master)
                return -ENOMEM;

        platform_set_drvdata(pdev, master);

        ctrl = spi_master_get_devdata(master);

        spin_lock_init(&ctrl->lock);
        ctrl->dev = dev;

        ctrl->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ctrl->base))
                return PTR_ERR(ctrl->base);

        ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS,
                                  sizeof(*ctrl->clks), GFP_KERNEL);
        if (!ctrl->clks)
                return -ENOMEM;

        ctrl->clks[QSPI_CLK_CORE].id = "core";
        ctrl->clks[QSPI_CLK_IFACE].id = "iface";
        ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
        if (ret)
                return ret;

        ctrl->icc_path_cpu_to_qspi = devm_of_icc_get(dev, "qspi-config");
        if (IS_ERR(ctrl->icc_path_cpu_to_qspi))
                return dev_err_probe(dev, PTR_ERR(ctrl->icc_path_cpu_to_qspi),
                                     "Failed to get cpu path\n");

        /* Set BW vote for register access */
        ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, Bps_to_icc(1000),
                         Bps_to_icc(1000));
        if (ret) {
                dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
                        __func__, ret);
                return ret;
        }

        ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
        if (ret) {
                dev_err(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
                        __func__, ret);
                return ret;
        }

        ret = platform_get_irq(pdev, 0);
        if (ret < 0)
                return ret;
        ret = devm_request_irq(dev, ret, qcom_qspi_irq, 0, dev_name(dev), ctrl);
        if (ret) {
                dev_err(dev, "Failed to request irq %d\n", ret);
                return ret;
        }

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        if (ret)
                return dev_err_probe(dev, ret, "could not set DMA mask\n");

        master->max_speed_hz = 300000000;
        master->max_dma_len = 65536; /* as per HPG */
        master->dma_alignment = QSPI_ALIGN_REQ;
        master->num_chipselect = QSPI_NUM_CS;
        master->bus_num = -1;
        master->dev.of_node = pdev->dev.of_node;
        master->mode_bits = SPI_MODE_0 |
                            SPI_TX_DUAL | SPI_RX_DUAL |
                            SPI_TX_QUAD | SPI_RX_QUAD;
        master->flags = SPI_MASTER_HALF_DUPLEX;
        master->prepare_message = qcom_qspi_prepare_message;
        master->transfer_one = qcom_qspi_transfer_one;
        master->handle_err = qcom_qspi_handle_err;
        if (of_property_read_bool(pdev->dev.of_node, "iommus"))
                master->can_dma = qcom_qspi_can_dma;
        master->auto_runtime_pm = true;

        ret = devm_pm_opp_set_clkname(&pdev->dev, "core");
        if (ret)
                return ret;
        /* OPP table is optional */
        ret = devm_pm_opp_of_add_table(&pdev->dev);
        if (ret && ret != -ENODEV) {
                dev_err(&pdev->dev, "invalid OPP table in device tree\n");
                return ret;
        }

        ret = qcom_qspi_alloc_dma(ctrl);
        if (ret)
                return ret;

        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_autosuspend_delay(dev, 250);
        pm_runtime_enable(dev);

        ret = spi_register_master(master);
        if (!ret)
                return 0;

        pm_runtime_disable(dev);

        return ret;
}
static void qcom_qspi_remove(struct platform_device *pdev)
{
        struct spi_master *master = platform_get_drvdata(pdev);

        /* Unregister _before_ disabling pm_runtime() so we stop transfers */
        spi_unregister_master(master);

        pm_runtime_disable(&pdev->dev);
}
static int __maybe_unused qcom_qspi_runtime_suspend(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);
        struct qcom_qspi *ctrl = spi_master_get_devdata(master);
        int ret;

        /* Drop the performance state vote */
        dev_pm_opp_set_rate(dev, 0);
        clk_bulk_disable_unprepare(QSPI_NUM_CLKS, ctrl->clks);

        ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
        if (ret) {
                dev_err_ratelimited(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
                                    __func__, ret);
                return ret;
        }

        pinctrl_pm_select_sleep_state(dev);

        return 0;
}
static int __maybe_unused qcom_qspi_runtime_resume(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);
        struct qcom_qspi *ctrl = spi_master_get_devdata(master);
        int ret;

        pinctrl_pm_select_default_state(dev);

        ret = icc_enable(ctrl->icc_path_cpu_to_qspi);
        if (ret) {
                dev_err_ratelimited(ctrl->dev, "%s: ICC enable failed for cpu: %d\n",
                                    __func__, ret);
                return ret;
        }

        ret = clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
        if (ret)
                return ret;

        return dev_pm_opp_set_rate(dev, ctrl->last_speed * 4);
}
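/*
 * System sleep is layered on the runtime PM callbacks above via
 * pm_runtime_force_suspend()/pm_runtime_force_resume(); each path
 * unwinds on failure so the controller is never left half-suspended.
 */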
static int __maybe_unused qcom_qspi_suspend(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);
        int ret;

        ret = spi_master_suspend(master);
        if (ret)
                return ret;

        ret = pm_runtime_force_suspend(dev);
        if (ret)
                spi_master_resume(master);

        return ret;
}

static int __maybe_unused qcom_qspi_resume(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);
        int ret;

        ret = pm_runtime_force_resume(dev);
        if (ret)
                return ret;

        ret = spi_master_resume(master);
        if (ret)
                pm_runtime_force_suspend(dev);

        return ret;
}
static const struct dev_pm_ops qcom_qspi_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(qcom_qspi_runtime_suspend,
                           qcom_qspi_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(qcom_qspi_suspend, qcom_qspi_resume)
};

static const struct of_device_id qcom_qspi_dt_match[] = {
        { .compatible = "qcom,qspi-v1", },
        { }
};
MODULE_DEVICE_TABLE(of, qcom_qspi_dt_match);
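/*
 * Illustrative (not authoritative) device tree node for this driver;
 * the qcom,qspi-v1 DT binding is the reference. The unit address,
 * interrupt number and clock specifiers below are placeholders:
 *
 *      spi@88df000 {
 *              compatible = "qcom,sdm845-qspi", "qcom,qspi-v1";
 *              reg = <0x88df000 0x600>;
 *              #address-cells = <1>;
 *              #size-cells = <0>;
 *              interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
 *              clock-names = "iface", "core";
 *              clocks = <&gcc GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
 *                       <&gcc GCC_QSPI_CORE_CLK>;
 *      };
 */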
static struct platform_driver qcom_qspi_driver = {
        .driver = {
                .name           = "qcom_qspi",
                .pm             = &qcom_qspi_dev_pm_ops,
                .of_match_table = qcom_qspi_dt_match,
        },
        .probe = qcom_qspi_probe,
        .remove_new = qcom_qspi_remove,
};
module_platform_driver(qcom_qspi_driver);

MODULE_DESCRIPTION("SPI driver for QSPI cores");
MODULE_LICENSE("GPL v2");