/*
 * Driver for Nvidia TEGRA spi controller.
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
#include <linux/dmaengine.h>

#include <mach/dma.h>

#define SLINK_COMMAND		0x000
#define SLINK_BIT_LENGTH(x)	(((x) & 0x1f) << 0)
#define SLINK_WORD_SIZE(x)	(((x) & 0x1f) << 5)
#define SLINK_BOTH_EN		(1 << 10)
#define SLINK_CS_SW		(1 << 11)
#define SLINK_CS_VALUE		(1 << 12)
#define SLINK_CS_POLARITY	(1 << 13)
#define SLINK_IDLE_SDA_DRIVE_LOW	(0 << 16)
#define SLINK_IDLE_SDA_DRIVE_HIGH	(1 << 16)
#define SLINK_IDLE_SDA_PULL_LOW		(2 << 16)
#define SLINK_IDLE_SDA_PULL_HIGH	(3 << 16)
#define SLINK_IDLE_SDA_MASK		(3 << 16)
#define SLINK_CS_POLARITY1	(1 << 20)
#define SLINK_CK_SDA		(1 << 21)
#define SLINK_CS_POLARITY2	(1 << 22)
#define SLINK_CS_POLARITY3	(1 << 23)
#define SLINK_IDLE_SCLK_DRIVE_LOW	(0 << 24)
#define SLINK_IDLE_SCLK_DRIVE_HIGH	(1 << 24)
#define SLINK_IDLE_SCLK_PULL_LOW	(2 << 24)
#define SLINK_IDLE_SCLK_PULL_HIGH	(3 << 24)
#define SLINK_IDLE_SCLK_MASK		(3 << 24)
#define SLINK_M_S		(1 << 28)
#define SLINK_WAIT		(1 << 29)
#define SLINK_GO		(1 << 30)
#define SLINK_ENB		(1 << 31)

#define SLINK_COMMAND2		0x004
#define SLINK_LSBFE		(1 << 0)
#define SLINK_SSOE		(1 << 1)
#define SLINK_SPIE		(1 << 4)
#define SLINK_BIDIROE		(1 << 6)
#define SLINK_MODFEN		(1 << 7)
#define SLINK_INT_SIZE(x)	(((x) & 0x1f) << 8)
#define SLINK_CS_ACTIVE_BETWEEN	(1 << 17)
#define SLINK_SS_EN_CS(x)	(((x) & 0x3) << 18)
#define SLINK_SS_SETUP(x)	(((x) & 0x3) << 20)
#define SLINK_FIFO_REFILLS_0	(0 << 22)
#define SLINK_FIFO_REFILLS_1	(1 << 22)
#define SLINK_FIFO_REFILLS_2	(2 << 22)
#define SLINK_FIFO_REFILLS_3	(3 << 22)
#define SLINK_FIFO_REFILLS_MASK	(3 << 22)
#define SLINK_WAIT_PACK_INT(x)	(((x) & 0x7) << 26)
#define SLINK_SPC0		(1 << 29)
#define SLINK_TXEN		(1 << 30)
#define SLINK_RXEN		(1 << 31)

#define SLINK_STATUS		0x008
#define SLINK_COUNT(val)	(((val) >> 0) & 0x1f)
#define SLINK_WORD(val)		(((val) >> 5) & 0x1f)
#define SLINK_BLK_CNT(val)	(((val) >> 0) & 0xffff)
#define SLINK_MODF		(1 << 16)
#define SLINK_RX_UNF		(1 << 18)
#define SLINK_TX_OVF		(1 << 19)
#define SLINK_TX_FULL		(1 << 20)
#define SLINK_TX_EMPTY		(1 << 21)
#define SLINK_RX_FULL		(1 << 22)
#define SLINK_RX_EMPTY		(1 << 23)
#define SLINK_TX_UNF		(1 << 24)
#define SLINK_RX_OVF		(1 << 25)
#define SLINK_TX_FLUSH		(1 << 26)
#define SLINK_RX_FLUSH		(1 << 27)
#define SLINK_SCLK		(1 << 28)
#define SLINK_ERR		(1 << 29)
#define SLINK_RDY		(1 << 30)
#define SLINK_BSY		(1 << 31)

#define SLINK_MAS_DATA		0x010
#define SLINK_SLAVE_DATA	0x014

#define SLINK_DMA_CTL		0x018
#define SLINK_DMA_BLOCK_SIZE(x)	(((x) & 0xffff) << 0)
#define SLINK_TX_TRIG_1		(0 << 16)
#define SLINK_TX_TRIG_4		(1 << 16)
#define SLINK_TX_TRIG_8		(2 << 16)
#define SLINK_TX_TRIG_16	(3 << 16)
#define SLINK_TX_TRIG_MASK	(3 << 16)
#define SLINK_RX_TRIG_1		(0 << 18)
#define SLINK_RX_TRIG_4		(1 << 18)
#define SLINK_RX_TRIG_8		(2 << 18)
#define SLINK_RX_TRIG_16	(3 << 18)
#define SLINK_RX_TRIG_MASK	(3 << 18)
#define SLINK_PACKED		(1 << 20)
#define SLINK_PACK_SIZE_4	(0 << 21)
#define SLINK_PACK_SIZE_8	(1 << 21)
#define SLINK_PACK_SIZE_16	(2 << 21)
#define SLINK_PACK_SIZE_32	(3 << 21)
#define SLINK_PACK_SIZE_MASK	(3 << 21)
#define SLINK_IE_TXC		(1 << 26)
#define SLINK_IE_RXC		(1 << 27)
#define SLINK_DMA_EN		(1 << 31)

#define SLINK_STATUS2		0x01c
#define SLINK_TX_FIFO_EMPTY_COUNT(val)	(((val) >> 0) & 0x3f)
#define SLINK_RX_FIFO_FULL_COUNT(val)	(((val) >> 16) & 0x3f)

#define SLINK_TX_FIFO		0x100
#define SLINK_RX_FIFO		0x180

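/*
 * APB DMA request selectors for the SLINK ("SL2B") instances; the platform
 * device id of the controller is used to index this table when setting up
 * the dmaengine slave configuration in probe.
 */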
static const unsigned long spi_tegra_req_sels[] = {
	TEGRA_DMA_REQ_SEL_SL2B1,
	TEGRA_DMA_REQ_SEL_SL2B2,
	TEGRA_DMA_REQ_SEL_SL2B3,
	TEGRA_DMA_REQ_SEL_SL2B4,
};

#define BB_LEN			32	/* bounce buffer size, in 32-bit FIFO words */

struct spi_tegra_data {
	struct spi_master	*master;
	struct platform_device	*pdev;

	spinlock_t		lock;

	struct clk		*clk;
	void __iomem		*base;
	unsigned long		phys;

	u32			cur_speed;

	struct list_head	queue;
	struct spi_transfer	*cur;
	unsigned		cur_pos;
	unsigned		cur_len;
	unsigned		dma_req_len;
	unsigned		cur_bytes_per_word;

	/* The tegra spi controller has a bug which causes the first word
	 * in PIO transactions to be garbage.  Since packed DMA transactions
	 * require transfers to be 4 byte aligned we need a bounce buffer
	 * for the generic case.
	 */
	struct dma_chan		*rx_dma;
	struct dma_slave_config	sconfig;
	struct dma_async_tx_descriptor	*rx_dma_desc;
	dma_cookie_t		rx_cookie;
	u32			*rx_bb;
	dma_addr_t		rx_bb_phys;
};

static void tegra_spi_rx_dma_complete(void *args);

static inline unsigned long spi_tegra_readl(struct spi_tegra_data *tspi,
					    unsigned long reg)
{
	return readl(tspi->base + reg);
}

static inline void spi_tegra_writel(struct spi_tegra_data *tspi,
				    unsigned long val, unsigned long reg)
{
	writel(val, tspi->base + reg);
}

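/*
 * Kick off the current chunk: program the DMA block size (in 32-bit FIFO
 * words), prepare and submit a dmaengine slave descriptor that drains the
 * RX FIFO into the bounce buffer, then set SLINK_DMA_EN to start the
 * transfer.
 */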
static void spi_tegra_go(struct spi_tegra_data *tspi)
{
	unsigned long val;

	val = spi_tegra_readl(tspi, SLINK_DMA_CTL);
	val &= ~SLINK_DMA_BLOCK_SIZE(~0) & ~SLINK_DMA_EN;
	val |= SLINK_DMA_BLOCK_SIZE(tspi->dma_req_len / 4 - 1);
	spi_tegra_writel(tspi, val, SLINK_DMA_CTL);

	tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma,
			tspi->rx_bb_phys, tspi->dma_req_len,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!tspi->rx_dma_desc) {
		dev_err(&tspi->pdev->dev, "dmaengine slave prep failed\n");
		return;
	}
	tspi->rx_dma_desc->callback = tegra_spi_rx_dma_complete;
	tspi->rx_dma_desc->callback_param = tspi;
	tspi->rx_cookie = dmaengine_submit(tspi->rx_dma_desc);
	dma_async_issue_pending(tspi->rx_dma);

	val |= SLINK_DMA_EN;
	spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
}

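/*
 * Copy up to BB_LEN words of the transfer into the TX FIFO by PIO, packing
 * cur_bytes_per_word bytes (little-endian) into each FIFO word, and record
 * how many bytes the matching RX DMA will have to move.  Returns the number
 * of bytes consumed from the transfer.
 */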
static unsigned spi_tegra_fill_tx_fifo(struct spi_tegra_data *tspi,
				       struct spi_transfer *t)
{
	unsigned len = min(t->len - tspi->cur_pos, BB_LEN *
			   tspi->cur_bytes_per_word);
	u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_pos;
	unsigned long val;
	int i, j;

	val = spi_tegra_readl(tspi, SLINK_COMMAND);
	val &= ~SLINK_WORD_SIZE(~0);
	val |= SLINK_WORD_SIZE(len / tspi->cur_bytes_per_word - 1);
	spi_tegra_writel(tspi, val, SLINK_COMMAND);

	for (i = 0; i < len; i += tspi->cur_bytes_per_word) {
		val = 0;
		for (j = 0; j < tspi->cur_bytes_per_word; j++)
			val |= tx_buf[i + j] << j * 8;

		spi_tegra_writel(tspi, val, SLINK_TX_FIFO);
	}

	tspi->dma_req_len = len / tspi->cur_bytes_per_word * 4;

	return len;
}

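/*
 * Copy the words the RX DMA deposited in the bounce buffer back into the
 * transfer's rx_buf, unpacking cur_bytes_per_word bytes per FIFO word.
 * Returns the number of bytes drained.
 */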
static unsigned spi_tegra_drain_rx_fifo(struct spi_tegra_data *tspi,
					struct spi_transfer *t)
{
	unsigned len = tspi->cur_len;
	u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_pos;
	unsigned long val;
	int i, j;

	for (i = 0; i < len; i += tspi->cur_bytes_per_word) {
		val = tspi->rx_bb[i / tspi->cur_bytes_per_word];
		for (j = 0; j < tspi->cur_bytes_per_word; j++)
			rx_buf[i + j] = (val >> (j * 8)) & 0xff;
	}

	return len;
}

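/*
 * Program clock rate, word size, chip select and SPI mode for one
 * spi_transfer, flush the FIFOs, fill the TX FIFO with the first chunk
 * and start the RX DMA.
 */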
static void spi_tegra_start_transfer(struct spi_device *spi,
				     struct spi_transfer *t)
{
	struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
	u32 speed;
	u8 bits_per_word;
	unsigned long val;

	speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
	bits_per_word = t->bits_per_word ? t->bits_per_word :
		spi->bits_per_word;

	tspi->cur_bytes_per_word = (bits_per_word - 1) / 8 + 1;

	if (speed != tspi->cur_speed)
		clk_set_rate(tspi->clk, speed);

	if (tspi->cur_speed == 0)
		clk_prepare_enable(tspi->clk);

	tspi->cur_speed = speed;

	val = spi_tegra_readl(tspi, SLINK_COMMAND2);
	val &= ~SLINK_SS_EN_CS(~0) & ~SLINK_RXEN & ~SLINK_TXEN;
	if (t->rx_buf)
		val |= SLINK_RXEN;
	if (t->tx_buf)
		val |= SLINK_TXEN;
	val |= SLINK_SS_EN_CS(spi->chip_select);
	spi_tegra_writel(tspi, val, SLINK_COMMAND2);

	val = spi_tegra_readl(tspi, SLINK_COMMAND);
	val &= ~SLINK_BIT_LENGTH(~0);
	val |= SLINK_BIT_LENGTH(bits_per_word - 1);

	/* FIXME: should probably control CS manually so that we can be sure
	 * it does not go low between transfers and to support delay_usecs
	 * correctly.
	 */
	val &= ~SLINK_IDLE_SCLK_MASK & ~SLINK_CK_SDA & ~SLINK_CS_SW;

	if (spi->mode & SPI_CPHA)
		val |= SLINK_CK_SDA;

	if (spi->mode & SPI_CPOL)
		val |= SLINK_IDLE_SCLK_DRIVE_HIGH;
	else
		val |= SLINK_IDLE_SCLK_DRIVE_LOW;

	spi_tegra_writel(tspi, val, SLINK_COMMAND);

	spi_tegra_writel(tspi, SLINK_RX_FLUSH | SLINK_TX_FLUSH, SLINK_STATUS);

	tspi->cur = t;
	tspi->cur_pos = 0;
	tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, t);

	spi_tegra_go(tspi);
}

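/* Begin a queued message by starting its first transfer. */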
static void spi_tegra_start_message(struct spi_device *spi,
				    struct spi_message *m)
{
	struct spi_transfer *t;

	m->actual_length = 0;

	t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list);
	spi_tegra_start_transfer(spi, t);
}

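/*
 * RX DMA completion handling: drain the bounce buffer, then either continue
 * the current transfer with another FIFO-sized chunk, advance to the next
 * transfer in the message, or complete the message and start the next one
 * queued (disabling the clock when the queue becomes empty).
 */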
static void handle_spi_rx_dma_complete(struct spi_tegra_data *tspi)
{
	unsigned long val;
	unsigned long flags;
	struct spi_message *m;
	struct spi_device *spi;
	int timeout = 0;

	/* the SPI controller may come back with both the BSY and RDY bits
	 * set.  In this case we need to wait for the BSY bit to clear so
	 * that we are sure the DMA is finished.  1000 reads was empirically
	 * determined to be long enough.
	 */
	while (timeout++ < 1000) {
		if (!(spi_tegra_readl(tspi, SLINK_STATUS) & SLINK_BSY))
			break;
	}

	spin_lock_irqsave(&tspi->lock, flags);

	val = spi_tegra_readl(tspi, SLINK_STATUS);
	spi_tegra_writel(tspi, val, SLINK_STATUS);

	m = list_first_entry(&tspi->queue, struct spi_message, queue);
	spi = m->state;

	tspi->cur_pos += spi_tegra_drain_rx_fifo(tspi, tspi->cur);
	m->actual_length += tspi->cur_pos;

	if (tspi->cur_pos < tspi->cur->len) {
		tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, tspi->cur);
		spi_tegra_go(tspi);
	} else if (!list_is_last(&tspi->cur->transfer_list,
				 &m->transfers)) {
		tspi->cur = list_first_entry(&tspi->cur->transfer_list,
					     struct spi_transfer, transfer_list);
		spi_tegra_start_transfer(spi, tspi->cur);
	} else {
		list_del(&m->queue);
		m->complete(m->context);

		if (!list_empty(&tspi->queue)) {
			m = list_first_entry(&tspi->queue, struct spi_message,
					     queue);
			spi = m->state;
			spi_tegra_start_message(spi, m);
		} else {
			clk_disable_unprepare(tspi->clk);
			tspi->cur_speed = 0;
		}
	}

	spin_unlock_irqrestore(&tspi->lock, flags);
}

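/* dmaengine completion callback for the RX channel. */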
static void tegra_spi_rx_dma_complete(void *args)
{
	struct spi_tegra_data *tspi = args;

	handle_spi_rx_dma_complete(tspi);
}

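/*
 * Per-device setup: only the chip select polarity is programmed here, by
 * setting or clearing the CS_POLARITY bit that corresponds to the device's
 * chip select.
 */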
static int spi_tegra_setup(struct spi_device *spi)
{
	struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
	unsigned long cs_bit;
	unsigned long val;
	unsigned long flags;

	dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
		spi->bits_per_word,
		spi->mode & SPI_CPOL ? "" : "~",
		spi->mode & SPI_CPHA ? "" : "~",
		spi->max_speed_hz);

	switch (spi->chip_select) {
	case 0:
		cs_bit = SLINK_CS_POLARITY;
		break;
	case 1:
		cs_bit = SLINK_CS_POLARITY1;
		break;
	case 2:
		cs_bit = SLINK_CS_POLARITY2;
		break;
	case 3:
		cs_bit = SLINK_CS_POLARITY3;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&tspi->lock, flags);

	val = spi_tegra_readl(tspi, SLINK_COMMAND);
	if (spi->mode & SPI_CS_HIGH)
		val |= cs_bit;
	else
		val &= ~cs_bit;
	spi_tegra_writel(tspi, val, SLINK_COMMAND);

	spin_unlock_irqrestore(&tspi->lock, flags);

	return 0;
}

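/*
 * Queue a message.  Transfers are validated (1-32 bits per word, at least
 * one buffer), the message is appended to the controller queue, and the
 * hardware is started immediately if the queue was previously empty;
 * completion is driven from the RX DMA callback.
 */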
static int spi_tegra_transfer(struct spi_device *spi, struct spi_message *m)
{
	struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
	struct spi_transfer *t;
	unsigned long flags;
	int was_empty;

	if (list_empty(&m->transfers) || !m->complete)
		return -EINVAL;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (t->bits_per_word < 0 || t->bits_per_word > 32)
			return -EINVAL;

		if (!t->rx_buf && !t->tx_buf)
			return -EINVAL;
	}

	m->state = spi;

	spin_lock_irqsave(&tspi->lock, flags);
	was_empty = list_empty(&tspi->queue);
	list_add_tail(&m->queue, &tspi->queue);

	if (was_empty)
		spi_tegra_start_message(spi, m);

	spin_unlock_irqrestore(&tspi->lock, flags);

	return 0;
}

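/*
 * Probe: map the SLINK register window, get the controller clock, grab a
 * dmaengine channel for RX, allocate the bounce buffer and register the
 * SPI master.  Only the RX direction uses DMA; TX is done by PIO into the
 * FIFO.
 */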
static int __devinit spi_tegra_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct spi_tegra_data *tspi;
	struct resource *r;
	dma_cap_mask_t mask;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof *tspi);
	if (master == NULL) {
		dev_err(&pdev->dev, "master allocation failed\n");
		return -ENOMEM;
	}

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	master->bus_num = pdev->id;

	master->setup = spi_tegra_setup;
	master->transfer = spi_tegra_transfer;
	master->num_chipselect = 4;

	dev_set_drvdata(&pdev->dev, master);
	tspi = spi_master_get_devdata(master);
	tspi->master = master;
	tspi->pdev = pdev;
	spin_lock_init(&tspi->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		ret = -ENODEV;
		goto err0;
	}

	if (!request_mem_region(r->start, resource_size(r),
				dev_name(&pdev->dev))) {
		ret = -EBUSY;
		goto err0;
	}

	tspi->phys = r->start;
	tspi->base = ioremap(r->start, resource_size(r));
	if (!tspi->base) {
		dev_err(&pdev->dev, "can't ioremap iomem\n");
		ret = -ENOMEM;
		goto err1;
	}

	tspi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(tspi->clk)) {
		dev_err(&pdev->dev, "can not get clock\n");
		ret = PTR_ERR(tspi->clk);
		goto err2;
	}

	INIT_LIST_HEAD(&tspi->queue);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	tspi->rx_dma = dma_request_channel(mask, NULL, NULL);
	if (!tspi->rx_dma) {
		dev_err(&pdev->dev, "can not allocate rx dma channel\n");
		ret = -ENODEV;
		goto err3;
	}

	tspi->rx_bb = dma_alloc_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
					 &tspi->rx_bb_phys, GFP_KERNEL);
	if (!tspi->rx_bb) {
		dev_err(&pdev->dev, "can not allocate rx bounce buffer\n");
		ret = -ENOMEM;
		goto err4;
	}

	/* Dmaengine Dma slave config */
	tspi->sconfig.src_addr = tspi->phys + SLINK_RX_FIFO;
	tspi->sconfig.dst_addr = tspi->phys + SLINK_RX_FIFO;
	tspi->sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	tspi->sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	tspi->sconfig.slave_id = spi_tegra_req_sels[pdev->id];
	tspi->sconfig.src_maxburst = 1;
	tspi->sconfig.dst_maxburst = 1;
	ret = dmaengine_device_control(tspi->rx_dma,
			DMA_SLAVE_CONFIG, (unsigned long) &tspi->sconfig);
	if (ret < 0) {
		dev_err(&pdev->dev, "can not do slave configure for dma %d\n",
			ret);
		goto err5;
	}

	master->dev.of_node = pdev->dev.of_node;
	ret = spi_register_master(master);
	if (ret < 0)
		goto err5;

	return ret;

err5:
	dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
			  tspi->rx_bb, tspi->rx_bb_phys);
err4:
	dma_release_channel(tspi->rx_dma);
err3:
	clk_put(tspi->clk);
err2:
	iounmap(tspi->base);
err1:
	release_mem_region(r->start, resource_size(r));
err0:
	spi_master_put(master);
	return ret;
}

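/* Undo everything done in probe, in reverse order. */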
static int __devexit spi_tegra_remove(struct platform_device *pdev)
{
	struct spi_master *master;
	struct spi_tegra_data *tspi;
	struct resource *r;

	master = dev_get_drvdata(&pdev->dev);
	tspi = spi_master_get_devdata(master);

	spi_unregister_master(master);
	dma_release_channel(tspi->rx_dma);
	dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
			  tspi->rx_bb, tspi->rx_bb_phys);

	clk_put(tspi->clk);
	iounmap(tspi->base);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(r->start, resource_size(r));

	return 0;
}

MODULE_ALIAS("platform:spi_tegra");

#ifdef CONFIG_OF
static struct of_device_id spi_tegra_of_match_table[] __devinitdata = {
	{ .compatible = "nvidia,tegra20-spi", },
	{}
};
MODULE_DEVICE_TABLE(of, spi_tegra_of_match_table);
#else /* CONFIG_OF */
#define spi_tegra_of_match_table NULL
#endif /* CONFIG_OF */

static struct platform_driver spi_tegra_driver = {
	.driver = {
		.name =		"spi_tegra",
		.owner =	THIS_MODULE,
		.of_match_table = spi_tegra_of_match_table,
	},
	.probe =	spi_tegra_probe,
	.remove =	__devexit_p(spi_tegra_remove),
};
module_platform_driver(spi_tegra_driver);

MODULE_LICENSE("GPL");