// SPDX-License-Identifier: GPL-2.0-only
/*
 * SPI-Engine SPI controller driver
 * Copyright 2015 Analog Devices Inc.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/fpga/adi-axi-common.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
#define SPI_ENGINE_REG_RESET 0x40

#define SPI_ENGINE_REG_INT_ENABLE 0x80
#define SPI_ENGINE_REG_INT_PENDING 0x84
#define SPI_ENGINE_REG_INT_SOURCE 0x88

#define SPI_ENGINE_REG_SYNC_ID 0xc0

#define SPI_ENGINE_REG_CMD_FIFO_ROOM 0xd0
#define SPI_ENGINE_REG_SDO_FIFO_ROOM 0xd4
#define SPI_ENGINE_REG_SDI_FIFO_LEVEL 0xd8

#define SPI_ENGINE_REG_CMD_FIFO 0xe0
#define SPI_ENGINE_REG_SDO_DATA_FIFO 0xe4
#define SPI_ENGINE_REG_SDI_DATA_FIFO 0xe8
#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK 0xec

#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY BIT(0)
#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY BIT(1)
#define SPI_ENGINE_INT_SDI_ALMOST_FULL BIT(2)
#define SPI_ENGINE_INT_SYNC BIT(3)

#define SPI_ENGINE_CONFIG_CPHA BIT(0)
#define SPI_ENGINE_CONFIG_CPOL BIT(1)
#define SPI_ENGINE_CONFIG_3WIRE BIT(2)

#define SPI_ENGINE_INST_TRANSFER 0x0
#define SPI_ENGINE_INST_ASSERT 0x1
#define SPI_ENGINE_INST_WRITE 0x2
#define SPI_ENGINE_INST_MISC 0x3
#define SPI_ENGINE_INST_CS_INV 0x4

#define SPI_ENGINE_CMD_REG_CLK_DIV 0x0
#define SPI_ENGINE_CMD_REG_CONFIG 0x1
#define SPI_ENGINE_CMD_REG_XFER_BITS 0x2

#define SPI_ENGINE_MISC_SYNC 0x0
#define SPI_ENGINE_MISC_SLEEP 0x1

#define SPI_ENGINE_TRANSFER_WRITE 0x1
#define SPI_ENGINE_TRANSFER_READ 0x2

/* Arbitrary sync ID for use by host->cur_msg */
#define AXI_SPI_ENGINE_CUR_MSG_SYNC_ID 0x1
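
/*
 * SPI Engine instructions are 16 bits wide: a 4-bit opcode in bits [15:12],
 * a 4-bit first argument in bits [11:8] and an 8-bit second argument in
 * bits [7:0].
 */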
#define SPI_ENGINE_CMD(inst, arg1, arg2) \
        (((inst) << 12) | ((arg1) << 8) | (arg2))

#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
        SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
        SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
#define SPI_ENGINE_CMD_WRITE(reg, val) \
        SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
#define SPI_ENGINE_CMD_SLEEP(delay) \
        SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
#define SPI_ENGINE_CMD_SYNC(id) \
        SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
#define SPI_ENGINE_CMD_CS_INV(flags) \
        SPI_ENGINE_CMD(SPI_ENGINE_INST_CS_INV, 0, (flags))
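
/*
 * A compiled SPI Engine program: a flexible array of 16-bit instructions
 * generated from a struct spi_message by spi_engine_compile_message().
 */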
struct spi_engine_program {
        unsigned int length;
        uint16_t instructions[] __counted_by(length);
};
/**
 * struct spi_engine_message_state - SPI engine per-message state
 */
struct spi_engine_message_state {
        /** @cmd_length: Number of elements in cmd_buf array. */
        unsigned int cmd_length;
        /** @cmd_buf: Array of commands not yet written to CMD FIFO. */
        const uint16_t *cmd_buf;
        /** @tx_xfer: Next xfer with tx_buf not yet fully written to TX FIFO. */
        struct spi_transfer *tx_xfer;
        /** @tx_length: Size of tx_buf in bytes. */
        unsigned int tx_length;
        /** @tx_buf: Bytes not yet written to TX FIFO. */
        const uint8_t *tx_buf;
        /** @rx_xfer: Next xfer with rx_buf not yet fully read from the RX FIFO. */
        struct spi_transfer *rx_xfer;
        /** @rx_length: Size of rx_buf in bytes. */
        unsigned int rx_length;
        /** @rx_buf: Bytes not yet read from the RX FIFO. */
        uint8_t *rx_buf;
};
        struct spi_engine_message_state msg_state;
        struct completion msg_complete;
        unsigned int int_enable;
        /* shadows hardware CS inversion flag state */
static void spi_engine_program_add_cmd(struct spi_engine_program *p,
        bool dry, uint16_t cmd)
                p->instructions[p->length - 1] = cmd;

static unsigned int spi_engine_get_config(struct spi_device *spi)
        unsigned int config = 0;

        if (spi->mode & SPI_CPOL)
                config |= SPI_ENGINE_CONFIG_CPOL;
        if (spi->mode & SPI_CPHA)
                config |= SPI_ENGINE_CONFIG_CPHA;
        if (spi->mode & SPI_3WIRE)
                config |= SPI_ENGINE_CONFIG_3WIRE;
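
/*
 * Emit TRANSFER instructions for one spi_transfer. The hardware counts at
 * most 256 words per instruction (encoded as n - 1), so longer transfers are
 * split into multiple instructions, with the read/write flags derived from
 * the presence of rx_buf/tx_buf.
 */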
static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
        struct spi_transfer *xfer)
        if (xfer->bits_per_word <= 8)
        else if (xfer->bits_per_word <= 16)

                unsigned int n = min(len, 256U);
                unsigned int flags = 0;

                        flags |= SPI_ENGINE_TRANSFER_WRITE;
                        flags |= SPI_ENGINE_TRANSFER_READ;

                spi_engine_program_add_cmd(p, dry,
                        SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
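
/*
 * Emit SLEEP instructions for a delay given in nanoseconds. The delay is
 * converted to SCLK cycles after subtracting the execution time of the
 * surrounding instructions (inst_ns); each SLEEP instruction encodes up to
 * 256 cycles as n - 1.
 */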
static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
        int delay_ns, int inst_ns, u32 sclk_hz)
        /*
         * Negative delay indicates error, e.g. from spi_delay_to_ns(). And if
         * delay is less than the instruction execution time, there is no need
         * for an extra sleep instruction since the instruction execution time
         * will already cover the required delay.
         */
        if (delay_ns < 0 || delay_ns <= inst_ns)

        t = DIV_ROUND_UP_ULL((u64)(delay_ns - inst_ns) * sclk_hz, NSEC_PER_SEC);
                unsigned int n = min(t, 256U);

                spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
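
/*
 * Emit a chip select ASSERT instruction. The argument is a mask with one bit
 * per CS line; clearing a bit asserts the corresponding line, so 0xff
 * deasserts all of them.
 */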
static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
        struct spi_device *spi, bool assert)
        unsigned int mask = 0xff;

                mask ^= BIT(spi_get_chipselect(spi, 0));

        spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(0, mask));
/*
 * Performs precompile steps on the message.
 *
 * The SPI core does most of the message/transfer validation and filling in
 * fields for us via __spi_validate(). This fixes up anything remaining not
 * handled there.
 *
 * NB: This is separate from spi_engine_compile_message() because the latter
 * is called twice and would otherwise result in double-evaluation.
 */
static void spi_engine_precompile_message(struct spi_message *msg)
        unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
        struct spi_transfer *xfer;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
                xfer->effective_speed_hz = max_hz / min(clk_div, 256U);
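
/*
 * Translate a spi_message into a SPI Engine program. With dry set, no
 * instructions are stored and only p->length is advanced, which is used to
 * size the allocation before the real compilation pass.
 */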
static void spi_engine_compile_message(struct spi_message *msg, bool dry,
        struct spi_engine_program *p)
        struct spi_device *spi = msg->spi;
        struct spi_controller *host = spi->controller;
        struct spi_transfer *xfer;
        int clk_div, new_clk_div, inst_ns;
        bool keep_cs = false;
        u8 bits_per_word = 0;

        /*
         * Take into account instruction execution time for more accurate sleep
         * times, especially when the delay is small.
         */
        inst_ns = DIV_ROUND_UP(NSEC_PER_SEC, host->max_speed_hz);

        spi_engine_program_add_cmd(p, dry,
                SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
                        spi_engine_get_config(spi)));
        xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
        spi_engine_gen_cs(p, dry, spi, !xfer->cs_off);

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                new_clk_div = host->max_speed_hz / xfer->effective_speed_hz;
                if (new_clk_div != clk_div) {
                        clk_div = new_clk_div;
                        /* actual divider used is register value + 1 */
                        spi_engine_program_add_cmd(p, dry,
                                SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,

                if (bits_per_word != xfer->bits_per_word) {
                        bits_per_word = xfer->bits_per_word;
                        spi_engine_program_add_cmd(p, dry,
                                SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,

                spi_engine_gen_xfer(p, dry, xfer);
                spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer),
                        inst_ns, xfer->effective_speed_hz);

                if (xfer->cs_change) {
                        if (list_is_last(&xfer->transfer_list, &msg->transfers)) {

                                spi_engine_gen_cs(p, dry, spi, false);

                                spi_engine_gen_sleep(p, dry, spi_delay_to_ns(
                                        &xfer->cs_change_delay, xfer), inst_ns,
                                        xfer->effective_speed_hz);

                                if (!list_next_entry(xfer, transfer_list)->cs_off)
                                        spi_engine_gen_cs(p, dry, spi, true);

                } else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
                           xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
                        spi_engine_gen_cs(p, dry, spi, xfer->cs_off);

                spi_engine_gen_cs(p, dry, spi, false);

        /*
         * Restore clockdiv to default so that future gen_sleep commands don't
         * have to be aware of the current register state.
         */
        spi_engine_program_add_cmd(p, dry,
                SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0));
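
/*
 * Advance *_xfer to the next transfer in the message, starting from the
 * first transfer when *_xfer is NULL and ending with NULL after the last.
 */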
static void spi_engine_xfer_next(struct spi_message *msg,
        struct spi_transfer **_xfer)
        struct spi_transfer *xfer = *_xfer;

                xfer = list_first_entry(&msg->transfers,
                        struct spi_transfer, transfer_list);
        } else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
                xfer = list_next_entry(xfer, transfer_list);
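
/*
 * spi_engine_tx_next() and spi_engine_rx_next() below find the next transfer
 * that has a tx_buf or rx_buf respectively and reset the per-message byte
 * counters for it.
 */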
static void spi_engine_tx_next(struct spi_message *msg)
        struct spi_engine_message_state *st = msg->state;
        struct spi_transfer *xfer = st->tx_xfer;

                spi_engine_xfer_next(msg, &xfer);
        } while (xfer && !xfer->tx_buf);

                st->tx_length = xfer->len;
                st->tx_buf = xfer->tx_buf;

static void spi_engine_rx_next(struct spi_message *msg)
        struct spi_engine_message_state *st = msg->state;
        struct spi_transfer *xfer = st->rx_xfer;

                spi_engine_xfer_next(msg, &xfer);
        } while (xfer && !xfer->rx_buf);

                st->rx_length = xfer->len;
                st->rx_buf = xfer->rx_buf;
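
/*
 * Write pending program instructions to the CMD FIFO, limited by the
 * available FIFO room. Returns true if instructions remain to be written.
 */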
static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine,
        struct spi_message *msg)
        void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
        struct spi_engine_message_state *st = msg->state;
        unsigned int n, m, i;

        n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
        while (n && st->cmd_length) {
                m = min(n, st->cmd_length);

                for (i = 0; i < m; i++)
                        writel_relaxed(buf[i], addr);

        return st->cmd_length != 0;
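
/*
 * Fill the SDO (TX) FIFO from the current transfer's tx_buf. The FIFO word
 * size follows bits_per_word: one, two or four bytes per word. Returns true
 * if TX data is still pending.
 */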
static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine,
        struct spi_message *msg)
        void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
        struct spi_engine_message_state *st = msg->state;
        unsigned int n, m, i;

        n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
        while (n && st->tx_length) {
                if (st->tx_xfer->bits_per_word <= 8) {
                        const u8 *buf = st->tx_buf;

                        m = min(n, st->tx_length);
                        for (i = 0; i < m; i++)
                                writel_relaxed(buf[i], addr);

                } else if (st->tx_xfer->bits_per_word <= 16) {
                        const u16 *buf = (const u16 *)st->tx_buf;

                        m = min(n, st->tx_length / 2);
                        for (i = 0; i < m; i++)
                                writel_relaxed(buf[i], addr);

                        st->tx_length -= m * 2;

                        const u32 *buf = (const u32 *)st->tx_buf;

                        m = min(n, st->tx_length / 4);
                        for (i = 0; i < m; i++)
                                writel_relaxed(buf[i], addr);

                        st->tx_length -= m * 4;

                if (st->tx_length == 0)
                        spi_engine_tx_next(msg);

        return st->tx_length != 0;
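
/*
 * Drain the SDI (RX) FIFO into the current transfer's rx_buf, again sized by
 * bits_per_word. Returns true if more RX data is expected.
 */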
static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine,
        struct spi_message *msg)
        void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
        struct spi_engine_message_state *st = msg->state;
        unsigned int n, m, i;

        n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
        while (n && st->rx_length) {
                if (st->rx_xfer->bits_per_word <= 8) {
                        u8 *buf = st->rx_buf;

                        m = min(n, st->rx_length);
                        for (i = 0; i < m; i++)
                                buf[i] = readl_relaxed(addr);

                } else if (st->rx_xfer->bits_per_word <= 16) {
                        u16 *buf = (u16 *)st->rx_buf;

                        m = min(n, st->rx_length / 2);
                        for (i = 0; i < m; i++)
                                buf[i] = readl_relaxed(addr);

                        st->rx_length -= m * 2;

                        u32 *buf = (u32 *)st->rx_buf;

                        m = min(n, st->rx_length / 4);
                        for (i = 0; i < m; i++)
                                buf[i] = readl_relaxed(addr);

                        st->rx_length -= m * 4;

                if (st->rx_length == 0)
                        spi_engine_rx_next(msg);

        return st->rx_length != 0;
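
/*
 * Interrupt handler: refills the CMD and SDO FIFOs, drains the SDI FIFO and,
 * on a SYNC interrupt carrying the message's sync ID, marks the current
 * message as complete. Interrupt sources that no longer have work pending
 * are masked.
 */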
static irqreturn_t spi_engine_irq(int irq, void *devid)
        struct spi_controller *host = devid;
        struct spi_message *msg = host->cur_msg;
        struct spi_engine *spi_engine = spi_controller_get_devdata(host);
        unsigned int disable_int = 0;
        unsigned int pending;
        int completed_id = -1;

        pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);

        if (pending & SPI_ENGINE_INT_SYNC) {
                writel_relaxed(SPI_ENGINE_INT_SYNC,
                        spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
                completed_id = readl_relaxed(
                        spi_engine->base + SPI_ENGINE_REG_SYNC_ID);

        spin_lock(&spi_engine->lock);

        if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
                if (!spi_engine_write_cmd_fifo(spi_engine, msg))
                        disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;

        if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
                if (!spi_engine_write_tx_fifo(spi_engine, msg))
                        disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;

        if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
                if (!spi_engine_read_rx_fifo(spi_engine, msg))
                        disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;

        if (pending & SPI_ENGINE_INT_SYNC && msg) {
                if (completed_id == AXI_SPI_ENGINE_CUR_MSG_SYNC_ID) {
                        msg->actual_length = msg->frame_length;
                        complete(&spi_engine->msg_complete);
                        disable_int |= SPI_ENGINE_INT_SYNC;

                spi_engine->int_enable &= ~disable_int;
                writel_relaxed(spi_engine->int_enable,
                        spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);

        spin_unlock(&spi_engine->lock);
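
/*
 * Compile the message once in dry-run mode to learn the program size, then
 * allocate and compile it for real, appending a SYNC instruction so that the
 * interrupt handler can detect completion of the message.
 */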
static int spi_engine_optimize_message(struct spi_message *msg)
        struct spi_engine_program p_dry, *p;

        spi_engine_precompile_message(msg);

        spi_engine_compile_message(msg, true, &p_dry);

        p = kzalloc(struct_size(p, instructions, p_dry.length + 1), GFP_KERNEL);

        spi_engine_compile_message(msg, false, p);

        spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
                AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));

static int spi_engine_unoptimize_message(struct spi_message *msg)
        kfree(msg->opt_state);
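
/*
 * Per-device setup: track SPI_CS_HIGH in the cs_inv shadow and push the new
 * CS inversion flags to the hardware via the CMD FIFO.
 */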
static int spi_engine_setup(struct spi_device *device)
        struct spi_controller *host = device->controller;
        struct spi_engine *spi_engine = spi_controller_get_devdata(host);

        if (device->mode & SPI_CS_HIGH)
                spi_engine->cs_inv |= BIT(spi_get_chipselect(device, 0));
        else
                spi_engine->cs_inv &= ~BIT(spi_get_chipselect(device, 0));

        writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine->cs_inv),
                spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

        /*
         * In addition to setting the flags, we have to do a CS assert command
         * to make the new setting actually take effect.
         */
        writel_relaxed(SPI_ENGINE_CMD_ASSERT(0, 0xff),
                spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
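
/*
 * Execute a previously optimized message: load the compiled program, prime
 * the FIFOs, enable the matching interrupts and wait (with a 5 second
 * timeout) for the SYNC interrupt that signals completion.
 */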
static int spi_engine_transfer_one_message(struct spi_controller *host,
        struct spi_message *msg)
        struct spi_engine *spi_engine = spi_controller_get_devdata(host);
        struct spi_engine_message_state *st = &spi_engine->msg_state;
        struct spi_engine_program *p = msg->opt_state;
        unsigned int int_enable = 0;
        unsigned long flags;

        /* reinitialize message state for this transfer */
        memset(st, 0, sizeof(*st));
        st->cmd_buf = p->instructions;
        st->cmd_length = p->length;

        reinit_completion(&spi_engine->msg_complete);

        spin_lock_irqsave(&spi_engine->lock, flags);

        if (spi_engine_write_cmd_fifo(spi_engine, msg))
                int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;

        spi_engine_tx_next(msg);
        if (spi_engine_write_tx_fifo(spi_engine, msg))
                int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;

        spi_engine_rx_next(msg);
        if (st->rx_length != 0)
                int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;

        int_enable |= SPI_ENGINE_INT_SYNC;

        writel_relaxed(int_enable,
                spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
        spi_engine->int_enable = int_enable;
        spin_unlock_irqrestore(&spi_engine->lock, flags);

        if (!wait_for_completion_timeout(&spi_engine->msg_complete,
                        msecs_to_jiffies(5000))) {
                dev_err(&host->dev,
                        "Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n");
                msg->status = -ETIMEDOUT;

        spi_finalize_current_message(host);
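
/*
 * Devres action that quiesces the hardware on remove: clear and mask all
 * interrupts, then put the core back into reset.
 */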
static void spi_engine_release_hw(void *p)
        struct spi_engine *spi_engine = p;

        writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
        writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
        writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
static int spi_engine_probe(struct platform_device *pdev)
        struct spi_engine *spi_engine;
        struct spi_controller *host;
        unsigned int version;

        irq = platform_get_irq(pdev, 0);

        host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_engine));

        spi_engine = spi_controller_get_devdata(host);

        spin_lock_init(&spi_engine->lock);
        init_completion(&spi_engine->msg_complete);

        spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
        if (IS_ERR(spi_engine->clk))
                return PTR_ERR(spi_engine->clk);

        spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk");
        if (IS_ERR(spi_engine->ref_clk))
                return PTR_ERR(spi_engine->ref_clk);

        spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(spi_engine->base))
                return PTR_ERR(spi_engine->base);

        version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
        if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
                dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
                        ADI_AXI_PCORE_VER_MAJOR(version),
                        ADI_AXI_PCORE_VER_MINOR(version),
                        ADI_AXI_PCORE_VER_PATCH(version));

        writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
        writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
        writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);

        ret = devm_add_action_or_reset(&pdev->dev, spi_engine_release_hw,

        ret = devm_request_irq(&pdev->dev, irq, spi_engine_irq, 0, pdev->name,

        host->dev.of_node = pdev->dev.of_node;
        host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
        host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
        host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
        host->transfer_one_message = spi_engine_transfer_one_message;
        host->optimize_message = spi_engine_optimize_message;
        host->unoptimize_message = spi_engine_unoptimize_message;
        host->num_chipselect = 8;
        /* Some features depend on the IP core version. */
        if (ADI_AXI_PCORE_VER_MINOR(version) >= 2) {
                host->mode_bits |= SPI_CS_HIGH;
                host->setup = spi_engine_setup;

        if (host->max_speed_hz == 0)
                return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");

        return devm_spi_register_controller(&pdev->dev, host);

static const struct of_device_id spi_engine_match_table[] = {
        { .compatible = "adi,axi-spi-engine-1.00.a" },
        { }
};
MODULE_DEVICE_TABLE(of, spi_engine_match_table);

static struct platform_driver spi_engine_driver = {
        .probe = spi_engine_probe,
        .driver = {
                .name = "spi-engine",
                .of_match_table = spi_engine_match_table,
        },
};
module_platform_driver(spi_engine_driver);

MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
MODULE_LICENSE("GPL");