]> Git Repo - linux.git/blob - drivers/spi/spi-axi-spi-engine.c
Linux 6.14-rc3
[linux.git] / drivers / spi / spi-axi-spi-engine.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * SPI-Engine SPI controller driver
4  * Copyright 2015 Analog Devices Inc.
5  *  Author: Lars-Peter Clausen <[email protected]>
6  */
7
8 #include <linux/clk.h>
9 #include <linux/completion.h>
10 #include <linux/fpga/adi-axi-common.h>
11 #include <linux/interrupt.h>
12 #include <linux/io.h>
13 #include <linux/of.h>
14 #include <linux/module.h>
15 #include <linux/overflow.h>
16 #include <linux/platform_device.h>
17 #include <linux/spi/spi.h>
18 #include <trace/events/spi.h>
19
20 #define SPI_ENGINE_REG_RESET                    0x40
21
22 #define SPI_ENGINE_REG_INT_ENABLE               0x80
23 #define SPI_ENGINE_REG_INT_PENDING              0x84
24 #define SPI_ENGINE_REG_INT_SOURCE               0x88
25
26 #define SPI_ENGINE_REG_SYNC_ID                  0xc0
27
28 #define SPI_ENGINE_REG_CMD_FIFO_ROOM            0xd0
29 #define SPI_ENGINE_REG_SDO_FIFO_ROOM            0xd4
30 #define SPI_ENGINE_REG_SDI_FIFO_LEVEL           0xd8
31
32 #define SPI_ENGINE_REG_CMD_FIFO                 0xe0
33 #define SPI_ENGINE_REG_SDO_DATA_FIFO            0xe4
34 #define SPI_ENGINE_REG_SDI_DATA_FIFO            0xe8
35 #define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK       0xec
36
37 #define SPI_ENGINE_INT_CMD_ALMOST_EMPTY         BIT(0)
38 #define SPI_ENGINE_INT_SDO_ALMOST_EMPTY         BIT(1)
39 #define SPI_ENGINE_INT_SDI_ALMOST_FULL          BIT(2)
40 #define SPI_ENGINE_INT_SYNC                     BIT(3)
41
42 #define SPI_ENGINE_CONFIG_CPHA                  BIT(0)
43 #define SPI_ENGINE_CONFIG_CPOL                  BIT(1)
44 #define SPI_ENGINE_CONFIG_3WIRE                 BIT(2)
45 #define SPI_ENGINE_CONFIG_SDO_IDLE_HIGH         BIT(3)
46
47 #define SPI_ENGINE_INST_TRANSFER                0x0
48 #define SPI_ENGINE_INST_ASSERT                  0x1
49 #define SPI_ENGINE_INST_WRITE                   0x2
50 #define SPI_ENGINE_INST_MISC                    0x3
51 #define SPI_ENGINE_INST_CS_INV                  0x4
52
53 #define SPI_ENGINE_CMD_REG_CLK_DIV              0x0
54 #define SPI_ENGINE_CMD_REG_CONFIG               0x1
55 #define SPI_ENGINE_CMD_REG_XFER_BITS            0x2
56
57 #define SPI_ENGINE_MISC_SYNC                    0x0
58 #define SPI_ENGINE_MISC_SLEEP                   0x1
59
60 #define SPI_ENGINE_TRANSFER_WRITE               0x1
61 #define SPI_ENGINE_TRANSFER_READ                0x2
62
63 /* Arbitrary sync ID for use by host->cur_msg */
64 #define AXI_SPI_ENGINE_CUR_MSG_SYNC_ID          0x1
65
66 #define SPI_ENGINE_CMD(inst, arg1, arg2) \
67         (((inst) << 12) | ((arg1) << 8) | (arg2))
68
69 #define SPI_ENGINE_CMD_TRANSFER(flags, n) \
70         SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
71 #define SPI_ENGINE_CMD_ASSERT(delay, cs) \
72         SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
73 #define SPI_ENGINE_CMD_WRITE(reg, val) \
74         SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
75 #define SPI_ENGINE_CMD_SLEEP(delay) \
76         SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
77 #define SPI_ENGINE_CMD_SYNC(id) \
78         SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
79 #define SPI_ENGINE_CMD_CS_INV(flags) \
80         SPI_ENGINE_CMD(SPI_ENGINE_INST_CS_INV, 0, (flags))
81
/*
 * struct spi_engine_program - compiled SPI engine instruction stream
 * @length: number of valid entries in @instructions
 * @instructions: 16-bit SPI engine command words, executed in order
 */
struct spi_engine_program {
	unsigned int length;
	uint16_t instructions[] __counted_by(length);
};
86
/**
 * struct spi_engine_message_state - SPI engine per-message state
 */
struct spi_engine_message_state {
	/** @cmd_length: Number of elements in cmd_buf array. */
	unsigned cmd_length;
	/** @cmd_buf: Array of commands not yet written to CMD FIFO. */
	const uint16_t *cmd_buf;
	/** @tx_xfer: Next xfer with tx_buf not yet fully written to TX FIFO. */
	struct spi_transfer *tx_xfer;
	/** @tx_length: Size of tx_buf in bytes. */
	unsigned int tx_length;
	/** @tx_buf: Bytes not yet written to TX FIFO. */
	const uint8_t *tx_buf;
	/** @rx_xfer: Next xfer with rx_buf not yet fully read from RX FIFO. */
	struct spi_transfer *rx_xfer;
	/** @rx_length: Size of rx_buf in bytes. */
	unsigned int rx_length;
	/** @rx_buf: Bytes not yet read from the RX FIFO. */
	uint8_t *rx_buf;
};
108
/* Per-controller driver state, allocated alongside the SPI controller. */
struct spi_engine {
	/* AXI bus clock ("s_axi_aclk") */
	struct clk *clk;
	/* SPI reference clock ("spi_clk"); SCLK max is half its rate */
	struct clk *ref_clk;

	/* protects int_enable and access to the hardware FIFOs */
	spinlock_t lock;

	/* memory-mapped register base */
	void __iomem *base;
	/* state of the message currently being transferred */
	struct spi_engine_message_state msg_state;
	/* signaled by the IRQ handler when the current message finishes */
	struct completion msg_complete;
	/* shadow of SPI_ENGINE_REG_INT_ENABLE */
	unsigned int int_enable;
	/* shadows hardware CS inversion flag state */
	u8 cs_inv;
};
122
123 static void spi_engine_program_add_cmd(struct spi_engine_program *p,
124         bool dry, uint16_t cmd)
125 {
126         p->length++;
127
128         if (!dry)
129                 p->instructions[p->length - 1] = cmd;
130 }
131
132 static unsigned int spi_engine_get_config(struct spi_device *spi)
133 {
134         unsigned int config = 0;
135
136         if (spi->mode & SPI_CPOL)
137                 config |= SPI_ENGINE_CONFIG_CPOL;
138         if (spi->mode & SPI_CPHA)
139                 config |= SPI_ENGINE_CONFIG_CPHA;
140         if (spi->mode & SPI_3WIRE)
141                 config |= SPI_ENGINE_CONFIG_3WIRE;
142         if (spi->mode & SPI_MOSI_IDLE_HIGH)
143                 config |= SPI_ENGINE_CONFIG_SDO_IDLE_HIGH;
144         if (spi->mode & SPI_MOSI_IDLE_LOW)
145                 config &= ~SPI_ENGINE_CONFIG_SDO_IDLE_HIGH;
146
147         return config;
148 }
149
150 static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
151         struct spi_transfer *xfer)
152 {
153         unsigned int len;
154
155         if (xfer->bits_per_word <= 8)
156                 len = xfer->len;
157         else if (xfer->bits_per_word <= 16)
158                 len = xfer->len / 2;
159         else
160                 len = xfer->len / 4;
161
162         while (len) {
163                 unsigned int n = min(len, 256U);
164                 unsigned int flags = 0;
165
166                 if (xfer->tx_buf)
167                         flags |= SPI_ENGINE_TRANSFER_WRITE;
168                 if (xfer->rx_buf)
169                         flags |= SPI_ENGINE_TRANSFER_READ;
170
171                 spi_engine_program_add_cmd(p, dry,
172                         SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
173                 len -= n;
174         }
175 }
176
/*
 * Emits SLEEP instructions covering @delay_ns, minus the @inst_ns already
 * spent executing one instruction, at the given SCLK rate.
 */
static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
				 int delay_ns, int inst_ns, u32 sclk_hz)
{
	unsigned int t;

	/*
	 * Negative delay indicates error, e.g. from spi_delay_to_ns(). And if
	 * delay is less that the instruction execution time, there is no need
	 * for an extra sleep instruction since the instruction execution time
	 * will already cover the required delay.
	 */
	if (delay_ns < 0 || delay_ns <= inst_ns)
		return;

	/* Convert the remaining delay to SCLK ticks, rounding up. */
	t = DIV_ROUND_UP_ULL((u64)(delay_ns - inst_ns) * sclk_hz, NSEC_PER_SEC);
	/* Each SLEEP instruction encodes (ticks - 1), max 256 ticks apiece. */
	while (t) {
		unsigned int n = min(t, 256U);

		spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
		t -= n;
	}
}
199
/*
 * Emits a CS ASSERT instruction. The mask starts with all bits set;
 * when asserting, the bit for this device's chip select is cleared.
 */
static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
		struct spi_device *spi, bool assert)
{
	unsigned int mask;

	if (assert)
		mask = 0xff ^ BIT(spi_get_chipselect(spi, 0));
	else
		mask = 0xff;

	spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(0, mask));
}
210
/*
 * Performs precompile steps on the message.
 *
 * The SPI core does most of the message/transfer validation and filling in
 * fields for us via __spi_validate(). This fixes up anything remaining not
 * done there.
 *
 * NB: This is separate from spi_engine_compile_message() because the latter
 * is called twice and would otherwise result in double-evaluation.
 */
static void spi_engine_precompile_message(struct spi_message *msg)
{
	unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Round the requested rate down to what the clock divider
		 * can actually produce (divider capped at 256).
		 */
		clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
		xfer->effective_speed_hz = max_hz / min(clk_div, 256U);
	}
}
231
/*
 * Compiles a SPI message into a SPI engine instruction stream.
 *
 * When @dry is true only p->length is updated so the caller can size the
 * program buffer; no instructions are stored. Both passes must therefore
 * produce the same instruction count for a given message.
 */
static void spi_engine_compile_message(struct spi_message *msg, bool dry,
				       struct spi_engine_program *p)
{
	struct spi_device *spi = msg->spi;
	struct spi_controller *host = spi->controller;
	struct spi_transfer *xfer;
	int clk_div, new_clk_div, inst_ns;
	bool keep_cs = false;
	u8 bits_per_word = 0;

	/*
	 * Take into account instruction execution time for more accurate sleep
	 * times, especially when the delay is small.
	 */
	inst_ns = DIV_ROUND_UP(NSEC_PER_SEC, host->max_speed_hz);

	/* assumed initial divider state; restored at the end of the program */
	clk_div = 1;

	spi_engine_program_add_cmd(p, dry,
		SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
			spi_engine_get_config(spi)));

	/* Assert CS up front unless the first transfer runs with CS off. */
	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_engine_gen_cs(p, dry, spi, !xfer->cs_off);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* Only rewrite the clock divider register when it changes. */
		new_clk_div = host->max_speed_hz / xfer->effective_speed_hz;
		if (new_clk_div != clk_div) {
			clk_div = new_clk_div;
			/* actual divider used is register value + 1 */
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
					clk_div - 1));
		}

		/* Likewise, only rewrite the word size when it changes. */
		if (bits_per_word != xfer->bits_per_word && xfer->len) {
			bits_per_word = xfer->bits_per_word;
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,
					bits_per_word));
		}

		spi_engine_gen_xfer(p, dry, xfer);
		spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer),
				     inst_ns, xfer->effective_speed_hz);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
				/* cs_change on the last transfer: leave CS asserted */
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_engine_gen_cs(p, dry, spi, false);

				spi_engine_gen_sleep(p, dry, spi_delay_to_ns(
					&xfer->cs_change_delay, xfer), inst_ns,
					xfer->effective_speed_hz);

				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_engine_gen_cs(p, dry, spi, true);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			/* Toggle CS when consecutive transfers disagree on cs_off. */
			spi_engine_gen_cs(p, dry, spi, xfer->cs_off);
		}
	}

	if (!keep_cs)
		spi_engine_gen_cs(p, dry, spi, false);

	/*
	 * Restore clockdiv to default so that future gen_sleep commands don't
	 * have to be aware of the current register state.
	 */
	if (clk_div != 1)
		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0));
}
309
310 static void spi_engine_xfer_next(struct spi_message *msg,
311         struct spi_transfer **_xfer)
312 {
313         struct spi_transfer *xfer = *_xfer;
314
315         if (!xfer) {
316                 xfer = list_first_entry(&msg->transfers,
317                         struct spi_transfer, transfer_list);
318         } else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
319                 xfer = NULL;
320         } else {
321                 xfer = list_next_entry(xfer, transfer_list);
322         }
323
324         *_xfer = xfer;
325 }
326
327 static void spi_engine_tx_next(struct spi_message *msg)
328 {
329         struct spi_engine_message_state *st = msg->state;
330         struct spi_transfer *xfer = st->tx_xfer;
331
332         do {
333                 spi_engine_xfer_next(msg, &xfer);
334         } while (xfer && !xfer->tx_buf);
335
336         st->tx_xfer = xfer;
337         if (xfer) {
338                 st->tx_length = xfer->len;
339                 st->tx_buf = xfer->tx_buf;
340         } else {
341                 st->tx_buf = NULL;
342         }
343 }
344
345 static void spi_engine_rx_next(struct spi_message *msg)
346 {
347         struct spi_engine_message_state *st = msg->state;
348         struct spi_transfer *xfer = st->rx_xfer;
349
350         do {
351                 spi_engine_xfer_next(msg, &xfer);
352         } while (xfer && !xfer->rx_buf);
353
354         st->rx_xfer = xfer;
355         if (xfer) {
356                 st->rx_length = xfer->len;
357                 st->rx_buf = xfer->rx_buf;
358         } else {
359                 st->rx_buf = NULL;
360         }
361 }
362
/*
 * Writes as many pending command words as fit into the CMD FIFO.
 *
 * Called with spi_engine->lock held (see spi_engine_irq() and
 * spi_engine_transfer_one_message()).
 *
 * Returns true if commands remain to be written, i.e. the FIFO filled up
 * before the program was exhausted.
 */
static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine,
				      struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;
	const uint16_t *buf;

	/* Available space in the CMD FIFO, in words. */
	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
	while (n && st->cmd_length) {
		m = min(n, st->cmd_length);
		buf = st->cmd_buf;
		for (i = 0; i < m; i++)
			writel_relaxed(buf[i], addr);
		st->cmd_buf += m;
		st->cmd_length -= m;
		n -= m;
	}

	return st->cmd_length != 0;
}
384
/*
 * Fills the SDO (TX) FIFO from the current TX transfer, advancing to the
 * next transfer with a tx_buf whenever one is exhausted. Each FIFO word
 * carries one SPI word, so the memory buffer is stepped by 1, 2 or 4
 * bytes depending on the transfer's bits_per_word.
 *
 * Called with spi_engine->lock held.
 *
 * Returns true if TX data is still pending after the FIFO filled up.
 */
static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine,
				     struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;

	/* Available space in the SDO FIFO, in words. */
	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
	while (n && st->tx_length) {
		if (st->tx_xfer->bits_per_word <= 8) {
			const u8 *buf = st->tx_buf;

			m = min(n, st->tx_length);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m;
			st->tx_length -= m;
		} else if (st->tx_xfer->bits_per_word <= 16) {
			const u16 *buf = (const u16 *)st->tx_buf;

			m = min(n, st->tx_length / 2);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m * 2;
			st->tx_length -= m * 2;
		} else {
			const u32 *buf = (const u32 *)st->tx_buf;

			m = min(n, st->tx_length / 4);
			for (i = 0; i < m; i++)
				writel_relaxed(buf[i], addr);
			st->tx_buf += m * 4;
			st->tx_length -= m * 4;
		}
		n -= m;
		/* Transfer drained: move on to the next one with TX data. */
		if (st->tx_length == 0)
			spi_engine_tx_next(msg);
	}

	return st->tx_length != 0;
}
426
/*
 * Drains the SDI (RX) FIFO into the current RX transfer, advancing to the
 * next transfer with an rx_buf whenever one is filled. Each FIFO word
 * carries one SPI word, so the memory buffer is stepped by 1, 2 or 4
 * bytes depending on the transfer's bits_per_word.
 *
 * Called with spi_engine->lock held.
 *
 * Returns true if RX data is still expected after the FIFO emptied.
 */
static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine,
				    struct spi_message *msg)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
	struct spi_engine_message_state *st = msg->state;
	unsigned int n, m, i;

	/* Number of words currently available in the SDI FIFO. */
	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
	while (n && st->rx_length) {
		if (st->rx_xfer->bits_per_word <= 8) {
			u8 *buf = st->rx_buf;

			m = min(n, st->rx_length);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m;
			st->rx_length -= m;
		} else if (st->rx_xfer->bits_per_word <= 16) {
			u16 *buf = (u16 *)st->rx_buf;

			m = min(n, st->rx_length / 2);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m * 2;
			st->rx_length -= m * 2;
		} else {
			u32 *buf = (u32 *)st->rx_buf;

			m = min(n, st->rx_length / 4);
			for (i = 0; i < m; i++)
				buf[i] = readl_relaxed(addr);
			st->rx_buf += m * 4;
			st->rx_length -= m * 4;
		}
		n -= m;
		/* Transfer filled: move on to the next one expecting RX data. */
		if (st->rx_length == 0)
			spi_engine_rx_next(msg);
	}

	return st->rx_length != 0;
}
468
/*
 * Interrupt handler: services the command/TX/RX FIFOs for the message
 * currently in flight and completes it when the SYNC instruction at the
 * end of the compiled program has executed.
 */
static irqreturn_t spi_engine_irq(int irq, void *devid)
{
	struct spi_controller *host = devid;
	struct spi_message *msg = host->cur_msg;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	unsigned int disable_int = 0;
	unsigned int pending;
	int completed_id = -1;

	pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);

	/* Ack SYNC and latch which sync ID just completed. */
	if (pending & SPI_ENGINE_INT_SYNC) {
		writel_relaxed(SPI_ENGINE_INT_SYNC,
			spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
		completed_id = readl_relaxed(
			spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
	}

	spin_lock(&spi_engine->lock);

	/* Disable each FIFO interrupt once its work is fully done. */
	if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
		if (!spi_engine_write_cmd_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
	}

	if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
		if (!spi_engine_write_tx_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
	}

	/* On SYNC, also drain any RX data still below the almost-full mark. */
	if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
		if (!spi_engine_read_rx_fifo(spi_engine, msg))
			disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
	}

	/* A matching sync ID means the current message's program finished. */
	if (pending & SPI_ENGINE_INT_SYNC && msg) {
		if (completed_id == AXI_SPI_ENGINE_CUR_MSG_SYNC_ID) {
			msg->status = 0;
			msg->actual_length = msg->frame_length;
			complete(&spi_engine->msg_complete);
			disable_int |= SPI_ENGINE_INT_SYNC;
		}
	}

	if (disable_int) {
		spi_engine->int_enable &= ~disable_int;
		writel_relaxed(spi_engine->int_enable,
			spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	}

	spin_unlock(&spi_engine->lock);

	return IRQ_HANDLED;
}
523
/*
 * optimize_message callback: compiles the message into a SPI engine
 * program, stored in msg->opt_state (freed in unoptimize_message).
 */
static int spi_engine_optimize_message(struct spi_message *msg)
{
	struct spi_engine_program p_dry, *p;

	spi_engine_precompile_message(msg);

	/* First pass: dry run, only counts the required instructions. */
	p_dry.length = 0;
	spi_engine_compile_message(msg, true, &p_dry);

	/* + 1 for the trailing SYNC instruction appended below. */
	p = kzalloc(struct_size(p, instructions, p_dry.length + 1), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	/* Second pass: actually emits the instructions. */
	spi_engine_compile_message(msg, false, p);

	/* SYNC fires the interrupt that completes the message (see irq). */
	spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
						AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));

	msg->opt_state = p;

	return 0;
}
546
547 static int spi_engine_unoptimize_message(struct spi_message *msg)
548 {
549         kfree(msg->opt_state);
550
551         return 0;
552 }
553
/*
 * setup callback (only registered on IP core >= 1.2): programs the
 * per-chip-select inversion flags for SPI_CS_HIGH support.
 */
static int spi_engine_setup(struct spi_device *device)
{
	struct spi_controller *host = device->controller;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);

	/* Update the shadow of the hardware CS inversion flags. */
	if (device->mode & SPI_CS_HIGH)
		spi_engine->cs_inv |= BIT(spi_get_chipselect(device, 0));
	else
		spi_engine->cs_inv &= ~BIT(spi_get_chipselect(device, 0));

	/* Push the new flags as an immediate command to the CMD FIFO. */
	writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine->cs_inv),
		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	/*
	 * In addition to setting the flags, we have to do a CS assert command
	 * to make the new setting actually take effect.
	 */
	writel_relaxed(SPI_ENGINE_CMD_ASSERT(0, 0xff),
		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	return 0;
}
576
/*
 * transfer_one_message callback: kicks off the precompiled program for
 * @msg, lets the interrupt handler feed the FIFOs, and waits for the
 * final SYNC interrupt to signal completion.
 */
static int spi_engine_transfer_one_message(struct spi_controller *host,
	struct spi_message *msg)
{
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	struct spi_engine_message_state *st = &spi_engine->msg_state;
	struct spi_engine_program *p = msg->opt_state;
	unsigned int int_enable = 0;
	unsigned long flags;

	/* reinitialize message state for this transfer */
	memset(st, 0, sizeof(*st));
	st->cmd_buf = p->instructions;
	st->cmd_length = p->length;
	msg->state = st;

	reinit_completion(&spi_engine->msg_complete);

	if (trace_spi_transfer_start_enabled()) {
		struct spi_transfer *xfer;

		list_for_each_entry(xfer, &msg->transfers, transfer_list)
			trace_spi_transfer_start(msg, xfer);
	}

	spin_lock_irqsave(&spi_engine->lock, flags);

	/* Prime the FIFOs; enable an interrupt only if work remains. */
	if (spi_engine_write_cmd_fifo(spi_engine, msg))
		int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;

	spi_engine_tx_next(msg);
	if (spi_engine_write_tx_fifo(spi_engine, msg))
		int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;

	spi_engine_rx_next(msg);
	if (st->rx_length != 0)
		int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;

	/* Always needed: SYNC signals that the program has completed. */
	int_enable |= SPI_ENGINE_INT_SYNC;

	writel_relaxed(int_enable,
		spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	spi_engine->int_enable = int_enable;
	spin_unlock_irqrestore(&spi_engine->lock, flags);

	if (!wait_for_completion_timeout(&spi_engine->msg_complete,
					 msecs_to_jiffies(5000))) {
		dev_err(&host->dev,
			"Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n");
		msg->status = -ETIMEDOUT;
	}

	if (trace_spi_transfer_stop_enabled()) {
		struct spi_transfer *xfer;

		list_for_each_entry(xfer, &msg->transfers, transfer_list)
			trace_spi_transfer_stop(msg, xfer);
	}

	spi_finalize_current_message(host);

	return msg->status;
}
639
/*
 * Devres teardown action: clears/masks all interrupts and puts the IP
 * core back into reset.
 */
static void spi_engine_release_hw(void *p)
{
	struct spi_engine *spi_engine = p;

	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
}
648
/*
 * Probes the platform device: maps registers, validates the IP core
 * version, brings the core out of reset and registers the controller.
 * All resources are devres-managed, so there is no explicit remove.
 */
static int spi_engine_probe(struct platform_device *pdev)
{
	struct spi_engine *spi_engine;
	struct spi_controller *host;
	unsigned int version;
	int irq;
	int ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_engine));
	if (!host)
		return -ENOMEM;

	spi_engine = spi_controller_get_devdata(host);

	spin_lock_init(&spi_engine->lock);
	init_completion(&spi_engine->msg_complete);

	spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(spi_engine->clk))
		return PTR_ERR(spi_engine->clk);

	spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk");
	if (IS_ERR(spi_engine->ref_clk))
		return PTR_ERR(spi_engine->ref_clk);

	spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(spi_engine->base))
		return PTR_ERR(spi_engine->base);

	/* Only major version 1 of the IP core is supported. */
	version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
	if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
		dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
			ADI_AXI_PCORE_VER_MAJOR(version),
			ADI_AXI_PCORE_VER_MINOR(version),
			ADI_AXI_PCORE_VER_PATCH(version));
		return -ENODEV;
	}

	/* Take the core out of reset with all interrupts cleared/masked. */
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);

	ret = devm_add_action_or_reset(&pdev->dev, spi_engine_release_hw,
				       spi_engine);
	if (ret)
		return ret;

	ret = devm_request_irq(&pdev->dev, irq, spi_engine_irq, 0, pdev->name,
			       host);
	if (ret)
		return ret;

	host->dev.of_node = pdev->dev.of_node;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
	host->transfer_one_message = spi_engine_transfer_one_message;
	host->optimize_message = spi_engine_optimize_message;
	host->unoptimize_message = spi_engine_unoptimize_message;
	host->num_chipselect = 8;

	/* Some features depend on the IP core version. */
	if (ADI_AXI_PCORE_VER_MAJOR(version) >= 1) {
		if (ADI_AXI_PCORE_VER_MINOR(version) >= 2) {
			host->mode_bits |= SPI_CS_HIGH;
			host->setup = spi_engine_setup;
		}
		if (ADI_AXI_PCORE_VER_MINOR(version) >= 3)
			host->mode_bits |= SPI_MOSI_IDLE_LOW | SPI_MOSI_IDLE_HIGH;
	}

	if (host->max_speed_hz == 0)
		return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");

	return devm_spi_register_controller(&pdev->dev, host);
}
729
/* Device tree compatible strings handled by this driver. */
static const struct of_device_id spi_engine_match_table[] = {
	{ .compatible = "adi,axi-spi-engine-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, spi_engine_match_table);
735
/* Platform driver registration; teardown is fully devres-managed. */
static struct platform_driver spi_engine_driver = {
	.probe = spi_engine_probe,
	.driver = {
		.name = "spi-engine",
		.of_match_table = spi_engine_match_table,
	},
};
module_platform_driver(spi_engine_driver);
744
745 MODULE_AUTHOR("Lars-Peter Clausen <[email protected]>");
746 MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
747 MODULE_LICENSE("GPL");
This page took 0.072559 seconds and 4 git commands to generate.