// SPDX-License-Identifier: GPL-2.0-only
/*
 * SPI-Engine SPI controller driver
 * Copyright 2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <[email protected]>
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/fpga/adi-axi-common.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

#define SPI_ENGINE_REG_RESET                    0x40

#define SPI_ENGINE_REG_INT_ENABLE               0x80
#define SPI_ENGINE_REG_INT_PENDING              0x84
#define SPI_ENGINE_REG_INT_SOURCE               0x88

#define SPI_ENGINE_REG_SYNC_ID                  0xc0

#define SPI_ENGINE_REG_CMD_FIFO_ROOM            0xd0
#define SPI_ENGINE_REG_SDO_FIFO_ROOM            0xd4
#define SPI_ENGINE_REG_SDI_FIFO_LEVEL           0xd8

#define SPI_ENGINE_REG_CMD_FIFO                 0xe0
#define SPI_ENGINE_REG_SDO_DATA_FIFO            0xe4
#define SPI_ENGINE_REG_SDI_DATA_FIFO            0xe8
#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK       0xec

#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY         BIT(0)
#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY         BIT(1)
#define SPI_ENGINE_INT_SDI_ALMOST_FULL          BIT(2)
#define SPI_ENGINE_INT_SYNC                     BIT(3)

#define SPI_ENGINE_CONFIG_CPHA                  BIT(0)
#define SPI_ENGINE_CONFIG_CPOL                  BIT(1)
#define SPI_ENGINE_CONFIG_3WIRE                 BIT(2)

#define SPI_ENGINE_INST_TRANSFER                0x0
#define SPI_ENGINE_INST_ASSERT                  0x1
#define SPI_ENGINE_INST_WRITE                   0x2
#define SPI_ENGINE_INST_MISC                    0x3
#define SPI_ENGINE_INST_CS_INV                  0x4

#define SPI_ENGINE_CMD_REG_CLK_DIV              0x0
#define SPI_ENGINE_CMD_REG_CONFIG               0x1
#define SPI_ENGINE_CMD_REG_XFER_BITS            0x2

#define SPI_ENGINE_MISC_SYNC                    0x0
#define SPI_ENGINE_MISC_SLEEP                   0x1

#define SPI_ENGINE_TRANSFER_WRITE               0x1
#define SPI_ENGINE_TRANSFER_READ                0x2

/* Arbitrary sync ID for use by host->cur_msg */
#define AXI_SPI_ENGINE_CUR_MSG_SYNC_ID          0x1

#define SPI_ENGINE_CMD(inst, arg1, arg2) \
        (((inst) << 12) | ((arg1) << 8) | (arg2))

#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
        SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
        SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
#define SPI_ENGINE_CMD_WRITE(reg, val) \
        SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
#define SPI_ENGINE_CMD_SLEEP(delay) \
        SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
#define SPI_ENGINE_CMD_SYNC(id) \
        SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
#define SPI_ENGINE_CMD_CS_INV(flags) \
        SPI_ENGINE_CMD(SPI_ENGINE_INST_CS_INV, 0, (flags))

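/* A compiled SPI Engine program: a length-counted array of 16-bit instructions. */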
struct spi_engine_program {
        unsigned int length;
        uint16_t instructions[] __counted_by(length);
};

/**
 * struct spi_engine_message_state - SPI engine per-message state
 */
struct spi_engine_message_state {
        /** @cmd_length: Number of elements in cmd_buf array. */
        unsigned cmd_length;
        /** @cmd_buf: Array of commands not yet written to CMD FIFO. */
        const uint16_t *cmd_buf;
        /** @tx_xfer: Next xfer with tx_buf not yet fully written to TX FIFO. */
        struct spi_transfer *tx_xfer;
        /** @tx_length: Size of tx_buf in bytes. */
        unsigned int tx_length;
        /** @tx_buf: Bytes not yet written to TX FIFO. */
        const uint8_t *tx_buf;
        /** @rx_xfer: Next xfer with rx_buf not yet fully read from RX FIFO. */
        struct spi_transfer *rx_xfer;
        /** @rx_length: Size of rx_buf in bytes. */
        unsigned int rx_length;
        /** @rx_buf: Bytes not yet read from the RX FIFO. */
        uint8_t *rx_buf;
};

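/* Driver state for one SPI Engine controller instance. */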
struct spi_engine {
        struct clk *clk;
        struct clk *ref_clk;

        spinlock_t lock;

        void __iomem *base;
        struct spi_engine_message_state msg_state;
        struct completion msg_complete;
        unsigned int int_enable;
        /* shadows hardware CS inversion flag state */
        u8 cs_inv;
};

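/*
 * Appends a single instruction to the program. In a dry run only the length
 * is counted so that the caller can size the allocation before the real pass.
 */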
static void spi_engine_program_add_cmd(struct spi_engine_program *p,
        bool dry, uint16_t cmd)
{
        p->length++;

        if (!dry)
                p->instructions[p->length - 1] = cmd;
}

static unsigned int spi_engine_get_config(struct spi_device *spi)
{
        unsigned int config = 0;

        if (spi->mode & SPI_CPOL)
                config |= SPI_ENGINE_CONFIG_CPOL;
        if (spi->mode & SPI_CPHA)
                config |= SPI_ENGINE_CONFIG_CPHA;
        if (spi->mode & SPI_3WIRE)
                config |= SPI_ENGINE_CONFIG_3WIRE;

        return config;
}

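/*
 * Emits transfer instructions for one spi_transfer. The byte length is
 * converted to a word count based on bits_per_word and split into chunks of
 * at most 256 words, since the transfer instruction encodes the length as
 * n - 1 in a single byte.
 */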
static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
        struct spi_transfer *xfer)
{
        unsigned int len;

        if (xfer->bits_per_word <= 8)
                len = xfer->len;
        else if (xfer->bits_per_word <= 16)
                len = xfer->len / 2;
        else
                len = xfer->len / 4;

        while (len) {
                unsigned int n = min(len, 256U);
                unsigned int flags = 0;

                if (xfer->tx_buf)
                        flags |= SPI_ENGINE_TRANSFER_WRITE;
                if (xfer->rx_buf)
                        flags |= SPI_ENGINE_TRANSFER_READ;

                spi_engine_program_add_cmd(p, dry,
                        SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
                len -= n;
        }
}

static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
                                 int delay_ns, int inst_ns, u32 sclk_hz)
{
        unsigned int t;

        /*
         * Negative delay indicates error, e.g. from spi_delay_to_ns(). And if
         * delay is less than the instruction execution time, there is no need
         * for an extra sleep instruction since the instruction execution time
         * will already cover the required delay.
         */
        if (delay_ns < 0 || delay_ns <= inst_ns)
                return;

        t = DIV_ROUND_UP_ULL((u64)(delay_ns - inst_ns) * sclk_hz, NSEC_PER_SEC);
        while (t) {
                unsigned int n = min(t, 256U);

                spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
                t -= n;
        }
}

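/*
 * Emits a chip select assert instruction. The command takes a mask of all CS
 * lines where a cleared bit asserts the corresponding line, so deasserting
 * simply writes the all-ones mask.
 */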
static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
                struct spi_device *spi, bool assert)
{
        unsigned int mask = 0xff;

        if (assert)
                mask ^= BIT(spi_get_chipselect(spi, 0));

        spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(0, mask));
}

/*
 * Performs precompile steps on the message.
 *
 * The SPI core does most of the message/transfer validation and filling in
 * fields for us via __spi_validate(). This fixes up anything remaining that
 * is not done there.
 *
 * NB: This is separate from spi_engine_compile_message() because the latter
 * is called twice and would otherwise result in double-evaluation.
 */
static void spi_engine_precompile_message(struct spi_message *msg)
{
        unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
        struct spi_transfer *xfer;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
                xfer->effective_speed_hz = max_hz / min(clk_div, 256U);
        }
}

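/*
 * Translates a validated spi_message into a SPI Engine instruction stream.
 * Called once with dry set to count the instructions and a second time to
 * fill in the allocated program.
 */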
static void spi_engine_compile_message(struct spi_message *msg, bool dry,
                                       struct spi_engine_program *p)
{
        struct spi_device *spi = msg->spi;
        struct spi_controller *host = spi->controller;
        struct spi_transfer *xfer;
        int clk_div, new_clk_div, inst_ns;
        bool keep_cs = false;
        u8 bits_per_word = 0;

        /*
         * Take into account instruction execution time for more accurate sleep
         * times, especially when the delay is small.
         */
        inst_ns = DIV_ROUND_UP(NSEC_PER_SEC, host->max_speed_hz);

        clk_div = 1;

        spi_engine_program_add_cmd(p, dry,
                SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
                        spi_engine_get_config(spi)));

        xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
        spi_engine_gen_cs(p, dry, spi, !xfer->cs_off);

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                new_clk_div = host->max_speed_hz / xfer->effective_speed_hz;
                if (new_clk_div != clk_div) {
                        clk_div = new_clk_div;
                        /* actual divider used is register value + 1 */
                        spi_engine_program_add_cmd(p, dry,
                                SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
                                        clk_div - 1));
                }

                if (bits_per_word != xfer->bits_per_word) {
                        bits_per_word = xfer->bits_per_word;
                        spi_engine_program_add_cmd(p, dry,
                                SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,
                                        bits_per_word));
                }

                spi_engine_gen_xfer(p, dry, xfer);
                spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer),
                                     inst_ns, xfer->effective_speed_hz);

                if (xfer->cs_change) {
                        if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
                                keep_cs = true;
                        } else {
                                if (!xfer->cs_off)
                                        spi_engine_gen_cs(p, dry, spi, false);

                                spi_engine_gen_sleep(p, dry, spi_delay_to_ns(
                                        &xfer->cs_change_delay, xfer), inst_ns,
                                        xfer->effective_speed_hz);

                                if (!list_next_entry(xfer, transfer_list)->cs_off)
                                        spi_engine_gen_cs(p, dry, spi, true);
                        }
                } else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
                           xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
                        spi_engine_gen_cs(p, dry, spi, xfer->cs_off);
                }
        }

        if (!keep_cs)
                spi_engine_gen_cs(p, dry, spi, false);

        /*
         * Restore clockdiv to default so that future gen_sleep commands don't
         * have to be aware of the current register state.
         */
        if (clk_div != 1)
                spi_engine_program_add_cmd(p, dry,
                        SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0));
}

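/*
 * Advances *_xfer to the next transfer in the message, or to NULL once the
 * last transfer has been consumed.
 */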
static void spi_engine_xfer_next(struct spi_message *msg,
        struct spi_transfer **_xfer)
{
        struct spi_transfer *xfer = *_xfer;

        if (!xfer) {
                xfer = list_first_entry(&msg->transfers,
                        struct spi_transfer, transfer_list);
        } else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
                xfer = NULL;
        } else {
                xfer = list_next_entry(xfer, transfer_list);
        }

        *_xfer = xfer;
}

static void spi_engine_tx_next(struct spi_message *msg)
{
        struct spi_engine_message_state *st = msg->state;
        struct spi_transfer *xfer = st->tx_xfer;

        do {
                spi_engine_xfer_next(msg, &xfer);
        } while (xfer && !xfer->tx_buf);

        st->tx_xfer = xfer;
        if (xfer) {
                st->tx_length = xfer->len;
                st->tx_buf = xfer->tx_buf;
        } else {
                st->tx_buf = NULL;
        }
}

static void spi_engine_rx_next(struct spi_message *msg)
{
        struct spi_engine_message_state *st = msg->state;
        struct spi_transfer *xfer = st->rx_xfer;

        do {
                spi_engine_xfer_next(msg, &xfer);
        } while (xfer && !xfer->rx_buf);

        st->rx_xfer = xfer;
        if (xfer) {
                st->rx_length = xfer->len;
                st->rx_buf = xfer->rx_buf;
        } else {
                st->rx_buf = NULL;
        }
}

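/*
 * Writes as many pending instructions as the CMD FIFO has room for. Returns
 * true if instructions remain to be written.
 */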
static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine,
                                      struct spi_message *msg)
{
        void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
        struct spi_engine_message_state *st = msg->state;
        unsigned int n, m, i;
        const uint16_t *buf;

        n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
        while (n && st->cmd_length) {
                m = min(n, st->cmd_length);
                buf = st->cmd_buf;
                for (i = 0; i < m; i++)
                        writel_relaxed(buf[i], addr);
                st->cmd_buf += m;
                st->cmd_length -= m;
                n -= m;
        }

        return st->cmd_length != 0;
}

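/*
 * Fills the SDO FIFO from the current tx buffer, writing one FIFO word per
 * SPI word (8, 16 or 32 bits wide). Returns true if more data remains to be
 * written.
 */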
static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine,
                                     struct spi_message *msg)
{
        void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
        struct spi_engine_message_state *st = msg->state;
        unsigned int n, m, i;

        n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
        while (n && st->tx_length) {
                if (st->tx_xfer->bits_per_word <= 8) {
                        const u8 *buf = st->tx_buf;

                        m = min(n, st->tx_length);
                        for (i = 0; i < m; i++)
                                writel_relaxed(buf[i], addr);
                        st->tx_buf += m;
                        st->tx_length -= m;
                } else if (st->tx_xfer->bits_per_word <= 16) {
                        const u16 *buf = (const u16 *)st->tx_buf;

                        m = min(n, st->tx_length / 2);
                        for (i = 0; i < m; i++)
                                writel_relaxed(buf[i], addr);
                        st->tx_buf += m * 2;
                        st->tx_length -= m * 2;
                } else {
                        const u32 *buf = (const u32 *)st->tx_buf;

                        m = min(n, st->tx_length / 4);
                        for (i = 0; i < m; i++)
                                writel_relaxed(buf[i], addr);
                        st->tx_buf += m * 4;
                        st->tx_length -= m * 4;
                }
                n -= m;
                if (st->tx_length == 0)
                        spi_engine_tx_next(msg);
        }

        return st->tx_length != 0;
}

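/*
 * Drains the SDI FIFO into the current rx buffer, reading one FIFO word per
 * SPI word. Returns true if more data is still expected.
 */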
static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine,
                                    struct spi_message *msg)
{
        void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
        struct spi_engine_message_state *st = msg->state;
        unsigned int n, m, i;

        n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
        while (n && st->rx_length) {
                if (st->rx_xfer->bits_per_word <= 8) {
                        u8 *buf = st->rx_buf;

                        m = min(n, st->rx_length);
                        for (i = 0; i < m; i++)
                                buf[i] = readl_relaxed(addr);
                        st->rx_buf += m;
                        st->rx_length -= m;
                } else if (st->rx_xfer->bits_per_word <= 16) {
                        u16 *buf = (u16 *)st->rx_buf;

                        m = min(n, st->rx_length / 2);
                        for (i = 0; i < m; i++)
                                buf[i] = readl_relaxed(addr);
                        st->rx_buf += m * 2;
                        st->rx_length -= m * 2;
                } else {
                        u32 *buf = (u32 *)st->rx_buf;

                        m = min(n, st->rx_length / 4);
                        for (i = 0; i < m; i++)
                                buf[i] = readl_relaxed(addr);
                        st->rx_buf += m * 4;
                        st->rx_length -= m * 4;
                }
                n -= m;
                if (st->rx_length == 0)
                        spi_engine_rx_next(msg);
        }

        return st->rx_length != 0;
}

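/*
 * Interrupt handler: services the FIFO almost-empty/almost-full interrupts
 * and completes the current message when a SYNC interrupt with the message's
 * sync ID is seen.
 */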
static irqreturn_t spi_engine_irq(int irq, void *devid)
{
        struct spi_controller *host = devid;
        struct spi_message *msg = host->cur_msg;
        struct spi_engine *spi_engine = spi_controller_get_devdata(host);
        unsigned int disable_int = 0;
        unsigned int pending;
        int completed_id = -1;

        pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);

        if (pending & SPI_ENGINE_INT_SYNC) {
                writel_relaxed(SPI_ENGINE_INT_SYNC,
                        spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
                completed_id = readl_relaxed(
                        spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
        }

        spin_lock(&spi_engine->lock);

        if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
                if (!spi_engine_write_cmd_fifo(spi_engine, msg))
                        disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
        }

        if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
                if (!spi_engine_write_tx_fifo(spi_engine, msg))
                        disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
        }

        if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
                if (!spi_engine_read_rx_fifo(spi_engine, msg))
                        disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
        }

        if (pending & SPI_ENGINE_INT_SYNC && msg) {
                if (completed_id == AXI_SPI_ENGINE_CUR_MSG_SYNC_ID) {
                        msg->status = 0;
                        msg->actual_length = msg->frame_length;
                        complete(&spi_engine->msg_complete);
                        disable_int |= SPI_ENGINE_INT_SYNC;
                }
        }

        if (disable_int) {
                spi_engine->int_enable &= ~disable_int;
                writel_relaxed(spi_engine->int_enable,
                        spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
        }

        spin_unlock(&spi_engine->lock);

        return IRQ_HANDLED;
}

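/*
 * Compiles the message once as a dry run to determine the program size, then
 * allocates the program and compiles it for real. A trailing SYNC instruction
 * is appended so the interrupt handler can detect message completion.
 */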
static int spi_engine_optimize_message(struct spi_message *msg)
{
        struct spi_engine_program p_dry, *p;

        spi_engine_precompile_message(msg);

        p_dry.length = 0;
        spi_engine_compile_message(msg, true, &p_dry);

        p = kzalloc(struct_size(p, instructions, p_dry.length + 1), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        spi_engine_compile_message(msg, false, p);

        spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
                                                AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));

        msg->opt_state = p;

        return 0;
}

static int spi_engine_unoptimize_message(struct spi_message *msg)
{
        kfree(msg->opt_state);

        return 0;
}

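/*
 * Keeps the hardware chip select inversion flags in sync with SPI_CS_HIGH on
 * a per-device basis.
 */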
static int spi_engine_setup(struct spi_device *device)
{
        struct spi_controller *host = device->controller;
        struct spi_engine *spi_engine = spi_controller_get_devdata(host);

        if (device->mode & SPI_CS_HIGH)
                spi_engine->cs_inv |= BIT(spi_get_chipselect(device, 0));
        else
                spi_engine->cs_inv &= ~BIT(spi_get_chipselect(device, 0));

        writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine->cs_inv),
                       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

        /*
         * In addition to setting the flags, we have to do a CS assert command
         * to make the new setting actually take effect.
         */
        writel_relaxed(SPI_ENGINE_CMD_ASSERT(0, 0xff),
                       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

        return 0;
}

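/*
 * Executes one optimized message: primes the command and SDO FIFOs, enables
 * the required interrupts and then waits for the final SYNC interrupt to
 * signal completion, with a 5 second timeout as a safety net.
 */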
static int spi_engine_transfer_one_message(struct spi_controller *host,
        struct spi_message *msg)
{
        struct spi_engine *spi_engine = spi_controller_get_devdata(host);
        struct spi_engine_message_state *st = &spi_engine->msg_state;
        struct spi_engine_program *p = msg->opt_state;
        unsigned int int_enable = 0;
        unsigned long flags;

        /* reinitialize message state for this transfer */
        memset(st, 0, sizeof(*st));
        st->cmd_buf = p->instructions;
        st->cmd_length = p->length;
        msg->state = st;

        reinit_completion(&spi_engine->msg_complete);

        spin_lock_irqsave(&spi_engine->lock, flags);

        if (spi_engine_write_cmd_fifo(spi_engine, msg))
                int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;

        spi_engine_tx_next(msg);
        if (spi_engine_write_tx_fifo(spi_engine, msg))
                int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;

        spi_engine_rx_next(msg);
        if (st->rx_length != 0)
                int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;

        int_enable |= SPI_ENGINE_INT_SYNC;

        writel_relaxed(int_enable,
                spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
        spi_engine->int_enable = int_enable;
        spin_unlock_irqrestore(&spi_engine->lock, flags);

        if (!wait_for_completion_timeout(&spi_engine->msg_complete,
                                         msecs_to_jiffies(5000))) {
                dev_err(&host->dev,
                        "Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n");
                msg->status = -ETIMEDOUT;
        }

        spi_finalize_current_message(host);

        return msg->status;
}

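/*
 * devm action that quiesces the hardware on driver removal: clear and disable
 * all interrupts and put the core back into reset.
 */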
static void spi_engine_release_hw(void *p)
{
        struct spi_engine *spi_engine = p;

        writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
        writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
        writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
}

static int spi_engine_probe(struct platform_device *pdev)
{
        struct spi_engine *spi_engine;
        struct spi_controller *host;
        unsigned int version;
        int irq;
        int ret;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_engine));
        if (!host)
                return -ENOMEM;

        spi_engine = spi_controller_get_devdata(host);

        spin_lock_init(&spi_engine->lock);
        init_completion(&spi_engine->msg_complete);

        spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
        if (IS_ERR(spi_engine->clk))
                return PTR_ERR(spi_engine->clk);

        spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk");
        if (IS_ERR(spi_engine->ref_clk))
                return PTR_ERR(spi_engine->ref_clk);

        spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(spi_engine->base))
                return PTR_ERR(spi_engine->base);

        version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
        if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
                dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
                        ADI_AXI_PCORE_VER_MAJOR(version),
                        ADI_AXI_PCORE_VER_MINOR(version),
                        ADI_AXI_PCORE_VER_PATCH(version));
                return -ENODEV;
        }

        writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
        writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
        writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);

        ret = devm_add_action_or_reset(&pdev->dev, spi_engine_release_hw,
                                       spi_engine);
        if (ret)
                return ret;

        ret = devm_request_irq(&pdev->dev, irq, spi_engine_irq, 0, pdev->name,
                               host);
        if (ret)
                return ret;

        host->dev.of_node = pdev->dev.of_node;
        host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
        host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
        host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
        host->transfer_one_message = spi_engine_transfer_one_message;
        host->optimize_message = spi_engine_optimize_message;
        host->unoptimize_message = spi_engine_unoptimize_message;
        host->num_chipselect = 8;

        /* Some features depend on the IP core version. */
        if (ADI_AXI_PCORE_VER_MINOR(version) >= 2) {
                host->mode_bits |= SPI_CS_HIGH;
                host->setup = spi_engine_setup;
        }

        if (host->max_speed_hz == 0)
                return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");

        return devm_spi_register_controller(&pdev->dev, host);
}

static const struct of_device_id spi_engine_match_table[] = {
        { .compatible = "adi,axi-spi-engine-1.00.a" },
        { },
};
MODULE_DEVICE_TABLE(of, spi_engine_match_table);

static struct platform_driver spi_engine_driver = {
        .probe = spi_engine_probe,
        .driver = {
                .name = "spi-engine",
                .of_match_table = spi_engine_match_table,
        },
};
module_platform_driver(spi_engine_driver);

MODULE_AUTHOR("Lars-Peter Clausen <[email protected]>");
MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
MODULE_LICENSE("GPL");