]> Git Repo - J-linux.git/blob - drivers/staging/greybus/spilib.c
Merge tag 'trace-v5.13-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt...
[J-linux.git] / drivers / staging / greybus / spilib.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Greybus SPI library
4  *
5  * Copyright 2014-2016 Google Inc.
6  * Copyright 2014-2016 Linaro Ltd.
7  */
8
9 #include <linux/bitops.h>
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/greybus.h>
14 #include <linux/spi/spi.h>
15
16 #include "spilib.h"
17
18 struct gb_spilib {
19         struct gb_connection    *connection;
20         struct device           *parent;
21         struct spi_transfer     *first_xfer;
22         struct spi_transfer     *last_xfer;
23         struct spilib_ops       *ops;
24         u32                     rx_xfer_offset;
25         u32                     tx_xfer_offset;
26         u32                     last_xfer_size;
27         unsigned int            op_timeout;
28         u16                     mode;
29         u16                     flags;
30         u32                     bits_per_word_mask;
31         u8                      num_chipselect;
32         u32                     min_speed_hz;
33         u32                     max_speed_hz;
34 };
35
36 #define GB_SPI_STATE_MSG_DONE           ((void *)0)
37 #define GB_SPI_STATE_MSG_IDLE           ((void *)1)
38 #define GB_SPI_STATE_MSG_RUNNING        ((void *)2)
39 #define GB_SPI_STATE_OP_READY           ((void *)3)
40 #define GB_SPI_STATE_OP_DONE            ((void *)4)
41 #define GB_SPI_STATE_MSG_ERROR          ((void *)-1)
42
43 #define XFER_TIMEOUT_TOLERANCE          200
44
45 static struct spi_master *get_master_from_spi(struct gb_spilib *spi)
46 {
47         return gb_connection_get_data(spi->connection);
48 }
49
50 static int tx_header_fit_operation(u32 tx_size, u32 count, size_t data_max)
51 {
52         size_t headers_size;
53
54         data_max -= sizeof(struct gb_spi_transfer_request);
55         headers_size = (count + 1) * sizeof(struct gb_spi_transfer);
56
57         return tx_size + headers_size > data_max ? 0 : 1;
58 }
59
60 static size_t calc_rx_xfer_size(u32 rx_size, u32 *tx_xfer_size, u32 len,
61                                 size_t data_max)
62 {
63         size_t rx_xfer_size;
64
65         data_max -= sizeof(struct gb_spi_transfer_response);
66
67         if (rx_size + len > data_max)
68                 rx_xfer_size = data_max - rx_size;
69         else
70                 rx_xfer_size = len;
71
72         /* if this is a write_read, for symmetry read the same as write */
73         if (*tx_xfer_size && rx_xfer_size > *tx_xfer_size)
74                 rx_xfer_size = *tx_xfer_size;
75         if (*tx_xfer_size && rx_xfer_size < *tx_xfer_size)
76                 *tx_xfer_size = rx_xfer_size;
77
78         return rx_xfer_size;
79 }
80
81 static size_t calc_tx_xfer_size(u32 tx_size, u32 count, size_t len,
82                                 size_t data_max)
83 {
84         size_t headers_size;
85
86         data_max -= sizeof(struct gb_spi_transfer_request);
87         headers_size = (count + 1) * sizeof(struct gb_spi_transfer);
88
89         if (tx_size + headers_size + len > data_max)
90                 return data_max - (tx_size + sizeof(struct gb_spi_transfer));
91
92         return len;
93 }
94
95 static void clean_xfer_state(struct gb_spilib *spi)
96 {
97         spi->first_xfer = NULL;
98         spi->last_xfer = NULL;
99         spi->rx_xfer_offset = 0;
100         spi->tx_xfer_offset = 0;
101         spi->last_xfer_size = 0;
102         spi->op_timeout = 0;
103 }
104
105 static bool is_last_xfer_done(struct gb_spilib *spi)
106 {
107         struct spi_transfer *last_xfer = spi->last_xfer;
108
109         if ((spi->tx_xfer_offset + spi->last_xfer_size == last_xfer->len) ||
110             (spi->rx_xfer_offset + spi->last_xfer_size == last_xfer->len))
111                 return true;
112
113         return false;
114 }
115
116 static int setup_next_xfer(struct gb_spilib *spi, struct spi_message *msg)
117 {
118         struct spi_transfer *last_xfer = spi->last_xfer;
119
120         if (msg->state != GB_SPI_STATE_OP_DONE)
121                 return 0;
122
123         /*
124          * if we transferred all content of the last transfer, reset values and
125          * check if this was the last transfer in the message
126          */
127         if (is_last_xfer_done(spi)) {
128                 spi->tx_xfer_offset = 0;
129                 spi->rx_xfer_offset = 0;
130                 spi->op_timeout = 0;
131                 if (last_xfer == list_last_entry(&msg->transfers,
132                                                  struct spi_transfer,
133                                                  transfer_list))
134                         msg->state = GB_SPI_STATE_MSG_DONE;
135                 else
136                         spi->first_xfer = list_next_entry(last_xfer,
137                                                           transfer_list);
138                 return 0;
139         }
140
141         spi->first_xfer = last_xfer;
142         if (last_xfer->tx_buf)
143                 spi->tx_xfer_offset += spi->last_xfer_size;
144
145         if (last_xfer->rx_buf)
146                 spi->rx_xfer_offset += spi->last_xfer_size;
147
148         return 0;
149 }
150
151 static struct spi_transfer *get_next_xfer(struct spi_transfer *xfer,
152                                           struct spi_message *msg)
153 {
154         if (xfer == list_last_entry(&msg->transfers, struct spi_transfer,
155                                     transfer_list))
156                 return NULL;
157
158         return list_next_entry(xfer, transfer_list);
159 }
160
161 /* Routines to transfer data */
/*
 * gb_spi_operation_create() - build one Greybus transfer operation for @msg
 * @spi: per-connection state; first_xfer marks where to resume in @msg
 * @connection: Greybus connection whose max payload bounds the operation
 * @msg: the spi_message being serviced
 *
 * Packs as many transfers (starting at spi->first_xfer) as fit into a single
 * GB_SPI_TYPE_TRANSFER operation. A transfer that does not fully fit is
 * truncated (split across operations) and flagged GB_SPI_XFER_INPROGRESS.
 * Drives msg->state: MSG_RUNNING while sizing, OP_READY once the window is
 * fixed, OP_DONE once the request is filled in, MSG_ERROR on bad input.
 * Also sizes spi->op_timeout for the slowest transfer in the window.
 *
 * Returns the operation (caller owns a reference) or NULL on error.
 */
static struct gb_operation *gb_spi_operation_create(struct gb_spilib *spi,
		struct gb_connection *connection, struct spi_message *msg)
{
	struct gb_spi_transfer_request *request;
	struct spi_device *dev = msg->spi;
	struct spi_transfer *xfer;
	struct gb_spi_transfer *gb_xfer;
	struct gb_operation *operation;
	u32 tx_size = 0, rx_size = 0, count = 0, xfer_len = 0, request_size;
	u32 tx_xfer_size = 0, rx_xfer_size = 0, len;
	u32 total_len = 0;
	unsigned int xfer_timeout;
	size_t data_max;
	void *tx_data;

	data_max = gb_operation_get_payload_size_max(connection);
	xfer = spi->first_xfer;

	/* Find number of transfers queued and tx/rx length in the message */

	/* Pass 1: size the window [first_xfer, last_xfer] and tx/rx totals */
	while (msg->state != GB_SPI_STATE_OP_READY) {
		msg->state = GB_SPI_STATE_MSG_RUNNING;
		spi->last_xfer = xfer;

		/* A transfer with neither buffer is malformed input */
		if (!xfer->tx_buf && !xfer->rx_buf) {
			dev_err(spi->parent,
				"bufferless transfer, length %u\n", xfer->len);
			msg->state = GB_SPI_STATE_MSG_ERROR;
			return NULL;
		}

		tx_xfer_size = 0;
		rx_xfer_size = 0;

		if (xfer->tx_buf) {
			/* remaining bytes when resuming a split transfer */
			len = xfer->len - spi->tx_xfer_offset;
			if (!tx_header_fit_operation(tx_size, count, data_max))
				break;
			tx_xfer_size = calc_tx_xfer_size(tx_size, count,
							 len, data_max);
			spi->last_xfer_size = tx_xfer_size;
		}

		if (xfer->rx_buf) {
			len = xfer->len - spi->rx_xfer_offset;
			/* may also shrink tx_xfer_size for symmetry */
			rx_xfer_size = calc_rx_xfer_size(rx_size, &tx_xfer_size,
							 len, data_max);
			spi->last_xfer_size = rx_xfer_size;
		}

		tx_size += tx_xfer_size;
		rx_size += rx_xfer_size;

		total_len += spi->last_xfer_size;
		count++;

		xfer = get_next_xfer(xfer, msg);
		if (!xfer || total_len >= data_max)
			msg->state = GB_SPI_STATE_OP_READY;
	}

	/*
	 * In addition to space for all message descriptors we need
	 * to have enough to hold all tx data.
	 */
	request_size = sizeof(*request);
	request_size += count * sizeof(*gb_xfer);
	request_size += tx_size;

	/* Response consists only of incoming data */
	operation = gb_operation_create(connection, GB_SPI_TYPE_TRANSFER,
					request_size, rx_size, GFP_KERNEL);
	if (!operation)
		return NULL;

	request = operation->request->payload;
	request->count = cpu_to_le16(count);
	request->mode = dev->mode;
	request->chip_select = dev->chip_select;

	gb_xfer = &request->transfers[0];
	tx_data = gb_xfer + count;	/* place tx data after last gb_xfer */

	/* Pass 2: fill in the transfers array and copy out tx payloads */
	xfer = spi->first_xfer;
	while (msg->state != GB_SPI_STATE_OP_DONE) {
		int xfer_delay;
		/* the window's last transfer may have been truncated */
		if (xfer == spi->last_xfer)
			xfer_len = spi->last_xfer_size;
		else
			xfer_len = xfer->len;

		/* make sure we do not timeout in a slow transfer */
		xfer_timeout = xfer_len * 8 * MSEC_PER_SEC / xfer->speed_hz;
		xfer_timeout += GB_OPERATION_TIMEOUT_DEFAULT;

		if (xfer_timeout > spi->op_timeout)
			spi->op_timeout = xfer_timeout;

		gb_xfer->speed_hz = cpu_to_le32(xfer->speed_hz);
		gb_xfer->len = cpu_to_le32(xfer_len);
		/* protocol carries the delay in usecs, capped at U16_MAX */
		xfer_delay = spi_delay_to_ns(&xfer->delay, xfer) / 1000;
		xfer_delay = clamp_t(u16, xfer_delay, 0, U16_MAX);
		gb_xfer->delay_usecs = cpu_to_le16(xfer_delay);
		gb_xfer->cs_change = xfer->cs_change;
		gb_xfer->bits_per_word = xfer->bits_per_word;

		/* Copy tx data */
		if (xfer->tx_buf) {
			gb_xfer->xfer_flags |= GB_SPI_XFER_WRITE;
			/*
			 * NOTE(review): tx_xfer_offset is added for every
			 * transfer in the window, but only first_xfer can be
			 * the continuation of a split transfer — confirm
			 * whether later transfers should use offset 0.
			 */
			memcpy(tx_data, xfer->tx_buf + spi->tx_xfer_offset,
			       xfer_len);
			tx_data += xfer_len;
		}

		if (xfer->rx_buf)
			gb_xfer->xfer_flags |= GB_SPI_XFER_READ;

		if (xfer == spi->last_xfer) {
			/* truncated transfer: more operations will follow */
			if (!is_last_xfer_done(spi))
				gb_xfer->xfer_flags |= GB_SPI_XFER_INPROGRESS;
			msg->state = GB_SPI_STATE_OP_DONE;
			continue;
		}

		gb_xfer++;
		xfer = get_next_xfer(xfer, msg);
	}

	msg->actual_length += total_len;

	return operation;
}
295
296 static void gb_spi_decode_response(struct gb_spilib *spi,
297                                    struct spi_message *msg,
298                                    struct gb_spi_transfer_response *response)
299 {
300         struct spi_transfer *xfer = spi->first_xfer;
301         void *rx_data = response->data;
302         u32 xfer_len;
303
304         while (xfer) {
305                 /* Copy rx data */
306                 if (xfer->rx_buf) {
307                         if (xfer == spi->first_xfer)
308                                 xfer_len = xfer->len - spi->rx_xfer_offset;
309                         else if (xfer == spi->last_xfer)
310                                 xfer_len = spi->last_xfer_size;
311                         else
312                                 xfer_len = xfer->len;
313
314                         memcpy(xfer->rx_buf + spi->rx_xfer_offset, rx_data,
315                                xfer_len);
316                         rx_data += xfer_len;
317                 }
318
319                 if (xfer == spi->last_xfer)
320                         break;
321
322                 xfer = list_next_entry(xfer, transfer_list);
323         }
324 }
325
/*
 * gb_spi_transfer_one_message() - spi_master hook servicing one spi_message
 * @master: the Greybus-backed SPI master
 * @msg: message to transfer
 *
 * Splits @msg into as many Greybus operations as the connection's maximum
 * payload requires, sends each one synchronously, and scatters received
 * data back into the message's rx buffers. The result is stored in
 * msg->status and also returned.
 */
static int gb_spi_transfer_one_message(struct spi_master *master,
				       struct spi_message *msg)
{
	struct gb_spilib *spi = spi_master_get_devdata(master);
	struct gb_connection *connection = spi->connection;
	struct gb_spi_transfer_response *response;
	struct gb_operation *operation;
	int ret = 0;

	spi->first_xfer = list_first_entry_or_null(&msg->transfers,
						   struct spi_transfer,
						   transfer_list);
	/*
	 * NOTE(review): -ENOMEM for an empty transfer list reads oddly
	 * (-EINVAL would seem more accurate) — confirm no caller depends
	 * on this value before changing it.
	 */
	if (!spi->first_xfer) {
		ret = -ENOMEM;
		goto out;
	}

	msg->state = GB_SPI_STATE_MSG_IDLE;

	/* One iteration per operation until the message completes or fails */
	while (msg->state != GB_SPI_STATE_MSG_DONE &&
	       msg->state != GB_SPI_STATE_MSG_ERROR) {
		operation = gb_spi_operation_create(spi, connection, msg);
		if (!operation) {
			msg->state = GB_SPI_STATE_MSG_ERROR;
			ret = -EINVAL;
			continue;
		}

		/* op_timeout was sized by operation_create for slow links */
		ret = gb_operation_request_send_sync_timeout(operation,
							     spi->op_timeout);
		if (!ret) {
			response = operation->response->payload;
			if (response)
				gb_spi_decode_response(spi, msg, response);
		} else {
			dev_err(spi->parent,
				"transfer operation failed: %d\n", ret);
			msg->state = GB_SPI_STATE_MSG_ERROR;
		}

		gb_operation_put(operation);
		/* Advance the transfer window for the next operation */
		setup_next_xfer(spi, msg);
	}

out:
	msg->status = ret;
	clean_xfer_state(spi);
	spi_finalize_current_message(master);

	return ret;
}
377
378 static int gb_spi_prepare_transfer_hardware(struct spi_master *master)
379 {
380         struct gb_spilib *spi = spi_master_get_devdata(master);
381
382         return spi->ops->prepare_transfer_hardware(spi->parent);
383 }
384
385 static int gb_spi_unprepare_transfer_hardware(struct spi_master *master)
386 {
387         struct gb_spilib *spi = spi_master_get_devdata(master);
388
389         spi->ops->unprepare_transfer_hardware(spi->parent);
390
391         return 0;
392 }
393
/* spi_master hook: no per-device setup is required yet. */
static int gb_spi_setup(struct spi_device *spi)
{
	return 0;
}
399
/* spi_master hook: no per-device cleanup is required yet. */
static void gb_spi_cleanup(struct spi_device *spi)
{
}
404
405 /* Routines to get controller information */
406
407 /*
408  * Map Greybus spi mode bits/flags/bpw into Linux ones.
409  * All bits are same for now and so these macro's return same values.
410  */
411 #define gb_spi_mode_map(mode) mode
412 #define gb_spi_flags_map(flags) flags
413
414 static int gb_spi_get_master_config(struct gb_spilib *spi)
415 {
416         struct gb_spi_master_config_response response;
417         u16 mode, flags;
418         int ret;
419
420         ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_MASTER_CONFIG,
421                                 NULL, 0, &response, sizeof(response));
422         if (ret < 0)
423                 return ret;
424
425         mode = le16_to_cpu(response.mode);
426         spi->mode = gb_spi_mode_map(mode);
427
428         flags = le16_to_cpu(response.flags);
429         spi->flags = gb_spi_flags_map(flags);
430
431         spi->bits_per_word_mask = le32_to_cpu(response.bits_per_word_mask);
432         spi->num_chipselect = response.num_chipselect;
433
434         spi->min_speed_hz = le32_to_cpu(response.min_speed_hz);
435         spi->max_speed_hz = le32_to_cpu(response.max_speed_hz);
436
437         return 0;
438 }
439
440 static int gb_spi_setup_device(struct gb_spilib *spi, u8 cs)
441 {
442         struct spi_master *master = get_master_from_spi(spi);
443         struct gb_spi_device_config_request request;
444         struct gb_spi_device_config_response response;
445         struct spi_board_info spi_board = { {0} };
446         struct spi_device *spidev;
447         int ret;
448         u8 dev_type;
449
450         request.chip_select = cs;
451
452         ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_DEVICE_CONFIG,
453                                 &request, sizeof(request),
454                                 &response, sizeof(response));
455         if (ret < 0)
456                 return ret;
457
458         dev_type = response.device_type;
459
460         if (dev_type == GB_SPI_SPI_DEV)
461                 strscpy(spi_board.modalias, "spidev",
462                         sizeof(spi_board.modalias));
463         else if (dev_type == GB_SPI_SPI_NOR)
464                 strscpy(spi_board.modalias, "spi-nor",
465                         sizeof(spi_board.modalias));
466         else if (dev_type == GB_SPI_SPI_MODALIAS)
467                 memcpy(spi_board.modalias, response.name,
468                        sizeof(spi_board.modalias));
469         else
470                 return -EINVAL;
471
472         spi_board.mode          = le16_to_cpu(response.mode);
473         spi_board.bus_num       = master->bus_num;
474         spi_board.chip_select   = cs;
475         spi_board.max_speed_hz  = le32_to_cpu(response.max_speed_hz);
476
477         spidev = spi_new_device(master, &spi_board);
478         if (!spidev)
479                 return -EINVAL;
480
481         return 0;
482 }
483
/**
 * gb_spilib_master_init() - allocate and register a Greybus-backed SPI master
 * @connection: Greybus connection used to reach the remote SPI controller
 * @dev: parent device for the new master
 * @ops: optional hardware-preparation hooks (whole struct or either hook
 *       pointer may be NULL)
 *
 * Queries the remote master configuration, registers an spi_master for it,
 * then instantiates one spi_device per reported chip-select.
 *
 * Return: 0 on success, a negative errno otherwise.
 */
int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
			  struct spilib_ops *ops)
{
	struct gb_spilib *spi;
	struct spi_master *master;
	int ret;
	u8 i;

	/* Allocate master with space for data */
	master = spi_alloc_master(dev, sizeof(*spi));
	if (!master) {
		dev_err(dev, "cannot alloc SPI master\n");
		return -ENOMEM;
	}

	spi = spi_master_get_devdata(master);
	spi->connection = connection;
	gb_connection_set_data(connection, master);
	spi->parent = dev;
	spi->ops = ops;

	/* get master configuration */
	ret = gb_spi_get_master_config(spi);
	if (ret)
		goto exit_spi_put;

	master->bus_num = -1; /* Allow spi-core to allocate it dynamically */
	master->num_chipselect = spi->num_chipselect;
	master->mode_bits = spi->mode;
	master->flags = spi->flags;
	master->bits_per_word_mask = spi->bits_per_word_mask;

	/* Attach methods */
	master->cleanup = gb_spi_cleanup;
	master->setup = gb_spi_setup;
	master->transfer_one_message = gb_spi_transfer_one_message;

	/* Only install the prepare hooks the user actually provided */
	if (ops && ops->prepare_transfer_hardware) {
		master->prepare_transfer_hardware =
			gb_spi_prepare_transfer_hardware;
	}

	if (ops && ops->unprepare_transfer_hardware) {
		master->unprepare_transfer_hardware =
			gb_spi_unprepare_transfer_hardware;
	}

	master->auto_runtime_pm = true;

	ret = spi_register_master(master);
	if (ret < 0)
		goto exit_spi_put;

	/* now, fetch the devices configuration */
	for (i = 0; i < spi->num_chipselect; i++) {
		ret = gb_spi_setup_device(spi, i);
		if (ret < 0) {
			dev_err(dev, "failed to allocate spi device %d: %d\n",
				i, ret);
			goto exit_spi_unregister;
		}
	}

	return 0;

	/* Before registration: drop the allocation reference ourselves */
exit_spi_put:
	spi_master_put(master);

	return ret;

	/* After registration: unregister, which releases the device */
exit_spi_unregister:
	spi_unregister_master(master);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_spilib_master_init);
560
/* Tear down the SPI master previously registered for @connection. */
void gb_spilib_master_exit(struct gb_connection *connection)
{
	struct spi_master *master;

	master = gb_connection_get_data(connection);
	spi_unregister_master(master);
}
EXPORT_SYMBOL_GPL(gb_spilib_master_exit);
568
569 MODULE_LICENSE("GPL v2");
This page took 0.064919 seconds and 4 git commands to generate.