1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright 2014-2016 Google Inc.
6 * Copyright 2014-2016 Linaro Ltd.
9 #include <linux/bitops.h>
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/greybus.h>
14 #include <linux/spi/spi.h>
19 struct gb_connection *connection;
20 struct device *parent;
21 struct spi_transfer *first_xfer;
22 struct spi_transfer *last_xfer;
23 struct spilib_ops *ops;
27 unsigned int op_timeout;
30 u32 bits_per_word_mask;
/*
 * Transfer state-machine tokens stored in spi_message->state.  They are
 * small integers (and -1 for error) cast to void * so they fit the generic
 * state pointer; they are only ever compared for identity, never
 * dereferenced.
 */
36 #define GB_SPI_STATE_MSG_DONE ((void *)0)
37 #define GB_SPI_STATE_MSG_IDLE ((void *)1)
38 #define GB_SPI_STATE_MSG_RUNNING ((void *)2)
39 #define GB_SPI_STATE_OP_READY ((void *)3)
40 #define GB_SPI_STATE_OP_DONE ((void *)4)
41 #define GB_SPI_STATE_MSG_ERROR ((void *)-1)

/*
 * Extra slack added on top of a computed transfer timeout.  NOTE(review):
 * the use site is not visible in this view; presumably milliseconds, to
 * match op_timeout -- confirm where it is applied.
 */
43 #define XFER_TIMEOUT_TOLERANCE 200
/*
 * Return the spi_master that was stashed as the connection's private data
 * (set in gb_spilib_master_init() via gb_connection_set_data()).
 */
45 static struct spi_master *get_master_from_spi(struct gb_spilib *spi)
47 return gb_connection_get_data(spi->connection);
/*
 * Check whether @tx_size bytes of tx data plus the per-transfer descriptor
 * headers for (@count + 1) transfers still fit in an operation payload of
 * @data_max bytes.  Returns 1 if they fit, 0 otherwise.
 */
50 static int tx_header_fit_operation(u32 tx_size, u32 count, size_t data_max)
/* Space remaining after the fixed transfer-request header. */
54 data_max -= sizeof(struct gb_spi_transfer_request);
55 headers_size = (count + 1) * sizeof(struct gb_spi_transfer);
57 return tx_size + headers_size > data_max ? 0 : 1;
/*
 * Compute how many bytes of an rx transfer of @len bytes fit into the
 * response payload given that @rx_size bytes are already accounted for.
 * For a combined write_read transfer the rx and tx chunk sizes are forced
 * equal by shrinking whichever is larger, so the two directions stay
 * symmetric within one operation.
 */
60 static size_t calc_rx_xfer_size(u32 rx_size, u32 *tx_xfer_size, u32 len,
/* Space remaining after the fixed transfer-response header. */
65 data_max -= sizeof(struct gb_spi_transfer_response);
67 if (rx_size + len > data_max)
68 rx_xfer_size = data_max - rx_size;
72 /* if this is a write_read, for symmetry read the same as write */
73 if (*tx_xfer_size && rx_xfer_size > *tx_xfer_size)
74 rx_xfer_size = *tx_xfer_size;
75 if (*tx_xfer_size && rx_xfer_size < *tx_xfer_size)
76 *tx_xfer_size = rx_xfer_size;
/*
 * Compute how many bytes of a tx transfer of @len bytes fit into the
 * request payload, given @tx_size bytes of tx data and @count transfer
 * descriptors already accounted for.  When the whole transfer does not
 * fit, return the largest chunk that does.
 */
81 static size_t calc_tx_xfer_size(u32 tx_size, u32 count, size_t len,
/* Space remaining after the fixed transfer-request header. */
86 data_max -= sizeof(struct gb_spi_transfer_request);
87 headers_size = (count + 1) * sizeof(struct gb_spi_transfer);
89 if (tx_size + headers_size + len > data_max)
90 return data_max - (tx_size + sizeof(struct gb_spi_transfer));
/*
 * Reset all per-message fragmentation bookkeeping so the next
 * spi_message starts from a clean slate.
 */
95 static void clean_xfer_state(struct gb_spilib *spi)
97 spi->first_xfer = NULL;
98 spi->last_xfer = NULL;
99 spi->rx_xfer_offset = 0;
100 spi->tx_xfer_offset = 0;
101 spi->last_xfer_size = 0;
/*
 * Return true when the most recently queued chunk reaches the end of the
 * last transfer, i.e. offset + chunk size equals the transfer's total
 * length in either direction.
 */
105 static bool is_last_xfer_done(struct gb_spilib *spi)
107 struct spi_transfer *last_xfer = spi->last_xfer;
109 if ((spi->tx_xfer_offset + spi->last_xfer_size == last_xfer->len) ||
110 (spi->rx_xfer_offset + spi->last_xfer_size == last_xfer->len))
/*
 * Advance the fragmentation state after one Greybus operation completes:
 * either move on to the next spi_transfer in the message (or mark the
 * whole message done), or stay on the same transfer and bump the tx/rx
 * offsets so the remainder goes into the next operation.
 */
116 static int setup_next_xfer(struct gb_spilib *spi, struct spi_message *msg)
118 struct spi_transfer *last_xfer = spi->last_xfer;
/* Nothing to do unless the previous operation finished cleanly. */
120 if (msg->state != GB_SPI_STATE_OP_DONE)
124 * if we transferred all content of the last transfer, reset values and
125 * check if this was the last transfer in the message
127 if (is_last_xfer_done(spi)) {
128 spi->tx_xfer_offset = 0;
129 spi->rx_xfer_offset = 0;
131 if (last_xfer == list_last_entry(&msg->transfers,
134 msg->state = GB_SPI_STATE_MSG_DONE;
136 spi->first_xfer = list_next_entry(last_xfer,
/* Partially sent transfer: resume it, shifted by what went out. */
141 spi->first_xfer = last_xfer;
142 if (last_xfer->tx_buf)
143 spi->tx_xfer_offset += spi->last_xfer_size;
145 if (last_xfer->rx_buf)
146 spi->rx_xfer_offset += spi->last_xfer_size;
/*
 * Return the transfer following @xfer in the message's list, or NULL-like
 * termination when @xfer is the last one (the early-exit branch is not
 * visible in this view -- presumably returns NULL; confirm).
 */
151 static struct spi_transfer *get_next_xfer(struct spi_transfer *xfer,
152 struct spi_message *msg)
154 if (xfer == list_last_entry(&msg->transfers, struct spi_transfer,
158 return list_next_entry(xfer, transfer_list);
161 /* Routines to transfer data */
/*
 * Build one Greybus SPI transfer operation covering as much of @msg as
 * fits in a single request/response payload.  Two passes over the
 * transfer list: the first sizes the request (counting descriptors and
 * tx/rx bytes, fragmenting the last transfer if needed), the second fills
 * in the descriptor array and copies the tx data.  Also stretches
 * spi->op_timeout so slow transfers are not cut short.
 */
162 static struct gb_operation *gb_spi_operation_create(struct gb_spilib *spi,
163 struct gb_connection *connection, struct spi_message *msg)
165 struct gb_spi_transfer_request *request;
166 struct spi_device *dev = msg->spi;
167 struct spi_transfer *xfer;
168 struct gb_spi_transfer *gb_xfer;
169 struct gb_operation *operation;
170 u32 tx_size = 0, rx_size = 0, count = 0, xfer_len = 0, request_size;
171 u32 tx_xfer_size = 0, rx_xfer_size = 0, len;
173 unsigned int xfer_timeout;
177 data_max = gb_operation_get_payload_size_max(connection);
178 xfer = spi->first_xfer;
180 /* Find number of transfers queued and tx/rx length in the message */
182 while (msg->state != GB_SPI_STATE_OP_READY) {
183 msg->state = GB_SPI_STATE_MSG_RUNNING;
184 spi->last_xfer = xfer;
/* A transfer with neither buffer is malformed; abort the message. */
186 if (!xfer->tx_buf && !xfer->rx_buf) {
188 "bufferless transfer, length %u\n", xfer->len);
189 msg->state = GB_SPI_STATE_MSG_ERROR;
/* Remaining tx bytes of this transfer, past what was already sent. */
197 len = xfer->len - spi->tx_xfer_offset;
198 if (!tx_header_fit_operation(tx_size, count, data_max))
200 tx_xfer_size = calc_tx_xfer_size(tx_size, count,
202 spi->last_xfer_size = tx_xfer_size;
/* Remaining rx bytes of this transfer, past what was already read. */
206 len = xfer->len - spi->rx_xfer_offset;
207 rx_xfer_size = calc_rx_xfer_size(rx_size, &tx_xfer_size,
209 spi->last_xfer_size = rx_xfer_size;
212 tx_size += tx_xfer_size;
213 rx_size += rx_xfer_size;
215 total_len += spi->last_xfer_size;
/* Stop sizing when the list ends or the payload is full. */
218 xfer = get_next_xfer(xfer, msg);
219 if (!xfer || total_len >= data_max)
220 msg->state = GB_SPI_STATE_OP_READY;
224 * In addition to space for all message descriptors we need
225 * to have enough to hold all tx data.
227 request_size = sizeof(*request);
228 request_size += count * sizeof(*gb_xfer);
229 request_size += tx_size;
231 /* Response consists only of incoming data */
232 operation = gb_operation_create(connection, GB_SPI_TYPE_TRANSFER,
233 request_size, rx_size, GFP_KERNEL);
237 request = operation->request->payload;
238 request->count = cpu_to_le16(count);
239 request->mode = dev->mode;
240 request->chip_select = dev->chip_select;
242 gb_xfer = &request->transfers[0];
243 tx_data = gb_xfer + count; /* place tx data after last gb_xfer */
245 /* Fill in the transfers array */
246 xfer = spi->first_xfer;
247 while (msg->state != GB_SPI_STATE_OP_DONE) {
/* The final transfer may be a fragment; use its clamped size. */
249 if (xfer == spi->last_xfer)
250 xfer_len = spi->last_xfer_size;
252 xfer_len = xfer->len;
254 /* make sure we do not timeout in a slow transfer */
255 xfer_timeout = xfer_len * 8 * MSEC_PER_SEC / xfer->speed_hz;
256 xfer_timeout += GB_OPERATION_TIMEOUT_DEFAULT;
258 if (xfer_timeout > spi->op_timeout)
259 spi->op_timeout = xfer_timeout;
261 gb_xfer->speed_hz = cpu_to_le32(xfer->speed_hz);
262 gb_xfer->len = cpu_to_le32(xfer_len);
/* spi_delay_to_ns() yields ns; wire format wants u16 microseconds. */
263 xfer_delay = spi_delay_to_ns(&xfer->delay, xfer) / 1000;
264 xfer_delay = clamp_t(u16, xfer_delay, 0, U16_MAX);
265 gb_xfer->delay_usecs = cpu_to_le16(xfer_delay);
266 gb_xfer->cs_change = xfer->cs_change;
267 gb_xfer->bits_per_word = xfer->bits_per_word;
271 gb_xfer->xfer_flags |= GB_SPI_XFER_WRITE;
272 memcpy(tx_data, xfer->tx_buf + spi->tx_xfer_offset,
278 gb_xfer->xfer_flags |= GB_SPI_XFER_READ;
/* Flag a fragmented last transfer so the remote keeps CS asserted. */
280 if (xfer == spi->last_xfer) {
281 if (!is_last_xfer_done(spi))
282 gb_xfer->xfer_flags |= GB_SPI_XFER_INPROGRESS;
283 msg->state = GB_SPI_STATE_OP_DONE;
288 xfer = get_next_xfer(xfer, msg);
291 msg->actual_length += total_len;
/*
 * Scatter the rx bytes of a completed transfer operation back into the
 * spi_message's rx buffers, walking the same first..last transfer window
 * and offsets that gb_spi_operation_create() encoded into the request.
 */
296 static void gb_spi_decode_response(struct gb_spilib *spi,
297 struct spi_message *msg,
298 struct gb_spi_transfer_response *response)
300 struct spi_transfer *xfer = spi->first_xfer;
301 void *rx_data = response->data;
/* First transfer may resume mid-buffer; last may be a fragment. */
307 if (xfer == spi->first_xfer)
308 xfer_len = xfer->len - spi->rx_xfer_offset;
309 else if (xfer == spi->last_xfer)
310 xfer_len = spi->last_xfer_size;
312 xfer_len = xfer->len;
314 memcpy(xfer->rx_buf + spi->rx_xfer_offset, rx_data,
319 if (xfer == spi->last_xfer)
322 xfer = list_next_entry(xfer, transfer_list);
/*
 * spi_master->transfer_one_message() hook: split @msg into however many
 * Greybus operations it takes, send each synchronously, decode the rx
 * payloads, and finalize the message.  Loops until the state machine
 * reaches MSG_DONE or MSG_ERROR.
 */
326 static int gb_spi_transfer_one_message(struct spi_master *master,
327 struct spi_message *msg)
329 struct gb_spilib *spi = spi_master_get_devdata(master);
330 struct gb_connection *connection = spi->connection;
331 struct gb_spi_transfer_response *response;
332 struct gb_operation *operation;
335 spi->first_xfer = list_first_entry_or_null(&msg->transfers,
/* Empty message: nothing to transfer. */
338 if (!spi->first_xfer) {
343 msg->state = GB_SPI_STATE_MSG_IDLE;
345 while (msg->state != GB_SPI_STATE_MSG_DONE &&
346 msg->state != GB_SPI_STATE_MSG_ERROR) {
347 operation = gb_spi_operation_create(spi, connection, msg);
349 msg->state = GB_SPI_STATE_MSG_ERROR;
354 ret = gb_operation_request_send_sync_timeout(operation,
357 response = operation->response->payload;
359 gb_spi_decode_response(spi, msg, response);
362 "transfer operation failed: %d\n", ret);
363 msg->state = GB_SPI_STATE_MSG_ERROR;
366 gb_operation_put(operation);
367 setup_next_xfer(spi, msg);
/* Reset fragmentation state before handing the message back. */
372 clean_xfer_state(spi);
373 spi_finalize_current_message(master);
/* Forward hardware preparation to the host-driver supplied callback. */
378 static int gb_spi_prepare_transfer_hardware(struct spi_master *master)
380 struct gb_spilib *spi = spi_master_get_devdata(master);
382 return spi->ops->prepare_transfer_hardware(spi->parent);
/* Forward hardware teardown to the host-driver supplied callback. */
385 static int gb_spi_unprepare_transfer_hardware(struct spi_master *master)
387 struct gb_spilib *spi = spi_master_get_devdata(master);
389 spi->ops->unprepare_transfer_hardware(spi->parent);
/* spi_master->setup() hook: intentionally a no-op. */
394 static int gb_spi_setup(struct spi_device *spi)
396 /* Nothing to do for now */
/* spi_master->cleanup() hook: intentionally a no-op. */
400 static void gb_spi_cleanup(struct spi_device *spi)
402 /* Nothing to do for now */
405 /* Routines to get controller information */
408 * Map Greybus spi mode bits/flags/bpw into Linux ones.
409 * All bits are the same for now, so these macros return their input values.
/* Identity maps: Greybus and Linux encodings currently coincide. */
411 #define gb_spi_mode_map(mode) mode
412 #define gb_spi_flags_map(flags) flags
/*
 * Query the remote SPI controller's capabilities (mode bits, flags,
 * bits-per-word mask, chip-select count, speed range) over Greybus and
 * cache them in @spi for spi_master registration.
 */
414 static int gb_spi_get_master_config(struct gb_spilib *spi)
416 struct gb_spi_master_config_response response;
420 ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_MASTER_CONFIG,
421 NULL, 0, &response, sizeof(response));
/* Wire format is little-endian; convert before caching. */
425 mode = le16_to_cpu(response.mode);
426 spi->mode = gb_spi_mode_map(mode);
428 flags = le16_to_cpu(response.flags);
429 spi->flags = gb_spi_flags_map(flags);
431 spi->bits_per_word_mask = le32_to_cpu(response.bits_per_word_mask);
432 spi->num_chipselect = response.num_chipselect;
434 spi->min_speed_hz = le32_to_cpu(response.min_speed_hz);
435 spi->max_speed_hz = le32_to_cpu(response.max_speed_hz);
/*
 * Query the device behind chip-select @cs over Greybus, pick a modalias
 * from its reported type (generic spidev, spi-nor, or a name supplied by
 * the module), and register it with the SPI core via spi_new_device().
 */
440 static int gb_spi_setup_device(struct gb_spilib *spi, u8 cs)
442 struct spi_master *master = get_master_from_spi(spi);
443 struct gb_spi_device_config_request request;
444 struct gb_spi_device_config_response response;
445 struct spi_board_info spi_board = { {0} };
446 struct spi_device *spidev;
450 request.chip_select = cs;
452 ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_DEVICE_CONFIG,
453 &request, sizeof(request),
454 &response, sizeof(response));
458 dev_type = response.device_type;
460 if (dev_type == GB_SPI_SPI_DEV)
461 strscpy(spi_board.modalias, "spidev",
462 sizeof(spi_board.modalias));
463 else if (dev_type == GB_SPI_SPI_NOR)
464 strscpy(spi_board.modalias, "spi-nor",
465 sizeof(spi_board.modalias));
466 else if (dev_type == GB_SPI_SPI_MODALIAS)
467 memcpy(spi_board.modalias, response.name,
468 sizeof(spi_board.modalias));
472 spi_board.mode = le16_to_cpu(response.mode);
473 spi_board.bus_num = master->bus_num;
474 spi_board.chip_select = cs;
475 spi_board.max_speed_hz = le32_to_cpu(response.max_speed_hz);
477 spidev = spi_new_device(master, &spi_board);
/*
 * gb_spilib_master_init() - allocate and register an SPI master bridging
 * a Greybus connection.
 * @connection: Greybus connection to the remote SPI controller
 * @dev: parent device for the new spi_master
 * @ops: optional host-driver callbacks (prepare/unprepare hardware)
 *
 * Fetches the remote controller configuration, registers the spi_master,
 * then enumerates and registers one spi_device per chip select.
 * Returns 0 on success or a negative errno (error paths use the labels
 * at the end; some cleanup lines are not visible in this view).
 */
484 int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
485 struct spilib_ops *ops)
487 struct gb_spilib *spi;
488 struct spi_master *master;
492 /* Allocate master with space for data */
493 master = spi_alloc_master(dev, sizeof(*spi));
495 dev_err(dev, "cannot alloc SPI master\n");
499 spi = spi_master_get_devdata(master);
500 spi->connection = connection;
/* Let get_master_from_spi() find the master via the connection. */
501 gb_connection_set_data(connection, master);
505 /* get master configuration */
506 ret = gb_spi_get_master_config(spi);
510 master->bus_num = -1; /* Allow spi-core to allocate it dynamically */
511 master->num_chipselect = spi->num_chipselect;
512 master->mode_bits = spi->mode;
513 master->flags = spi->flags;
514 master->bits_per_word_mask = spi->bits_per_word_mask;
517 master->cleanup = gb_spi_cleanup;
518 master->setup = gb_spi_setup;
519 master->transfer_one_message = gb_spi_transfer_one_message;
/* Hardware prepare/unprepare hooks are optional per host driver. */
521 if (ops && ops->prepare_transfer_hardware) {
522 master->prepare_transfer_hardware =
523 gb_spi_prepare_transfer_hardware;
526 if (ops && ops->unprepare_transfer_hardware) {
527 master->unprepare_transfer_hardware =
528 gb_spi_unprepare_transfer_hardware;
531 master->auto_runtime_pm = true;
533 ret = spi_register_master(master);
537 /* now, fetch the devices configuration */
538 for (i = 0; i < spi->num_chipselect; i++) {
539 ret = gb_spi_setup_device(spi, i);
541 dev_err(dev, "failed to allocate spi device %d: %d\n",
543 goto exit_spi_unregister;
/* Error labels: drop the allocated master / undo registration. */
550 spi_master_put(master);
555 spi_unregister_master(master);
/*
 * gb_spilib_master_exit() - tear down the master created by
 * gb_spilib_master_init(); unregistering also releases the attached
 * spi devices.
 */
561 void gb_spilib_master_exit(struct gb_connection *connection)
563 struct spi_master *master = gb_connection_get_data(connection);
565 spi_unregister_master(master);
567 EXPORT_SYMBOL_GPL(gb_spilib_master_exit);
569 MODULE_LICENSE("GPL v2");