// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/ptp_clock_kernel.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
        struct spi_device       *spi = to_spi_device(dev);

        spi_controller_put(spi->controller);
        kfree(spi->driver_override);
        kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
        const struct spi_device *spi = to_spi_device(dev);
        int len;

        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;

        return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
                                     struct device_attribute *a,
                                     const char *buf, size_t count)
{
        struct spi_device *spi = to_spi_device(dev);
        const char *end = memchr(buf, '\n', count);
        const size_t len = end ? end - buf : count;
        const char *driver_override, *old;

        /* We need to keep extra room for a newline when displaying value */
        if (len >= (PAGE_SIZE - 1))
                return -EINVAL;

        driver_override = kstrndup(buf, len, GFP_KERNEL);
        if (!driver_override)
                return -ENOMEM;

        device_lock(dev);
        old = spi->driver_override;
        if (len) {
                spi->driver_override = driver_override;
        } else {
                /* Empty string, disable driver override */
                spi->driver_override = NULL;
                kfree(driver_override);
        }
        device_unlock(dev);
        kfree(old);

        return count;
}

static ssize_t driver_override_show(struct device *dev,
                                    struct device_attribute *a, char *buf)
{
        const struct spi_device *spi = to_spi_device(dev);
        ssize_t len;

        device_lock(dev);
        len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
        device_unlock(dev);
        return len;
}
static DEVICE_ATTR_RW(driver_override);

#define SPI_STATISTICS_ATTRS(field, file)                               \
static ssize_t spi_controller_##field##_show(struct device *dev,        \
                                             struct device_attribute *attr, \
                                             char *buf)                 \
{                                                                       \
        struct spi_controller *ctlr = container_of(dev,                 \
                                         struct spi_controller, dev);   \
        return spi_statistics_##field##_show(&ctlr->statistics, buf);   \
}                                                                       \
static struct device_attribute dev_attr_spi_controller_##field = {      \
        .attr = { .name = file, .mode = 0444 },                         \
        .show = spi_controller_##field##_show,                          \
};                                                                      \
static ssize_t spi_device_##field##_show(struct device *dev,            \
                                         struct device_attribute *attr, \
                                        char *buf)                      \
{                                                                       \
        struct spi_device *spi = to_spi_device(dev);                    \
        return spi_statistics_##field##_show(&spi->statistics, buf);    \
}                                                                       \
static struct device_attribute dev_attr_spi_device_##field = {          \
        .attr = { .name = file, .mode = 0444 },                         \
        .show = spi_device_##field##_show,                              \
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)      \
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
                                            char *buf)                  \
{                                                                       \
        unsigned long flags;                                            \
        ssize_t len;                                                    \
        spin_lock_irqsave(&stat->lock, flags);                          \
        len = sprintf(buf, format_string, stat->field);                 \
        spin_unlock_irqrestore(&stat->lock, flags);                     \
        return len;                                                     \
}                                                                       \
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)                       \
        SPI_STATISTICS_SHOW_NAME(field, __stringify(field),             \
                                 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)              \
        SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,           \
                                 "transfer_bytes_histo_" number,        \
                                 transfer_bytes_histo[index],  "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
        &dev_attr_modalias.attr,
        &dev_attr_driver_override.attr,
        NULL,
};

static const struct attribute_group spi_dev_group = {
        .attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
        &dev_attr_spi_device_messages.attr,
        &dev_attr_spi_device_transfers.attr,
        &dev_attr_spi_device_errors.attr,
        &dev_attr_spi_device_timedout.attr,
        &dev_attr_spi_device_spi_sync.attr,
        &dev_attr_spi_device_spi_sync_immediate.attr,
        &dev_attr_spi_device_spi_async.attr,
        &dev_attr_spi_device_bytes.attr,
        &dev_attr_spi_device_bytes_rx.attr,
        &dev_attr_spi_device_bytes_tx.attr,
        &dev_attr_spi_device_transfer_bytes_histo0.attr,
        &dev_attr_spi_device_transfer_bytes_histo1.attr,
        &dev_attr_spi_device_transfer_bytes_histo2.attr,
        &dev_attr_spi_device_transfer_bytes_histo3.attr,
        &dev_attr_spi_device_transfer_bytes_histo4.attr,
        &dev_attr_spi_device_transfer_bytes_histo5.attr,
        &dev_attr_spi_device_transfer_bytes_histo6.attr,
        &dev_attr_spi_device_transfer_bytes_histo7.attr,
        &dev_attr_spi_device_transfer_bytes_histo8.attr,
        &dev_attr_spi_device_transfer_bytes_histo9.attr,
        &dev_attr_spi_device_transfer_bytes_histo10.attr,
        &dev_attr_spi_device_transfer_bytes_histo11.attr,
        &dev_attr_spi_device_transfer_bytes_histo12.attr,
        &dev_attr_spi_device_transfer_bytes_histo13.attr,
        &dev_attr_spi_device_transfer_bytes_histo14.attr,
        &dev_attr_spi_device_transfer_bytes_histo15.attr,
        &dev_attr_spi_device_transfer_bytes_histo16.attr,
        &dev_attr_spi_device_transfers_split_maxsize.attr,
        NULL,
};

static const struct attribute_group spi_device_statistics_group = {
        .name  = "statistics",
        .attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
        &spi_dev_group,
        &spi_device_statistics_group,
        NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
        &dev_attr_spi_controller_messages.attr,
        &dev_attr_spi_controller_transfers.attr,
        &dev_attr_spi_controller_errors.attr,
        &dev_attr_spi_controller_timedout.attr,
        &dev_attr_spi_controller_spi_sync.attr,
        &dev_attr_spi_controller_spi_sync_immediate.attr,
        &dev_attr_spi_controller_spi_async.attr,
        &dev_attr_spi_controller_bytes.attr,
        &dev_attr_spi_controller_bytes_rx.attr,
        &dev_attr_spi_controller_bytes_tx.attr,
        &dev_attr_spi_controller_transfer_bytes_histo0.attr,
        &dev_attr_spi_controller_transfer_bytes_histo1.attr,
        &dev_attr_spi_controller_transfer_bytes_histo2.attr,
        &dev_attr_spi_controller_transfer_bytes_histo3.attr,
        &dev_attr_spi_controller_transfer_bytes_histo4.attr,
        &dev_attr_spi_controller_transfer_bytes_histo5.attr,
        &dev_attr_spi_controller_transfer_bytes_histo6.attr,
        &dev_attr_spi_controller_transfer_bytes_histo7.attr,
        &dev_attr_spi_controller_transfer_bytes_histo8.attr,
        &dev_attr_spi_controller_transfer_bytes_histo9.attr,
        &dev_attr_spi_controller_transfer_bytes_histo10.attr,
        &dev_attr_spi_controller_transfer_bytes_histo11.attr,
        &dev_attr_spi_controller_transfer_bytes_histo12.attr,
        &dev_attr_spi_controller_transfer_bytes_histo13.attr,
        &dev_attr_spi_controller_transfer_bytes_histo14.attr,
        &dev_attr_spi_controller_transfer_bytes_histo15.attr,
        &dev_attr_spi_controller_transfer_bytes_histo16.attr,
        &dev_attr_spi_controller_transfers_split_maxsize.attr,
        NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
        .name  = "statistics",
        .attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
        &spi_controller_statistics_group,
        NULL,
};

static void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
                                              struct spi_transfer *xfer,
                                              struct spi_controller *ctlr)
{
        unsigned long flags;
        int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

        if (l2len < 0)
                l2len = 0;

        spin_lock_irqsave(&stats->lock, flags);

        stats->transfers++;
        stats->transfer_bytes_histo[l2len]++;

        stats->bytes += xfer->len;
        if ((xfer->tx_buf) &&
            (xfer->tx_buf != ctlr->dummy_tx))
                stats->bytes_tx += xfer->len;
        if ((xfer->rx_buf) &&
            (xfer->rx_buf != ctlr->dummy_rx))
                stats->bytes_rx += xfer->len;

        spin_unlock_irqrestore(&stats->lock, flags);
}

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
        while (id->name[0]) {
                if (!strcmp(name, id->name))
                        return id;
                id++;
        }
        return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
        const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

        return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
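
/*
 * An illustrative sketch (not part of this file): a client driver's
 * probe() can use spi_get_device_id() to look up the driver_data of
 * the matched entry.  The "mychip" names below are hypothetical.
 *
 *	static int mychip_probe(struct spi_device *spi)
 *	{
 *		const struct spi_device_id *id = spi_get_device_id(spi);
 *		unsigned long variant = id->driver_data;
 *
 *		return mychip_init(spi, variant);
 *	}
 */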

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
        const struct spi_device *spi = to_spi_device(dev);
        const struct spi_driver *sdrv = to_spi_driver(drv);

        /* Check override first, and if set, only use the named driver */
        if (spi->driver_override)
                return strcmp(spi->driver_override, drv->name) == 0;

        /* Attempt an OF style match */
        if (of_driver_match_device(dev, drv))
                return 1;

        /* Then try ACPI */
        if (acpi_driver_match_device(dev, drv))
                return 1;

        if (sdrv->id_table)
                return !!spi_match_id(sdrv->id_table, spi->modalias);

        return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        const struct spi_device         *spi = to_spi_device(dev);
        int rc;

        rc = acpi_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;

        return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
        struct spi_device               *spi = to_spi_device(dev);
        int ret;

        ret = of_clk_set_defaults(dev->of_node, false);
        if (ret)
                return ret;

        if (dev->of_node) {
                spi->irq = of_irq_get(dev->of_node, 0);
                if (spi->irq == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                if (spi->irq < 0)
                        spi->irq = 0;
        }

        ret = dev_pm_domain_attach(dev, true);
        if (ret)
                return ret;

        if (sdrv->probe) {
                ret = sdrv->probe(spi);
                if (ret)
                        dev_pm_domain_detach(dev, true);
        }

        return ret;
}

static void spi_remove(struct device *dev)
{
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);

        if (sdrv->remove) {
                int ret;

                ret = sdrv->remove(to_spi_device(dev));
                if (ret)
                        dev_warn(dev,
                                 "Failed to unbind driver (%pe), ignoring\n",
                                 ERR_PTR(ret));
        }

        dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
        if (dev->driver) {
                const struct spi_driver *sdrv = to_spi_driver(dev->driver);

                if (sdrv->shutdown)
                        sdrv->shutdown(to_spi_device(dev));
        }
}

struct bus_type spi_bus_type = {
        .name           = "spi",
        .dev_groups     = spi_dev_groups,
        .match          = spi_match_device,
        .uevent         = spi_uevent,
        .probe          = spi_probe,
        .remove         = spi_remove,
        .shutdown       = spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
        sdrv->driver.owner = owner;
        sdrv->driver.bus = &spi_bus_type;

        /*
         * For Really Good Reasons we use spi: modaliases not of:
         * modaliases for DT so module autoloading won't work if we
         * don't have a spi_device_id as well as a compatible string.
         */
        if (sdrv->driver.of_match_table) {
                const struct of_device_id *of_id;

                for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
                     of_id++) {
                        const char *of_name;

                        /* Strip off any vendor prefix */
                        of_name = strnchr(of_id->compatible,
                                          sizeof(of_id->compatible), ',');
                        if (of_name)
                                of_name++;
                        else
                                of_name = of_id->compatible;

                        if (sdrv->id_table) {
                                const struct spi_device_id *spi_id;

                                spi_id = spi_match_id(sdrv->id_table, of_name);
                                if (spi_id)
                                        continue;
                        } else {
                                if (strcmp(sdrv->driver.name, of_name) == 0)
                                        continue;
                        }

                        pr_warn("SPI driver %s has no spi_device_id for %s\n",
                                sdrv->driver.name, of_id->compatible);
                }
        }

        return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
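
/*
 * An illustrative sketch of a driver that avoids the warning above:
 * each OF compatible (vendor prefix stripped) has a matching entry in
 * the spi_device_id table, so both of: and spi: module autoloading
 * work.  All "mychip"/"acme" names are hypothetical.
 *
 *	static const struct of_device_id mychip_of_match[] = {
 *		{ .compatible = "acme,mychip" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, mychip_of_match);
 *
 *	static const struct spi_device_id mychip_spi_ids[] = {
 *		{ .name = "mychip" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, mychip_spi_ids);
 *
 *	static struct spi_driver mychip_driver = {
 *		.driver = {
 *			.name		= "mychip",
 *			.of_match_table	= mychip_of_match,
 *		},
 *		.id_table	= mychip_spi_ids,
 *		.probe		= mychip_probe,
 *	};
 *	module_spi_driver(mychip_driver);
 */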

/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into code like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
        struct list_head        list;
        struct spi_board_info   board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process; also used to
 * protect the struct idr objects.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
        struct spi_device       *spi;

        if (!spi_controller_get(ctlr))
                return NULL;

        spi = kzalloc(sizeof(*spi), GFP_KERNEL);
        if (!spi) {
                spi_controller_put(ctlr);
                return NULL;
        }

        spi->master = spi->controller = ctlr;
        spi->dev.parent = &ctlr->dev;
        spi->dev.bus = &spi_bus_type;
        spi->dev.release = spidev_release;
        spi->cs_gpio = -ENOENT;
        spi->mode = ctlr->buswidth_override_bits;

        spin_lock_init(&spi->statistics.lock);

        device_initialize(&spi->dev);
        return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
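
/*
 * An illustrative sketch of the allocate-then-add flow described in the
 * kernel-doc above, assuming the caller already holds a reference on a
 * live controller; the chip parameters are hypothetical.
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	strlcpy(spi->modalias, "mychip", sizeof(spi->modalias));
 *	status = spi_add_device(spi);
 *	if (status)
 *		spi_dev_put(spi);	// drop the only reference on failure
 */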

static void spi_dev_set_name(struct spi_device *spi)
{
        struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

        if (adev) {
                dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
                return;
        }

        dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
                     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
        struct spi_device *spi = to_spi_device(dev);
        struct spi_device *new_spi = data;

        if (spi->controller == new_spi->controller &&
            spi->chip_select == new_spi->chip_select)
                return -EBUSY;
        return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
        if (spi->controller->cleanup)
                spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
        struct spi_controller *ctlr = spi->controller;
        struct device *dev = ctlr->dev.parent;
        int status;

        /*
         * We need to make sure there's no other device with this
         * chipselect **BEFORE** we call setup(), else we'll trash
         * its configuration.
         */
        status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
        if (status) {
                dev_err(dev, "chipselect %d already in use\n",
                                spi->chip_select);
                return status;
        }

        /* Controller may unregister concurrently */
        if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
            !device_is_registered(&ctlr->dev)) {
                return -ENODEV;
        }

        /* Descriptors take precedence */
        if (ctlr->cs_gpiods)
                spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
        else if (ctlr->cs_gpios)
                spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
        /*
         * Drivers may modify this initial i/o setup, but will
         * normally rely on the device having been set up.  Devices
         * using SPI_CS_HIGH can't coexist well otherwise...
         */
        status = spi_setup(spi);
        if (status < 0) {
                dev_err(dev, "can't setup %s, status %d\n",
                                dev_name(&spi->dev), status);
                return status;
        }

        /* Device may be bound to an active driver when this returns */
        status = device_add(&spi->dev);
        if (status < 0) {
                dev_err(dev, "can't add %s, status %d\n",
                                dev_name(&spi->dev), status);
                spi_cleanup(spi);
        } else {
                dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
        }

        return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
        struct spi_controller *ctlr = spi->controller;
        struct device *dev = ctlr->dev.parent;
        int status;

        /* Chipselects are numbered 0..max; validate. */
        if (spi->chip_select >= ctlr->num_chipselect) {
                dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
                        ctlr->num_chipselect);
                return -EINVAL;
        }

        /* Set the bus ID string */
        spi_dev_set_name(spi);

        mutex_lock(&ctlr->add_lock);
        status = __spi_add_device(spi);
        mutex_unlock(&ctlr->add_lock);
        return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

static int spi_add_device_locked(struct spi_device *spi)
{
        struct spi_controller *ctlr = spi->controller;
        struct device *dev = ctlr->dev.parent;

        /* Chipselects are numbered 0..max; validate. */
        if (spi->chip_select >= ctlr->num_chipselect) {
                dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
                        ctlr->num_chipselect);
                return -EINVAL;
        }

        /* Set the bus ID string */
        spi_dev_set_name(spi);

        WARN_ON(!mutex_is_locked(&ctlr->add_lock));
        return __spi_add_device(spi);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
                                  struct spi_board_info *chip)
{
        struct spi_device       *proxy;
        int                     status;

        /*
         * NOTE:  caller did any chip->bus_num checks necessary.
         *
         * Also, unless we change the return value convention to use
         * error-or-pointer (not NULL-or-pointer), troubleshootability
         * suggests syslogged diagnostics are best here (ugh).
         */

        proxy = spi_alloc_device(ctlr);
        if (!proxy)
                return NULL;

        WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

        proxy->chip_select = chip->chip_select;
        proxy->max_speed_hz = chip->max_speed_hz;
        proxy->mode = chip->mode;
        proxy->irq = chip->irq;
        strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
        proxy->dev.platform_data = (void *) chip->platform_data;
        proxy->controller_data = chip->controller_data;
        proxy->controller_state = NULL;

        if (chip->swnode) {
                status = device_add_software_node(&proxy->dev, chip->swnode);
                if (status) {
                        dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
                                chip->modalias, status);
                        goto err_dev_put;
                }
        }

        status = spi_add_device(proxy);
        if (status < 0)
                goto err_dev_put;

        return proxy;

err_dev_put:
        device_remove_software_node(&proxy->dev);
        spi_dev_put(proxy);
        return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
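
/*
 * An illustrative sketch of the adapter-driver use case mentioned in
 * the kernel-doc above (e.g. a USB based SPI bridge that learned about
 * a chip out-of-band); the descriptor contents are hypothetical.
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "mychip",
 *		.max_speed_hz	= 2000000,
 *		.mode		= SPI_MODE_0,
 *		.chip_select	= 0,
 *	};
 *	struct spi_device *dev = spi_new_device(ctlr, &chip);
 *	if (!dev)
 *		dev_err(&ctlr->dev, "cannot instantiate mychip\n");
 */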

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
        if (!spi)
                return;

        if (spi->dev.of_node) {
                of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
                of_node_put(spi->dev.of_node);
        }
        if (ACPI_COMPANION(&spi->dev))
                acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
        device_remove_software_node(&spi->dev);
        device_del(&spi->dev);
        spi_cleanup(spi);
        put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
                                              struct spi_board_info *bi)
{
        struct spi_device *dev;

        if (ctlr->bus_num != bi->bus_num)
                return;

        dev = spi_new_device(ctlr, bi);
        if (!dev)
                dev_err(ctlr->dev.parent, "can't create new device for %s\n",
                        bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
        struct boardinfo *bi;
        int i;

        if (!n)
                return 0;

        bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
        if (!bi)
                return -ENOMEM;

        for (i = 0; i < n; i++, bi++, info++) {
                struct spi_controller *ctlr;

                memcpy(&bi->board_info, info, sizeof(*info));

                mutex_lock(&board_lock);
                list_add_tail(&bi->list, &board_list);
                list_for_each_entry(ctlr, &spi_controller_list, list)
                        spi_match_controller_to_boardinfo(ctlr,
                                                          &bi->board_info);
                mutex_unlock(&board_lock);
        }

        return 0;
}
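
/*
 * An illustrative sketch of a board file using this call, per the
 * kernel-doc above; the device list and bus number are hypothetical.
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "mychip",
 *			.max_speed_hz	= 1000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *		},
 *	};
 *
 *	static int __init board_spi_init(void)
 *	{
 *		return spi_register_board_info(board_spi_devices,
 *					       ARRAY_SIZE(board_spi_devices));
 *	}
 *	arch_initcall(board_spi_init);
 */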

/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
                           size_t size, gfp_t gfp)
{
        struct spi_res *sres;

        sres = kzalloc(sizeof(*sres) + size, gfp);
        if (!sres)
                return NULL;

        INIT_LIST_HEAD(&sres->entry);
        sres->release = release;

        return sres->data;
}

/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
        struct spi_res *sres = container_of(res, struct spi_res, data);

        if (!res)
                return;

        WARN_ON(!list_empty(&sres->entry));
        kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
        struct spi_res *sres = container_of(res, struct spi_res, data);

        WARN_ON(!list_empty(&sres->entry));
        list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all spi resources for this message
 * @ctlr:  the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
        struct spi_res *res, *tmp;

        list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
                if (res->release)
                        res->release(ctlr, message, res->data);

                list_del(&res->entry);

                kfree(res);
        }
}

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
        bool activate = enable;

        /*
         * Avoid calling into the driver (or doing delays) if the chip select
         * isn't actually changing from the last time this was called.
         */
        if (!force && (spi->controller->last_cs_enable == enable) &&
            (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
                return;

        trace_spi_set_cs(spi, activate);

        spi->controller->last_cs_enable = enable;
        spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

        if ((spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
            !spi->controller->set_cs_timing) && !activate) {
                spi_delay_exec(&spi->cs_hold, NULL);
        }

        if (spi->mode & SPI_CS_HIGH)
                enable = !enable;

        if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
                if (!(spi->mode & SPI_NO_CS)) {
                        if (spi->cs_gpiod) {
                                /*
                                 * Historically ACPI has had no means of expressing the
                                 * GPIO polarity, so the SPISerialBus() resource defines
                                 * it on a per-chip basis. In order to avoid a chain of
                                 * negations, the GPIO polarity is considered to be
                                 * Active High. Even for the cases when _DSD() is
                                 * involved (in the updated versions of ACPI) the GPIO
                                 * CS polarity must be defined Active High to avoid
                                 * ambiguity. That's why we use enable, which takes
                                 * SPI_CS_HIGH into account.
                                 */
                                if (has_acpi_companion(&spi->dev))
                                        gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
                                else
                                        /* Polarity handled by GPIO library */
                                        gpiod_set_value_cansleep(spi->cs_gpiod, activate);
                        } else {
                                /*
                                 * Invert the enable line, as active low is
                                 * default for SPI.
                                 */
                                gpio_set_value_cansleep(spi->cs_gpio, !enable);
                        }
                }
                /* Some SPI masters need both GPIO CS & slave_select */
                if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
                    spi->controller->set_cs)
                        spi->controller->set_cs(spi, !enable);
        } else if (spi->controller->set_cs) {
                spi->controller->set_cs(spi, !enable);
        }

        if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
            !spi->controller->set_cs_timing) {
                if (activate)
                        spi_delay_exec(&spi->cs_setup, NULL);
                else
                        spi_delay_exec(&spi->cs_inactive, NULL);
        }
}

#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
                struct sg_table *sgt, void *buf, size_t len,
                enum dma_data_direction dir)
{
        const bool vmalloced_buf = is_vmalloc_addr(buf);
        unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
        const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
                                (unsigned long)buf < (PKMAP_BASE +
                                        (LAST_PKMAP * PAGE_SIZE)));
#else
        const bool kmap_buf = false;
#endif
        int desc_len;
        int sgs;
        struct page *vm_page;
        struct scatterlist *sg;
        void *sg_buf;
        size_t min;
        int i, ret;

        if (vmalloced_buf || kmap_buf) {
                desc_len = min_t(int, max_seg_size, PAGE_SIZE);
                sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
        } else if (virt_addr_valid(buf)) {
                desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
                sgs = DIV_ROUND_UP(len, desc_len);
        } else {
                return -EINVAL;
        }

        ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
        if (ret != 0)
                return ret;

        sg = &sgt->sgl[0];
        for (i = 0; i < sgs; i++) {

                if (vmalloced_buf || kmap_buf) {
                        /*
                         * Next scatterlist entry size is the minimum between
                         * the desc_len and the remaining buffer length that
                         * fits in a page.
                         */
                        min = min_t(size_t, desc_len,
                                    min_t(size_t, len,
                                          PAGE_SIZE - offset_in_page(buf)));
                        if (vmalloced_buf)
                                vm_page = vmalloc_to_page(buf);
                        else
                                vm_page = kmap_to_page(buf);
                        if (!vm_page) {
                                sg_free_table(sgt);
                                return -ENOMEM;
                        }
                        sg_set_page(sg, vm_page,
                                    min, offset_in_page(buf));
                } else {
                        min = min_t(size_t, len, desc_len);
                        sg_buf = buf;
                        sg_set_buf(sg, sg_buf, min);
                }

                buf += min;
                len -= min;
                sg = sg_next(sg);
        }

        ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
        if (!ret)
                ret = -ENOMEM;
        if (ret < 0) {
                sg_free_table(sgt);
                return ret;
        }

        sgt->nents = ret;

        return 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
                   struct sg_table *sgt, enum dma_data_direction dir)
{
        if (sgt->orig_nents) {
                dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
                sg_free_table(sgt);
        }
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
        struct device *tx_dev, *rx_dev;
        struct spi_transfer *xfer;
        int ret;

        if (!ctlr->can_dma)
                return 0;

        if (ctlr->dma_tx)
                tx_dev = ctlr->dma_tx->device->dev;
        else if (ctlr->dma_map_dev)
                tx_dev = ctlr->dma_map_dev;
        else
                tx_dev = ctlr->dev.parent;

        if (ctlr->dma_rx)
                rx_dev = ctlr->dma_rx->device->dev;
        else if (ctlr->dma_map_dev)
                rx_dev = ctlr->dma_map_dev;
        else
                rx_dev = ctlr->dev.parent;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!ctlr->can_dma(ctlr, msg->spi, xfer))
                        continue;

                if (xfer->tx_buf != NULL) {
                        ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
                                          (void *)xfer->tx_buf, xfer->len,
                                          DMA_TO_DEVICE);
                        if (ret != 0)
                                return ret;
                }

                if (xfer->rx_buf != NULL) {
                        ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
                                          xfer->rx_buf, xfer->len,
                                          DMA_FROM_DEVICE);
                        if (ret != 0) {
                                spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
                                              DMA_TO_DEVICE);
                                return ret;
                        }
                }
        }

        ctlr->cur_msg_mapped = true;

        return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        struct device *tx_dev, *rx_dev;

        if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
                return 0;

        if (ctlr->dma_tx)
                tx_dev = ctlr->dma_tx->device->dev;
        else
                tx_dev = ctlr->dev.parent;

        if (ctlr->dma_rx)
                rx_dev = ctlr->dma_rx->device->dev;
        else
                rx_dev = ctlr->dev.parent;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!ctlr->can_dma(ctlr, msg->spi, xfer))
                        continue;

                spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
                spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
        }

        ctlr->cur_msg_mapped = false;

        return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
                                struct spi_message *msg)
{
        return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
                                  struct spi_message *msg)
{
        return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
                                struct spi_message *msg)
{
        struct spi_transfer *xfer;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                /*
                 * Restore tx_buf and rx_buf to their original NULL
                 * values if they were replaced by the dummy buffers.
                 */
                if (xfer->tx_buf == ctlr->dummy_tx)
                        xfer->tx_buf = NULL;
                if (xfer->rx_buf == ctlr->dummy_rx)
                        xfer->rx_buf = NULL;
        }

        return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        void *tmp;
        unsigned int max_tx, max_rx;

        if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
                && !(msg->spi->mode & SPI_3WIRE)) {
                max_tx = 0;
                max_rx = 0;

                list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                        if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
                            !xfer->tx_buf)
                                max_tx = max(xfer->len, max_tx);
                        if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
                            !xfer->rx_buf)
                                max_rx = max(xfer->len, max_rx);
                }

                if (max_tx) {
                        tmp = krealloc(ctlr->dummy_tx, max_tx,
                                       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
                        if (!tmp)
                                return -ENOMEM;
                        ctlr->dummy_tx = tmp;
                }

                if (max_rx) {
                        tmp = krealloc(ctlr->dummy_rx, max_rx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        ctlr->dummy_rx = tmp;
                }

                if (max_tx || max_rx) {
                        list_for_each_entry(xfer, &msg->transfers,
                                            transfer_list) {
                                if (!xfer->len)
                                        continue;
                                if (!xfer->tx_buf)
                                        xfer->tx_buf = ctlr->dummy_tx;
                                if (!xfer->rx_buf)
                                        xfer->rx_buf = ctlr->dummy_rx;
                        }
                }
        }

        return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
                             struct spi_message *msg,
                             struct spi_transfer *xfer)
{
        struct spi_statistics *statm = &ctlr->statistics;
        struct spi_statistics *stats = &msg->spi->statistics;
        u32 speed_hz = xfer->speed_hz;
        unsigned long long ms;

        if (spi_controller_is_slave(ctlr)) {
                if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
                        dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
                        return -EINTR;
                }
        } else {
                if (!speed_hz)
                        speed_hz = 100000;

                /*
                 * For each byte we wait for 8 cycles of the SPI clock.
                 * Since speed is defined in Hz and we want milliseconds,
                 * use respective multiplier, but before the division,
                 * otherwise we may get 0 for short transfers.
                 */
                ms = 8LL * MSEC_PER_SEC * xfer->len;
                do_div(ms, speed_hz);

                /*
                 * Double it and add 200 ms of tolerance; use the
                 * predefined maximum in case of overflow.
                 */
                ms += ms + 200;
                if (ms > UINT_MAX)
                        ms = UINT_MAX;

                ms = wait_for_completion_timeout(&ctlr->xfer_completion,
                                                 msecs_to_jiffies(ms));

                if (ms == 0) {
                        SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
                        SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
                        dev_err(&msg->spi->dev,
                                "SPI transfer timed out\n");
                        return -ETIMEDOUT;
                }
        }

        return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
        if (!ns)
                return;
        if (ns <= NSEC_PER_USEC) {
                ndelay(ns);
        } else {
                u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

                if (us <= 10)
                        udelay(us);
                else
                        usleep_range(us, us + DIV_ROUND_UP(us, 10));
        }
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
        u32 delay = _delay->value;
        u32 unit = _delay->unit;
        u32 hz;

        if (!delay)
                return 0;

        switch (unit) {
        case SPI_DELAY_UNIT_USECS:
                delay *= NSEC_PER_USEC;
                break;
        case SPI_DELAY_UNIT_NSECS:
                /* Nothing to do here */
                break;
        case SPI_DELAY_UNIT_SCK:
                /* clock cycles need to be obtained from spi_transfer */
                if (!xfer)
                        return -EINVAL;
                /*
                 * If the effective speed is unknown, approximate it
                 * by underestimating with half of the requested Hz.
                 */
                hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
                if (!hz)
                        return -EINVAL;

                /* Convert delay to nanoseconds */
                delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
                break;
        default:
                return -EINVAL;
        }

        return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);
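
/*
 * An illustrative sketch of the SPI_DELAY_UNIT_SCK case above, which
 * needs the transfer for its clock rate; the numbers are hypothetical.
 *
 *	struct spi_delay d = {
 *		.value	= 4,
 *		.unit	= SPI_DELAY_UNIT_SCK,
 *	};
 *	int ns = spi_delay_to_ns(&d, xfer);
 *	// with xfer->effective_speed_hz == 1000000:
 *	// 4 * DIV_ROUND_UP(NSEC_PER_SEC, 1000000) = 4000 ns
 */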
1363
1364 int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
1365 {
1366         int delay;
1367
1368         might_sleep();
1369
1370         if (!_delay)
1371                 return -EINVAL;
1372
1373         delay = spi_delay_to_ns(_delay, xfer);
1374         if (delay < 0)
1375                 return delay;
1376
1377         _spi_transfer_delay_ns(delay);
1378
1379         return 0;
1380 }
1381 EXPORT_SYMBOL_GPL(spi_delay_exec);
1382
1383 static void _spi_transfer_cs_change_delay(struct spi_message *msg,
1384                                           struct spi_transfer *xfer)
1385 {
1386         u32 default_delay_ns = 10 * NSEC_PER_USEC;
1387         u32 delay = xfer->cs_change_delay.value;
1388         u32 unit = xfer->cs_change_delay.unit;
1389         int ret;
1390
1391         /* return early on "fast" mode - for everything but USECS */
1392         if (!delay) {
1393                 if (unit == SPI_DELAY_UNIT_USECS)
1394                         _spi_transfer_delay_ns(default_delay_ns);
1395                 return;
1396         }
1397
1398         ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
1399         if (ret) {
1400                 dev_err_once(&msg->spi->dev,
1401                              "Use of unsupported delay unit %i, using default of %luus\n",
1402                              unit, default_delay_ns / NSEC_PER_USEC);
1403                 _spi_transfer_delay_ns(default_delay_ns);
1404         }
1405 }
1406
1407 /*
1408  * spi_transfer_one_message - Default implementation of transfer_one_message()
1409  *
1410  * This is a standard implementation of transfer_one_message() for
1411  * drivers which implement a transfer_one() operation.  It provides
1412  * standard handling of delays and chip select management.
1413  */
1414 static int spi_transfer_one_message(struct spi_controller *ctlr,
1415                                     struct spi_message *msg)
1416 {
1417         struct spi_transfer *xfer;
1418         bool keep_cs = false;
1419         int ret = 0;
1420         struct spi_statistics *statm = &ctlr->statistics;
1421         struct spi_statistics *stats = &msg->spi->statistics;
1422
1423         spi_set_cs(msg->spi, true, false);
1424
1425         SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
1426         SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
1427
1428         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1429                 trace_spi_transfer_start(msg, xfer);
1430
1431                 spi_statistics_add_transfer_stats(statm, xfer, ctlr);
1432                 spi_statistics_add_transfer_stats(stats, xfer, ctlr);
1433
1434                 if (!ctlr->ptp_sts_supported) {
1435                         xfer->ptp_sts_word_pre = 0;
1436                         ptp_read_system_prets(xfer->ptp_sts);
1437                 }
1438
1439                 if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
1440                         reinit_completion(&ctlr->xfer_completion);
1441
1442 fallback_pio:
1443                         ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1444                         if (ret < 0) {
1445                                 if (ctlr->cur_msg_mapped &&
1446                                    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
1447                                         __spi_unmap_msg(ctlr, msg);
1448                                         ctlr->fallback = true;
1449                                         xfer->error &= ~SPI_TRANS_FAIL_NO_START;
1450                                         goto fallback_pio;
1451                                 }
1452
1453                                 SPI_STATISTICS_INCREMENT_FIELD(statm,
1454                                                                errors);
1455                                 SPI_STATISTICS_INCREMENT_FIELD(stats,
1456                                                                errors);
1457                                 dev_err(&msg->spi->dev,
1458                                         "SPI transfer failed: %d\n", ret);
1459                                 goto out;
1460                         }
1461
1462                         if (ret > 0) {
1463                                 ret = spi_transfer_wait(ctlr, msg, xfer);
1464                                 if (ret < 0)
1465                                         msg->status = ret;
1466                         }
1467                 } else {
1468                         if (xfer->len)
1469                                 dev_err(&msg->spi->dev,
1470                                         "Bufferless transfer has length %u\n",
1471                                         xfer->len);
1472                 }
1473
1474                 if (!ctlr->ptp_sts_supported) {
1475                         ptp_read_system_postts(xfer->ptp_sts);
1476                         xfer->ptp_sts_word_post = xfer->len;
1477                 }
1478
1479                 trace_spi_transfer_stop(msg, xfer);
1480
1481                 if (msg->status != -EINPROGRESS)
1482                         goto out;
1483
1484                 spi_transfer_delay_exec(xfer);
1485
1486                 if (xfer->cs_change) {
1487                         if (list_is_last(&xfer->transfer_list,
1488                                          &msg->transfers)) {
1489                                 keep_cs = true;
1490                         } else {
1491                                 spi_set_cs(msg->spi, false, false);
1492                                 _spi_transfer_cs_change_delay(msg, xfer);
1493                                 spi_set_cs(msg->spi, true, false);
1494                         }
1495                 }
1496
1497                 msg->actual_length += xfer->len;
1498         }
1499
1500 out:
1501         if (ret != 0 || !keep_cs)
1502                 spi_set_cs(msg->spi, false, false);
1503
1504         if (msg->status == -EINPROGRESS)
1505                 msg->status = ret;
1506
1507         if (msg->status && ctlr->handle_err)
1508                 ctlr->handle_err(ctlr, msg);
1509
1510         spi_finalize_current_message(ctlr);
1511
1512         return ret;
1513 }
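
/*
 * Example (illustrative sketch, not part of this file): a minimal
 * transfer_one() hook as driven by spi_transfer_one_message() above. The
 * foo_* names are hypothetical. Returning 1 tells the core that the
 * transfer is still in flight and that it should wait on
 * ctlr->xfer_completion until the driver calls
 * spi_finalize_current_transfer().
 *
 *	static int foo_spi_transfer_one(struct spi_controller *ctlr,
 *					struct spi_device *spi,
 *					struct spi_transfer *xfer)
 *	{
 *		struct foo_spi *fs = spi_controller_get_devdata(ctlr);
 *
 *		foo_spi_program_hw(fs, xfer);	// hypothetical helper
 *		foo_spi_start(fs);		// IRQ fires when done
 *
 *		return 1;			// in flight, core will wait
 *	}
 */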
1514
1515 /**
1516  * spi_finalize_current_transfer - report completion of a transfer
1517  * @ctlr: the controller reporting completion
1518  *
1519  * Called by SPI drivers using the core transfer_one_message()
1520  * implementation to notify it that the current interrupt driven
1521  * transfer has finished and the next one may be scheduled.
1522  */
1523 void spi_finalize_current_transfer(struct spi_controller *ctlr)
1524 {
1525         complete(&ctlr->xfer_completion);
1526 }
1527 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
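
/*
 * Example (hypothetical driver code): the interrupt handler that pairs with
 * a transfer_one() returning 1, completing the wait in spi_transfer_wait().
 *
 *	static irqreturn_t foo_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		if (!foo_spi_xfer_done(ctlr))	// hypothetical status check
 *			return IRQ_NONE;
 *
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */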
1528
1529 static void spi_idle_runtime_pm(struct spi_controller *ctlr)
1530 {
1531         if (ctlr->auto_runtime_pm) {
1532                 pm_runtime_mark_last_busy(ctlr->dev.parent);
1533                 pm_runtime_put_autosuspend(ctlr->dev.parent);
1534         }
1535 }
1536
1537 /**
1538  * __spi_pump_messages - function which processes spi message queue
1539  * @ctlr: controller to process queue for
1540  * @in_kthread: true if we are in the context of the message pump thread
1541  *
1542  * This function checks if there is any spi message in the queue that
1543  * needs processing and, if so, calls out to the driver to initialize hardware
1544  * and transfer each message.
1545  *
1546  * Note that it is called both from the kthread itself and also from
1547  * inside spi_sync(); the queue extraction handling at the top of the
1548  * function should deal with this safely.
1549  */
1550 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1551 {
1552         struct spi_transfer *xfer;
1553         struct spi_message *msg;
1554         bool was_busy = false;
1555         unsigned long flags;
1556         int ret;
1557
1558         /* Lock queue */
1559         spin_lock_irqsave(&ctlr->queue_lock, flags);
1560
1561         /* Make sure we are not already running a message */
1562         if (ctlr->cur_msg) {
1563                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1564                 return;
1565         }
1566
1567         /* If another context is idling the device then defer */
1568         if (ctlr->idling) {
1569                 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1570                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1571                 return;
1572         }
1573
1574         /* Check if the queue is idle */
1575         if (list_empty(&ctlr->queue) || !ctlr->running) {
1576                 if (!ctlr->busy) {
1577                         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1578                         return;
1579                 }
1580
1581                 /* Defer any non-atomic teardown to the thread */
1582                 if (!in_kthread) {
1583                         if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1584                             !ctlr->unprepare_transfer_hardware) {
1585                                 spi_idle_runtime_pm(ctlr);
1586                                 ctlr->busy = false;
1587                                 trace_spi_controller_idle(ctlr);
1588                         } else {
1589                                 kthread_queue_work(ctlr->kworker,
1590                                                    &ctlr->pump_messages);
1591                         }
1592                         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1593                         return;
1594                 }
1595
1596                 ctlr->busy = false;
1597                 ctlr->idling = true;
1598                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1599
1600                 kfree(ctlr->dummy_rx);
1601                 ctlr->dummy_rx = NULL;
1602                 kfree(ctlr->dummy_tx);
1603                 ctlr->dummy_tx = NULL;
1604                 if (ctlr->unprepare_transfer_hardware &&
1605                     ctlr->unprepare_transfer_hardware(ctlr))
1606                         dev_err(&ctlr->dev,
1607                                 "failed to unprepare transfer hardware\n");
1608                 spi_idle_runtime_pm(ctlr);
1609                 trace_spi_controller_idle(ctlr);
1610
1611                 spin_lock_irqsave(&ctlr->queue_lock, flags);
1612                 ctlr->idling = false;
1613                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1614                 return;
1615         }
1616
1617         /* Extract head of queue */
1618         msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1619         ctlr->cur_msg = msg;
1620
1621         list_del_init(&msg->queue);
1622         if (ctlr->busy)
1623                 was_busy = true;
1624         else
1625                 ctlr->busy = true;
1626         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1627
1628         mutex_lock(&ctlr->io_mutex);
1629
1630         if (!was_busy && ctlr->auto_runtime_pm) {
1631                 ret = pm_runtime_get_sync(ctlr->dev.parent);
1632                 if (ret < 0) {
1633                         pm_runtime_put_noidle(ctlr->dev.parent);
1634                         dev_err(&ctlr->dev, "Failed to power device: %d\n",
1635                                 ret);
1636                         mutex_unlock(&ctlr->io_mutex);
1637                         return;
1638                 }
1639         }
1640
1641         if (!was_busy)
1642                 trace_spi_controller_busy(ctlr);
1643
1644         if (!was_busy && ctlr->prepare_transfer_hardware) {
1645                 ret = ctlr->prepare_transfer_hardware(ctlr);
1646                 if (ret) {
1647                         dev_err(&ctlr->dev,
1648                                 "failed to prepare transfer hardware: %d\n",
1649                                 ret);
1650
1651                         if (ctlr->auto_runtime_pm)
1652                                 pm_runtime_put(ctlr->dev.parent);
1653
1654                         msg->status = ret;
1655                         spi_finalize_current_message(ctlr);
1656
1657                         mutex_unlock(&ctlr->io_mutex);
1658                         return;
1659                 }
1660         }
1661
1662         trace_spi_message_start(msg);
1663
1664         if (ctlr->prepare_message) {
1665                 ret = ctlr->prepare_message(ctlr, msg);
1666                 if (ret) {
1667                         dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1668                                 ret);
1669                         msg->status = ret;
1670                         spi_finalize_current_message(ctlr);
1671                         goto out;
1672                 }
1673                 ctlr->cur_msg_prepared = true;
1674         }
1675
1676         ret = spi_map_msg(ctlr, msg);
1677         if (ret) {
1678                 msg->status = ret;
1679                 spi_finalize_current_message(ctlr);
1680                 goto out;
1681         }
1682
1683         if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1684                 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1685                         xfer->ptp_sts_word_pre = 0;
1686                         ptp_read_system_prets(xfer->ptp_sts);
1687                 }
1688         }
1689
1690         ret = ctlr->transfer_one_message(ctlr, msg);
1691         if (ret) {
1692                 dev_err(&ctlr->dev,
1693                         "failed to transfer one message from queue\n");
1694                 goto out;
1695         }
1696
1697 out:
1698         mutex_unlock(&ctlr->io_mutex);
1699
1700         /* Prod the scheduler in case transfer_one() was busy waiting */
1701         if (!ret)
1702                 cond_resched();
1703 }
1704
1705 /**
1706  * spi_pump_messages - kthread work function which processes spi message queue
1707  * @work: pointer to kthread work struct contained in the controller struct
1708  */
1709 static void spi_pump_messages(struct kthread_work *work)
1710 {
1711         struct spi_controller *ctlr =
1712                 container_of(work, struct spi_controller, pump_messages);
1713
1714         __spi_pump_messages(ctlr, true);
1715 }
1716
1717 /**
1718  * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1719  * @ctlr: Pointer to the spi_controller structure of the driver
1720  * @xfer: Pointer to the transfer being timestamped
1721  * @progress: How many words (not bytes) have been transferred so far
1722  * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1723  *            transfer, for less jitter in time measurement. Only compatible
1724  *            with PIO drivers. If true, must follow up with
1725  *            spi_take_timestamp_post(), otherwise the system will crash.
1726  *            WARNING: for fully predictable results, the CPU frequency must
1727  *            also be under control (governor).
1728  *
1729  * This is a helper for drivers to collect the beginning of the TX timestamp
1730  * for the requested word from the SPI transfer. The frequency with which this
1731  * function must be called (once per word, once for the whole transfer, once
1732  * per batch of words, etc.) is arbitrary as long as the @tx buffer offset is
1733  * greater than or equal to the requested word at the time of the call. The
1734  * timestamp is only taken once, at the first such call. It is assumed that
1735  * the driver advances its @tx buffer pointer monotonically.
1736  */
1737 void spi_take_timestamp_pre(struct spi_controller *ctlr,
1738                             struct spi_transfer *xfer,
1739                             size_t progress, bool irqs_off)
1740 {
1741         if (!xfer->ptp_sts)
1742                 return;
1743
1744         if (xfer->timestamped)
1745                 return;
1746
1747         if (progress > xfer->ptp_sts_word_pre)
1748                 return;
1749
1750         /* Capture the resolution of the timestamp */
1751         xfer->ptp_sts_word_pre = progress;
1752
1753         if (irqs_off) {
1754                 local_irq_save(ctlr->irq_flags);
1755                 preempt_disable();
1756         }
1757
1758         ptp_read_system_prets(xfer->ptp_sts);
1759 }
1760 EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1761
1762 /**
1763  * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1764  * @ctlr: Pointer to the spi_controller structure of the driver
1765  * @xfer: Pointer to the transfer being timestamped
1766  * @progress: How many words (not bytes) have been transferred so far
1767  * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1768  *
1769  * This is a helper for drivers to collect the end of the TX timestamp for
1770  * the requested word from the SPI transfer. Can be called with an arbitrary
1771  * frequency: only the first call where @tx exceeds or is equal to the
1772  * requested word will be timestamped.
1773  */
1774 void spi_take_timestamp_post(struct spi_controller *ctlr,
1775                              struct spi_transfer *xfer,
1776                              size_t progress, bool irqs_off)
1777 {
1778         if (!xfer->ptp_sts)
1779                 return;
1780
1781         if (xfer->timestamped)
1782                 return;
1783
1784         if (progress < xfer->ptp_sts_word_post)
1785                 return;
1786
1787         ptp_read_system_postts(xfer->ptp_sts);
1788
1789         if (irqs_off) {
1790                 local_irq_restore(ctlr->irq_flags);
1791                 preempt_enable();
1792         }
1793
1794         /* Capture the resolution of the timestamp */
1795         xfer->ptp_sts_word_post = progress;
1796
1797         xfer->timestamped = true;
1798 }
1799 EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
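
/*
 * Example (sketch, assuming a PIO driver shifting 32-bit words; foo_* names
 * and the private state "fs" are hypothetical): bracketing each word with
 * the two helpers above. Only the word requested via ptp_sts_word_pre/post
 * is actually timestamped.
 *
 *	const u32 *tx = xfer->tx_buf;
 *	size_t i;
 *
 *	for (i = 0; i < xfer->len / 4; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		foo_spi_write_word(fs, tx[i]);
 *		spi_take_timestamp_post(ctlr, xfer, i, false);
 *	}
 */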
1800
1801 /**
1802  * spi_set_thread_rt - set the controller to pump at realtime priority
1803  * @ctlr: controller to boost priority of
1804  *
1805  * This can be called because the controller requested realtime priority
1806  * (by setting the ->rt value before calling spi_register_controller()) or
1807  * because a device on the bus said that its transfers needed realtime
1808  * priority.
1809  *
1810  * NOTE: at the moment if any device on a bus says it needs realtime then
1811  * the thread will be at realtime priority for all transfers on that
1812  * controller.  If this eventually becomes a problem we may see if we can
1813  * find a way to boost the priority only temporarily during relevant
1814  * transfers.
1815  */
1816 static void spi_set_thread_rt(struct spi_controller *ctlr)
1817 {
1818         dev_info(&ctlr->dev,
1819                 "will run message pump with realtime priority\n");
1820         sched_set_fifo(ctlr->kworker->task);
1821 }
1822
1823 static int spi_init_queue(struct spi_controller *ctlr)
1824 {
1825         ctlr->running = false;
1826         ctlr->busy = false;
1827
1828         ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
1829         if (IS_ERR(ctlr->kworker)) {
1830                 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
1831                 return PTR_ERR(ctlr->kworker);
1832         }
1833
1834         kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1835
1836         /*
1837          * Controller config will indicate if this controller should run the
1838          * message pump with high (realtime) priority to reduce the transfer
1839          * latency on the bus by minimising the delay between a transfer
1840          * request and the scheduling of the message pump thread. Without this
1841          * setting the message pump thread will remain at default priority.
1842          */
1843         if (ctlr->rt)
1844                 spi_set_thread_rt(ctlr);
1845
1846         return 0;
1847 }
1848
1849 /**
1850  * spi_get_next_queued_message() - called by driver to check for queued
1851  * messages
1852  * @ctlr: the controller to check for queued messages
1853  *
1854  * If there are more messages in the queue, the next message is returned from
1855  * this call.
1856  *
1857  * Return: the next message in the queue, else NULL if the queue is empty.
1858  */
1859 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1860 {
1861         struct spi_message *next;
1862         unsigned long flags;
1863
1864         /* get a pointer to the next message, if any */
1865         spin_lock_irqsave(&ctlr->queue_lock, flags);
1866         next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1867                                         queue);
1868         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1869
1870         return next;
1871 }
1872 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
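
/*
 * Example (illustrative): a driver providing its own transfer_one_message()
 * can peek at the queue to decide whether to keep the hardware powered
 * between messages. foo_spi_power_down() is hypothetical.
 *
 *	if (!spi_get_next_queued_message(ctlr))
 *		foo_spi_power_down(fs);
 */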
1873
1874 /**
1875  * spi_finalize_current_message() - the current message is complete
1876  * @ctlr: the controller to return the message to
1877  *
1878  * Called by the driver to notify the core that the message in the front of the
1879  * queue is complete and can be removed from the queue.
1880  */
1881 void spi_finalize_current_message(struct spi_controller *ctlr)
1882 {
1883         struct spi_transfer *xfer;
1884         struct spi_message *mesg;
1885         unsigned long flags;
1886         int ret;
1887
1888         spin_lock_irqsave(&ctlr->queue_lock, flags);
1889         mesg = ctlr->cur_msg;
1890         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1891
1892         if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1893                 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
1894                         ptp_read_system_postts(xfer->ptp_sts);
1895                         xfer->ptp_sts_word_post = xfer->len;
1896                 }
1897         }
1898
1899         if (unlikely(ctlr->ptp_sts_supported))
1900                 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
1901                         WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
1902
1903         spi_unmap_msg(ctlr, mesg);
1904
1905         /*
1906          * In the prepare_message() callback the SPI bus has the opportunity
1907          * to split a transfer into smaller chunks.
1908          *
1909          * Release the split transfers here since spi_map_msg() is done on
1910          * the split transfers.
1911          */
1912         spi_res_release(ctlr, mesg);
1913
1914         if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
1915                 ret = ctlr->unprepare_message(ctlr, mesg);
1916                 if (ret) {
1917                         dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
1918                                 ret);
1919                 }
1920         }
1921
1922         spin_lock_irqsave(&ctlr->queue_lock, flags);
1923         ctlr->cur_msg = NULL;
1924         ctlr->cur_msg_prepared = false;
1925         ctlr->fallback = false;
1926         kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1927         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1928
1929         trace_spi_message_done(mesg);
1930
1931         mesg->state = NULL;
1932         if (mesg->complete)
1933                 mesg->complete(mesg->context);
1934 }
1935 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
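
/*
 * Example (illustrative): a driver replacing transfer_one_message() must
 * finalize each message itself; foo_spi_run_message() is hypothetical.
 *
 *	static int foo_transfer_one_message(struct spi_controller *ctlr,
 *					    struct spi_message *msg)
 *	{
 *		int ret = foo_spi_run_message(ctlr, msg);
 *
 *		msg->status = ret;
 *		spi_finalize_current_message(ctlr);
 *		return ret;
 *	}
 */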
1936
1937 static int spi_start_queue(struct spi_controller *ctlr)
1938 {
1939         unsigned long flags;
1940
1941         spin_lock_irqsave(&ctlr->queue_lock, flags);
1942
1943         if (ctlr->running || ctlr->busy) {
1944                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1945                 return -EBUSY;
1946         }
1947
1948         ctlr->running = true;
1949         ctlr->cur_msg = NULL;
1950         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1951
1952         kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1953
1954         return 0;
1955 }
1956
1957 static int spi_stop_queue(struct spi_controller *ctlr)
1958 {
1959         unsigned long flags;
1960         unsigned limit = 500;
1961         int ret = 0;
1962
1963         spin_lock_irqsave(&ctlr->queue_lock, flags);
1964
1965         /*
1966          * This is a bit lame, but is optimized for the common execution path.
1967          * A wait_queue on the ctlr->busy could be used, but then the common
1968          * execution path (pump_messages) would be required to call wake_up or
1969          * friends on every SPI message. Do this instead.
1970          */
1971         while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
1972                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1973                 usleep_range(10000, 11000);
1974                 spin_lock_irqsave(&ctlr->queue_lock, flags);
1975         }
1976
1977         if (!list_empty(&ctlr->queue) || ctlr->busy)
1978                 ret = -EBUSY;
1979         else
1980                 ctlr->running = false;
1981
1982         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1983
1984         if (ret)
1985                 dev_warn(&ctlr->dev,
1986                          "could not stop message queue\n");
1987
1988         return ret;
1989 }
1990
1991 static int spi_destroy_queue(struct spi_controller *ctlr)
1992 {
1993         int ret;
1994
1995         ret = spi_stop_queue(ctlr);
1996
1997         /*
1998          * kthread_flush_worker will block until all work is done.
1999          * If the reason that stop_queue timed out is that the work will never
2000          * finish, then it does no good to call flush/stop thread, so
2001          * return anyway.
2002          */
2003         if (ret) {
2004                 dev_err(&ctlr->dev, "problem destroying queue\n");
2005                 return ret;
2006         }
2007
2008         kthread_destroy_worker(ctlr->kworker);
2009
2010         return 0;
2011 }
2012
2013 static int __spi_queued_transfer(struct spi_device *spi,
2014                                  struct spi_message *msg,
2015                                  bool need_pump)
2016 {
2017         struct spi_controller *ctlr = spi->controller;
2018         unsigned long flags;
2019
2020         spin_lock_irqsave(&ctlr->queue_lock, flags);
2021
2022         if (!ctlr->running) {
2023                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2024                 return -ESHUTDOWN;
2025         }
2026         msg->actual_length = 0;
2027         msg->status = -EINPROGRESS;
2028
2029         list_add_tail(&msg->queue, &ctlr->queue);
2030         if (!ctlr->busy && need_pump)
2031                 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2032
2033         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2034         return 0;
2035 }
2036
2037 /**
2038  * spi_queued_transfer - transfer function for queued transfers
2039  * @spi: spi device which is requesting transfer
2040  * @msg: spi message which is to be queued to the driver queue
2041  *
2042  * Return: zero on success, else a negative error code.
2043  */
2044 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2045 {
2046         return __spi_queued_transfer(spi, msg, true);
2047 }
2048
2049 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2050 {
2051         int ret;
2052
2053         ctlr->transfer = spi_queued_transfer;
2054         if (!ctlr->transfer_one_message)
2055                 ctlr->transfer_one_message = spi_transfer_one_message;
2056
2057         /* Initialize and start queue */
2058         ret = spi_init_queue(ctlr);
2059         if (ret) {
2060                 dev_err(&ctlr->dev, "problem initializing queue\n");
2061                 goto err_init_queue;
2062         }
2063         ctlr->queued = true;
2064         ret = spi_start_queue(ctlr);
2065         if (ret) {
2066                 dev_err(&ctlr->dev, "problem starting queue\n");
2067                 goto err_start_queue;
2068         }
2069
2070         return 0;
2071
2072 err_start_queue:
2073         spi_destroy_queue(ctlr);
2074 err_init_queue:
2075         return ret;
2076 }
2077
2078 /**
2079  * spi_flush_queue - Send all pending messages in the queue from the caller's
2080  *                   context
2081  * @ctlr: controller to process queue for
2082  *
2083  * This should be used when one wants to ensure all pending messages have been
2084  * sent before doing something. It is used by the spi-mem code to make sure SPI
2085  * memory operations do not preempt regular SPI transfers that have been queued
2086  * before the spi-mem operation.
2087  */
2088 void spi_flush_queue(struct spi_controller *ctlr)
2089 {
2090         if (ctlr->transfer == spi_queued_transfer)
2091                 __spi_pump_messages(ctlr, false);
2092 }
2093
2094 /*-------------------------------------------------------------------------*/
2095
2096 #if defined(CONFIG_OF)
2097 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2098                            struct device_node *nc)
2099 {
2100         u32 value;
2101         int rc;
2102
2103         /* Mode (clock phase/polarity/etc.) */
2104         if (of_property_read_bool(nc, "spi-cpha"))
2105                 spi->mode |= SPI_CPHA;
2106         if (of_property_read_bool(nc, "spi-cpol"))
2107                 spi->mode |= SPI_CPOL;
2108         if (of_property_read_bool(nc, "spi-3wire"))
2109                 spi->mode |= SPI_3WIRE;
2110         if (of_property_read_bool(nc, "spi-lsb-first"))
2111                 spi->mode |= SPI_LSB_FIRST;
2112         if (of_property_read_bool(nc, "spi-cs-high"))
2113                 spi->mode |= SPI_CS_HIGH;
2114
2115         /* Device DUAL/QUAD mode */
2116         if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2117                 switch (value) {
2118                 case 0:
2119                         spi->mode |= SPI_NO_TX;
2120                         break;
2121                 case 1:
2122                         break;
2123                 case 2:
2124                         spi->mode |= SPI_TX_DUAL;
2125                         break;
2126                 case 4:
2127                         spi->mode |= SPI_TX_QUAD;
2128                         break;
2129                 case 8:
2130                         spi->mode |= SPI_TX_OCTAL;
2131                         break;
2132                 default:
2133                         dev_warn(&ctlr->dev,
2134                                 "spi-tx-bus-width %d not supported\n",
2135                                 value);
2136                         break;
2137                 }
2138         }
2139
2140         if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2141                 switch (value) {
2142                 case 0:
2143                         spi->mode |= SPI_NO_RX;
2144                         break;
2145                 case 1:
2146                         break;
2147                 case 2:
2148                         spi->mode |= SPI_RX_DUAL;
2149                         break;
2150                 case 4:
2151                         spi->mode |= SPI_RX_QUAD;
2152                         break;
2153                 case 8:
2154                         spi->mode |= SPI_RX_OCTAL;
2155                         break;
2156                 default:
2157                         dev_warn(&ctlr->dev,
2158                                 "spi-rx-bus-width %d not supported\n",
2159                                 value);
2160                         break;
2161                 }
2162         }
2163
2164         if (spi_controller_is_slave(ctlr)) {
2165                 if (!of_node_name_eq(nc, "slave")) {
2166                         dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2167                                 nc);
2168                         return -EINVAL;
2169                 }
2170                 return 0;
2171         }
2172
2173         /* Device address */
2174         rc = of_property_read_u32(nc, "reg", &value);
2175         if (rc) {
2176                 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2177                         nc, rc);
2178                 return rc;
2179         }
2180         spi->chip_select = value;
2181
2182         /* Device speed */
2183         if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2184                 spi->max_speed_hz = value;
2185
2186         return 0;
2187 }
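
/*
 * Example (illustrative device tree fragment matching the properties parsed
 * above; the node layout is hypothetical, "jedec,spi-nor" is a real binding):
 *
 *	&spi0 {
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;			// chip select 0
 *			spi-max-frequency = <50000000>;
 *			spi-tx-bus-width = <4>;		// SPI_TX_QUAD
 *			spi-rx-bus-width = <4>;		// SPI_RX_QUAD
 *		};
 *	};
 */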
2188
2189 static struct spi_device *
2190 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2191 {
2192         struct spi_device *spi;
2193         int rc;
2194
2195         /* Alloc an spi_device */
2196         spi = spi_alloc_device(ctlr);
2197         if (!spi) {
2198                 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2199                 rc = -ENOMEM;
2200                 goto err_out;
2201         }
2202
2203         /* Select device driver */
2204         rc = of_modalias_node(nc, spi->modalias,
2205                                 sizeof(spi->modalias));
2206         if (rc < 0) {
2207                 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2208                 goto err_out;
2209         }
2210
2211         rc = of_spi_parse_dt(ctlr, spi, nc);
2212         if (rc)
2213                 goto err_out;
2214
2215         /* Store a pointer to the node in the device structure */
2216         of_node_get(nc);
2217         spi->dev.of_node = nc;
2218         spi->dev.fwnode = of_fwnode_handle(nc);
2219
2220         /* Register the new device */
2221         rc = spi_add_device(spi);
2222         if (rc) {
2223                 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2224                 goto err_of_node_put;
2225         }
2226
2227         return spi;
2228
2229 err_of_node_put:
2230         of_node_put(nc);
2231 err_out:
2232         spi_dev_put(spi);
2233         return ERR_PTR(rc);
2234 }
2235
2236 /**
2237  * of_register_spi_devices() - Register child devices onto the SPI bus
2238  * @ctlr:       Pointer to spi_controller device
2239  *
2240  * Registers an spi_device for each child node of the controller node which
2241  * represents a valid SPI slave.
2242  */
2243 static void of_register_spi_devices(struct spi_controller *ctlr)
2244 {
2245         struct spi_device *spi;
2246         struct device_node *nc;
2247
2248         if (!ctlr->dev.of_node)
2249                 return;
2250
2251         for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2252                 if (of_node_test_and_set_flag(nc, OF_POPULATED))
2253                         continue;
2254                 spi = of_register_spi_device(ctlr, nc);
2255                 if (IS_ERR(spi)) {
2256                         dev_warn(&ctlr->dev,
2257                                  "Failed to create SPI device for %pOF\n", nc);
2258                         of_node_clear_flag(nc, OF_POPULATED);
2259                 }
2260         }
2261 }
2262 #else
2263 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2264 #endif
2265
2266 /**
2267  * spi_new_ancillary_device() - Register ancillary SPI device
2268  * @spi:         Pointer to the main SPI device registering the ancillary device
2269  * @chip_select: Chip Select of the ancillary device
2270  *
2271  * Register an ancillary SPI device; for example some chips have a chip-select
2272  * for normal device usage and another one for setup/firmware upload.
2273  *
2274  * This may only be called from the main SPI device's probe routine.
2275  *
2276  * Return: pointer to the new spi_device on success; ERR_PTR() on failure
2277  */
2278 struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2279                                              u8 chip_select)
2280 {
2281         struct spi_device *ancillary;
2282         int rc = 0;
2283
2284         /* Alloc an spi_device */
2285         ancillary = spi_alloc_device(spi->controller);
2286         if (!ancillary) {
2287                 rc = -ENOMEM;
2288                 goto err_out;
2289         }
2290
2291         strlcpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2292
2293         /* Use provided chip-select for ancillary device */
2294         ancillary->chip_select = chip_select;
2295
2296         /* Take over SPI mode/speed from SPI main device */
2297         ancillary->max_speed_hz = spi->max_speed_hz;
2298         ancillary->mode = spi->mode;
2299
2300         /* Register the new device */
2301         rc = spi_add_device_locked(ancillary);
2302         if (rc) {
2303                 dev_err(&spi->dev, "failed to register ancillary device\n");
2304                 goto err_out;
2305         }
2306
2307         return ancillary;
2308
2309 err_out:
2310         spi_dev_put(ancillary);
2311         return ERR_PTR(rc);
2312 }
2313 EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
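
/*
 * Example (hypothetical probe code): registering a second chip select for
 * firmware upload, inheriting mode and speed from the main device.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		struct spi_device *fw_spi;
 *
 *		fw_spi = spi_new_ancillary_device(spi, 1);
 *		if (IS_ERR(fw_spi))
 *			return PTR_ERR(fw_spi);
 *
 *		return foo_upload_firmware(fw_spi);	// hypothetical
 *	}
 */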
2314
2315 #ifdef CONFIG_ACPI
2316 struct acpi_spi_lookup {
2317         struct spi_controller   *ctlr;
2318         u32                     max_speed_hz;
2319         u32                     mode;
2320         int                     irq;
2321         u8                      bits_per_word;
2322         u8                      chip_select;
2323         int                     n;
2324         int                     index;
2325 };
2326
2327 static int acpi_spi_count(struct acpi_resource *ares, void *data)
2328 {
2329         struct acpi_resource_spi_serialbus *sb;
2330         int *count = data;
2331
2332         if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2333                 return 1;
2334
2335         sb = &ares->data.spi_serial_bus;
2336         if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2337                 return 1;
2338
2339         *count = *count + 1;
2340
2341         return 1;
2342 }
2343
2344 /**
2345  * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2346  * @adev:       ACPI device
2347  *
2348  * Return: the number of SpiSerialBus resources in the ACPI device's
2349  * resource list, or a negative error code.
2350  */
2351 int acpi_spi_count_resources(struct acpi_device *adev)
2352 {
2353         LIST_HEAD(r);
2354         int count = 0;
2355         int ret;
2356
2357         ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2358         if (ret < 0)
2359                 return ret;
2360
2361         acpi_dev_free_resource_list(&r);
2362
2363         return count;
2364 }
2365 EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
2366
2367 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2368                                             struct acpi_spi_lookup *lookup)
2369 {
2370         const union acpi_object *obj;
2371
2372         if (!x86_apple_machine)
2373                 return;
2374
2375         if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2376             && obj->buffer.length >= 4)
2377                 lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2378
2379         if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2380             && obj->buffer.length == 8)
2381                 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2382
2383         if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2384             && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2385                 lookup->mode |= SPI_LSB_FIRST;
2386
2387         if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2388             && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2389                 lookup->mode |= SPI_CPOL;
2390
2391         if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2392             && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2393                 lookup->mode |= SPI_CPHA;
2394 }
2395
2396 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
2397
2398 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2399 {
2400         struct acpi_spi_lookup *lookup = data;
2401         struct spi_controller *ctlr = lookup->ctlr;
2402
2403         if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2404                 struct acpi_resource_spi_serialbus *sb;
2405                 acpi_handle parent_handle;
2406                 acpi_status status;
2407
2408                 sb = &ares->data.spi_serial_bus;
2409                 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2410
2411                         if (lookup->index != -1 && lookup->n++ != lookup->index)
2412                                 return 1;
2413
2414                         if (lookup->index == -1 && !ctlr)
2415                                 return -ENODEV;
2416
2417                         status = acpi_get_handle(NULL,
2418                                                  sb->resource_source.string_ptr,
2419                                                  &parent_handle);
2420
2421                         if (ACPI_FAILURE(status))
2422                                 return -ENODEV;
2423
2424                         if (ctlr) {
2425                                 if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2426                                         return -ENODEV;
2427                         } else {
2428                                 struct acpi_device *adev;
2429
2430                                 if (acpi_bus_get_device(parent_handle, &adev))
2431                                         return -ENODEV;
2432
2433                                 ctlr = acpi_spi_find_controller_by_adev(adev);
2434                                 if (!ctlr)
2435                                         return -ENODEV;
2436
2437                                 lookup->ctlr = ctlr;
2438                         }
2439
2440                         /*
2441                          * ACPI DeviceSelection numbering is handled by the
2442                          * host controller driver in Windows and can vary
2443                          * from driver to driver. In Linux we always expect
2444                          * 0 .. max - 1 so we need to ask the driver to
2445                          * translate between the two schemes.
2446                          */
2447                         if (ctlr->fw_translate_cs) {
2448                                 int cs = ctlr->fw_translate_cs(ctlr,
2449                                                 sb->device_selection);
2450                                 if (cs < 0)
2451                                         return cs;
2452                                 lookup->chip_select = cs;
2453                         } else {
2454                                 lookup->chip_select = sb->device_selection;
2455                         }
2456
2457                         lookup->max_speed_hz = sb->connection_speed;
2458                         lookup->bits_per_word = sb->data_bit_length;
2459
2460                         if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2461                                 lookup->mode |= SPI_CPHA;
2462                         if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2463                                 lookup->mode |= SPI_CPOL;
2464                         if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2465                                 lookup->mode |= SPI_CS_HIGH;
2466                 }
2467         } else if (lookup->irq < 0) {
2468                 struct resource r;
2469
2470                 if (acpi_dev_resource_interrupt(ares, 0, &r))
2471                         lookup->irq = r.start;
2472         }
2473
2474         /* Always tell the ACPI core to skip this resource */
2475         return 1;
2476 }
2477
2478 /**
2479  * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2480  * @ctlr: controller to which the spi device belongs
2481  * @adev: ACPI Device for the spi device
2482  * @index: Index of the spi resource inside the ACPI Node
2483  *
2484  * This should be used to allocate a new spi device from an ACPI Node.
2485  * The caller is responsible for calling spi_add_device() to register the spi device.
2486  *
2487  * If ctlr is set to NULL, the controller for the spi device will be looked up
2488  * using the resource.
2489  * If index is set to -1, index is not used.
2490  * Note: If index is -1, ctlr must be set.
2491  *
2492  * Return: a pointer to the new device, or ERR_PTR on error.
2493  */
2494 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2495                                          struct acpi_device *adev,
2496                                          int index)
2497 {
2498         acpi_handle parent_handle = NULL;
2499         struct list_head resource_list;
2500         struct acpi_spi_lookup lookup = {};
2501         struct spi_device *spi;
2502         int ret;
2503
2504         if (!ctlr && index == -1)
2505                 return ERR_PTR(-EINVAL);
2506
2507         lookup.ctlr             = ctlr;
2508         lookup.irq              = -1;
2509         lookup.index            = index;
2510         lookup.n                = 0;
2511
2512         INIT_LIST_HEAD(&resource_list);
2513         ret = acpi_dev_get_resources(adev, &resource_list,
2514                                      acpi_spi_add_resource, &lookup);
2515         acpi_dev_free_resource_list(&resource_list);
2516
2517         if (ret < 0)
2518                 /* found SPI in _CRS but it points to another controller */
2519                 return ERR_PTR(-ENODEV);
2520
2521         if (!lookup.max_speed_hz &&
2522             ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2523             ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
2524                 /* Apple does not use _CRS but nested devices for SPI slaves */
2525                 acpi_spi_parse_apple_properties(adev, &lookup);
2526         }
2527
2528         if (!lookup.max_speed_hz)
2529                 return ERR_PTR(-ENODEV);
2530
2531         spi = spi_alloc_device(lookup.ctlr);
2532         if (!spi) {
2533                 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2534                         dev_name(&adev->dev));
2535                 return ERR_PTR(-ENOMEM);
2536         }
2537
2538         ACPI_COMPANION_SET(&spi->dev, adev);
2539         spi->max_speed_hz       = lookup.max_speed_hz;
2540         spi->mode               |= lookup.mode;
2541         spi->irq                = lookup.irq;
2542         spi->bits_per_word      = lookup.bits_per_word;
2543         spi->chip_select        = lookup.chip_select;
2544
2545         return spi;
2546 }
2547 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
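
/*
 * Example (illustrative): instantiating the first SpiSerialBus resource of
 * an ACPI device, with the controller looked up from the resource itself.
 *
 *	spi = acpi_spi_device_alloc(NULL, adev, 0);
 *	if (IS_ERR(spi))
 *		return PTR_ERR(spi);
 *
 *	ret = spi_add_device(spi);
 *	if (ret)
 *		spi_dev_put(spi);
 */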
2548
2549 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2550                                             struct acpi_device *adev)
2551 {
2552         struct spi_device *spi;
2553
2554         if (acpi_bus_get_status(adev) || !adev->status.present ||
2555             acpi_device_enumerated(adev))
2556                 return AE_OK;
2557
2558         spi = acpi_spi_device_alloc(ctlr, adev, -1);
2559         if (IS_ERR(spi)) {
2560                 if (PTR_ERR(spi) == -ENOMEM)
2561                         return AE_NO_MEMORY;
2562                 else
2563                         return AE_OK;
2564         }
2565
2566         acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2567                           sizeof(spi->modalias));
2568
2569         if (spi->irq < 0)
2570                 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2571
2572         acpi_device_set_enumerated(adev);
2573
2574         adev->power.flags.ignore_parent = true;
2575         if (spi_add_device(spi)) {
2576                 adev->power.flags.ignore_parent = false;
2577                 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2578                         dev_name(&adev->dev));
2579                 spi_dev_put(spi);
2580         }
2581
2582         return AE_OK;
2583 }
2584
2585 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2586                                        void *data, void **return_value)
2587 {
2588         struct spi_controller *ctlr = data;
2589         struct acpi_device *adev;
2590
2591         if (acpi_bus_get_device(handle, &adev))
2592                 return AE_OK;
2593
2594         return acpi_register_spi_device(ctlr, adev);
2595 }
2596
2597 #define SPI_ACPI_ENUMERATE_MAX_DEPTH            32
2598
2599 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2600 {
2601         acpi_status status;
2602         acpi_handle handle;
2603
2604         handle = ACPI_HANDLE(ctlr->dev.parent);
2605         if (!handle)
2606                 return;
2607
2608         status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2609                                      SPI_ACPI_ENUMERATE_MAX_DEPTH,
2610                                      acpi_spi_add_device, NULL, ctlr, NULL);
2611         if (ACPI_FAILURE(status))
2612                 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2613 }
2614 #else
2615 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2616 #endif /* CONFIG_ACPI */
2617
2618 static void spi_controller_release(struct device *dev)
2619 {
2620         struct spi_controller *ctlr;
2621
2622         ctlr = container_of(dev, struct spi_controller, dev);
2623         kfree(ctlr);
2624 }
2625
2626 static struct class spi_master_class = {
2627         .name           = "spi_master",
2628         .owner          = THIS_MODULE,
2629         .dev_release    = spi_controller_release,
2630         .dev_groups     = spi_master_groups,
2631 };
2632
2633 #ifdef CONFIG_SPI_SLAVE
2634 /**
2635  * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2636  *                   controller
2637  * @spi: device used for the current transfer
2638  */
2639 int spi_slave_abort(struct spi_device *spi)
2640 {
2641         struct spi_controller *ctlr = spi->controller;
2642
2643         if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2644                 return ctlr->slave_abort(ctlr);
2645
2646         return -ENOTSUPP;
2647 }
2648 EXPORT_SYMBOL_GPL(spi_slave_abort);
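
/*
 * Example (illustrative): a slave protocol driver can abort a transfer the
 * remote master never completes; priv is a hypothetical driver structure.
 *
 *	if (!wait_for_completion_timeout(&priv->done, HZ))
 *		spi_slave_abort(priv->spi);
 */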
2649
2650 static int match_true(struct device *dev, void *data)
2651 {
2652         return 1;
2653 }
2654
2655 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2656                           char *buf)
2657 {
2658         struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2659                                                    dev);
2660         struct device *child;
2661
2662         child = device_find_child(&ctlr->dev, NULL, match_true);
2663         return sprintf(buf, "%s\n",
2664                        child ? to_spi_device(child)->modalias : NULL);
2665 }
2666
2667 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2668                            const char *buf, size_t count)
2669 {
2670         struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2671                                                    dev);
2672         struct spi_device *spi;
2673         struct device *child;
2674         char name[32];
2675         int rc;
2676
2677         rc = sscanf(buf, "%31s", name);
2678         if (rc != 1 || !name[0])
2679                 return -EINVAL;
2680
2681         child = device_find_child(&ctlr->dev, NULL, match_true);
2682         if (child) {
2683                 /* Remove registered slave */
2684                 device_unregister(child);
2685                 put_device(child);
2686         }
2687
2688         if (strcmp(name, "(null)")) {
2689                 /* Register new slave */
2690                 spi = spi_alloc_device(ctlr);
2691                 if (!spi)
2692                         return -ENOMEM;
2693
2694                 strlcpy(spi->modalias, name, sizeof(spi->modalias));
2695
2696                 rc = spi_add_device(spi);
2697                 if (rc) {
2698                         spi_dev_put(spi);
2699                         return rc;
2700                 }
2701         }
2702
2703         return count;
2704 }
2705
2706 static DEVICE_ATTR_RW(slave);
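
/*
 * Example (from userspace): the "slave" attribute above binds or unbinds a
 * slave protocol handler at runtime, e.g. using the in-tree spi-slave-time
 * handler on bus spi0:
 *
 *	echo spi-slave-time > /sys/class/spi_slave/spi0/slave
 *	echo "(null)" > /sys/class/spi_slave/spi0/slave
 */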
2707
2708 static struct attribute *spi_slave_attrs[] = {
2709         &dev_attr_slave.attr,
2710         NULL,
2711 };
2712
2713 static const struct attribute_group spi_slave_group = {
2714         .attrs = spi_slave_attrs,
2715 };
2716
2717 static const struct attribute_group *spi_slave_groups[] = {
2718         &spi_controller_statistics_group,
2719         &spi_slave_group,
2720         NULL,
2721 };
2722
2723 static struct class spi_slave_class = {
2724         .name           = "spi_slave",
2725         .owner          = THIS_MODULE,
2726         .dev_release    = spi_controller_release,
2727         .dev_groups     = spi_slave_groups,
2728 };
2729 #else
2730 extern struct class spi_slave_class;    /* dummy */
2731 #endif
2732
2733 /**
2734  * __spi_alloc_controller - allocate an SPI master or slave controller
2735  * @dev: the controller, possibly using the platform_bus
2736  * @size: how much zeroed driver-private data to allocate; the pointer to this
2737  *      memory is in the driver_data field of the returned device, accessible
2738  *      with spi_controller_get_devdata(); the memory is cacheline aligned;
2739  *      drivers granting DMA access to portions of their private data need to
2740  *      round up @size using ALIGN(size, dma_get_cache_alignment()).
2741  * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2742  *      slave (true) controller
2743  * Context: can sleep
2744  *
2745  * This call is used only by SPI controller drivers, which are the
2746  * only ones directly touching chip registers.  It's how they allocate
2747  * an spi_controller structure, prior to calling spi_register_controller().
2748  *
2749  * This must be called from context that can sleep.
2750  *
2751  * The caller is responsible for assigning the bus number and initializing the
2752  * controller's methods before calling spi_register_controller(); and (after
2753  * errors adding the device) calling spi_controller_put() to prevent a memory
2754  * leak.
2755  *
2756  * Return: the SPI controller structure on success, else NULL.
2757  */
2758 struct spi_controller *__spi_alloc_controller(struct device *dev,
2759                                               unsigned int size, bool slave)
2760 {
2761         struct spi_controller   *ctlr;
2762         size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
2763
2764         if (!dev)
2765                 return NULL;
2766
2767         ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
2768         if (!ctlr)
2769                 return NULL;
2770
2771         device_initialize(&ctlr->dev);
2772         INIT_LIST_HEAD(&ctlr->queue);
2773         spin_lock_init(&ctlr->queue_lock);
2774         spin_lock_init(&ctlr->bus_lock_spinlock);
2775         mutex_init(&ctlr->bus_lock_mutex);
2776         mutex_init(&ctlr->io_mutex);
2777         mutex_init(&ctlr->add_lock);
2778         ctlr->bus_num = -1;
2779         ctlr->num_chipselect = 1;
2780         ctlr->slave = slave;
2781         if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2782                 ctlr->dev.class = &spi_slave_class;
2783         else
2784                 ctlr->dev.class = &spi_master_class;
2785         ctlr->dev.parent = dev;
2786         pm_suspend_ignore_children(&ctlr->dev, true);
2787         spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
2788
2789         return ctlr;
2790 }
2791 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
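
/*
 * Example (hypothetical controller probe): drivers normally reach this
 * function through the spi_alloc_master()/spi_alloc_slave() wrappers.
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
 *	if (!ctlr)
 *		return -ENOMEM;
 *
 *	fs = spi_controller_get_devdata(ctlr);
 *	// any later error path must call spi_controller_put(ctlr)
 */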
2792
2793 static void devm_spi_release_controller(struct device *dev, void *ctlr)
2794 {
2795         spi_controller_put(*(struct spi_controller **)ctlr);
2796 }
2797
2798 /**
2799  * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
2800  * @dev: physical device of SPI controller
2801  * @size: how much zeroed driver-private data to allocate
2802  * @slave: whether to allocate an SPI master (false) or SPI slave (true)
2803  * Context: can sleep
2804  *
2805  * Allocate an SPI controller and automatically release a reference on it
2806  * when @dev is unbound from its driver.  Drivers are thus relieved from
2807  * having to call spi_controller_put().
2808  *
2809  * The arguments to this function are identical to __spi_alloc_controller().
2810  *
2811  * Return: the SPI controller structure on success, else NULL.
2812  */
2813 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
2814                                                    unsigned int size,
2815                                                    bool slave)
2816 {
2817         struct spi_controller **ptr, *ctlr;
2818
2819         ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
2820                            GFP_KERNEL);
2821         if (!ptr)
2822                 return NULL;
2823
2824         ctlr = __spi_alloc_controller(dev, size, slave);
2825         if (ctlr) {
2826                 ctlr->devm_allocated = true;
2827                 *ptr = ctlr;
2828                 devres_add(dev, ptr);
2829         } else {
2830                 devres_free(ptr);
2831         }
2832
2833         return ctlr;
2834 }
2835 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
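
/*
 * Example (hypothetical): the devm_ wrappers make the explicit put from the
 * previous sketch unnecessary.
 *
 *	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
 *	if (!ctlr)
 *		return -ENOMEM;
 */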
2836
2837 #ifdef CONFIG_OF
2838 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
2839 {
2840         int nb, i, *cs;
2841         struct device_node *np = ctlr->dev.of_node;
2842
2843         if (!np)
2844                 return 0;
2845
2846         nb = of_gpio_named_count(np, "cs-gpios");
2847         ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2848
2849         /* Return error only for an incorrectly formed cs-gpios property */
2850         if (nb == 0 || nb == -ENOENT)
2851                 return 0;
2852         else if (nb < 0)
2853                 return nb;
2854
2855         cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
2856                           GFP_KERNEL);
2857         ctlr->cs_gpios = cs;
2858
2859         if (!ctlr->cs_gpios)
2860                 return -ENOMEM;
2861
2862         for (i = 0; i < ctlr->num_chipselect; i++)
2863                 cs[i] = -ENOENT;
2864
2865         for (i = 0; i < nb; i++)
2866                 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
2867
2868         return 0;
2869 }
2870 #else
2871 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
2872 {
2873         return 0;
2874 }
2875 #endif
2876
2877 /**
2878  * spi_get_gpio_descs() - grab chip select GPIOs for the master
2879  * @ctlr: The SPI master to grab GPIO descriptors for
2880  */
2881 static int spi_get_gpio_descs(struct spi_controller *ctlr)
2882 {
2883         int nb, i;
2884         struct gpio_desc **cs;
2885         struct device *dev = &ctlr->dev;
2886         unsigned long native_cs_mask = 0;
2887         unsigned int num_cs_gpios = 0;
2888
2889         nb = gpiod_count(dev, "cs");
2890         if (nb < 0) {
2891                 /* No GPIOs at all is fine, else return the error */
2892                 if (nb == -ENOENT)
2893                         return 0;
2894                 return nb;
2895         }
2896
2897         ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2898
2899         cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2900                           GFP_KERNEL);
2901         if (!cs)
2902                 return -ENOMEM;
2903         ctlr->cs_gpiods = cs;
2904
2905         for (i = 0; i < nb; i++) {
2906                 /*
2907                  * Most chipselects are active low; the inverted
2908                  * semantics are handled by special quirks in gpiolib,
2909                  * so initializing them as GPIOD_OUT_LOW here means
2910                  * "unasserted"; in most cases this will drive the physical
2911                  * line high.
2912                  */
2913                 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
2914                                                       GPIOD_OUT_LOW);
2915                 if (IS_ERR(cs[i]))
2916                         return PTR_ERR(cs[i]);
2917
2918                 if (cs[i]) {
2919                         /*
2920                          * If we find a CS GPIO, name it after the device and
2921                          * chip select line.
2922                          */
2923                         char *gpioname;
2924
2925                         gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
2926                                                   dev_name(dev), i);
2927                         if (!gpioname)
2928                                 return -ENOMEM;
2929                         gpiod_set_consumer_name(cs[i], gpioname);
2930                         num_cs_gpios++;
2931                         continue;
2932                 }
2933
2934                 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
2935                         dev_err(dev, "Invalid native chip select %d\n", i);
2936                         return -EINVAL;
2937                 }
2938                 native_cs_mask |= BIT(i);
2939         }
2940
2941         ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
2942
2943         if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
2944             ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
2945                 dev_err(dev, "No unused native chip select available\n");
2946                 return -EINVAL;
2947         }
2948
2949         return 0;
2950 }
2951
2952 static int spi_controller_check_ops(struct spi_controller *ctlr)
2953 {
2954         /*
2955          * The controller may implement only the high-level SPI-memory-like
2956          * operations if it does not support regular SPI transfers, and this is
2957          * a valid use case.
2958          * If ->mem_ops is NULL, we request that at least one of the
2959          * ->transfer_xxx() methods be implemented.
2960          */
2961         if (ctlr->mem_ops) {
2962                 if (!ctlr->mem_ops->exec_op)
2963                         return -EINVAL;
2964         } else if (!ctlr->transfer && !ctlr->transfer_one &&
2965                    !ctlr->transfer_one_message) {
2966                 return -EINVAL;
2967         }
2968
2969         return 0;
2970 }
2971
2972 /**
2973  * spi_register_controller - register SPI master or slave controller
2974  * @ctlr: initialized master, originally from spi_alloc_master() or
2975  *      spi_alloc_slave()
2976  * Context: can sleep
2977  *
2978  * SPI controllers connect to their drivers using some non-SPI bus,
2979  * such as the platform bus.  The final stage of probe() in that code
2980  * includes calling spi_register_controller() to hook up to this SPI bus glue.
2981  *
2982  * SPI controllers use board specific (often SOC specific) bus numbers,
2983  * and board-specific addressing for SPI devices combines those numbers
2984  * with chip select numbers.  Since SPI does not directly support dynamic
2985  * device identification, boards need configuration tables telling which
2986  * chip is at which address.
2987  *
2988  * This must be called from context that can sleep.  It returns zero on
2989  * success, else a negative error code (dropping the controller's refcount).
2990  * After a successful return, the caller is responsible for calling
2991  * spi_unregister_controller().
2992  *
2993  * Return: zero on success, else a negative error code.
2994  */
2995 int spi_register_controller(struct spi_controller *ctlr)
2996 {
2997         struct device           *dev = ctlr->dev.parent;
2998         struct boardinfo        *bi;
2999         int                     status;
3000         int                     id, first_dynamic;
3001
3002         if (!dev)
3003                 return -ENODEV;
3004
3005         /*
3006          * Make sure all necessary hooks are implemented before registering
3007          * the SPI controller.
3008          */
3009         status = spi_controller_check_ops(ctlr);
3010         if (status)
3011                 return status;
3012
3013         if (ctlr->bus_num >= 0) {
3014                 /* controllers with a fixed bus number must claim that number */
3015                 mutex_lock(&board_lock);
3016                 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
3017                         ctlr->bus_num + 1, GFP_KERNEL);
3018                 mutex_unlock(&board_lock);
3019                 if (WARN(id < 0, "couldn't get idr"))
3020                         return id == -ENOSPC ? -EBUSY : id;
3021                 ctlr->bus_num = id;
3022         } else if (ctlr->dev.of_node) {
3023                 /* allocate dynamic bus number using Linux idr */
3024                 id = of_alias_get_id(ctlr->dev.of_node, "spi");
3025                 if (id >= 0) {
3026                         ctlr->bus_num = id;
3027                         mutex_lock(&board_lock);
3028                         id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
3029                                        ctlr->bus_num + 1, GFP_KERNEL);
3030                         mutex_unlock(&board_lock);
3031                         if (WARN(id < 0, "couldn't get idr"))
3032                                 return id == -ENOSPC ? -EBUSY : id;
3033                 }
3034         }
3035         if (ctlr->bus_num < 0) {
3036                 first_dynamic = of_alias_get_highest_id("spi");
3037                 if (first_dynamic < 0)
3038                         first_dynamic = 0;
3039                 else
3040                         first_dynamic++;
3041
3042                 mutex_lock(&board_lock);
3043                 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
3044                                0, GFP_KERNEL);
3045                 mutex_unlock(&board_lock);
3046                 if (WARN(id < 0, "couldn't get idr"))
3047                         return id;
3048                 ctlr->bus_num = id;
3049         }
3050         ctlr->bus_lock_flag = 0;
3051         init_completion(&ctlr->xfer_completion);
3052         if (!ctlr->max_dma_len)
3053                 ctlr->max_dma_len = INT_MAX;
3054
3055         /*
3056          * Register the device, then userspace will see it.
3057          * Registration fails if the bus ID is in use.
3058          */
3059         dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3060
3061         if (!spi_controller_is_slave(ctlr)) {
3062                 if (ctlr->use_gpio_descriptors) {
3063                         status = spi_get_gpio_descs(ctlr);
3064                         if (status)
3065                                 goto free_bus_id;
3066                         /*
3067                          * A controller using GPIO descriptors always
3068                          * supports SPI_CS_HIGH if need be.
3069                          */
3070                         ctlr->mode_bits |= SPI_CS_HIGH;
3071                 } else {
3072                         /* Legacy code path for GPIOs from DT */
3073                         status = of_spi_get_gpio_numbers(ctlr);
3074                         if (status)
3075                                 goto free_bus_id;
3076                 }
3077         }
3078
3079         /*
3080          * Even if it's just one always-selected device, there must
3081          * be at least one chipselect.
3082          */
3083         if (!ctlr->num_chipselect) {
3084                 status = -EINVAL;
3085                 goto free_bus_id;
3086         }
3087
3088         status = device_add(&ctlr->dev);
3089         if (status < 0)
3090                 goto free_bus_id;
3091         dev_dbg(dev, "registered %s %s\n",
3092                         spi_controller_is_slave(ctlr) ? "slave" : "master",
3093                         dev_name(&ctlr->dev));
3094
3095         /*
3096          * If we're using a queued driver, start the queue. Note that we don't
3097          * need the queueing logic if the driver only supports high-level
3098          * memory operations.
3099          */
3100         if (ctlr->transfer) {
3101                 dev_info(dev, "controller is unqueued, this is deprecated\n");
3102         } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3103                 status = spi_controller_initialize_queue(ctlr);
3104                 if (status) {
3105                         device_del(&ctlr->dev);
3106                         goto free_bus_id;
3107                 }
3108         }
3109         /* add statistics */
3110         spin_lock_init(&ctlr->statistics.lock);
3111
3112         mutex_lock(&board_lock);
3113         list_add_tail(&ctlr->list, &spi_controller_list);
3114         list_for_each_entry(bi, &board_list, list)
3115                 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3116         mutex_unlock(&board_lock);
3117
3118         /* Register devices from the device tree and ACPI */
3119         of_register_spi_devices(ctlr);
3120         acpi_register_spi_devices(ctlr);
3121         return status;
3122
3123 free_bus_id:
3124         mutex_lock(&board_lock);
3125         idr_remove(&spi_master_idr, ctlr->bus_num);
3126         mutex_unlock(&board_lock);
3127         return status;
3128 }
3129 EXPORT_SYMBOL_GPL(spi_register_controller);
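
/*
 * Usage sketch (illustrative only; the foo_* names and the
 * foo_transfer_one() callback are hypothetical): a typical platform
 * driver registers its controller from probe() roughly like this:
 *
 *	static int foo_spi_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *
 *		ctlr = devm_spi_alloc_master(&pdev->dev,
 *					     sizeof(struct foo_priv));
 *		if (!ctlr)
 *			return -ENOMEM;
 *
 *		ctlr->dev.of_node = pdev->dev.of_node;
 *		ctlr->num_chipselect = 4;
 *		ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
 *		ctlr->transfer_one = foo_transfer_one;
 *
 *		return devm_spi_register_controller(&pdev->dev, ctlr);
 *	}
 */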
3130
3131 static void devm_spi_unregister(void *ctlr)
3132 {
3133         spi_unregister_controller(ctlr);
3134 }
3135
3136 /**
3137  * devm_spi_register_controller - register managed SPI master or slave
3138  *      controller
3139  * @dev:    device managing SPI controller
3140  * @ctlr: initialized controller, originally from spi_alloc_master() or
3141  *      spi_alloc_slave()
3142  * Context: can sleep
3143  *
3144  * Register an SPI controller as with spi_register_controller(); it will
3145  * automatically be unregistered and freed when the managing device goes away.
3146  *
3147  * Return: zero on success, else a negative error code.
3148  */
3149 int devm_spi_register_controller(struct device *dev,
3150                                  struct spi_controller *ctlr)
3151 {
3152         int ret;
3153
3154         ret = spi_register_controller(ctlr);
3155         if (ret)
3156                 return ret;
3157
3158         return devm_add_action_or_reset(dev, devm_spi_unregister, ctlr);
3159 }
3160 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
3161
3162 static int __unregister(struct device *dev, void *null)
3163 {
3164         spi_unregister_device(to_spi_device(dev));
3165         return 0;
3166 }
3167
3168 /**
3169  * spi_unregister_controller - unregister SPI master or slave controller
3170  * @ctlr: the controller being unregistered
3171  * Context: can sleep
3172  *
3173  * This call is used only by SPI controller drivers, which are the
3174  * only ones directly touching chip registers.
3175  *
3176  * This must be called from context that can sleep.
3177  *
3178  * Note that this function also drops a reference to the controller.
3179  */
3180 void spi_unregister_controller(struct spi_controller *ctlr)
3181 {
3182         struct spi_controller *found;
3183         int id = ctlr->bus_num;
3184
3185         /* Prevent addition of new devices, unregister existing ones */
3186         if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3187                 mutex_lock(&ctlr->add_lock);
3188
3189         device_for_each_child(&ctlr->dev, NULL, __unregister);
3190
3191         /* First make sure that this controller was ever added */
3192         mutex_lock(&board_lock);
3193         found = idr_find(&spi_master_idr, id);
3194         mutex_unlock(&board_lock);
3195         if (ctlr->queued) {
3196                 if (spi_destroy_queue(ctlr))
3197                         dev_err(&ctlr->dev, "queue remove failed\n");
3198         }
3199         mutex_lock(&board_lock);
3200         list_del(&ctlr->list);
3201         mutex_unlock(&board_lock);
3202
3203         device_del(&ctlr->dev);
3204
3205         /* free bus id */
3206         mutex_lock(&board_lock);
3207         if (found == ctlr)
3208                 idr_remove(&spi_master_idr, id);
3209         mutex_unlock(&board_lock);
3210
3211         if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3212                 mutex_unlock(&ctlr->add_lock);
3213
3214         /* Release the last reference on the controller if its driver
3215          * has not yet been converted to devm_spi_alloc_master/slave().
3216          */
3217         if (!ctlr->devm_allocated)
3218                 put_device(&ctlr->dev);
3219 }
3220 EXPORT_SYMBOL_GPL(spi_unregister_controller);
3221
3222 int spi_controller_suspend(struct spi_controller *ctlr)
3223 {
3224         int ret;
3225
3226         /* Basically a no-op for non-queued controllers */
3227         if (!ctlr->queued)
3228                 return 0;
3229
3230         ret = spi_stop_queue(ctlr);
3231         if (ret)
3232                 dev_err(&ctlr->dev, "queue stop failed\n");
3233
3234         return ret;
3235 }
3236 EXPORT_SYMBOL_GPL(spi_controller_suspend);
3237
3238 int spi_controller_resume(struct spi_controller *ctlr)
3239 {
3240         int ret;
3241
3242         if (!ctlr->queued)
3243                 return 0;
3244
3245         ret = spi_start_queue(ctlr);
3246         if (ret)
3247                 dev_err(&ctlr->dev, "queue restart failed\n");
3248
3249         return ret;
3250 }
3251 EXPORT_SYMBOL_GPL(spi_controller_resume);
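
/*
 * Usage sketch (illustrative only; assumes drvdata was set to the
 * controller and that a hypothetical foo_hw_powerdown() helper exists):
 * a controller driver wires these helpers into its system sleep
 * callbacks, stopping the queue before quiescing the hardware:
 *
 *	static int foo_spi_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *		int ret;
 *
 *		ret = spi_controller_suspend(ctlr);
 *		if (ret)
 *			return ret;
 *		return foo_hw_powerdown(ctlr);
 *	}
 */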
3252
3253 /*-------------------------------------------------------------------------*/
3254
3255 /* Core methods for spi_message alterations */
3256
3257 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3258                                             struct spi_message *msg,
3259                                             void *res)
3260 {
3261         struct spi_replaced_transfers *rxfer = res;
3262         size_t i;
3263
3264         /* call extra callback if requested */
3265         if (rxfer->release)
3266                 rxfer->release(ctlr, msg, res);
3267
3268         /* insert replaced transfers back into the message */
3269         list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3270
3271         /* remove the formerly inserted entries */
3272         for (i = 0; i < rxfer->inserted; i++)
3273                 list_del(&rxfer->inserted_transfers[i].transfer_list);
3274 }
3275
3276 /**
3277  * spi_replace_transfers - replace transfers with several transfers
3278  *                         and register change with spi_message.resources
3279  * @msg:           the spi_message we work upon
3280  * @xfer_first:    the first spi_transfer we want to replace
3281  * @remove:        number of transfers to remove
3282  * @insert:        the number of transfers we want to insert instead
3283  * @release:       extra release code necessary in some circumstances
3284  * @extradatasize: extra data to allocate (with alignment guarantees
3285  *                 of struct @spi_transfer)
3286  * @gfp:           gfp flags
3287  *
3288  * Return: pointer to @spi_replaced_transfers,
3289  *         PTR_ERR(...) in case of errors.
3290  */
3291 static struct spi_replaced_transfers *spi_replace_transfers(
3292         struct spi_message *msg,
3293         struct spi_transfer *xfer_first,
3294         size_t remove,
3295         size_t insert,
3296         spi_replaced_release_t release,
3297         size_t extradatasize,
3298         gfp_t gfp)
3299 {
3300         struct spi_replaced_transfers *rxfer;
3301         struct spi_transfer *xfer;
3302         size_t i;
3303
3304         /* allocate the structure using spi_res */
3305         rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3306                               struct_size(rxfer, inserted_transfers, insert)
3307                               + extradatasize,
3308                               gfp);
3309         if (!rxfer)
3310                 return ERR_PTR(-ENOMEM);
3311
3312         /* the release code to invoke before running the generic release */
3313         rxfer->release = release;
3314
3315         /* assign extradata */
3316         if (extradatasize)
3317                 rxfer->extradata =
3318                         &rxfer->inserted_transfers[insert];
3319
3320         /* init the replaced_transfers list */
3321         INIT_LIST_HEAD(&rxfer->replaced_transfers);
3322
3323         /*
3324          * Assign the list_entry after which we should reinsert
3325          * the @replaced_transfers - it may be spi_message.transfers!
3326          */
3327         rxfer->replaced_after = xfer_first->transfer_list.prev;
3328
3329         /* remove the requested number of transfers */
3330         for (i = 0; i < remove; i++) {
3331                 /*
3332                  * If the entry after replaced_after is msg->transfers
3333                  * then we have been requested to remove more transfers
3334                  * than are in the list.
3335                  */
3336                 if (rxfer->replaced_after->next == &msg->transfers) {
3337                         dev_err(&msg->spi->dev,
3338                                 "requested to remove more spi_transfers than are available\n");
3339                         /* insert replaced transfers back into the message */
3340                         list_splice(&rxfer->replaced_transfers,
3341                                     rxfer->replaced_after);
3342
3343                         /* free the spi_replaced_transfers structure */
3344                         spi_res_free(rxfer);
3345
3346                         /* and return with an error */
3347                         return ERR_PTR(-EINVAL);
3348                 }
3349
3350                 /*
3351                  * Remove the entry after replaced_after from list of
3352                  * transfers and add it to list of replaced_transfers.
3353                  */
3354                 list_move_tail(rxfer->replaced_after->next,
3355                                &rxfer->replaced_transfers);
3356         }
3357
3358         /*
3359          * Create copies of the given xfer with identical settings,
3360          * based on the first transfer to be removed.
3361          */
3362         for (i = 0; i < insert; i++) {
3363                 /* we need to run in reverse order */
3364                 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3365
3366                 /* copy all spi_transfer data */
3367                 memcpy(xfer, xfer_first, sizeof(*xfer));
3368
3369                 /* add to list */
3370                 list_add(&xfer->transfer_list, rxfer->replaced_after);
3371
3372                 /* clear cs_change and delay for all but the last */
3373                 if (i) {
3374                         xfer->cs_change = false;
3375                         xfer->delay.value = 0;
3376                 }
3377         }
3378
3379         /* set up inserted */
3380         rxfer->inserted = insert;
3381
3382         /* and register it with spi_res/spi_message */
3383         spi_res_add(msg, rxfer);
3384
3385         return rxfer;
3386 }
3387
3388 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3389                                         struct spi_message *msg,
3390                                         struct spi_transfer **xferp,
3391                                         size_t maxsize,
3392                                         gfp_t gfp)
3393 {
3394         struct spi_transfer *xfer = *xferp, *xfers;
3395         struct spi_replaced_transfers *srt;
3396         size_t offset;
3397         size_t count, i;
3398
3399         /* calculate how many we have to replace */
3400         count = DIV_ROUND_UP(xfer->len, maxsize);
3401
3402         /* create replacement */
3403         srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
3404         if (IS_ERR(srt))
3405                 return PTR_ERR(srt);
3406         xfers = srt->inserted_transfers;
3407
3408         /*
3409          * Now handle each of those newly inserted spi_transfers.
3410          * Note that the replacement spi_transfers are all preset
3411          * to the same values as *xferp, so tx_buf, rx_buf and len
3412          * are all identical (as well as most others),
3413          * so we just have to fix up len and the pointers.
3414          *
3415          * This also includes support for the deprecated
3416          * spi_message.is_dma_mapped interface.
3417          */
3418
3419         /*
3420          * The first transfer just needs the length modified, so we
3421          * run it outside the loop.
3422          */
3423         xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3424
3425         /* all the others need rx_buf/tx_buf also set */
3426         for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3427                 /* update rx_buf, tx_buf and dma */
3428                 if (xfers[i].rx_buf)
3429                         xfers[i].rx_buf += offset;
3430                 if (xfers[i].rx_dma)
3431                         xfers[i].rx_dma += offset;
3432                 if (xfers[i].tx_buf)
3433                         xfers[i].tx_buf += offset;
3434                 if (xfers[i].tx_dma)
3435                         xfers[i].tx_dma += offset;
3436
3437                 /* update length */
3438                 xfers[i].len = min(maxsize, xfers[i].len - offset);
3439         }
3440
3441         /*
3442          * We set up xferp to the last entry we have inserted,
3443          * so that we skip those already split transfers.
3444          */
3445         *xferp = &xfers[count - 1];
3446
3447         /* increment statistics counters */
3448         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3449                                        transfers_split_maxsize);
3450         SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
3451                                        transfers_split_maxsize);
3452
3453         return 0;
3454 }
3455
3456 /**
3457  * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3458  *                               when an individual transfer exceeds a
3459  *                               certain size
3460  * @ctlr:    the @spi_controller for this transfer
3461  * @msg:   the @spi_message to transform
3462  * @maxsize:  the maximum length any individual transfer may have
3463  * @gfp: GFP allocation flags
3464  *
3465  * Return: zero on success, else a negative error code
3466  */
3467 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3468                                 struct spi_message *msg,
3469                                 size_t maxsize,
3470                                 gfp_t gfp)
3471 {
3472         struct spi_transfer *xfer;
3473         int ret;
3474
3475         /*
3476          * Iterate over the transfer_list,
3477          * but note that xfer is advanced to the last transfer inserted
3478          * to avoid checking sizes again unnecessarily (xfer may also
3479          * belong to a different list by the time the replacement has
3480          * happened).
3481          */
3482         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3483                 if (xfer->len > maxsize) {
3484                         ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3485                                                            maxsize, gfp);
3486                         if (ret)
3487                                 return ret;
3488                 }
3489         }
3490
3491         return 0;
3492 }
3493 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
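
/*
 * Usage sketch (illustrative only; foo_prepare_message() is hypothetical):
 * a controller whose FIFO cannot handle transfers above 64 KiB might split
 * messages from its prepare_message() callback:
 *
 *	static int foo_prepare_message(struct spi_controller *ctlr,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, SZ_64K,
 *						   GFP_KERNEL);
 *	}
 */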
3494
3495 /*-------------------------------------------------------------------------*/
3496
3497 /* Core methods for SPI controller protocol drivers.  Some of the
3498  * other core methods are currently defined as inline functions.
3499  */
3500
3501 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3502                                         u8 bits_per_word)
3503 {
3504         if (ctlr->bits_per_word_mask) {
3505                 /* Only 32 bits fit in the mask */
3506                 if (bits_per_word > 32)
3507                         return -EINVAL;
3508                 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3509                         return -EINVAL;
3510         }
3511
3512         return 0;
3513 }
3514
3515 /**
3516  * spi_setup - setup SPI mode and clock rate
3517  * @spi: the device whose settings are being modified
3518  * Context: can sleep, and no requests are queued to the device
3519  *
3520  * SPI protocol drivers may need to update the transfer mode if the
3521  * device doesn't work with its default.  They may likewise need
3522  * to update clock rates or word sizes from initial values.  This function
3523  * changes those settings, and must be called from a context that can sleep.
3524  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3525  * effect the next time the device is selected and data is transferred to
3526  * or from it.  When this function returns, the spi device is deselected.
3527  *
3528  * Note that this call will fail if the protocol driver specifies an option
3529  * that the underlying controller or its driver does not support.  For
3530  * example, not all hardware supports wire transfers using nine bit words,
3531  * LSB-first wire encoding, or active-high chipselects.
3532  *
3533  * Return: zero on success, else a negative error code.
3534  */
3535 int spi_setup(struct spi_device *spi)
3536 {
3537         unsigned        bad_bits, ugly_bits;
3538         int             status;
3539
3540         /*
3541          * Check the mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO
3542          * being set at the same time.
3543          */
3544         if ((hweight_long(spi->mode &
3545                 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3546             (hweight_long(spi->mode &
3547                 (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3548                 dev_err(&spi->dev,
3549                 "setup: cannot select any two of dual, quad and no-rx/tx at the same time\n");
3550                 return -EINVAL;
3551         }
3552         /* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
3553         if ((spi->mode & SPI_3WIRE) && (spi->mode &
3554                 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3555                  SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3556                 return -EINVAL;
3557         /*
3558          * Help drivers fail *cleanly* when they need options
3559          * that aren't supported with their current controller.
3560          * SPI_CS_WORD has a fallback software implementation,
3561          * so it is ignored here.
3562          */
3563         bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3564                                  SPI_NO_TX | SPI_NO_RX);
3565         /*
3566          * Nothing prevents us from working with active-high CS
3567          * if it is driven by a GPIO.
3568          */
3569         if (gpio_is_valid(spi->cs_gpio))
3570                 bad_bits &= ~SPI_CS_HIGH;
3571         ugly_bits = bad_bits &
3572                     (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3573                      SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3574         if (ugly_bits) {
3575                 dev_warn(&spi->dev,
3576                          "setup: ignoring unsupported mode bits %x\n",
3577                          ugly_bits);
3578                 spi->mode &= ~ugly_bits;
3579                 bad_bits &= ~ugly_bits;
3580         }
3581         if (bad_bits) {
3582                 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3583                         bad_bits);
3584                 return -EINVAL;
3585         }
3586
3587         if (!spi->bits_per_word)
3588                 spi->bits_per_word = 8;
3589
3590         status = __spi_validate_bits_per_word(spi->controller,
3591                                               spi->bits_per_word);
3592         if (status)
3593                 return status;
3594
3595         if (spi->controller->max_speed_hz &&
3596             (!spi->max_speed_hz ||
3597              spi->max_speed_hz > spi->controller->max_speed_hz))
3598                 spi->max_speed_hz = spi->controller->max_speed_hz;
3599
3600         mutex_lock(&spi->controller->io_mutex);
3601
3602         if (spi->controller->setup) {
3603                 status = spi->controller->setup(spi);
3604                 if (status) {
3605                         mutex_unlock(&spi->controller->io_mutex);
3606                         dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3607                                 status);
3608                         return status;
3609                 }
3610         }
3611
3612         if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3613                 status = pm_runtime_get_sync(spi->controller->dev.parent);
3614                 if (status < 0) {
3615                         mutex_unlock(&spi->controller->io_mutex);
3616                         pm_runtime_put_noidle(spi->controller->dev.parent);
3617                         dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3618                                 status);
3619                         return status;
3620                 }
3621
3622                 /*
3623                  * We do not want to return a positive value from
3624                  * pm_runtime_get_sync(): there are many instances of drivers
3625                  * calling spi_setup() and checking for a non-zero return
3626                  * value instead of a negative return value.
3627                  */
3628                 status = 0;
3629
3630                 spi_set_cs(spi, false, true);
3631                 pm_runtime_mark_last_busy(spi->controller->dev.parent);
3632                 pm_runtime_put_autosuspend(spi->controller->dev.parent);
3633         } else {
3634                 spi_set_cs(spi, false, true);
3635         }
3636
3637         mutex_unlock(&spi->controller->io_mutex);
3638
3639         if (spi->rt && !spi->controller->rt) {
3640                 spi->controller->rt = true;
3641                 spi_set_thread_rt(spi->controller);
3642         }
3643
3644         trace_spi_setup(spi, status);
3645
3646         dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3647                         spi->mode & SPI_MODE_X_MASK,
3648                         (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3649                         (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3650                         (spi->mode & SPI_3WIRE) ? "3wire, " : "",
3651                         (spi->mode & SPI_LOOP) ? "loopback, " : "",
3652                         spi->bits_per_word, spi->max_speed_hz,
3653                         status);
3654
3655         return status;
3656 }
3657 EXPORT_SYMBOL_GPL(spi_setup);
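
/*
 * Usage sketch (illustrative only, with arbitrary values): a protocol
 * driver typically overrides the defaults once in probe() before its
 * first transfer:
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *	ret = spi_setup(spi);
 *	if (ret)
 *		return ret;
 */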
3658
3659 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
3660                                        struct spi_device *spi)
3661 {
3662         int delay1, delay2;
3663
3664         delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
3665         if (delay1 < 0)
3666                 return delay1;
3667
3668         delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
3669         if (delay2 < 0)
3670                 return delay2;
3671
3672         if (delay1 < delay2)
3673                 memcpy(&xfer->word_delay, &spi->word_delay,
3674                        sizeof(xfer->word_delay));
3675
3676         return 0;
3677 }
3678
3679 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3680 {
3681         struct spi_controller *ctlr = spi->controller;
3682         struct spi_transfer *xfer;
3683         int w_size;
3684
3685         if (list_empty(&message->transfers))
3686                 return -EINVAL;
3687
3688         /*
3689          * If an SPI controller does not support toggling the CS line on each
3690          * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
3691          * for the CS line, we can emulate the CS-per-word hardware function by
3692          * splitting transfers into one-word transfers and ensuring that
3693          * cs_change is set for each transfer.
3694          */
3695         if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3696                                           spi->cs_gpiod ||
3697                                           gpio_is_valid(spi->cs_gpio))) {
3698                 size_t maxsize;
3699                 int ret;
3700
3701                 maxsize = (spi->bits_per_word + 7) / 8;
3702
3703                 /* spi_split_transfers_maxsize() requires message->spi */
3704                 message->spi = spi;
3705
3706                 ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3707                                                   GFP_KERNEL);
3708                 if (ret)
3709                         return ret;
3710
3711                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3712                         /* don't change cs_change on the last entry in the list */
3713                         if (list_is_last(&xfer->transfer_list, &message->transfers))
3714                                 break;
3715                         xfer->cs_change = 1;
3716                 }
3717         }
3718
3719         /*
3720          * Half-duplex links include the original MicroWire, and ones with
3721          * only one data pin like SPI_3WIRE (which switches direction) or
3722          * where either MOSI or MISO is missing.  They can also be caused
3723          * by software limitations.
3724          */
3725         if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3726             (spi->mode & SPI_3WIRE)) {
3727                 unsigned flags = ctlr->flags;
3728
3729                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3730                         if (xfer->rx_buf && xfer->tx_buf)
3731                                 return -EINVAL;
3732                         if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3733                                 return -EINVAL;
3734                         if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3735                                 return -EINVAL;
3736                 }
3737         }
3738
3739         /*
3740          * Set transfer bits_per_word and max speed as spi device default if
3741          * it is not set for this transfer.
3742          * Set transfer tx_nbits and rx_nbits as single transfer default
3743          * (SPI_NBITS_SINGLE) if it is not set for this transfer.
3744          * Ensure transfer word_delay is at least as long as that required by
3745          * the device itself.
3746          */
3747         message->frame_length = 0;
3748         list_for_each_entry(xfer, &message->transfers, transfer_list) {
3749                 xfer->effective_speed_hz = 0;
3750                 message->frame_length += xfer->len;
3751                 if (!xfer->bits_per_word)
3752                         xfer->bits_per_word = spi->bits_per_word;
3753
3754                 if (!xfer->speed_hz)
3755                         xfer->speed_hz = spi->max_speed_hz;
3756
3757                 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3758                         xfer->speed_hz = ctlr->max_speed_hz;
3759
3760                 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3761                         return -EINVAL;
3762
3763                 /*
3764                  * SPI transfer length should be a multiple of the SPI word size,
3765                  * where the word size is rounded up to a power-of-two number of bytes.
3766                  */
3767                 if (xfer->bits_per_word <= 8)
3768                         w_size = 1;
3769                 else if (xfer->bits_per_word <= 16)
3770                         w_size = 2;
3771                 else
3772                         w_size = 4;
3773
3774                 /* No partial transfers accepted */
3775                 if (xfer->len % w_size)
3776                         return -EINVAL;
3777
3778                 if (xfer->speed_hz && ctlr->min_speed_hz &&
3779                     xfer->speed_hz < ctlr->min_speed_hz)
3780                         return -EINVAL;
3781
3782                 if (xfer->tx_buf && !xfer->tx_nbits)
3783                         xfer->tx_nbits = SPI_NBITS_SINGLE;
3784                 if (xfer->rx_buf && !xfer->rx_nbits)
3785                         xfer->rx_nbits = SPI_NBITS_SINGLE;
3786                 /*
3787                  * Check transfer tx/rx_nbits:
3788                  * 1. check the value matches one of single, dual and quad
3789                  * 2. check tx/rx_nbits match the mode in spi_device
3790                  */
3791                 if (xfer->tx_buf) {
3792                         if (spi->mode & SPI_NO_TX)
3793                                 return -EINVAL;
3794                         if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
3795                                 xfer->tx_nbits != SPI_NBITS_DUAL &&
3796                                 xfer->tx_nbits != SPI_NBITS_QUAD)
3797                                 return -EINVAL;
3798                         if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
3799                                 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
3800                                 return -EINVAL;
3801                         if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
3802                                 !(spi->mode & SPI_TX_QUAD))
3803                                 return -EINVAL;
3804                 }
3805                 /* check transfer rx_nbits */
3806                 if (xfer->rx_buf) {
3807                         if (spi->mode & SPI_NO_RX)
3808                                 return -EINVAL;
3809                         if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
3810                                 xfer->rx_nbits != SPI_NBITS_DUAL &&
3811                                 xfer->rx_nbits != SPI_NBITS_QUAD)
3812                                 return -EINVAL;
3813                         if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
3814                                 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
3815                                 return -EINVAL;
3816                         if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
3817                                 !(spi->mode & SPI_RX_QUAD))
3818                                 return -EINVAL;
3819                 }
3820
3821                 if (_spi_xfer_word_delay_update(xfer, spi))
3822                         return -EINVAL;
3823         }
3824
3825         message->status = -EINPROGRESS;
3826
3827         return 0;
3828 }
3829
3830 static int __spi_async(struct spi_device *spi, struct spi_message *message)
3831 {
3832         struct spi_controller *ctlr = spi->controller;
3833         struct spi_transfer *xfer;
3834
3835         /*
3836          * Some controllers do not support doing regular SPI transfers. Return
3837          * -ENOTSUPP when this is the case.
3838          */
3839         if (!ctlr->transfer)
3840                 return -ENOTSUPP;
3841
3842         message->spi = spi;
3843
3844         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
3845         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
3846
3847         trace_spi_message_submit(message);
3848
3849         if (!ctlr->ptp_sts_supported) {
3850                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3851                         xfer->ptp_sts_word_pre = 0;
3852                         ptp_read_system_prets(xfer->ptp_sts);
3853                 }
3854         }
3855
3856         return ctlr->transfer(spi, message);
3857 }
3858
3859 /**
3860  * spi_async - asynchronous SPI transfer
3861  * @spi: device with which data will be exchanged
3862  * @message: describes the data transfers, including completion callback
3863  * Context: any (irqs may be blocked, etc)
3864  *
3865  * This call may be used from IRQ context and other contexts which can't sleep,
3866  * as well as from task contexts which can sleep.
3867  *
3868  * The completion callback is invoked in a context which can't sleep.
3869  * Before that invocation, the value of message->status is undefined.
3870  * When the callback is issued, message->status holds either zero (to
3871  * indicate complete success) or a negative error code.  After that
3872  * callback returns, the driver which issued the transfer request may
3873  * deallocate the associated memory; it's no longer in use by any SPI
3874  * core or controller driver code.
3875  *
3876  * Note that although all messages to a spi_device are handled in
3877  * FIFO order, messages may go to different devices in other orders.
3878  * Some device might be higher priority, or have various "hard" access
3879  * time requirements, for example.
3880  *
3881  * On detection of any fault during the transfer, processing of
3882  * the entire message is aborted, and the device is deselected.
3883  * Until returning from the associated message completion callback,
3884  * no other spi_message queued to that device will be processed.
3885  * (This rule applies equally to all the synchronous transfer calls,
3886  * which are wrappers around this core asynchronous primitive.)
3887  *
3888  * Return: zero on success, else a negative error code.
3889  */
3890 int spi_async(struct spi_device *spi, struct spi_message *message)
3891 {
3892         struct spi_controller *ctlr = spi->controller;
3893         int ret;
3894         unsigned long flags;
3895
3896         ret = __spi_validate(spi, message);
3897         if (ret != 0)
3898                 return ret;
3899
3900         spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3901
3902         if (ctlr->bus_lock_flag)
3903                 ret = -EBUSY;
3904         else
3905                 ret = __spi_async(spi, message);
3906
3907         spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3908
3909         return ret;
3910 }
3911 EXPORT_SYMBOL_GPL(spi_async);
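
/*
 * Usage sketch (illustrative only; the foo context and foo_done() callback
 * are hypothetical): the message and transfer must stay allocated until the
 * completion callback runs, so they cannot live on the submitter's stack:
 *
 *	spi_message_init_with_transfers(&foo->msg, &foo->xfer, 1);
 *	foo->msg.complete = foo_done;
 *	foo->msg.context = foo;
 *	ret = spi_async(foo->spi, &foo->msg);
 */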
3912
3913 /**
3914  * spi_async_locked - version of spi_async with exclusive bus usage
3915  * @spi: device with which data will be exchanged
3916  * @message: describes the data transfers, including completion callback
3917  * Context: any (irqs may be blocked, etc)
3918  *
3919  * This call may be used from IRQ context and other contexts which can't sleep,
3920  * as well as from task contexts which can sleep.
3921  *
3922  * The completion callback is invoked in a context which can't sleep.
3923  * Before that invocation, the value of message->status is undefined.
3924  * When the callback is issued, message->status holds either zero (to
3925  * indicate complete success) or a negative error code.  After that
3926  * callback returns, the driver which issued the transfer request may
3927  * deallocate the associated memory; it's no longer in use by any SPI
3928  * core or controller driver code.
3929  *
3930  * Note that although all messages to a spi_device are handled in
3931  * FIFO order, messages may go to different devices in other orders.
3932  * Some device might be higher priority, or have various "hard" access
3933  * time requirements, for example.
3934  *
3935  * On detection of any fault during the transfer, processing of
3936  * the entire message is aborted, and the device is deselected.
3937  * Until returning from the associated message completion callback,
3938  * no other spi_message queued to that device will be processed.
3939  * (This rule applies equally to all the synchronous transfer calls,
3940  * which are wrappers around this core asynchronous primitive.)
3941  *
3942  * Return: zero on success, else a negative error code.
3943  */
3944 static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
3945 {
3946         struct spi_controller *ctlr = spi->controller;
3947         int ret;
3948         unsigned long flags;
3949
3950         ret = __spi_validate(spi, message);
3951         if (ret != 0)
3952                 return ret;
3953
3954         spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3955
3956         ret = __spi_async(spi, message);
3957
3958         spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3959
3960         return ret;
3961
3962 }
3963
3964 /*-------------------------------------------------------------------------*/
3965
3966 /*
3967  * Utility methods for SPI protocol drivers, layered on
3968  * top of the core.  Some other utility methods are defined as
3969  * inline functions.
3970  */
3971
3972 static void spi_complete(void *arg)
3973 {
3974         complete(arg);
3975 }
3976
3977 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
3978 {
3979         DECLARE_COMPLETION_ONSTACK(done);
3980         int status;
3981         struct spi_controller *ctlr = spi->controller;
3982         unsigned long flags;
3983
3984         status = __spi_validate(spi, message);
3985         if (status != 0)
3986                 return status;
3987
3988         message->complete = spi_complete;
3989         message->context = &done;
3990         message->spi = spi;
3991
3992         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
3993         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
3994
3995         /*
3996          * If we're not using the legacy transfer method then we will
3997          * try to transfer in the calling context, so special-case that.
3998          * This code would be less tricky if we could remove the
3999          * support for driver-implemented message queues.
4000          */
4001         if (ctlr->transfer == spi_queued_transfer) {
4002                 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4003
4004                 trace_spi_message_submit(message);
4005
4006                 status = __spi_queued_transfer(spi, message, false);
4007
4008                 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4009         } else {
4010                 status = spi_async_locked(spi, message);
4011         }
4012
4013         if (status == 0) {
4014                 /* Push out the messages in the calling context if we can */
4015                 if (ctlr->transfer == spi_queued_transfer) {
4016                         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
4017                                                        spi_sync_immediate);
4018                         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
4019                                                        spi_sync_immediate);
4020                         __spi_pump_messages(ctlr, false);
4021                 }
4022
4023                 wait_for_completion(&done);
4024                 status = message->status;
4025         }
4026         message->context = NULL;
4027         return status;
4028 }
4029
4030 /**
4031  * spi_sync - blocking/synchronous SPI data transfers
4032  * @spi: device with which data will be exchanged
4033  * @message: describes the data transfers
4034  * Context: can sleep
4035  *
4036  * This call may only be used from a context that may sleep.  The sleep
4037  * is non-interruptible, and has no timeout.  Low-overhead controller
4038  * drivers may DMA directly into and out of the message buffers.
4039  *
4040  * Note that the SPI device's chip select is active during the message,
4041  * and then is normally disabled between messages.  Drivers for some
4042  * frequently-used devices may want to minimize costs of selecting a chip,
4043  * by leaving it selected in anticipation that the next message will go
4044  * to the same chip.  (That may increase power usage.)
4045  *
4046  * Also, the caller is guaranteeing that the memory associated with the
4047  * message will not be freed before this call returns.
4048  *
4049  * Return: zero on success, else a negative error code.
4050  */
4051 int spi_sync(struct spi_device *spi, struct spi_message *message)
4052 {
4053         int ret;
4054
4055         mutex_lock(&spi->controller->bus_lock_mutex);
4056         ret = __spi_sync(spi, message);
4057         mutex_unlock(&spi->controller->bus_lock_mutex);
4058
4059         return ret;
4060 }
4061 EXPORT_SYMBOL_GPL(spi_sync);
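
/*
 * Usage sketch (illustrative only): a single full-duplex exchange; tx and
 * rx are assumed to be DMA-safe buffers owned by the caller:
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf = tx,
 *		.rx_buf = rx,
 *		.len = len,
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init_with_transfers(&msg, &xfer, 1);
 *	ret = spi_sync(spi, &msg);
 */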
4062
4063 /**
4064  * spi_sync_locked - version of spi_sync with exclusive bus usage
4065  * @spi: device with which data will be exchanged
4066  * @message: describes the data transfers
4067  * Context: can sleep
4068  *
4069  * This call may only be used from a context that may sleep.  The sleep
4070  * is non-interruptible, and has no timeout.  Low-overhead controller
4071  * drivers may DMA directly into and out of the message buffers.
4072  *
4073  * This call should be used by drivers that require exclusive access to the
4074  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4075  * be released by a spi_bus_unlock call when the exclusive access is over.
4076  *
4077  * Return: zero on success, else a negative error code.
4078  */
4079 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4080 {
4081         return __spi_sync(spi, message);
4082 }
4083 EXPORT_SYMBOL_GPL(spi_sync_locked);
4084
4085 /**
4086  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4087  * @ctlr: SPI bus master that should be locked for exclusive bus access
4088  * Context: can sleep
4089  *
4090  * This call may only be used from a context that may sleep.  The sleep
4091  * is non-interruptible, and has no timeout.
4092  *
4093  * This call should be used by drivers that require exclusive access to the
4094  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4095  * exclusive access is over. Data transfer must be done by spi_sync_locked
4096  * and spi_async_locked calls when the SPI bus lock is held.
4097  *
4098  * Return: always zero.
4099  */
4100 int spi_bus_lock(struct spi_controller *ctlr)
4101 {
4102         unsigned long flags;
4103
4104         mutex_lock(&ctlr->bus_lock_mutex);
4105
4106         spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4107         ctlr->bus_lock_flag = 1;
4108         spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4109
4110         /* mutex remains locked until spi_bus_unlock is called */
4111
4112         return 0;
4113 }
4114 EXPORT_SYMBOL_GPL(spi_bus_lock);
4115
4116 /**
4117  * spi_bus_unlock - release the lock for exclusive SPI bus usage
4118  * @ctlr: SPI bus master that was locked for exclusive bus access
4119  * Context: can sleep
4120  *
4121  * This call may only be used from a context that may sleep.  The sleep
4122  * is non-interruptible, and has no timeout.
4123  *
4124  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4125  * call.
4126  *
4127  * Return: always zero.
4128  */
4129 int spi_bus_unlock(struct spi_controller *ctlr)
4130 {
4131         ctlr->bus_lock_flag = 0;
4132
4133         mutex_unlock(&ctlr->bus_lock_mutex);
4134
4135         return 0;
4136 }
4137 EXPORT_SYMBOL_GPL(spi_bus_unlock);
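
/*
 * Usage sketch (illustrative only; foo_read()/foo_write() are hypothetical
 * helpers built on spi_sync_locked()): an atomic read-modify-write sequence
 * that keeps other clients off the bus between the two messages:
 *
 *	spi_bus_lock(spi->controller);
 *	ret = foo_read(spi, reg, &val);
 *	if (!ret)
 *		ret = foo_write(spi, reg, val | bit);
 *	spi_bus_unlock(spi->controller);
 */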
4138
4139 /* portable code must never pass more than 32 bytes */
4140 #define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
4141
4142 static u8       *buf;
4143
4144 /**
4145  * spi_write_then_read - SPI synchronous write followed by read
4146  * @spi: device with which data will be exchanged
4147  * @txbuf: data to be written (need not be dma-safe)
4148  * @n_tx: size of txbuf, in bytes
4149  * @rxbuf: buffer into which data will be read (need not be dma-safe)
4150  * @n_rx: size of rxbuf, in bytes
4151  * Context: can sleep
4152  *
4153  * This performs a half duplex MicroWire style transaction with the
4154  * device, sending txbuf and then reading rxbuf.  The return value
4155  * is zero for success, else a negative errno status code.
4156  * This call may only be used from a context that may sleep.
4157  *
4158  * Parameters to this routine are always copied using a small buffer.
4159  * Performance-sensitive or bulk transfer code should instead use
4160  * spi_{async,sync}() calls with dma-safe buffers.
4161  *
4162  * Return: zero on success, else a negative error code.
4163  */
4164 int spi_write_then_read(struct spi_device *spi,
4165                 const void *txbuf, unsigned n_tx,
4166                 void *rxbuf, unsigned n_rx)
4167 {
4168         static DEFINE_MUTEX(lock);
4169
4170         int                     status;
4171         struct spi_message      message;
4172         struct spi_transfer     x[2];
4173         u8                      *local_buf;
4174
4175         /*
4176          * Use the preallocated DMA-safe buffer if we can. We can't avoid
4177          * copying here (it's purely a convenience thing), but we can
4178          * keep heap costs out of the hot path unless someone else is
4179          * using the preallocated buffer or the transfer is too large.
4180          */
4181         if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4182                 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4183                                     GFP_KERNEL | GFP_DMA);
4184                 if (!local_buf)
4185                         return -ENOMEM;
4186         } else {
4187                 local_buf = buf;
4188         }
4189
4190         spi_message_init(&message);
4191         memset(x, 0, sizeof(x));
4192         if (n_tx) {
4193                 x[0].len = n_tx;
4194                 spi_message_add_tail(&x[0], &message);
4195         }
4196         if (n_rx) {
4197                 x[1].len = n_rx;
4198                 spi_message_add_tail(&x[1], &message);
4199         }
4200
4201         memcpy(local_buf, txbuf, n_tx);
4202         x[0].tx_buf = local_buf;
4203         x[1].rx_buf = local_buf + n_tx;
4204
4205         /* do the i/o */
4206         status = spi_sync(spi, &message);
4207         if (status == 0)
4208                 memcpy(rxbuf, x[1].rx_buf, n_rx);
4209
4210         if (x[0].tx_buf == buf)
4211                 mutex_unlock(&lock);
4212         else
4213                 kfree(local_buf);
4214
4215         return status;
4216 }
4217 EXPORT_SYMBOL_GPL(spi_write_then_read);
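
/*
 * Usage sketch (illustrative only; the FOO_REG_ID command byte is
 * hypothetical): reading a 16-bit register. Stack variables are fine here
 * because the helper copies through its own DMA-safe buffer:
 *
 *	u8 cmd = FOO_REG_ID;
 *	__le16 val;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, &val, 2);
 *	if (!ret)
 *		id = le16_to_cpu(val);
 */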
4218
4219 /*-------------------------------------------------------------------------*/
4220
4221 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
4222 /* must call put_device() when done with the returned spi_device */
4223 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4224 {
4225         struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4226
4227         return dev ? to_spi_device(dev) : NULL;
4228 }
4229
4230 /* SPI controllers are not on the spi_bus, so we must find them another way */
4231 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4232 {
4233         struct device *dev;
4234
4235         dev = class_find_device_by_of_node(&spi_master_class, node);
4236         if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4237                 dev = class_find_device_by_of_node(&spi_slave_class, node);
4238         if (!dev)
4239                 return NULL;
4240
4241         /* reference taken in class_find_device() */
4242         return container_of(dev, struct spi_controller, dev);
4243 }
4244
4245 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4246                          void *arg)
4247 {
4248         struct of_reconfig_data *rd = arg;
4249         struct spi_controller *ctlr;
4250         struct spi_device *spi;
4251
4252         switch (of_reconfig_get_state_change(action, arg)) {
4253         case OF_RECONFIG_CHANGE_ADD:
4254                 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4255                 if (ctlr == NULL)
4256                         return NOTIFY_OK;       /* not for us */
4257
4258                 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4259                         put_device(&ctlr->dev);
4260                         return NOTIFY_OK;
4261                 }
4262
4263                 spi = of_register_spi_device(ctlr, rd->dn);
4264                 put_device(&ctlr->dev);
4265
4266                 if (IS_ERR(spi)) {
4267                         pr_err("%s: failed to create for '%pOF'\n",
4268                                         __func__, rd->dn);
4269                         of_node_clear_flag(rd->dn, OF_POPULATED);
4270                         return notifier_from_errno(PTR_ERR(spi));
4271                 }
4272                 break;
4273
4274         case OF_RECONFIG_CHANGE_REMOVE:
4275                 /* already depopulated? */
4276                 if (!of_node_check_flag(rd->dn, OF_POPULATED))
4277                         return NOTIFY_OK;
4278
4279                 /* find our device by node */
4280                 spi = of_find_spi_device_by_node(rd->dn);
4281                 if (spi == NULL)
4282                         return NOTIFY_OK;       /* no? not meant for us */
4283
4284                 /* unregister takes one ref away */
4285                 spi_unregister_device(spi);
4286
4287                 /* and put the reference of the find */
4288                 put_device(&spi->dev);
4289                 break;
4290         }
4291
4292         return NOTIFY_OK;
4293 }
4294
4295 static struct notifier_block spi_of_notifier = {
4296         .notifier_call = of_spi_notify,
4297 };
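/*
 * Illustrative sketch (node and property names are assumed examples):
 * applying a devicetree overlay containing a fragment like the one
 * below generates an OF_RECONFIG_CHANGE_ADD for the new child node,
 * which lands in of_spi_notify() above and results in a call to
 * of_register_spi_device():
 *
 *      &spi0 {
 *              mydev@0 {
 *                      compatible = "vendor,mydev";
 *                      reg = <0>;
 *                      spi-max-frequency = <1000000>;
 *              };
 *      };
 *
 * Removing the overlay generates OF_RECONFIG_CHANGE_REMOVE, and the
 * same notifier unregisters the device.
 */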
4298 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4299 extern struct notifier_block spi_of_notifier;
4300 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4301
4302 #if IS_ENABLED(CONFIG_ACPI)
4303 static int spi_acpi_controller_match(struct device *dev, const void *data)
4304 {
4305         return ACPI_COMPANION(dev->parent) == data;
4306 }
4307
4308 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4309 {
4310         struct device *dev;
4311
4312         dev = class_find_device(&spi_master_class, NULL, adev,
4313                                 spi_acpi_controller_match);
4314         if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4315                 dev = class_find_device(&spi_slave_class, NULL, adev,
4316                                         spi_acpi_controller_match);
4317         if (!dev)
4318                 return NULL;
4319
4320         return container_of(dev, struct spi_controller, dev);
4321 }
4322
4323 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4324 {
4325         struct device *dev;
4326
4327         dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4328         return to_spi_device(dev);
4329 }
4330
4331 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4332                            void *arg)
4333 {
4334         struct acpi_device *adev = arg;
4335         struct spi_controller *ctlr;
4336         struct spi_device *spi;
4337
4338         switch (value) {
4339         case ACPI_RECONFIG_DEVICE_ADD:
4340                 ctlr = acpi_spi_find_controller_by_adev(adev->parent);
4341                 if (!ctlr)
4342                         break;
4343
4344                 acpi_register_spi_device(ctlr, adev);
4345                 put_device(&ctlr->dev);
4346                 break;
4347         case ACPI_RECONFIG_DEVICE_REMOVE:
4348                 if (!acpi_device_enumerated(adev))
4349                         break;
4350
4351                 spi = acpi_spi_find_device_by_adev(adev);
4352                 if (!spi)
4353                         break;
4354
4355                 spi_unregister_device(spi);
4356                 put_device(&spi->dev);
4357                 break;
4358         }
4359
4360         return NOTIFY_OK;
4361 }
4362
4363 static struct notifier_block spi_acpi_notifier = {
4364         .notifier_call = acpi_spi_notify,
4365 };
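/*
 * Illustrative sketch (ACPI names are assumed examples): loading an
 * SSDT that adds a device under an SPI controller's ACPI node raises
 * ACPI_RECONFIG_DEVICE_ADD, which is handled by acpi_spi_notify()
 * above:
 *
 *      Scope (\_SB.SPI0) {
 *              Device (MYD0) {
 *                      Name (_HID, "MYDV0001")
 *                      Name (_CRS, ResourceTemplate () {
 *                              ... a SpiSerialBus resource naming \_SB.SPI0 ...
 *                      })
 *              }
 *      }
 */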
4366 #else
4367 extern struct notifier_block spi_acpi_notifier;
4368 #endif
4369
4370 static int __init spi_init(void)
4371 {
4372         int     status;
4373
4374         buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4375         if (!buf) {
4376                 status = -ENOMEM;
4377                 goto err0;
4378         }
4379
4380         status = bus_register(&spi_bus_type);
4381         if (status < 0)
4382                 goto err1;
4383
4384         status = class_register(&spi_master_class);
4385         if (status < 0)
4386                 goto err2;
4387
4388         if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4389                 status = class_register(&spi_slave_class);
4390                 if (status < 0)
4391                         goto err3;
4392         }
4393
4394         if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4395                 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4396         if (IS_ENABLED(CONFIG_ACPI))
4397                 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4398
4399         return 0;
4400
4401 err3:
4402         class_unregister(&spi_master_class);
4403 err2:
4404         bus_unregister(&spi_bus_type);
4405 err1:
4406         kfree(buf);
4407         buf = NULL;
4408 err0:
4409         return status;
4410 }
4411
4412 /*
4413  * A board_info is normally registered in arch_initcall(),
4414  * but even essential drivers wait till later.
4415  *
4416  * REVISIT only boardinfo really needs static linking. The rest (device and
4417  * driver registration) _could_ be dynamically linked (modular) ... Costs
4418  * include needing to have boardinfo data structures be much more public.
4419  */
4420 postcore_initcall(spi_init);
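/*
 * Illustrative sketch (hypothetical board code, not part of this file):
 * the kind of arch_initcall()-time registration the comment above
 * refers to.  Names and values are examples only:
 *
 *      static struct spi_board_info mydev_board_info[] __initdata = {
 *              {
 *                      .modalias       = "mydev",
 *                      .max_speed_hz   = 1000000,
 *                      .bus_num        = 0,
 *                      .chip_select    = 0,
 *                      .mode           = SPI_MODE_0,
 *              },
 *      };
 *
 *      spi_register_board_info(mydev_board_info,
 *                              ARRAY_SIZE(mydev_board_info));
 *
 * spi_register_board_info() copies the entries it is given, so the
 * array itself may be __initdata; only embedded pointers such as
 * platform_data must stay valid afterwards.
 */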