linux.git / drivers / spi / spi.c
spi: introduce accelerated read support for spi flash devices
1 /*
2  * SPI init/core code
3  *
4  * Copyright (C) 2005 David Brownell
5  * Copyright (C) 2008 Secret Lab Technologies Ltd.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  */
17
18 #include <linux/kernel.h>
19 #include <linux/device.h>
20 #include <linux/init.h>
21 #include <linux/cache.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/dmaengine.h>
24 #include <linux/mutex.h>
25 #include <linux/of_device.h>
26 #include <linux/of_irq.h>
27 #include <linux/clk/clk-conf.h>
28 #include <linux/slab.h>
29 #include <linux/mod_devicetable.h>
30 #include <linux/spi/spi.h>
31 #include <linux/of_gpio.h>
32 #include <linux/pm_runtime.h>
33 #include <linux/pm_domain.h>
34 #include <linux/export.h>
35 #include <linux/sched/rt.h>
36 #include <linux/delay.h>
37 #include <linux/kthread.h>
38 #include <linux/ioport.h>
39 #include <linux/acpi.h>
40
41 #define CREATE_TRACE_POINTS
42 #include <trace/events/spi.h>
43
44 static void spidev_release(struct device *dev)
45 {
46         struct spi_device       *spi = to_spi_device(dev);
47
48         /* spi masters may cleanup for released devices */
49         if (spi->master->cleanup)
50                 spi->master->cleanup(spi);
51
52         spi_master_put(spi->master);
53         kfree(spi);
54 }
55
56 static ssize_t
57 modalias_show(struct device *dev, struct device_attribute *a, char *buf)
58 {
59         const struct spi_device *spi = to_spi_device(dev);
60         int len;
61
62         len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
63         if (len != -ENODEV)
64                 return len;
65
66         return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
67 }
68 static DEVICE_ATTR_RO(modalias);
69
70 #define SPI_STATISTICS_ATTRS(field, file)                               \
71 static ssize_t spi_master_##field##_show(struct device *dev,            \
72                                          struct device_attribute *attr, \
73                                          char *buf)                     \
74 {                                                                       \
75         struct spi_master *master = container_of(dev,                   \
76                                                  struct spi_master, dev); \
77         return spi_statistics_##field##_show(&master->statistics, buf); \
78 }                                                                       \
79 static struct device_attribute dev_attr_spi_master_##field = {          \
80         .attr = { .name = file, .mode = S_IRUGO },                      \
81         .show = spi_master_##field##_show,                              \
82 };                                                                      \
83 static ssize_t spi_device_##field##_show(struct device *dev,            \
84                                          struct device_attribute *attr, \
85                                         char *buf)                      \
86 {                                                                       \
87         struct spi_device *spi = to_spi_device(dev);                    \
88         return spi_statistics_##field##_show(&spi->statistics, buf);    \
89 }                                                                       \
90 static struct device_attribute dev_attr_spi_device_##field = {          \
91         .attr = { .name = file, .mode = S_IRUGO },                      \
92         .show = spi_device_##field##_show,                              \
93 }
94
95 #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)      \
96 static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
97                                             char *buf)                  \
98 {                                                                       \
99         unsigned long flags;                                            \
100         ssize_t len;                                                    \
101         spin_lock_irqsave(&stat->lock, flags);                          \
102         len = sprintf(buf, format_string, stat->field);                 \
103         spin_unlock_irqrestore(&stat->lock, flags);                     \
104         return len;                                                     \
105 }                                                                       \
106 SPI_STATISTICS_ATTRS(name, file)
107
108 #define SPI_STATISTICS_SHOW(field, format_string)                       \
109         SPI_STATISTICS_SHOW_NAME(field, __stringify(field),             \
110                                  field, format_string)
111
112 SPI_STATISTICS_SHOW(messages, "%lu");
113 SPI_STATISTICS_SHOW(transfers, "%lu");
114 SPI_STATISTICS_SHOW(errors, "%lu");
115 SPI_STATISTICS_SHOW(timedout, "%lu");
116
117 SPI_STATISTICS_SHOW(spi_sync, "%lu");
118 SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
119 SPI_STATISTICS_SHOW(spi_async, "%lu");
120
121 SPI_STATISTICS_SHOW(bytes, "%llu");
122 SPI_STATISTICS_SHOW(bytes_rx, "%llu");
123 SPI_STATISTICS_SHOW(bytes_tx, "%llu");
124
125 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)              \
126         SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,           \
127                                  "transfer_bytes_histo_" number,        \
128                                  transfer_bytes_histo[index],  "%lu")
129 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
130 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
131 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
132 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
133 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
134 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
135 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
136 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
137 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
138 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
139 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
140 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
141 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
142 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
143 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
144 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
145 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
146
147 static struct attribute *spi_dev_attrs[] = {
148         &dev_attr_modalias.attr,
149         NULL,
150 };
151
152 static const struct attribute_group spi_dev_group = {
153         .attrs  = spi_dev_attrs,
154 };
155
156 static struct attribute *spi_device_statistics_attrs[] = {
157         &dev_attr_spi_device_messages.attr,
158         &dev_attr_spi_device_transfers.attr,
159         &dev_attr_spi_device_errors.attr,
160         &dev_attr_spi_device_timedout.attr,
161         &dev_attr_spi_device_spi_sync.attr,
162         &dev_attr_spi_device_spi_sync_immediate.attr,
163         &dev_attr_spi_device_spi_async.attr,
164         &dev_attr_spi_device_bytes.attr,
165         &dev_attr_spi_device_bytes_rx.attr,
166         &dev_attr_spi_device_bytes_tx.attr,
167         &dev_attr_spi_device_transfer_bytes_histo0.attr,
168         &dev_attr_spi_device_transfer_bytes_histo1.attr,
169         &dev_attr_spi_device_transfer_bytes_histo2.attr,
170         &dev_attr_spi_device_transfer_bytes_histo3.attr,
171         &dev_attr_spi_device_transfer_bytes_histo4.attr,
172         &dev_attr_spi_device_transfer_bytes_histo5.attr,
173         &dev_attr_spi_device_transfer_bytes_histo6.attr,
174         &dev_attr_spi_device_transfer_bytes_histo7.attr,
175         &dev_attr_spi_device_transfer_bytes_histo8.attr,
176         &dev_attr_spi_device_transfer_bytes_histo9.attr,
177         &dev_attr_spi_device_transfer_bytes_histo10.attr,
178         &dev_attr_spi_device_transfer_bytes_histo11.attr,
179         &dev_attr_spi_device_transfer_bytes_histo12.attr,
180         &dev_attr_spi_device_transfer_bytes_histo13.attr,
181         &dev_attr_spi_device_transfer_bytes_histo14.attr,
182         &dev_attr_spi_device_transfer_bytes_histo15.attr,
183         &dev_attr_spi_device_transfer_bytes_histo16.attr,
184         NULL,
185 };
186
187 static const struct attribute_group spi_device_statistics_group = {
188         .name  = "statistics",
189         .attrs  = spi_device_statistics_attrs,
190 };
191
192 static const struct attribute_group *spi_dev_groups[] = {
193         &spi_dev_group,
194         &spi_device_statistics_group,
195         NULL,
196 };
197
198 static struct attribute *spi_master_statistics_attrs[] = {
199         &dev_attr_spi_master_messages.attr,
200         &dev_attr_spi_master_transfers.attr,
201         &dev_attr_spi_master_errors.attr,
202         &dev_attr_spi_master_timedout.attr,
203         &dev_attr_spi_master_spi_sync.attr,
204         &dev_attr_spi_master_spi_sync_immediate.attr,
205         &dev_attr_spi_master_spi_async.attr,
206         &dev_attr_spi_master_bytes.attr,
207         &dev_attr_spi_master_bytes_rx.attr,
208         &dev_attr_spi_master_bytes_tx.attr,
209         &dev_attr_spi_master_transfer_bytes_histo0.attr,
210         &dev_attr_spi_master_transfer_bytes_histo1.attr,
211         &dev_attr_spi_master_transfer_bytes_histo2.attr,
212         &dev_attr_spi_master_transfer_bytes_histo3.attr,
213         &dev_attr_spi_master_transfer_bytes_histo4.attr,
214         &dev_attr_spi_master_transfer_bytes_histo5.attr,
215         &dev_attr_spi_master_transfer_bytes_histo6.attr,
216         &dev_attr_spi_master_transfer_bytes_histo7.attr,
217         &dev_attr_spi_master_transfer_bytes_histo8.attr,
218         &dev_attr_spi_master_transfer_bytes_histo9.attr,
219         &dev_attr_spi_master_transfer_bytes_histo10.attr,
220         &dev_attr_spi_master_transfer_bytes_histo11.attr,
221         &dev_attr_spi_master_transfer_bytes_histo12.attr,
222         &dev_attr_spi_master_transfer_bytes_histo13.attr,
223         &dev_attr_spi_master_transfer_bytes_histo14.attr,
224         &dev_attr_spi_master_transfer_bytes_histo15.attr,
225         &dev_attr_spi_master_transfer_bytes_histo16.attr,
226         NULL,
227 };
228
229 static const struct attribute_group spi_master_statistics_group = {
230         .name  = "statistics",
231         .attrs  = spi_master_statistics_attrs,
232 };
233
234 static const struct attribute_group *spi_master_groups[] = {
235         &spi_master_statistics_group,
236         NULL,
237 };
238
239 void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
240                                        struct spi_transfer *xfer,
241                                        struct spi_master *master)
242 {
243         unsigned long flags;
244         int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
245
246         if (l2len < 0)
247                 l2len = 0;
248
249         spin_lock_irqsave(&stats->lock, flags);
250
251         stats->transfers++;
252         stats->transfer_bytes_histo[l2len]++;
253
254         stats->bytes += xfer->len;
255         if ((xfer->tx_buf) &&
256             (xfer->tx_buf != master->dummy_tx))
257                 stats->bytes_tx += xfer->len;
258         if ((xfer->rx_buf) &&
259             (xfer->rx_buf != master->dummy_rx))
260                 stats->bytes_rx += xfer->len;
261
262         spin_unlock_irqrestore(&stats->lock, flags);
263 }
264 EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
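/*
 * Worked example of the histogram bucketing above (illustrative only):
 * a transfer of xfer->len = 100 bytes gives fls(100) = 7, so
 * l2len = min(7, SPI_STATISTICS_HISTO_SIZE) - 1 = 6, which lands in the
 * "transfer_bytes_histo_64-127" bucket exposed in sysfs; a zero-length
 * transfer gives fls(0) - 1 = -1 and is clamped to bucket 0 ("0-1").
 */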
265
266 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
267  * and the sysfs version makes coldplug work too.
268  */
269
270 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
271                                                 const struct spi_device *sdev)
272 {
273         while (id->name[0]) {
274                 if (!strcmp(sdev->modalias, id->name))
275                         return id;
276                 id++;
277         }
278         return NULL;
279 }
280
281 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
282 {
283         const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
284
285         return spi_match_id(sdrv->id_table, sdev);
286 }
287 EXPORT_SYMBOL_GPL(spi_get_device_id);
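/*
 * Illustrative sketch (not part of this file): a client driver typically
 * provides a spi_device_id table so that the modalias matching in
 * spi_match_id()/spi_get_device_id() can select it and hand back per-chip
 * driver_data.  The "foo"/"foo-variant" names and FOO_TYPE_* values below
 * are made up for the example:
 *
 *	static const struct spi_device_id foo_ids[] = {
 *		{ "foo",         FOO_TYPE_A },
 *		{ "foo-variant", FOO_TYPE_B },
 *		{ }
 *	};
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		const struct spi_device_id *id = spi_get_device_id(spi);
 *
 *		return foo_setup_chip(spi, id->driver_data);
 *	}
 */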
288
289 static int spi_match_device(struct device *dev, struct device_driver *drv)
290 {
291         const struct spi_device *spi = to_spi_device(dev);
292         const struct spi_driver *sdrv = to_spi_driver(drv);
293
294         /* Attempt an OF style match */
295         if (of_driver_match_device(dev, drv))
296                 return 1;
297
298         /* Then try ACPI */
299         if (acpi_driver_match_device(dev, drv))
300                 return 1;
301
302         if (sdrv->id_table)
303                 return !!spi_match_id(sdrv->id_table, spi);
304
305         return strcmp(spi->modalias, drv->name) == 0;
306 }
307
308 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
309 {
310         const struct spi_device         *spi = to_spi_device(dev);
311         int rc;
312
313         rc = acpi_device_uevent_modalias(dev, env);
314         if (rc != -ENODEV)
315                 return rc;
316
317         add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
318         return 0;
319 }
320
321 struct bus_type spi_bus_type = {
322         .name           = "spi",
323         .dev_groups     = spi_dev_groups,
324         .match          = spi_match_device,
325         .uevent         = spi_uevent,
326 };
327 EXPORT_SYMBOL_GPL(spi_bus_type);
328
329
330 static int spi_drv_probe(struct device *dev)
331 {
332         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
333         struct spi_device               *spi = to_spi_device(dev);
334         int ret;
335
336         ret = of_clk_set_defaults(dev->of_node, false);
337         if (ret)
338                 return ret;
339
340         if (dev->of_node) {
341                 spi->irq = of_irq_get(dev->of_node, 0);
342                 if (spi->irq == -EPROBE_DEFER)
343                         return -EPROBE_DEFER;
344                 if (spi->irq < 0)
345                         spi->irq = 0;
346         }
347
348         ret = dev_pm_domain_attach(dev, true);
349         if (ret != -EPROBE_DEFER) {
350                 ret = sdrv->probe(spi);
351                 if (ret)
352                         dev_pm_domain_detach(dev, true);
353         }
354
355         return ret;
356 }
357
358 static int spi_drv_remove(struct device *dev)
359 {
360         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
361         int ret;
362
363         ret = sdrv->remove(to_spi_device(dev));
364         dev_pm_domain_detach(dev, true);
365
366         return ret;
367 }
368
369 static void spi_drv_shutdown(struct device *dev)
370 {
371         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
372
373         sdrv->shutdown(to_spi_device(dev));
374 }
375
376 /**
377  * __spi_register_driver - register a SPI driver
378  * @owner: owner module of the driver to register
379  * @sdrv: the driver to register
380  * Context: can sleep
381  *
382  * Return: zero on success, else a negative error code.
383  */
384 int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
385 {
386         sdrv->driver.owner = owner;
387         sdrv->driver.bus = &spi_bus_type;
388         if (sdrv->probe)
389                 sdrv->driver.probe = spi_drv_probe;
390         if (sdrv->remove)
391                 sdrv->driver.remove = spi_drv_remove;
392         if (sdrv->shutdown)
393                 sdrv->driver.shutdown = spi_drv_shutdown;
394         return driver_register(&sdrv->driver);
395 }
396 EXPORT_SYMBOL_GPL(__spi_register_driver);
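/*
 * Illustrative sketch (not part of this file): a client driver normally
 * reaches __spi_register_driver() through the spi_register_driver()
 * wrapper, most often via the module_spi_driver() helper.  Only .probe is
 * required for the hooks wired up above; "foo" and the foo_* callbacks
 * are placeholder names:
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *		},
 *		.id_table = foo_ids,
 *		.probe    = foo_probe,
 *		.remove   = foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 */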
397
398 /*-------------------------------------------------------------------------*/
399
400 /* SPI devices should normally not be created by SPI device drivers; that
401  * would make them board-specific.  Similarly with SPI master drivers.
402  * Device registration normally goes into, for example, arch/.../mach.../board-YYY.c
403  * with other readonly (flashable) information about mainboard devices.
404  */
405
406 struct boardinfo {
407         struct list_head        list;
408         struct spi_board_info   board_info;
409 };
410
411 static LIST_HEAD(board_list);
412 static LIST_HEAD(spi_master_list);
413
414 /*
415  * Used to protect add/del operations on the board_info list and the
416  * spi_master list, and their matching process
417  */
418 static DEFINE_MUTEX(board_lock);
419
420 /**
421  * spi_alloc_device - Allocate a new SPI device
422  * @master: Controller to which device is connected
423  * Context: can sleep
424  *
425  * Allows a driver to allocate and initialize a spi_device without
426  * registering it immediately.  This allows a driver to directly
427  * fill the spi_device with device parameters before calling
428  * spi_add_device() on it.
429  *
430  * The caller is responsible for calling spi_add_device() on the returned
431  * spi_device structure to add it to the SPI master.  If the caller
432  * needs to discard the spi_device without adding it, then it should
433  * call spi_dev_put() on it.
434  *
435  * Return: a pointer to the new device, or NULL.
436  */
437 struct spi_device *spi_alloc_device(struct spi_master *master)
438 {
439         struct spi_device       *spi;
440
441         if (!spi_master_get(master))
442                 return NULL;
443
444         spi = kzalloc(sizeof(*spi), GFP_KERNEL);
445         if (!spi) {
446                 spi_master_put(master);
447                 return NULL;
448         }
449
450         spi->master = master;
451         spi->dev.parent = &master->dev;
452         spi->dev.bus = &spi_bus_type;
453         spi->dev.release = spidev_release;
454         spi->cs_gpio = -ENOENT;
455
456         spin_lock_init(&spi->statistics.lock);
457
458         device_initialize(&spi->dev);
459         return spi;
460 }
461 EXPORT_SYMBOL_GPL(spi_alloc_device);
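/*
 * Illustrative sketch (not part of this file) of the allocate-then-add
 * pattern described above; the chip-select, speed and modalias values are
 * arbitrary placeholders:
 *
 *	struct spi_device *spi = spi_alloc_device(master);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);
 *		return -ENODEV;
 *	}
 */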
462
463 static void spi_dev_set_name(struct spi_device *spi)
464 {
465         struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
466
467         if (adev) {
468                 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
469                 return;
470         }
471
472         dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
473                      spi->chip_select);
474 }
475
476 static int spi_dev_check(struct device *dev, void *data)
477 {
478         struct spi_device *spi = to_spi_device(dev);
479         struct spi_device *new_spi = data;
480
481         if (spi->master == new_spi->master &&
482             spi->chip_select == new_spi->chip_select)
483                 return -EBUSY;
484         return 0;
485 }
486
487 /**
488  * spi_add_device - Add spi_device allocated with spi_alloc_device
489  * @spi: spi_device to register
490  *
491  * Companion function to spi_alloc_device.  Devices allocated with
492  * spi_alloc_device can be added onto the spi bus with this function.
493  *
494  * Return: 0 on success; negative errno on failure
495  */
496 int spi_add_device(struct spi_device *spi)
497 {
498         static DEFINE_MUTEX(spi_add_lock);
499         struct spi_master *master = spi->master;
500         struct device *dev = master->dev.parent;
501         int status;
502
503         /* Chipselects are numbered 0..max; validate. */
504         if (spi->chip_select >= master->num_chipselect) {
505                 dev_err(dev, "cs%d >= max %d\n",
506                         spi->chip_select,
507                         master->num_chipselect);
508                 return -EINVAL;
509         }
510
511         /* Set the bus ID string */
512         spi_dev_set_name(spi);
513
514         /* We need to make sure there's no other device with this
515          * chipselect **BEFORE** we call setup(), else we'll trash
516          * its configuration.  Lock against concurrent add() calls.
517          */
518         mutex_lock(&spi_add_lock);
519
520         status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
521         if (status) {
522                 dev_err(dev, "chipselect %d already in use\n",
523                                 spi->chip_select);
524                 goto done;
525         }
526
527         if (master->cs_gpios)
528                 spi->cs_gpio = master->cs_gpios[spi->chip_select];
529
530         /* Drivers may modify this initial i/o setup, but will
531          * normally rely on the device being setup.  Devices
532          * using SPI_CS_HIGH can't coexist well otherwise...
533          */
534         status = spi_setup(spi);
535         if (status < 0) {
536                 dev_err(dev, "can't setup %s, status %d\n",
537                                 dev_name(&spi->dev), status);
538                 goto done;
539         }
540
541         /* Device may be bound to an active driver when this returns */
542         status = device_add(&spi->dev);
543         if (status < 0)
544                 dev_err(dev, "can't add %s, status %d\n",
545                                 dev_name(&spi->dev), status);
546         else
547                 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
548
549 done:
550         mutex_unlock(&spi_add_lock);
551         return status;
552 }
553 EXPORT_SYMBOL_GPL(spi_add_device);
554
555 /**
556  * spi_new_device - instantiate one new SPI device
557  * @master: Controller to which device is connected
558  * @chip: Describes the SPI device
559  * Context: can sleep
560  *
561  * On typical mainboards, this is purely internal; and it's not needed
562  * after board init creates the hard-wired devices.  Some development
563  * platforms may not be able to use spi_register_board_info though, and
564  * this is exported so that for example a USB or parport based adapter
565  * driver could add devices (which it would learn about out-of-band).
566  *
567  * Return: the new device, or NULL.
568  */
569 struct spi_device *spi_new_device(struct spi_master *master,
570                                   struct spi_board_info *chip)
571 {
572         struct spi_device       *proxy;
573         int                     status;
574
575         /* NOTE:  caller did any chip->bus_num checks necessary.
576          *
577          * Also, unless we change the return value convention to use
578          * error-or-pointer (not NULL-or-pointer), troubleshootability
579          * suggests syslogged diagnostics are best here (ugh).
580          */
581
582         proxy = spi_alloc_device(master);
583         if (!proxy)
584                 return NULL;
585
586         WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
587
588         proxy->chip_select = chip->chip_select;
589         proxy->max_speed_hz = chip->max_speed_hz;
590         proxy->mode = chip->mode;
591         proxy->irq = chip->irq;
592         strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
593         proxy->dev.platform_data = (void *) chip->platform_data;
594         proxy->controller_data = chip->controller_data;
595         proxy->controller_state = NULL;
596
597         status = spi_add_device(proxy);
598         if (status < 0) {
599                 spi_dev_put(proxy);
600                 return NULL;
601         }
602
603         return proxy;
604 }
605 EXPORT_SYMBOL_GPL(spi_new_device);
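/*
 * Illustrative sketch (not part of this file): an adapter driver that
 * learns about a chip out-of-band could instantiate it like this; the
 * modalias and chip parameters are placeholders:
 *
 *	struct spi_board_info chip = {
 *		.modalias     = "foo",
 *		.max_speed_hz = 2000000,
 *		.chip_select  = 1,
 *		.mode         = SPI_MODE_0,
 *	};
 *	struct spi_device *dev = spi_new_device(master, &chip);
 *
 *	if (!dev)
 *		return -ENODEV;
 */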
606
607 /**
608  * spi_unregister_device - unregister a single SPI device
609  * @spi: spi_device to unregister
610  *
611  * Start making the passed SPI device vanish. Normally this would be handled
612  * by spi_unregister_master().
613  */
614 void spi_unregister_device(struct spi_device *spi)
615 {
616         if (!spi)
617                 return;
618
619         if (spi->dev.of_node)
620                 of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
621         device_unregister(&spi->dev);
622 }
623 EXPORT_SYMBOL_GPL(spi_unregister_device);
624
625 static void spi_match_master_to_boardinfo(struct spi_master *master,
626                                 struct spi_board_info *bi)
627 {
628         struct spi_device *dev;
629
630         if (master->bus_num != bi->bus_num)
631                 return;
632
633         dev = spi_new_device(master, bi);
634         if (!dev)
635                 dev_err(master->dev.parent, "can't create new device for %s\n",
636                         bi->modalias);
637 }
638
639 /**
640  * spi_register_board_info - register SPI devices for a given board
641  * @info: array of chip descriptors
642  * @n: how many descriptors are provided
643  * Context: can sleep
644  *
645  * Board-specific early init code calls this (probably during arch_initcall)
646  * with segments of the SPI device table.  Any device nodes are created later,
647  * after the relevant parent SPI controller (bus_num) is defined.  We keep
648  * this table of devices forever, so that reloading a controller driver will
649  * not make Linux forget about these hard-wired devices.
650  *
651  * Other code can also call this, e.g. a particular add-on board might provide
652  * SPI devices through its expansion connector, so code initializing that board
653  * would naturally declare its SPI devices.
654  *
655  * The board info passed can safely be __initdata ... but be careful of
656  * any embedded pointers (platform_data, etc), they're copied as-is.
657  *
658  * Return: zero on success, else a negative error code.
659  */
660 int spi_register_board_info(struct spi_board_info const *info, unsigned n)
661 {
662         struct boardinfo *bi;
663         int i;
664
665         if (!n)
666                 return -EINVAL;
667
668         bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
669         if (!bi)
670                 return -ENOMEM;
671
672         for (i = 0; i < n; i++, bi++, info++) {
673                 struct spi_master *master;
674
675                 memcpy(&bi->board_info, info, sizeof(*info));
676                 mutex_lock(&board_lock);
677                 list_add_tail(&bi->list, &board_list);
678                 list_for_each_entry(master, &spi_master_list, list)
679                         spi_match_master_to_boardinfo(master, &bi->board_info);
680                 mutex_unlock(&board_lock);
681         }
682
683         return 0;
684 }
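
/*
 * Illustrative sketch (not part of this file): a board file typically
 * declares a static table and registers it early (e.g. from an
 * arch_initcall); the devices themselves are only created once a
 * controller with the matching bus_num registers.  The entry below is a
 * placeholder:
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias     = "foo",
 *			.max_speed_hz = 10000000,
 *			.bus_num      = 0,
 *			.chip_select  = 0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */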
685
686 /*-------------------------------------------------------------------------*/
687
688 static void spi_set_cs(struct spi_device *spi, bool enable)
689 {
690         if (spi->mode & SPI_CS_HIGH)
691                 enable = !enable;
692
693         if (gpio_is_valid(spi->cs_gpio))
694                 gpio_set_value(spi->cs_gpio, !enable);
695         else if (spi->master->set_cs)
696                 spi->master->set_cs(spi, !enable);
697 }
698
699 #ifdef CONFIG_HAS_DMA
700 static int spi_map_buf(struct spi_master *master, struct device *dev,
701                        struct sg_table *sgt, void *buf, size_t len,
702                        enum dma_data_direction dir)
703 {
704         const bool vmalloced_buf = is_vmalloc_addr(buf);
705         int desc_len;
706         int sgs;
707         struct page *vm_page;
708         void *sg_buf;
709         size_t min;
710         int i, ret;
711
712         if (vmalloced_buf) {
713                 desc_len = PAGE_SIZE;
714                 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
715         } else {
716                 desc_len = master->max_dma_len;
717                 sgs = DIV_ROUND_UP(len, desc_len);
718         }
719
720         ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
721         if (ret != 0)
722                 return ret;
723
724         for (i = 0; i < sgs; i++) {
725
726                 if (vmalloced_buf) {
727                         min = min_t(size_t,
728                                     len, desc_len - offset_in_page(buf));
729                         vm_page = vmalloc_to_page(buf);
730                         if (!vm_page) {
731                                 sg_free_table(sgt);
732                                 return -ENOMEM;
733                         }
734                         sg_set_page(&sgt->sgl[i], vm_page,
735                                     min, offset_in_page(buf));
736                 } else {
737                         min = min_t(size_t, len, desc_len);
738                         sg_buf = buf;
739                         sg_set_buf(&sgt->sgl[i], sg_buf, min);
740                 }
741
742
743                 buf += min;
744                 len -= min;
745         }
746
747         ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
748         if (!ret)
749                 ret = -ENOMEM;
750         if (ret < 0) {
751                 sg_free_table(sgt);
752                 return ret;
753         }
754
755         sgt->nents = ret;
756
757         return 0;
758 }
759
760 static void spi_unmap_buf(struct spi_master *master, struct device *dev,
761                           struct sg_table *sgt, enum dma_data_direction dir)
762 {
763         if (sgt->orig_nents) {
764                 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
765                 sg_free_table(sgt);
766         }
767 }
768
769 static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
770 {
771         struct device *tx_dev, *rx_dev;
772         struct spi_transfer *xfer;
773         int ret;
774
775         if (!master->can_dma)
776                 return 0;
777
778         if (master->dma_tx)
779                 tx_dev = master->dma_tx->device->dev;
780         else
781                 tx_dev = &master->dev;
782
783         if (master->dma_rx)
784                 rx_dev = master->dma_rx->device->dev;
785         else
786                 rx_dev = &master->dev;
787
788         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
789                 if (!master->can_dma(master, msg->spi, xfer))
790                         continue;
791
792                 if (xfer->tx_buf != NULL) {
793                         ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
794                                           (void *)xfer->tx_buf, xfer->len,
795                                           DMA_TO_DEVICE);
796                         if (ret != 0)
797                                 return ret;
798                 }
799
800                 if (xfer->rx_buf != NULL) {
801                         ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
802                                           xfer->rx_buf, xfer->len,
803                                           DMA_FROM_DEVICE);
804                         if (ret != 0) {
805                                 spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
806                                               DMA_TO_DEVICE);
807                                 return ret;
808                         }
809                 }
810         }
811
812         master->cur_msg_mapped = true;
813
814         return 0;
815 }
816
817 static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
818 {
819         struct spi_transfer *xfer;
820         struct device *tx_dev, *rx_dev;
821
822         if (!master->cur_msg_mapped || !master->can_dma)
823                 return 0;
824
825         if (master->dma_tx)
826                 tx_dev = master->dma_tx->device->dev;
827         else
828                 tx_dev = &master->dev;
829
830         if (master->dma_rx)
831                 rx_dev = master->dma_rx->device->dev;
832         else
833                 rx_dev = &master->dev;
834
835         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
836                 if (!master->can_dma(master, msg->spi, xfer))
837                         continue;
838
839                 spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
840                 spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
841         }
842
843         return 0;
844 }
845 #else /* !CONFIG_HAS_DMA */
846 static inline int __spi_map_msg(struct spi_master *master,
847                                 struct spi_message *msg)
848 {
849         return 0;
850 }
851
852 static inline int __spi_unmap_msg(struct spi_master *master,
853                                   struct spi_message *msg)
854 {
855         return 0;
856 }
857 #endif /* !CONFIG_HAS_DMA */
858
859 static inline int spi_unmap_msg(struct spi_master *master,
860                                 struct spi_message *msg)
861 {
862         struct spi_transfer *xfer;
863
864         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
865                 /*
866                  * Restore tx_buf and rx_buf to their original NULL values
867                  * if they were pointed at the dummy buffers by spi_map_msg().
868                  */
869                 if (xfer->tx_buf == master->dummy_tx)
870                         xfer->tx_buf = NULL;
871                 if (xfer->rx_buf == master->dummy_rx)
872                         xfer->rx_buf = NULL;
873         }
874
875         return __spi_unmap_msg(master, msg);
876 }
877
878 static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
879 {
880         struct spi_transfer *xfer;
881         void *tmp;
882         unsigned int max_tx, max_rx;
883
884         if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
885                 max_tx = 0;
886                 max_rx = 0;
887
888                 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
889                         if ((master->flags & SPI_MASTER_MUST_TX) &&
890                             !xfer->tx_buf)
891                                 max_tx = max(xfer->len, max_tx);
892                         if ((master->flags & SPI_MASTER_MUST_RX) &&
893                             !xfer->rx_buf)
894                                 max_rx = max(xfer->len, max_rx);
895                 }
896
897                 if (max_tx) {
898                         tmp = krealloc(master->dummy_tx, max_tx,
899                                        GFP_KERNEL | GFP_DMA);
900                         if (!tmp)
901                                 return -ENOMEM;
902                         master->dummy_tx = tmp;
903                         memset(tmp, 0, max_tx);
904                 }
905
906                 if (max_rx) {
907                         tmp = krealloc(master->dummy_rx, max_rx,
908                                        GFP_KERNEL | GFP_DMA);
909                         if (!tmp)
910                                 return -ENOMEM;
911                         master->dummy_rx = tmp;
912                 }
913
914                 if (max_tx || max_rx) {
915                         list_for_each_entry(xfer, &msg->transfers,
916                                             transfer_list) {
917                                 if (!xfer->tx_buf)
918                                         xfer->tx_buf = master->dummy_tx;
919                                 if (!xfer->rx_buf)
920                                         xfer->rx_buf = master->dummy_rx;
921                         }
922                 }
923         }
924
925         return __spi_map_msg(master, msg);
926 }
927
928 /*
929  * spi_transfer_one_message - Default implementation of transfer_one_message()
930  *
931  * This is a standard implementation of transfer_one_message() for
932  * drivers which implement a transfer_one() operation.  It provides
933  * standard handling of delays and chip select management.
934  */
935 static int spi_transfer_one_message(struct spi_master *master,
936                                     struct spi_message *msg)
937 {
938         struct spi_transfer *xfer;
939         bool keep_cs = false;
940         int ret = 0;
941         unsigned long ms = 1;
942         struct spi_statistics *statm = &master->statistics;
943         struct spi_statistics *stats = &msg->spi->statistics;
944
945         spi_set_cs(msg->spi, true);
946
947         SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
948         SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
949
950         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
951                 trace_spi_transfer_start(msg, xfer);
952
953                 spi_statistics_add_transfer_stats(statm, xfer, master);
954                 spi_statistics_add_transfer_stats(stats, xfer, master);
955
956                 if (xfer->tx_buf || xfer->rx_buf) {
957                         reinit_completion(&master->xfer_completion);
958
959                         ret = master->transfer_one(master, msg->spi, xfer);
960                         if (ret < 0) {
961                                 SPI_STATISTICS_INCREMENT_FIELD(statm,
962                                                                errors);
963                                 SPI_STATISTICS_INCREMENT_FIELD(stats,
964                                                                errors);
965                                 dev_err(&msg->spi->dev,
966                                         "SPI transfer failed: %d\n", ret);
967                                 goto out;
968                         }
969
970                         if (ret > 0) {
971                                 ret = 0;
972                                 ms = xfer->len * 8 * 1000 / xfer->speed_hz;
973                                 ms += ms + 100; /* some tolerance */
974
975                                 ms = wait_for_completion_timeout(&master->xfer_completion,
976                                                                  msecs_to_jiffies(ms));
977                         }
978
979                         if (ms == 0) {
980                                 SPI_STATISTICS_INCREMENT_FIELD(statm,
981                                                                timedout);
982                                 SPI_STATISTICS_INCREMENT_FIELD(stats,
983                                                                timedout);
984                                 dev_err(&msg->spi->dev,
985                                         "SPI transfer timed out\n");
986                                 msg->status = -ETIMEDOUT;
987                         }
988                 } else {
989                         if (xfer->len)
990                                 dev_err(&msg->spi->dev,
991                                         "Bufferless transfer has length %u\n",
992                                         xfer->len);
993                 }
994
995                 trace_spi_transfer_stop(msg, xfer);
996
997                 if (msg->status != -EINPROGRESS)
998                         goto out;
999
1000                 if (xfer->delay_usecs)
1001                         udelay(xfer->delay_usecs);
1002
1003                 if (xfer->cs_change) {
1004                         if (list_is_last(&xfer->transfer_list,
1005                                          &msg->transfers)) {
1006                                 keep_cs = true;
1007                         } else {
1008                                 spi_set_cs(msg->spi, false);
1009                                 udelay(10);
1010                                 spi_set_cs(msg->spi, true);
1011                         }
1012                 }
1013
1014                 msg->actual_length += xfer->len;
1015         }
1016
1017 out:
1018         if (ret != 0 || !keep_cs)
1019                 spi_set_cs(msg->spi, false);
1020
1021         if (msg->status == -EINPROGRESS)
1022                 msg->status = ret;
1023
1024         if (msg->status && master->handle_err)
1025                 master->handle_err(master, msg);
1026
1027         spi_finalize_current_message(master);
1028
1029         return ret;
1030 }
1031
1032 /**
1033  * spi_finalize_current_transfer - report completion of a transfer
1034  * @master: the master reporting completion
1035  *
1036  * Called by SPI drivers using the core transfer_one_message()
1037  * implementation to notify it that the current interrupt driven
1038  * transfer has finished and the next one may be scheduled.
1039  */
1040 void spi_finalize_current_transfer(struct spi_master *master)
1041 {
1042         complete(&master->xfer_completion);
1043 }
1044 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
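/*
 * Illustrative sketch (not part of this file): a controller driver using
 * the core spi_transfer_one_message() typically starts the transfer in
 * its transfer_one() hook, returns a positive value to indicate it will
 * complete asynchronously, and calls spi_finalize_current_transfer() from
 * its interrupt handler; the foo_* names are placeholders:
 *
 *	static int foo_transfer_one(struct spi_master *master,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_start_dma(master, xfer);
 *		return 1;
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct spi_master *master = data;
 *
 *		spi_finalize_current_transfer(master);
 *		return IRQ_HANDLED;
 *	}
 */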
1045
1046 /**
1047  * __spi_pump_messages - function which processes spi message queue
1048  * @master: master to process queue for
1049  * @in_kthread: true if we are in the context of the message pump thread
1050  *
1051  * This function checks if there is any spi message in the queue that
1052  * needs processing and, if so, calls out to the driver to initialize hardware
1053  * and transfer each message.
1054  *
1055  * Note that it is called both from the kthread itself and also from
1056  * inside spi_sync(); the queue extraction handling at the top of the
1057  * function should deal with this safely.
1058  */
1059 static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
1060 {
1061         unsigned long flags;
1062         bool was_busy = false;
1063         int ret;
1064
1065         /* Lock queue */
1066         spin_lock_irqsave(&master->queue_lock, flags);
1067
1068         /* Make sure we are not already running a message */
1069         if (master->cur_msg) {
1070                 spin_unlock_irqrestore(&master->queue_lock, flags);
1071                 return;
1072         }
1073
1074         /* If another context is idling the device then defer */
1075         if (master->idling) {
1076                 queue_kthread_work(&master->kworker, &master->pump_messages);
1077                 spin_unlock_irqrestore(&master->queue_lock, flags);
1078                 return;
1079         }
1080
1081         /* Check if the queue is idle */
1082         if (list_empty(&master->queue) || !master->running) {
1083                 if (!master->busy) {
1084                         spin_unlock_irqrestore(&master->queue_lock, flags);
1085                         return;
1086                 }
1087
1088                 /* Only do teardown in the thread */
1089                 if (!in_kthread) {
1090                         queue_kthread_work(&master->kworker,
1091                                            &master->pump_messages);
1092                         spin_unlock_irqrestore(&master->queue_lock, flags);
1093                         return;
1094                 }
1095
1096                 master->busy = false;
1097                 master->idling = true;
1098                 spin_unlock_irqrestore(&master->queue_lock, flags);
1099
1100                 kfree(master->dummy_rx);
1101                 master->dummy_rx = NULL;
1102                 kfree(master->dummy_tx);
1103                 master->dummy_tx = NULL;
1104                 if (master->unprepare_transfer_hardware &&
1105                     master->unprepare_transfer_hardware(master))
1106                         dev_err(&master->dev,
1107                                 "failed to unprepare transfer hardware\n");
1108                 if (master->auto_runtime_pm) {
1109                         pm_runtime_mark_last_busy(master->dev.parent);
1110                         pm_runtime_put_autosuspend(master->dev.parent);
1111                 }
1112                 trace_spi_master_idle(master);
1113
1114                 spin_lock_irqsave(&master->queue_lock, flags);
1115                 master->idling = false;
1116                 spin_unlock_irqrestore(&master->queue_lock, flags);
1117                 return;
1118         }
1119
1120         /* Extract head of queue */
1121         master->cur_msg =
1122                 list_first_entry(&master->queue, struct spi_message, queue);
1123
1124         list_del_init(&master->cur_msg->queue);
1125         if (master->busy)
1126                 was_busy = true;
1127         else
1128                 master->busy = true;
1129         spin_unlock_irqrestore(&master->queue_lock, flags);
1130
1131         if (!was_busy && master->auto_runtime_pm) {
1132                 ret = pm_runtime_get_sync(master->dev.parent);
1133                 if (ret < 0) {
1134                         dev_err(&master->dev, "Failed to power device: %d\n",
1135                                 ret);
1136                         return;
1137                 }
1138         }
1139
1140         if (!was_busy)
1141                 trace_spi_master_busy(master);
1142
1143         if (!was_busy && master->prepare_transfer_hardware) {
1144                 ret = master->prepare_transfer_hardware(master);
1145                 if (ret) {
1146                         dev_err(&master->dev,
1147                                 "failed to prepare transfer hardware\n");
1148
1149                         if (master->auto_runtime_pm)
1150                                 pm_runtime_put(master->dev.parent);
1151                         return;
1152                 }
1153         }
1154
1155         mutex_lock(&master->bus_lock_mutex);
1156         trace_spi_message_start(master->cur_msg);
1157
1158         if (master->prepare_message) {
1159                 ret = master->prepare_message(master, master->cur_msg);
1160                 if (ret) {
1161                         dev_err(&master->dev,
1162                                 "failed to prepare message: %d\n", ret);
1163                         master->cur_msg->status = ret;
1164                         spi_finalize_current_message(master);
1165                         mutex_unlock(&master->bus_lock_mutex);
1166                         return;
1167                 }
1168                 master->cur_msg_prepared = true;
1169         }
1170
1171         ret = spi_map_msg(master, master->cur_msg);
1172         if (ret) {
1173                 master->cur_msg->status = ret;
1174                 spi_finalize_current_message(master);
1175                 mutex_unlock(&master->bus_lock_mutex);
1176                 return;
1177         }
1178
1179         ret = master->transfer_one_message(master, master->cur_msg);
1180         if (ret) {
1181                 dev_err(&master->dev,
1182                         "failed to transfer one message from queue\n");
1183                 mutex_unlock(&master->bus_lock_mutex);
1184                 return;
1185         }
1186         mutex_unlock(&master->bus_lock_mutex);
1187 }
1188
1189 /**
1190  * spi_pump_messages - kthread work function which processes spi message queue
1191  * @work: pointer to kthread work struct contained in the master struct
1192  */
1193 static void spi_pump_messages(struct kthread_work *work)
1194 {
1195         struct spi_master *master =
1196                 container_of(work, struct spi_master, pump_messages);
1197
1198         __spi_pump_messages(master, true);
1199 }
1200
1201 static int spi_init_queue(struct spi_master *master)
1202 {
1203         struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
1204
1205         master->running = false;
1206         master->busy = false;
1207
1208         init_kthread_worker(&master->kworker);
1209         master->kworker_task = kthread_run(kthread_worker_fn,
1210                                            &master->kworker, "%s",
1211                                            dev_name(&master->dev));
1212         if (IS_ERR(master->kworker_task)) {
1213                 dev_err(&master->dev, "failed to create message pump task\n");
1214                 return PTR_ERR(master->kworker_task);
1215         }
1216         init_kthread_work(&master->pump_messages, spi_pump_messages);
1217
1218         /*
1219          * Master config will indicate if this controller should run the
1220          * message pump with high (realtime) priority to reduce the transfer
1221          * latency on the bus by minimising the delay between a transfer
1222          * request and the scheduling of the message pump thread. Without this
1223          * setting the message pump thread will remain at default priority.
1224          */
1225         if (master->rt) {
1226                 dev_info(&master->dev,
1227                         "will run message pump with realtime priority\n");
1228                 sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
1229         }
1230
1231         return 0;
1232 }
1233
1234 /**
1235  * spi_get_next_queued_message() - called by driver to check for queued
1236  * messages
1237  * @master: the master to check for queued messages
1238  *
1239  * If there are more messages in the queue, the next message is returned from
1240  * this call.
1241  *
1242  * Return: the next message in the queue, else NULL if the queue is empty.
1243  */
1244 struct spi_message *spi_get_next_queued_message(struct spi_master *master)
1245 {
1246         struct spi_message *next;
1247         unsigned long flags;
1248
1249         /* get a pointer to the next message, if any */
1250         spin_lock_irqsave(&master->queue_lock, flags);
1251         next = list_first_entry_or_null(&master->queue, struct spi_message,
1252                                         queue);
1253         spin_unlock_irqrestore(&master->queue_lock, flags);
1254
1255         return next;
1256 }
1257 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
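/*
 * Illustrative sketch (not part of this file): a driver that implements
 * its own transfer_one_message() can peek at the queue to decide, for
 * instance, whether more work is pending before powering down; nothing is
 * dequeued by this call, and foo_enter_low_power() is a placeholder:
 *
 *	struct spi_message *next = spi_get_next_queued_message(master);
 *
 *	if (!next)
 *		foo_enter_low_power(master);
 */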
1258
1259 /**
1260  * spi_finalize_current_message() - the current message is complete
1261  * @master: the master to return the message to
1262  *
1263  * Called by the driver to notify the core that the message in the front of the
1264  * queue is complete and can be removed from the queue.
1265  */
1266 void spi_finalize_current_message(struct spi_master *master)
1267 {
1268         struct spi_message *mesg;
1269         unsigned long flags;
1270         int ret;
1271
1272         spin_lock_irqsave(&master->queue_lock, flags);
1273         mesg = master->cur_msg;
1274         spin_unlock_irqrestore(&master->queue_lock, flags);
1275
1276         spi_unmap_msg(master, mesg);
1277
1278         if (master->cur_msg_prepared && master->unprepare_message) {
1279                 ret = master->unprepare_message(master, mesg);
1280                 if (ret) {
1281                         dev_err(&master->dev,
1282                                 "failed to unprepare message: %d\n", ret);
1283                 }
1284         }
1285
1286         spin_lock_irqsave(&master->queue_lock, flags);
1287         master->cur_msg = NULL;
1288         master->cur_msg_prepared = false;
1289         queue_kthread_work(&master->kworker, &master->pump_messages);
1290         spin_unlock_irqrestore(&master->queue_lock, flags);
1291
1292         trace_spi_message_done(mesg);
1293
1294         mesg->state = NULL;
1295         if (mesg->complete)
1296                 mesg->complete(mesg->context);
1297 }
1298 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
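/*
 * Illustrative sketch (not part of this file): a driver that provides its
 * own transfer_one_message() (rather than transfer_one()) reports the
 * outcome in msg->status and then hands the message back to the core;
 * foo_do_transfers() is a placeholder:
 *
 *	static int foo_transfer_one_message(struct spi_master *master,
 *					    struct spi_message *msg)
 *	{
 *		int ret = foo_do_transfers(master, msg);
 *
 *		msg->status = ret;
 *		spi_finalize_current_message(master);
 *		return ret;
 *	}
 */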
1299
1300 static int spi_start_queue(struct spi_master *master)
1301 {
1302         unsigned long flags;
1303
1304         spin_lock_irqsave(&master->queue_lock, flags);
1305
1306         if (master->running || master->busy) {
1307                 spin_unlock_irqrestore(&master->queue_lock, flags);
1308                 return -EBUSY;
1309         }
1310
1311         master->running = true;
1312         master->cur_msg = NULL;
1313         spin_unlock_irqrestore(&master->queue_lock, flags);
1314
1315         queue_kthread_work(&master->kworker, &master->pump_messages);
1316
1317         return 0;
1318 }
1319
1320 static int spi_stop_queue(struct spi_master *master)
1321 {
1322         unsigned long flags;
1323         unsigned limit = 500;
1324         int ret = 0;
1325
1326         spin_lock_irqsave(&master->queue_lock, flags);
1327
1328         /*
1329          * This is a bit lame, but is optimized for the common execution path.
1330          * A wait_queue on the master->busy could be used, but then the common
1331          * execution path (pump_messages) would be required to call wake_up or
1332          * friends on every SPI message. Do this instead.
1333          */
1334         while ((!list_empty(&master->queue) || master->busy) && limit--) {
1335                 spin_unlock_irqrestore(&master->queue_lock, flags);
1336                 usleep_range(10000, 11000);
1337                 spin_lock_irqsave(&master->queue_lock, flags);
1338         }
1339
1340         if (!list_empty(&master->queue) || master->busy)
1341                 ret = -EBUSY;
1342         else
1343                 master->running = false;
1344
1345         spin_unlock_irqrestore(&master->queue_lock, flags);
1346
1347         if (ret) {
1348                 dev_warn(&master->dev,
1349                          "could not stop message queue\n");
1350                 return ret;
1351         }
1352         return ret;
1353 }
1354
1355 static int spi_destroy_queue(struct spi_master *master)
1356 {
1357         int ret;
1358
1359         ret = spi_stop_queue(master);
1360
1361         /*
1362          * flush_kthread_worker will block until all work is done.
1363          * If the reason that stop_queue timed out is that the work will never
1364          * finish, then it does no good to call flush/stop thread, so
1365          * return anyway.
1366          */
1367         if (ret) {
1368                 dev_err(&master->dev, "problem destroying queue\n");
1369                 return ret;
1370         }
1371
1372         flush_kthread_worker(&master->kworker);
1373         kthread_stop(master->kworker_task);
1374
1375         return 0;
1376 }
1377
1378 static int __spi_queued_transfer(struct spi_device *spi,
1379                                  struct spi_message *msg,
1380                                  bool need_pump)
1381 {
1382         struct spi_master *master = spi->master;
1383         unsigned long flags;
1384
1385         spin_lock_irqsave(&master->queue_lock, flags);
1386
1387         if (!master->running) {
1388                 spin_unlock_irqrestore(&master->queue_lock, flags);
1389                 return -ESHUTDOWN;
1390         }
1391         msg->actual_length = 0;
1392         msg->status = -EINPROGRESS;
1393
1394         list_add_tail(&msg->queue, &master->queue);
1395         if (!master->busy && need_pump)
1396                 queue_kthread_work(&master->kworker, &master->pump_messages);
1397
1398         spin_unlock_irqrestore(&master->queue_lock, flags);
1399         return 0;
1400 }
1401
1402 /**
1403  * spi_queued_transfer - transfer function for queued transfers
1404  * @spi: spi device which is requesting transfer
1405  * @msg: spi message to be handled; it is queued onto the driver queue
1406  *
1407  * Return: zero on success, else a negative error code.
1408  */
1409 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1410 {
1411         return __spi_queued_transfer(spi, msg, true);
1412 }
1413
1414 static int spi_master_initialize_queue(struct spi_master *master)
1415 {
1416         int ret;
1417
1418         master->transfer = spi_queued_transfer;
1419         if (!master->transfer_one_message)
1420                 master->transfer_one_message = spi_transfer_one_message;
1421
1422         /* Initialize and start queue */
1423         ret = spi_init_queue(master);
1424         if (ret) {
1425                 dev_err(&master->dev, "problem initializing queue\n");
1426                 goto err_init_queue;
1427         }
1428         master->queued = true;
1429         ret = spi_start_queue(master);
1430         if (ret) {
1431                 dev_err(&master->dev, "problem starting queue\n");
1432                 goto err_start_queue;
1433         }
1434
1435         return 0;
1436
1437 err_start_queue:
1438         spi_destroy_queue(master);
1439 err_init_queue:
1440         return ret;
1441 }
1442
1443 /*-------------------------------------------------------------------------*/
1444
1445 #if defined(CONFIG_OF)
1446 static struct spi_device *
1447 of_register_spi_device(struct spi_master *master, struct device_node *nc)
1448 {
1449         struct spi_device *spi;
1450         int rc;
1451         u32 value;
1452
1453         /* Alloc an spi_device */
1454         spi = spi_alloc_device(master);
1455         if (!spi) {
1456                 dev_err(&master->dev, "spi_device alloc error for %s\n",
1457                         nc->full_name);
1458                 rc = -ENOMEM;
1459                 goto err_out;
1460         }
1461
1462         /* Select device driver */
1463         rc = of_modalias_node(nc, spi->modalias,
1464                                 sizeof(spi->modalias));
1465         if (rc < 0) {
1466                 dev_err(&master->dev, "cannot find modalias for %s\n",
1467                         nc->full_name);
1468                 goto err_out;
1469         }
1470
1471         /* Device address */
1472         rc = of_property_read_u32(nc, "reg", &value);
1473         if (rc) {
1474                 dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
1475                         nc->full_name, rc);
1476                 goto err_out;
1477         }
1478         spi->chip_select = value;
1479
1480         /* Mode (clock phase/polarity/etc.) */
1481         if (of_find_property(nc, "spi-cpha", NULL))
1482                 spi->mode |= SPI_CPHA;
1483         if (of_find_property(nc, "spi-cpol", NULL))
1484                 spi->mode |= SPI_CPOL;
1485         if (of_find_property(nc, "spi-cs-high", NULL))
1486                 spi->mode |= SPI_CS_HIGH;
1487         if (of_find_property(nc, "spi-3wire", NULL))
1488                 spi->mode |= SPI_3WIRE;
1489         if (of_find_property(nc, "spi-lsb-first", NULL))
1490                 spi->mode |= SPI_LSB_FIRST;
1491
1492         /* Device DUAL/QUAD mode */
1493         if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1494                 switch (value) {
1495                 case 1:
1496                         break;
1497                 case 2:
1498                         spi->mode |= SPI_TX_DUAL;
1499                         break;
1500                 case 4:
1501                         spi->mode |= SPI_TX_QUAD;
1502                         break;
1503                 default:
1504                         dev_warn(&master->dev,
1505                                 "spi-tx-bus-width %d not supported\n",
1506                                 value);
1507                         break;
1508                 }
1509         }
1510
1511         if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1512                 switch (value) {
1513                 case 1:
1514                         break;
1515                 case 2:
1516                         spi->mode |= SPI_RX_DUAL;
1517                         break;
1518                 case 4:
1519                         spi->mode |= SPI_RX_QUAD;
1520                         break;
1521                 default:
1522                         dev_warn(&master->dev,
1523                                 "spi-rx-bus-width %d not supported\n",
1524                                 value);
1525                         break;
1526                 }
1527         }
1528
1529         /* Device speed */
1530         rc = of_property_read_u32(nc, "spi-max-frequency", &value);
1531         if (rc) {
1532                 dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
1533                         nc->full_name, rc);
1534                 goto err_out;
1535         }
1536         spi->max_speed_hz = value;
1537
1538         /* Store a pointer to the node in the device structure */
1539         of_node_get(nc);
1540         spi->dev.of_node = nc;
1541
1542         /* Register the new device */
1543         rc = spi_add_device(spi);
1544         if (rc) {
1545                 dev_err(&master->dev, "spi_device register error %s\n",
1546                         nc->full_name);
1547                 goto err_out;
1548         }
1549
1550         return spi;
1551
1552 err_out:
1553         spi_dev_put(spi);
1554         return ERR_PTR(rc);
1555 }
1556
1557 /**
1558  * of_register_spi_devices() - Register child devices onto the SPI bus
1559  * @master:     Pointer to spi_master device
1560  *
1561  * Registers an spi_device for each child node of the master node which has
1562  * a 'reg' property.
1563  */
1564 static void of_register_spi_devices(struct spi_master *master)
1565 {
1566         struct spi_device *spi;
1567         struct device_node *nc;
1568
1569         if (!master->dev.of_node)
1570                 return;
1571
1572         for_each_available_child_of_node(master->dev.of_node, nc) {
1573                 if (of_node_test_and_set_flag(nc, OF_POPULATED))
1574                         continue;
1575                 spi = of_register_spi_device(master, nc);
1576                 if (IS_ERR(spi))
1577                         dev_warn(&master->dev, "Failed to create SPI device for %s\n",
1578                                 nc->full_name);
1579         }
1580 }
1581 #else
1582 static void of_register_spi_devices(struct spi_master *master) { }
1583 #endif
1584
1585 #ifdef CONFIG_ACPI
1586 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1587 {
1588         struct spi_device *spi = data;
1589
1590         if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
1591                 struct acpi_resource_spi_serialbus *sb;
1592
1593                 sb = &ares->data.spi_serial_bus;
1594                 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
1595                         spi->chip_select = sb->device_selection;
1596                         spi->max_speed_hz = sb->connection_speed;
1597
1598                         if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
1599                                 spi->mode |= SPI_CPHA;
1600                         if (sb->clock_polarity == ACPI_SPI_START_HIGH)
1601                                 spi->mode |= SPI_CPOL;
1602                         if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
1603                                 spi->mode |= SPI_CS_HIGH;
1604                 }
1605         } else if (spi->irq < 0) {
1606                 struct resource r;
1607
1608                 if (acpi_dev_resource_interrupt(ares, 0, &r))
1609                         spi->irq = r.start;
1610         }
1611
1612         /* Always tell the ACPI core to skip this resource */
1613         return 1;
1614 }
1615
1616 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
1617                                        void *data, void **return_value)
1618 {
1619         struct spi_master *master = data;
1620         struct list_head resource_list;
1621         struct acpi_device *adev;
1622         struct spi_device *spi;
1623         int ret;
1624
1625         if (acpi_bus_get_device(handle, &adev))
1626                 return AE_OK;
1627         if (acpi_bus_get_status(adev) || !adev->status.present)
1628                 return AE_OK;
1629
1630         spi = spi_alloc_device(master);
1631         if (!spi) {
1632                 dev_err(&master->dev, "failed to allocate SPI device for %s\n",
1633                         dev_name(&adev->dev));
1634                 return AE_NO_MEMORY;
1635         }
1636
1637         ACPI_COMPANION_SET(&spi->dev, adev);
1638         spi->irq = -1;
1639
1640         INIT_LIST_HEAD(&resource_list);
1641         ret = acpi_dev_get_resources(adev, &resource_list,
1642                                      acpi_spi_add_resource, spi);
1643         acpi_dev_free_resource_list(&resource_list);
1644
1645         if (ret < 0 || !spi->max_speed_hz) {
1646                 spi_dev_put(spi);
1647                 return AE_OK;
1648         }
1649
1650         if (spi->irq < 0)
1651                 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
1652
1653         adev->power.flags.ignore_parent = true;
1654         strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
1655         if (spi_add_device(spi)) {
1656                 adev->power.flags.ignore_parent = false;
1657                 dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
1658                         dev_name(&adev->dev));
1659                 spi_dev_put(spi);
1660         }
1661
1662         return AE_OK;
1663 }
1664
1665 static void acpi_register_spi_devices(struct spi_master *master)
1666 {
1667         acpi_status status;
1668         acpi_handle handle;
1669
1670         handle = ACPI_HANDLE(master->dev.parent);
1671         if (!handle)
1672                 return;
1673
1674         status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1675                                      acpi_spi_add_device, NULL,
1676                                      master, NULL);
1677         if (ACPI_FAILURE(status))
1678                 dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
1679 }
1680 #else
1681 static inline void acpi_register_spi_devices(struct spi_master *master) {}
1682 #endif /* CONFIG_ACPI */
1683
1684 static void spi_master_release(struct device *dev)
1685 {
1686         struct spi_master *master;
1687
1688         master = container_of(dev, struct spi_master, dev);
1689         kfree(master);
1690 }
1691
1692 static struct class spi_master_class = {
1693         .name           = "spi_master",
1694         .owner          = THIS_MODULE,
1695         .dev_release    = spi_master_release,
1696         .dev_groups     = spi_master_groups,
1697 };
1698
1699
1700 /**
1701  * spi_alloc_master - allocate SPI master controller
1702  * @dev: the controller, possibly using the platform_bus
1703  * @size: how much zeroed driver-private data to allocate; the pointer to this
1704  *      memory is in the driver_data field of the returned device,
1705  *      accessible with spi_master_get_devdata().
1706  * Context: can sleep
1707  *
1708  * This call is used only by SPI master controller drivers, which are the
1709  * only ones directly touching chip registers.  It's how they allocate
1710  * an spi_master structure, prior to calling spi_register_master().
1711  *
1712  * This must be called from context that can sleep.
1713  *
1714  * The caller is responsible for assigning the bus number and initializing
1715  * the master's methods before calling spi_register_master(); and (after errors
1716  * adding the device) calling spi_master_put() to prevent a memory leak.
1717  *
1718  * Return: the SPI master structure on success, else NULL.
1719  */
1720 struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
1721 {
1722         struct spi_master       *master;
1723
1724         if (!dev)
1725                 return NULL;
1726
1727         master = kzalloc(size + sizeof(*master), GFP_KERNEL);
1728         if (!master)
1729                 return NULL;
1730
1731         device_initialize(&master->dev);
1732         master->bus_num = -1;
1733         master->num_chipselect = 1;
1734         master->dev.class = &spi_master_class;
1735         master->dev.parent = dev;
1736         spi_master_set_devdata(master, &master[1]);
1737
1738         return master;
1739 }
1740 EXPORT_SYMBOL_GPL(spi_alloc_master);
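
/*
 * Illustrative sketch, not part of this file: how a controller driver's
 * probe() might use spi_alloc_master() and spi_master_get_devdata() as
 * described above.  The "foo" names and the chipselect/mode values are
 * hypothetical; registration itself is shown after spi_register_master()
 * below.
 */
struct foo_ctlr {
	u32	max_hz;
};

static int foo_alloc(struct device *dev)
{
	struct spi_master	*master;
	struct foo_ctlr		*fc;

	master = spi_alloc_master(dev, sizeof(*fc));
	if (!master)
		return -ENOMEM;

	/* driver-private data sits right behind the spi_master */
	fc = spi_master_get_devdata(master);
	fc->max_hz = 10000000;

	master->num_chipselect = 2;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_MASK(8);
	/* assign setup()/transfer_one_message() before registering */

	dev_set_drvdata(dev, master);
	return 0;
}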
1741
1742 #ifdef CONFIG_OF
1743 static int of_spi_register_master(struct spi_master *master)
1744 {
1745         int nb, i, *cs;
1746         struct device_node *np = master->dev.of_node;
1747
1748         if (!np)
1749                 return 0;
1750
1751         nb = of_gpio_named_count(np, "cs-gpios");
1752         master->num_chipselect = max_t(int, nb, master->num_chipselect);
1753
1754         /* Return error only for an incorrectly formed cs-gpios property */
1755         if (nb == 0 || nb == -ENOENT)
1756                 return 0;
1757         else if (nb < 0)
1758                 return nb;
1759
1760         cs = devm_kzalloc(&master->dev,
1761                           sizeof(int) * master->num_chipselect,
1762                           GFP_KERNEL);
1763         master->cs_gpios = cs;
1764
1765         if (!master->cs_gpios)
1766                 return -ENOMEM;
1767
1768         for (i = 0; i < master->num_chipselect; i++)
1769                 cs[i] = -ENOENT;
1770
1771         for (i = 0; i < nb; i++)
1772                 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
1773
1774         return 0;
1775 }
1776 #else
1777 static int of_spi_register_master(struct spi_master *master)
1778 {
1779         return 0;
1780 }
1781 #endif
1782
1783 /**
1784  * spi_register_master - register SPI master controller
1785  * @master: initialized master, originally from spi_alloc_master()
1786  * Context: can sleep
1787  *
1788  * SPI master controllers connect to their drivers using some non-SPI bus,
1789  * such as the platform bus.  The final stage of probe() in that code
1790  * includes calling spi_register_master() to hook up to this SPI bus glue.
1791  *
1792  * SPI controllers use board specific (often SOC specific) bus numbers,
1793  * and board-specific addressing for SPI devices combines those numbers
1794  * with chip select numbers.  Since SPI does not directly support dynamic
1795  * device identification, boards need configuration tables telling which
1796  * chip is at which address.
1797  *
1798  * This must be called from context that can sleep.  It returns zero on
1799  * success, else a negative error code (dropping the master's refcount).
1800  * After a successful return, the caller is responsible for calling
1801  * spi_unregister_master().
1802  *
1803  * Return: zero on success, else a negative error code.
1804  */
1805 int spi_register_master(struct spi_master *master)
1806 {
1807         static atomic_t         dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
1808         struct device           *dev = master->dev.parent;
1809         struct boardinfo        *bi;
1810         int                     status = -ENODEV;
1811         int                     dynamic = 0;
1812
1813         if (!dev)
1814                 return -ENODEV;
1815
1816         status = of_spi_register_master(master);
1817         if (status)
1818                 return status;
1819
1820         /* even if it's just one always-selected device, there must
1821          * be at least one chipselect
1822          */
1823         if (master->num_chipselect == 0)
1824                 return -EINVAL;
1825
1826         if ((master->bus_num < 0) && master->dev.of_node)
1827                 master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
1828
1829         /* convention:  dynamically assigned bus IDs count down from the max */
1830         if (master->bus_num < 0) {
1831                 /* FIXME switch to an IDR based scheme, something like
1832                  * I2C now uses, so we can't run out of "dynamic" IDs
1833                  */
1834                 master->bus_num = atomic_dec_return(&dyn_bus_id);
1835                 dynamic = 1;
1836         }
1837
1838         INIT_LIST_HEAD(&master->queue);
1839         spin_lock_init(&master->queue_lock);
1840         spin_lock_init(&master->bus_lock_spinlock);
1841         mutex_init(&master->bus_lock_mutex);
1842         master->bus_lock_flag = 0;
1843         init_completion(&master->xfer_completion);
1844         if (!master->max_dma_len)
1845                 master->max_dma_len = INT_MAX;
1846
1847         /* register the device, then userspace will see it.
1848          * registration fails if the bus ID is in use.
1849          */
1850         dev_set_name(&master->dev, "spi%u", master->bus_num);
1851         status = device_add(&master->dev);
1852         if (status < 0)
1853                 goto done;
1854         dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
1855                         dynamic ? " (dynamic)" : "");
1856
1857         /* If we're using a queued driver, start the queue */
1858         if (master->transfer)
1859                 dev_info(dev, "master is unqueued, this is deprecated\n");
1860         else {
1861                 status = spi_master_initialize_queue(master);
1862                 if (status) {
1863                         device_del(&master->dev);
1864                         goto done;
1865                 }
1866         }
1867         /* add statistics */
1868         spin_lock_init(&master->statistics.lock);
1869
1870         mutex_lock(&board_lock);
1871         list_add_tail(&master->list, &spi_master_list);
1872         list_for_each_entry(bi, &board_list, list)
1873                 spi_match_master_to_boardinfo(master, &bi->board_info);
1874         mutex_unlock(&board_lock);
1875
1876         /* Register devices from the device tree and ACPI */
1877         of_register_spi_devices(master);
1878         acpi_register_spi_devices(master);
1879 done:
1880         return status;
1881 }
1882 EXPORT_SYMBOL_GPL(spi_register_master);
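
/*
 * Illustrative sketch, not part of this file: the register/unregister
 * pairing described above, with the error path dropping the reference
 * from spi_alloc_master() via spi_master_put().  The "foo" helpers are
 * hypothetical.
 */
static int foo_attach(struct device *dev, struct spi_master *master)
{
	int ret;

	ret = spi_register_master(master);
	if (ret) {
		spi_master_put(master);	/* registration failed */
		return ret;
	}

	dev_set_drvdata(dev, master);
	return 0;
}

static void foo_detach(struct device *dev)
{
	spi_unregister_master(dev_get_drvdata(dev));
}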
1883
1884 static void devm_spi_unregister(struct device *dev, void *res)
1885 {
1886         spi_unregister_master(*(struct spi_master **)res);
1887 }
1888
1889 /**
1890  * devm_spi_register_master - register managed SPI master controller
1891  * @dev:    device managing SPI master
1892  * @master: initialized master, originally from spi_alloc_master()
1893  * Context: can sleep
1894  *
1895  * Register a SPI master as with spi_register_master(), which will
1896  * automatically be unregistered when @dev is unbound.
1897  *
1898  * Return: zero on success, else a negative error code.
1899  */
1900 int devm_spi_register_master(struct device *dev, struct spi_master *master)
1901 {
1902         struct spi_master **ptr;
1903         int ret;
1904
1905         ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
1906         if (!ptr)
1907                 return -ENOMEM;
1908
1909         ret = spi_register_master(master);
1910         if (!ret) {
1911                 *ptr = master;
1912                 devres_add(dev, ptr);
1913         } else {
1914                 devres_free(ptr);
1915         }
1916
1917         return ret;
1918 }
1919 EXPORT_SYMBOL_GPL(devm_spi_register_master);
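
/*
 * Illustrative sketch, not part of this file: the devres-managed variant
 * of the pairing shown above.  No explicit unregister is needed; it
 * happens automatically when @dev is unbound.  "foo_attach_managed" is
 * hypothetical.
 */
static int foo_attach_managed(struct device *dev, struct spi_master *master)
{
	int ret;

	ret = devm_spi_register_master(dev, master);
	if (ret)
		spi_master_put(master);	/* drop the spi_alloc_master() reference */

	return ret;
}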
1920
1921 static int __unregister(struct device *dev, void *null)
1922 {
1923         spi_unregister_device(to_spi_device(dev));
1924         return 0;
1925 }
1926
1927 /**
1928  * spi_unregister_master - unregister SPI master controller
1929  * @master: the master being unregistered
1930  * Context: can sleep
1931  *
1932  * This call is used only by SPI master controller drivers, which are the
1933  * only ones directly touching chip registers.
1934  *
1935  * This must be called from context that can sleep.
1936  */
1937 void spi_unregister_master(struct spi_master *master)
1938 {
1939         int dummy;
1940
1941         if (master->queued) {
1942                 if (spi_destroy_queue(master))
1943                         dev_err(&master->dev, "queue remove failed\n");
1944         }
1945
1946         mutex_lock(&board_lock);
1947         list_del(&master->list);
1948         mutex_unlock(&board_lock);
1949
1950         dummy = device_for_each_child(&master->dev, NULL, __unregister);
1951         device_unregister(&master->dev);
1952 }
1953 EXPORT_SYMBOL_GPL(spi_unregister_master);
1954
1955 int spi_master_suspend(struct spi_master *master)
1956 {
1957         int ret;
1958
1959         /* Basically no-ops for non-queued masters */
1960         if (!master->queued)
1961                 return 0;
1962
1963         ret = spi_stop_queue(master);
1964         if (ret)
1965                 dev_err(&master->dev, "queue stop failed\n");
1966
1967         return ret;
1968 }
1969 EXPORT_SYMBOL_GPL(spi_master_suspend);
1970
1971 int spi_master_resume(struct spi_master *master)
1972 {
1973         int ret;
1974
1975         if (!master->queued)
1976                 return 0;
1977
1978         ret = spi_start_queue(master);
1979         if (ret)
1980                 dev_err(&master->dev, "queue restart failed\n");
1981
1982         return ret;
1983 }
1984 EXPORT_SYMBOL_GPL(spi_master_resume);
1985
1986 static int __spi_master_match(struct device *dev, const void *data)
1987 {
1988         struct spi_master *m;
1989         const u16 *bus_num = data;
1990
1991         m = container_of(dev, struct spi_master, dev);
1992         return m->bus_num == *bus_num;
1993 }
1994
1995 /**
1996  * spi_busnum_to_master - look up master associated with bus_num
1997  * @bus_num: the master's bus number
1998  * Context: can sleep
1999  *
2000  * This call may be used with devices that are registered after
2001  * arch init time.  It returns a refcounted pointer to the relevant
2002  * spi_master (which the caller must release), or NULL if there is
2003  * no such master registered.
2004  *
2005  * Return: the SPI master structure on success, else NULL.
2006  */
2007 struct spi_master *spi_busnum_to_master(u16 bus_num)
2008 {
2009         struct device           *dev;
2010         struct spi_master       *master = NULL;
2011
2012         dev = class_find_device(&spi_master_class, NULL, &bus_num,
2013                                 __spi_master_match);
2014         if (dev)
2015                 master = container_of(dev, struct spi_master, dev);
2016         /* reference obtained in class_find_device */
2017         return master;
2018 }
2019 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
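
/*
 * Illustrative sketch, not part of this file: looking up a master by bus
 * number and releasing the reference the lookup took.  Bus number 0 is
 * just an example value.
 */
static void foo_report_bus0(void)
{
	struct spi_master *master;

	master = spi_busnum_to_master(0);
	if (!master)
		return;

	pr_info("spi0 has %u chipselects\n", master->num_chipselect);
	spi_master_put(master);	/* release the class_find_device reference */
}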
2020
2021
2022 /*-------------------------------------------------------------------------*/
2023
2024 /* Core methods for SPI master protocol drivers.  Some of the
2025  * other core methods are currently defined as inline functions.
2026  */
2027
2028 static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
2029 {
2030         if (master->bits_per_word_mask) {
2031                 /* Only 32 bits fit in the mask */
2032                 if (bits_per_word > 32)
2033                         return -EINVAL;
2034                 if (!(master->bits_per_word_mask &
2035                                 SPI_BPW_MASK(bits_per_word)))
2036                         return -EINVAL;
2037         }
2038
2039         return 0;
2040 }
2041
2042 /**
2043  * spi_setup - setup SPI mode and clock rate
2044  * @spi: the device whose settings are being modified
2045  * Context: can sleep, and no requests are queued to the device
2046  *
2047  * SPI protocol drivers may need to update the transfer mode if the
2048  * device doesn't work with its default.  They may likewise need
2049  * to update clock rates or word sizes from initial values.  This function
2050  * changes those settings, and must be called from a context that can sleep.
2051  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
2052  * effect the next time the device is selected and data is transferred to
2053  * or from it.  When this function returns, the spi device is deselected.
2054  *
2055  * Note that this call will fail if the protocol driver specifies an option
2056  * that the underlying controller or its driver does not support.  For
2057  * example, not all hardware supports wire transfers using nine bit words,
2058  * LSB-first wire encoding, or active-high chipselects.
2059  *
2060  * Return: zero on success, else a negative error code.
2061  */
2062 int spi_setup(struct spi_device *spi)
2063 {
2064         unsigned        bad_bits, ugly_bits;
2065         int             status;
2066
2067         /* check mode to prevent DUAL and QUAD from being set at the same time
2068          */
2069         if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
2070                 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
2071                 dev_err(&spi->dev,
2072                 "setup: cannot select dual and quad at the same time\n");
2073                 return -EINVAL;
2074         }
2075         /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
2076          */
2077         if ((spi->mode & SPI_3WIRE) && (spi->mode &
2078                 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
2079                 return -EINVAL;
2080         /* help drivers fail *cleanly* when they need options
2081          * that aren't supported with their current master
2082          */
2083         bad_bits = spi->mode & ~spi->master->mode_bits;
2084         ugly_bits = bad_bits &
2085                     (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
2086         if (ugly_bits) {
2087                 dev_warn(&spi->dev,
2088                          "setup: ignoring unsupported mode bits %x\n",
2089                          ugly_bits);
2090                 spi->mode &= ~ugly_bits;
2091                 bad_bits &= ~ugly_bits;
2092         }
2093         if (bad_bits) {
2094                 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
2095                         bad_bits);
2096                 return -EINVAL;
2097         }
2098
2099         if (!spi->bits_per_word)
2100                 spi->bits_per_word = 8;
2101
2102         status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
2103         if (status)
2104                 return status;
2105
2106         if (!spi->max_speed_hz)
2107                 spi->max_speed_hz = spi->master->max_speed_hz;
2108
2109         if (spi->master->setup)
2110                 status = spi->master->setup(spi);
2111
2112         spi_set_cs(spi, false);
2113
2114         dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
2115                         (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
2116                         (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
2117                         (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
2118                         (spi->mode & SPI_3WIRE) ? "3wire, " : "",
2119                         (spi->mode & SPI_LOOP) ? "loopback, " : "",
2120                         spi->bits_per_word, spi->max_speed_hz,
2121                         status);
2122
2123         return status;
2124 }
2125 EXPORT_SYMBOL_GPL(spi_setup);
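
/*
 * Illustrative sketch, not part of this file: a protocol driver's
 * probe() adjusting mode, word size and clock rate before the first
 * transfer, per the spi_setup() description above.  The values chosen
 * here are hypothetical.
 */
static int foo_chip_probe(struct spi_device *spi)
{
	spi->mode = SPI_MODE_3;
	spi->bits_per_word = 8;
	spi->max_speed_hz = 1000000;

	return spi_setup(spi);
}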
2126
2127 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2128 {
2129         struct spi_master *master = spi->master;
2130         struct spi_transfer *xfer;
2131         int w_size;
2132
2133         if (list_empty(&message->transfers))
2134                 return -EINVAL;
2135
2136         /* Half-duplex links include original MicroWire, and ones with
2137          * only one data pin like SPI_3WIRE (switches direction) or where
2138          * either MOSI or MISO is missing.  They can also be caused by
2139          * software limitations.
2140          */
2141         if ((master->flags & SPI_MASTER_HALF_DUPLEX)
2142                         || (spi->mode & SPI_3WIRE)) {
2143                 unsigned flags = master->flags;
2144
2145                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
2146                         if (xfer->rx_buf && xfer->tx_buf)
2147                                 return -EINVAL;
2148                         if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
2149                                 return -EINVAL;
2150                         if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
2151                                 return -EINVAL;
2152                 }
2153         }
2154
2155         /*
2156          * Set transfer bits_per_word and max speed as spi device default if
2157          * it is not set for this transfer.
2158          * Set transfer tx_nbits and rx_nbits as single transfer default
2159          * (SPI_NBITS_SINGLE) if it is not set for this transfer.
2160          */
2161         message->frame_length = 0;
2162         list_for_each_entry(xfer, &message->transfers, transfer_list) {
2163                 message->frame_length += xfer->len;
2164                 if (!xfer->bits_per_word)
2165                         xfer->bits_per_word = spi->bits_per_word;
2166
2167                 if (!xfer->speed_hz)
2168                         xfer->speed_hz = spi->max_speed_hz;
2169                 if (!xfer->speed_hz)
2170                         xfer->speed_hz = master->max_speed_hz;
2171
2172                 if (master->max_speed_hz &&
2173                     xfer->speed_hz > master->max_speed_hz)
2174                         xfer->speed_hz = master->max_speed_hz;
2175
2176                 if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
2177                         return -EINVAL;
2178
2179                 /*
2180                  * SPI transfer length should be multiple of SPI word size
2181                  * where SPI word size should be power-of-two multiple
2182                  */
2183                 if (xfer->bits_per_word <= 8)
2184                         w_size = 1;
2185                 else if (xfer->bits_per_word <= 16)
2186                         w_size = 2;
2187                 else
2188                         w_size = 4;
2189
2190                 /* No partial transfers accepted */
2191                 if (xfer->len % w_size)
2192                         return -EINVAL;
2193
2194                 if (xfer->speed_hz && master->min_speed_hz &&
2195                     xfer->speed_hz < master->min_speed_hz)
2196                         return -EINVAL;
2197
2198                 if (xfer->tx_buf && !xfer->tx_nbits)
2199                         xfer->tx_nbits = SPI_NBITS_SINGLE;
2200                 if (xfer->rx_buf && !xfer->rx_nbits)
2201                         xfer->rx_nbits = SPI_NBITS_SINGLE;
2202                 /* check transfer tx/rx_nbits:
2203                  * 1. check the value matches one of single, dual and quad
2204                  * 2. check tx/rx_nbits match the mode in spi_device
2205                  */
2206                 if (xfer->tx_buf) {
2207                         if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
2208                                 xfer->tx_nbits != SPI_NBITS_DUAL &&
2209                                 xfer->tx_nbits != SPI_NBITS_QUAD)
2210                                 return -EINVAL;
2211                         if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
2212                                 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2213                                 return -EINVAL;
2214                         if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
2215                                 !(spi->mode & SPI_TX_QUAD))
2216                                 return -EINVAL;
2217                 }
2218                 /* check transfer rx_nbits */
2219                 if (xfer->rx_buf) {
2220                         if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
2221                                 xfer->rx_nbits != SPI_NBITS_DUAL &&
2222                                 xfer->rx_nbits != SPI_NBITS_QUAD)
2223                                 return -EINVAL;
2224                         if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
2225                                 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2226                                 return -EINVAL;
2227                         if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
2228                                 !(spi->mode & SPI_RX_QUAD))
2229                                 return -EINVAL;
2230                 }
2231         }
2232
2233         message->status = -EINPROGRESS;
2234
2235         return 0;
2236 }
2237
2238 static int __spi_async(struct spi_device *spi, struct spi_message *message)
2239 {
2240         struct spi_master *master = spi->master;
2241
2242         message->spi = spi;
2243
2244         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
2245         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
2246
2247         trace_spi_message_submit(message);
2248
2249         return master->transfer(spi, message);
2250 }
2251
2252 /**
2253  * spi_async - asynchronous SPI transfer
2254  * @spi: device with which data will be exchanged
2255  * @message: describes the data transfers, including completion callback
2256  * Context: any (irqs may be blocked, etc)
2257  *
2258  * This call may be used in_irq and other contexts which can't sleep,
2259  * as well as from task contexts which can sleep.
2260  *
2261  * The completion callback is invoked in a context which can't sleep.
2262  * Before that invocation, the value of message->status is undefined.
2263  * When the callback is issued, message->status holds either zero (to
2264  * indicate complete success) or a negative error code.  After that
2265  * callback returns, the driver which issued the transfer request may
2266  * deallocate the associated memory; it's no longer in use by any SPI
2267  * core or controller driver code.
2268  *
2269  * Note that although all messages to a spi_device are handled in
2270  * FIFO order, messages may go to different devices in other orders.
2271  * Some device might be higher priority, or have various "hard" access
2272  * time requirements, for example.
2273  *
2274  * On detection of any fault during the transfer, processing of
2275  * the entire message is aborted, and the device is deselected.
2276  * Until returning from the associated message completion callback,
2277  * no other spi_message queued to that device will be processed.
2278  * (This rule applies equally to all the synchronous transfer calls,
2279  * which are wrappers around this core asynchronous primitive.)
2280  *
2281  * Return: zero on success, else a negative error code.
2282  */
2283 int spi_async(struct spi_device *spi, struct spi_message *message)
2284 {
2285         struct spi_master *master = spi->master;
2286         int ret;
2287         unsigned long flags;
2288
2289         ret = __spi_validate(spi, message);
2290         if (ret != 0)
2291                 return ret;
2292
2293         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2294
2295         if (master->bus_lock_flag)
2296                 ret = -EBUSY;
2297         else
2298                 ret = __spi_async(spi, message);
2299
2300         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2301
2302         return ret;
2303 }
2304 EXPORT_SYMBOL_GPL(spi_async);
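
/*
 * Illustrative sketch, not part of this file: one way a protocol driver
 * might submit a message with spi_async() and pick up the result in the
 * completion callback, which runs in a context that cannot sleep.  The
 * "foo_request" structure and buffer size are hypothetical; the request
 * (and thus the message and transfer) must stay allocated until the
 * callback has run, e.g. by kmalloc()ing it.
 */
struct foo_request {
	struct spi_message	msg;
	struct spi_transfer	xfer;
	u8			rx[4];
};

static void foo_async_complete(void *context)
{
	struct foo_request *req = context;

	if (req->msg.status)
		pr_err("foo: async transfer failed: %d\n", req->msg.status);
	/* hand req->rx off to whoever is waiting for it, then free req */
}

static int foo_start_async_read(struct spi_device *spi,
				struct foo_request *req)
{
	memset(&req->xfer, 0, sizeof(req->xfer));
	req->xfer.rx_buf = req->rx;
	req->xfer.len = sizeof(req->rx);

	spi_message_init(&req->msg);
	spi_message_add_tail(&req->xfer, &req->msg);
	req->msg.complete = foo_async_complete;
	req->msg.context = req;

	return spi_async(spi, &req->msg);
}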
2305
2306 /**
2307  * spi_async_locked - version of spi_async with exclusive bus usage
2308  * @spi: device with which data will be exchanged
2309  * @message: describes the data transfers, including completion callback
2310  * Context: any (irqs may be blocked, etc)
2311  *
2312  * This call may be used in_irq and other contexts which can't sleep,
2313  * as well as from task contexts which can sleep.
2314  *
2315  * The completion callback is invoked in a context which can't sleep.
2316  * Before that invocation, the value of message->status is undefined.
2317  * When the callback is issued, message->status holds either zero (to
2318  * indicate complete success) or a negative error code.  After that
2319  * callback returns, the driver which issued the transfer request may
2320  * deallocate the associated memory; it's no longer in use by any SPI
2321  * core or controller driver code.
2322  *
2323  * Note that although all messages to a spi_device are handled in
2324  * FIFO order, messages may go to different devices in other orders.
2325  * Some device might be higher priority, or have various "hard" access
2326  * time requirements, for example.
2327  *
2328  * On detection of any fault during the transfer, processing of
2329  * the entire message is aborted, and the device is deselected.
2330  * Until returning from the associated message completion callback,
2331  * no other spi_message queued to that device will be processed.
2332  * (This rule applies equally to all the synchronous transfer calls,
2333  * which are wrappers around this core asynchronous primitive.)
2334  *
2335  * Return: zero on success, else a negative error code.
2336  */
2337 int spi_async_locked(struct spi_device *spi, struct spi_message *message)
2338 {
2339         struct spi_master *master = spi->master;
2340         int ret;
2341         unsigned long flags;
2342
2343         ret = __spi_validate(spi, message);
2344         if (ret != 0)
2345                 return ret;
2346
2347         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2348
2349         ret = __spi_async(spi, message);
2350
2351         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2352
2353         return ret;
2354
2355 }
2356 EXPORT_SYMBOL_GPL(spi_async_locked);
2357
2358
2359 int spi_flash_read(struct spi_device *spi,
2360                    struct spi_flash_read_message *msg)
2361
2362 {
2363         struct spi_master *master = spi->master;
2364         int ret;
2365
2366         if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
2367              msg->addr_nbits == SPI_NBITS_DUAL) &&
2368             !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2369                 return -EINVAL;
2370         if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
2371              msg->addr_nbits == SPI_NBITS_QUAD) &&
2372             !(spi->mode & SPI_TX_QUAD))
2373                 return -EINVAL;
2374         if (msg->data_nbits == SPI_NBITS_DUAL &&
2375             !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2376                 return -EINVAL;
2377         if (msg->data_nbits == SPI_NBITS_QUAD &&
2378             !(spi->mode &  SPI_RX_QUAD))
2379                 return -EINVAL;
2380
2381         if (master->auto_runtime_pm) {
2382                 ret = pm_runtime_get_sync(master->dev.parent);
2383                 if (ret < 0) {
2384                         dev_err(&master->dev, "Failed to power device: %d\n",
2385                                 ret);
2386                         return ret;
2387                 }
2388         }
2389         mutex_lock(&master->bus_lock_mutex);
2390         ret = master->spi_flash_read(spi, msg);
2391         mutex_unlock(&master->bus_lock_mutex);
2392         if (master->auto_runtime_pm)
2393                 pm_runtime_put(master->dev.parent);
2394
2395         return ret;
2396 }
2397 EXPORT_SYMBOL_GPL(spi_flash_read);
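
/*
 * Illustrative sketch, not part of this file: how an m25p80-style flash
 * driver might fill in a spi_flash_read_message before calling
 * spi_flash_read().  The field names are assumed from this patch series,
 * and the opcode/address/dummy values are made-up examples for a
 * quad-read capable part.
 */
static int foo_flash_read(struct spi_device *spi, loff_t from, size_t len,
			  size_t *retlen, u8 *buf)
{
	struct spi_flash_read_message msg;
	int ret;

	memset(&msg, 0, sizeof(msg));
	msg.buf = buf;
	msg.from = from;
	msg.len = len;
	msg.read_opcode = 0x6b;		/* quad output fast read (example) */
	msg.addr_width = 3;
	msg.dummy_bytes = 1;
	msg.opcode_nbits = SPI_NBITS_SINGLE;
	msg.addr_nbits = SPI_NBITS_SINGLE;
	msg.data_nbits = SPI_NBITS_QUAD;

	ret = spi_flash_read(spi, &msg);
	if (!ret)
		*retlen = msg.retlen;

	return ret;
}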
2398
2399 /*-------------------------------------------------------------------------*/
2400
2401 /* Utility methods for SPI master protocol drivers, layered on
2402  * top of the core.  Some other utility methods are defined as
2403  * inline functions.
2404  */
2405
2406 static void spi_complete(void *arg)
2407 {
2408         complete(arg);
2409 }
2410
2411 static int __spi_sync(struct spi_device *spi, struct spi_message *message,
2412                       int bus_locked)
2413 {
2414         DECLARE_COMPLETION_ONSTACK(done);
2415         int status;
2416         struct spi_master *master = spi->master;
2417         unsigned long flags;
2418
2419         status = __spi_validate(spi, message);
2420         if (status != 0)
2421                 return status;
2422
2423         message->complete = spi_complete;
2424         message->context = &done;
2425         message->spi = spi;
2426
2427         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
2428         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
2429
2430         if (!bus_locked)
2431                 mutex_lock(&master->bus_lock_mutex);
2432
2433         /* If we're not using the legacy transfer method then we will
2434          * try to transfer in the calling context, so special-case it.
2435          * This code would be less tricky if we could remove the
2436          * support for driver implemented message queues.
2437          */
2438         if (master->transfer == spi_queued_transfer) {
2439                 spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2440
2441                 trace_spi_message_submit(message);
2442
2443                 status = __spi_queued_transfer(spi, message, false);
2444
2445                 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2446         } else {
2447                 status = spi_async_locked(spi, message);
2448         }
2449
2450         if (!bus_locked)
2451                 mutex_unlock(&master->bus_lock_mutex);
2452
2453         if (status == 0) {
2454                 /* Push out the messages in the calling context if we
2455                  * can.
2456                  */
2457                 if (master->transfer == spi_queued_transfer) {
2458                         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
2459                                                        spi_sync_immediate);
2460                         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
2461                                                        spi_sync_immediate);
2462                         __spi_pump_messages(master, false);
2463                 }
2464
2465                 wait_for_completion(&done);
2466                 status = message->status;
2467         }
2468         message->context = NULL;
2469         return status;
2470 }
2471
2472 /**
2473  * spi_sync - blocking/synchronous SPI data transfers
2474  * @spi: device with which data will be exchanged
2475  * @message: describes the data transfers
2476  * Context: can sleep
2477  *
2478  * This call may only be used from a context that may sleep.  The sleep
2479  * is non-interruptible, and has no timeout.  Low-overhead controller
2480  * drivers may DMA directly into and out of the message buffers.
2481  *
2482  * Note that the SPI device's chip select is active during the message,
2483  * and then is normally disabled between messages.  Drivers for some
2484  * frequently-used devices may want to minimize costs of selecting a chip,
2485  * by leaving it selected in anticipation that the next message will go
2486  * to the same chip.  (That may increase power usage.)
2487  *
2488  * Also, the caller is guaranteeing that the memory associated with the
2489  * message will not be freed before this call returns.
2490  *
2491  * Return: zero on success, else a negative error code.
2492  */
2493 int spi_sync(struct spi_device *spi, struct spi_message *message)
2494 {
2495         return __spi_sync(spi, message, 0);
2496 }
2497 EXPORT_SYMBOL_GPL(spi_sync);
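
/*
 * Illustrative sketch, not part of this file: a simple full-duplex
 * exchange built from one transfer and pushed out with spi_sync().
 * The helper name is hypothetical; tx/rx should be DMA-safe buffers.
 */
static int foo_xchg(struct spi_device *spi, const u8 *tx, u8 *rx, size_t len)
{
	struct spi_transfer	xfer = {
		.tx_buf	= tx,
		.rx_buf	= rx,
		.len	= len,
	};
	struct spi_message	msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	return spi_sync(spi, &msg);
}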
2498
2499 /**
2500  * spi_sync_locked - version of spi_sync with exclusive bus usage
2501  * @spi: device with which data will be exchanged
2502  * @message: describes the data transfers
2503  * Context: can sleep
2504  *
2505  * This call may only be used from a context that may sleep.  The sleep
2506  * is non-interruptible, and has no timeout.  Low-overhead controller
2507  * drivers may DMA directly into and out of the message buffers.
2508  *
2509  * This call should be used by drivers that require exclusive access to the
2510  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
2511  * be released by a spi_bus_unlock call when the exclusive access is over.
2512  *
2513  * Return: zero on success, else a negative error code.
2514  */
2515 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
2516 {
2517         return __spi_sync(spi, message, 1);
2518 }
2519 EXPORT_SYMBOL_GPL(spi_sync_locked);
2520
2521 /**
2522  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
2523  * @master: SPI bus master that should be locked for exclusive bus access
2524  * Context: can sleep
2525  *
2526  * This call may only be used from a context that may sleep.  The sleep
2527  * is non-interruptible, and has no timeout.
2528  *
2529  * This call should be used by drivers that require exclusive access to the
2530  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
2531  * exclusive access is over. Data transfer must be done by spi_sync_locked
2532  * and spi_async_locked calls when the SPI bus lock is held.
2533  *
2534  * Return: always zero.
2535  */
2536 int spi_bus_lock(struct spi_master *master)
2537 {
2538         unsigned long flags;
2539
2540         mutex_lock(&master->bus_lock_mutex);
2541
2542         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2543         master->bus_lock_flag = 1;
2544         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2545
2546         /* mutex remains locked until spi_bus_unlock is called */
2547
2548         return 0;
2549 }
2550 EXPORT_SYMBOL_GPL(spi_bus_lock);
2551
2552 /**
2553  * spi_bus_unlock - release the lock for exclusive SPI bus usage
2554  * @master: SPI bus master that was locked for exclusive bus access
2555  * Context: can sleep
2556  *
2557  * This call may only be used from a context that may sleep.  The sleep
2558  * is non-interruptible, and has no timeout.
2559  *
2560  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
2561  * call.
2562  *
2563  * Return: always zero.
2564  */
2565 int spi_bus_unlock(struct spi_master *master)
2566 {
2567         master->bus_lock_flag = 0;
2568
2569         mutex_unlock(&master->bus_lock_mutex);
2570
2571         return 0;
2572 }
2573 EXPORT_SYMBOL_GPL(spi_bus_unlock);
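
/*
 * Illustrative sketch, not part of this file: issuing two messages back
 * to back with no other bus traffic in between, using the lock/unlock
 * contract described above.  The helper name is hypothetical.
 */
static int foo_sync_pair(struct spi_device *spi,
			 struct spi_message *first,
			 struct spi_message *second)
{
	struct spi_master *master = spi->master;
	int ret;

	spi_bus_lock(master);

	ret = spi_sync_locked(spi, first);
	if (!ret)
		ret = spi_sync_locked(spi, second);

	spi_bus_unlock(master);

	return ret;
}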
2574
2575 /* portable code must never pass more than 32 bytes */
2576 #define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
2577
2578 static u8       *buf;
2579
2580 /**
2581  * spi_write_then_read - SPI synchronous write followed by read
2582  * @spi: device with which data will be exchanged
2583  * @txbuf: data to be written (need not be dma-safe)
2584  * @n_tx: size of txbuf, in bytes
2585  * @rxbuf: buffer into which data will be read (need not be dma-safe)
2586  * @n_rx: size of rxbuf, in bytes
2587  * Context: can sleep
2588  *
2589  * This performs a half duplex MicroWire style transaction with the
2590  * device, sending txbuf and then reading rxbuf.  The return value
2591  * is zero for success, else a negative errno status code.
2592  * This call may only be used from a context that may sleep.
2593  *
2594  * Parameters to this routine are always copied using a small buffer;
2595  * portable code should never use this for more than 32 bytes.
2596  * Performance-sensitive or bulk transfer code should instead use
2597  * spi_{async,sync}() calls with dma-safe buffers.
2598  *
2599  * Return: zero on success, else a negative error code.
2600  */
2601 int spi_write_then_read(struct spi_device *spi,
2602                 const void *txbuf, unsigned n_tx,
2603                 void *rxbuf, unsigned n_rx)
2604 {
2605         static DEFINE_MUTEX(lock);
2606
2607         int                     status;
2608         struct spi_message      message;
2609         struct spi_transfer     x[2];
2610         u8                      *local_buf;
2611
2612         /* Use preallocated DMA-safe buffer if we can.  We can't avoid
2613          * copying here, (as a pure convenience thing), but we can
2614          * keep heap costs out of the hot path unless someone else is
2615          * using the pre-allocated buffer or the transfer is too large.
2616          */
2617         if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
2618                 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
2619                                     GFP_KERNEL | GFP_DMA);
2620                 if (!local_buf)
2621                         return -ENOMEM;
2622         } else {
2623                 local_buf = buf;
2624         }
2625
2626         spi_message_init(&message);
2627         memset(x, 0, sizeof(x));
2628         if (n_tx) {
2629                 x[0].len = n_tx;
2630                 spi_message_add_tail(&x[0], &message);
2631         }
2632         if (n_rx) {
2633                 x[1].len = n_rx;
2634                 spi_message_add_tail(&x[1], &message);
2635         }
2636
2637         memcpy(local_buf, txbuf, n_tx);
2638         x[0].tx_buf = local_buf;
2639         x[1].rx_buf = local_buf + n_tx;
2640
2641         /* do the i/o */
2642         status = spi_sync(spi, &message);
2643         if (status == 0)
2644                 memcpy(rxbuf, x[1].rx_buf, n_rx);
2645
2646         if (x[0].tx_buf == buf)
2647                 mutex_unlock(&lock);
2648         else
2649                 kfree(local_buf);
2650
2651         return status;
2652 }
2653 EXPORT_SYMBOL_GPL(spi_write_then_read);
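
/*
 * Illustrative sketch, not part of this file: reading a single register
 * with spi_write_then_read().  The command encoding is made up for an
 * imagined chip; real devices define their own.
 */
static int foo_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	u8 cmd = 0x80 | reg;	/* assumed "read register" command byte */

	return spi_write_then_read(spi, &cmd, 1, val, 1);
}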
2654
2655 /*-------------------------------------------------------------------------*/
2656
2657 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
2658 static int __spi_of_device_match(struct device *dev, void *data)
2659 {
2660         return dev->of_node == data;
2661 }
2662
2663 /* must call put_device() when done with the returned spi_device */
2664 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
2665 {
2666         struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
2667                                                 __spi_of_device_match);
2668         return dev ? to_spi_device(dev) : NULL;
2669 }
2670
2671 static int __spi_of_master_match(struct device *dev, const void *data)
2672 {
2673         return dev->of_node == data;
2674 }
2675
2676 /* the spi masters are not using spi_bus, so we find them another way */
2677 static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
2678 {
2679         struct device *dev;
2680
2681         dev = class_find_device(&spi_master_class, NULL, node,
2682                                 __spi_of_master_match);
2683         if (!dev)
2684                 return NULL;
2685
2686         /* reference obtained in class_find_device */
2687         return container_of(dev, struct spi_master, dev);
2688 }
2689
2690 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
2691                          void *arg)
2692 {
2693         struct of_reconfig_data *rd = arg;
2694         struct spi_master *master;
2695         struct spi_device *spi;
2696
2697         switch (of_reconfig_get_state_change(action, arg)) {
2698         case OF_RECONFIG_CHANGE_ADD:
2699                 master = of_find_spi_master_by_node(rd->dn->parent);
2700                 if (master == NULL)
2701                         return NOTIFY_OK;       /* not for us */
2702
2703                 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
2704                         put_device(&master->dev);
2705                         return NOTIFY_OK;
2706                 }
2707
2708                 spi = of_register_spi_device(master, rd->dn);
2709                 put_device(&master->dev);
2710
2711                 if (IS_ERR(spi)) {
2712                         pr_err("%s: failed to create for '%s'\n",
2713                                         __func__, rd->dn->full_name);
2714                         return notifier_from_errno(PTR_ERR(spi));
2715                 }
2716                 break;
2717
2718         case OF_RECONFIG_CHANGE_REMOVE:
2719                 /* already depopulated? */
2720                 if (!of_node_check_flag(rd->dn, OF_POPULATED))
2721                         return NOTIFY_OK;
2722
2723                 /* find our device by node */
2724                 spi = of_find_spi_device_by_node(rd->dn);
2725                 if (spi == NULL)
2726                         return NOTIFY_OK;       /* no? not meant for us */
2727
2728                 /* unregister takes one ref away */
2729                 spi_unregister_device(spi);
2730
2731                 /* and put the reference of the find */
2732                 put_device(&spi->dev);
2733                 break;
2734         }
2735
2736         return NOTIFY_OK;
2737 }
2738
2739 static struct notifier_block spi_of_notifier = {
2740         .notifier_call = of_spi_notify,
2741 };
2742 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
2743 extern struct notifier_block spi_of_notifier;
2744 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
2745
2746 static int __init spi_init(void)
2747 {
2748         int     status;
2749
2750         buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
2751         if (!buf) {
2752                 status = -ENOMEM;
2753                 goto err0;
2754         }
2755
2756         status = bus_register(&spi_bus_type);
2757         if (status < 0)
2758                 goto err1;
2759
2760         status = class_register(&spi_master_class);
2761         if (status < 0)
2762                 goto err2;
2763
2764         if (IS_ENABLED(CONFIG_OF_DYNAMIC))
2765                 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
2766
2767         return 0;
2768
2769 err2:
2770         bus_unregister(&spi_bus_type);
2771 err1:
2772         kfree(buf);
2773         buf = NULL;
2774 err0:
2775         return status;
2776 }
2777
2778 /* board_info is normally registered in arch_initcall(),
2779  * but even essential drivers wait till later
2780  *
2781  * REVISIT only boardinfo really needs static linking. the rest (device and
2782  * driver registration) _could_ be dynamically linked (modular) ... costs
2783  * include needing to have boardinfo data structures be much more public.
2784  */
2785 postcore_initcall(spi_init);
2786