// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <[email protected]>
 * Copyright (C) 2013 Maxime Ripard <[email protected]>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
        struct module           *owner;
        struct device           dev;
        int                     stride;
        int                     word_size;
        int                     id;
        struct kref             refcnt;
        size_t                  size;
        bool                    read_only;
        bool                    root_only;
        int                     flags;
        enum nvmem_type         type;
        struct bin_attribute    eeprom;
        struct device           *base_dev;
        struct list_head        cells;
        const struct nvmem_keepout *keepout;
        unsigned int            nkeepout;
        nvmem_reg_read_t        reg_read;
        nvmem_reg_write_t       reg_write;
        nvmem_cell_post_process_t cell_post_process;
        struct gpio_desc        *wp_gpio;
        void *priv;
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT             BIT(0)
struct nvmem_cell_entry {
        const char              *name;
        int                     offset;
        int                     bytes;
        int                     bit_offset;
        int                     nbits;
        struct device_node      *np;
        struct nvmem_device     *nvmem;
        struct list_head        node;
};

struct nvmem_cell {
        struct nvmem_cell_entry *entry;
        const char              *id;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
                            void *val, size_t bytes)
{
        if (nvmem->reg_read)
                return nvmem->reg_read(nvmem->priv, offset, val, bytes);

        return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
                             void *val, size_t bytes)
{
        int ret;

        if (nvmem->reg_write) {
                gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
                ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
                gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
                return ret;
        }

        return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
                                      unsigned int offset, void *val,
                                      size_t bytes, int write)
{
        unsigned int end = offset + bytes;
        unsigned int kend, ksize;
        const struct nvmem_keepout *keepout = nvmem->keepout;
        const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
        int rc;

        /*
         * Skip all keepouts before the range being accessed.
         * Keepouts are sorted.
         */
        while ((keepout < keepoutend) && (keepout->end <= offset))
                keepout++;

        while ((offset < end) && (keepout < keepoutend)) {
                /* Access the valid portion before the keepout. */
                if (offset < keepout->start) {
                        kend = min(end, keepout->start);
                        ksize = kend - offset;
                        if (write)
                                rc = __nvmem_reg_write(nvmem, offset, val, ksize);
                        else
                                rc = __nvmem_reg_read(nvmem, offset, val, ksize);

                        if (rc)
                                return rc;

                        offset += ksize;
                        val += ksize;
                }

                /*
                 * Now we're aligned to the start of this keepout zone. Go
                 * through it.
                 */
                kend = min(end, keepout->end);
                ksize = kend - offset;
                if (!write)
                        memset(val, keepout->value, ksize);

                val += ksize;
                offset += ksize;
                keepout++;
        }

        /*
         * If we ran out of keepouts but there's still stuff to do, send it
         * down directly.
         */
        if (offset < end) {
                ksize = end - offset;
                if (write)
                        return __nvmem_reg_write(nvmem, offset, val, ksize);
                else
                        return __nvmem_reg_read(nvmem, offset, val, ksize);
        }

        return 0;
}

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
                          void *val, size_t bytes)
{
        if (!nvmem->nkeepout)
                return __nvmem_reg_read(nvmem, offset, val, bytes);

        return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
                           void *val, size_t bytes)
{
        if (!nvmem->nkeepout)
                return __nvmem_reg_write(nvmem, offset, val, bytes);

        return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}
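
/*
 * Editorial sketch (not part of the original file): how a provider might
 * describe a locked range with a keepout. The offsets and the array name
 * are hypothetical. With this entry, reads inside [0x10, 0x20) are filled
 * with 0xff instead of touching the hardware, and writes skip the range
 * entirely. A driver would hook this up via config->keepout and
 * config->nkeepout before registering.
 */
static const struct nvmem_keepout example_keepouts[] __maybe_unused = {
        { .start = 0x10, .end = 0x20, .value = 0xff },
};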

#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
        [NVMEM_TYPE_UNKNOWN] = "Unknown",
        [NVMEM_TYPE_EEPROM] = "EEPROM",
        [NVMEM_TYPE_OTP] = "OTP",
        [NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
        [NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct nvmem_device *nvmem = to_nvmem_device(dev);

        return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
        &dev_attr_type.attr,
        NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
                                   struct bin_attribute *attr, char *buf,
                                   loff_t pos, size_t count)
{
        struct device *dev;
        struct nvmem_device *nvmem;
        int rc;

        if (attr->private)
                dev = attr->private;
        else
                dev = kobj_to_dev(kobj);
        nvmem = to_nvmem_device(dev);

        /* Stop the user from reading */
        if (pos >= nvmem->size)
                return 0;

        if (!IS_ALIGNED(pos, nvmem->stride))
                return -EINVAL;

        if (count < nvmem->word_size)
                return -EINVAL;

        if (pos + count > nvmem->size)
                count = nvmem->size - pos;

        count = round_down(count, nvmem->word_size);

        if (!nvmem->reg_read)
                return -EPERM;

        rc = nvmem_reg_read(nvmem, pos, buf, count);

        if (rc)
                return rc;

        return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
                                    struct bin_attribute *attr, char *buf,
                                    loff_t pos, size_t count)
{
        struct device *dev;
        struct nvmem_device *nvmem;
        int rc;

        if (attr->private)
                dev = attr->private;
        else
                dev = kobj_to_dev(kobj);
        nvmem = to_nvmem_device(dev);

        /* Stop the user from writing */
        if (pos >= nvmem->size)
                return -EFBIG;

        if (!IS_ALIGNED(pos, nvmem->stride))
                return -EINVAL;

        if (count < nvmem->word_size)
                return -EINVAL;

        if (pos + count > nvmem->size)
                count = nvmem->size - pos;

        count = round_down(count, nvmem->word_size);

        if (!nvmem->reg_write)
                return -EPERM;

        rc = nvmem_reg_write(nvmem, pos, buf, count);

        if (rc)
                return rc;

        return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
        umode_t mode = 0400;

        if (!nvmem->root_only)
                mode |= 0044;

        if (!nvmem->read_only)
                mode |= 0200;

        if (!nvmem->reg_write)
                mode &= ~0200;

        if (!nvmem->reg_read)
                mode &= ~0444;

        return mode;
}

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
                                         struct bin_attribute *attr, int i)
{
        struct device *dev = kobj_to_dev(kobj);
        struct nvmem_device *nvmem = to_nvmem_device(dev);

        attr->size = nvmem->size;

        return nvmem_bin_attr_get_umode(nvmem);
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
        .attr   = {
                .name   = "nvmem",
                .mode   = 0644,
        },
        .read   = bin_attr_nvmem_read,
        .write  = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
        &bin_attr_rw_nvmem,
        NULL,
};

static const struct attribute_group nvmem_bin_group = {
        .bin_attrs      = nvmem_bin_attributes,
        .attrs          = nvmem_attrs,
        .is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
        &nvmem_bin_group,
        NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
        .attr   = {
                .name   = "eeprom",
        },
        .read   = bin_attr_nvmem_read,
        .write  = bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in
 * the driver's sysfs directory, to be backwards compatible with the
 * older drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
                                    const struct nvmem_config *config)
{
        int rval;

        if (!config->compat)
                return 0;

        if (!config->base_dev)
                return -EINVAL;

        if (config->type == NVMEM_TYPE_FRAM)
                bin_attr_nvmem_eeprom_compat.attr.name = "fram";

        nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
        nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
        nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
        nvmem->eeprom.private = &nvmem->dev;
        nvmem->base_dev = config->base_dev;

        rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
        if (rval) {
                dev_err(&nvmem->dev,
                        "Failed to create eeprom binary file %d\n", rval);
                return rval;
        }

        nvmem->flags |= FLAG_COMPAT;

        return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
                              const struct nvmem_config *config)
{
        if (config->compat)
                device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
                                    const struct nvmem_config *config)
{
        return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
                                      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
        struct nvmem_device *nvmem = to_nvmem_device(dev);

        ida_free(&nvmem_ida, nvmem->id);
        gpiod_put(nvmem->wp_gpio);
        kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
        .release        = nvmem_release,
};

static struct bus_type nvmem_bus_type = {
        .name           = "nvmem",
};

static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
        blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
        mutex_lock(&nvmem_mutex);
        list_del(&cell->node);
        mutex_unlock(&nvmem_mutex);
        of_node_put(cell->np);
        kfree_const(cell->name);
        kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
        struct nvmem_cell_entry *cell, *p;

        list_for_each_entry_safe(cell, p, &nvmem->cells, node)
                nvmem_cell_entry_drop(cell);
}

static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
        mutex_lock(&nvmem_mutex);
        list_add_tail(&cell->node, &cell->nvmem->cells);
        mutex_unlock(&nvmem_mutex);
        blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
                                                     const struct nvmem_cell_info *info,
                                                     struct nvmem_cell_entry *cell)
{
        cell->nvmem = nvmem;
        cell->offset = info->offset;
        cell->bytes = info->bytes;
        cell->name = info->name;

        cell->bit_offset = info->bit_offset;
        cell->nbits = info->nbits;

        if (cell->nbits)
                cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
                                           BITS_PER_BYTE);

        if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
                dev_err(&nvmem->dev,
                        "cell %s unaligned to nvmem stride %d\n",
                        cell->name ?: "<unknown>", nvmem->stride);
                return -EINVAL;
        }

        return 0;
}

static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
                                               const struct nvmem_cell_info *info,
                                               struct nvmem_cell_entry *cell)
{
        int err;

        err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
        if (err)
                return err;

        cell->name = kstrdup_const(info->name, GFP_KERNEL);
        if (!cell->name)
                return -ENOMEM;

        return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
                    const struct nvmem_cell_info *info,
                    int ncells)
{
        struct nvmem_cell_entry **cells;
        int i, rval;

        cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
        if (!cells)
                return -ENOMEM;

        for (i = 0; i < ncells; i++) {
                cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
                if (!cells[i]) {
                        rval = -ENOMEM;
                        goto err;
                }

                rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, &info[i], cells[i]);
                if (rval) {
                        kfree(cells[i]);
                        goto err;
                }

                nvmem_cell_entry_add(cells[i]);
        }

        /* remove tmp array */
        kfree(cells);

        return 0;
err:
        while (i--)
                nvmem_cell_entry_drop(cells[i]);

        kfree(cells);

        return rval;
}
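
/*
 * Editorial sketch (not part of the original file): a static cell layout a
 * provider could hand to the core via config->cells / config->ncells. The
 * names, offsets, and sizes are hypothetical.
 */
static const struct nvmem_cell_info example_cells[] __maybe_unused = {
        { .name = "serial-number", .offset = 0x00, .bytes = 8 },
        { .name = "mac",           .offset = 0x08, .bytes = 6 },
};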

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
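
/*
 * Editorial sketch (not part of the original file): a consumer watching for
 * provider arrival. The callback name is hypothetical; the event codes
 * (NVMEM_ADD etc.) come from linux/nvmem-consumer.h.
 */
static int __maybe_unused example_nvmem_event(struct notifier_block *nb,
                                              unsigned long event, void *data)
{
        if (event == NVMEM_ADD)
                pr_debug("nvmem: a provider was registered\n");

        return NOTIFY_OK;
}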

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
        const struct nvmem_cell_info *info;
        struct nvmem_cell_table *table;
        struct nvmem_cell_entry *cell;
        int rval = 0, i;

        mutex_lock(&nvmem_cell_mutex);
        list_for_each_entry(table, &nvmem_cell_tables, node) {
                if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
                        for (i = 0; i < table->ncells; i++) {
                                info = &table->cells[i];

                                cell = kzalloc(sizeof(*cell), GFP_KERNEL);
                                if (!cell) {
                                        rval = -ENOMEM;
                                        goto out;
                                }

                                rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
                                if (rval) {
                                        kfree(cell);
                                        goto out;
                                }

                                nvmem_cell_entry_add(cell);
                        }
                }
        }

out:
        mutex_unlock(&nvmem_cell_mutex);
        return rval;
}

static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
        struct nvmem_cell_entry *iter, *cell = NULL;

        mutex_lock(&nvmem_mutex);
        list_for_each_entry(iter, &nvmem->cells, node) {
                if (strcmp(cell_id, iter->name) == 0) {
                        cell = iter;
                        break;
                }
        }
        mutex_unlock(&nvmem_mutex);

        return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
        unsigned int cur = 0;
        const struct nvmem_keepout *keepout = nvmem->keepout;
        const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

        while (keepout < keepoutend) {
                /* Ensure keepouts are sorted and don't overlap. */
                if (keepout->start < cur) {
                        dev_err(&nvmem->dev,
                                "Keepout regions aren't sorted or overlap.\n");

                        return -ERANGE;
                }

                if (keepout->end < keepout->start) {
                        dev_err(&nvmem->dev,
                                "Invalid keepout region.\n");

                        return -EINVAL;
                }

                /*
                 * Validate keepouts (and holes between) don't violate
                 * word_size constraints.
                 */
                if ((keepout->end - keepout->start < nvmem->word_size) ||
                    ((keepout->start != cur) &&
                     (keepout->start - cur < nvmem->word_size))) {

                        dev_err(&nvmem->dev,
                                "Keepout regions violate word_size constraints.\n");

                        return -ERANGE;
                }

                /* Validate keepouts don't violate stride (alignment). */
                if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
                    !IS_ALIGNED(keepout->end, nvmem->stride)) {

                        dev_err(&nvmem->dev,
                                "Keepout regions violate stride.\n");

                        return -EINVAL;
                }

                cur = keepout->end;
                keepout++;
        }

        return 0;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
        struct device_node *parent, *child;
        struct device *dev = &nvmem->dev;
        struct nvmem_cell_entry *cell;
        const __be32 *addr;
        int len;

        parent = dev->of_node;

        for_each_child_of_node(parent, child) {
                addr = of_get_property(child, "reg", &len);
                if (!addr)
                        continue;
                if (len < 2 * sizeof(u32)) {
                        dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
                        of_node_put(child);
                        return -EINVAL;
                }

                cell = kzalloc(sizeof(*cell), GFP_KERNEL);
                if (!cell) {
                        of_node_put(child);
                        return -ENOMEM;
                }

                cell->nvmem = nvmem;
                cell->offset = be32_to_cpup(addr++);
                cell->bytes = be32_to_cpup(addr);
                cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

                addr = of_get_property(child, "bits", &len);
                if (addr && len == (2 * sizeof(u32))) {
                        cell->bit_offset = be32_to_cpup(addr++);
                        cell->nbits = be32_to_cpup(addr);
                }

                if (cell->nbits)
                        cell->bytes = DIV_ROUND_UP(
                                        cell->nbits + cell->bit_offset,
                                        BITS_PER_BYTE);

                if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
                        dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
                                cell->name, nvmem->stride);
                        /* Cells already added will be freed later. */
                        kfree_const(cell->name);
                        kfree(cell);
                        of_node_put(child);
                        return -EINVAL;
                }

                cell->np = of_node_get(child);
                nvmem_cell_entry_add(cell);
        }

        return 0;
}
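
/*
 * Editorial note (not part of the original file): the loop above expects
 * provider child nodes shaped like this hypothetical devicetree snippet,
 * where "reg" is <byte-offset byte-size> and the optional "bits" property
 * is <bit-offset nbits>:
 *
 *      eeprom@52 {
 *              compatible = "atmel,24c32";
 *              reg = <0x52>;
 *
 *              calib: calib@10 {
 *                      reg = <0x10 0x2>;
 *                      bits = <4 10>;
 *              };
 *      };
 */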

/**
 * nvmem_register() - Register an nvmem device for a given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
        struct nvmem_device *nvmem;
        int rval;

        if (!config->dev)
                return ERR_PTR(-EINVAL);

        if (!config->reg_read && !config->reg_write)
                return ERR_PTR(-EINVAL);

        nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
        if (!nvmem)
                return ERR_PTR(-ENOMEM);

        rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
        if (rval < 0) {
                kfree(nvmem);
                return ERR_PTR(rval);
        }

        if (config->wp_gpio)
                nvmem->wp_gpio = config->wp_gpio;
        else
                nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
                                                    GPIOD_OUT_HIGH);
        if (IS_ERR(nvmem->wp_gpio)) {
                /* nvmem->id is not assigned yet; free the id just allocated */
                ida_free(&nvmem_ida, rval);
                rval = PTR_ERR(nvmem->wp_gpio);
                kfree(nvmem);
                return ERR_PTR(rval);
        }

        kref_init(&nvmem->refcnt);
        INIT_LIST_HEAD(&nvmem->cells);

        nvmem->id = rval;
        nvmem->owner = config->owner;
        if (!nvmem->owner && config->dev->driver)
                nvmem->owner = config->dev->driver->owner;
        nvmem->stride = config->stride ?: 1;
        nvmem->word_size = config->word_size ?: 1;
        nvmem->size = config->size;
        nvmem->dev.type = &nvmem_provider_type;
        nvmem->dev.bus = &nvmem_bus_type;
        nvmem->dev.parent = config->dev;
        nvmem->root_only = config->root_only;
        nvmem->priv = config->priv;
        nvmem->type = config->type;
        nvmem->reg_read = config->reg_read;
        nvmem->reg_write = config->reg_write;
        nvmem->cell_post_process = config->cell_post_process;
        nvmem->keepout = config->keepout;
        nvmem->nkeepout = config->nkeepout;
        if (config->of_node)
                nvmem->dev.of_node = config->of_node;
        else if (!config->no_of_node)
                nvmem->dev.of_node = config->dev->of_node;

        switch (config->id) {
        case NVMEM_DEVID_NONE:
                dev_set_name(&nvmem->dev, "%s", config->name);
                break;
        case NVMEM_DEVID_AUTO:
                dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
                break;
        default:
                dev_set_name(&nvmem->dev, "%s%d",
                             config->name ? : "nvmem",
                             config->name ? config->id : nvmem->id);
                break;
        }

        nvmem->read_only = device_property_present(config->dev, "read-only") ||
                           config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
        nvmem->dev.groups = nvmem_dev_groups;
#endif

        if (nvmem->nkeepout) {
                rval = nvmem_validate_keepouts(nvmem);
                if (rval) {
                        ida_free(&nvmem_ida, nvmem->id);
                        kfree(nvmem);
                        return ERR_PTR(rval);
                }
        }

        dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

        rval = device_register(&nvmem->dev);
        if (rval)
                goto err_put_device;

        if (config->compat) {
                rval = nvmem_sysfs_setup_compat(nvmem, config);
                if (rval)
                        goto err_device_del;
        }

        if (config->cells) {
                rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
                if (rval)
                        goto err_teardown_compat;
        }

        rval = nvmem_add_cells_from_table(nvmem);
        if (rval)
                goto err_remove_cells;

        rval = nvmem_add_cells_from_of(nvmem);
        if (rval)
                goto err_remove_cells;

        blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

        return nvmem;

err_remove_cells:
        nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
        if (config->compat)
                nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
        device_del(&nvmem->dev);
err_put_device:
        put_device(&nvmem->dev);

        return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
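
/*
 * Editorial sketch (not part of the original file): a minimal, read-only
 * provider registration. example_reg_read(), the device name, and the
 * geometry are all hypothetical; a real driver would wire reg_read (and
 * reg_write) to its hardware.
 */
static int __maybe_unused example_reg_read(void *priv, unsigned int offset,
                                           void *val, size_t bytes)
{
        memset(val, 0, bytes);  /* stand-in for a real hardware read */
        return 0;
}

static struct nvmem_device * __maybe_unused
example_provider_register(struct device *dev)
{
        struct nvmem_config config = {
                .dev = dev,
                .name = "example-otp",
                .id = NVMEM_DEVID_AUTO,
                .read_only = true,
                .reg_read = example_reg_read,
                .size = 256,
                .word_size = 1,
                .stride = 1,
        };

        return devm_nvmem_register(dev, &config);
}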

static void nvmem_device_release(struct kref *kref)
{
        struct nvmem_device *nvmem;

        nvmem = container_of(kref, struct nvmem_device, refcnt);

        blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

        if (nvmem->flags & FLAG_COMPAT)
                device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

        nvmem_device_remove_all_cells(nvmem);
        device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
        kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
        nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for a given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
                                         const struct nvmem_config *config)
{
        struct nvmem_device **ptr, *nvmem;

        ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        nvmem = nvmem_register(config);

        if (!IS_ERR(nvmem)) {
                *ptr = nvmem;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
        struct nvmem_device **r = res;

        return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
        return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(void *data,
                        int (*match)(struct device *dev, const void *data))
{
        struct nvmem_device *nvmem = NULL;
        struct device *dev;

        mutex_lock(&nvmem_mutex);
        dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
        if (dev)
                nvmem = to_nvmem_device(dev);
        mutex_unlock(&nvmem_mutex);
        if (!nvmem)
                return ERR_PTR(-EPROBE_DEFER);

        if (!try_module_get(nvmem->owner)) {
                dev_err(&nvmem->dev,
                        "could not increase module refcount for cell %s\n",
                        nvmem_dev_name(nvmem));

                put_device(&nvmem->dev);
                return ERR_PTR(-EINVAL);
        }

        kref_get(&nvmem->refcnt);

        return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
        put_device(&nvmem->dev);
        module_put(nvmem->owner);
        kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
        struct device_node *nvmem_np;
        struct nvmem_device *nvmem;
        int index = 0;

        if (id)
                index = of_property_match_string(np, "nvmem-names", id);

        nvmem_np = of_parse_phandle(np, "nvmem", index);
        if (!nvmem_np)
                return ERR_PTR(-ENOENT);

        nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
        of_node_put(nvmem_np);
        return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
        if (dev->of_node) { /* try dt first */
                struct nvmem_device *nvmem;

                nvmem = of_nvmem_device_get(dev->of_node, dev_name);

                if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
                        return nvmem;
        }

        return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
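
/*
 * Editorial sketch (not part of the original file): looking up a whole
 * nvmem device by name and releasing it again. The device name
 * "example-otp0" is hypothetical.
 */
static void __maybe_unused example_device_lookup(struct device *dev)
{
        struct nvmem_device *nvmem;

        nvmem = nvmem_device_get(dev, "example-otp0");
        if (IS_ERR(nvmem))
                return;

        /* ... use nvmem_device_read()/nvmem_device_write() here ... */
        nvmem_device_put(nvmem);
}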

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
                        int (*match)(struct device *dev, const void *data))
{
        return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
        struct nvmem_device **nvmem = res;

        if (WARN_ON(!nvmem || !*nvmem))
                return 0;

        return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
        nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device obtained with devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
        int ret;

        ret = devres_release(dev, devm_nvmem_device_release,
                             devm_nvmem_device_match, nvmem);

        WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
        __nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.  The nvmem_device will be released automatically once the
 * requesting device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
        struct nvmem_device **ptr, *nvmem;

        ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        nvmem = nvmem_device_get(dev, id);
        if (!IS_ERR(nvmem)) {
                *ptr = nvmem;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry, const char *id)
{
        struct nvmem_cell *cell;
        const char *name = NULL;

        cell = kzalloc(sizeof(*cell), GFP_KERNEL);
        if (!cell)
                return ERR_PTR(-ENOMEM);

        if (id) {
                name = kstrdup_const(id, GFP_KERNEL);
                if (!name) {
                        kfree(cell);
                        return ERR_PTR(-ENOMEM);
                }
        }

        cell->id = name;
        cell->entry = entry;

        return cell;
}

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
        struct nvmem_cell_entry *cell_entry;
        struct nvmem_cell *cell = ERR_PTR(-ENOENT);
        struct nvmem_cell_lookup *lookup;
        struct nvmem_device *nvmem;
        const char *dev_id;

        if (!dev)
                return ERR_PTR(-EINVAL);

        dev_id = dev_name(dev);

        mutex_lock(&nvmem_lookup_mutex);

        list_for_each_entry(lookup, &nvmem_lookup_list, node) {
                if ((strcmp(lookup->dev_id, dev_id) == 0) &&
                    (strcmp(lookup->con_id, con_id) == 0)) {
                        /* This is the right entry. */
                        nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
                                                   device_match_name);
                        if (IS_ERR(nvmem)) {
                                /* Provider may not be registered yet. */
                                cell = ERR_CAST(nvmem);
                                break;
                        }

                        cell_entry = nvmem_find_cell_entry_by_name(nvmem,
                                                                   lookup->cell_name);
                        if (!cell_entry) {
                                __nvmem_device_put(nvmem);
                                cell = ERR_PTR(-ENOENT);
                        } else {
                                cell = nvmem_create_cell(cell_entry, con_id);
                                if (IS_ERR(cell))
                                        __nvmem_device_put(nvmem);
                        }
                        break;
                }
        }

        mutex_unlock(&nvmem_lookup_mutex);
        return cell;
}
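
/*
 * Editorial sketch (not part of the original file): on a non-DT board, a
 * lookup entry like this (registered elsewhere with
 * nvmem_add_cell_lookups()) routes the "mac-address" con_id of device
 * "foo.0" to the cell named "mac" on provider "example-otp0". All names
 * are hypothetical.
 */
static struct nvmem_cell_lookup example_lookup __maybe_unused = {
        .nvmem_name = "example-otp0",
        .cell_name  = "mac",
        .dev_id     = "foo.0",
        .con_id     = "mac-address",
};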

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
        struct nvmem_cell_entry *iter, *cell = NULL;

        mutex_lock(&nvmem_mutex);
        list_for_each_entry(iter, &nvmem->cells, node) {
                if (np == iter->np) {
                        cell = iter;
                        break;
                }
        }
        mutex_unlock(&nvmem_mutex);

        return cell;
}

/**
 * of_nvmem_cell_get() - Get an nvmem cell from a given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
        struct device_node *cell_np, *nvmem_np;
        struct nvmem_device *nvmem;
        struct nvmem_cell_entry *cell_entry;
        struct nvmem_cell *cell;
        int index = 0;

        /* if cell name exists, find index to the name */
        if (id)
                index = of_property_match_string(np, "nvmem-cell-names", id);

        cell_np = of_parse_phandle(np, "nvmem-cells", index);
        if (!cell_np)
                return ERR_PTR(-ENOENT);

        nvmem_np = of_get_next_parent(cell_np);
        if (!nvmem_np)
                return ERR_PTR(-EINVAL);

        nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
        of_node_put(nvmem_np);
        if (IS_ERR(nvmem))
                return ERR_CAST(nvmem);

        cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
        if (!cell_entry) {
                __nvmem_device_put(nvmem);
                return ERR_PTR(-ENOENT);
        }

        cell = nvmem_create_cell(cell_entry, id);
        if (IS_ERR(cell))
                __nvmem_device_put(nvmem);

        return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
        struct nvmem_cell *cell;

        if (dev->of_node) { /* try dt first */
                cell = of_nvmem_cell_get(dev->of_node, id);
                if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
                        return cell;
        }

        /* NULL cell id only allowed for device tree; invalid otherwise */
        if (!id)
                return ERR_PTR(-EINVAL);

        return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
        nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
        struct nvmem_cell **ptr, *cell;

        ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        cell = nvmem_cell_get(dev, id);
        if (!IS_ERR(cell)) {
                *ptr = cell;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
        struct nvmem_cell **c = res;

        if (WARN_ON(!c || !*c))
                return 0;

        return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release a previously allocated nvmem cell
 * obtained with devm_nvmem_cell_get().
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
        int ret;

        ret = devres_release(dev, devm_nvmem_cell_release,
                                devm_nvmem_cell_match, cell);

        WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
        struct nvmem_device *nvmem = cell->entry->nvmem;

        if (cell->id)
                kfree_const(cell->id);

        kfree(cell);
        __nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
        u8 *p, *b;
        int i, extra, bit_offset = cell->bit_offset;

        p = b = buf;
        if (bit_offset) {
                /* First shift */
                *b++ >>= bit_offset;

                /* setup rest of the bytes if any */
                for (i = 1; i < cell->bytes; i++) {
                        /* Get bits from next byte and shift them towards msb */
                        *p |= *b << (BITS_PER_BYTE - bit_offset);

                        p = b;
                        *b++ >>= bit_offset;
                }
        } else {
                /* point to the msb */
                p += cell->bytes - 1;
        }

        /* result fits in fewer bytes */
        extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
        while (--extra >= 0)
                *p-- = 0;

        /* clear msb bits if any leftover in the last byte */
        if (cell->nbits % BITS_PER_BYTE)
                *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
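
/*
 * Editorial note (not part of the original file): for example, a cell with
 * bit_offset = 2 and nbits = 10 occupies two raw bytes; after the in-place
 * shift above, bits [2..11] of the raw data sit right-aligned in buf[0] and
 * the low two bits of buf[1], and all remaining high bits are cleared.
 */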

static int __nvmem_cell_read(struct nvmem_device *nvmem,
                      struct nvmem_cell_entry *cell,
                      void *buf, size_t *len, const char *id)
{
        int rc;

        rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

        if (rc)
                return rc;

        /* shift bits in-place */
        if (cell->bit_offset || cell->nbits)
                nvmem_shift_read_buffer_in_place(cell, buf);

        if (nvmem->cell_post_process) {
                rc = nvmem->cell_post_process(nvmem->priv, id,
                                              cell->offset, buf, cell->bytes);
                if (rc)
                        return rc;
        }

        if (len)
                *len = cell->bytes;

        return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *       can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
        struct nvmem_device *nvmem = cell->entry->nvmem;
        u8 *buf;
        int rc;

        if (!nvmem)
                return ERR_PTR(-EINVAL);

        buf = kzalloc(cell->entry->bytes, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id);
        if (rc) {
                kfree(buf);
                return ERR_PTR(rc);
        }

        return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
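
/*
 * Editorial sketch (not part of the original file): the usual consumer
 * pattern around nvmem_cell_read(). The cell name "calibration" and the
 * function name are hypothetical; the returned buffer belongs to the
 * caller and must be kfree()d.
 */
static int __maybe_unused example_cell_read(struct device *dev)
{
        struct nvmem_cell *cell;
        size_t len;
        u8 *data;

        cell = nvmem_cell_get(dev, "calibration");
        if (IS_ERR(cell))
                return PTR_ERR(cell);

        data = nvmem_cell_read(cell, &len);
        nvmem_cell_put(cell);
        if (IS_ERR(data))
                return PTR_ERR(data);

        dev_dbg(dev, "read %zu bytes of calibration data\n", len);
        kfree(data);
        return 0;
}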

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
                                             u8 *_buf, int len)
{
        struct nvmem_device *nvmem = cell->nvmem;
        int i, rc, nbits, bit_offset = cell->bit_offset;
        u8 v, *p, *buf, *b, pbyte, pbits;

        nbits = cell->nbits;
        buf = kzalloc(cell->bytes, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        memcpy(buf, _buf, len);
        p = b = buf;

        if (bit_offset) {
                pbyte = *b;
                *b <<= bit_offset;

                /* setup the first byte with lsb bits from nvmem */
                rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
                if (rc)
                        goto err;
                *b++ |= GENMASK(bit_offset - 1, 0) & v;

                /* setup rest of the bytes if any */
                for (i = 1; i < cell->bytes; i++) {
                        /* Get last byte bits and shift them towards lsb */
                        pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
                        pbyte = *b;
                        p = b;
                        *b <<= bit_offset;
                        *b++ |= pbits;
                }
        }

        /* if the cell does not end on a byte boundary */
        if ((nbits + bit_offset) % BITS_PER_BYTE) {
                /* setup the last byte with msb bits from nvmem */
                rc = nvmem_reg_read(nvmem,
                                    cell->offset + cell->bytes - 1, &v, 1);
                if (rc)
                        goto err;
                *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
        }

        return buf;
err:
        kfree(buf);
        return ERR_PTR(rc);
}

static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
        struct nvmem_device *nvmem = cell->nvmem;
        int rc;

        if (!nvmem || nvmem->read_only ||
            (cell->bit_offset == 0 && len != cell->bytes))
                return -EINVAL;

        if (cell->bit_offset || cell->nbits) {
                buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
                if (IS_ERR(buf))
                        return PTR_ERR(buf);
        }

        rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

        /* free the tmp buffer */
        if (cell->bit_offset || cell->nbits)
                kfree(buf);

        if (rc)
                return rc;

        return len;
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
        return __nvmem_cell_entry_write(cell->entry, buf, len);
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
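
/*
 * Editorial sketch (not part of the original file): writing a full,
 * byte-aligned cell. For such cells len must equal the cell size; the
 * helper name and values are hypothetical.
 */
static int __maybe_unused example_cell_write(struct nvmem_cell *cell)
{
        u8 mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
        int ret;

        ret = nvmem_cell_write(cell, mac, sizeof(mac));

        return ret < 0 ? ret : 0;
}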

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
                                  void *val, size_t count)
{
        struct nvmem_cell *cell;
        void *buf;
        size_t len;

        cell = nvmem_cell_get(dev, cell_id);
        if (IS_ERR(cell))
                return PTR_ERR(cell);

        buf = nvmem_cell_read(cell, &len);
        if (IS_ERR(buf)) {
                nvmem_cell_put(cell);
                return PTR_ERR(buf);
        }
        if (len != count) {
                kfree(buf);
                nvmem_cell_put(cell);
                return -EINVAL;
        }
        memcpy(val, buf, count);
        kfree(buf);
        nvmem_cell_put(cell);

        return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
        return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
        return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
        return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
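
/*
 * Editorial sketch (not part of the original file): the typed helpers
 * collapse get/read/size-check/put into one call. The cell name
 * "soc-revision" is hypothetical.
 */
static int __maybe_unused example_read_revision(struct device *dev, u32 *rev)
{
        return nvmem_cell_read_u32(dev, "soc-revision", rev);
}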
1662
1663 /**
1664  * nvmem_cell_read_u64() - Read a cell value as a u64
1665  *
1666  * @dev: Device that requests the nvmem cell.
1667  * @cell_id: Name of nvmem cell to read.
1668  * @val: pointer to output value.
1669  *
1670  * Return: 0 on success or negative errno.
1671  */
1672 int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
1673 {
1674         return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1675 }
1676 EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
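/*
 * Editor's note: the nvmem_cell_read_u8/u16/u32/u64 helpers above all
 * funnel into nvmem_cell_read_common(), which returns -EINVAL unless the
 * cell's size matches the requested width exactly. A minimal sketch,
 * assuming a hypothetical 4-byte cell named "soc-revision":
 *
 *	u32 rev;
 *	int ret;
 *
 *	ret = nvmem_cell_read_u32(dev, "soc-revision", &rev);
 *	if (ret)
 *		return ret;
 */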
1677
1678 static const void *nvmem_cell_read_variable_common(struct device *dev,
1679                                                    const char *cell_id,
1680                                                    size_t max_len, size_t *len)
1681 {
1682         struct nvmem_cell *cell;
1683         int nbits;
1684         void *buf;
1685
1686         cell = nvmem_cell_get(dev, cell_id);
1687         if (IS_ERR(cell))
1688                 return cell;
1689
1690         nbits = cell->entry->nbits;
1691         buf = nvmem_cell_read(cell, len);
1692         nvmem_cell_put(cell);
1693         if (IS_ERR(buf))
1694                 return buf;
1695
1696         /*
1697          * If nbits is set then nvmem_cell_read() can significantly exaggerate
1698          * the length of the real data. Throw away the extra junk.
1699          */
1700         if (nbits)
1701                 *len = DIV_ROUND_UP(nbits, 8);
1702
1703         if (*len > max_len) {
1704                 kfree(buf);
1705                 return ERR_PTR(-ERANGE);
1706         }
1707
1708         return buf;
1709 }
1710
1711 /**
1712  * nvmem_cell_read_variable_le_u32() - Read up to 32 bits of data as a little-endian number.
1713  *
1714  * @dev: Device that requests the nvmem cell.
1715  * @cell_id: Name of nvmem cell to read.
1716  * @val: pointer to output value.
1717  *
1718  * Return: 0 on success or negative errno.
1719  */
1720 int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
1721                                     u32 *val)
1722 {
1723         size_t len;
1724         const u8 *buf;
1725         int i;
1726
1727         buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
1728         if (IS_ERR(buf))
1729                 return PTR_ERR(buf);
1730
1731         /* Copy w/ implicit endian conversion */
1732         *val = 0;
1733         for (i = 0; i < len; i++)
1734                 *val |= (u32)buf[i] << (8 * i);
1735
1736         kfree(buf);
1737
1738         return 0;
1739 }
1740 EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);
1741
1742 /**
1743  * nvmem_cell_read_variable_le_u64() - Read up to 64 bits of data as a little-endian number.
1744  *
1745  * @dev: Device that requests the nvmem cell.
1746  * @cell_id: Name of nvmem cell to read.
1747  * @val: pointer to output value.
1748  *
1749  * Return: 0 on success or negative errno.
1750  */
1751 int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
1752                                     u64 *val)
1753 {
1754         size_t len;
1755         const u8 *buf;
1756         int i;
1757
1758         buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
1759         if (IS_ERR(buf))
1760                 return PTR_ERR(buf);
1761
1762         /* Copy w/ implicit endian conversion */
1763         *val = 0;
1764         for (i = 0; i < len; i++)
1765                 *val |= (u64)buf[i] << (8 * i);
1766
1767         kfree(buf);
1768
1769         return 0;
1770 }
1771 EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
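/*
 * Editor's note: unlike the fixed-width helpers, the _variable_ readers
 * accept any cell no wider than the result type and assemble the value
 * little-endian byte by byte, so e.g. a 3-byte cell still fills a u32.
 * A sketch with a hypothetical "timer-offset" cell:
 *
 *	u32 offset;
 *	int ret;
 *
 *	ret = nvmem_cell_read_variable_le_u32(dev, "timer-offset", &offset);
 *	if (ret)
 *		return ret;	// -ERANGE if the cell is wider than 4 bytes
 */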
1772
1773 /**
1774  * nvmem_device_cell_read() - Read a given nvmem device and cell
1775  *
1776  * @nvmem: nvmem device to read from.
1777  * @info: nvmem cell info to be read.
1778  * @buf: buffer pointer which will be populated on successful read.
1779  *
1780  * Return: number of bytes read on success or a negative error code
1781  * on failure.
1782  */
1783 ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
1784                            struct nvmem_cell_info *info, void *buf)
1785 {
1786         struct nvmem_cell_entry cell;
1787         int rc;
1788         ssize_t len;
1789
1790         if (!nvmem)
1791                 return -EINVAL;
1792
1793         rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
1794         if (rc)
1795                 return rc;
1796
1797         rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL);
1798         if (rc)
1799                 return rc;
1800
1801         return len;
1802 }
1803 EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
1804
1805 /**
1806  * nvmem_device_cell_write() - Write cell to a given nvmem device
1807  *
1808  * @nvmem: nvmem device to be written to.
1809  * @info: nvmem cell info to be written.
1810  * @buf: buffer to be written to cell.
1811  *
1812  * Return: number of bytes written on success or a negative error code on failure.
1813  */
1814 int nvmem_device_cell_write(struct nvmem_device *nvmem,
1815                             struct nvmem_cell_info *info, void *buf)
1816 {
1817         struct nvmem_cell_entry cell;
1818         int rc;
1819
1820         if (!nvmem)
1821                 return -EINVAL;
1822
1823         rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
1824         if (rc)
1825                 return rc;
1826
1827         return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
1828 }
1829 EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
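/*
 * Editor's note: a sketch of the device-based cell API above, which takes
 * an ad-hoc struct nvmem_cell_info instead of a cell registered in
 * advance. The name, offset and size below are hypothetical; they must
 * respect the provider's stride and word size.
 *
 *	struct nvmem_cell_info info = {
 *		.name	= "serial",
 *		.offset	= 0x10,
 *		.bytes	= 8,
 *	};
 *	u8 serial[8];
 *	ssize_t rlen;
 *	int ret;
 *
 *	rlen = nvmem_device_cell_read(nvmem, &info, serial);
 *	if (rlen < 0)
 *		return rlen;
 *
 *	ret = nvmem_device_cell_write(nvmem, &info, serial);
 *	if (ret < 0)
 *		return ret;
 */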
1830
1831 /**
1832  * nvmem_device_read() - Read from a given nvmem device
1833  *
1834  * @nvmem: nvmem device to read from.
1835  * @offset: offset in nvmem device.
1836  * @bytes: number of bytes to read.
1837  * @buf: buffer pointer which will be populated on successful read.
1838  *
1839  * Return: number of bytes read on success or a negative error code
1840  * on failure.
1841  */
1842 int nvmem_device_read(struct nvmem_device *nvmem,
1843                       unsigned int offset,
1844                       size_t bytes, void *buf)
1845 {
1846         int rc;
1847
1848         if (!nvmem)
1849                 return -EINVAL;
1850
1851         rc = nvmem_reg_read(nvmem, offset, buf, bytes);
1852
1853         if (rc)
1854                 return rc;
1855
1856         return bytes;
1857 }
1858 EXPORT_SYMBOL_GPL(nvmem_device_read);
1859
1860 /**
1861  * nvmem_device_write() - Write to a given nvmem device
1862  *
1863  * @nvmem: nvmem device to be written to.
1864  * @offset: offset in nvmem device.
1865  * @bytes: number of bytes to write.
1866  * @buf: buffer to be written.
1867  *
1868  * Return: number of bytes written on success or a negative error code on failure.
1869  */
1870 int nvmem_device_write(struct nvmem_device *nvmem,
1871                        unsigned int offset,
1872                        size_t bytes, void *buf)
1873 {
1874         int rc;
1875
1876         if (!nvmem)
1877                 return -EINVAL;
1878
1879         rc = nvmem_reg_write(nvmem, offset, buf, bytes);
1880
1881         if (rc)
1882                 return rc;
1883
1885         return bytes;
1886 }
1887 EXPORT_SYMBOL_GPL(nvmem_device_write);
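/*
 * Editor's note: a raw-access sketch; nvmem_device_read()/_write() take
 * plain byte offsets and bypass the cell layer, so callers are expected
 * to honor the provider's stride and layout themselves. The handle is
 * assumed to come from nvmem_device_get() and the offset is hypothetical.
 *
 *	u8 blob[16];
 *	int ret;
 *
 *	ret = nvmem_device_read(nvmem, 0x40, sizeof(blob), blob);
 *	if (ret < 0)
 *		return ret;	// ret == sizeof(blob) on success
 *
 *	ret = nvmem_device_write(nvmem, 0x40, sizeof(blob), blob);
 *	if (ret < 0)
 *		return ret;
 */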
1888
1889 /**
1890  * nvmem_add_cell_table() - register a table of cell info entries
1891  *
1892  * @table: table of cell info entries
1893  */
1894 void nvmem_add_cell_table(struct nvmem_cell_table *table)
1895 {
1896         mutex_lock(&nvmem_cell_mutex);
1897         list_add_tail(&table->node, &nvmem_cell_tables);
1898         mutex_unlock(&nvmem_cell_mutex);
1899 }
1900 EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
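/*
 * Editor's note: a board-file sketch of registering cells for a provider
 * the caller does not own. "at24-0" and the cell layout are hypothetical;
 * nvmem_name must match the provider's device name, and the table must
 * stay allocated while registered, since it is linked into a global list.
 *
 *	static const struct nvmem_cell_info board_cells[] = {
 *		{
 *			.name	= "mac-address",
 *			.offset	= 0x00,
 *			.bytes	= 6,
 *		},
 *	};
 *
 *	static struct nvmem_cell_table board_cell_table = {
 *		.nvmem_name	= "at24-0",
 *		.cells		= board_cells,
 *		.ncells		= ARRAY_SIZE(board_cells),
 *	};
 *
 *	nvmem_add_cell_table(&board_cell_table);
 */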
1901
1902 /**
1903  * nvmem_del_cell_table() - remove a previously registered cell info table
1904  *
1905  * @table: table of cell info entries
1906  */
1907 void nvmem_del_cell_table(struct nvmem_cell_table *table)
1908 {
1909         mutex_lock(&nvmem_cell_mutex);
1910         list_del(&table->node);
1911         mutex_unlock(&nvmem_cell_mutex);
1912 }
1913 EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
1914
1915 /**
1916  * nvmem_add_cell_lookups() - register a list of cell lookup entries
1917  *
1918  * @entries: array of cell lookup entries
1919  * @nentries: number of cell lookup entries in the array
1920  */
1921 void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1922 {
1923         int i;
1924
1925         mutex_lock(&nvmem_lookup_mutex);
1926         for (i = 0; i < nentries; i++)
1927                 list_add_tail(&entries[i].node, &nvmem_lookup_list);
1928         mutex_unlock(&nvmem_lookup_mutex);
1929 }
1930 EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
1931
1932 /**
1933  * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
1934  *                            entries
1935  *
1936  * @entries: array of cell lookup entries
1937  * @nentries: number of cell lookup entries in the array
1938  */
1939 void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1940 {
1941         int i;
1942
1943         mutex_lock(&nvmem_lookup_mutex);
1944         for (i = 0; i < nentries; i++)
1945                 list_del(&entries[i].node);
1946         mutex_unlock(&nvmem_lookup_mutex);
1947 }
1948 EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
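/*
 * Editor's note: lookup entries route nvmem_cell_get(dev, con_id) to a
 * named cell on a named provider on boards without device tree. All
 * names below are hypothetical; dev_id must match dev_name() of the
 * consumer device.
 *
 *	static struct nvmem_cell_lookup board_lookups[] = {
 *		{
 *			.nvmem_name	= "at24-0",
 *			.cell_name	= "mac-address",
 *			.dev_id		= "1-0050",
 *			.con_id		= "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(board_lookups, ARRAY_SIZE(board_lookups));
 *	...
 *	nvmem_del_cell_lookups(board_lookups, ARRAY_SIZE(board_lookups));
 */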
1949
1950 /**
1951  * nvmem_dev_name() - Get the name of a given nvmem device.
1952  *
1953  * @nvmem: nvmem device.
1954  *
1955  * Return: name of the nvmem device.
1956  */
1957 const char *nvmem_dev_name(struct nvmem_device *nvmem)
1958 {
1959         return dev_name(&nvmem->dev);
1960 }
1961 EXPORT_SYMBOL_GPL(nvmem_dev_name);
1962
1963 static int __init nvmem_init(void)
1964 {
1965         return bus_register(&nvmem_bus_type);
1966 }
1967
1968 static void __exit nvmem_exit(void)
1969 {
1970         bus_unregister(&nvmem_bus_type);
1971 }
1972
1973 subsys_initcall(nvmem_init);
1974 module_exit(nvmem_exit);
1975
1976 MODULE_AUTHOR("Srinivas Kandagatla <[email protected]>");
1977 MODULE_AUTHOR("Maxime Ripard <[email protected]>");
1978 MODULE_DESCRIPTION("nvmem Driver Core");
1979 MODULE_LICENSE("GPL v2");