/*
 * Source: Linux kernel tree, drivers/base/regmap/regmap.c
 * (retrieved from the linux.git web viewer; page chrome removed)
 */
1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // Register map access API
4 //
5 // Copyright 2011 Wolfson Microelectronics plc
6 //
7 // Author: Mark Brown <[email protected]>
8
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/export.h>
12 #include <linux/mutex.h>
13 #include <linux/err.h>
14 #include <linux/property.h>
15 #include <linux/rbtree.h>
16 #include <linux/sched.h>
17 #include <linux/delay.h>
18 #include <linux/log2.h>
19 #include <linux/hwspinlock.h>
20 #include <asm/unaligned.h>
21
22 #define CREATE_TRACE_POINTS
23 #include "trace.h"
24
25 #include "internal.h"
26
27 /*
28  * Sometimes for failures during very early init the trace
29  * infrastructure isn't available early enough to be used.  For this
30  * sort of problem defining LOG_DEVICE will add printks for basic
31  * register I/O on a specific device.
32  */
33 #undef LOG_DEVICE
34
#ifdef LOG_DEVICE
/* Log basic register I/O only for the device whose name matches LOG_DEVICE. */
static inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
/* LOG_DEVICE not defined: logging is compiled out entirely. */
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif
43
44
45 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
46                                unsigned int mask, unsigned int val,
47                                bool *change, bool force_write);
48
49 static int _regmap_bus_reg_read(void *context, unsigned int reg,
50                                 unsigned int *val);
51 static int _regmap_bus_read(void *context, unsigned int reg,
52                             unsigned int *val);
53 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
54                                        unsigned int val);
55 static int _regmap_bus_reg_write(void *context, unsigned int reg,
56                                  unsigned int val);
57 static int _regmap_bus_raw_write(void *context, unsigned int reg,
58                                  unsigned int val);
59
60 bool regmap_reg_in_ranges(unsigned int reg,
61                           const struct regmap_range *ranges,
62                           unsigned int nranges)
63 {
64         const struct regmap_range *r;
65         int i;
66
67         for (i = 0, r = ranges; i < nranges; i++, r++)
68                 if (regmap_reg_in_range(reg, r))
69                         return true;
70         return false;
71 }
72 EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
73
74 bool regmap_check_range_table(struct regmap *map, unsigned int reg,
75                               const struct regmap_access_table *table)
76 {
77         /* Check "no ranges" first */
78         if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
79                 return false;
80
81         /* In case zero "yes ranges" are supplied, any reg is OK */
82         if (!table->n_yes_ranges)
83                 return true;
84
85         return regmap_reg_in_ranges(reg, table->yes_ranges,
86                                     table->n_yes_ranges);
87 }
88 EXPORT_SYMBOL_GPL(regmap_check_range_table);
89
/* Return true if @reg may be written on @map. */
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	/* max_register == 0 means no upper bound was configured. */
	if (map->max_register && reg > map->max_register)
		return false;

	/* A writeable_reg() callback takes precedence over the table. */
	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	/* No restriction configured: everything is writeable. */
	return true;
}
103
/*
 * Return true if @reg currently has a value in the register cache.
 * The cache probe is done under the map lock; a failing regcache_read()
 * (register not present in the cache) reports false.
 */
bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}
126
/* Return true if @reg may be read on @map. */
bool regmap_readable(struct regmap *map, unsigned int reg)
{
	/* No read op at all means nothing is readable. */
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	/* format_write-only devices cannot be read back. */
	if (map->format.format_write)
		return false;

	/* A readable_reg() callback takes precedence over the table. */
	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}
146
147 bool regmap_volatile(struct regmap *map, unsigned int reg)
148 {
149         if (!map->format.format_write && !regmap_readable(map, reg))
150                 return false;
151
152         if (map->volatile_reg)
153                 return map->volatile_reg(map->dev, reg);
154
155         if (map->volatile_table)
156                 return regmap_check_range_table(map, reg, map->volatile_table);
157
158         if (map->cache_ops)
159                 return false;
160         else
161                 return true;
162 }
163
164 bool regmap_precious(struct regmap *map, unsigned int reg)
165 {
166         if (!regmap_readable(map, reg))
167                 return false;
168
169         if (map->precious_reg)
170                 return map->precious_reg(map->dev, reg);
171
172         if (map->precious_table)
173                 return regmap_check_range_table(map, reg, map->precious_table);
174
175         return false;
176 }
177
178 bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
179 {
180         if (map->writeable_noinc_reg)
181                 return map->writeable_noinc_reg(map->dev, reg);
182
183         if (map->wr_noinc_table)
184                 return regmap_check_range_table(map, reg, map->wr_noinc_table);
185
186         return true;
187 }
188
189 bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
190 {
191         if (map->readable_noinc_reg)
192                 return map->readable_noinc_reg(map->dev, reg);
193
194         if (map->rd_noinc_table)
195                 return regmap_check_range_table(map, reg, map->rd_noinc_table);
196
197         return true;
198 }
199
200 static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
201         size_t num)
202 {
203         unsigned int i;
204
205         for (i = 0; i < num; i++)
206                 if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
207                         return false;
208
209         return true;
210 }
211
212 static void regmap_format_12_20_write(struct regmap *map,
213                                      unsigned int reg, unsigned int val)
214 {
215         u8 *out = map->work_buf;
216
217         out[0] = reg >> 4;
218         out[1] = (reg << 4) | (val >> 16);
219         out[2] = val >> 8;
220         out[3] = val;
221 }
222
223
/* Pack a 2-bit register and 6-bit value into a single work-buffer byte. */
static void regmap_format_2_6_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}
231
/* Pack a 4-bit register and 12-bit value into one big-endian 16-bit word. */
static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}
238
/* Pack a 7-bit register and 9-bit value into one big-endian 16-bit word. */
static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}
245
246 static void regmap_format_7_17_write(struct regmap *map,
247                                     unsigned int reg, unsigned int val)
248 {
249         u8 *out = map->work_buf;
250
251         out[2] = val;
252         out[1] = val >> 8;
253         out[0] = (val >> 16) | (reg << 1);
254 }
255
256 static void regmap_format_10_14_write(struct regmap *map,
257                                     unsigned int reg, unsigned int val)
258 {
259         u8 *out = map->work_buf;
260
261         out[2] = val;
262         out[1] = (val >> 8) | (reg << 6);
263         out[0] = reg >> 2;
264 }
265
/* Store an 8-bit value (pre-shifted by @shift) into @buf. */
static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}
272
/* Store a shifted 16-bit value big-endian; handles unaligned @buf. */
static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be16(val << shift, buf);
}
277
/* Store a shifted 16-bit value little-endian; handles unaligned @buf. */
static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le16(val << shift, buf);
}
282
/* Store a shifted 16-bit value in CPU byte order via memcpy (alignment-safe). */
static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u16 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}
290
291 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
292 {
293         u8 *b = buf;
294
295         val <<= shift;
296
297         b[0] = val >> 16;
298         b[1] = val >> 8;
299         b[2] = val;
300 }
301
/* Store a shifted 32-bit value big-endian; handles unaligned @buf. */
static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be32(val << shift, buf);
}
306
/* Store a shifted 32-bit value little-endian; handles unaligned @buf. */
static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le32(val << shift, buf);
}
311
/* Store a shifted 32-bit value in CPU byte order via memcpy (alignment-safe). */
static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u32 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}
319
#ifdef CONFIG_64BIT
/* Store a shifted value big-endian as 64 bits; handles unaligned @buf. */
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be64((u64) val << shift, buf);
}

/* Store a shifted value little-endian as 64 bits; handles unaligned @buf. */
static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le64((u64) val << shift, buf);
}

/* Store a shifted value as 64 bits in CPU byte order (alignment-safe). */
static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u64 v = (u64) val << shift;

	memcpy(buf, &v, sizeof(v));
}
#endif
339
/* In-place parse hook for formats already in the desired byte order. */
static void regmap_parse_inplace_noop(void *buf)
{
}
343
/* Read a single byte value from @buf. */
static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}
350
/* Read a big-endian 16-bit value from possibly-unaligned @buf. */
static unsigned int regmap_parse_16_be(const void *buf)
{
	return get_unaligned_be16(buf);
}
355
/* Read a little-endian 16-bit value from possibly-unaligned @buf. */
static unsigned int regmap_parse_16_le(const void *buf)
{
	return get_unaligned_le16(buf);
}
360
/* Convert a big-endian 16-bit value in @buf to CPU order, in place. */
static void regmap_parse_16_be_inplace(void *buf)
{
	u16 v = get_unaligned_be16(buf);

	memcpy(buf, &v, sizeof(v));
}
367
/* Convert a little-endian 16-bit value in @buf to CPU order, in place. */
static void regmap_parse_16_le_inplace(void *buf)
{
	u16 v = get_unaligned_le16(buf);

	memcpy(buf, &v, sizeof(v));
}
374
/* Read a CPU-byte-order 16-bit value via memcpy (alignment-safe). */
static unsigned int regmap_parse_16_native(const void *buf)
{
	u16 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}
382
383 static unsigned int regmap_parse_24(const void *buf)
384 {
385         const u8 *b = buf;
386         unsigned int ret = b[2];
387         ret |= ((unsigned int)b[1]) << 8;
388         ret |= ((unsigned int)b[0]) << 16;
389
390         return ret;
391 }
392
/* Read a big-endian 32-bit value from possibly-unaligned @buf. */
static unsigned int regmap_parse_32_be(const void *buf)
{
	return get_unaligned_be32(buf);
}
397
/* Read a little-endian 32-bit value from possibly-unaligned @buf. */
static unsigned int regmap_parse_32_le(const void *buf)
{
	return get_unaligned_le32(buf);
}
402
/* Convert a big-endian 32-bit value in @buf to CPU order, in place. */
static void regmap_parse_32_be_inplace(void *buf)
{
	u32 v = get_unaligned_be32(buf);

	memcpy(buf, &v, sizeof(v));
}
409
/* Convert a little-endian 32-bit value in @buf to CPU order, in place. */
static void regmap_parse_32_le_inplace(void *buf)
{
	u32 v = get_unaligned_le32(buf);

	memcpy(buf, &v, sizeof(v));
}
416
/* Read a CPU-byte-order 32-bit value via memcpy (alignment-safe). */
static unsigned int regmap_parse_32_native(const void *buf)
{
	u32 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}
424
#ifdef CONFIG_64BIT
/*
 * NOTE(review): these parse callbacks return unsigned int, so the upper
 * 32 bits of a 64-bit register value are truncated on this path; the
 * in-place variants below preserve the full width.  Confirm callers of
 * format.parse_val never rely on the high bits here.
 */
static unsigned int regmap_parse_64_be(const void *buf)
{
	return get_unaligned_be64(buf);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	return get_unaligned_le64(buf);
}

/* Convert a big-endian 64-bit value in @buf to CPU order, in place. */
static void regmap_parse_64_be_inplace(void *buf)
{
	u64 v =  get_unaligned_be64(buf);

	memcpy(buf, &v, sizeof(v));
}

/* Convert a little-endian 64-bit value in @buf to CPU order, in place. */
static void regmap_parse_64_le_inplace(void *buf)
{
	u64 v = get_unaligned_le64(buf);

	memcpy(buf, &v, sizeof(v));
}

/* Read a CPU-byte-order 64-bit value via memcpy (alignment-safe). */
static unsigned int regmap_parse_64_native(const void *buf)
{
	u64 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}
#endif
458
/*
 * map->lock callback backed by a hardware spinlock.  The timeout is
 * effectively unbounded (UINT_MAX ms) and the return value is ignored.
 */
static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}
465
/* As regmap_lock_hwlock() but also disables local interrupts. */
static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}
472
/*
 * As regmap_lock_hwlock() but saves the interrupt state into the map
 * for the matching irqrestore unlock.
 */
static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}
480
/* map->unlock counterpart of regmap_lock_hwlock(). */
static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}
487
/* map->unlock counterpart of regmap_lock_hwlock_irq(). */
static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}
494
/* map->unlock counterpart of regmap_lock_hwlock_irqsave(). */
static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}
501
/* No-op lock/unlock used when config->disable_locking is set. */
static void regmap_lock_unlock_none(void *__map)
{

}
506
/* Mutex-backed map->lock callback (default when the map can sleep). */
static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}
512
/* Mutex-backed map->unlock callback. */
static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}
518
/*
 * Spinlock-backed map->lock callback for fast_io maps; the saved IRQ
 * flags are stashed in the map for the matching unlock.
 */
static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}
528
/* Spinlock-backed map->unlock callback; restores the saved IRQ flags. */
static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}
535
/*
 * Raw-spinlock map->lock callback (config->use_raw_spinlock); raw locks
 * stay spinning even on PREEMPT_RT.
 */
static void regmap_lock_raw_spinlock(void *__map)
__acquires(&map->raw_spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	raw_spin_lock_irqsave(&map->raw_spinlock, flags);
	map->raw_spinlock_flags = flags;
}
545
/* Raw-spinlock map->unlock callback; restores the saved IRQ flags. */
static void regmap_unlock_raw_spinlock(void *__map)
__releases(&map->raw_spinlock)
{
	struct regmap *map = __map;
	raw_spin_unlock_irqrestore(&map->raw_spinlock, map->raw_spinlock_flags);
}
552
/* devres release callback for the dev_get_regmap() lookup resource. */
static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}
561
562 static bool _regmap_range_add(struct regmap *map,
563                               struct regmap_range_node *data)
564 {
565         struct rb_root *root = &map->range_tree;
566         struct rb_node **new = &(root->rb_node), *parent = NULL;
567
568         while (*new) {
569                 struct regmap_range_node *this =
570                         rb_entry(*new, struct regmap_range_node, node);
571
572                 parent = *new;
573                 if (data->range_max < this->range_min)
574                         new = &((*new)->rb_left);
575                 else if (data->range_min > this->range_max)
576                         new = &((*new)->rb_right);
577                 else
578                         return false;
579         }
580
581         rb_link_node(&data->node, parent, new);
582         rb_insert_color(&data->node, root);
583
584         return true;
585 }
586
587 static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
588                                                       unsigned int reg)
589 {
590         struct rb_node *node = map->range_tree.rb_node;
591
592         while (node) {
593                 struct regmap_range_node *this =
594                         rb_entry(node, struct regmap_range_node, node);
595
596                 if (reg < this->range_min)
597                         node = node->rb_left;
598                 else if (reg > this->range_max)
599                         node = node->rb_right;
600                 else
601                         return this;
602         }
603
604         return NULL;
605 }
606
607 static void regmap_range_exit(struct regmap *map)
608 {
609         struct rb_node *next;
610         struct regmap_range_node *range_node;
611
612         next = rb_first(&map->range_tree);
613         while (next) {
614                 range_node = rb_entry(next, struct regmap_range_node, node);
615                 next = rb_next(&range_node->node);
616                 rb_erase(&range_node->node, &map->range_tree);
617                 kfree(range_node);
618         }
619
620         kfree(map->selector_work_buf);
621 }
622
623 static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
624 {
625         if (config->name) {
626                 const char *name = kstrdup_const(config->name, GFP_KERNEL);
627
628                 if (!name)
629                         return -ENOMEM;
630
631                 kfree_const(map->name);
632                 map->name = name;
633         }
634
635         return 0;
636 }
637
/*
 * Attach @map to @dev: take over the config name, re-create the debugfs
 * entries under the new device, and register a devres record so
 * dev_get_regmap() can find the map later.  Returns 0 or a negative
 * error code.
 */
int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;
	int ret;

	map->dev = dev;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	/* Re-init debugfs so the entries pick up the new device/name. */
	regmap_debugfs_exit(map);
	regmap_debugfs_init(map);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);
665
666 static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
667                                         const struct regmap_config *config)
668 {
669         enum regmap_endian endian;
670
671         /* Retrieve the endianness specification from the regmap config */
672         endian = config->reg_format_endian;
673
674         /* If the regmap config specified a non-default value, use that */
675         if (endian != REGMAP_ENDIAN_DEFAULT)
676                 return endian;
677
678         /* Retrieve the endianness specification from the bus config */
679         if (bus && bus->reg_format_endian_default)
680                 endian = bus->reg_format_endian_default;
681
682         /* If the bus specified a non-default value, use that */
683         if (endian != REGMAP_ENDIAN_DEFAULT)
684                 return endian;
685
686         /* Use this if no other value was found */
687         return REGMAP_ENDIAN_BIG;
688 }
689
690 enum regmap_endian regmap_get_val_endian(struct device *dev,
691                                          const struct regmap_bus *bus,
692                                          const struct regmap_config *config)
693 {
694         struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
695         enum regmap_endian endian;
696
697         /* Retrieve the endianness specification from the regmap config */
698         endian = config->val_format_endian;
699
700         /* If the regmap config specified a non-default value, use that */
701         if (endian != REGMAP_ENDIAN_DEFAULT)
702                 return endian;
703
704         /* If the firmware node exist try to get endianness from it */
705         if (fwnode_property_read_bool(fwnode, "big-endian"))
706                 endian = REGMAP_ENDIAN_BIG;
707         else if (fwnode_property_read_bool(fwnode, "little-endian"))
708                 endian = REGMAP_ENDIAN_LITTLE;
709         else if (fwnode_property_read_bool(fwnode, "native-endian"))
710                 endian = REGMAP_ENDIAN_NATIVE;
711
712         /* If the endianness was specified in fwnode, use that */
713         if (endian != REGMAP_ENDIAN_DEFAULT)
714                 return endian;
715
716         /* Retrieve the endianness specification from the bus config */
717         if (bus && bus->val_format_endian_default)
718                 endian = bus->val_format_endian_default;
719
720         /* If the bus specified a non-default value, use that */
721         if (endian != REGMAP_ENDIAN_DEFAULT)
722                 return endian;
723
724         /* Use this if no other value was found */
725         return REGMAP_ENDIAN_BIG;
726 }
727 EXPORT_SYMBOL_GPL(regmap_get_val_endian);
728
729 struct regmap *__regmap_init(struct device *dev,
730                              const struct regmap_bus *bus,
731                              void *bus_context,
732                              const struct regmap_config *config,
733                              struct lock_class_key *lock_key,
734                              const char *lock_name)
735 {
736         struct regmap *map;
737         int ret = -EINVAL;
738         enum regmap_endian reg_endian, val_endian;
739         int i, j;
740
741         if (!config)
742                 goto err;
743
744         map = kzalloc(sizeof(*map), GFP_KERNEL);
745         if (map == NULL) {
746                 ret = -ENOMEM;
747                 goto err;
748         }
749
750         ret = regmap_set_name(map, config);
751         if (ret)
752                 goto err_map;
753
754         ret = -EINVAL; /* Later error paths rely on this */
755
756         if (config->disable_locking) {
757                 map->lock = map->unlock = regmap_lock_unlock_none;
758                 map->can_sleep = config->can_sleep;
759                 regmap_debugfs_disable(map);
760         } else if (config->lock && config->unlock) {
761                 map->lock = config->lock;
762                 map->unlock = config->unlock;
763                 map->lock_arg = config->lock_arg;
764                 map->can_sleep = config->can_sleep;
765         } else if (config->use_hwlock) {
766                 map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
767                 if (!map->hwlock) {
768                         ret = -ENXIO;
769                         goto err_name;
770                 }
771
772                 switch (config->hwlock_mode) {
773                 case HWLOCK_IRQSTATE:
774                         map->lock = regmap_lock_hwlock_irqsave;
775                         map->unlock = regmap_unlock_hwlock_irqrestore;
776                         break;
777                 case HWLOCK_IRQ:
778                         map->lock = regmap_lock_hwlock_irq;
779                         map->unlock = regmap_unlock_hwlock_irq;
780                         break;
781                 default:
782                         map->lock = regmap_lock_hwlock;
783                         map->unlock = regmap_unlock_hwlock;
784                         break;
785                 }
786
787                 map->lock_arg = map;
788         } else {
789                 if ((bus && bus->fast_io) ||
790                     config->fast_io) {
791                         if (config->use_raw_spinlock) {
792                                 raw_spin_lock_init(&map->raw_spinlock);
793                                 map->lock = regmap_lock_raw_spinlock;
794                                 map->unlock = regmap_unlock_raw_spinlock;
795                                 lockdep_set_class_and_name(&map->raw_spinlock,
796                                                            lock_key, lock_name);
797                         } else {
798                                 spin_lock_init(&map->spinlock);
799                                 map->lock = regmap_lock_spinlock;
800                                 map->unlock = regmap_unlock_spinlock;
801                                 lockdep_set_class_and_name(&map->spinlock,
802                                                            lock_key, lock_name);
803                         }
804                 } else {
805                         mutex_init(&map->mutex);
806                         map->lock = regmap_lock_mutex;
807                         map->unlock = regmap_unlock_mutex;
808                         map->can_sleep = true;
809                         lockdep_set_class_and_name(&map->mutex,
810                                                    lock_key, lock_name);
811                 }
812                 map->lock_arg = map;
813         }
814
815         /*
816          * When we write in fast-paths with regmap_bulk_write() don't allocate
817          * scratch buffers with sleeping allocations.
818          */
819         if ((bus && bus->fast_io) || config->fast_io)
820                 map->alloc_flags = GFP_ATOMIC;
821         else
822                 map->alloc_flags = GFP_KERNEL;
823
824         map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
825         map->format.pad_bytes = config->pad_bits / 8;
826         map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
827         map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
828                         config->val_bits + config->pad_bits, 8);
829         map->reg_shift = config->pad_bits % 8;
830         if (config->reg_stride)
831                 map->reg_stride = config->reg_stride;
832         else
833                 map->reg_stride = 1;
834         if (is_power_of_2(map->reg_stride))
835                 map->reg_stride_order = ilog2(map->reg_stride);
836         else
837                 map->reg_stride_order = -1;
838         map->use_single_read = config->use_single_read || !bus || !bus->read;
839         map->use_single_write = config->use_single_write || !bus || !bus->write;
840         map->can_multi_write = config->can_multi_write && bus && bus->write;
841         if (bus) {
842                 map->max_raw_read = bus->max_raw_read;
843                 map->max_raw_write = bus->max_raw_write;
844         }
845         map->dev = dev;
846         map->bus = bus;
847         map->bus_context = bus_context;
848         map->max_register = config->max_register;
849         map->wr_table = config->wr_table;
850         map->rd_table = config->rd_table;
851         map->volatile_table = config->volatile_table;
852         map->precious_table = config->precious_table;
853         map->wr_noinc_table = config->wr_noinc_table;
854         map->rd_noinc_table = config->rd_noinc_table;
855         map->writeable_reg = config->writeable_reg;
856         map->readable_reg = config->readable_reg;
857         map->volatile_reg = config->volatile_reg;
858         map->precious_reg = config->precious_reg;
859         map->writeable_noinc_reg = config->writeable_noinc_reg;
860         map->readable_noinc_reg = config->readable_noinc_reg;
861         map->cache_type = config->cache_type;
862
863         spin_lock_init(&map->async_lock);
864         INIT_LIST_HEAD(&map->async_list);
865         INIT_LIST_HEAD(&map->async_free);
866         init_waitqueue_head(&map->async_waitq);
867
868         if (config->read_flag_mask ||
869             config->write_flag_mask ||
870             config->zero_flag_mask) {
871                 map->read_flag_mask = config->read_flag_mask;
872                 map->write_flag_mask = config->write_flag_mask;
873         } else if (bus) {
874                 map->read_flag_mask = bus->read_flag_mask;
875         }
876
877         if (!bus) {
878                 map->reg_read  = config->reg_read;
879                 map->reg_write = config->reg_write;
880                 map->reg_update_bits = config->reg_update_bits;
881
882                 map->defer_caching = false;
883                 goto skip_format_initialization;
884         } else if (!bus->read || !bus->write) {
885                 map->reg_read = _regmap_bus_reg_read;
886                 map->reg_write = _regmap_bus_reg_write;
887                 map->reg_update_bits = bus->reg_update_bits;
888
889                 map->defer_caching = false;
890                 goto skip_format_initialization;
891         } else {
892                 map->reg_read  = _regmap_bus_read;
893                 map->reg_update_bits = bus->reg_update_bits;
894         }
895
896         reg_endian = regmap_get_reg_endian(bus, config);
897         val_endian = regmap_get_val_endian(dev, bus, config);
898
899         switch (config->reg_bits + map->reg_shift) {
900         case 2:
901                 switch (config->val_bits) {
902                 case 6:
903                         map->format.format_write = regmap_format_2_6_write;
904                         break;
905                 default:
906                         goto err_hwlock;
907                 }
908                 break;
909
910         case 4:
911                 switch (config->val_bits) {
912                 case 12:
913                         map->format.format_write = regmap_format_4_12_write;
914                         break;
915                 default:
916                         goto err_hwlock;
917                 }
918                 break;
919
920         case 7:
921                 switch (config->val_bits) {
922                 case 9:
923                         map->format.format_write = regmap_format_7_9_write;
924                         break;
925                 case 17:
926                         map->format.format_write = regmap_format_7_17_write;
927                         break;
928                 default:
929                         goto err_hwlock;
930                 }
931                 break;
932
933         case 10:
934                 switch (config->val_bits) {
935                 case 14:
936                         map->format.format_write = regmap_format_10_14_write;
937                         break;
938                 default:
939                         goto err_hwlock;
940                 }
941                 break;
942
943         case 12:
944                 switch (config->val_bits) {
945                 case 20:
946                         map->format.format_write = regmap_format_12_20_write;
947                         break;
948                 default:
949                         goto err_hwlock;
950                 }
951                 break;
952
953         case 8:
954                 map->format.format_reg = regmap_format_8;
955                 break;
956
957         case 16:
958                 switch (reg_endian) {
959                 case REGMAP_ENDIAN_BIG:
960                         map->format.format_reg = regmap_format_16_be;
961                         break;
962                 case REGMAP_ENDIAN_LITTLE:
963                         map->format.format_reg = regmap_format_16_le;
964                         break;
965                 case REGMAP_ENDIAN_NATIVE:
966                         map->format.format_reg = regmap_format_16_native;
967                         break;
968                 default:
969                         goto err_hwlock;
970                 }
971                 break;
972
973         case 24:
974                 if (reg_endian != REGMAP_ENDIAN_BIG)
975                         goto err_hwlock;
976                 map->format.format_reg = regmap_format_24;
977                 break;
978
979         case 32:
980                 switch (reg_endian) {
981                 case REGMAP_ENDIAN_BIG:
982                         map->format.format_reg = regmap_format_32_be;
983                         break;
984                 case REGMAP_ENDIAN_LITTLE:
985                         map->format.format_reg = regmap_format_32_le;
986                         break;
987                 case REGMAP_ENDIAN_NATIVE:
988                         map->format.format_reg = regmap_format_32_native;
989                         break;
990                 default:
991                         goto err_hwlock;
992                 }
993                 break;
994
995 #ifdef CONFIG_64BIT
996         case 64:
997                 switch (reg_endian) {
998                 case REGMAP_ENDIAN_BIG:
999                         map->format.format_reg = regmap_format_64_be;
1000                         break;
1001                 case REGMAP_ENDIAN_LITTLE:
1002                         map->format.format_reg = regmap_format_64_le;
1003                         break;
1004                 case REGMAP_ENDIAN_NATIVE:
1005                         map->format.format_reg = regmap_format_64_native;
1006                         break;
1007                 default:
1008                         goto err_hwlock;
1009                 }
1010                 break;
1011 #endif
1012
1013         default:
1014                 goto err_hwlock;
1015         }
1016
1017         if (val_endian == REGMAP_ENDIAN_NATIVE)
1018                 map->format.parse_inplace = regmap_parse_inplace_noop;
1019
1020         switch (config->val_bits) {
1021         case 8:
1022                 map->format.format_val = regmap_format_8;
1023                 map->format.parse_val = regmap_parse_8;
1024                 map->format.parse_inplace = regmap_parse_inplace_noop;
1025                 break;
1026         case 16:
1027                 switch (val_endian) {
1028                 case REGMAP_ENDIAN_BIG:
1029                         map->format.format_val = regmap_format_16_be;
1030                         map->format.parse_val = regmap_parse_16_be;
1031                         map->format.parse_inplace = regmap_parse_16_be_inplace;
1032                         break;
1033                 case REGMAP_ENDIAN_LITTLE:
1034                         map->format.format_val = regmap_format_16_le;
1035                         map->format.parse_val = regmap_parse_16_le;
1036                         map->format.parse_inplace = regmap_parse_16_le_inplace;
1037                         break;
1038                 case REGMAP_ENDIAN_NATIVE:
1039                         map->format.format_val = regmap_format_16_native;
1040                         map->format.parse_val = regmap_parse_16_native;
1041                         break;
1042                 default:
1043                         goto err_hwlock;
1044                 }
1045                 break;
1046         case 24:
1047                 if (val_endian != REGMAP_ENDIAN_BIG)
1048                         goto err_hwlock;
1049                 map->format.format_val = regmap_format_24;
1050                 map->format.parse_val = regmap_parse_24;
1051                 break;
1052         case 32:
1053                 switch (val_endian) {
1054                 case REGMAP_ENDIAN_BIG:
1055                         map->format.format_val = regmap_format_32_be;
1056                         map->format.parse_val = regmap_parse_32_be;
1057                         map->format.parse_inplace = regmap_parse_32_be_inplace;
1058                         break;
1059                 case REGMAP_ENDIAN_LITTLE:
1060                         map->format.format_val = regmap_format_32_le;
1061                         map->format.parse_val = regmap_parse_32_le;
1062                         map->format.parse_inplace = regmap_parse_32_le_inplace;
1063                         break;
1064                 case REGMAP_ENDIAN_NATIVE:
1065                         map->format.format_val = regmap_format_32_native;
1066                         map->format.parse_val = regmap_parse_32_native;
1067                         break;
1068                 default:
1069                         goto err_hwlock;
1070                 }
1071                 break;
1072 #ifdef CONFIG_64BIT
1073         case 64:
1074                 switch (val_endian) {
1075                 case REGMAP_ENDIAN_BIG:
1076                         map->format.format_val = regmap_format_64_be;
1077                         map->format.parse_val = regmap_parse_64_be;
1078                         map->format.parse_inplace = regmap_parse_64_be_inplace;
1079                         break;
1080                 case REGMAP_ENDIAN_LITTLE:
1081                         map->format.format_val = regmap_format_64_le;
1082                         map->format.parse_val = regmap_parse_64_le;
1083                         map->format.parse_inplace = regmap_parse_64_le_inplace;
1084                         break;
1085                 case REGMAP_ENDIAN_NATIVE:
1086                         map->format.format_val = regmap_format_64_native;
1087                         map->format.parse_val = regmap_parse_64_native;
1088                         break;
1089                 default:
1090                         goto err_hwlock;
1091                 }
1092                 break;
1093 #endif
1094         }
1095
1096         if (map->format.format_write) {
1097                 if ((reg_endian != REGMAP_ENDIAN_BIG) ||
1098                     (val_endian != REGMAP_ENDIAN_BIG))
1099                         goto err_hwlock;
1100                 map->use_single_write = true;
1101         }
1102
1103         if (!map->format.format_write &&
1104             !(map->format.format_reg && map->format.format_val))
1105                 goto err_hwlock;
1106
1107         map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
1108         if (map->work_buf == NULL) {
1109                 ret = -ENOMEM;
1110                 goto err_hwlock;
1111         }
1112
1113         if (map->format.format_write) {
1114                 map->defer_caching = false;
1115                 map->reg_write = _regmap_bus_formatted_write;
1116         } else if (map->format.format_val) {
1117                 map->defer_caching = true;
1118                 map->reg_write = _regmap_bus_raw_write;
1119         }
1120
1121 skip_format_initialization:
1122
1123         map->range_tree = RB_ROOT;
1124         for (i = 0; i < config->num_ranges; i++) {
1125                 const struct regmap_range_cfg *range_cfg = &config->ranges[i];
1126                 struct regmap_range_node *new;
1127
1128                 /* Sanity check */
1129                 if (range_cfg->range_max < range_cfg->range_min) {
1130                         dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
1131                                 range_cfg->range_max, range_cfg->range_min);
1132                         goto err_range;
1133                 }
1134
1135                 if (range_cfg->range_max > map->max_register) {
1136                         dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
1137                                 range_cfg->range_max, map->max_register);
1138                         goto err_range;
1139                 }
1140
1141                 if (range_cfg->selector_reg > map->max_register) {
1142                         dev_err(map->dev,
1143                                 "Invalid range %d: selector out of map\n", i);
1144                         goto err_range;
1145                 }
1146
1147                 if (range_cfg->window_len == 0) {
1148                         dev_err(map->dev, "Invalid range %d: window_len 0\n",
1149                                 i);
1150                         goto err_range;
1151                 }
1152
1153                 /* Make sure, that this register range has no selector
1154                    or data window within its boundary */
1155                 for (j = 0; j < config->num_ranges; j++) {
1156                         unsigned int sel_reg = config->ranges[j].selector_reg;
1157                         unsigned int win_min = config->ranges[j].window_start;
1158                         unsigned int win_max = win_min +
1159                                                config->ranges[j].window_len - 1;
1160
1161                         /* Allow data window inside its own virtual range */
1162                         if (j == i)
1163                                 continue;
1164
1165                         if (range_cfg->range_min <= sel_reg &&
1166                             sel_reg <= range_cfg->range_max) {
1167                                 dev_err(map->dev,
1168                                         "Range %d: selector for %d in window\n",
1169                                         i, j);
1170                                 goto err_range;
1171                         }
1172
1173                         if (!(win_max < range_cfg->range_min ||
1174                               win_min > range_cfg->range_max)) {
1175                                 dev_err(map->dev,
1176                                         "Range %d: window for %d in window\n",
1177                                         i, j);
1178                                 goto err_range;
1179                         }
1180                 }
1181
1182                 new = kzalloc(sizeof(*new), GFP_KERNEL);
1183                 if (new == NULL) {
1184                         ret = -ENOMEM;
1185                         goto err_range;
1186                 }
1187
1188                 new->map = map;
1189                 new->name = range_cfg->name;
1190                 new->range_min = range_cfg->range_min;
1191                 new->range_max = range_cfg->range_max;
1192                 new->selector_reg = range_cfg->selector_reg;
1193                 new->selector_mask = range_cfg->selector_mask;
1194                 new->selector_shift = range_cfg->selector_shift;
1195                 new->window_start = range_cfg->window_start;
1196                 new->window_len = range_cfg->window_len;
1197
1198                 if (!_regmap_range_add(map, new)) {
1199                         dev_err(map->dev, "Failed to add range %d\n", i);
1200                         kfree(new);
1201                         goto err_range;
1202                 }
1203
1204                 if (map->selector_work_buf == NULL) {
1205                         map->selector_work_buf =
1206                                 kzalloc(map->format.buf_size, GFP_KERNEL);
1207                         if (map->selector_work_buf == NULL) {
1208                                 ret = -ENOMEM;
1209                                 goto err_range;
1210                         }
1211                 }
1212         }
1213
1214         ret = regcache_init(map, config);
1215         if (ret != 0)
1216                 goto err_range;
1217
1218         if (dev) {
1219                 ret = regmap_attach_dev(dev, map, config);
1220                 if (ret != 0)
1221                         goto err_regcache;
1222         } else {
1223                 regmap_debugfs_init(map);
1224         }
1225
1226         return map;
1227
1228 err_regcache:
1229         regcache_exit(map);
1230 err_range:
1231         regmap_range_exit(map);
1232         kfree(map->work_buf);
1233 err_hwlock:
1234         if (map->hwlock)
1235                 hwspin_lock_free(map->hwlock);
1236 err_name:
1237         kfree_const(map->name);
1238 err_map:
1239         kfree(map);
1240 err:
1241         return ERR_PTR(ret);
1242 }
1243 EXPORT_SYMBOL_GPL(__regmap_init);
1244
/* devres destructor: tear down the regmap stored in the devres payload. */
static void devm_regmap_release(struct device *dev, void *res)
{
	struct regmap **map = res;

	regmap_exit(*map);
}
1249
1250 struct regmap *__devm_regmap_init(struct device *dev,
1251                                   const struct regmap_bus *bus,
1252                                   void *bus_context,
1253                                   const struct regmap_config *config,
1254                                   struct lock_class_key *lock_key,
1255                                   const char *lock_name)
1256 {
1257         struct regmap **ptr, *regmap;
1258
1259         ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
1260         if (!ptr)
1261                 return ERR_PTR(-ENOMEM);
1262
1263         regmap = __regmap_init(dev, bus, bus_context, config,
1264                                lock_key, lock_name);
1265         if (!IS_ERR(regmap)) {
1266                 *ptr = regmap;
1267                 devres_add(dev, ptr);
1268         } else {
1269                 devres_free(ptr);
1270         }
1271
1272         return regmap;
1273 }
1274 EXPORT_SYMBOL_GPL(__devm_regmap_init);
1275
1276 static void regmap_field_init(struct regmap_field *rm_field,
1277         struct regmap *regmap, struct reg_field reg_field)
1278 {
1279         rm_field->regmap = regmap;
1280         rm_field->reg = reg_field.reg;
1281         rm_field->shift = reg_field.lsb;
1282         rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
1283         rm_field->id_size = reg_field.id_size;
1284         rm_field->id_offset = reg_field.id_offset;
1285 }
1286
1287 /**
1288  * devm_regmap_field_alloc() - Allocate and initialise a register field.
1289  *
1290  * @dev: Device that will be interacted with
1291  * @regmap: regmap bank in which this register field is located.
1292  * @reg_field: Register field with in the bank.
1293  *
1294  * The return value will be an ERR_PTR() on error or a valid pointer
1295  * to a struct regmap_field. The regmap_field will be automatically freed
1296  * by the device management code.
1297  */
1298 struct regmap_field *devm_regmap_field_alloc(struct device *dev,
1299                 struct regmap *regmap, struct reg_field reg_field)
1300 {
1301         struct regmap_field *rm_field = devm_kzalloc(dev,
1302                                         sizeof(*rm_field), GFP_KERNEL);
1303         if (!rm_field)
1304                 return ERR_PTR(-ENOMEM);
1305
1306         regmap_field_init(rm_field, regmap, reg_field);
1307
1308         return rm_field;
1309
1310 }
1311 EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
1312
1313
1314 /**
1315  * regmap_field_bulk_alloc() - Allocate and initialise a bulk register field.
1316  *
1317  * @regmap: regmap bank in which this register field is located.
1318  * @rm_field: regmap register fields within the bank.
1319  * @reg_field: Register fields within the bank.
1320  * @num_fields: Number of register fields.
1321  *
1322  * The return value will be an -ENOMEM on error or zero for success.
1323  * Newly allocated regmap_fields should be freed by calling
1324  * regmap_field_bulk_free()
1325  */
1326 int regmap_field_bulk_alloc(struct regmap *regmap,
1327                             struct regmap_field **rm_field,
1328                             const struct reg_field *reg_field,
1329                             int num_fields)
1330 {
1331         struct regmap_field *rf;
1332         int i;
1333
1334         rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL);
1335         if (!rf)
1336                 return -ENOMEM;
1337
1338         for (i = 0; i < num_fields; i++) {
1339                 regmap_field_init(&rf[i], regmap, reg_field[i]);
1340                 rm_field[i] = &rf[i];
1341         }
1342
1343         return 0;
1344 }
1345 EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);
1346
1347 /**
1348  * devm_regmap_field_bulk_alloc() - Allocate and initialise a bulk register
1349  * fields.
1350  *
1351  * @dev: Device that will be interacted with
1352  * @regmap: regmap bank in which this register field is located.
1353  * @rm_field: regmap register fields within the bank.
1354  * @reg_field: Register fields within the bank.
1355  * @num_fields: Number of register fields.
1356  *
1357  * The return value will be an -ENOMEM on error or zero for success.
1358  * Newly allocated regmap_fields will be automatically freed by the
1359  * device management code.
1360  */
1361 int devm_regmap_field_bulk_alloc(struct device *dev,
1362                                  struct regmap *regmap,
1363                                  struct regmap_field **rm_field,
1364                                  const struct reg_field *reg_field,
1365                                  int num_fields)
1366 {
1367         struct regmap_field *rf;
1368         int i;
1369
1370         rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
1371         if (!rf)
1372                 return -ENOMEM;
1373
1374         for (i = 0; i < num_fields; i++) {
1375                 regmap_field_init(&rf[i], regmap, reg_field[i]);
1376                 rm_field[i] = &rf[i];
1377         }
1378
1379         return 0;
1380 }
1381 EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);
1382
/**
 * regmap_field_bulk_free() - Free register field allocated using
 *                       regmap_field_bulk_alloc.
 *
 * @field: regmap fields which should be freed.
 *
 * The bulk fields are backed by a single array allocation (see
 * regmap_field_bulk_alloc()), so freeing the first field releases
 * the whole set.
 */
void regmap_field_bulk_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_free);
1394
/**
 * devm_regmap_field_bulk_free() - Free a bulk register field allocated using
 *                            devm_regmap_field_bulk_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register field allocated using devm_regmap_field_bulk_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per device-driver life-cycle.
 */
void devm_regmap_field_bulk_free(struct device *dev,
				 struct regmap_field *field)
{
	/* Releases the single devm array backing the whole bulk set. */
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);
1412
/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register field allocated using devm_regmap_field_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);
1430
1431 /**
1432  * regmap_field_alloc() - Allocate and initialise a register field.
1433  *
1434  * @regmap: regmap bank in which this register field is located.
1435  * @reg_field: Register field with in the bank.
1436  *
1437  * The return value will be an ERR_PTR() on error or a valid pointer
1438  * to a struct regmap_field. The regmap_field should be freed by the
1439  * user once its finished working with it using regmap_field_free().
1440  */
1441 struct regmap_field *regmap_field_alloc(struct regmap *regmap,
1442                 struct reg_field reg_field)
1443 {
1444         struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
1445
1446         if (!rm_field)
1447                 return ERR_PTR(-ENOMEM);
1448
1449         regmap_field_init(rm_field, regmap, reg_field);
1450
1451         return rm_field;
1452 }
1453 EXPORT_SYMBOL_GPL(regmap_field_alloc);
1454
/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 *
 * kfree(NULL) is a no-op, so passing a NULL field is harmless.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);
1466
/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	int ret;

	/* Drop the old cache and debugfs state before the map's
	 * accessibility callbacks are replaced below. */
	regcache_exit(map);
	regmap_debugfs_exit(map);

	/* Take over the register-access policy from the new config. */
	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	/* Refresh the name from the new config before re-creating the
	 * debugfs entries. */
	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_init(map);

	/* Start with hardware access enabled and write-through caching. */
	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);
1509
/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	/* Cache, debugfs and virtual-range state go first, while the
	 * rest of the map is still intact. */
	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	/* Give the bus a chance to release its context. */
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	/* Drain the pool of completed async transactions. */
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	/* Only the mutex locking variant needs explicit destruction. */
	if (map->lock == regmap_lock_mutex)
		mutex_destroy(&map->mutex);
	kfree_const(map->name);
	kfree(map->patch);
	/* Some busses transfer ownership of their regmap_bus struct. */
	if (map->bus && map->bus->free_on_exit)
		kfree(map->bus);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
1544
1545 static int dev_get_regmap_match(struct device *dev, void *res, void *data)
1546 {
1547         struct regmap **r = res;
1548         if (!r || !*r) {
1549                 WARN_ON(!r || !*r);
1550                 return 0;
1551         }
1552
1553         /* If the user didn't specify a name match any */
1554         if (data)
1555                 return !strcmp((*r)->name, data);
1556         else
1557                 return 1;
1558 }
1559
1560 /**
1561  * dev_get_regmap() - Obtain the regmap (if any) for a device
1562  *
1563  * @dev: Device to retrieve the map for
1564  * @name: Optional name for the register map, usually NULL.
1565  *
1566  * Returns the regmap for the device if one is present, or NULL.  If
1567  * name is specified then it must match the name specified when
1568  * registering the device, if it is NULL then the first regmap found
1569  * will be used.  Devices with multiple register maps are very rare,
1570  * generic code should normally not need to specify a name.
1571  */
1572 struct regmap *dev_get_regmap(struct device *dev, const char *name)
1573 {
1574         struct regmap **r = devres_find(dev, dev_get_regmap_release,
1575                                         dev_get_regmap_match, (void *)name);
1576
1577         if (!r)
1578                 return NULL;
1579         return *r;
1580 }
1581 EXPORT_SYMBOL_GPL(dev_get_regmap);
1582
/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 * NOTE(review): may be NULL for maps created without a device — see
 * the dev handling in __regmap_init(); confirm before dereferencing.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);
1595
/*
 * Translate a virtual register address inside an indirect range into
 * its physical data-window address, programming the range's selector
 * register to switch pages when needed.
 *
 * @map: regmap the access is made through
 * @reg: in: virtual register; out: translated data-window register
 * @range: range node covering *reg
 * @val_num: number of registers touched by this operation
 *
 * Returns 0 on success or a negative errno.
 */
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	/* Position of *reg within its page, and the page index itself. */
	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have selector register inside data window.
	   In that case, selector register is located on every page and
	   it needs no page switching, when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		/* Write the page number into the selector field; the
		 * page_chg change-detection result is unused here. */
		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		/* Restore the original work_buf before reporting errors. */
		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	/* Rewrite the caller's register to point into the data window. */
	*reg = range->window_start + win_offset;

	return 0;
}
1643
1644 static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
1645                                           unsigned long mask)
1646 {
1647         u8 *buf;
1648         int i;
1649
1650         if (!mask || !map->work_buf)
1651                 return;
1652
1653         buf = map->work_buf;
1654
1655         for (i = 0; i < max_bytes; i++)
1656                 buf[i] |= (mask >> (8 * i)) & 0xff;
1657 }
1658
1659 static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
1660                                   const void *val, size_t val_len, bool noinc)
1661 {
1662         struct regmap_range_node *range;
1663         unsigned long flags;
1664         void *work_val = map->work_buf + map->format.reg_bytes +
1665                 map->format.pad_bytes;
1666         void *buf;
1667         int ret = -ENOTSUPP;
1668         size_t len;
1669         int i;
1670
1671         WARN_ON(!map->bus);
1672
1673         /* Check for unwritable or noinc registers in range
1674          * before we start
1675          */
1676         if (!regmap_writeable_noinc(map, reg)) {
1677                 for (i = 0; i < val_len / map->format.val_bytes; i++) {
1678                         unsigned int element =
1679                                 reg + regmap_get_offset(map, i);
1680                         if (!regmap_writeable(map, element) ||
1681                                 regmap_writeable_noinc(map, element))
1682                                 return -EINVAL;
1683                 }
1684         }
1685
1686         if (!map->cache_bypass && map->format.parse_val) {
1687                 unsigned int ival;
1688                 int val_bytes = map->format.val_bytes;
1689                 for (i = 0; i < val_len / val_bytes; i++) {
1690                         ival = map->format.parse_val(val + (i * val_bytes));
1691                         ret = regcache_write(map,
1692                                              reg + regmap_get_offset(map, i),
1693                                              ival);
1694                         if (ret) {
1695                                 dev_err(map->dev,
1696                                         "Error in caching of register: %x ret: %d\n",
1697                                         reg + regmap_get_offset(map, i), ret);
1698                                 return ret;
1699                         }
1700                 }
1701                 if (map->cache_only) {
1702                         map->cache_dirty = true;
1703                         return 0;
1704                 }
1705         }
1706
1707         range = _regmap_range_lookup(map, reg);
1708         if (range) {
1709                 int val_num = val_len / map->format.val_bytes;
1710                 int win_offset = (reg - range->range_min) % range->window_len;
1711                 int win_residue = range->window_len - win_offset;
1712
1713                 /* If the write goes beyond the end of the window split it */
1714                 while (val_num > win_residue) {
1715                         dev_dbg(map->dev, "Writing window %d/%zu\n",
1716                                 win_residue, val_len / map->format.val_bytes);
1717                         ret = _regmap_raw_write_impl(map, reg, val,
1718                                                      win_residue *
1719                                                      map->format.val_bytes, noinc);
1720                         if (ret != 0)
1721                                 return ret;
1722
1723                         reg += win_residue;
1724                         val_num -= win_residue;
1725                         val += win_residue * map->format.val_bytes;
1726                         val_len -= win_residue * map->format.val_bytes;
1727
1728                         win_offset = (reg - range->range_min) %
1729                                 range->window_len;
1730                         win_residue = range->window_len - win_offset;
1731                 }
1732
1733                 ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
1734                 if (ret != 0)
1735                         return ret;
1736         }
1737
1738         map->format.format_reg(map->work_buf, reg, map->reg_shift);
1739         regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
1740                                       map->write_flag_mask);
1741
1742         /*
1743          * Essentially all I/O mechanisms will be faster with a single
1744          * buffer to write.  Since register syncs often generate raw
1745          * writes of single registers optimise that case.
1746          */
1747         if (val != work_val && val_len == map->format.val_bytes) {
1748                 memcpy(work_val, val, map->format.val_bytes);
1749                 val = work_val;
1750         }
1751
1752         if (map->async && map->bus->async_write) {
1753                 struct regmap_async *async;
1754
1755                 trace_regmap_async_write_start(map, reg, val_len);
1756
1757                 spin_lock_irqsave(&map->async_lock, flags);
1758                 async = list_first_entry_or_null(&map->async_free,
1759                                                  struct regmap_async,
1760                                                  list);
1761                 if (async)
1762                         list_del(&async->list);
1763                 spin_unlock_irqrestore(&map->async_lock, flags);
1764
1765                 if (!async) {
1766                         async = map->bus->async_alloc();
1767                         if (!async)
1768                                 return -ENOMEM;
1769
1770                         async->work_buf = kzalloc(map->format.buf_size,
1771                                                   GFP_KERNEL | GFP_DMA);
1772                         if (!async->work_buf) {
1773                                 kfree(async);
1774                                 return -ENOMEM;
1775                         }
1776                 }
1777
1778                 async->map = map;
1779
1780                 /* If the caller supplied the value we can use it safely. */
1781                 memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
1782                        map->format.reg_bytes + map->format.val_bytes);
1783
1784                 spin_lock_irqsave(&map->async_lock, flags);
1785                 list_add_tail(&async->list, &map->async_list);
1786                 spin_unlock_irqrestore(&map->async_lock, flags);
1787
1788                 if (val != work_val)
1789                         ret = map->bus->async_write(map->bus_context,
1790                                                     async->work_buf,
1791                                                     map->format.reg_bytes +
1792                                                     map->format.pad_bytes,
1793                                                     val, val_len, async);
1794                 else
1795                         ret = map->bus->async_write(map->bus_context,
1796                                                     async->work_buf,
1797                                                     map->format.reg_bytes +
1798                                                     map->format.pad_bytes +
1799                                                     val_len, NULL, 0, async);
1800
1801                 if (ret != 0) {
1802                         dev_err(map->dev, "Failed to schedule write: %d\n",
1803                                 ret);
1804
1805                         spin_lock_irqsave(&map->async_lock, flags);
1806                         list_move(&async->list, &map->async_free);
1807                         spin_unlock_irqrestore(&map->async_lock, flags);
1808                 }
1809
1810                 return ret;
1811         }
1812
1813         trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
1814
1815         /* If we're doing a single register write we can probably just
1816          * send the work_buf directly, otherwise try to do a gather
1817          * write.
1818          */
1819         if (val == work_val)
1820                 ret = map->bus->write(map->bus_context, map->work_buf,
1821                                       map->format.reg_bytes +
1822                                       map->format.pad_bytes +
1823                                       val_len);
1824         else if (map->bus->gather_write)
1825                 ret = map->bus->gather_write(map->bus_context, map->work_buf,
1826                                              map->format.reg_bytes +
1827                                              map->format.pad_bytes,
1828                                              val, val_len);
1829         else
1830                 ret = -ENOTSUPP;
1831
1832         /* If that didn't work fall back on linearising by hand. */
1833         if (ret == -ENOTSUPP) {
1834                 len = map->format.reg_bytes + map->format.pad_bytes + val_len;
1835                 buf = kzalloc(len, GFP_KERNEL);
1836                 if (!buf)
1837                         return -ENOMEM;
1838
1839                 memcpy(buf, map->work_buf, map->format.reg_bytes);
1840                 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
1841                        val, val_len);
1842                 ret = map->bus->write(map->bus_context, buf, len);
1843
1844                 kfree(buf);
1845         } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
1846                 /* regcache_drop_region() takes lock that we already have,
1847                  * thus call map->cache_ops->drop() directly
1848                  */
1849                 if (map->cache_ops && map->cache_ops->drop)
1850                         map->cache_ops->drop(map, reg, reg + 1);
1851         }
1852
1853         trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
1854
1855         return ret;
1856 }
1857
1858 /**
1859  * regmap_can_raw_write - Test if regmap_raw_write() is supported
1860  *
1861  * @map: Map to check.
1862  */
1863 bool regmap_can_raw_write(struct regmap *map)
1864 {
1865         return map->bus && map->bus->write && map->format.format_val &&
1866                 map->format.format_reg;
1867 }
1868 EXPORT_SYMBOL_GPL(regmap_can_raw_write);
1869
1870 /**
1871  * regmap_get_raw_read_max - Get the maximum size we can read
1872  *
1873  * @map: Map to check.
1874  */
1875 size_t regmap_get_raw_read_max(struct regmap *map)
1876 {
1877         return map->max_raw_read;
1878 }
1879 EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
1880
1881 /**
1882  * regmap_get_raw_write_max - Get the maximum size we can read
1883  *
1884  * @map: Map to check.
1885  */
1886 size_t regmap_get_raw_write_max(struct regmap *map)
1887 {
1888         return map->max_raw_write;
1889 }
1890 EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
1891
1892 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1893                                        unsigned int val)
1894 {
1895         int ret;
1896         struct regmap_range_node *range;
1897         struct regmap *map = context;
1898
1899         WARN_ON(!map->bus || !map->format.format_write);
1900
1901         range = _regmap_range_lookup(map, reg);
1902         if (range) {
1903                 ret = _regmap_select_page(map, &reg, range, 1);
1904                 if (ret != 0)
1905                         return ret;
1906         }
1907
1908         map->format.format_write(map, reg, val);
1909
1910         trace_regmap_hw_write_start(map, reg, 1);
1911
1912         ret = map->bus->write(map->bus_context, map->work_buf,
1913                               map->format.buf_size);
1914
1915         trace_regmap_hw_write_done(map, reg, 1);
1916
1917         return ret;
1918 }
1919
1920 static int _regmap_bus_reg_write(void *context, unsigned int reg,
1921                                  unsigned int val)
1922 {
1923         struct regmap *map = context;
1924
1925         return map->bus->reg_write(map->bus_context, reg, val);
1926 }
1927
1928 static int _regmap_bus_raw_write(void *context, unsigned int reg,
1929                                  unsigned int val)
1930 {
1931         struct regmap *map = context;
1932
1933         WARN_ON(!map->bus || !map->format.format_val);
1934
1935         map->format.format_val(map->work_buf + map->format.reg_bytes
1936                                + map->format.pad_bytes, val, 0);
1937         return _regmap_raw_write_impl(map, reg,
1938                                       map->work_buf +
1939                                       map->format.reg_bytes +
1940                                       map->format.pad_bytes,
1941                                       map->format.val_bytes,
1942                                       false);
1943 }
1944
1945 static inline void *_regmap_map_get_context(struct regmap *map)
1946 {
1947         return (map->bus) ? map : map->bus_context;
1948 }
1949
1950 int _regmap_write(struct regmap *map, unsigned int reg,
1951                   unsigned int val)
1952 {
1953         int ret;
1954         void *context = _regmap_map_get_context(map);
1955
1956         if (!regmap_writeable(map, reg))
1957                 return -EIO;
1958
1959         if (!map->cache_bypass && !map->defer_caching) {
1960                 ret = regcache_write(map, reg, val);
1961                 if (ret != 0)
1962                         return ret;
1963                 if (map->cache_only) {
1964                         map->cache_dirty = true;
1965                         return 0;
1966                 }
1967         }
1968
1969         ret = map->reg_write(context, reg, val);
1970         if (ret == 0) {
1971                 if (regmap_should_log(map))
1972                         dev_info(map->dev, "%x <= %x\n", reg, val);
1973
1974                 trace_regmap_reg_write(map, reg, val);
1975         }
1976
1977         return ret;
1978 }
1979
1980 /**
1981  * regmap_write() - Write a value to a single register
1982  *
1983  * @map: Register map to write to
1984  * @reg: Register to write to
1985  * @val: Value to be written
1986  *
1987  * A value of zero will be returned on success, a negative errno will
1988  * be returned in error cases.
1989  */
1990 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1991 {
1992         int ret;
1993
1994         if (!IS_ALIGNED(reg, map->reg_stride))
1995                 return -EINVAL;
1996
1997         map->lock(map->lock_arg);
1998
1999         ret = _regmap_write(map, reg, val);
2000
2001         map->unlock(map->lock_arg);
2002
2003         return ret;
2004 }
2005 EXPORT_SYMBOL_GPL(regmap_write);
2006
2007 /**
2008  * regmap_write_async() - Write a value to a single register asynchronously
2009  *
2010  * @map: Register map to write to
2011  * @reg: Register to write to
2012  * @val: Value to be written
2013  *
2014  * A value of zero will be returned on success, a negative errno will
2015  * be returned in error cases.
2016  */
2017 int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
2018 {
2019         int ret;
2020
2021         if (!IS_ALIGNED(reg, map->reg_stride))
2022                 return -EINVAL;
2023
2024         map->lock(map->lock_arg);
2025
2026         map->async = true;
2027
2028         ret = _regmap_write(map, reg, val);
2029
2030         map->async = false;
2031
2032         map->unlock(map->lock_arg);
2033
2034         return ret;
2035 }
2036 EXPORT_SYMBOL_GPL(regmap_write_async);
2037
2038 int _regmap_raw_write(struct regmap *map, unsigned int reg,
2039                       const void *val, size_t val_len, bool noinc)
2040 {
2041         size_t val_bytes = map->format.val_bytes;
2042         size_t val_count = val_len / val_bytes;
2043         size_t chunk_count, chunk_bytes;
2044         size_t chunk_regs = val_count;
2045         int ret, i;
2046
2047         if (!val_count)
2048                 return -EINVAL;
2049
2050         if (map->use_single_write)
2051                 chunk_regs = 1;
2052         else if (map->max_raw_write && val_len > map->max_raw_write)
2053                 chunk_regs = map->max_raw_write / val_bytes;
2054
2055         chunk_count = val_count / chunk_regs;
2056         chunk_bytes = chunk_regs * val_bytes;
2057
2058         /* Write as many bytes as possible with chunk_size */
2059         for (i = 0; i < chunk_count; i++) {
2060                 ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
2061                 if (ret)
2062                         return ret;
2063
2064                 reg += regmap_get_offset(map, chunk_regs);
2065                 val += chunk_bytes;
2066                 val_len -= chunk_bytes;
2067         }
2068
2069         /* Write remaining bytes */
2070         if (val_len)
2071                 ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);
2072
2073         return ret;
2074 }
2075
2076 /**
2077  * regmap_raw_write() - Write raw values to one or more registers
2078  *
2079  * @map: Register map to write to
2080  * @reg: Initial register to write to
2081  * @val: Block of data to be written, laid out for direct transmission to the
2082  *       device
2083  * @val_len: Length of data pointed to by val.
2084  *
2085  * This function is intended to be used for things like firmware
2086  * download where a large block of data needs to be transferred to the
2087  * device.  No formatting will be done on the data provided.
2088  *
2089  * A value of zero will be returned on success, a negative errno will
2090  * be returned in error cases.
2091  */
2092 int regmap_raw_write(struct regmap *map, unsigned int reg,
2093                      const void *val, size_t val_len)
2094 {
2095         int ret;
2096
2097         if (!regmap_can_raw_write(map))
2098                 return -EINVAL;
2099         if (val_len % map->format.val_bytes)
2100                 return -EINVAL;
2101
2102         map->lock(map->lock_arg);
2103
2104         ret = _regmap_raw_write(map, reg, val, val_len, false);
2105
2106         map->unlock(map->lock_arg);
2107
2108         return ret;
2109 }
2110 EXPORT_SYMBOL_GPL(regmap_raw_write);
2111
2112 /**
2113  * regmap_noinc_write(): Write data from a register without incrementing the
2114  *                      register number
2115  *
2116  * @map: Register map to write to
2117  * @reg: Register to write to
2118  * @val: Pointer to data buffer
2119  * @val_len: Length of output buffer in bytes.
2120  *
2121  * The regmap API usually assumes that bulk bus write operations will write a
2122  * range of registers. Some devices have certain registers for which a write
2123  * operation can write to an internal FIFO.
2124  *
2125  * The target register must be volatile but registers after it can be
2126  * completely unrelated cacheable registers.
2127  *
2128  * This will attempt multiple writes as required to write val_len bytes.
2129  *
2130  * A value of zero will be returned on success, a negative errno will be
2131  * returned in error cases.
2132  */
2133 int regmap_noinc_write(struct regmap *map, unsigned int reg,
2134                       const void *val, size_t val_len)
2135 {
2136         size_t write_len;
2137         int ret;
2138
2139         if (!map->bus)
2140                 return -EINVAL;
2141         if (!map->bus->write)
2142                 return -ENOTSUPP;
2143         if (val_len % map->format.val_bytes)
2144                 return -EINVAL;
2145         if (!IS_ALIGNED(reg, map->reg_stride))
2146                 return -EINVAL;
2147         if (val_len == 0)
2148                 return -EINVAL;
2149
2150         map->lock(map->lock_arg);
2151
2152         if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
2153                 ret = -EINVAL;
2154                 goto out_unlock;
2155         }
2156
2157         while (val_len) {
2158                 if (map->max_raw_write && map->max_raw_write < val_len)
2159                         write_len = map->max_raw_write;
2160                 else
2161                         write_len = val_len;
2162                 ret = _regmap_raw_write(map, reg, val, write_len, true);
2163                 if (ret)
2164                         goto out_unlock;
2165                 val = ((u8 *)val) + write_len;
2166                 val_len -= write_len;
2167         }
2168
2169 out_unlock:
2170         map->unlock(map->lock_arg);
2171         return ret;
2172 }
2173 EXPORT_SYMBOL_GPL(regmap_noinc_write);
2174
2175 /**
2176  * regmap_field_update_bits_base() - Perform a read/modify/write cycle a
2177  *                                   register field.
2178  *
2179  * @field: Register field to write to
2180  * @mask: Bitmask to change
2181  * @val: Value to be written
2182  * @change: Boolean indicating if a write was done
2183  * @async: Boolean indicating asynchronously
2184  * @force: Boolean indicating use force update
2185  *
2186  * Perform a read/modify/write cycle on the register field with change,
2187  * async, force option.
2188  *
2189  * A value of zero will be returned on success, a negative errno will
2190  * be returned in error cases.
2191  */
2192 int regmap_field_update_bits_base(struct regmap_field *field,
2193                                   unsigned int mask, unsigned int val,
2194                                   bool *change, bool async, bool force)
2195 {
2196         mask = (mask << field->shift) & field->mask;
2197
2198         return regmap_update_bits_base(field->regmap, field->reg,
2199                                        mask, val << field->shift,
2200                                        change, async, force);
2201 }
2202 EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
2203
2204 /**
2205  * regmap_fields_update_bits_base() - Perform a read/modify/write cycle a
2206  *                                    register field with port ID
2207  *
2208  * @field: Register field to write to
2209  * @id: port ID
2210  * @mask: Bitmask to change
2211  * @val: Value to be written
2212  * @change: Boolean indicating if a write was done
2213  * @async: Boolean indicating asynchronously
2214  * @force: Boolean indicating use force update
2215  *
2216  * A value of zero will be returned on success, a negative errno will
2217  * be returned in error cases.
2218  */
2219 int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
2220                                    unsigned int mask, unsigned int val,
2221                                    bool *change, bool async, bool force)
2222 {
2223         if (id >= field->id_size)
2224                 return -EINVAL;
2225
2226         mask = (mask << field->shift) & field->mask;
2227
2228         return regmap_update_bits_base(field->regmap,
2229                                        field->reg + (field->id_offset * id),
2230                                        mask, val << field->shift,
2231                                        change, async, force);
2232 }
2233 EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
2234
2235 /**
2236  * regmap_bulk_write() - Write multiple registers to the device
2237  *
2238  * @map: Register map to write to
2239  * @reg: First register to be write from
2240  * @val: Block of data to be written, in native register size for device
2241  * @val_count: Number of registers to write
2242  *
2243  * This function is intended to be used for writing a large block of
2244  * data to the device either in single transfer or multiple transfer.
2245  *
2246  * A value of zero will be returned on success, a negative errno will
2247  * be returned in error cases.
2248  */
2249 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
2250                      size_t val_count)
2251 {
2252         int ret = 0, i;
2253         size_t val_bytes = map->format.val_bytes;
2254
2255         if (!IS_ALIGNED(reg, map->reg_stride))
2256                 return -EINVAL;
2257
2258         /*
2259          * Some devices don't support bulk write, for them we have a series of
2260          * single write operations.
2261          */
2262         if (!map->bus || !map->format.parse_inplace) {
2263                 map->lock(map->lock_arg);
2264                 for (i = 0; i < val_count; i++) {
2265                         unsigned int ival;
2266
2267                         switch (val_bytes) {
2268                         case 1:
2269                                 ival = *(u8 *)(val + (i * val_bytes));
2270                                 break;
2271                         case 2:
2272                                 ival = *(u16 *)(val + (i * val_bytes));
2273                                 break;
2274                         case 4:
2275                                 ival = *(u32 *)(val + (i * val_bytes));
2276                                 break;
2277 #ifdef CONFIG_64BIT
2278                         case 8:
2279                                 ival = *(u64 *)(val + (i * val_bytes));
2280                                 break;
2281 #endif
2282                         default:
2283                                 ret = -EINVAL;
2284                                 goto out;
2285                         }
2286
2287                         ret = _regmap_write(map,
2288                                             reg + regmap_get_offset(map, i),
2289                                             ival);
2290                         if (ret != 0)
2291                                 goto out;
2292                 }
2293 out:
2294                 map->unlock(map->lock_arg);
2295         } else {
2296                 void *wval;
2297
2298                 wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
2299                 if (!wval)
2300                         return -ENOMEM;
2301
2302                 for (i = 0; i < val_count * val_bytes; i += val_bytes)
2303                         map->format.parse_inplace(wval + i);
2304
2305                 ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);
2306
2307                 kfree(wval);
2308         }
2309         return ret;
2310 }
2311 EXPORT_SYMBOL_GPL(regmap_bulk_write);
2312
2313 /*
2314  * _regmap_raw_multi_reg_write()
2315  *
2316  * the (register,newvalue) pairs in regs have not been formatted, but
2317  * they are all in the same page and have been changed to being page
2318  * relative. The page register has been written if that was necessary.
2319  */
2320 static int _regmap_raw_multi_reg_write(struct regmap *map,
2321                                        const struct reg_sequence *regs,
2322                                        size_t num_regs)
2323 {
2324         int ret;
2325         void *buf;
2326         int i;
2327         u8 *u8;
2328         size_t val_bytes = map->format.val_bytes;
2329         size_t reg_bytes = map->format.reg_bytes;
2330         size_t pad_bytes = map->format.pad_bytes;
2331         size_t pair_size = reg_bytes + pad_bytes + val_bytes;
2332         size_t len = pair_size * num_regs;
2333
2334         if (!len)
2335                 return -EINVAL;
2336
2337         buf = kzalloc(len, GFP_KERNEL);
2338         if (!buf)
2339                 return -ENOMEM;
2340
2341         /* We have to linearise by hand. */
2342
2343         u8 = buf;
2344
2345         for (i = 0; i < num_regs; i++) {
2346                 unsigned int reg = regs[i].reg;
2347                 unsigned int val = regs[i].def;
2348                 trace_regmap_hw_write_start(map, reg, 1);
2349                 map->format.format_reg(u8, reg, map->reg_shift);
2350                 u8 += reg_bytes + pad_bytes;
2351                 map->format.format_val(u8, val, 0);
2352                 u8 += val_bytes;
2353         }
2354         u8 = buf;
2355         *u8 |= map->write_flag_mask;
2356
2357         ret = map->bus->write(map->bus_context, buf, len);
2358
2359         kfree(buf);
2360
2361         for (i = 0; i < num_regs; i++) {
2362                 int reg = regs[i].reg;
2363                 trace_regmap_hw_write_done(map, reg, 1);
2364         }
2365         return ret;
2366 }
2367
2368 static unsigned int _regmap_register_page(struct regmap *map,
2369                                           unsigned int reg,
2370                                           struct regmap_range_node *range)
2371 {
2372         unsigned int win_page = (reg - range->range_min) / range->window_len;
2373
2374         return win_page;
2375 }
2376
/*
 * Write a sequence that may cross window pages or contain delays.
 * The sequence is chopped into runs at every page change or delay;
 * each run is written with _regmap_raw_multi_reg_write().
 * Caller must hold the map's lock; regs must be a writable copy.
 */
static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;	/* i: index into regs; n: length of the pending run */
	struct reg_sequence *base;	/* start of the pending run */
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * the set of registers are not necessarily in order, but
	 * since the order of write must be preserved this algorithm
	 * chops the set each time the page changes. This also applies
	 * if there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Detect a page change relative to the previous register. */
		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

				/* For situations where the first write requires
				 * a delay we need to make sure we don't call
				 * raw_multi_reg_write with n=0
				 * This can't occur with page breaks as we
				 * never write on the first iteration
				 */
				if (regs[i].delay_us && i == 0)
					n = 1;

				/* Flush the run accumulated so far. */
				ret = _regmap_raw_multi_reg_write(map, base, n);
				if (ret != 0)
					return ret;

				if (regs[i].delay_us) {
					if (map->can_sleep)
						fsleep(regs[i].delay_us);
					else
						udelay(regs[i].delay_us);
				}

				/* Start a new run at the current register. */
				base += n;
				n = 0;

				if (page_change) {
					ret = _regmap_select_page(map,
								  &base[n].reg,
								  range, 1);
					if (ret != 0)
						return ret;

					page_change = 0;
				}

		}

	}
	/* Flush whatever is left after the final register. */
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}
2457
/*
 * Core of the multi-register write API: validate the sequence, update
 * the cache, then dispatch to single writes, the paged writer, or one
 * linearised raw transfer.  Caller must hold the map's lock.
 */
static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	/* No multi-write support: fall back to one write per register. */
	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			/* Honour any inter-write delay in the sequence. */
			if (regs[i].delay_us) {
				if (map->can_sleep)
					fsleep(regs[i].delay_us);
				else
					udelay(regs[i].delay_us);
			}
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	/* Validate every register before writing any of them. */
	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	/* Update the cache up front so it stays coherent with hardware. */
	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
				"Error in caching of register: %x ret: %d\n",
								reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence)*num_regs;
			/* The paged writer rewrites reg fields, so work on
			 * a copy of the caller's const sequence.
			 */
			struct reg_sequence *base = kmemdup(regs, len,
							   GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	/* No pages or delays: one linearised transfer does it all. */
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}
2536
2537 /**
2538  * regmap_multi_reg_write() - Write multiple registers to the device
2539  *
2540  * @map: Register map to write to
2541  * @regs: Array of structures containing register,value to be written
2542  * @num_regs: Number of registers to write
2543  *
2544  * Write multiple registers to the device where the set of register, value
2545  * pairs are supplied in any order, possibly not all in a single range.
2546  *
2547  * The 'normal' block write mode will send ultimately send data on the
2548  * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
2549  * addressed. However, this alternative block multi write mode will send
2550  * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
2551  * must of course support the mode.
2552  *
2553  * A value of zero will be returned on success, a negative errno will be
2554  * returned in error cases.
2555  */
2556 int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
2557                            int num_regs)
2558 {
2559         int ret;
2560
2561         map->lock(map->lock_arg);
2562
2563         ret = _regmap_multi_reg_write(map, regs, num_regs);
2564
2565         map->unlock(map->lock_arg);
2566
2567         return ret;
2568 }
2569 EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
2570
2571 /**
2572  * regmap_multi_reg_write_bypassed() - Write multiple registers to the
2573  *                                     device but not the cache
2574  *
2575  * @map: Register map to write to
2576  * @regs: Array of structures containing register,value to be written
2577  * @num_regs: Number of registers to write
2578  *
2579  * Write multiple registers to the device but not the cache where the set
2580  * of register are supplied in any order.
2581  *
2582  * This function is intended to be used for writing a large block of data
2583  * atomically to the device in single transfer for those I2C client devices
2584  * that implement this alternative block write mode.
2585  *
2586  * A value of zero will be returned on success, a negative errno will
2587  * be returned in error cases.
2588  */
2589 int regmap_multi_reg_write_bypassed(struct regmap *map,
2590                                     const struct reg_sequence *regs,
2591                                     int num_regs)
2592 {
2593         int ret;
2594         bool bypass;
2595
2596         map->lock(map->lock_arg);
2597
2598         bypass = map->cache_bypass;
2599         map->cache_bypass = true;
2600
2601         ret = _regmap_multi_reg_write(map, regs, num_regs);
2602
2603         map->cache_bypass = bypass;
2604
2605         map->unlock(map->lock_arg);
2606
2607         return ret;
2608 }
2609 EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
2610
2611 /**
2612  * regmap_raw_write_async() - Write raw values to one or more registers
2613  *                            asynchronously
2614  *
2615  * @map: Register map to write to
2616  * @reg: Initial register to write to
2617  * @val: Block of data to be written, laid out for direct transmission to the
2618  *       device.  Must be valid until regmap_async_complete() is called.
2619  * @val_len: Length of data pointed to by val.
2620  *
2621  * This function is intended to be used for things like firmware
2622  * download where a large block of data needs to be transferred to the
2623  * device.  No formatting will be done on the data provided.
2624  *
2625  * If supported by the underlying bus the write will be scheduled
2626  * asynchronously, helping maximise I/O speed on higher speed buses
2627  * like SPI.  regmap_async_complete() can be called to ensure that all
2628  * asynchrnous writes have been completed.
2629  *
2630  * A value of zero will be returned on success, a negative errno will
2631  * be returned in error cases.
2632  */
2633 int regmap_raw_write_async(struct regmap *map, unsigned int reg,
2634                            const void *val, size_t val_len)
2635 {
2636         int ret;
2637
2638         if (val_len % map->format.val_bytes)
2639                 return -EINVAL;
2640         if (!IS_ALIGNED(reg, map->reg_stride))
2641                 return -EINVAL;
2642
2643         map->lock(map->lock_arg);
2644
2645         map->async = true;
2646
2647         ret = _regmap_raw_write(map, reg, val, val_len, false);
2648
2649         map->async = false;
2650
2651         map->unlock(map->lock_arg);
2652
2653         return ret;
2654 }
2655 EXPORT_SYMBOL_GPL(regmap_raw_write_async);
2656
/*
 * Read a block of raw (unformatted) data from the device.
 *
 * Handles paged register ranges via _regmap_select_page(); a noinc read
 * counts as touching a single register for the page window.  The register
 * address is formatted into the map's work buffer, the read flag mask is
 * applied, and the transfer is issued through the bus read() op.  Callers
 * (regmap_read()/regmap_raw_read()/regmap_noinc_read()) take the map's
 * lock before getting here.
 */
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
                            unsigned int val_len, bool noinc)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	/* Raw access requires a bus with a read() operation. */
	if (!map->bus || !map->bus->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
		                          noinc ? 1 : val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	/* Format the (possibly page-adjusted) register into the work
	 * buffer and OR in the bus read flag before transferring.
	 */
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
	                              map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
	                     map->format.reg_bytes + map->format.pad_bytes,
	                     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}
2689
2690 static int _regmap_bus_reg_read(void *context, unsigned int reg,
2691                                 unsigned int *val)
2692 {
2693         struct regmap *map = context;
2694
2695         return map->bus->reg_read(map->bus_context, reg, val);
2696 }
2697
2698 static int _regmap_bus_read(void *context, unsigned int reg,
2699                             unsigned int *val)
2700 {
2701         int ret;
2702         struct regmap *map = context;
2703         void *work_val = map->work_buf + map->format.reg_bytes +
2704                 map->format.pad_bytes;
2705
2706         if (!map->format.parse_val)
2707                 return -EINVAL;
2708
2709         ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
2710         if (ret == 0)
2711                 *val = map->format.parse_val(work_val);
2712
2713         return ret;
2714 }
2715
/*
 * Read a single register value; callers (e.g. regmap_read()) take the
 * map's lock before getting here.
 *
 * The register cache is consulted first unless cache_bypass is set; a
 * cache hit returns without touching the hardware.  In cache_only mode
 * a cache miss fails with -EBUSY rather than accessing the device.  A
 * successful hardware read is written back into the cache.
 */
static int _regmap_read(struct regmap *map, unsigned int reg,
                        unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x => %x\n", reg, *val);

		trace_regmap_reg_read(map, reg, *val);

		/* Keep the cache coherent with the value just read. */
		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}
2747
2748 /**
2749  * regmap_read() - Read a value from a single register
2750  *
2751  * @map: Register map to read from
2752  * @reg: Register to be read from
2753  * @val: Pointer to store read value
2754  *
2755  * A value of zero will be returned on success, a negative errno will
2756  * be returned in error cases.
2757  */
2758 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
2759 {
2760         int ret;
2761
2762         if (!IS_ALIGNED(reg, map->reg_stride))
2763                 return -EINVAL;
2764
2765         map->lock(map->lock_arg);
2766
2767         ret = _regmap_read(map, reg, val);
2768
2769         map->unlock(map->lock_arg);
2770
2771         return ret;
2772 }
2773 EXPORT_SYMBOL_GPL(regmap_read);
2774
2775 /**
2776  * regmap_raw_read() - Read raw data from the device
2777  *
2778  * @map: Register map to read from
2779  * @reg: First register to be read from
2780  * @val: Pointer to store read value
2781  * @val_len: Size of data to read
2782  *
2783  * A value of zero will be returned on success, a negative errno will
2784  * be returned in error cases.
2785  */
2786 int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2787                     size_t val_len)
2788 {
2789         size_t val_bytes = map->format.val_bytes;
2790         size_t val_count = val_len / val_bytes;
2791         unsigned int v;
2792         int ret, i;
2793
2794         if (!map->bus)
2795                 return -EINVAL;
2796         if (val_len % map->format.val_bytes)
2797                 return -EINVAL;
2798         if (!IS_ALIGNED(reg, map->reg_stride))
2799                 return -EINVAL;
2800         if (val_count == 0)
2801                 return -EINVAL;
2802
2803         map->lock(map->lock_arg);
2804
2805         if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
2806             map->cache_type == REGCACHE_NONE) {
2807                 size_t chunk_count, chunk_bytes;
2808                 size_t chunk_regs = val_count;
2809
2810                 if (!map->bus->read) {
2811                         ret = -ENOTSUPP;
2812                         goto out;
2813                 }
2814
2815                 if (map->use_single_read)
2816                         chunk_regs = 1;
2817                 else if (map->max_raw_read && val_len > map->max_raw_read)
2818                         chunk_regs = map->max_raw_read / val_bytes;
2819
2820                 chunk_count = val_count / chunk_regs;
2821                 chunk_bytes = chunk_regs * val_bytes;
2822
2823                 /* Read bytes that fit into whole chunks */
2824                 for (i = 0; i < chunk_count; i++) {
2825                         ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
2826                         if (ret != 0)
2827                                 goto out;
2828
2829                         reg += regmap_get_offset(map, chunk_regs);
2830                         val += chunk_bytes;
2831                         val_len -= chunk_bytes;
2832                 }
2833
2834                 /* Read remaining bytes */
2835                 if (val_len) {
2836                         ret = _regmap_raw_read(map, reg, val, val_len, false);
2837                         if (ret != 0)
2838                                 goto out;
2839                 }
2840         } else {
2841                 /* Otherwise go word by word for the cache; should be low
2842                  * cost as we expect to hit the cache.
2843                  */
2844                 for (i = 0; i < val_count; i++) {
2845                         ret = _regmap_read(map, reg + regmap_get_offset(map, i),
2846                                            &v);
2847                         if (ret != 0)
2848                                 goto out;
2849
2850                         map->format.format_val(val + (i * val_bytes), v, 0);
2851                 }
2852         }
2853
2854  out:
2855         map->unlock(map->lock_arg);
2856
2857         return ret;
2858 }
2859 EXPORT_SYMBOL_GPL(regmap_raw_read);
2860
2861 /**
2862  * regmap_noinc_read(): Read data from a register without incrementing the
2863  *                      register number
2864  *
2865  * @map: Register map to read from
2866  * @reg: Register to read from
2867  * @val: Pointer to data buffer
2868  * @val_len: Length of output buffer in bytes.
2869  *
2870  * The regmap API usually assumes that bulk bus read operations will read a
2871  * range of registers. Some devices have certain registers for which a read
2872  * operation read will read from an internal FIFO.
2873  *
2874  * The target register must be volatile but registers after it can be
2875  * completely unrelated cacheable registers.
2876  *
2877  * This will attempt multiple reads as required to read val_len bytes.
2878  *
2879  * A value of zero will be returned on success, a negative errno will be
2880  * returned in error cases.
2881  */
2882 int regmap_noinc_read(struct regmap *map, unsigned int reg,
2883                       void *val, size_t val_len)
2884 {
2885         size_t read_len;
2886         int ret;
2887
2888         if (!map->bus)
2889                 return -EINVAL;
2890         if (!map->bus->read)
2891                 return -ENOTSUPP;
2892         if (val_len % map->format.val_bytes)
2893                 return -EINVAL;
2894         if (!IS_ALIGNED(reg, map->reg_stride))
2895                 return -EINVAL;
2896         if (val_len == 0)
2897                 return -EINVAL;
2898
2899         map->lock(map->lock_arg);
2900
2901         if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
2902                 ret = -EINVAL;
2903                 goto out_unlock;
2904         }
2905
2906         while (val_len) {
2907                 if (map->max_raw_read && map->max_raw_read < val_len)
2908                         read_len = map->max_raw_read;
2909                 else
2910                         read_len = val_len;
2911                 ret = _regmap_raw_read(map, reg, val, read_len, true);
2912                 if (ret)
2913                         goto out_unlock;
2914                 val = ((u8 *)val) + read_len;
2915                 val_len -= read_len;
2916         }
2917
2918 out_unlock:
2919         map->unlock(map->lock_arg);
2920         return ret;
2921 }
2922 EXPORT_SYMBOL_GPL(regmap_noinc_read);
2923
2924 /**
2925  * regmap_field_read(): Read a value to a single register field
2926  *
2927  * @field: Register field to read from
2928  * @val: Pointer to store read value
2929  *
2930  * A value of zero will be returned on success, a negative errno will
2931  * be returned in error cases.
2932  */
2933 int regmap_field_read(struct regmap_field *field, unsigned int *val)
2934 {
2935         int ret;
2936         unsigned int reg_val;
2937         ret = regmap_read(field->regmap, field->reg, &reg_val);
2938         if (ret != 0)
2939                 return ret;
2940
2941         reg_val &= field->mask;
2942         reg_val >>= field->shift;
2943         *val = reg_val;
2944
2945         return ret;
2946 }
2947 EXPORT_SYMBOL_GPL(regmap_field_read);
2948
2949 /**
2950  * regmap_fields_read() - Read a value to a single register field with port ID
2951  *
2952  * @field: Register field to read from
2953  * @id: port ID
2954  * @val: Pointer to store read value
2955  *
2956  * A value of zero will be returned on success, a negative errno will
2957  * be returned in error cases.
2958  */
2959 int regmap_fields_read(struct regmap_field *field, unsigned int id,
2960                        unsigned int *val)
2961 {
2962         int ret;
2963         unsigned int reg_val;
2964
2965         if (id >= field->id_size)
2966                 return -EINVAL;
2967
2968         ret = regmap_read(field->regmap,
2969                           field->reg + (field->id_offset * id),
2970                           &reg_val);
2971         if (ret != 0)
2972                 return ret;
2973
2974         reg_val &= field->mask;
2975         reg_val >>= field->shift;
2976         *val = reg_val;
2977
2978         return ret;
2979 }
2980 EXPORT_SYMBOL_GPL(regmap_fields_read);
2981
2982 /**
2983  * regmap_bulk_read() - Read multiple registers from the device
2984  *
2985  * @map: Register map to read from
2986  * @reg: First register to be read from
2987  * @val: Pointer to store read value, in native register size for device
2988  * @val_count: Number of registers to read
2989  *
2990  * A value of zero will be returned on success, a negative errno will
2991  * be returned in error cases.
2992  */
2993 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
2994                      size_t val_count)
2995 {
2996         int ret, i;
2997         size_t val_bytes = map->format.val_bytes;
2998         bool vol = regmap_volatile_range(map, reg, val_count);
2999
3000         if (!IS_ALIGNED(reg, map->reg_stride))
3001                 return -EINVAL;
3002         if (val_count == 0)
3003                 return -EINVAL;
3004
3005         if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
3006                 ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
3007                 if (ret != 0)
3008                         return ret;
3009
3010                 for (i = 0; i < val_count * val_bytes; i += val_bytes)
3011                         map->format.parse_inplace(val + i);
3012         } else {
3013 #ifdef CONFIG_64BIT
3014                 u64 *u64 = val;
3015 #endif
3016                 u32 *u32 = val;
3017                 u16 *u16 = val;
3018                 u8 *u8 = val;
3019
3020                 map->lock(map->lock_arg);
3021
3022                 for (i = 0; i < val_count; i++) {
3023                         unsigned int ival;
3024
3025                         ret = _regmap_read(map, reg + regmap_get_offset(map, i),
3026                                            &ival);
3027                         if (ret != 0)
3028                                 goto out;
3029
3030                         switch (map->format.val_bytes) {
3031 #ifdef CONFIG_64BIT
3032                         case 8:
3033                                 u64[i] = ival;
3034                                 break;
3035 #endif
3036                         case 4:
3037                                 u32[i] = ival;
3038                                 break;
3039                         case 2:
3040                                 u16[i] = ival;
3041                                 break;
3042                         case 1:
3043                                 u8[i] = ival;
3044                                 break;
3045                         default:
3046                                 ret = -EINVAL;
3047                                 goto out;
3048                         }
3049                 }
3050
3051 out:
3052                 map->unlock(map->lock_arg);
3053         }
3054
3055         return ret;
3056 }
3057 EXPORT_SYMBOL_GPL(regmap_bulk_read);
3058
/*
 * Read/modify/write a register; callers take the map's lock first.
 *
 * If the register is volatile and the bus provides a reg_update_bits()
 * op the update is delegated to the hardware.  Otherwise the current
 * value is read, the bits selected by @mask are replaced with @val, and
 * the result is written back only if it differs from the original value
 * (or @force_write is set).  @change, when non-NULL, reports whether a
 * write actually happened.
 */
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
                               unsigned int mask, unsigned int val,
                               bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		/* Skip the write if nothing would change. */
		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}
3090
3091 /**
3092  * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
3093  *
3094  * @map: Register map to update
3095  * @reg: Register to update
3096  * @mask: Bitmask to change
3097  * @val: New value for bitmask
3098  * @change: Boolean indicating if a write was done
3099  * @async: Boolean indicating asynchronously
3100  * @force: Boolean indicating use force update
3101  *
3102  * Perform a read/modify/write cycle on a register map with change, async, force
3103  * options.
3104  *
3105  * If async is true:
3106  *
3107  * With most buses the read must be done synchronously so this is most useful
3108  * for devices with a cache which do not need to interact with the hardware to
3109  * determine the current register value.
3110  *
3111  * Returns zero for success, a negative number on error.
3112  */
3113 int regmap_update_bits_base(struct regmap *map, unsigned int reg,
3114                             unsigned int mask, unsigned int val,
3115                             bool *change, bool async, bool force)
3116 {
3117         int ret;
3118
3119         map->lock(map->lock_arg);
3120
3121         map->async = async;
3122
3123         ret = _regmap_update_bits(map, reg, mask, val, change, force);
3124
3125         map->async = false;
3126
3127         map->unlock(map->lock_arg);
3128
3129         return ret;
3130 }
3131 EXPORT_SYMBOL_GPL(regmap_update_bits_base);
3132
3133 /**
3134  * regmap_test_bits() - Check if all specified bits are set in a register.
3135  *
3136  * @map: Register map to operate on
3137  * @reg: Register to read from
3138  * @bits: Bits to test
3139  *
3140  * Returns 0 if at least one of the tested bits is not set, 1 if all tested
3141  * bits are set and a negative error number if the underlying regmap_read()
3142  * fails.
3143  */
3144 int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
3145 {
3146         unsigned int val, ret;
3147
3148         ret = regmap_read(map, reg, &val);
3149         if (ret)
3150                 return ret;
3151
3152         return (val & bits) == bits;
3153 }
3154 EXPORT_SYMBOL_GPL(regmap_test_bits);
3155
/*
 * Completion callback invoked by bus drivers when an asynchronous
 * transfer finishes.  Moves the async descriptor back onto the free
 * list, records any non-zero completion status in map->async_ret, and
 * wakes regmap_async_complete() waiters once the pending list drains.
 *
 * NOTE(review): takes async_lock without irqsave, unlike the other
 * users of this lock in this file - presumably bus drivers call this
 * from a context where that is safe; confirm against the callers.
 */
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
3176
3177 static int regmap_async_is_done(struct regmap *map)
3178 {
3179         unsigned long flags;
3180         int ret;
3181
3182         spin_lock_irqsave(&map->async_lock, flags);
3183         ret = list_empty(&map->async_list);
3184         spin_unlock_irqrestore(&map->async_lock, flags);
3185
3186         return ret;
3187 }
3188
3189 /**
3190  * regmap_async_complete - Ensure all asynchronous I/O has completed.
3191  *
3192  * @map: Map to operate on.
3193  *
3194  * Blocks until any pending asynchronous I/O has completed.  Returns
3195  * an error code for any failed I/O operations.
3196  */
3197 int regmap_async_complete(struct regmap *map)
3198 {
3199         unsigned long flags;
3200         int ret;
3201
3202         /* Nothing to do with no async support */
3203         if (!map->bus || !map->bus->async_write)
3204                 return 0;
3205
3206         trace_regmap_async_complete_start(map);
3207
3208         wait_event(map->async_waitq, regmap_async_is_done(map));
3209
3210         spin_lock_irqsave(&map->async_lock, flags);
3211         ret = map->async_ret;
3212         map->async_ret = 0;
3213         spin_unlock_irqrestore(&map->async_lock, flags);
3214
3215         trace_regmap_async_complete_done(map);
3216
3217         return ret;
3218 }
3219 EXPORT_SYMBOL_GPL(regmap_async_complete);
3220
3221 /**
3222  * regmap_register_patch - Register and apply register updates to be applied
3223  *                         on device initialistion
3224  *
3225  * @map: Register map to apply updates to.
3226  * @regs: Values to update.
3227  * @num_regs: Number of entries in regs.
3228  *
3229  * Register a set of register updates to be applied to the device
3230  * whenever the device registers are synchronised with the cache and
3231  * apply them immediately.  Typically this is used to apply
3232  * corrections to be applied to the device defaults on startup, such
3233  * as the updates some vendors provide to undocumented registers.
3234  *
3235  * The caller must ensure that this function cannot be called
3236  * concurrently with either itself or regcache_sync().
3237  */
3238 int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
3239                           int num_regs)
3240 {
3241         struct reg_sequence *p;
3242         int ret;
3243         bool bypass;
3244
3245         if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
3246             num_regs))
3247                 return 0;
3248
3249         p = krealloc(map->patch,
3250                      sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
3251                      GFP_KERNEL);
3252         if (p) {
3253                 memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
3254                 map->patch = p;
3255                 map->patch_regs += num_regs;
3256         } else {
3257                 return -ENOMEM;
3258         }
3259
3260         map->lock(map->lock_arg);
3261
3262         bypass = map->cache_bypass;
3263
3264         map->cache_bypass = true;
3265         map->async = true;
3266
3267         ret = _regmap_multi_reg_write(map, regs, num_regs);
3268
3269         map->async = false;
3270         map->cache_bypass = bypass;
3271
3272         map->unlock(map->lock_arg);
3273
3274         regmap_async_complete(map);
3275
3276         return ret;
3277 }
3278 EXPORT_SYMBOL_GPL(regmap_register_patch);
3279
3280 /**
3281  * regmap_get_val_bytes() - Report the size of a register value
3282  *
3283  * @map: Register map to operate on.
3284  *
3285  * Report the size of a register value, mainly intended to for use by
3286  * generic infrastructure built on top of regmap.
3287  */
3288 int regmap_get_val_bytes(struct regmap *map)
3289 {
3290         if (map->format.format_write)
3291                 return -EINVAL;
3292
3293         return map->format.val_bytes;
3294 }
3295 EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
3296
3297 /**
3298  * regmap_get_max_register() - Report the max register value
3299  *
3300  * @map: Register map to operate on.
3301  *
3302  * Report the max register value, mainly intended to for use by
3303  * generic infrastructure built on top of regmap.
3304  */
3305 int regmap_get_max_register(struct regmap *map)
3306 {
3307         return map->max_register ? map->max_register : -EINVAL;
3308 }
3309 EXPORT_SYMBOL_GPL(regmap_get_max_register);
3310
3311 /**
3312  * regmap_get_reg_stride() - Report the register address stride
3313  *
3314  * @map: Register map to operate on.
3315  *
3316  * Report the register address stride, mainly intended to for use by
3317  * generic infrastructure built on top of regmap.
3318  */
3319 int regmap_get_reg_stride(struct regmap *map)
3320 {
3321         return map->reg_stride;
3322 }
3323 EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
3324
/**
 * regmap_parse_val() - Parse a register value from a raw buffer
 *
 * @map: Register map the data is formatted for
 * @buf: Raw buffer holding one register value in device format
 * @val: Location to store the parsed value
 *
 * Returns zero on success or -EINVAL if the map's format provides no
 * parse_val() operation.
 */
int regmap_parse_val(struct regmap *map, const void *buf,
                        unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);
3336
/* Set up the regmap debugfs infrastructure early during boot. */
static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);