// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - maple tree based cache
//
// Copyright 2023 Arm, Ltd

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/maple_tree.h>
#include <linux/slab.h>

#include "internal.h"
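
/*
 * Each entry in the tree is an array of unsigned long register values
 * covering a contiguous run of registers: mas.index is the first
 * register in the run and mas.last the final one, so a register's
 * value lives at entry[reg - mas.index].
 */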
static int regcache_maple_read(struct regmap *map,
                               unsigned int reg, unsigned int *value)
{
        struct maple_tree *mt = map->cache;
        MA_STATE(mas, mt, reg, reg);
        unsigned long *entry;

        rcu_read_lock();
        entry = mas_walk(&mas);
        if (entry)
                *value = entry[reg - mas.index];
        rcu_read_unlock();

        return entry ? 0 : -ENOENT;
}
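
/*
 * A write either updates a register in place within an existing entry
 * or allocates a new entry, merging it with any entries immediately
 * below or above so that contiguous registers stay in one allocation.
 */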
static int regcache_maple_write(struct regmap *map, unsigned int reg,
                                unsigned int val)
{
        struct maple_tree *mt = map->cache;
        MA_STATE(mas, mt, reg, reg);
        unsigned long *entry, *upper, *lower;
        unsigned long index, last;
        size_t lower_sz, upper_sz;
        int ret;

        rcu_read_lock();

        /* Update in place if the register is already cached */
        entry = mas_walk(&mas);
        if (entry) {
                entry[reg - mas.index] = val;
                rcu_read_unlock();
                return 0;
        }

        /* Any adjacent entries to extend/merge? */
        mas_set_range(&mas, reg - 1, reg + 1);
        index = reg;
        last = reg;

        lower = mas_find(&mas, reg - 1);
        if (lower) {
                index = mas.index;
                lower_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
        }

        upper = mas_find(&mas, reg + 1);
        if (upper) {
                last = mas.last;
                upper_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
        }

        rcu_read_unlock();

        entry = kmalloc((last - index + 1) * sizeof(unsigned long),
                        map->alloc_flags);
        if (!entry)
                return -ENOMEM;

        if (lower)
                memcpy(entry, lower, lower_sz);
        entry[reg - index] = val;
        if (upper)
                memcpy(&entry[reg - index + 1], upper, upper_sz);

        /*
         * This is safe because the regmap lock means the Maple lock
         * is redundant, but we need to take it due to lockdep asserts
         * in the maple tree code.
         */
        mas_lock(&mas);
        mas_set_range(&mas, index, last);
        ret = mas_store_gfp(&mas, entry, map->alloc_flags);
        mas_unlock(&mas);

        /* The merged neighbours have been replaced in the tree */
        if (ret == 0) {
                kfree(lower);
                kfree(upper);
        }

        return ret;
}
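
/*
 * Dropping a register range may leave cached values on either side of
 * [min, max]; those are copied out into new, trimmed entries before
 * the old entry is erased.
 */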
static int regcache_maple_drop(struct regmap *map, unsigned int min,
                               unsigned int max)
{
        struct maple_tree *mt = map->cache;
        MA_STATE(mas, mt, min, max);
        unsigned long *entry, *lower, *upper;
        unsigned long lower_index, lower_last;
        unsigned long upper_index, upper_last;
        int ret = 0;

        lower = NULL;
        upper = NULL;

        mas_lock(&mas);

        mas_for_each(&mas, entry, max) {
                /*
                 * This is safe because the regmap lock means the
                 * Maple lock is redundant, but we need to take it due
                 * to lockdep asserts in the maple tree code.
                 */
                mas_unlock(&mas);

                /* Do we need to save any of this entry? */
                if (mas.index < min) {
                        lower_index = mas.index;
                        lower_last = min - 1;
                        lower = kmemdup(entry, ((min - mas.index) *
                                                sizeof(unsigned long)),
                                        map->alloc_flags);
                        if (!lower) {
                                ret = -ENOMEM;
                                goto out_unlocked;
                        }
                }

                if (mas.last > max) {
                        upper_index = max + 1;
                        upper_last = mas.last;
                        upper = kmemdup(&entry[max - mas.index + 1],
                                        ((mas.last - max) *
                                         sizeof(unsigned long)),
                                        map->alloc_flags);
                        if (!upper) {
                                ret = -ENOMEM;
                                goto out_unlocked;
                        }
                }

                kfree(entry);
                mas_lock(&mas);
                mas_erase(&mas);

                /* Insert new nodes with the saved data */
                if (lower) {
                        mas_set_range(&mas, lower_index, lower_last);
                        ret = mas_store_gfp(&mas, lower, map->alloc_flags);
                        if (ret != 0)
                                goto out;
                        lower = NULL;
                }

                if (upper) {
                        mas_set_range(&mas, upper_index, upper_last);
                        ret = mas_store_gfp(&mas, upper, map->alloc_flags);
                        if (ret != 0)
                                goto out;
                        upper = NULL;
                }
        }

out:
        mas_unlock(&mas);
out_unlocked:
        kfree(lower);
        kfree(upper);

        return ret;
}
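
/*
 * Write registers [min, max) from a single cache entry back to the
 * device, using one raw block write where the bus supports it.
 */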
static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry,
                                     struct ma_state *mas,
                                     unsigned int min, unsigned int max)
{
        void *buf;
        unsigned long r;
        size_t val_bytes = map->format.val_bytes;
        int ret = 0;

        /* Drop the RCU read lock around the bus I/O, which may sleep */
        mas_pause(mas);
        rcu_read_unlock();

        /*
         * Use a raw write if writing more than one register to a
         * device that supports raw writes to reduce transaction
         * overhead.
         */
        if (max - min > 1 && regmap_can_raw_write(map)) {
                buf = kmalloc(val_bytes * (max - min), map->alloc_flags);
                if (!buf) {
                        ret = -ENOMEM;
                        goto out;
                }

                /* Render the data for a raw write */
                for (r = min; r < max; r++) {
                        regcache_set_val(map, buf, r - min,
                                         entry[r - mas->index]);
                }

                ret = _regmap_raw_write(map, min, buf, (max - min) * val_bytes,
                                        false);
                kfree(buf);
        } else {
                for (r = min; r < max; r++) {
                        ret = _regmap_write(map, r,
                                            entry[r - mas->index]);
                        if (ret != 0)
                                goto out;
                }
        }

out:
        rcu_read_lock();
        return ret;
}
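
/*
 * Sync walks every cached range overlapping [min, max], batching
 * consecutive registers that need syncing into block writes.
 * cache_bypass is set so the writes go straight to the device rather
 * than back into the cache.
 */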
static int regcache_maple_sync(struct regmap *map, unsigned int min,
                               unsigned int max)
{
        struct maple_tree *mt = map->cache;
        unsigned long *entry;
        MA_STATE(mas, mt, min, max);
        unsigned long lmin = min;
        unsigned long lmax = max;
        unsigned int r, v, sync_start;
        int ret = 0;
        bool sync_needed = false;

        map->cache_bypass = true;

        rcu_read_lock();

        mas_for_each(&mas, entry, max) {
                for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
                        v = entry[r - mas.index];

                        if (regcache_reg_needs_sync(map, r, v)) {
                                /* Start or extend the current run */
                                if (!sync_needed) {
                                        sync_start = r;
                                        sync_needed = true;
                                }
                                continue;
                        }

                        if (!sync_needed)
                                continue;

                        /* Flush the run that just ended */
                        ret = regcache_maple_sync_block(map, entry, &mas,
                                                        sync_start, r);
                        if (ret != 0)
                                goto out;
                        sync_needed = false;
                }

                if (sync_needed) {
                        /* Flush a run reaching the end of this entry */
                        ret = regcache_maple_sync_block(map, entry, &mas,
                                                        sync_start, r);
                        if (ret != 0)
                                goto out;
                        sync_needed = false;
                }
        }

out:
        rcu_read_unlock();

        map->cache_bypass = false;

        return ret;
}

static int regcache_maple_exit(struct regmap *map)
{
        struct maple_tree *mt = map->cache;
        MA_STATE(mas, mt, 0, UINT_MAX);
        unsigned long *entry;

        /* if we've already been called then just return */
        if (!mt)
                return 0;

        mas_lock(&mas);
        mas_for_each(&mas, entry, UINT_MAX)
                kfree(entry);
        __mt_destroy(mt);
        mas_unlock(&mas);

        kfree(mt);
        map->cache = NULL;

        return 0;
}
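
/*
 * Seed one entry from reg_defaults[first..last]; the caller ensures
 * these defaults cover a contiguous run of registers.
 */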
static int regcache_maple_insert_block(struct regmap *map, int first,
                                       int last)
{
        struct maple_tree *mt = map->cache;
        MA_STATE(mas, mt, first, last);
        unsigned long *entry;
        int i, ret;

        entry = kcalloc(last - first + 1, sizeof(unsigned long), map->alloc_flags);
        if (!entry)
                return -ENOMEM;

        for (i = 0; i < last - first + 1; i++)
                entry[i] = map->reg_defaults[first + i].def;

        mas_lock(&mas);
        mas_set_range(&mas, map->reg_defaults[first].reg,
                      map->reg_defaults[last].reg);
        ret = mas_store_gfp(&mas, entry, map->alloc_flags);
        mas_unlock(&mas);

        if (ret)
                kfree(entry);

        return ret;
}

static int regcache_maple_init(struct regmap *map)
{
        struct maple_tree *mt;
        int i, ret;
        int range_start;

        mt = kmalloc(sizeof(*mt), GFP_KERNEL);
        if (!mt)
                return -ENOMEM;
        map->cache = mt;

        mt_init(mt);

        if (!map->num_reg_defaults)
                return 0;

        range_start = 0;

        /* Scan for ranges of contiguous registers */
        for (i = 1; i < map->num_reg_defaults; i++) {
                if (map->reg_defaults[i].reg !=
                    map->reg_defaults[i - 1].reg + 1) {
                        ret = regcache_maple_insert_block(map, range_start,
                                                          i - 1);
                        if (ret != 0)
                                goto err;

                        range_start = i;
                }
        }

        /* Add the last block */
        ret = regcache_maple_insert_block(map, range_start,
                                          map->num_reg_defaults - 1);
        if (ret != 0)
                goto err;

        return 0;

err:
        regcache_maple_exit(map);
        return ret;
}

struct regcache_ops regcache_maple_ops = {
        .type = REGCACHE_MAPLE,
        .name = "maple",
        .init = regcache_maple_init,
        .exit = regcache_maple_exit,
        .read = regcache_maple_read,
        .write = regcache_maple_write,
        .drop = regcache_maple_drop,
        .sync = regcache_maple_sync,
};
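
/*
 * Illustrative only, not part of this file: a driver opts into this
 * cache through its regmap_config, for example:
 *
 *	static const struct regmap_config example_config = {
 *		.reg_bits	= 8,
 *		.val_bits	= 8,
 *		.max_register	= 0xff,
 *		.cache_type	= REGCACHE_MAPLE,
 *	};
 *
 * "example_config" and the register sizes above are made up for the
 * sake of the example; .cache_type = REGCACHE_MAPLE is what selects
 * this cache implementation.
 */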