drivers/base/regmap/regcache-rbtree.c
/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>

#include "internal.h"

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                                 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);

struct regcache_rbtree_node {
        /* the actual rbtree node holding this block */
        struct rb_node node;
        /* base register handled by this block */
        unsigned int base_reg;
        /* block of adjacent registers */
        void *block;
        /* number of registers available in the block */
        unsigned int blklen;
} __attribute__ ((packed));

struct regcache_rbtree_ctx {
        struct rb_root root;
        struct regcache_rbtree_node *cached_rbnode;
};

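/* Work out the first and last register addresses covered by an rbnode's block */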
static inline void regcache_rbtree_get_base_top_reg(
        struct regmap *map,
        struct regcache_rbtree_node *rbnode,
        unsigned int *base, unsigned int *top)
{
        *base = rbnode->base_reg;
        *top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}

static unsigned int regcache_rbtree_get_register(struct regmap *map,
        struct regcache_rbtree_node *rbnode, unsigned int idx)
{
        return regcache_get_val(map, rbnode->block, idx);
}

static void regcache_rbtree_set_register(struct regmap *map,
                                         struct regcache_rbtree_node *rbnode,
                                         unsigned int idx, unsigned int val)
{
        regcache_set_val(map, rbnode->block, idx, val);
}

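/*
 * Find the rbnode whose block covers @reg, checking the most recently
 * used node before walking the rbtree.  Returns NULL if no cached block
 * contains the register.
 */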
static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
                                                           unsigned int reg)
{
        struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
        struct rb_node *node;
        struct regcache_rbtree_node *rbnode;
        unsigned int base_reg, top_reg;

        rbnode = rbtree_ctx->cached_rbnode;
        if (rbnode) {
                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (reg >= base_reg && reg <= top_reg)
                        return rbnode;
        }

        node = rbtree_ctx->root.rb_node;
        while (node) {
                rbnode = container_of(node, struct regcache_rbtree_node, node);
                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (reg >= base_reg && reg <= top_reg) {
                        rbtree_ctx->cached_rbnode = rbnode;
                        return rbnode;
                } else if (reg > top_reg) {
                        node = node->rb_right;
                } else if (reg < base_reg) {
                        node = node->rb_left;
                }
        }

        return NULL;
}

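/*
 * Link @rbnode into the rbtree, ordered by base register.  Returns 0 if an
 * existing block already covers the new node's base register, 1 after the
 * node has been inserted.
 */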
static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
                                  struct regcache_rbtree_node *rbnode)
{
        struct rb_node **new, *parent;
        struct regcache_rbtree_node *rbnode_tmp;
        unsigned int base_reg_tmp, top_reg_tmp;
        unsigned int base_reg;

        parent = NULL;
        new = &root->rb_node;
        while (*new) {
                rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
                                          node);
                /* base and top registers of the current rbnode */
                regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
                                                 &top_reg_tmp);
                /* base register of the rbnode to be added */
                base_reg = rbnode->base_reg;
                parent = *new;
                /* if this register has already been inserted, just return */
                if (base_reg >= base_reg_tmp &&
                    base_reg <= top_reg_tmp)
                        return 0;
                else if (base_reg > top_reg_tmp)
                        new = &((*new)->rb_right);
                else if (base_reg < base_reg_tmp)
                        new = &((*new)->rb_left);
        }

        /* insert the node into the rbtree */
        rb_link_node(&rbnode->node, parent, new);
        rb_insert_color(&rbnode->node, root);

        return 1;
}

#ifdef CONFIG_DEBUG_FS
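/* Dump each block's register range and the overall cache memory usage */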
static int rbtree_show(struct seq_file *s, void *ignored)
{
        struct regmap *map = s->private;
        struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
        struct regcache_rbtree_node *n;
        struct rb_node *node;
        unsigned int base, top;
        size_t mem_size;
        int nodes = 0;
        int registers = 0;
        int this_registers, average;

        map->lock(map->lock_arg);

        mem_size = sizeof(*rbtree_ctx);
        mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long);

        for (node = rb_first(&rbtree_ctx->root); node != NULL;
             node = rb_next(node)) {
                n = container_of(node, struct regcache_rbtree_node, node);
                mem_size += sizeof(*n);
                mem_size += (n->blklen * map->cache_word_size);

                regcache_rbtree_get_base_top_reg(map, n, &base, &top);
                this_registers = ((top - base) / map->reg_stride) + 1;
                seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

                nodes++;
                registers += this_registers;
        }

        if (nodes)
                average = registers / nodes;
        else
                average = 0;

        seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
                   nodes, registers, average, mem_size);

        map->unlock(map->lock_arg);

        return 0;
}

static int rbtree_open(struct inode *inode, struct file *file)
{
        return single_open(file, rbtree_show, inode->i_private);
}

static const struct file_operations rbtree_fops = {
        .open           = rbtree_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void rbtree_debugfs_init(struct regmap *map)
{
        debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#else
static void rbtree_debugfs_init(struct regmap *map)
{
}
#endif

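/* Allocate the cache context and seed it with the register defaults */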
static int regcache_rbtree_init(struct regmap *map)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        int i;
        int ret;

        map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
        if (!map->cache)
                return -ENOMEM;

        rbtree_ctx = map->cache;
        rbtree_ctx->root = RB_ROOT;
        rbtree_ctx->cached_rbnode = NULL;

        for (i = 0; i < map->num_reg_defaults; i++) {
                ret = regcache_rbtree_write(map,
                                            map->reg_defaults[i].reg,
                                            map->reg_defaults[i].def);
                if (ret)
                        goto err;
        }

        rbtree_debugfs_init(map);

        return 0;

err:
        regcache_rbtree_exit(map);
        return ret;
}

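/* Free every cached block and then the cache context itself */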
static int regcache_rbtree_exit(struct regmap *map)
{
        struct rb_node *next;
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct regcache_rbtree_node *rbtree_node;

        /* if we've already been called then just return */
        rbtree_ctx = map->cache;
        if (!rbtree_ctx)
                return 0;

        /* free up the rbtree */
        next = rb_first(&rbtree_ctx->root);
        while (next) {
                rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
                next = rb_next(&rbtree_node->node);
                rb_erase(&rbtree_node->node, &rbtree_ctx->root);
                kfree(rbtree_node->block);
                kfree(rbtree_node);
        }

        /* release the resources */
        kfree(map->cache);
        map->cache = NULL;

        return 0;
}

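/*
 * Read @reg from the cache.  Returns -ENOENT if no block covers the
 * register or it has not been marked present.
 */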
static int regcache_rbtree_read(struct regmap *map,
                                unsigned int reg, unsigned int *value)
{
        struct regcache_rbtree_node *rbnode;
        unsigned int reg_tmp;

        rbnode = regcache_rbtree_lookup(map, reg);
        if (rbnode) {
                reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
                if (!regcache_reg_present(map, reg))
                        return -ENOENT;
                *value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
        } else {
                return -ENOENT;
        }

        return 0;
}

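/*
 * Grow an existing block by one register and store @value at position
 * @pos, shifting the later entries up by one word.  The base register is
 * updated when the new entry is prepended.
 */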
static int regcache_rbtree_insert_to_block(struct regmap *map,
                                           struct regcache_rbtree_node *rbnode,
                                           unsigned int pos, unsigned int reg,
                                           unsigned int value)
{
        u8 *blk;

        blk = krealloc(rbnode->block,
                       (rbnode->blklen + 1) * map->cache_word_size,
                       GFP_KERNEL);
        if (!blk)
                return -ENOMEM;

        /* insert the register value in the correct place in the rbnode block */
        memmove(blk + (pos + 1) * map->cache_word_size,
                blk + pos * map->cache_word_size,
                (rbnode->blklen - pos) * map->cache_word_size);

        /* update the rbnode block, its size and the base register */
        rbnode->block = blk;
        rbnode->blklen++;
        if (!pos)
                rbnode->base_reg = reg;

        regcache_rbtree_set_register(map, rbnode, pos, value);
        return 0;
}

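/*
 * Allocate a new rbnode covering @reg.  If the read table describes a
 * readable range containing @reg it is used to size the block, otherwise
 * a default-sized block based at @reg is allocated.
 */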
static struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
        struct regcache_rbtree_node *rbnode;
        const struct regmap_range *range;
        int i;

        rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
        if (!rbnode)
                return NULL;

        /* If there is a read table then use it to guess at an allocation */
        if (map->rd_table) {
                for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
                        if (regmap_reg_in_range(reg,
                                                &map->rd_table->yes_ranges[i]))
                                break;
                }

                if (i != map->rd_table->n_yes_ranges) {
                        range = &map->rd_table->yes_ranges[i];
                        rbnode->blklen = range->range_max - range->range_min
                                + 1;
                        rbnode->base_reg = range->range_min;
                }
        }

        if (!rbnode->blklen) {
                rbnode->blklen = sizeof(*rbnode);
                rbnode->base_reg = reg;
        }

        rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
                                GFP_KERNEL);
        if (!rbnode->block) {
                kfree(rbnode);
                return NULL;
        }

        return rbnode;
}

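/*
 * Cache @value for @reg: update the covering block if one exists, extend
 * an adjacent block if possible, otherwise allocate a fresh block.
 */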
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                                 unsigned int value)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct regcache_rbtree_node *rbnode, *rbnode_tmp;
        struct rb_node *node;
        unsigned int reg_tmp;
        unsigned int pos;
        int i;
        int ret;

        rbtree_ctx = map->cache;
        /* update the reg_present bitmap, make space if necessary */
        ret = regcache_set_reg_present(map, reg);
        if (ret < 0)
                return ret;

        /* if we can't locate it in the cached rbnode we'll have
         * to traverse the rbtree looking for it.
         */
        rbnode = regcache_rbtree_lookup(map, reg);
        if (rbnode) {
                reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
                regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
        } else {
                /* look for an adjacent register to the one we are about to add */
                for (node = rb_first(&rbtree_ctx->root); node;
                     node = rb_next(node)) {
                        rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
                                              node);
                        for (i = 0; i < rbnode_tmp->blklen; i++) {
                                reg_tmp = rbnode_tmp->base_reg +
                                                (i * map->reg_stride);
                                if (abs(reg_tmp - reg) != map->reg_stride)
                                        continue;
                                /* decide where in the block to place our register */
                                if (reg_tmp + map->reg_stride == reg)
                                        pos = i + 1;
                                else
                                        pos = i;
                                ret = regcache_rbtree_insert_to_block(map,
                                                                      rbnode_tmp,
                                                                      pos, reg,
                                                                      value);
                                if (ret)
                                        return ret;
                                rbtree_ctx->cached_rbnode = rbnode_tmp;
                                return 0;
                        }
                }

                /* We did not manage to find a place to insert it in
                 * an existing block so create a new rbnode.
                 */
                rbnode = regcache_rbtree_node_alloc(map, reg);
                if (!rbnode)
                        return -ENOMEM;
                regcache_rbtree_set_register(map, rbnode,
                                             reg - rbnode->base_reg, value);
                regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
                rbtree_ctx->cached_rbnode = rbnode;
        }

        return 0;
}

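/*
 * Write the cached registers in the [min, max] range back to the hardware
 * and wait for any outstanding asynchronous writes to complete.
 */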
static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
                                unsigned int max)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct rb_node *node;
        struct regcache_rbtree_node *rbnode;
        int ret;
        int base, end;

        rbtree_ctx = map->cache;
        for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
                rbnode = rb_entry(node, struct regcache_rbtree_node, node);

                if (rbnode->base_reg > max)
                        break;
                if (rbnode->base_reg + rbnode->blklen < min)
                        continue;

                if (min > rbnode->base_reg)
                        base = min - rbnode->base_reg;
                else
                        base = 0;

                if (max < rbnode->base_reg + rbnode->blklen)
                        end = max - rbnode->base_reg + 1;
                else
                        end = rbnode->blklen;

                ret = regcache_sync_block(map, rbnode->block, rbnode->base_reg,
                                          base, end);
                if (ret != 0)
                        return ret;
        }

        return regmap_async_complete(map);
}

struct regcache_ops regcache_rbtree_ops = {
        .type = REGCACHE_RBTREE,
        .name = "rbtree",
        .init = regcache_rbtree_init,
        .exit = regcache_rbtree_exit,
        .read = regcache_rbtree_read,
        .write = regcache_rbtree_write,
        .sync = regcache_rbtree_sync
};