// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - LZO caching support
//
// Copyright 2011 Wolfson Microelectronics plc

#include <linux/device.h>
#include <linux/lzo.h>
#include <linux/slab.h>

#include "internal.h"

static int regcache_lzo_exit(struct regmap *map);

struct regcache_lzo_ctx {
	void *wmem;
	void *dst;
	const void *src;
	size_t src_len;
	size_t dst_len;
	size_t decompressed_size;
	unsigned long *sync_bmp;
	int sync_bmp_nbits;
};

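/*
 * The raw register defaults are carved into LZO_BLOCK_NUM chunks and each
 * chunk is compressed independently, so a read or write only has to
 * decompress (and, for writes, recompress) a single block rather than the
 * whole cache.
 */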
#define LZO_BLOCK_NUM 8
static int regcache_lzo_block_count(struct regmap *map)
{
	return LZO_BLOCK_NUM;
}

static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx)
{
	lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!lzo_ctx->wmem)
		return -ENOMEM;
	return 0;
}

static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx)
{
	size_t compress_size;
	int ret;

	ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
			       lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
	if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
		return -EINVAL;
	lzo_ctx->dst_len = compress_size;
	return 0;
}

static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx)
{
	size_t dst_len;
	int ret;

	dst_len = lzo_ctx->dst_len;
	ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
				    lzo_ctx->dst, &dst_len);
	if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
		return -EINVAL;
	return 0;
}

static int regcache_lzo_compress_cache_block(struct regmap *map,
		struct regcache_lzo_ctx *lzo_ctx)
{
	int ret;

	lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
	if (!lzo_ctx->dst) {
		lzo_ctx->dst_len = 0;
		return -ENOMEM;
	}

	ret = regcache_lzo_compress(lzo_ctx);
	if (ret < 0)
		return ret;
	return 0;
}

static int regcache_lzo_decompress_cache_block(struct regmap *map,
		struct regcache_lzo_ctx *lzo_ctx)
{
	int ret;

	lzo_ctx->dst_len = lzo_ctx->decompressed_size;
	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
	if (!lzo_ctx->dst) {
		lzo_ctx->dst_len = 0;
		return -ENOMEM;
	}

	ret = regcache_lzo_decompress(lzo_ctx);
	if (ret < 0)
		return ret;
	return 0;
}

static inline int regcache_lzo_get_blkindex(struct regmap *map,
					    unsigned int reg)
{
	return ((reg / map->reg_stride) * map->cache_word_size) /
		DIV_ROUND_UP(map->cache_size_raw,
			     regcache_lzo_block_count(map));
}

static inline int regcache_lzo_get_blkpos(struct regmap *map,
					  unsigned int reg)
{
	return (reg / map->reg_stride) %
		    (DIV_ROUND_UP(map->cache_size_raw,
				  regcache_lzo_block_count(map)) /
		     map->cache_word_size);
}

static inline int regcache_lzo_get_blksize(struct regmap *map)
{
	return DIV_ROUND_UP(map->cache_size_raw,
			    regcache_lzo_block_count(map));
}

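/*
 * The helpers above map a register address onto the compressed cache:
 * blkindex picks which LZO block holds the register, blkpos is the word
 * offset of the register inside the decompressed block, and blksize is
 * the uncompressed size of one block in bytes.
 */
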
static int regcache_lzo_init(struct regmap *map)
{
	struct regcache_lzo_ctx **lzo_blocks;
	size_t bmp_size;
	int ret, i, blksize, blkcount;
	const char *p, *end;
	unsigned long *sync_bmp;

	ret = 0;

	blkcount = regcache_lzo_block_count(map);
	map->cache = kcalloc(blkcount, sizeof(*lzo_blocks),
			     GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;
	lzo_blocks = map->cache;

	/*
	 * allocate a bitmap to be used when syncing the cache with
	 * the hardware. Each time a register is modified, the corresponding
	 * bit is set in the bitmap, so we know that we have to sync
	 * that register.
	 */
	bmp_size = map->num_reg_defaults_raw;
	sync_bmp = kmalloc_array(BITS_TO_LONGS(bmp_size), sizeof(long),
				 GFP_KERNEL);
	if (!sync_bmp) {
		ret = -ENOMEM;
		goto err;
	}
	bitmap_zero(sync_bmp, bmp_size);

	/* allocate the lzo blocks and initialize them */
	for (i = 0; i < blkcount; i++) {
		lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
					GFP_KERNEL);
		if (!lzo_blocks[i]) {
			kfree(sync_bmp);
			ret = -ENOMEM;
			goto err;
		}
		lzo_blocks[i]->sync_bmp = sync_bmp;
		lzo_blocks[i]->sync_bmp_nbits = bmp_size;
		/* alloc the working space for the compressed block */
		ret = regcache_lzo_prepare(lzo_blocks[i]);
		if (ret < 0)
			goto err;
	}

	blksize = regcache_lzo_get_blksize(map);
	p = map->reg_defaults_raw;
	end = map->reg_defaults_raw + map->cache_size_raw;
	/* compress the register map and fill the lzo blocks */
	for (i = 0; i < blkcount; i++, p += blksize) {
		lzo_blocks[i]->src = p;
		if (p + blksize > end)
			lzo_blocks[i]->src_len = end - p;
		else
			lzo_blocks[i]->src_len = blksize;
		ret = regcache_lzo_compress_cache_block(map,
							lzo_blocks[i]);
		if (ret < 0)
			goto err;
		lzo_blocks[i]->decompressed_size =
			lzo_blocks[i]->src_len;
	}

	return 0;
err:
	regcache_lzo_exit(map);
	return ret;
}

static int regcache_lzo_exit(struct regmap *map)
{
	struct regcache_lzo_ctx **lzo_blocks;
	int i, blkcount;

	lzo_blocks = map->cache;
	if (!lzo_blocks)
		return 0;

	blkcount = regcache_lzo_block_count(map);
	/*
	 * the pointer to the bitmap used for syncing the cache
	 * is shared amongst all lzo_blocks. Ensure it is freed
	 * only once.
	 */
	if (lzo_blocks[0])
		kfree(lzo_blocks[0]->sync_bmp);
	for (i = 0; i < blkcount; i++) {
		if (lzo_blocks[i]) {
			kfree(lzo_blocks[i]->wmem);
			kfree(lzo_blocks[i]->dst);
		}
		/* each lzo_block is a pointer returned by kmalloc or NULL */
		kfree(lzo_blocks[i]);
	}

	kfree(lzo_blocks);
	map->cache = NULL;
	return 0;
}

static int regcache_lzo_read(struct regmap *map,
			     unsigned int reg, unsigned int *value)
{
	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = regcache_lzo_get_blkindex(map, reg);
	/* register index within the decompressed block */
	blkpos = regcache_lzo_get_blkpos(map, reg);
	lzo_blocks = map->cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block */
	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
	if (ret >= 0)
		/* fetch the value from the cache */
		*value = regcache_get_val(map, lzo_block->dst, blkpos);

	kfree(lzo_block->dst);
	/* restore the pointer and length of the compressed block */
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;

	return ret;
}

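/*
 * Writes follow the same decompress-on-demand pattern as reads, but also
 * have to store the new value in the decompressed block, recompress it and
 * keep the freshly compressed buffer as the block's backing store on
 * success; the old compressed buffer is only restored on failure.
 */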
static int regcache_lzo_write(struct regmap *map,
			      unsigned int reg, unsigned int value)
{
	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = regcache_lzo_get_blkindex(map, reg);
	/* register index within the decompressed block */
	blkpos = regcache_lzo_get_blkpos(map, reg);
	lzo_blocks = map->cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block */
	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* write the new value to the cache */
	if (regcache_set_val(map, lzo_block->dst, blkpos, value)) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* prepare the source to be the decompressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* compress the block */
	ret = regcache_lzo_compress_cache_block(map, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		kfree(lzo_block->src);
		goto out;
	}

	/* set the bit so we know we have to sync this register */
	set_bit(reg / map->reg_stride, lzo_block->sync_bmp);
	kfree(tmp_dst);
	kfree(lzo_block->src);
	return 0;
out:
	/* restore the pointer and length of the compressed block */
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;
	return ret;
}

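/*
 * Sync walks the shared dirty bitmap and writes back every register whose
 * bit is set, skipping registers that still hold their hardware default.
 */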
static int regcache_lzo_sync(struct regmap *map, unsigned int min,
			     unsigned int max)
{
	struct regcache_lzo_ctx **lzo_blocks;
	unsigned int val;
	int i;
	int ret;

	lzo_blocks = map->cache;
	i = min;
	for_each_set_bit_from(i, lzo_blocks[0]->sync_bmp,
			      lzo_blocks[0]->sync_bmp_nbits) {
		if (i > max)
			continue;

		ret = regcache_read(map, i, &val);
		if (ret)
			return ret;

		/* Is this the hardware default? If so skip. */
		ret = regcache_lookup_reg(map, i);
		if (ret > 0 && val == map->reg_defaults[ret].def)
			continue;

		map->cache_bypass = true;
		ret = _regmap_write(map, i, val);
		map->cache_bypass = false;
		if (ret)
			return ret;
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			i, val);
	}

	return 0;
}

struct regcache_ops regcache_lzo_ops = {
	.type = REGCACHE_COMPRESSED,
	.name = "lzo",
	.init = regcache_lzo_init,
	.exit = regcache_lzo_exit,
	.read = regcache_lzo_read,
	.write = regcache_lzo_write,
	.sync = regcache_lzo_sync
};

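/*
 * Note: the regmap core matches this ops table against the cache_type a
 * driver requests. A hypothetical driver would opt in with something like:
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits	= 8,
 *		.val_bits	= 16,
 *		.max_register	= FOO_MAX_REGISTER,
 *		.reg_defaults	= foo_reg_defaults,
 *		.num_reg_defaults = ARRAY_SIZE(foo_reg_defaults),
 *		.cache_type	= REGCACHE_COMPRESSED,
 *	};
 *
 * "foo_*" and FOO_MAX_REGISTER are placeholder names; the selection logic
 * itself lives in the regmap core, not in this file.
 */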