// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) Nelson Integration, LLC 2016
 * Author: Eric Nelson<eric@nelint.com>
 */
#include <config.h>
#include <common.h>
#include <malloc.h>
#include <part.h>
#include <linux/ctype.h>
#include <linux/list.h>
14 struct block_cache_node {
24 static LIST_HEAD(block_cache);
26 static struct block_cache_stats _stats = {
27 .max_blocks_per_entry = 8,
31 static struct block_cache_node *cache_find(int iftype, int devnum,
32 lbaint_t start, lbaint_t blkcnt,
35 struct block_cache_node *node;
37 list_for_each_entry(node, &block_cache, lh)
38 if ((node->iftype == iftype) &&
39 (node->devnum == devnum) &&
40 (node->blksz == blksz) &&
41 (node->start <= start) &&
42 (node->start + node->blkcnt >= start + blkcnt)) {
43 if (block_cache.next != &node->lh) {
44 /* maintain MRU ordering */
46 list_add(&node->lh, &block_cache);
53 int blkcache_read(int iftype, int devnum,
54 lbaint_t start, lbaint_t blkcnt,
55 unsigned long blksz, void *buffer)
57 struct block_cache_node *node = cache_find(iftype, devnum, start,
60 const char *src = node->cache + (start - node->start) * blksz;
61 memcpy(buffer, src, blksz * blkcnt);
62 debug("hit: start " LBAF ", count " LBAFU "\n",
68 debug("miss: start " LBAF ", count " LBAFU "\n",
74 void blkcache_fill(int iftype, int devnum,
75 lbaint_t start, lbaint_t blkcnt,
76 unsigned long blksz, void const *buffer)
79 struct block_cache_node *node;
81 /* don't cache big stuff */
82 if (blkcnt > _stats.max_blocks_per_entry)
85 if (_stats.max_entries == 0)
88 bytes = blksz * blkcnt;
89 if (_stats.max_entries <= _stats.entries) {
91 node = (struct block_cache_node *)block_cache.prev;
94 debug("drop: start " LBAF ", count " LBAFU "\n",
95 node->start, node->blkcnt);
96 if (node->blkcnt * node->blksz < bytes) {
101 node = malloc(sizeof(*node));
108 node->cache = malloc(bytes);
115 debug("fill: start " LBAF ", count " LBAFU "\n",
118 node->iftype = iftype;
119 node->devnum = devnum;
121 node->blkcnt = blkcnt;
123 memcpy(node->cache, buffer, bytes);
124 list_add(&node->lh, &block_cache);
128 void blkcache_invalidate(int iftype, int devnum)
130 struct list_head *entry, *n;
131 struct block_cache_node *node;
133 list_for_each_safe(entry, n, &block_cache) {
134 node = (struct block_cache_node *)entry;
135 if ((node->iftype == iftype) &&
136 (node->devnum == devnum)) {
145 void blkcache_configure(unsigned blocks, unsigned entries)
147 struct block_cache_node *node;
148 if ((blocks != _stats.max_blocks_per_entry) ||
149 (entries != _stats.max_entries)) {
150 /* invalidate cache */
151 while (!list_empty(&block_cache)) {
152 node = (struct block_cache_node *)block_cache.next;
160 _stats.max_blocks_per_entry = blocks;
161 _stats.max_entries = entries;
167 void blkcache_stats(struct block_cache_stats *stats)
169 memcpy(stats, &_stats, sizeof(*stats));