/*
 * Copyright (C) Nelson Integration, LLC 2016
 * Author: Eric Nelson <[email protected]>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
#include <config.h>
#include <common.h>
#include <malloc.h>
#include <part.h>
#include <linux/ctype.h>
#include <linux/list.h>

struct block_cache_node {
	struct list_head lh;	/* list link; head of block_cache is MRU */
	int iftype;		/* interface type of the cached device */
	int devnum;		/* device number on that interface */
	lbaint_t start;		/* first cached block */
	lbaint_t blkcnt;	/* number of cached blocks */
	unsigned long blksz;	/* block size in bytes */
	char *cache;		/* buffer of blkcnt * blksz bytes */
};

/* cache entries kept in most-recently-used order */
static LIST_HEAD(block_cache);

/* cache configuration limits and hit/miss counters */
static struct block_cache_stats _stats = {
	.max_blocks_per_entry = 2,
	.max_entries = 32
};

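/*
 * Find a cache entry whose device matches and whose block range fully
 * contains [start, start + blkcnt). On a hit, promote the entry to the
 * front of the list to maintain MRU ordering.
 */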
static struct block_cache_node *cache_find(int iftype, int devnum,
					   lbaint_t start, lbaint_t blkcnt,
					   unsigned long blksz)
{
	struct block_cache_node *node;

	list_for_each_entry(node, &block_cache, lh)
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum) &&
		    (node->blksz == blksz) &&
		    (node->start <= start) &&
		    (node->start + node->blkcnt >= start + blkcnt)) {
			if (block_cache.next != &node->lh) {
				/* maintain MRU ordering */
				list_del(&node->lh);
				list_add(&node->lh, &block_cache);
			}
			return node;
		}
	return NULL;
}

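/*
 * Satisfy a read from the cache if possible. On a hit, copy
 * blkcnt * blksz bytes into buffer and return 1; return 0 on a miss.
 * Hit and miss counts are recorded in _stats.
 */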
int blkcache_read(int iftype, int devnum,
		  lbaint_t start, lbaint_t blkcnt,
		  unsigned long blksz, void *buffer)
{
	struct block_cache_node *node = cache_find(iftype, devnum, start,
						   blkcnt, blksz);
	if (node) {
		const char *src = node->cache + (start - node->start) * blksz;
		memcpy(buffer, src, blksz * blkcnt);
		debug("hit: start " LBAF ", count " LBAFU "\n",
		      start, blkcnt);
		++_stats.hits;
		return 1;
	}

	debug("miss: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);
	++_stats.misses;
	return 0;
}

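/*
 * Add the result of a device read to the cache. Requests larger than
 * max_blocks_per_entry are not cached. When the cache is full, the
 * least-recently-used entry is evicted and its buffer reused if it is
 * large enough for the new data.
 */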
void blkcache_fill(int iftype, int devnum,
		   lbaint_t start, lbaint_t blkcnt,
		   unsigned long blksz, void const *buffer)
{
	lbaint_t bytes;
	struct block_cache_node *node;

	/* don't cache big stuff */
	if (blkcnt > _stats.max_blocks_per_entry)
		return;

	if (_stats.max_entries == 0)
		return;

	bytes = blksz * blkcnt;
	if (_stats.max_entries <= _stats.entries) {
		/* pop LRU (tail of the list) */
		node = list_entry(block_cache.prev,
				  struct block_cache_node, lh);
		list_del(&node->lh);
		_stats.entries--;
		debug("drop: start " LBAF ", count " LBAFU "\n",
		      node->start, node->blkcnt);
		/* reuse the evicted buffer unless it is too small */
		if (node->blkcnt * node->blksz < bytes) {
			free(node->cache);
			node->cache = NULL;
		}
	} else {
		node = malloc(sizeof(*node));
		if (!node)
			return;
		node->cache = NULL;
	}

	if (!node->cache) {
		node->cache = malloc(bytes);
		if (!node->cache) {
			free(node);
			return;
		}
	}

	debug("fill: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);

	node->iftype = iftype;
	node->devnum = devnum;
	node->start = start;
	node->blkcnt = blkcnt;
	node->blksz = blksz;
	memcpy(node->cache, buffer, bytes);
	list_add(&node->lh, &block_cache);
	_stats.entries++;
}

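/*
 * Discard all cache entries for one device, e.g. after a write makes
 * the cached contents stale or the device is removed.
 */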
void blkcache_invalidate(int iftype, int devnum)
{
	struct list_head *entry, *n;
	struct block_cache_node *node;

	list_for_each_safe(entry, n, &block_cache) {
		node = list_entry(entry, struct block_cache_node, lh);
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum)) {
			list_del(entry);
			free(node->cache);
			free(node);
			--_stats.entries;
		}
	}
}

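/*
 * Set the cache limits. Changing either limit empties the cache;
 * the hit and miss counters are reset unconditionally.
 */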
void blkcache_configure(unsigned blocks, unsigned entries)
{
	struct block_cache_node *node;

	if ((blocks != _stats.max_blocks_per_entry) ||
	    (entries != _stats.max_entries)) {
		/* invalidate cache */
		while (!list_empty(&block_cache)) {
			node = list_entry(block_cache.next,
					  struct block_cache_node, lh);
			list_del(&node->lh);
			free(node->cache);
			free(node);
		}
		_stats.entries = 0;
	}

	_stats.max_blocks_per_entry = blocks;
	_stats.max_entries = entries;

	_stats.hits = 0;
	_stats.misses = 0;
}

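/*
 * Copy the current statistics to the caller and reset the hit and
 * miss counters.
 */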
void blkcache_stats(struct block_cache_stats *stats)
{
	memcpy(stats, &_stats, sizeof(*stats));
	_stats.hits = 0;
	_stats.misses = 0;
}