]>
Commit | Line | Data |
---|---|---|
83d290c5 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
e40cf34a EN |
2 | /* |
3 | * Copyright (C) Nelson Integration, LLC 2016 | |
4 | * Author: Eric Nelson<[email protected]> | |
5 | * | |
e40cf34a | 6 | */ |
e40cf34a | 7 | #include <common.h> |
e6f6f9e6 | 8 | #include <blk.h> |
f7ae49fc | 9 | #include <log.h> |
e40cf34a EN |
10 | #include <malloc.h> |
11 | #include <part.h> | |
401d1c4f | 12 | #include <asm/global_data.h> |
e40cf34a EN |
13 | #include <linux/ctype.h> |
14 | #include <linux/list.h> | |
15 | ||
365af27f OP |
16 | #ifdef CONFIG_NEEDS_MANUAL_RELOC |
17 | DECLARE_GLOBAL_DATA_PTR; | |
18 | #endif | |
19 | ||
e40cf34a EN |
/* One cached run of consecutive blocks read from a single device */
struct block_cache_node {
	struct list_head lh;	/* MRU list linkage; code elsewhere casts list
				 * pointers straight to nodes, so this member
				 * must remain first */
	int iftype;		/* interface type of the owning device */
	int devnum;		/* device number within that interface type */
	lbaint_t start;		/* first block number covered by this entry */
	lbaint_t blkcnt;	/* number of consecutive blocks cached */
	unsigned long blksz;	/* block size in bytes */
	char *cache;		/* buffer of blkcnt * blksz bytes */
};
29 | ||
/* Cache entries, most-recently-used first */
static LIST_HEAD(block_cache);

/* Tuning limits plus hit/miss counters; defaults cap the cache at
 * 32 entries of at most 8 blocks each */
static struct block_cache_stats _stats = {
	.max_blocks_per_entry = 8,
	.max_entries = 32
};
36 | ||
#ifdef CONFIG_NEEDS_MANUAL_RELOC
/**
 * blkcache_init() - fix up the cache list head after manual relocation
 *
 * On boards that relocate U-Boot manually, the statically initialized
 * list head still holds its pre-relocation self-pointers; shift both
 * links by the relocation offset so the list stays consistent.
 *
 * Return: 0 (always succeeds)
 */
int blkcache_init(void)
{
	struct list_head *head = &block_cache;

	/*
	 * Cast the adjusted value back to a pointer type: assigning an
	 * integer to a pointer without a cast violates C's simple
	 * assignment constraints.
	 */
	head->next = (struct list_head *)((uintptr_t)head->next +
					  gd->reloc_off);
	head->prev = (struct list_head *)((uintptr_t)head->prev +
					  gd->reloc_off);

	return 0;
}
#endif
1526bcce | 48 | |
e40cf34a EN |
49 | static struct block_cache_node *cache_find(int iftype, int devnum, |
50 | lbaint_t start, lbaint_t blkcnt, | |
51 | unsigned long blksz) | |
52 | { | |
53 | struct block_cache_node *node; | |
54 | ||
55 | list_for_each_entry(node, &block_cache, lh) | |
56 | if ((node->iftype == iftype) && | |
57 | (node->devnum == devnum) && | |
58 | (node->blksz == blksz) && | |
59 | (node->start <= start) && | |
60 | (node->start + node->blkcnt >= start + blkcnt)) { | |
61 | if (block_cache.next != &node->lh) { | |
62 | /* maintain MRU ordering */ | |
63 | list_del(&node->lh); | |
64 | list_add(&node->lh, &block_cache); | |
65 | } | |
66 | return node; | |
67 | } | |
68 | return 0; | |
69 | } | |
70 | ||
71 | int blkcache_read(int iftype, int devnum, | |
72 | lbaint_t start, lbaint_t blkcnt, | |
73 | unsigned long blksz, void *buffer) | |
74 | { | |
75 | struct block_cache_node *node = cache_find(iftype, devnum, start, | |
76 | blkcnt, blksz); | |
77 | if (node) { | |
78 | const char *src = node->cache + (start - node->start) * blksz; | |
79 | memcpy(buffer, src, blksz * blkcnt); | |
80 | debug("hit: start " LBAF ", count " LBAFU "\n", | |
81 | start, blkcnt); | |
82 | ++_stats.hits; | |
83 | return 1; | |
84 | } | |
85 | ||
86 | debug("miss: start " LBAF ", count " LBAFU "\n", | |
87 | start, blkcnt); | |
88 | ++_stats.misses; | |
89 | return 0; | |
90 | } | |
91 | ||
92 | void blkcache_fill(int iftype, int devnum, | |
93 | lbaint_t start, lbaint_t blkcnt, | |
94 | unsigned long blksz, void const *buffer) | |
95 | { | |
96 | lbaint_t bytes; | |
97 | struct block_cache_node *node; | |
98 | ||
99 | /* don't cache big stuff */ | |
100 | if (blkcnt > _stats.max_blocks_per_entry) | |
101 | return; | |
102 | ||
103 | if (_stats.max_entries == 0) | |
104 | return; | |
105 | ||
106 | bytes = blksz * blkcnt; | |
107 | if (_stats.max_entries <= _stats.entries) { | |
108 | /* pop LRU */ | |
109 | node = (struct block_cache_node *)block_cache.prev; | |
110 | list_del(&node->lh); | |
111 | _stats.entries--; | |
112 | debug("drop: start " LBAF ", count " LBAFU "\n", | |
113 | node->start, node->blkcnt); | |
114 | if (node->blkcnt * node->blksz < bytes) { | |
115 | free(node->cache); | |
116 | node->cache = 0; | |
117 | } | |
118 | } else { | |
119 | node = malloc(sizeof(*node)); | |
120 | if (!node) | |
121 | return; | |
122 | node->cache = 0; | |
123 | } | |
124 | ||
125 | if (!node->cache) { | |
126 | node->cache = malloc(bytes); | |
127 | if (!node->cache) { | |
128 | free(node); | |
129 | return; | |
130 | } | |
131 | } | |
132 | ||
133 | debug("fill: start " LBAF ", count " LBAFU "\n", | |
134 | start, blkcnt); | |
135 | ||
136 | node->iftype = iftype; | |
137 | node->devnum = devnum; | |
138 | node->start = start; | |
139 | node->blkcnt = blkcnt; | |
140 | node->blksz = blksz; | |
141 | memcpy(node->cache, buffer, bytes); | |
142 | list_add(&node->lh, &block_cache); | |
143 | _stats.entries++; | |
144 | } | |
145 | ||
146 | void blkcache_invalidate(int iftype, int devnum) | |
147 | { | |
148 | struct list_head *entry, *n; | |
149 | struct block_cache_node *node; | |
150 | ||
151 | list_for_each_safe(entry, n, &block_cache) { | |
152 | node = (struct block_cache_node *)entry; | |
153 | if ((node->iftype == iftype) && | |
154 | (node->devnum == devnum)) { | |
155 | list_del(entry); | |
156 | free(node->cache); | |
157 | free(node); | |
158 | --_stats.entries; | |
159 | } | |
160 | } | |
161 | } | |
162 | ||
163 | void blkcache_configure(unsigned blocks, unsigned entries) | |
164 | { | |
165 | struct block_cache_node *node; | |
166 | if ((blocks != _stats.max_blocks_per_entry) || | |
167 | (entries != _stats.max_entries)) { | |
168 | /* invalidate cache */ | |
169 | while (!list_empty(&block_cache)) { | |
170 | node = (struct block_cache_node *)block_cache.next; | |
171 | list_del(&node->lh); | |
172 | free(node->cache); | |
173 | free(node); | |
174 | } | |
175 | _stats.entries = 0; | |
176 | } | |
177 | ||
178 | _stats.max_blocks_per_entry = blocks; | |
179 | _stats.max_entries = entries; | |
180 | ||
181 | _stats.hits = 0; | |
182 | _stats.misses = 0; | |
183 | } | |
184 | ||
185 | void blkcache_stats(struct block_cache_stats *stats) | |
186 | { | |
187 | memcpy(stats, &_stats, sizeof(*stats)); | |
188 | _stats.hits = 0; | |
189 | _stats.misses = 0; | |
190 | } |