/*
 * linux/fs/mbcache.c
 * (C) 2001-2002 Andreas Gruenbacher, <[email protected]>
 */

/*
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense. The number of
 * additional indexes (=other criteria) can be hardwired at compile time
 * or specified at cache create time.
 *
 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
 * in the cache. A valid entry is in the main hash tables of the cache,
 * and may also be in the lru list. An invalid entry is not in any hashes
 * or lists.
 *
 * A valid cache entry is only in the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released. Entries that cannot be freed immediately are put
 * back on the lru list.
 */
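
/*
 * Illustrative usage sketch (not taken from any real caller; "ex_cache",
 * bdev, block and key below are placeholders). A typical user creates one
 * cache, inserts an entry per interesting block, and looks entries up
 * either by device/block (mb_cache_entry_get) or by the additional index
 * key (mb_cache_entry_find_first/_next):
 *
 *	struct mb_cache *ex_cache = mb_cache_create("ex_cache", 6);
 *	struct mb_cache_entry *ce;
 *
 *	ce = mb_cache_entry_alloc(ex_cache, GFP_NOFS);
 *	if (ce) {
 *		if (mb_cache_entry_insert(ce, bdev, block, key) == -EBUSY)
 *			;	(an entry for this device/block already exists)
 *		mb_cache_entry_release(ce);
 *	}
 *
 *	ce = mb_cache_entry_find_first(ex_cache, bdev, key);
 *	while (ce) {
 *		... examine the candidate; find_next releases the previous
 *		entry automatically, and ERR_PTR(-EAGAIN) must be handled ...
 *		ce = mb_cache_entry_find_next(ce, bdev, key);
 *	}
 */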
28 | ||
29 | #include <linux/kernel.h> | |
30 | #include <linux/module.h> | |
31 | ||
32 | #include <linux/hash.h> | |
33 | #include <linux/fs.h> | |
34 | #include <linux/mm.h> | |
35 | #include <linux/slab.h> | |
36 | #include <linux/sched.h> | |
37 | #include <linux/init.h> | |
38 | #include <linux/mbcache.h> | |
39 | ||
40 | ||
#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
		printk(KERN_DEBUG f); \
		printk("\n"); \
	} while (0)
#define mb_assert(c) do { if (!(c)) \
		printk(KERN_ERR "assertion " #c " failed\n"); \
	} while(0)
#else
# define mb_debug(f...) do { } while(0)
# define mb_assert(c) do { } while(0)
#endif
#define mb_error(f...) do { \
		printk(KERN_ERR f); \
		printk("\n"); \
	} while(0)

#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)
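
/*
 * The entry use count e_used doubles as a reader/writer indicator: readers
 * take an entry by incrementing e_used by one, while a holder with
 * exclusive ("writer") access adds MB_CACHE_WRITER on top of its
 * reference. Thus e_used >= MB_CACHE_WRITER means a writer currently
 * holds the entry; contenders sleep on mb_cache_queue (below) until the
 * entry is released.
 */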

static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);

MODULE_AUTHOR("Andreas Gruenbacher <[email protected]>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif

/*
 * Global data: list of all mbcaches, lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */

static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);

static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
{
	return !list_empty(&ce->e_block_list);
}


static void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
	if (__mb_cache_entry_is_hashed(ce)) {
		list_del_init(&ce->e_block_list);
		list_del(&ce->e_index.o_list);
	}
}


static void
__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
	struct mb_cache *cache = ce->e_cache;

	mb_assert(!(ce->e_used || ce->e_queued));
	kmem_cache_free(cache->c_entry_cache, ce);
	atomic_dec(&cache->c_entry_count);
}


static void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
	__releases(mb_cache_spinlock)
{
	/* Wake up all processes queuing for this cache entry. */
	if (ce->e_queued)
		wake_up_all(&mb_cache_queue);
	if (ce->e_used >= MB_CACHE_WRITER)
		ce->e_used -= MB_CACHE_WRITER;
	ce->e_used--;
	if (!(ce->e_used || ce->e_queued)) {
		if (!__mb_cache_entry_is_hashed(ce))
			goto forget;
		mb_assert(list_empty(&ce->e_lru_list));
		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
	}
	spin_unlock(&mb_cache_spinlock);
	return;
forget:
	spin_unlock(&mb_cache_spinlock);
	__mb_cache_entry_forget(ce, GFP_KERNEL);
}


/*
 * mb_cache_shrink_scan()  memory pressure callback
 *
 * This function is called by the kernel memory management when memory
 * gets low.
 *
 * @shrink: (ignored)
 * @sc: shrink_control passed from reclaim
 *
 * Returns the number of objects freed.
 */
static unsigned long
mb_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(free_list);
	struct mb_cache_entry *entry, *tmp;
	int nr_to_scan = sc->nr_to_scan;
	gfp_t gfp_mask = sc->gfp_mask;
	unsigned long freed = 0;

	mb_debug("trying to free %d entries", nr_to_scan);
	spin_lock(&mb_cache_spinlock);
	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_move_tail(&ce->e_lru_list, &free_list);
		__mb_cache_entry_unhash(ce);
		freed++;
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
		__mb_cache_entry_forget(entry, gfp_mask);
	}
	return freed;
}
177 | ||
178 | static unsigned long | |
179 | mb_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc) | |
180 | { | |
181 | struct mb_cache *cache; | |
182 | unsigned long count = 0; | |
183 | ||
184 | spin_lock(&mb_cache_spinlock); | |
e566d48c AG |
185 | list_for_each_entry(cache, &mb_cache_list, c_cache_list) { |
186 | mb_debug("cache %s (%d)", cache->c_name, | |
187 | atomic_read(&cache->c_entry_count)); | |
188 | count += atomic_read(&cache->c_entry_count); | |
189 | } | |
1da177e4 | 190 | spin_unlock(&mb_cache_spinlock); |
1ab6c499 | 191 | |
55f841ce | 192 | return vfs_pressure_ratio(count); |
1da177e4 LT |
193 | } |
194 | ||
static struct shrinker mb_cache_shrinker = {
	.count_objects = mb_cache_shrink_count,
	.scan_objects = mb_cache_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/*
 * mb_cache_create()  create a new cache
 *
 * All entries in one cache are equal size. Cache entries may be from
 * multiple devices. Returns NULL if no more memory was available.
 *
 * @name: name of the cache (informal)
 * @bucket_bits: log2(number of hash buckets)
 */
struct mb_cache *
mb_cache_create(const char *name, int bucket_bits)
{
	int n, bucket_count = 1 << bucket_bits;
	struct mb_cache *cache = NULL;

	cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		return NULL;
	cache->c_name = name;
	atomic_set(&cache->c_entry_count, 0);
	cache->c_bucket_bits = bucket_bits;
	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_block_hash)
		goto fail;
	for (n=0; n<bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_block_hash[n]);
	cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_index_hash)
		goto fail;
	for (n=0; n<bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_index_hash[n]);
	cache->c_entry_cache = kmem_cache_create(name,
		sizeof(struct mb_cache_entry), 0,
		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!cache->c_entry_cache)
		goto fail2;

	/*
	 * Set an upper limit on the number of cache entries so that the hash
	 * chains won't grow too long.
	 */
	cache->c_max_entries = bucket_count << 4;

	spin_lock(&mb_cache_spinlock);
	list_add(&cache->c_cache_list, &mb_cache_list);
	spin_unlock(&mb_cache_spinlock);
	return cache;

fail2:
	kfree(cache->c_index_hash);

fail:
	kfree(cache->c_block_hash);
	kfree(cache);
	return NULL;
}


/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. All cache entries
 * currently in use cannot be freed, and thus remain in the cache. All others
 * are freed.
 *
 * @bdev: which device's cache entries to shrink
 */
void
mb_cache_shrink(struct block_device *bdev)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_bdev == bdev) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}
}


/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it.
 */
void
mb_cache_destroy(struct mb_cache *cache)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_cache == cache) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	list_del(&cache->c_cache_list);
	spin_unlock(&mb_cache_spinlock);

	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}

	if (atomic_read(&cache->c_entry_count) > 0) {
		mb_error("cache %s: %d orphaned entries",
			 cache->c_name,
			 atomic_read(&cache->c_entry_count));
	}

	kmem_cache_destroy(cache->c_entry_cache);

	kfree(cache->c_index_hash);
	kfree(cache->c_block_hash);
	kfree(cache);
}
337 | ||
1da177e4 LT |
338 | /* |
339 | * mb_cache_entry_alloc() | |
340 | * | |
341 | * Allocates a new cache entry. The new entry will not be valid initially, | |
342 | * and thus cannot be looked up yet. It should be filled with data, and | |
343 | * then inserted into the cache using mb_cache_entry_insert(). Returns NULL | |
344 | * if no more memory was available. | |
345 | */ | |
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
{
	struct mb_cache_entry *ce = NULL;

	if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
		spin_lock(&mb_cache_spinlock);
		if (!list_empty(&mb_cache_lru_list)) {
			ce = list_entry(mb_cache_lru_list.next,
					struct mb_cache_entry, e_lru_list);
			list_del_init(&ce->e_lru_list);
			__mb_cache_entry_unhash(ce);
		}
		spin_unlock(&mb_cache_spinlock);
	}
	if (!ce) {
		ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
		if (!ce)
			return NULL;
		atomic_inc(&cache->c_entry_count);
		INIT_LIST_HEAD(&ce->e_lru_list);
		INIT_LIST_HEAD(&ce->e_block_list);
		ce->e_cache = cache;
		ce->e_queued = 0;
	}
	ce->e_used = 1 + MB_CACHE_WRITER;
	return ce;
}

375 | ||
376 | /* | |
377 | * mb_cache_entry_insert() | |
378 | * | |
379 | * Inserts an entry that was allocated using mb_cache_entry_alloc() into | |
380 | * the cache. After this, the cache entry can be looked up, but is not yet | |
381 | * in the lru list as the caller still holds a handle to it. Returns 0 on | |
382 | * success, or -EBUSY if a cache entry for that device + inode exists | |
383 | * already (this may happen after a failed lookup, but when another process | |
384 | * has inserted the same cache entry in the meantime). | |
385 | * | |
386 | * @bdev: device the cache entry belongs to | |
387 | * @block: block number | |
2aec7c52 | 388 | * @key: lookup key |
1da177e4 LT |
389 | */ |
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
		      sector_t block, unsigned int key)
{
	struct mb_cache *cache = ce->e_cache;
	unsigned int bucket;
	struct list_head *l;
	int error = -EBUSY;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block)
			goto out;
	}
	__mb_cache_entry_unhash(ce);
	ce->e_bdev = bdev;
	ce->e_block = block;
	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
	ce->e_index.o_key = key;
	bucket = hash_long(key, cache->c_bucket_bits);
	list_add(&ce->e_index.o_list, &cache->c_index_hash[bucket]);
	error = 0;
out:
	spin_unlock(&mb_cache_spinlock);
	return error;
}

421 | ||
422 | /* | |
423 | * mb_cache_entry_release() | |
424 | * | |
425 | * Release a handle to a cache entry. When the last handle to a cache entry | |
426 | * is released it is either freed (if it is invalid) or otherwise inserted | |
427 | * in to the lru list. | |
428 | */ | |
429 | void | |
430 | mb_cache_entry_release(struct mb_cache_entry *ce) | |
431 | { | |
432 | spin_lock(&mb_cache_spinlock); | |
433 | __mb_cache_entry_release_unlock(ce); | |
434 | } | |
435 | ||
436 | ||
437 | /* | |
438 | * mb_cache_entry_free() | |
439 | * | |
440 | * This is equivalent to the sequence mb_cache_entry_takeout() -- | |
441 | * mb_cache_entry_release(). | |
442 | */ | |
443 | void | |
444 | mb_cache_entry_free(struct mb_cache_entry *ce) | |
445 | { | |
446 | spin_lock(&mb_cache_spinlock); | |
447 | mb_assert(list_empty(&ce->e_lru_list)); | |
448 | __mb_cache_entry_unhash(ce); | |
449 | __mb_cache_entry_release_unlock(ce); | |
450 | } | |
451 | ||
452 | ||
453 | /* | |
454 | * mb_cache_entry_get() | |
455 | * | |
456 | * Get a cache entry by device / block number. (There can only be one entry | |
457 | * in the cache per device and block.) Returns NULL if no such cache entry | |
458 | * exists. The returned cache entry is locked for exclusive access ("single | |
459 | * writer"). | |
460 | */ | |
461 | struct mb_cache_entry * | |
462 | mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev, | |
463 | sector_t block) | |
464 | { | |
465 | unsigned int bucket; | |
466 | struct list_head *l; | |
467 | struct mb_cache_entry *ce; | |
468 | ||
469 | bucket = hash_long((unsigned long)bdev + (block & 0xffffffff), | |
470 | cache->c_bucket_bits); | |
471 | spin_lock(&mb_cache_spinlock); | |
472 | list_for_each(l, &cache->c_block_hash[bucket]) { | |
473 | ce = list_entry(l, struct mb_cache_entry, e_block_list); | |
474 | if (ce->e_bdev == bdev && ce->e_block == block) { | |
475 | DEFINE_WAIT(wait); | |
476 | ||
477 | if (!list_empty(&ce->e_lru_list)) | |
478 | list_del_init(&ce->e_lru_list); | |
479 | ||
480 | while (ce->e_used > 0) { | |
481 | ce->e_queued++; | |
482 | prepare_to_wait(&mb_cache_queue, &wait, | |
483 | TASK_UNINTERRUPTIBLE); | |
484 | spin_unlock(&mb_cache_spinlock); | |
485 | schedule(); | |
486 | spin_lock(&mb_cache_spinlock); | |
487 | ce->e_queued--; | |
488 | } | |
489 | finish_wait(&mb_cache_queue, &wait); | |
490 | ce->e_used += 1 + MB_CACHE_WRITER; | |
491 | ||
492 | if (!__mb_cache_entry_is_hashed(ce)) { | |
493 | __mb_cache_entry_release_unlock(ce); | |
494 | return NULL; | |
495 | } | |
496 | goto cleanup; | |
497 | } | |
498 | } | |
499 | ce = NULL; | |
500 | ||
501 | cleanup: | |
502 | spin_unlock(&mb_cache_spinlock); | |
503 | return ce; | |
504 | } | |
505 | ||
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
		      struct block_device *bdev, unsigned int key)
{
	while (l != head) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_index.o_list);
		if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			/* Incrementing before holding the lock gives readers
			   priority over writers. */
			ce->e_used++;
			while (ce->e_used >= MB_CACHE_WRITER) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				spin_lock(&mb_cache_spinlock);
				return ERR_PTR(-EAGAIN);
			}
			return ce;
		}
		l = l->next;
	}
	return NULL;
}
546 | ||
547 | ||
548 | /* | |
549 | * mb_cache_entry_find_first() | |
550 | * | |
551 | * Find the first cache entry on a given device with a certain key in | |
25985edc | 552 | * an additional index. Additional matches can be found with |
1da177e4 LT |
553 | * mb_cache_entry_find_next(). Returns NULL if no match was found. The |
554 | * returned cache entry is locked for shared access ("multiple readers"). | |
555 | * | |
556 | * @cache: the cache to search | |
1da177e4 LT |
557 | * @bdev: the device the cache entry should belong to |
558 | * @key: the key in the index | |
559 | */ | |
560 | struct mb_cache_entry * | |
2aec7c52 AG |
561 | mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev, |
562 | unsigned int key) | |
1da177e4 LT |
563 | { |
564 | unsigned int bucket = hash_long(key, cache->c_bucket_bits); | |
565 | struct list_head *l; | |
566 | struct mb_cache_entry *ce; | |
567 | ||
1da177e4 | 568 | spin_lock(&mb_cache_spinlock); |
2aec7c52 AG |
569 | l = cache->c_index_hash[bucket].next; |
570 | ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key); | |
1da177e4 LT |
571 | spin_unlock(&mb_cache_spinlock); |
572 | return ce; | |
573 | } | |
574 | ||
575 | ||
576 | /* | |
577 | * mb_cache_entry_find_next() | |
578 | * | |
579 | * Find the next cache entry on a given device with a certain key in an | |
580 | * additional index. Returns NULL if no match could be found. The previous | |
581 | * entry is atomatically released, so that mb_cache_entry_find_next() can | |
582 | * be called like this: | |
583 | * | |
584 | * entry = mb_cache_entry_find_first(); | |
585 | * while (entry) { | |
586 | * ... | |
587 | * entry = mb_cache_entry_find_next(entry, ...); | |
588 | * } | |
589 | * | |
590 | * @prev: The previous match | |
1da177e4 LT |
591 | * @bdev: the device the cache entry should belong to |
592 | * @key: the key in the index | |
593 | */ | |
594 | struct mb_cache_entry * | |
2aec7c52 | 595 | mb_cache_entry_find_next(struct mb_cache_entry *prev, |
1da177e4 LT |
596 | struct block_device *bdev, unsigned int key) |
597 | { | |
598 | struct mb_cache *cache = prev->e_cache; | |
599 | unsigned int bucket = hash_long(key, cache->c_bucket_bits); | |
600 | struct list_head *l; | |
601 | struct mb_cache_entry *ce; | |
602 | ||
1da177e4 | 603 | spin_lock(&mb_cache_spinlock); |
2aec7c52 AG |
604 | l = prev->e_index.o_list.next; |
605 | ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key); | |
1da177e4 LT |
606 | __mb_cache_entry_release_unlock(prev); |
607 | return ce; | |
608 | } | |
609 | ||
610 | #endif /* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */ | |
611 | ||
612 | static int __init init_mbcache(void) | |
613 | { | |
8e1f936b | 614 | register_shrinker(&mb_cache_shrinker); |
1da177e4 LT |
615 | return 0; |
616 | } | |
617 | ||
618 | static void __exit exit_mbcache(void) | |
619 | { | |
8e1f936b | 620 | unregister_shrinker(&mb_cache_shrinker); |
1da177e4 LT |
621 | } |
622 | ||
623 | module_init(init_mbcache) | |
624 | module_exit(exit_mbcache) | |
625 |