// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

struct nfsd_drc_bucket {
	struct rb_root rb_head;
	struct list_head lru_head;
	spinlock_t cache_lock;
};

static struct nfsd_drc_bucket	*drc_hashtbl;
static struct kmem_cache	*drc_slab;

/* max number of entries allowed in the cache */
static unsigned int		max_drc_entries;

/* number of significant bits in the hash value */
static unsigned int		maskbits;
static unsigned int		drc_hashsize;

/*
 * Stats and other tracking of the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the cache_lock
 */

/* total number of entries */
static atomic_t			num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int		payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int		drc_mem_usage;

/* longest hash chain seen */
static unsigned int		longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int		longest_chain_cachesize;

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

static struct shrinker nfsd_reply_cache_shrinker = {
	.scan_objects = nfsd_reply_cache_scan,
	.count_objects = nfsd_reply_cache_count,
	.seeks	= 1,
};

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:	 8192
 * 128MB:	11585
 * 256MB:	16384
 * 512MB:	23170
 *   1GB:	32768
 *   2GB:	46340
 *   4GB:	65536
 *   8GB:	92681
 *  16GB:      131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages() - totalhigh_pages();

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
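
/*
 * Worked example (illustrative, assuming 4K pages so PAGE_SHIFT == 12):
 * a machine with 1GB of low memory has ~262144 low pages;
 * int_sqrt(262144) == 512, so limit == (16 * 512) << 2 == 32768,
 * matching the 1GB row in the table above.
 */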

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
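
/*
 * Example: with the 1GB limit above (32768 entries) this yields
 * roundup_pow_of_two(32768 / 64) == 512 buckets, so maskbits ==
 * ilog2(512) == 9 in nfsd_reply_cache_init() below.
 */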
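
/* Map an XID to a bucket index of maskbits significant bits. */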
static u32
nfsd_cache_hash(__be32 xid)
{
	return hash_32(be32_to_cpu(xid), maskbits);
}

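/*
 * Allocate a new entry and pre-fill its lookup key from the request:
 * XID, procedure, client address and port, transport protocol, version,
 * argument length and payload checksum.
 */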
static struct svc_cacherep *
nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		RB_CLEAR_NODE(&rp->c_node);
		INIT_LIST_HEAD(&rp->c_lru);

		memset(&rp->c_key, 0, sizeof(rp->c_key));
		rp->c_key.k_xid = rqstp->rq_xid;
		rp->c_key.k_proc = rqstp->rq_proc;
		rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
		rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
		rp->c_key.k_prot = rqstp->rq_prot;
		rp->c_key.k_vers = rqstp->rq_vers;
		rp->c_key.k_len = rqstp->rq_arg.len;
		rp->c_key.k_csum = csum;
	}
	return rp;
}

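/*
 * Free an entry. The caller must hold the bucket lock, except for entries
 * that were never hashed (c_state == RC_UNUSED), for which b may be NULL.
 */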
static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
	}
	if (rp->c_state != RC_UNUSED) {
		rb_erase(&rp->c_node, &b->rb_head);
		list_del(&rp->c_lru);
		atomic_dec(&num_drc_entries);
		drc_mem_usage -= sizeof(*rp);
	}
	kmem_cache_free(drc_slab, rp);
}

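/* Unhash and free an entry, taking and releasing the bucket lock. */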
static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	spin_lock(&b->cache_lock);
	nfsd_reply_cache_free_locked(b, rp);
	spin_unlock(&b->cache_lock);
}

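/*
 * Size the cache from available low memory, register the shrinker, and
 * allocate the slab and the hash table of buckets.
 */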
int nfsd_reply_cache_init(void)
{
	unsigned int hashsize;
	unsigned int i;
	int status = 0;

	max_drc_entries = nfsd_cache_size_limit();
	atomic_set(&num_drc_entries, 0);
	hashsize = nfsd_hashsize(max_drc_entries);
	maskbits = ilog2(hashsize);

	status = register_shrinker(&nfsd_reply_cache_shrinker);
	if (status)
		return status;

	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	drc_hashtbl = kcalloc(hashsize, sizeof(*drc_hashtbl), GFP_KERNEL);
	if (!drc_hashtbl) {
		drc_hashtbl = vzalloc(array_size(hashsize,
						 sizeof(*drc_hashtbl)));
		if (!drc_hashtbl)
			goto out_nomem;
	}

	for (i = 0; i < hashsize; i++) {
		INIT_LIST_HEAD(&drc_hashtbl[i].lru_head);
		spin_lock_init(&drc_hashtbl[i].cache_lock);
	}
	drc_hashsize = hashsize;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

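/*
 * Tear the cache down. Also used on the init error path, so it must
 * tolerate partially initialized state.
 */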
void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;
	unsigned int i;

	unregister_shrinker(&nfsd_reply_cache_shrinker);

	for (i = 0; i < drc_hashsize; i++) {
		struct list_head *head = &drc_hashtbl[i].lru_head;
		while (!list_empty(head)) {
			rp = list_first_entry(head, struct svc_cacherep, c_lru);
			nfsd_reply_cache_free_locked(&drc_hashtbl[i], rp);
		}
	}

	kvfree(drc_hashtbl);
	drc_hashtbl = NULL;
	drc_hashsize = 0;

	kmem_cache_destroy(drc_slab);
	drc_slab = NULL;
}

/*
 * Move cache entry to end of LRU list, and refresh its timestamp.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &b->lru_head);
}

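/*
 * Prune one bucket's LRU list. Caller must hold b->cache_lock.
 * Returns the number of entries freed.
 */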
static long
prune_bucket(struct nfsd_drc_bucket *b)
{
	struct svc_cacherep *rp, *tmp;
	long freed = 0;

	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
		/*
		 * Don't free entries attached to calls that are still
		 * in-progress, but do keep scanning the list.
		 */
		if (rp->c_state == RC_INPROG)
			continue;
		if (atomic_read(&num_drc_entries) <= max_drc_entries &&
		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
			break;
		nfsd_reply_cache_free_locked(b, rp);
		freed++;
	}
	return freed;
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(void)
{
	unsigned int i;
	long freed = 0;

	for (i = 0; i < drc_hashsize; i++) {
		struct nfsd_drc_bucket *b = &drc_hashtbl[i];

		if (list_empty(&b->lru_head))
			continue;
		spin_lock(&b->cache_lock);
		freed += prune_bucket(b);
		spin_unlock(&b->cache_lock);
	}
	return freed;
}

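/* Shrinker callback: report how many entries the DRC could free. */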
static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return atomic_read(&num_drc_entries);
}

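/* Shrinker callback: prune expired and excess entries from every bucket. */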
static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return prune_cache_entries();
}

/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}

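/*
 * Compare two lookup keys. When the XIDs match but the checksums do not,
 * also count a payload miss, so that checksum-only mismatches can be
 * reported separately in the stats.
 */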
static int
nfsd_cache_key_cmp(const struct svc_cacherep *key, const struct svc_cacherep *rp)
{
	if (key->c_key.k_xid == rp->c_key.k_xid &&
	    key->c_key.k_csum != rp->c_key.k_csum)
		++payload_misses;

	return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}

/*
 * Search the bucket's rbtree for an entry that matches the given key.
 * Must be called with the bucket's cache_lock held. Returns the found
 * entry, or inserts the key entry and returns it on a cache miss.
 */
static struct svc_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key)
{
	struct svc_cacherep	*rp, *ret = key;
	struct rb_node		**p = &b->rb_head.rb_node,
				*parent = NULL;
	unsigned int		entries = 0;
	int cmp;

	while (*p != NULL) {
		++entries;
		parent = *p;
		rp = rb_entry(parent, struct svc_cacherep, c_node);

		cmp = nfsd_cache_key_cmp(key, rp);
		if (cmp < 0)
			p = &parent->rb_left;
		else if (cmp > 0)
			p = &parent->rb_right;
		else {
			ret = rp;
			goto out;
		}
	}
	rb_link_node(&key->c_node, parent, p);
	rb_insert_color(&key->c_node, &b->rb_head);
out:
	/* tally hash chain length stats */
	if (entries > longest_chain) {
		longest_chain = entries;
		longest_chain_cachesize = atomic_read(&num_drc_entries);
	} else if (entries == longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		longest_chain_cachesize = min_t(unsigned int,
				longest_chain_cachesize,
				atomic_read(&num_drc_entries));
	}

	lru_put_end(b, ret);
	return ret;
}

/*
 * Try to find an entry matching the current call in the cache. Since the
 * common case is a cache miss followed by an insert, a new entry is
 * preallocated before taking the bucket lock and inserted; if a matching
 * entry already exists, the preallocated one is freed again.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	__wsum			csum;
	u32 hash = nfsd_cache_hash(xid);
	struct nfsd_drc_bucket *b = &drc_hashtbl[hash];
	int type = rqstp->rq_cachetype;
	int rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return rtn;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry.
	 */
	rp = nfsd_reply_cache_alloc(rqstp, csum);
	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		return rtn;
	}

	spin_lock(&b->cache_lock);
	found = nfsd_cache_insert(b, rp);
	if (found != rp) {
		nfsd_reply_cache_free_locked(NULL, rp);
		rp = found;
		goto found_entry;
	}

	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;

	atomic_inc(&num_drc_entries);
	drc_mem_usage += sizeof(*rp);

	/* go ahead and prune the cache */
	prune_bucket(b);
out:
	spin_unlock(&b->cache_lock);
	return rtn;

found_entry:
	/* We found a matching entry which is either in progress or done. */
	nfsdstats.rchits++;
	rtn = RC_DROPIT;

	/* Request being processed */
	if (rp->c_state == RC_INPROG)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(b, rp);
	}

	goto out;
}

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	u32		hash;
	struct nfsd_drc_bucket *b;
	int		len;
	size_t		bufsize = 0;

	if (!rp)
		return;

	hash = nfsd_cache_hash(rp->c_key.k_xid);
	b = &drc_hashtbl[hash];

	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(b, rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(b, rp);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(b, rp);
		return;
	}
	spin_lock(&b->cache_lock);
	drc_mem_usage += bufsize;
	lru_put_end(b, rp);
	rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&b->cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
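/*
 * Example output (values are illustrative only):
 *
 *	max entries: 8192
 *	num entries: 153
 *	hash buckets: 128
 *	mem usage: 180224
 *	cache hits: 12840
 *	cache misses: 153
 *	not cached: 97
 *	payload misses: 0
 *	longest chain len: 3
 *	cachesize at longest: 140
 */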
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max entries: %u\n", max_drc_entries);
	seq_printf(m, "num entries: %u\n",
			atomic_read(&num_drc_entries));
	seq_printf(m, "hash buckets: %u\n", 1 << maskbits);
	seq_printf(m, "mem usage: %u\n", drc_mem_usage);
	seq_printf(m, "cache hits: %u\n", nfsdstats.rchits);
	seq_printf(m, "cache misses: %u\n", nfsdstats.rcmisses);
	seq_printf(m, "not cached: %u\n", nfsdstats.rcnocache);
	seq_printf(m, "payload misses: %u\n", payload_misses);
	seq_printf(m, "longest chain len: %u\n", longest_chain);
	seq_printf(m, "cachesize at longest: %u\n", longest_chain_cachesize);
	return 0;
}

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_reply_cache_stats_show, NULL);
}