/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 */
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"
#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE
/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64
struct nfsd_drc_bucket {
	struct list_head lru_head;
	spinlock_t cache_lock;
};
static struct nfsd_drc_bucket	*drc_hashtbl;
static struct kmem_cache	*drc_slab;

/* max number of entries allowed in the cache */
static unsigned int		max_drc_entries;

/* number of significant bits in the hash value */
static unsigned int		maskbits;
static unsigned int		drc_hashsize;
/*
 * Stats and other tracking of the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the cache_lock.
 */
/* total number of entries */
static atomic_t			num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int		payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int		drc_mem_usage;

/* longest hash chain seen */
static unsigned int		longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int		longest_chain_cachesize;
static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);
static struct shrinker nfsd_reply_cache_shrinker = {
	.scan_objects = nfsd_reply_cache_scan,
	.count_objects = nfsd_reply_cache_count,
	.seeks	= 1,
};
/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages - totalhigh_pages;

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
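/*
 * For example, with 4k pages and 1GB of low memory: low_pages = 262144,
 * int_sqrt(262144) = 512, and (16 * 512) << (12 - 10) = 32768 entries,
 * matching the 1GB row in the table above.
 */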
/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
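/*
 * E.g. a 32768-entry cap gives roundup_pow_of_two(32768 / 64) = 512
 * buckets, and nfsd_reply_cache_init() then sets maskbits = ilog2(512) = 9.
 */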
static u32
nfsd_cache_hash(__be32 xid)
{
	return hash_32(be32_to_cpu(xid), maskbits);
}
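/*
 * hash_32() is a multiplicative hash (golden-ratio constant) that keeps
 * only the top maskbits bits, so nearby XIDs still spread across buckets.
 */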
static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
	}
	return rp;
}
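/*
 * The new entry is not yet charged to num_drc_entries or drc_mem_usage;
 * nfsd_cache_lookup() accounts for it under the bucket lock.
 */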
static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
	}
	list_del(&rp->c_lru);
	atomic_dec(&num_drc_entries);
	drc_mem_usage -= sizeof(*rp);
	kmem_cache_free(drc_slab, rp);
}
static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	spin_lock(&b->cache_lock);
	nfsd_reply_cache_free_locked(rp);
	spin_unlock(&b->cache_lock);
}
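/*
 * Lock discipline: the _locked variant above requires the bucket's
 * cache_lock to be held by the caller; this wrapper takes and releases
 * it for use outside the locked paths.
 */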
int nfsd_reply_cache_init(void)
{
	unsigned int hashsize;
	unsigned int i;
	int status = 0;

	max_drc_entries = nfsd_cache_size_limit();
	atomic_set(&num_drc_entries, 0);
	hashsize = nfsd_hashsize(max_drc_entries);
	maskbits = ilog2(hashsize);

	status = register_shrinker(&nfsd_reply_cache_shrinker);
	if (status)
		return status;

	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	/* fall back to vzalloc if a large table can't be kmalloc'd */
	drc_hashtbl = kcalloc(hashsize, sizeof(*drc_hashtbl), GFP_KERNEL);
	if (!drc_hashtbl) {
		drc_hashtbl = vzalloc(hashsize * sizeof(*drc_hashtbl));
		if (!drc_hashtbl)
			goto out_nomem;
	}

	for (i = 0; i < hashsize; i++) {
		INIT_LIST_HEAD(&drc_hashtbl[i].lru_head);
		spin_lock_init(&drc_hashtbl[i].cache_lock);
	}
	drc_hashsize = hashsize;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}
void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;
	unsigned int i;

	unregister_shrinker(&nfsd_reply_cache_shrinker);

	for (i = 0; i < drc_hashsize; i++) {
		struct list_head *head = &drc_hashtbl[i].lru_head;
		while (!list_empty(head)) {
			rp = list_first_entry(head, struct svc_cacherep, c_lru);
			nfsd_reply_cache_free_locked(rp);
		}
	}

	kfree(drc_hashtbl);
	drc_hashtbl = NULL;
	drc_hashsize = 0;

	kmem_cache_destroy(drc_slab);
	drc_slab = NULL;
}
/*
 * Move cache entry to end of LRU list and refresh its timestamp; the
 * shrinker and prune_bucket() rely on this ordering when expiring entries.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &b->lru_head);
}
static long
prune_bucket(struct nfsd_drc_bucket *b)
{
	struct svc_cacherep *rp, *tmp;
	long freed = 0;

	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
		/*
		 * Don't free entries attached to calls that are still
		 * in-progress, but do keep scanning the list.
		 */
		if (rp->c_state == RC_INPROG)
			continue;
		if (atomic_read(&num_drc_entries) <= max_drc_entries &&
		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
			break;
		nfsd_reply_cache_free_locked(rp);
		freed++;
	}
	return freed;
}
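/*
 * Since each bucket's list is kept in LRU order by lru_put_end(), the
 * walk above may stop at the first entry that is neither expired nor
 * over the entry limit: everything after it is newer.
 */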
/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(void)
{
	unsigned int i;
	long freed = 0;

	for (i = 0; i < drc_hashsize; i++) {
		struct nfsd_drc_bucket *b = &drc_hashtbl[i];

		if (list_empty(&b->lru_head))
			continue;
		spin_lock(&b->cache_lock);
		freed += prune_bucket(b);
		spin_unlock(&b->cache_lock);
	}
	return freed;
}
static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return atomic_read(&num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return prune_cache_entries();
}
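/*
 * These hooks tie the DRC into the VM's shrinker machinery: under memory
 * pressure, ->count_objects reports how many entries exist and
 * ->scan_objects prunes expired or excess entries bucket by bucket.
 */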
/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}
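/*
 * Capping the checksum at RC_CSUMLEN bytes keeps retransmission detection
 * cheap for large requests (e.g. WRITEs) while still telling a true
 * rexmit apart from an XID collision in almost all cases.
 */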
static bool
nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
{
	/* Check RPC XID first */
	if (rqstp->rq_xid != rp->c_xid)
		return false;
	/* compare checksum of NFS data */
	if (csum != rp->c_csum) {
		++payload_misses;
		return false;
	}

	/* Other discriminators */
	if (rqstp->rq_proc != rp->c_proc ||
	    rqstp->rq_prot != rp->c_prot ||
	    rqstp->rq_vers != rp->c_vers ||
	    rqstp->rq_arg.len != rp->c_len ||
	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
		return false;

	return true;
}
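/*
 * The XID comparison comes first because it is the cheapest and most
 * selective test. payload_misses counts requests whose XID matched but
 * whose checksummed payload did not, i.e. an XID collision rather than
 * a genuine retransmission.
 */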
/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct nfsd_drc_bucket *b, struct svc_rqst *rqstp,
		__wsum csum)
{
	struct svc_cacherep	*rp, *ret = NULL;
	struct list_head	*rh = &b->lru_head;
	unsigned int		entries = 0;

	list_for_each_entry(rp, rh, c_lru) {
		++entries;
		if (nfsd_cache_match(rqstp, csum, rp)) {
			ret = rp;
			break;
		}
	}

	/* tally hash chain length stats */
	if (entries > longest_chain) {
		longest_chain = entries;
		longest_chain_cachesize = atomic_read(&num_drc_entries);
	} else if (entries == longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		longest_chain_cachesize = min_t(unsigned int,
				longest_chain_cachesize,
				atomic_read(&num_drc_entries));
	}

	return ret;
}
/*
 * Try to find an entry matching the current call in the cache. Since the
 * common case is a miss followed by an insert, a new entry is preallocated
 * before taking the bucket lock, and freed again if the search turns up an
 * existing match.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	__wsum			csum;
	u32 hash = nfsd_cache_hash(xid);
	struct nfsd_drc_bucket *b = &drc_hashtbl[hash];
	unsigned long		age;
	int type = rqstp->rq_cachetype;
	int rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return rtn;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry.
	 */
	rp = nfsd_reply_cache_alloc();
	spin_lock(&b->cache_lock);
	if (likely(rp)) {
		atomic_inc(&num_drc_entries);
		drc_mem_usage += sizeof(*rp);
	}

	/* go ahead and prune the cache */
	prune_bucket(b);

	found = nfsd_cache_search(b, rqstp, csum);
	if (found) {
		if (likely(rp))
			nfsd_reply_cache_free_locked(rp);
		rp = found;
		goto found_entry;
	}

	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		goto out;
	}
	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_len = rqstp->rq_arg.len;
	rp->c_csum = csum;

	lru_put_end(b, rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
 out:
	spin_unlock(&b->cache_lock);
	return rtn;
found_entry:
	nfsdstats.rchits++;
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(b, rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(rp);
	}

	goto out;
}
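/*
 * The return codes map to dispatcher actions (see cache.h): RC_DOIT means
 * process the request normally, RC_REPLY means send the reply composed
 * above from the cache, and RC_DROPIT means drop the request silently and
 * let the client retransmit.
 */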
/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	u32		hash;
	struct nfsd_drc_bucket *b;
	int		len;
	size_t		bufsize = 0;

	if (!rp)
		return;

	hash = nfsd_cache_hash(rp->c_xid);
	b = &drc_hashtbl[hash];

	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(b, rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(b, rp);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(b, rp);
		return;
	}
	spin_lock(&b->cache_lock);
	drc_mem_usage += bufsize;
	lru_put_end(b, rp);
	rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&b->cache_lock);
}
/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}
/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
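/*
 * Illustrative output (values made up; format only):
 *
 *   max entries: 92681
 *   num entries: 14310
 *   hash buckets: 2048
 *   mem usage: 1834240
 *   cache hits: 51234
 *   cache misses: 10449
 *   not cached: 2683
 *   payload misses: 3
 *   longest chain len: 7
 *   cachesize at longest: 14021
 */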
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max entries: %u\n", max_drc_entries);
	seq_printf(m, "num entries: %u\n",
			atomic_read(&num_drc_entries));
	seq_printf(m, "hash buckets: %u\n", 1 << maskbits);
	seq_printf(m, "mem usage: %u\n", drc_mem_usage);
	seq_printf(m, "cache hits: %u\n", nfsdstats.rchits);
	seq_printf(m, "cache misses: %u\n", nfsdstats.rcmisses);
	seq_printf(m, "not cached: %u\n", nfsdstats.rcnocache);
	seq_printf(m, "payload misses: %u\n", payload_misses);
	seq_printf(m, "longest chain len: %u\n", longest_chain);
	seq_printf(m, "cachesize at longest: %u\n", longest_chain_cachesize);
	return 0;
}
int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_reply_cache_stats_show, NULL);
}