/*
 * linux/fs/nfsd/nfscache.c
 *
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/list.h>

#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/cache.h>

/* Size of reply cache. Common values are:
 * 4.3BSD:	128
 * 4.4BSD:	256
 * Solaris2:	1024
 * DEC Unix:	512-4096
 */
#define CACHESIZE		1024
#define HASHSIZE		64
#define REQHASH(xid)		((((xid) >> 24) ^ (xid)) & (HASHSIZE-1))
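/*
 * REQHASH folds the top byte of the XID into the low bits before
 * masking, so XIDs that differ only in their high byte still spread
 * across buckets: 0x01000000 and 0x02000000 hash to buckets 1 and 2
 * instead of colliding in bucket 0.
 */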

static struct hlist_head *hash_list;
static struct list_head lru_head;
static int cache_disabled = 1;

static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);

/*
 * Locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG.
 * Otherwise, the lock must be held when accessing the _prev and _next
 * pointers of an entry.
 */
static DEFINE_SPINLOCK(cache_lock);

void
nfsd_cache_init(void)
{
	struct svc_cacherep	*rp;
	int			i;

	INIT_LIST_HEAD(&lru_head);
	i = CACHESIZE;
	while (i) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp)
			break;
		list_add(&rp->c_lru, &lru_head);
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_HLIST_NODE(&rp->c_hash);
		i--;
	}

	if (i)
		printk(KERN_ERR "nfsd: cannot allocate all %d cache entries, only got %d\n",
		       CACHESIZE, CACHESIZE - i);

	hash_list = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
	if (!hash_list) {
		nfsd_cache_shutdown();
		printk(KERN_ERR "nfsd: cannot allocate %zu bytes for hash list\n",
		       HASHSIZE * sizeof(struct hlist_head));
		return;
	}

	cache_disabled = 0;
}

void
nfsd_cache_shutdown(void)
{
	struct svc_cacherep	*rp;

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		if (rp->c_state == RC_DONE && rp->c_type == RC_REPLBUFF)
			kfree(rp->c_replvec.iov_base);
		list_del(&rp->c_lru);
		kfree(rp);
	}

	cache_disabled = 1;

	kfree(hash_list);
	hash_list = NULL;
}

/*
 * Move a cache entry to the end of the LRU list, marking it
 * most recently used.
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	list_move_tail(&rp->c_lru, &lru_head);
}

/*
 * Move a cache entry to the hash bucket for its current XID.
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, hash_list + REQHASH(rp->c_xid));
}

/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we grab the oldest unlocked entry off the LRU list.
 * Note that no operation within the loop may sleep.
 */
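/*
 * Return values (a sketch of the caller's flow, not nfsd_dispatch
 * verbatim):
 *
 *	switch (nfsd_cache_lookup(rqstp, cachetype)) {
 *	case RC_DOIT:	// execute the procedure, then nfsd_cache_update()
 *	case RC_DROPIT:	// duplicate in progress or recent rexmit; drop it
 *	case RC_REPLY:	// a cached reply was copied into rq_res; send it
 *	}
 */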
int
nfsd_cache_lookup(struct svc_rqst *rqstp, int type)
{
	struct hlist_node	*hn;
	struct hlist_head	*rh;
	struct svc_cacherep	*rp;
	u32			xid = rqstp->rq_xid,
				proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	unsigned long		age;
	int			rtn;

	rqstp->rq_cacherep = NULL;
	if (cache_disabled || type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return RC_DOIT;
	}

	spin_lock(&cache_lock);
	rtn = RC_DOIT;

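	/*
	 * An entry only matches while it is younger than 120 seconds;
	 * anything older is treated as a miss and becomes eligible for
	 * reuse via the LRU walk below.
	 */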
	rh = &hash_list[REQHASH(xid)];
	hlist_for_each_entry(rp, hn, rh, c_hash) {
		if (rp->c_state != RC_UNUSED &&
		    xid == rp->c_xid && proc == rp->c_proc &&
		    proto == rp->c_prot && vers == rp->c_vers &&
		    time_before(jiffies, rp->c_timestamp + 120*HZ) &&
		    memcmp(&rqstp->rq_addr, &rp->c_addr, sizeof(rp->c_addr)) == 0) {
			nfsdstats.rchits++;
			goto found_entry;
		}
	}
	nfsdstats.rcmisses++;

	/* This loop shouldn't take more than a few iterations normally */
	{
	struct svc_cacherep	*found = NULL;
	int			safe = 0;

	list_for_each_entry(rp, &lru_head, c_lru) {
		if (rp->c_state != RC_INPROG) {
			found = rp;
			break;
		}
		if (safe++ > CACHESIZE) {
			printk(KERN_WARNING "nfsd: loop in repcache LRU list\n");
			cache_disabled = 1;
			goto out;
		}
	}
	/*
	 * If the walk ran off the end of the list, rp points at the list
	 * head rather than a real entry, so report exhaustion through
	 * "found" instead of relying on rp.
	 */
	rp = found;
	}

	/* All entries are in use; this should not happen */
	if (rp == NULL) {
		static int	complaints;

		printk(KERN_WARNING "nfsd: all repcache entries locked!\n");
		if (++complaints > 5) {
			printk(KERN_WARNING "nfsd: disabling repcache.\n");
			cache_disabled = 1;
		}
		goto out;
	}

	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rp->c_addr = rqstp->rq_addr;
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_timestamp = jiffies;

	hash_refile(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
out:
	spin_unlock(&cache_lock);
	return rtn;

found_entry:
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	rp->c_timestamp = jiffies;
	lru_put_end(rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		rp->c_state = RC_UNUSED;
	}

	goto out;
}

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, u32 *statp)
{
	struct svc_cacherep *rp;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	int		len;

	if (!(rp = rqstp->rq_cacherep) || cache_disabled)
		return;

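	/*
	 * statp points just past the RPC header inside the reply's head
	 * kvec; the procedure's reply body runs from there to the end of
	 * the kvec. Convert its length from bytes to 32-bit XDR words.
	 */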
	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		rp->c_state = RC_UNUSED;
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk(KERN_WARNING "nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
		if (!cachv->iov_base) {
			spin_lock(&cache_lock);
			rp->c_state = RC_UNUSED;
			spin_unlock(&cache_lock);
			return;
		}
		cachv->iov_len = len << 2;
		memcpy(cachv->iov_base, statp, len << 2);
		break;
	}
	spin_lock(&cache_lock);
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	rp->c_timestamp = jiffies;
	spin_unlock(&cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

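	/*
	 * As the FIXME above notes, the reply's head kvec lives in a
	 * single page, so the cached reply must fit in whatever space
	 * remains after the RPC header already written there.
	 */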
	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zu).\n",
		       data->iov_len);
		return 0;
	}
	memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}