/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include "internal.h"


int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
static __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(dcache_lock);

static struct kmem_cache *dentry_cache __read_mostly;

#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
#define D_HASHBITS	d_hash_shift
#define D_HASHMASK	d_hash_mask

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;
static struct hlist_head *dentry_hashtable __read_mostly;
static LIST_HEAD(dentry_unused);

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static void __d_free(struct dentry *dentry)
{
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}

static void d_callback(struct rcu_head *head)
{
	struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
	__d_free(dentry);
}

/*
 * no dcache_lock, please.  The caller must decrement dentry_stat.nr_dentry
 * inside dcache_lock.
 */
static void d_free(struct dentry *dentry)
{
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);
	/* if dentry was never inserted into hash, immediate free is OK */
	if (dentry->d_hash.pprev == NULL)
		__d_free(dentry);
	else
		call_rcu(&dentry->d_u.d_rcu, d_callback);
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 * Called with dcache_lock and per dentry lock held, drops both.
 */
static void dentry_iput(struct dentry * dentry)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
	}
}

/**
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill
 *
 * Called with dcache_lock and d_lock, releases both.  The dentry must
 * already be unhashed and removed from the LRU.
 *
 * If this is the root of the dentry tree, return NULL.
 */
static struct dentry *d_kill(struct dentry *dentry)
{
	struct dentry *parent;

	list_del(&dentry->d_u.d_child);
	dentry_stat.nr_dentry--;	/* For d_free, below */
	/* drops the locks, at that point nobody can reach this dentry */
	dentry_iput(dentry);
	parent = dentry->d_parent;
	d_free(dentry);
	return dentry == parent ? NULL : parent;
}

/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 *
 * no dcache lock, please.
 */

void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	if (atomic_read(&dentry->d_count) == 1)
		might_sleep();
	if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
		return;

	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count)) {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		return;
	}

	/*
	 * AV: ->d_delete() is _NOT_ allowed to block now.
	 */
	if (dentry->d_op && dentry->d_op->d_delete) {
		if (dentry->d_op->d_delete(dentry))
			goto unhash_it;
	}
	/* Unreachable? Get rid of it */
	if (d_unhashed(dentry))
		goto kill_it;
	if (list_empty(&dentry->d_lru)) {
		dentry->d_flags |= DCACHE_REFERENCED;
		list_add(&dentry->d_lru, &dentry_unused);
		dentry_stat.nr_unused++;
	}
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return;

unhash_it:
	__d_drop(dentry);
kill_it:
	/* If dentry was on d_lru list
	 * delete it from there
	 */
	if (!list_empty(&dentry->d_lru)) {
		list_del(&dentry->d_lru);
		dentry_stat.nr_unused--;
	}
	dentry = d_kill(dentry);
	if (dentry)
		goto repeat;
}
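
/*
 * Illustrative caller-side sketch (not a caller in this file): every
 * reference-taking call - dget(), dget_locked(), a successful d_lookup() -
 * is expected to be balanced by exactly one dput():
 *
 *	struct dentry *child = d_lookup(dir, &name);
 *	if (child) {
 *		... use child->d_inode, child->d_name ...
 *		dput(child);
 *	}
 *
 * Note the might_sleep() above: dropping what may be the last reference can
 * end up in iput(), so dput() must not be called from atomic context.
 */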

/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are other dentries that can be
 * reached through this one we can't delete it and we
 * return -EBUSY. On success we return 0.
 *
 * no dcache lock.
 */

int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dcache_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dcache_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dcache_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dcache_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 */
	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count) > 1) {
		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return 0;
}

/* This should be called _only_ with dcache_lock held */

static inline struct dentry * __dget_locked(struct dentry *dentry)
{
	atomic_inc(&dentry->d_count);
	if (!list_empty(&dentry->d_lru)) {
		dentry_stat.nr_unused--;
		list_del_init(&dentry->d_lru);
	}
	return dentry;
}

struct dentry * dget_locked(struct dentry *dentry)
{
	return __dget_locked(dentry);
}

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 * @want_discon: flag, used by d_splice_alias, to request
 *          that only a DISCONNECTED alias be returned.
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one unless @want_discon is set,
 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
 */

static struct dentry * __d_find_alias(struct inode *inode, int want_discon)
{
	struct list_head *head, *next, *tmp;
	struct dentry *alias, *discon_alias=NULL;

	head = &inode->i_dentry;
	next = inode->i_dentry.next;
	while (next != head) {
		tmp = next;
		next = tmp->next;
		prefetch(next);
		alias = list_entry(tmp, struct dentry, d_alias);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED))
				discon_alias = alias;
			else if (!want_discon) {
				__dget_locked(alias);
				return alias;
			}
		}
	}
	if (discon_alias)
		__dget_locked(discon_alias);
	return discon_alias;
}

struct dentry * d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!list_empty(&inode->i_dentry)) {
		spin_lock(&dcache_lock);
		de = __d_find_alias(inode, 0);
		spin_unlock(&dcache_lock);
	}
	return de;
}
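
/*
 * Illustrative use (export/filehandle style code): given only an inode,
 * d_find_alias() recovers a dentry that already names it; the reference it
 * returns must eventually be dropped with dput():
 *
 *	struct dentry *alias = d_find_alias(inode);
 *	if (alias) {
 *		... inspect alias->d_flags, build a path, etc. ...
 *		dput(alias);
 *	}
 */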

/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&dcache_lock);
	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!atomic_read(&dentry->d_count)) {
			__dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
}

/*
 * Throw away a dentry - free the inode, dput the parent.  This requires that
 * the LRU list has already been removed.
 *
 * If prune_parents is true, try to prune ancestors as well.
 *
 * Called with dcache_lock, drops it and then regains.
 * Called with dentry->d_lock held, drops it.
 */
static void prune_one_dentry(struct dentry * dentry, int prune_parents)
{
	__d_drop(dentry);
	dentry = d_kill(dentry);
	if (!prune_parents) {
		dput(dentry);
		spin_lock(&dcache_lock);
		return;
	}

	/*
	 * Prune ancestors.  Locking is simpler than in dput(),
	 * because dcache_lock needs to be taken anyway.
	 */
	spin_lock(&dcache_lock);
	while (dentry) {
		if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock))
			return;

		if (dentry->d_op && dentry->d_op->d_delete)
			dentry->d_op->d_delete(dentry);
		if (!list_empty(&dentry->d_lru)) {
			list_del(&dentry->d_lru);
			dentry_stat.nr_unused--;
		}
		__d_drop(dentry);
		dentry = d_kill(dentry);
		spin_lock(&dcache_lock);
	}
}

/**
 * prune_dcache - shrink the dcache
 * @count: number of entries to try and free
 * @sb: if given, ignore dentries for other superblocks
 *      which are being unmounted.
 * @prune_parents: if true, try to prune ancestors as well in one go
 *
 * Shrink the dcache. This is done when we need
 * more memory, or simply when we need to unmount
 * something (at which point we need to unuse
 * all dentries).
 *
 * This function may fail to free any resources if
 * all the dentries are in use.
 */

static void prune_dcache(int count, struct super_block *sb, int prune_parents)
{
	spin_lock(&dcache_lock);
	for (; count ; count--) {
		struct dentry *dentry;
		struct list_head *tmp;
		struct rw_semaphore *s_umount;

		cond_resched_lock(&dcache_lock);

		tmp = dentry_unused.prev;
		if (sb) {
			/* Try to find a dentry for this sb, but don't try
			 * too hard, if they aren't near the tail they will
			 * be moved down again soon
			 */
			int skip = count;
			while (skip && tmp != &dentry_unused &&
			    list_entry(tmp, struct dentry, d_lru)->d_sb != sb) {
				skip--;
				tmp = tmp->prev;
			}
		}
		if (tmp == &dentry_unused)
			break;
		list_del_init(tmp);
		prefetch(dentry_unused.prev);
		dentry_stat.nr_unused--;
		dentry = list_entry(tmp, struct dentry, d_lru);

		spin_lock(&dentry->d_lock);
		/*
		 * We found an inuse dentry which was not removed from
		 * dentry_unused because of laziness during lookup.  Do not free
		 * it - just keep it off the dentry_unused list.
		 */
		if (atomic_read(&dentry->d_count)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}
		/* If the dentry was recently referenced, don't free it. */
		if (dentry->d_flags & DCACHE_REFERENCED) {
			dentry->d_flags &= ~DCACHE_REFERENCED;
			list_add(&dentry->d_lru, &dentry_unused);
			dentry_stat.nr_unused++;
			spin_unlock(&dentry->d_lock);
			continue;
		}
		/*
		 * If the dentry is not DCACHE_REFERENCED, it is time
		 * to remove it from the dcache, provided the super block is
		 * NULL (which means we are trying to reclaim memory)
		 * or this dentry belongs to the same super block that
		 * we want to shrink.
		 */
		/*
		 * If this dentry is for "my" filesystem, then I can prune it
		 * without taking the s_umount lock (I already hold it).
		 */
		if (sb && dentry->d_sb == sb) {
			prune_one_dentry(dentry, prune_parents);
			continue;
		}
		/*
		 * ...otherwise we need to be sure this filesystem isn't being
		 * unmounted, otherwise we could race with
		 * generic_shutdown_super(), and end up holding a reference to
		 * an inode while the filesystem is unmounted.
		 * So we try to get s_umount, and make sure s_root isn't NULL.
		 * (Take a local copy of s_umount to avoid a use-after-free of
		 * `dentry').
		 */
		s_umount = &dentry->d_sb->s_umount;
		if (down_read_trylock(s_umount)) {
			if (dentry->d_sb->s_root != NULL) {
				prune_one_dentry(dentry, prune_parents);
				up_read(s_umount);
				continue;
			}
			up_read(s_umount);
		}
		spin_unlock(&dentry->d_lock);
		/*
		 * Insert dentry at the head of the list as inserting at the
		 * tail leads to a cycle.
		 */
		list_add(&dentry->d_lru, &dentry_unused);
		dentry_stat.nr_unused++;
	}
	spin_unlock(&dcache_lock);
}
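
/*
 * Behaviour note: prune_dcache() only inspects up to @count entries from
 * the tail of dentry_unused.  A DCACHE_REFERENCED victim gets a second
 * chance (the flag is cleared and the dentry is re-queued), so an entry
 * that was touched since it was queued survives one extra scan -
 * second-chance aging rather than strict LRU.
 */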

/*
 * Shrink the dcache for the specified super block.
 * This allows us to unmount a device without disturbing
 * the dcache for the other devices.
 *
 * This implementation makes just two traversals of the
 * unused list.  On the first pass we move the selected
 * dentries to the most recent end, and on the second
 * pass we free them.  The second pass must restart after
 * each dput(), but since the target dentries are all at
 * the end, it's really just a single traversal.
 */

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This
 * is used to free the dcache before unmounting a file
 * system
 */

void shrink_dcache_sb(struct super_block * sb)
{
	struct list_head *tmp, *next;
	struct dentry *dentry;

	/*
	 * Pass one ... move the dentries for the specified
	 * superblock to the most recent end of the unused list.
	 */
	spin_lock(&dcache_lock);
	list_for_each_safe(tmp, next, &dentry_unused) {
		dentry = list_entry(tmp, struct dentry, d_lru);
		if (dentry->d_sb != sb)
			continue;
		list_move(tmp, &dentry_unused);
	}

	/*
	 * Pass two ... free the dentries for this superblock.
	 */
repeat:
	list_for_each_safe(tmp, next, &dentry_unused) {
		dentry = list_entry(tmp, struct dentry, d_lru);
		if (dentry->d_sb != sb)
			continue;
		dentry_stat.nr_unused--;
		list_del_init(tmp);
		spin_lock(&dentry->d_lock);
		if (atomic_read(&dentry->d_count)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}
		prune_one_dentry(dentry, 1);
		cond_resched_lock(&dcache_lock);
		goto repeat;
	}
	spin_unlock(&dcache_lock);
}
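
/*
 * Usage note (callers live elsewhere in the VFS): shrink_dcache_sb() is the
 * gentle variant used for things like read-only remounts, where busy
 * dentries are simply skipped; the final unmount teardown goes through
 * shrink_dcache_for_umount() below, which insists that nothing is in use.
 */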

/*
 * destroy a single subtree of dentries for unmount
 * - see the comments on shrink_dcache_for_umount() for a description of the
 *   locking
 */
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
	struct dentry *parent;
	unsigned detached = 0;

	BUG_ON(!IS_ROOT(dentry));

	/* detach this root from the system */
	spin_lock(&dcache_lock);
	if (!list_empty(&dentry->d_lru)) {
		dentry_stat.nr_unused--;
		list_del_init(&dentry->d_lru);
	}
	__d_drop(dentry);
	spin_unlock(&dcache_lock);

	for (;;) {
		/* descend to the first leaf in the current subtree */
		while (!list_empty(&dentry->d_subdirs)) {
			struct dentry *loop;

			/* this is a branch with children - detach all of them
			 * from the system in one go */
			spin_lock(&dcache_lock);
			list_for_each_entry(loop, &dentry->d_subdirs,
					    d_u.d_child) {
				if (!list_empty(&loop->d_lru)) {
					dentry_stat.nr_unused--;
					list_del_init(&loop->d_lru);
				}

				__d_drop(loop);
				cond_resched_lock(&dcache_lock);
			}
			spin_unlock(&dcache_lock);

			/* move to the first child */
			dentry = list_entry(dentry->d_subdirs.next,
					    struct dentry, d_u.d_child);
		}

		/* consume the dentries from this leaf up through its parents
		 * until we find one with children or run out altogether */
		do {
			struct inode *inode;

			if (atomic_read(&dentry->d_count) != 0) {
				printk(KERN_ERR
				       "BUG: Dentry %p{i=%lx,n=%s}"
				       " still in use (%d)"
				       " [unmount of %s %s]\n",
				       dentry,
				       dentry->d_inode ?
				       dentry->d_inode->i_ino : 0UL,
				       dentry->d_name.name,
				       atomic_read(&dentry->d_count),
				       dentry->d_sb->s_type->name,
				       dentry->d_sb->s_id);
				BUG();
			}

			parent = dentry->d_parent;
			if (parent == dentry)
				parent = NULL;
			else
				atomic_dec(&parent->d_count);

			list_del(&dentry->d_u.d_child);
			detached++;

			inode = dentry->d_inode;
			if (inode) {
				dentry->d_inode = NULL;
				list_del_init(&dentry->d_alias);
				if (dentry->d_op && dentry->d_op->d_iput)
					dentry->d_op->d_iput(dentry, inode);
				else
					iput(inode);
			}

			d_free(dentry);

			/* finished when we fall off the top of the tree,
			 * otherwise we ascend to the parent and move to the
			 * next sibling if there is one */
			if (!parent)
				goto out;

			dentry = parent;

		} while (list_empty(&dentry->d_subdirs));

		dentry = list_entry(dentry->d_subdirs.next,
				    struct dentry, d_u.d_child);
	}
out:
	/* several dentries were freed, need to correct nr_dentry */
	spin_lock(&dcache_lock);
	dentry_stat.nr_dentry -= detached;
	spin_unlock(&dcache_lock);
}

/*
 * destroy the dentries attached to a superblock on unmounting
 * - we don't need to use dentry->d_lock, and only need dcache_lock when
 *   removing the dentry from the system lists and hashes because:
 *   - the superblock is detached from all mountings and open files, so the
 *     dentry trees will not be rearranged by the VFS
 *   - s_umount is write-locked, so the memory pressure shrinker will ignore
 *     any dentries belonging to this superblock that it comes across
 *   - the filesystem itself is no longer permitted to rearrange the dentries
 *     in this superblock
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	if (down_read_trylock(&sb->s_umount))
		BUG();

	dentry = sb->s_root;
	sb->s_root = NULL;
	atomic_dec(&dentry->d_count);
	shrink_dcache_for_umount_subtree(dentry);

	while (!hlist_empty(&sb->s_anon)) {
		dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash);
		shrink_dcache_for_umount_subtree(dentry);
	}
}
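
/*
 * Expected state on entry (informal): the caller - generic_shutdown_super()
 * in the unmount path - has already detached the superblock from all mounts
 * and open files, so once s_root and every dentry on s_anon have been
 * consumed here, any dentry still holding a reference is a bug and trips
 * the BUG() in shrink_dcache_for_umount_subtree().
 */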

/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */

int have_submounts(struct dentry *parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;

	spin_lock(&dcache_lock);
	if (d_mountpoint(parent))
		goto positive;
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;
		/* Have we found a mount point ? */
		if (d_mountpoint(dentry))
			goto positive;
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_u.d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
	return 0; /* No mount points found in tree */
positive:
	spin_unlock(&dcache_lock);
	return 1;
}

/*
 * Search the dentry child list for the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */
static int select_parent(struct dentry * parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;
	int found = 0;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		if (!list_empty(&dentry->d_lru)) {
			dentry_stat.nr_unused--;
			list_del_init(&dentry->d_lru);
		}
		/*
		 * move only zero ref count dentries to the end
		 * of the unused list for prune_dcache
		 */
		if (!atomic_read(&dentry->d_count)) {
			list_add_tail(&dentry->d_lru, &dentry_unused);
			dentry_stat.nr_unused++;
			found++;
		}

		/*
		 * We can return to the caller if we have found some (this
		 * ensures forward progress). We'll be coming back to find
		 * the rest.
		 */
		if (found && need_resched())
			goto out;

		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_u.d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
out:
	spin_unlock(&dcache_lock);
	return found;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */

void shrink_dcache_parent(struct dentry * parent)
{
	int found;

	while ((found = select_parent(parent)) != 0)
		prune_dcache(found, parent->d_sb, 1);
}
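
/*
 * Illustrative caller pattern: d_invalidate() above uses this to clear out
 * unused children before deciding whether a directory dentry may be
 * dropped.  Each select_parent() pass only moves zero-count children to
 * the tail of dentry_unused; the actual freeing is done by prune_dcache().
 */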

/*
 * Scan `nr' dentries and return the number which remain.
 *
 * We need to avoid reentering the filesystem if the caller is performing a
 * GFP_NOFS allocation attempt.  One example deadlock is:
 *
 * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache->
 * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode->
 * ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK.
 *
 * In this case we return -1 to tell the caller that we bailed.
 */
static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
{
	if (nr) {
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_dcache(nr, NULL, 1);
	}
	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker dcache_shrinker = {
	.shrink = shrink_dcache_memory,
	.seeks = DEFAULT_SEEKS,
};

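/*
 * Worked example of the shrinker arithmetic above (numbers are only
 * illustrative): with 20000 unused dentries and the default
 * sysctl_vfs_cache_pressure of 100, shrink_dcache_memory() reports
 * (20000 / 100) * 100 = 20000 units to the VM.  Raising vfs_cache_pressure
 * above 100 makes the dcache look bigger than it is, so it is reclaimed
 * more aggressively; values below 100 have the opposite effect.
 */
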
/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */

struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	if (name->len > DNAME_INLINE_LEN-1) {
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else {
		dname = dentry->d_iname;
	}
	dentry->d_name.name = dname;

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	atomic_set(&dentry->d_count, 1);
	dentry->d_flags = DCACHE_UNHASHED;
	spin_lock_init(&dentry->d_lock);
	dentry->d_inode = NULL;
	dentry->d_parent = NULL;
	dentry->d_sb = NULL;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	dentry->d_mounted = 0;
#ifdef CONFIG_PROFILING
	dentry->d_cookie = NULL;
#endif
	INIT_HLIST_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);

	if (parent) {
		dentry->d_parent = dget(parent);
		dentry->d_sb = parent->d_sb;
	} else {
		INIT_LIST_HEAD(&dentry->d_u.d_child);
	}

	spin_lock(&dcache_lock);
	if (parent)
		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
	dentry_stat.nr_dentry++;
	spin_unlock(&dcache_lock);

	return dentry;
}

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */

void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!list_empty(&entry->d_alias));
	spin_lock(&dcache_lock);
	if (inode)
		list_add(&entry->d_alias, &inode->i_dentry);
	entry->d_inode = inode;
	fsnotify_d_instantiate(entry, inode);
	spin_unlock(&dcache_lock);
	security_d_instantiate(entry, inode);
}
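
/*
 * Illustrative ->create() sketch (simplified, error handling omitted;
 * foo_new_inode() is a made-up helper): a filesystem typically allocates
 * the inode itself and then binds it to the dentry handed in by the VFS:
 *
 *	struct inode *inode = foo_new_inode(dir->i_sb, mode);
 *	d_instantiate(dentry, inode);	(the dcache now owns that reference)
 */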

/**
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to inode.
 *
 * Note that in order to avoid conflicts with rename() etc, the caller
 * had better be holding the parent directory semaphore.
 *
 * This also assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		entry->d_inode = NULL;
		return NULL;
	}

	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct qstr *qstr = &alias->d_name;

		if (qstr->hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (qstr->len != len)
			continue;
		if (memcmp(qstr->name, name, len))
			continue;
		dget_locked(alias);
		return alias;
	}

	list_add(&entry->d_alias, &inode->i_dentry);
	entry->d_inode = inode;
	fsnotify_d_instantiate(entry, inode);
	return NULL;
}

struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!list_empty(&entry->d_alias));

	spin_lock(&dcache_lock);
	result = __d_instantiate_unique(entry, inode);
	spin_unlock(&dcache_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}

EXPORT_SYMBOL(d_instantiate_unique);

/**
 * d_alloc_root - allocate root dentry
 * @root_inode: inode to allocate the root for
 *
 * Allocate a root ("/") dentry for the inode given. The inode is
 * instantiated and returned. %NULL is returned if there is insufficient
 * memory or the inode passed is %NULL.
 */

struct dentry * d_alloc_root(struct inode * root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		static const struct qstr name = { .name = "/", .len = 1 };

		res = d_alloc(NULL, &name);
		if (res) {
			res->d_sb = root_inode->i_sb;
			res->d_parent = res;
			d_instantiate(res, root_inode);
		}
	}
	return res;
}
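
/*
 * Illustrative fill_super sketch (FOO_ROOT_INO is made up): the root dentry
 * is normally set up right after the root inode has been read:
 *
 *	root_inode = iget(sb, FOO_ROOT_INO);
 *	sb->s_root = d_alloc_root(root_inode);
 *	if (!sb->s_root) {
 *		iput(root_inode);	(d_alloc_root did not consume it)
 *		return -ENOMEM;
 *	}
 */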

static inline struct hlist_head *d_hash(struct dentry *parent,
					unsigned long hash)
{
	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}

/**
 * d_alloc_anon - allocate an anonymous dentry
 * @inode: inode to allocate the dentry for
 *
 * This is similar to d_alloc_root.  It is used by filesystems when
 * creating a dentry for a given inode, often in the process of
 * mapping a filehandle to a dentry.  The returned dentry may be
 * anonymous, or may have a full name (if the inode was already
 * in the cache).  The file system may need to make further
 * efforts to connect this dentry into the dcache properly.
 *
 * When called on a directory inode, we must ensure that
 * the inode only ever has one dentry.  If a dentry is
 * found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  If %NULL is returned (indicating kmalloc failure),
 * the reference on the inode has not been released.
 */

struct dentry * d_alloc_anon(struct inode *inode)
{
	static const struct qstr anonstring = { .name = "" };
	struct dentry *tmp;
	struct dentry *res;

	if ((res = d_find_alias(inode))) {
		iput(inode);
		return res;
	}

	tmp = d_alloc(NULL, &anonstring);
	if (!tmp)
		return NULL;

	tmp->d_parent = tmp; /* make sure dput doesn't croak */

	spin_lock(&dcache_lock);
	res = __d_find_alias(inode, 0);
	if (!res) {
		/* attach a disconnected dentry */
		res = tmp;
		tmp = NULL;
		spin_lock(&res->d_lock);
		res->d_sb = inode->i_sb;
		res->d_parent = res;
		res->d_inode = inode;
		res->d_flags |= DCACHE_DISCONNECTED;
		res->d_flags &= ~DCACHE_UNHASHED;
		list_add(&res->d_alias, &inode->i_dentry);
		hlist_add_head(&res->d_hash, &inode->i_sb->s_anon);
		spin_unlock(&res->d_lock);

		inode = NULL; /* don't drop reference */
	}
	spin_unlock(&dcache_lock);

	if (inode)
		iput(inode);
	if (tmp)
		dput(tmp);
	return res;
}


/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
 * and return it, else simply d_add the inode to the dentry and return NULL.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned.  Otherwise NULL
 * is returned.  This matches the expected return value of ->lookup.
 *
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&dcache_lock);
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
			fsnotify_d_instantiate(new, inode);
			spin_unlock(&dcache_lock);
			security_d_instantiate(new, inode);
			d_rehash(dentry);
			d_move(new, dentry);
			iput(inode);
		} else {
			/* d_instantiate takes dcache_lock, so we do it by hand */
			list_add(&dentry->d_alias, &inode->i_dentry);
			dentry->d_inode = inode;
			fsnotify_d_instantiate(dentry, inode);
			spin_unlock(&dcache_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else
		d_add(dentry, inode);
	return new;
}

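/*
 * Illustrative ->lookup() usage (foo_iget() is a made-up helper): exportable
 * filesystems usually return the result of d_splice_alias() directly, letting
 * it either reuse a disconnected directory alias or simply d_add() the new
 * dentry:
 *
 *	inode = foo_iget(dir->i_sb, ino);
 *	if (IS_ERR(inode))
 *		return ERR_PTR(PTR_ERR(inode));
 *	return d_splice_alias(inode, dentry);
 */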

/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 *
 * Searches the children of the parent dentry for the name in question. If
 * the dentry is found its reference count is incremented and the dentry
 * is returned. The caller must use dput() to free the entry when it has
 * finished using it. %NULL is returned on failure.
 *
 * __d_lookup is dcache_lock free. The hash list is protected using RCU.
 * Memory barriers are used while updating and doing lockless traversal.
 * To avoid races with d_move while rename is happening, d_lock is used.
 *
 * Overflows in memcmp(), while d_move, are avoided by keeping the length
 * and name pointer in one structure pointed by d_qstr.
 *
 * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while
 * lookup is going on.
 *
 * dentry_unused list is not updated even if lookup finds the required dentry
 * in there. It is updated in places such as prune_dcache, shrink_dcache_sb,
 * select_parent and __dget_locked. This laziness saves lookup from dcache_lock
 * acquisition.
 *
 * d_lookup() is protected against concurrent renames in some unrelated
 * directory using the seqlock_t rename_lock.
 */

struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
{
	struct dentry * dentry = NULL;
	unsigned long seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}

struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_head *head = d_hash(parent,hash);
	struct dentry *found = NULL;
	struct hlist_node *node;
	struct dentry *dentry;

	rcu_read_lock();

	hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
		struct qstr *qstr;

		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;

		spin_lock(&dentry->d_lock);

		/*
		 * Recheck the dentry after taking the lock - d_move may have
		 * changed things. Don't bother checking the hash because we're
		 * about to compare the whole name anyway.
		 */
		if (dentry->d_parent != parent)
			goto next;

		/*
		 * It is safe to compare names since d_move() cannot
		 * change the qstr (protected by d_lock).
		 */
		qstr = &dentry->d_name;
		if (parent->d_op && parent->d_op->d_compare) {
			if (parent->d_op->d_compare(parent, qstr, name))
				goto next;
		} else {
			if (qstr->len != len)
				goto next;
			if (memcmp(qstr->name, str, len))
				goto next;
		}

		if (!d_unhashed(dentry)) {
			atomic_inc(&dentry->d_count);
			found = dentry;
		}
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}

/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On hash failure or on lookup failure NULL is returned.
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry = NULL;

	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
	if (dir->d_op && dir->d_op->d_hash) {
		if (dir->d_op->d_hash(dir, name) < 0)
			goto out;
	}
	dentry = d_lookup(dir, name);
out:
	return dentry;
}

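/*
 * Illustrative note: this helper exists for callers that start from a raw
 * name rather than a qstr already hashed against @dir (procfs-style
 * lookups, for instance).  A minimal sketch:
 *
 *	struct qstr q = { .name = "status", .len = 6 };
 *	struct dentry *de = d_hash_and_lookup(dir, &q);
 *	(drop the reference with dput() when done)
 */
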
/**
 * d_validate - verify dentry provided from insecure source
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
 */

int d_validate(struct dentry *dentry, struct dentry *dparent)
{
	struct hlist_head *base;
	struct hlist_node *lhp;

	/* Check whether the ptr might be valid at all.. */
	if (!kmem_ptr_validate(dentry_cache, dentry))
		goto out;

	if (dentry->d_parent != dparent)
		goto out;

	spin_lock(&dcache_lock);
	base = d_hash(dparent, dentry->d_name.hash);
	hlist_for_each(lhp,base) {
		/* hlist_for_each_entry_rcu() not required for d_hash list
		 * as it is parsed under dcache_lock
		 */
		if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
			__dget_locked(dentry);
			spin_unlock(&dcache_lock);
			return 1;
		}
	}
	spin_unlock(&dcache_lock);
out:
	return 0;
}

/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later
 */

void d_delete(struct dentry * dentry)
{
	int isdir = 0;
	/*
	 * Are we the only user?
	 */
	spin_lock(&dcache_lock);
	spin_lock(&dentry->d_lock);
	isdir = S_ISDIR(dentry->d_inode->i_mode);
	if (atomic_read(&dentry->d_count) == 1) {
		dentry_iput(dentry);
		fsnotify_nameremove(dentry, isdir);

		/* remove this and other inotify debug checks after 2.6.18 */
		dentry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
		return;
	}

	if (!d_unhashed(dentry))
		__d_drop(dentry);

	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);

	fsnotify_nameremove(dentry, isdir);
}

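/*
 * Ordering note (informal): callers such as vfs_unlink() remove the name on
 * disk first and only then call d_delete(), so the dentry either becomes a
 * negative entry (making repeated lookups of the deleted name cheap) or, if
 * it is still in use elsewhere, is unhashed and left for the final dput()
 * to reap.
 */
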
1430 | static void __d_rehash(struct dentry * entry, struct hlist_head *list) | |
1431 | { | |
1432 | ||
1433 | entry->d_flags &= ~DCACHE_UNHASHED; | |
1434 | hlist_add_head_rcu(&entry->d_hash, list); | |
1435 | } | |
1436 | ||
770bfad8 DH |
1437 | static void _d_rehash(struct dentry * entry) |
1438 | { | |
1439 | __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash)); | |
1440 | } | |
1441 | ||
1da177e4 LT |
1442 | /** |
1443 | * d_rehash - add an entry back to the hash | |
1444 | * @entry: dentry to add to the hash | |
1445 | * | |
1446 | * Adds a dentry to the hash according to its name. | |
1447 | */ | |
1448 | ||
1449 | void d_rehash(struct dentry * entry) | |
1450 | { | |
1da177e4 LT |
1451 | spin_lock(&dcache_lock); |
1452 | spin_lock(&entry->d_lock); | |
770bfad8 | 1453 | _d_rehash(entry); |
1da177e4 LT |
1454 | spin_unlock(&entry->d_lock); |
1455 | spin_unlock(&dcache_lock); | |
1456 | } | |
1457 | ||
1458 | #define do_switch(x,y) do { \ | |
1459 | __typeof__ (x) __tmp = x; \ | |
1460 | x = y; y = __tmp; } while (0) | |
1461 | ||
1462 | /* | |
1463 | * When switching names, the actual string doesn't strictly have to | |
1464 | * be preserved in the target - because we're dropping the target | |
1465 | * anyway. As such, we can just do a simple memcpy() to copy over | |
1466 | * the new name before we switch. | |
1467 | * | |
1468 | * Note that we have to be a lot more careful about getting the hash | |
1469 | * switched - we have to switch the hash value properly even if it | |
1470 | * then no longer matches the actual (corrupted) string of the target. | |
1471 | * The hash value has to match the hash queue that the dentry is on.. | |
1472 | */ | |
1473 | static void switch_names(struct dentry *dentry, struct dentry *target) | |
1474 | { | |
1475 | if (dname_external(target)) { | |
1476 | if (dname_external(dentry)) { | |
1477 | /* | |
1478 | * Both external: swap the pointers | |
1479 | */ | |
1480 | do_switch(target->d_name.name, dentry->d_name.name); | |
1481 | } else { | |
1482 | /* | |
1483 | * dentry:internal, target:external. Steal target's | |
1484 | * storage and make target internal. | |
1485 | */ | |
1486 | dentry->d_name.name = target->d_name.name; | |
1487 | target->d_name.name = target->d_iname; | |
1488 | } | |
1489 | } else { | |
1490 | if (dname_external(dentry)) { | |
1491 | /* | |
1492 | * dentry:external, target:internal. Give dentry's | |
1493 | * storage to target and make dentry internal | |
1494 | */ | |
1495 | memcpy(dentry->d_iname, target->d_name.name, | |
1496 | target->d_name.len + 1); | |
1497 | target->d_name.name = dentry->d_name.name; | |
1498 | dentry->d_name.name = dentry->d_iname; | |
1499 | } else { | |
1500 | /* | |
1501 | * Both are internal. Just copy target to dentry | |
1502 | */ | |
1503 | memcpy(dentry->d_iname, target->d_name.name, | |
1504 | target->d_name.len + 1); | |
1505 | } | |
1506 | } | |
1507 | } | |
1508 | ||
1509 | /* | |
1510 | * We cannibalize "target" when moving dentry on top of it, | |
1511 | * because it's going to be thrown away anyway. We could be more | |
1512 | * polite about it, though. | |
1513 | * | |
1514 | * This forceful removal will result in ugly /proc output if | |
1515 | * somebody holds a file open that got deleted due to a rename. | |
1516 | * We could be nicer about the deleted file, and let it show | |
1517 | * up under the name it got deleted rather than the name that | |
1518 | * deleted it. | |
1519 | */ | |
1520 | ||
9eaef27b TM |
1521 | /* |
1522 | * d_move_locked - move a dentry | |
1da177e4 LT |
1523 | * @dentry: entry to move |
1524 | * @target: new dentry | |
1525 | * | |
1526 | * Update the dcache to reflect the move of a file name. Negative | |
1527 | * dcache entries should not be moved in this way. | |
1528 | */ | |
9eaef27b | 1529 | static void d_move_locked(struct dentry * dentry, struct dentry * target) |
1da177e4 LT |
1530 | { |
1531 | struct hlist_head *list; | |
1532 | ||
1533 | if (!dentry->d_inode) | |
1534 | printk(KERN_WARNING "VFS: moving negative dcache entry\n"); | |
1535 | ||
1da177e4 LT |
1536 | write_seqlock(&rename_lock); |
1537 | /* | |
1538 | * XXXX: do we really need to take target->d_lock? | |
1539 | */ | |
1540 | if (target < dentry) { | |
1541 | spin_lock(&target->d_lock); | |
a90b9c05 | 1542 | spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); |
1da177e4 LT |
1543 | } else { |
1544 | spin_lock(&dentry->d_lock); | |
a90b9c05 | 1545 | spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED); |
1da177e4 LT |
1546 | } |
1547 | ||
1548 | /* Move the dentry to the target hash queue, if on different bucket */ | |
1549 | if (dentry->d_flags & DCACHE_UNHASHED) | |
1550 | goto already_unhashed; | |
1551 | ||
1552 | hlist_del_rcu(&dentry->d_hash); | |
1553 | ||
1554 | already_unhashed: | |
1555 | list = d_hash(target->d_parent, target->d_name.hash); | |
1556 | __d_rehash(dentry, list); | |
1557 | ||
1558 | /* Unhash the target: dput() will then get rid of it */ | |
1559 | __d_drop(target); | |
1560 | ||
5160ee6f ED |
1561 | list_del(&dentry->d_u.d_child); |
1562 | list_del(&target->d_u.d_child); | |
1da177e4 LT |
1563 | |
1564 | /* Switch the names.. */ | |
1565 | switch_names(dentry, target); | |
1566 | do_switch(dentry->d_name.len, target->d_name.len); | |
1567 | do_switch(dentry->d_name.hash, target->d_name.hash); | |
1568 | ||
1569 | /* ... and switch the parents */ | |
1570 | if (IS_ROOT(dentry)) { | |
1571 | dentry->d_parent = target->d_parent; | |
1572 | target->d_parent = target; | |
5160ee6f | 1573 | INIT_LIST_HEAD(&target->d_u.d_child); |
1da177e4 LT |
1574 | } else { |
1575 | do_switch(dentry->d_parent, target->d_parent); | |
1576 | ||
1577 | /* And add them back to the (new) parent lists */ | |
5160ee6f | 1578 | list_add(&target->d_u.d_child, &target->d_parent->d_subdirs); |
1da177e4 LT |
1579 | } |
1580 | ||
5160ee6f | 1581 | list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); |
1da177e4 | 1582 | spin_unlock(&target->d_lock); |
c32ccd87 | 1583 | fsnotify_d_move(dentry); |
1da177e4 LT |
1584 | spin_unlock(&dentry->d_lock); |
1585 | write_sequnlock(&rename_lock); | |
9eaef27b TM |
1586 | } |
1587 | ||
1588 | /** | |
1589 | * d_move - move a dentry | |
1590 | * @dentry: entry to move | |
1591 | * @target: new dentry | |
1592 | * | |
1593 | * Update the dcache to reflect the move of a file name. Negative | |
1594 | * dcache entries should not be moved in this way. | |
1595 | */ | |
1596 | ||
1597 | void d_move(struct dentry * dentry, struct dentry * target) | |
1598 | { | |
1599 | spin_lock(&dcache_lock); | |
1600 | d_move_locked(dentry, target); | |
1da177e4 LT |
1601 | spin_unlock(&dcache_lock); |
1602 | } | |
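/*
 * Illustrative sketch, not part of this file: the "lock the lower address
 * first" ordering that d_move_locked() uses on the two d_locks, shown with
 * plain pthread mutexes.  Because every caller takes the two locks in the
 * same global (address) order, two concurrent moves cannot deadlock against
 * each other.  (The kernel version additionally uses spin_lock_nested() so
 * that lockdep accepts taking two locks of the same class.)  "struct obj"
 * and "lock_pair()" are hypothetical.
 */
#include <pthread.h>

struct obj {
	pthread_mutex_t lock;
	/* ... payload ... */
};

static void lock_pair(struct obj *a, struct obj *b)
{
	if (a == b) {
		pthread_mutex_lock(&a->lock);
		return;
	}
	if (a < b) {
		/* lower address first */
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}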
1603 | ||
9eaef27b TM |
1604 | /* |
1605 | * Helper that returns 1 if p1 is a parent of p2, else 0 | |
1606 | */ | |
1607 | static int d_isparent(struct dentry *p1, struct dentry *p2) | |
1608 | { | |
1609 | struct dentry *p; | |
1610 | ||
1611 | for (p = p2; p->d_parent != p; p = p->d_parent) { | |
1612 | if (p->d_parent == p1) | |
1613 | return 1; | |
1614 | } | |
1615 | return 0; | |
1616 | } | |
1617 | ||
1618 | /* | |
1619 | * This helper attempts to cope with remotely renamed directories | |
1620 | * | |
1621 | * It assumes that the caller is already holding | |
1622 | * dentry->d_parent->d_inode->i_mutex and the dcache_lock | |
1623 | * | |
1624 | * Note: If ever the locking in lock_rename() changes, then please | |
1625 | * remember to update this too... | |
1626 | * | |
1627 | * On return, dcache_lock will have been unlocked. | |
1628 | */ | |
1629 | static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias) | |
1630 | { | |
1631 | struct mutex *m1 = NULL, *m2 = NULL; | |
1632 | struct dentry *ret; | |
1633 | ||
1634 | /* If alias and dentry share a parent, then no extra locks required */ | |
1635 | if (alias->d_parent == dentry->d_parent) | |
1636 | goto out_unalias; | |
1637 | ||
1638 | /* Check for loops */ | |
1639 | ret = ERR_PTR(-ELOOP); | |
1640 | if (d_isparent(alias, dentry)) | |
1641 | goto out_err; | |
1642 | ||
1643 | /* See lock_rename() */ | |
1644 | ret = ERR_PTR(-EBUSY); | |
1645 | if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) | |
1646 | goto out_err; | |
1647 | m1 = &dentry->d_sb->s_vfs_rename_mutex; | |
1648 | if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex)) | |
1649 | goto out_err; | |
1650 | m2 = &alias->d_parent->d_inode->i_mutex; | |
1651 | out_unalias: | |
1652 | d_move_locked(alias, dentry); | |
1653 | ret = alias; | |
1654 | out_err: | |
1655 | spin_unlock(&dcache_lock); | |
1656 | if (m2) | |
1657 | mutex_unlock(m2); | |
1658 | if (m1) | |
1659 | mutex_unlock(m1); | |
1660 | return ret; | |
1661 | } | |
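/*
 * Illustrative sketch, not part of this file: the trylock-and-back-off
 * pattern __d_unalias() falls back to when it cannot take the extra locks
 * in the canonical lock_rename() order, shown with plain pthread mutexes.
 * "lock_both_or_busy()" is hypothetical.
 */
#include <errno.h>
#include <pthread.h>

static int lock_both_or_busy(pthread_mutex_t *m1, pthread_mutex_t *m2)
{
	if (pthread_mutex_trylock(m1))
		return -EBUSY;			/* contended: refuse rather than risk deadlock */
	if (pthread_mutex_trylock(m2)) {
		pthread_mutex_unlock(m1);	/* drop what we already took */
		return -EBUSY;
	}
	return 0;				/* caller owns both and must unlock both */
}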
1662 | ||
770bfad8 DH |
1663 | /* |
1664 | * Prepare an anonymous dentry for life in the superblock's dentry tree as a | |
1665 | * named dentry in place of the dentry to be replaced. | |
1666 | */ | |
1667 | static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) | |
1668 | { | |
1669 | struct dentry *dparent, *aparent; | |
1670 | ||
1671 | switch_names(dentry, anon); | |
1672 | do_switch(dentry->d_name.len, anon->d_name.len); | |
1673 | do_switch(dentry->d_name.hash, anon->d_name.hash); | |
1674 | ||
1675 | dparent = dentry->d_parent; | |
1676 | aparent = anon->d_parent; | |
1677 | ||
1678 | dentry->d_parent = (aparent == anon) ? dentry : aparent; | |
1679 | list_del(&dentry->d_u.d_child); | |
1680 | if (!IS_ROOT(dentry)) | |
1681 | list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); | |
1682 | else | |
1683 | INIT_LIST_HEAD(&dentry->d_u.d_child); | |
1684 | ||
1685 | anon->d_parent = (dparent == dentry) ? anon : dparent; | |
1686 | list_del(&anon->d_u.d_child); | |
1687 | if (!IS_ROOT(anon)) | |
1688 | list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs); | |
1689 | else | |
1690 | INIT_LIST_HEAD(&anon->d_u.d_child); | |
1691 | ||
1692 | anon->d_flags &= ~DCACHE_DISCONNECTED; | |
1693 | } | |
1694 | ||
1695 | /** | |
1696 | * d_materialise_unique - introduce an inode into the tree | |
1697 | * @dentry: candidate dentry | |
1698 | * @inode: inode to bind to the dentry, to which aliases may be attached | |
1699 | * | |
1700 | * Introduces a dentry into the tree, substituting an extant disconnected | |
1701 | * root directory alias in its place if there is one. | |
1702 | */ | |
1703 | struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) | |
1704 | { | |
9eaef27b | 1705 | struct dentry *actual; |
770bfad8 DH |
1706 | |
1707 | BUG_ON(!d_unhashed(dentry)); | |
1708 | ||
1709 | spin_lock(&dcache_lock); | |
1710 | ||
1711 | if (!inode) { | |
1712 | actual = dentry; | |
1713 | dentry->d_inode = NULL; | |
1714 | goto found_lock; | |
1715 | } | |
1716 | ||
9eaef27b TM |
1717 | if (S_ISDIR(inode->i_mode)) { |
1718 | struct dentry *alias; | |
1719 | ||
1720 | /* Does an aliased dentry already exist? */ | |
1721 | alias = __d_find_alias(inode, 0); | |
1722 | if (alias) { | |
1723 | actual = alias; | |
1724 | /* Is this an anonymous mountpoint that we could splice | |
1725 | * into our tree? */ | |
1726 | if (IS_ROOT(alias)) { | |
1727 | spin_lock(&alias->d_lock); | |
1728 | __d_materialise_dentry(dentry, alias); | |
1729 | __d_drop(alias); | |
1730 | goto found; | |
1731 | } | |
1732 | /* Nope, but we must(!) avoid directory aliasing */ | |
1733 | actual = __d_unalias(dentry, alias); | |
1734 | if (IS_ERR(actual)) | |
1735 | dput(alias); | |
1736 | goto out_nolock; | |
1737 | } | |
770bfad8 DH |
1738 | } |
1739 | ||
1740 | /* Add a unique reference */ | |
1741 | actual = __d_instantiate_unique(dentry, inode); | |
1742 | if (!actual) | |
1743 | actual = dentry; | |
1744 | else if (unlikely(!d_unhashed(actual))) | |
1745 | goto shouldnt_be_hashed; | |
1746 | ||
1747 | found_lock: | |
1748 | spin_lock(&actual->d_lock); | |
1749 | found: | |
1750 | _d_rehash(actual); | |
1751 | spin_unlock(&actual->d_lock); | |
1752 | spin_unlock(&dcache_lock); | |
9eaef27b | 1753 | out_nolock: |
770bfad8 DH |
1754 | if (actual == dentry) { |
1755 | security_d_instantiate(dentry, inode); | |
1756 | return NULL; | |
1757 | } | |
1758 | ||
1759 | iput(inode); | |
1760 | return actual; | |
1761 | ||
770bfad8 DH |
1762 | shouldnt_be_hashed: |
1763 | spin_unlock(&dcache_lock); | |
1764 | BUG(); | |
1765 | goto shouldnt_be_hashed; | |
1766 | } | |
1767 | ||
1da177e4 LT |
1768 | /** |
1769 | * d_path - return the path of a dentry | |
1770 | * @dentry: dentry to report | |
1771 | * @vfsmnt: vfsmnt to which the dentry belongs | |
1772 | * @root: root dentry | |
1773 | * @rootmnt: vfsmnt to which the root dentry belongs | |
1774 | * @buffer: buffer to return value in | |
1775 | * @buflen: buffer length | |
1776 | * | |
552ce544 LT |
1777 | * Convert a dentry into an ASCII path name. If the entry has been deleted |
1778 | * the string " (deleted)" is appended. Note that this is ambiguous. | |
1da177e4 | 1779 | * |
552ce544 LT |
1780 | * Returns the buffer or an error code if the path was too long. |
1781 | * | |
1782 | * "buflen" should be positive. Caller holds the dcache_lock. | |
1da177e4 | 1783 | */ |
552ce544 LT |
1784 | static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt, |
1785 | struct dentry *root, struct vfsmount *rootmnt, | |
1786 | char *buffer, int buflen) | |
1da177e4 | 1787 | { |
552ce544 LT |
1788 | char * end = buffer+buflen; |
1789 | char * retval; | |
1790 | int namelen; | |
1da177e4 | 1791 | |
552ce544 LT |
1792 | *--end = '\0'; |
1793 | buflen--; | |
1da177e4 | 1794 | if (!IS_ROOT(dentry) && d_unhashed(dentry)) { |
eb3dfb0c | 1795 | buflen -= 10; |
552ce544 LT |
1796 | end -= 10; |
1797 | if (buflen < 0) | |
1798 | goto Elong; | |
1799 | memcpy(end, " (deleted)", 10); | |
1da177e4 | 1800 | } |
552ce544 LT |
1801 | |
1802 | if (buflen < 1) | |
1803 | goto Elong; | |
1804 | /* Get '/' right */ | |
1805 | retval = end-1; | |
1806 | *retval = '/'; | |
1807 | ||
1808 | for (;;) { | |
1da177e4 LT |
1809 | struct dentry * parent; |
1810 | ||
552ce544 LT |
1811 | if (dentry == root && vfsmnt == rootmnt) |
1812 | break; | |
1da177e4 | 1813 | if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { |
552ce544 | 1814 | /* Global root? */ |
1da177e4 LT |
1815 | spin_lock(&vfsmount_lock); |
1816 | if (vfsmnt->mnt_parent == vfsmnt) { | |
1817 | spin_unlock(&vfsmount_lock); | |
1818 | goto global_root; | |
1819 | } | |
1820 | dentry = vfsmnt->mnt_mountpoint; | |
1821 | vfsmnt = vfsmnt->mnt_parent; | |
1822 | spin_unlock(&vfsmount_lock); | |
1823 | continue; | |
1824 | } | |
1825 | parent = dentry->d_parent; | |
1826 | prefetch(parent); | |
1827 | namelen = dentry->d_name.len; | |
eb3dfb0c | 1828 | buflen -= namelen + 1; |
552ce544 LT |
1829 | if (buflen < 0) |
1830 | goto Elong; | |
1831 | end -= namelen; | |
1832 | memcpy(end, dentry->d_name.name, namelen); | |
1833 | *--end = '/'; | |
1834 | retval = end; | |
1da177e4 LT |
1835 | dentry = parent; |
1836 | } | |
1837 | ||
552ce544 | 1838 | return retval; |
1da177e4 LT |
1839 | |
1840 | global_root: | |
1841 | namelen = dentry->d_name.len; | |
552ce544 LT |
1842 | buflen -= namelen; |
1843 | if (buflen < 0) | |
1da177e4 | 1844 | goto Elong; |
552ce544 LT |
1845 | retval -= namelen-1; /* hit the slash */ |
1846 | memcpy(retval, dentry->d_name.name, namelen); | |
1847 | return retval; | |
1da177e4 | 1848 | Elong: |
552ce544 | 1849 | return ERR_PTR(-ENAMETOOLONG); |
1da177e4 LT |
1850 | } |
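/*
 * Illustrative sketch, not part of this file: the right-to-left buffer
 * filling used by __d_path(), on a hypothetical userspace tree whose root
 * is its own parent (the same convention IS_ROOT() tests).  Each component
 * is copied in front of the part already built, so no reversal pass is
 * needed; mount crossings and the " (deleted)" suffix are omitted.
 * "struct node" and "build_path()" are hypothetical.
 */
#include <string.h>

struct node {
	struct node *parent;		/* the root points to itself */
	const char *name;
};

static char *build_path(const struct node *n, char *buf, int buflen)
{
	char *end = buf + buflen;
	char *retval;

	*--end = '\0';
	buflen--;
	if (buflen < 1)
		return NULL;		/* ~ ERR_PTR(-ENAMETOOLONG) */
	retval = end - 1;
	*retval = '/';			/* a bare root comes out as "/" */

	while (n->parent != n) {
		int namelen = strlen(n->name);

		buflen -= namelen + 1;
		if (buflen < 0)
			return NULL;
		end -= namelen;
		memcpy(end, n->name, namelen);
		*--end = '/';
		retval = end;
		n = n->parent;
	}
	return retval;			/* points inside buf, not at buf[0] */
}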
1851 | ||
1852 | /* write full pathname into buffer and return start of pathname */ | |
552ce544 LT |
1853 | char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt, |
1854 | char *buf, int buflen) | |
1da177e4 LT |
1855 | { |
1856 | char *res; | |
1857 | struct vfsmount *rootmnt; | |
1858 | struct dentry *root; | |
1859 | ||
c23fbb6b ED |
1860 | /* |
1861 | * We have various synthetic filesystems that never get mounted. On | |
1862 | * these filesystems dentries are never used for lookup purposes, and | |
1863 | * thus don't need to be hashed. They also don't need a name until a | |
1864 | * user wants to identify the object in /proc/pid/fd/. The little hack | |
1865 | * below allows us to generate a name for these objects on demand: | |
1866 | */ | |
1867 | if (dentry->d_op && dentry->d_op->d_dname) | |
1868 | return dentry->d_op->d_dname(dentry, buf, buflen); | |
1869 | ||
1da177e4 LT |
1870 | read_lock(¤t->fs->lock); |
1871 | rootmnt = mntget(current->fs->rootmnt); | |
1872 | root = dget(current->fs->root); | |
1873 | read_unlock(¤t->fs->lock); | |
552ce544 LT |
1874 | spin_lock(&dcache_lock); |
1875 | res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen); | |
1876 | spin_unlock(&dcache_lock); | |
1da177e4 LT |
1877 | dput(root); |
1878 | mntput(rootmnt); | |
1879 | return res; | |
1880 | } | |
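/*
 * Illustrative sketch, not part of this file: the usual calling pattern for
 * d_path() in this era -- a full page as scratch space, with the returned
 * pointer landing somewhere inside that page (or being an ERR_PTR).
 * "example_print_path()" is hypothetical.
 */
static int example_print_path(struct dentry *dentry, struct vfsmount *mnt)
{
	char *page = (char *)__get_free_page(GFP_KERNEL);
	char *p;
	int err = 0;

	if (!page)
		return -ENOMEM;
	p = d_path(dentry, mnt, page, PAGE_SIZE);
	if (IS_ERR(p))
		err = PTR_ERR(p);
	else
		printk(KERN_DEBUG "path: %s\n", p);
	free_page((unsigned long)page);
	return err;
}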
1881 | ||
c23fbb6b ED |
1882 | /* |
1883 | * Helper function for dentry_operations.d_dname() members | |
1884 | */ | |
1885 | char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen, | |
1886 | const char *fmt, ...) | |
1887 | { | |
1888 | va_list args; | |
1889 | char temp[64]; | |
1890 | int sz; | |
1891 | ||
1892 | va_start(args, fmt); | |
1893 | sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1; | |
1894 | va_end(args); | |
1895 | ||
1896 | if (sz > sizeof(temp) || sz > buflen) | |
1897 | return ERR_PTR(-ENAMETOOLONG); | |
1898 | ||
1899 | buffer += buflen - sz; | |
1900 | return memcpy(buffer, temp, sz); | |
1901 | } | |
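/*
 * Illustrative sketch, not part of this file: a d_dname method built on the
 * helper above, roughly in the style of pipefs, where the name is generated
 * on demand from the inode number instead of being stored in the dentry.
 * "example_dname" and "example_dentry_ops" are hypothetical.
 */
static char *example_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
				dentry->d_inode->i_ino);
}

static struct dentry_operations example_dentry_ops = {
	.d_dname	= example_dname,
};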
1902 | ||
1da177e4 LT |
1903 | /* |
1904 | * NOTE! The user-level library version returns a | |
1905 | * character pointer. The kernel system call just | |
1906 | * returns the length of the buffer filled (which | |
1907 | * includes the ending '\0' character), or a negative | |
1908 | * error value. So libc would do something like | |
1909 | * | |
1910 | * char *getcwd(char * buf, size_t size) | |
1911 | * { | |
1912 | * int retval; | |
1913 | * | |
1914 | * retval = sys_getcwd(buf, size); | |
1915 | * if (retval >= 0) | |
1916 | * return buf; | |
1917 | * errno = -retval; | |
1918 | * return NULL; | |
1919 | * } | |
1920 | */ | |
1921 | asmlinkage long sys_getcwd(char __user *buf, unsigned long size) | |
1922 | { | |
552ce544 | 1923 | int error; |
1da177e4 LT |
1924 | struct vfsmount *pwdmnt, *rootmnt; |
1925 | struct dentry *pwd, *root; | |
552ce544 | 1926 | char *page = (char *) __get_free_page(GFP_USER); |
1da177e4 LT |
1927 | |
1928 | if (!page) | |
1929 | return -ENOMEM; | |
1930 | ||
1931 | read_lock(¤t->fs->lock); | |
1932 | pwdmnt = mntget(current->fs->pwdmnt); | |
1933 | pwd = dget(current->fs->pwd); | |
1934 | rootmnt = mntget(current->fs->rootmnt); | |
1935 | root = dget(current->fs->root); | |
1936 | read_unlock(¤t->fs->lock); | |
1937 | ||
552ce544 LT |
1938 | error = -ENOENT; |
1939 | /* Has the current directory been unlinked? */ | |
1940 | spin_lock(&dcache_lock); | |
1941 | if (pwd->d_parent == pwd || !d_unhashed(pwd)) { | |
1942 | unsigned long len; | |
1943 | char * cwd; | |
1da177e4 | 1944 | |
552ce544 LT |
1945 | cwd = __d_path(pwd, pwdmnt, root, rootmnt, page, PAGE_SIZE); |
1946 | spin_unlock(&dcache_lock); | |
1947 | ||
1948 | error = PTR_ERR(cwd); | |
1949 | if (IS_ERR(cwd)) | |
1950 | goto out; | |
1951 | ||
1952 | error = -ERANGE; | |
1953 | len = PAGE_SIZE + page - cwd; | |
1954 | if (len <= size) { | |
1955 | error = len; | |
1956 | if (copy_to_user(buf, cwd, len)) | |
1957 | error = -EFAULT; | |
1958 | } | |
1959 | } else | |
1960 | spin_unlock(&dcache_lock); | |
1da177e4 LT |
1961 | |
1962 | out: | |
1963 | dput(pwd); | |
1964 | mntput(pwdmnt); | |
1965 | dput(root); | |
1966 | mntput(rootmnt); | |
1967 | free_page((unsigned long) page); | |
1968 | return error; | |
1969 | } | |
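/*
 * Illustrative userspace sketch, not part of this file: the -ERANGE return
 * above is what a caller of getcwd(3) sees when its buffer is too small, so
 * a robust caller grows the buffer and retries.  "getcwd_alloc()" is
 * hypothetical.
 */
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>

static char *getcwd_alloc(void)
{
	size_t size = 256;

	for (;;) {
		char *buf = malloc(size);

		if (!buf)
			return NULL;
		if (getcwd(buf, size))
			return buf;		/* caller frees */
		free(buf);
		if (errno != ERANGE)
			return NULL;		/* e.g. ENOENT: cwd was unlinked */
		size *= 2;			/* too small: grow and retry */
	}
}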
1970 | ||
1971 | /* | |
1972 | * Test whether new_dentry is a subdirectory of old_dentry. | |
1973 | * | |
1974 | * Trivially implemented using the dcache structure | |
1975 | */ | |
1976 | ||
1977 | /** | |
1978 | * is_subdir - is new dentry a subdirectory of old_dentry | |
1979 | * @new_dentry: new dentry | |
1980 | * @old_dentry: old dentry | |
1981 | * | |
1982 | * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth). | |
1983 | * Returns 0 otherwise. | |
1984 | * Caller must ensure that "new_dentry" is pinned before calling is_subdir() | |
1985 | */ | |
1986 | ||
1987 | int is_subdir(struct dentry * new_dentry, struct dentry * old_dentry) | |
1988 | { | |
1989 | int result; | |
1990 | struct dentry * saved = new_dentry; | |
1991 | unsigned long seq; | |
1992 | ||
1993 | /* need rcu_read_lock() to protect against d_parent changing under us | |
1994 | * due to a concurrent d_move() | |
1995 | */ | |
1996 | rcu_read_lock(); | |
1997 | do { | |
1998 | /* for restarting inner loop in case of seq retry */ | |
1999 | new_dentry = saved; | |
2000 | result = 0; | |
2001 | seq = read_seqbegin(&rename_lock); | |
2002 | for (;;) { | |
2003 | if (new_dentry != old_dentry) { | |
2004 | struct dentry * parent = new_dentry->d_parent; | |
2005 | if (parent == new_dentry) | |
2006 | break; | |
2007 | new_dentry = parent; | |
2008 | continue; | |
2009 | } | |
2010 | result = 1; | |
2011 | break; | |
2012 | } | |
2013 | } while (read_seqretry(&rename_lock, seq)); | |
2014 | rcu_read_unlock(); | |
2015 | ||
2016 | return result; | |
2017 | } | |
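/*
 * Illustrative sketch, not part of this file: the lockless read-side
 * pattern used above, assuming the seqlock API from <linux/seqlock.h>.
 * A reader samples the sequence counter, reads the protected data, and
 * retries if a writer ran in the meantime (here, d_move() holds rename_lock
 * for writing while it rearranges parents).  "example_lock" and
 * "example_value" are hypothetical.
 */
static DEFINE_SEQLOCK(example_lock);
static unsigned long example_value;

static unsigned long read_example(void)
{
	unsigned long val;
	unsigned seq;

	do {
		seq = read_seqbegin(&example_lock);
		val = example_value;		/* read the protected data */
	} while (read_seqretry(&example_lock, seq));

	return val;
}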
2018 | ||
2019 | void d_genocide(struct dentry *root) | |
2020 | { | |
2021 | struct dentry *this_parent = root; | |
2022 | struct list_head *next; | |
2023 | ||
2024 | spin_lock(&dcache_lock); | |
2025 | repeat: | |
2026 | next = this_parent->d_subdirs.next; | |
2027 | resume: | |
2028 | while (next != &this_parent->d_subdirs) { | |
2029 | struct list_head *tmp = next; | |
5160ee6f | 2030 | struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); |
1da177e4 LT |
2031 | next = tmp->next; |
2032 | if (d_unhashed(dentry)||!dentry->d_inode) | |
2033 | continue; | |
2034 | if (!list_empty(&dentry->d_subdirs)) { | |
2035 | this_parent = dentry; | |
2036 | goto repeat; | |
2037 | } | |
2038 | atomic_dec(&dentry->d_count); | |
2039 | } | |
2040 | if (this_parent != root) { | |
5160ee6f | 2041 | next = this_parent->d_u.d_child.next; |
1da177e4 LT |
2042 | atomic_dec(&this_parent->d_count); |
2043 | this_parent = this_parent->d_parent; | |
2044 | goto resume; | |
2045 | } | |
2046 | spin_unlock(&dcache_lock); | |
2047 | } | |
2048 | ||
2049 | /** | |
2050 | * find_inode_number - check for dentry with name | |
2051 | * @dir: directory to check | |
2052 | * @name: Name to find. | |
2053 | * | |
2054 | * Check whether a dentry already exists for the given name, | |
2055 | * and return the inode number if it has an inode. Otherwise | |
2056 | * 0 is returned. | |
2057 | * | |
2058 | * This routine is used to post-process directory listings for | |
2059 | * filesystems using synthetic inode numbers, and is necessary | |
2060 | * to keep getcwd() working. | |
2061 | */ | |
2062 | ||
2063 | ino_t find_inode_number(struct dentry *dir, struct qstr *name) | |
2064 | { | |
2065 | struct dentry * dentry; | |
2066 | ino_t ino = 0; | |
2067 | ||
3e7e241f EB |
2068 | dentry = d_hash_and_lookup(dir, name); |
2069 | if (dentry) { | |
1da177e4 LT |
2070 | if (dentry->d_inode) |
2071 | ino = dentry->d_inode->i_ino; | |
2072 | dput(dentry); | |
2073 | } | |
1da177e4 LT |
2074 | return ino; |
2075 | } | |
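/*
 * Illustrative sketch, not part of this file: how a filesystem that makes
 * up inode numbers might consult the helper above while filling in a
 * directory listing, so that an entry already present in the dcache is
 * reported with the same inode number a subsequent stat() would return.
 * "example_dirent_ino()" is hypothetical.
 */
static ino_t example_dirent_ino(struct dentry *dir, struct qstr *name,
				ino_t synthetic_ino)
{
	ino_t ino = find_inode_number(dir, name);

	return ino ? ino : synthetic_ino;
}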
2076 | ||
2077 | static __initdata unsigned long dhash_entries; | |
2078 | static int __init set_dhash_entries(char *str) | |
2079 | { | |
2080 | if (!str) | |
2081 | return 0; | |
2082 | dhash_entries = simple_strtoul(str, &str, 0); | |
2083 | return 1; | |
2084 | } | |
2085 | __setup("dhash_entries=", set_dhash_entries); | |
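/*
 * Usage note: the boot parameter registered above forces the dentry hash
 * size from the kernel command line, e.g. "dhash_entries=65536"; when it is
 * absent (dhash_entries == 0), alloc_large_system_hash() sizes the table
 * from available memory instead.
 */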
2086 | ||
2087 | static void __init dcache_init_early(void) | |
2088 | { | |
2089 | int loop; | |
2090 | ||
2091 | /* If hashes are distributed across NUMA nodes, defer | |
2092 | * hash allocation until vmalloc space is available. | |
2093 | */ | |
2094 | if (hashdist) | |
2095 | return; | |
2096 | ||
2097 | dentry_hashtable = | |
2098 | alloc_large_system_hash("Dentry cache", | |
2099 | sizeof(struct hlist_head), | |
2100 | dhash_entries, | |
2101 | 13, | |
2102 | HASH_EARLY, | |
2103 | &d_hash_shift, | |
2104 | &d_hash_mask, | |
2105 | 0); | |
2106 | ||
2107 | for (loop = 0; loop < (1 << d_hash_shift); loop++) | |
2108 | INIT_HLIST_HEAD(&dentry_hashtable[loop]); | |
2109 | } | |
2110 | ||
2111 | static void __init dcache_init(unsigned long mempages) | |
2112 | { | |
2113 | int loop; | |
2114 | ||
2115 | /* | |
2116 | * A constructor could be added for stable state like the lists, | |
2117 | * but it is probably not worth it because of the cache nature | |
2118 | * of the dcache. | |
2119 | */ | |
0a31bd5f CL |
2120 | dentry_cache = KMEM_CACHE(dentry, |
2121 | SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); | |
1da177e4 | 2122 | |
8e1f936b | 2123 | register_shrinker(&dcache_shrinker); |
1da177e4 LT |
2124 | |
2125 | /* Hash may have been set up in dcache_init_early */ | |
2126 | if (!hashdist) | |
2127 | return; | |
2128 | ||
2129 | dentry_hashtable = | |
2130 | alloc_large_system_hash("Dentry cache", | |
2131 | sizeof(struct hlist_head), | |
2132 | dhash_entries, | |
2133 | 13, | |
2134 | 0, | |
2135 | &d_hash_shift, | |
2136 | &d_hash_mask, | |
2137 | 0); | |
2138 | ||
2139 | for (loop = 0; loop < (1 << d_hash_shift); loop++) | |
2140 | INIT_HLIST_HEAD(&dentry_hashtable[loop]); | |
2141 | } | |
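/*
 * Illustrative sketch, not part of this file: with the power-of-two table
 * set up above, the mask reported back by alloc_large_system_hash() (here
 * d_hash_mask) turns a hash value into a bucket index without a division.
 * The file's d_hash() also factors the parent dentry into the hash first;
 * this only shows the final masking step.  "example_bucket()" is
 * hypothetical.
 */
static struct hlist_head *example_bucket(struct hlist_head *table,
					 unsigned int mask, unsigned int hash)
{
	return table + (hash & mask);	/* mask == number of buckets - 1 */
}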
2142 | ||
2143 | /* SLAB cache for __getname() consumers */ | |
e18b890b | 2144 | struct kmem_cache *names_cachep __read_mostly; |
1da177e4 LT |
2145 | |
2146 | /* SLAB cache for file structures */ | |
e18b890b | 2147 | struct kmem_cache *filp_cachep __read_mostly; |
1da177e4 LT |
2148 | |
2149 | EXPORT_SYMBOL(d_genocide); | |
2150 | ||
1da177e4 LT |
2151 | void __init vfs_caches_init_early(void) |
2152 | { | |
2153 | dcache_init_early(); | |
2154 | inode_init_early(); | |
2155 | } | |
2156 | ||
2157 | void __init vfs_caches_init(unsigned long mempages) | |
2158 | { | |
2159 | unsigned long reserve; | |
2160 | ||
2161 | /* Base hash sizes on available memory, with a reserve equal to | |
2162 | 150% of current kernel size */ | |
2163 | ||
2164 | reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1); | |
2165 | mempages -= reserve; | |
2166 | ||
2167 | names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, | |
20c2df83 | 2168 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); |
1da177e4 LT |
2169 | |
2170 | filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, | |
20c2df83 | 2171 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); |
1da177e4 LT |
2172 | |
2173 | dcache_init(mempages); | |
2174 | inode_init(mempages); | |
2175 | files_init(mempages); | |
2176 | mnt_init(mempages); | |
2177 | bdev_cache_init(); | |
2178 | chrdev_init(); | |
2179 | } | |
2180 | ||
2181 | EXPORT_SYMBOL(d_alloc); | |
2182 | EXPORT_SYMBOL(d_alloc_anon); | |
2183 | EXPORT_SYMBOL(d_alloc_root); | |
2184 | EXPORT_SYMBOL(d_delete); | |
2185 | EXPORT_SYMBOL(d_find_alias); | |
2186 | EXPORT_SYMBOL(d_instantiate); | |
2187 | EXPORT_SYMBOL(d_invalidate); | |
2188 | EXPORT_SYMBOL(d_lookup); | |
2189 | EXPORT_SYMBOL(d_move); | |
770bfad8 | 2190 | EXPORT_SYMBOL_GPL(d_materialise_unique); |
1da177e4 LT |
2191 | EXPORT_SYMBOL(d_path); |
2192 | EXPORT_SYMBOL(d_prune_aliases); | |
2193 | EXPORT_SYMBOL(d_rehash); | |
2194 | EXPORT_SYMBOL(d_splice_alias); | |
2195 | EXPORT_SYMBOL(d_validate); | |
2196 | EXPORT_SYMBOL(dget_locked); | |
2197 | EXPORT_SYMBOL(dput); | |
2198 | EXPORT_SYMBOL(find_inode_number); | |
2199 | EXPORT_SYMBOL(have_submounts); | |
2200 | EXPORT_SYMBOL(names_cachep); | |
2201 | EXPORT_SYMBOL(shrink_dcache_parent); | |
2202 | EXPORT_SYMBOL(shrink_dcache_sb); |