fs: dcache reduce prune_one_dentry locking
/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include "internal.h"

/*
 * Usage:
 * dcache_inode_lock protects:
 *   - i_dentry, d_alias, d_inode
 * dcache_hash_lock protects:
 *   - the dcache hash table, s_anon lists
 * dcache_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_count
 *   - d_unhashed()
 *   - d_parent and d_subdirs
 *   - children's d_child and d_parent
 *   - d_alias, d_inode
 *
 * Ordering:
 * dcache_inode_lock
 *   dentry->d_lock
 *     dcache_lru_lock
 *     dcache_hash_lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * if (dentry1 < dentry2)
 *   dentry1->d_lock
 *     dentry2->d_lock
 */
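
/*
 * For example - an illustrative sketch, not a helper defined in this
 * file - the address-based rule above for two dentries with no
 * ancestor relationship would be applied as:
 *
 *	if (dentry1 < dentry2) {
 *		spin_lock(&dentry1->d_lock);
 *		spin_lock_nested(&dentry2->d_lock, DENTRY_D_LOCK_NESTED);
 *	} else {
 *		spin_lock(&dentry2->d_lock);
 *		spin_lock_nested(&dentry1->d_lock, DENTRY_D_LOCK_NESTED);
 *	}
 */
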
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_inode_lock);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_hash_lock);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);
EXPORT_SYMBOL(dcache_inode_lock);

static struct kmem_cache *dentry_cache __read_mostly;

#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
#define D_HASHBITS	d_hash_shift
#define D_HASHMASK	d_hash_mask

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;
static struct hlist_head *dentry_hashtable __read_mostly;

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(unsigned int, nr_dentry);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
static int get_nr_dentry(void)
{
	int i;
	int sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif
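
/*
 * nr_dentry is a set of loosely-synchronized per-cpu counters: the sum
 * computed by get_nr_dentry() can transiently go negative when
 * increments and decrements race across CPUs, hence the clamp to zero
 * above. The total is a statistic, not an invariant.
 */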

static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	WARN_ON(!list_empty(&dentry->d_alias));
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}

/*
 * no locks, please.
 */
static void d_free(struct dentry *dentry)
{
	BUG_ON(dentry->d_count);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	/* if dentry was never inserted into hash, immediate free is OK */
	if (hlist_unhashed(&dentry->d_hash))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}
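
/*
 * The unhashed special case in d_free() is safe because a dentry that
 * was never added to the hash can never have been reached by an RCU
 * hash walk, so no grace period is needed before freeing it. Hashed
 * dentries must go through call_rcu(), since concurrent __d_lookup()
 * walkers may still hold pointers to them.
 */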

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 */
static void dentry_iput(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dcache_inode_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_inode_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_inode_lock);
	}
}

/*
 * dentry_lru_(add|del|move_tail) must be called with d_lock held.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	if (list_empty(&dentry->d_lru)) {
		spin_lock(&dcache_lru_lock);
		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
		dentry->d_sb->s_nr_dentry_unused++;
		dentry_stat.nr_unused++;
		spin_unlock(&dcache_lru_lock);
	}
}

static void __dentry_lru_del(struct dentry *dentry)
{
	list_del_init(&dentry->d_lru);
	dentry->d_sb->s_nr_dentry_unused--;
	dentry_stat.nr_unused--;
}

static void dentry_lru_del(struct dentry *dentry)
{
	if (!list_empty(&dentry->d_lru)) {
		spin_lock(&dcache_lru_lock);
		__dentry_lru_del(dentry);
		spin_unlock(&dcache_lru_lock);
	}
}

static void dentry_lru_move_tail(struct dentry *dentry)
{
	spin_lock(&dcache_lru_lock);
	if (list_empty(&dentry->d_lru)) {
		list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
		dentry->d_sb->s_nr_dentry_unused++;
		dentry_stat.nr_unused++;
	} else {
		list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
	}
	spin_unlock(&dcache_lru_lock);
}
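
/*
 * Invariant for the three helpers above: a dentry is on its
 * superblock's LRU list iff d_lru is non-empty, and the
 * s_nr_dentry_unused and nr_unused counters are only ever adjusted
 * under dcache_lru_lock, together with the list manipulation, so the
 * counters always match the list contents.
 */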

/**
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill
 *
 * The dentry must already be unhashed and removed from the LRU.
 *
 * If this is the root of the dentry tree, return NULL.
 *
 * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
 * d_kill.
 */
static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
	__releases(dentry->d_lock)
	__releases(parent->d_lock)
	__releases(dcache_inode_lock)
{
	dentry->d_parent = NULL;
	list_del(&dentry->d_u.d_child);
	if (parent)
		spin_unlock(&parent->d_lock);
	dentry_iput(dentry);
	/*
	 * dentry_iput drops the locks, at which point nobody (except
	 * transient RCU lookups) can reach this dentry.
	 */
	d_free(dentry);
	return parent;
}

/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!(dentry->d_flags & DCACHE_UNHASHED)) {
		dentry->d_flags |= DCACHE_UNHASHED;
		spin_lock(&dcache_hash_lock);
		hlist_del_rcu(&dentry->d_hash);
		spin_unlock(&dcache_hash_lock);
	}
}
EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);

/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 *
 * no dcache lock, please.
 */

void dput(struct dentry *dentry)
{
	struct dentry *parent;
	if (!dentry)
		return;

repeat:
	if (dentry->d_count == 1)
		might_sleep();
	spin_lock(&dentry->d_lock);
	BUG_ON(!dentry->d_count);
	if (dentry->d_count > 1) {
		dentry->d_count--;
		spin_unlock(&dentry->d_lock);
		return;
	}

	if (dentry->d_op && dentry->d_op->d_delete) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	/* Unreachable? Get rid of it */
	if (d_unhashed(dentry))
		goto kill_it;

	/* Otherwise leave it cached and ensure it's on the LRU */
	dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	if (!spin_trylock(&dcache_inode_lock)) {
relock:
		spin_unlock(&dentry->d_lock);
		cpu_relax();
		goto repeat;
	}
	if (IS_ROOT(dentry))
		parent = NULL;
	else
		parent = dentry->d_parent;
	if (parent && !spin_trylock(&parent->d_lock)) {
		spin_unlock(&dcache_inode_lock);
		goto relock;
	}
	dentry->d_count--;
	/* if dentry was on the d_lru list delete it from there */
	dentry_lru_del(dentry);
	/* if it was on the hash (d_delete case), then remove it */
	__d_drop(dentry);
	dentry = d_kill(dentry, parent);
	if (dentry)
		goto repeat;
}
EXPORT_SYMBOL(dput);
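
/*
 * Note the trylock dance on the kill_it path above: dput() already
 * holds dentry->d_lock, but the documented lock order puts
 * dcache_inode_lock (and the parent's d_lock) before d_lock. Acquiring
 * them with trylock, and dropping d_lock and retrying on failure,
 * avoids deadlocking on the inverted order.
 */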

/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are other dentries that can be
 * reached through this one we can't delete it and we
 * return -EBUSY. On success we return 0.
 *
 * no dcache lock.
 */

int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dentry->d_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dentry->d_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 */
	if (dentry->d_count > 1) {
		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
			spin_unlock(&dentry->d_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	return 0;
}
EXPORT_SYMBOL(d_invalidate);

/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_count++;
}

static inline void __dget(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
}

struct dentry *dget_parent(struct dentry *dentry)
{
	struct dentry *ret;

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	if (!ret) {
		rcu_read_unlock();
		goto out;
	}
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_count);
	ret->d_count++;
	spin_unlock(&ret->d_lock);
out:
	return ret;
}
EXPORT_SYMBOL(dget_parent);
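
/*
 * dget_parent() is an instance of the "peek under RCU, confirm under
 * the lock" pattern: d_parent can change under us (rename), so the
 * value read under rcu_read_lock() is only trusted once ret->d_lock is
 * held and dentry->d_parent is seen to still equal ret; otherwise the
 * walk is retried from scratch.
 */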

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 * @want_discon: flag, used by d_splice_alias, to request
 *		that only a DISCONNECTED alias be returned.
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one unless @want_discon is set,
 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
 */
static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
{
	struct dentry *alias, *discon_alias;

again:
	discon_alias = NULL;
	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;
			} else if (!want_discon) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
		goto again;
	}
	return NULL;
}

struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!list_empty(&inode->i_dentry)) {
		spin_lock(&dcache_inode_lock);
		de = __d_find_alias(inode, 0);
		spin_unlock(&dcache_inode_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);

/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&dcache_inode_lock);
	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_count) {
			__dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_inode_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_inode_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
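
/*
 * d_prune_aliases() restarts its i_dentry walk from the top every time
 * it drops dcache_inode_lock to call dput(): once the lock is dropped
 * the alias list may change, so continuing from a stale list position
 * would be unsafe. The transient __dget_dlock() reference keeps the
 * victim alive across the unlock so that the final dput() does the
 * actual kill.
 */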

/*
 * Throw away a dentry - free the inode, dput the parent. This requires that
 * the LRU list has already been removed.
 *
 * Try to prune ancestors as well. This is necessary to prevent
 * quadratic behavior of shrink_dcache_parent(), but is also expected
 * to be beneficial in reducing dentry cache fragmentation.
 */
static void prune_one_dentry(struct dentry *dentry, struct dentry *parent)
	__releases(dentry->d_lock)
	__releases(parent->d_lock)
	__releases(dcache_inode_lock)
{
	__d_drop(dentry);
	dentry = d_kill(dentry, parent);

	/*
	 * Prune ancestors.
	 */
	while (dentry) {
relock:
		spin_lock(&dentry->d_lock);
		if (dentry->d_count > 1) {
			dentry->d_count--;
			spin_unlock(&dentry->d_lock);
			return;
		}
		if (!spin_trylock(&dcache_inode_lock)) {
relock2:
			spin_unlock(&dentry->d_lock);
			cpu_relax();
			goto relock;
		}

		if (IS_ROOT(dentry))
			parent = NULL;
		else
			parent = dentry->d_parent;
		if (parent && !spin_trylock(&parent->d_lock)) {
			spin_unlock(&dcache_inode_lock);
			goto relock2;
		}
		dentry->d_count--;
		dentry_lru_del(dentry);
		__d_drop(dentry);
		dentry = d_kill(dentry, parent);
	}
}
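
/*
 * As in dput(), the ancestor loop above takes d_lock first and so must
 * trylock dcache_inode_lock and the parent's d_lock, unwinding and
 * retrying on contention. Each iteration drops the reference that the
 * just-freed child held on its parent, and only continues upwards if
 * that was the parent's last remaining reference.
 */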

static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry;

	while (!list_empty(list)) {
		struct dentry *parent;

		dentry = list_entry(list->prev, struct dentry, d_lru);

		if (!spin_trylock(&dentry->d_lock)) {
relock:
			spin_unlock(&dcache_lru_lock);
			cpu_relax();
			spin_lock(&dcache_lru_lock);
			continue;
		}

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free
		 * it - just keep it off the LRU list.
		 */
		if (dentry->d_count) {
			__dentry_lru_del(dentry);
			spin_unlock(&dentry->d_lock);
			continue;
		}
		if (IS_ROOT(dentry))
			parent = NULL;
		else
			parent = dentry->d_parent;
		if (parent && !spin_trylock(&parent->d_lock)) {
			spin_unlock(&dentry->d_lock);
			goto relock;
		}
		__dentry_lru_del(dentry);
		spin_unlock(&dcache_lru_lock);

		prune_one_dentry(dentry, parent);
		/* dcache_inode_lock and dentry->d_lock dropped */
		spin_lock(&dcache_inode_lock);
		spin_lock(&dcache_lru_lock);
	}
}
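
/*
 * shrink_dentry_list() runs with dcache_lru_lock held, which nests
 * inside d_lock in the documented order, so d_lock (and the parent's
 * d_lock) can only be trylocked here; on contention the LRU lock is
 * dropped and the scan retried rather than risking an ABBA deadlock.
 */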

/**
 * __shrink_dcache_sb - shrink the dentry LRU on a given superblock
 * @sb:    superblock to shrink dentry LRU.
 * @count: number of entries to prune
 * @flags: flags to control the dentry processing
 *
 * If flags contains DCACHE_REFERENCED reference dentries will not be pruned.
 */
static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
{
	/* called from prune_dcache() and shrink_dcache_parent() */
	struct dentry *dentry;
	LIST_HEAD(referenced);
	LIST_HEAD(tmp);
	int cnt = *count;

	spin_lock(&dcache_inode_lock);
relock:
	spin_lock(&dcache_lru_lock);
	while (!list_empty(&sb->s_dentry_lru)) {
		dentry = list_entry(sb->s_dentry_lru.prev,
				struct dentry, d_lru);
		BUG_ON(dentry->d_sb != sb);

		if (!spin_trylock(&dentry->d_lock)) {
			spin_unlock(&dcache_lru_lock);
			cpu_relax();
			goto relock;
		}

		/*
		 * If we are honouring the DCACHE_REFERENCED flag and the
		 * dentry has this flag set, don't free it. Clear the flag
		 * and put it back on the LRU.
		 */
		if (flags & DCACHE_REFERENCED &&
		    dentry->d_flags & DCACHE_REFERENCED) {
			dentry->d_flags &= ~DCACHE_REFERENCED;
			list_move(&dentry->d_lru, &referenced);
			spin_unlock(&dentry->d_lock);
		} else {
			list_move_tail(&dentry->d_lru, &tmp);
			spin_unlock(&dentry->d_lock);
			if (!--cnt)
				break;
		}
		/* XXX: re-add cond_resched_lock when dcache_lock goes away */
	}

	*count = cnt;
	shrink_dentry_list(&tmp);

	if (!list_empty(&referenced))
		list_splice(&referenced, &sb->s_dentry_lru);
	spin_unlock(&dcache_lru_lock);
	spin_unlock(&dcache_inode_lock);
}

/**
 * prune_dcache - shrink the dcache
 * @count: number of entries to try to free
 *
 * Shrink the dcache. This is done when we need more memory, or simply when we
 * need to unmount something (at which point we need to unuse all dentries).
 *
 * This function may fail to free any resources if all the dentries are in use.
 */
static void prune_dcache(int count)
{
	struct super_block *sb, *p = NULL;
	int w_count;
	int unused = dentry_stat.nr_unused;
	int prune_ratio;
	int pruned;

	if (unused == 0 || count == 0)
		return;
	if (count >= unused)
		prune_ratio = 1;
	else
		prune_ratio = unused / count;
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		if (sb->s_nr_dentry_unused == 0)
			continue;
		sb->s_count++;
		/* Now, we reclaim unused dentries with fairness.
		 * We reclaim the same percentage from each superblock.
		 * We calculate number of dentries to scan on this sb
		 * as follows, but the implementation is arranged to avoid
		 * overflows:
		 * number of dentries to scan on this sb =
		 * count * (number of dentries on this sb /
		 * number of dentries in the machine)
		 */
		spin_unlock(&sb_lock);
		if (prune_ratio != 1)
			w_count = (sb->s_nr_dentry_unused / prune_ratio) + 1;
		else
			w_count = sb->s_nr_dentry_unused;
		pruned = w_count;
		/*
		 * We need to be sure this filesystem isn't being unmounted,
		 * otherwise we could race with generic_shutdown_super(), and
		 * end up holding a reference to an inode while the filesystem
		 * is unmounted. So we try to get s_umount, and make sure
		 * s_root isn't NULL.
		 */
		if (down_read_trylock(&sb->s_umount)) {
			if ((sb->s_root != NULL) &&
			    (!list_empty(&sb->s_dentry_lru))) {
				__shrink_dcache_sb(sb, &w_count,
						DCACHE_REFERENCED);
				pruned -= w_count;
			}
			up_read(&sb->s_umount);
		}
		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		count -= pruned;
		p = sb;
		/* more work left to do? */
		if (count <= 0)
			break;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
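
/*
 * Worked example of the proportional scan above: with count == 128
 * entries requested and unused == 1024 system-wide, prune_ratio is 8,
 * so a superblock holding s_nr_dentry_unused == 100 unused dentries is
 * asked to scan 100 / 8 + 1 == 13 of them - roughly its fair share of
 * the global target.
 */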

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	LIST_HEAD(tmp);

	spin_lock(&dcache_inode_lock);
	spin_lock(&dcache_lru_lock);
	while (!list_empty(&sb->s_dentry_lru)) {
		list_splice_init(&sb->s_dentry_lru, &tmp);
		shrink_dentry_list(&tmp);
	}
	spin_unlock(&dcache_lru_lock);
	spin_unlock(&dcache_inode_lock);
}
EXPORT_SYMBOL(shrink_dcache_sb);

/*
 * destroy a single subtree of dentries for unmount
 * - see the comments on shrink_dcache_for_umount() for a description of the
 *   locking
 */
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
	struct dentry *parent;
	unsigned detached = 0;

	BUG_ON(!IS_ROOT(dentry));

	/* detach this root from the system */
	spin_lock(&dentry->d_lock);
	dentry_lru_del(dentry);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);

	for (;;) {
		/* descend to the first leaf in the current subtree */
		while (!list_empty(&dentry->d_subdirs)) {
			struct dentry *loop;

			/* this is a branch with children - detach all of them
			 * from the system in one go */
			spin_lock(&dentry->d_lock);
			list_for_each_entry(loop, &dentry->d_subdirs,
					    d_u.d_child) {
				spin_lock_nested(&loop->d_lock,
						DENTRY_D_LOCK_NESTED);
				dentry_lru_del(loop);
				__d_drop(loop);
				spin_unlock(&loop->d_lock);
			}
			spin_unlock(&dentry->d_lock);

			/* move to the first child */
			dentry = list_entry(dentry->d_subdirs.next,
					    struct dentry, d_u.d_child);
		}

		/* consume the dentries from this leaf up through its parents
		 * until we find one with children or run out altogether */
		do {
			struct inode *inode;

			if (dentry->d_count != 0) {
				printk(KERN_ERR
				       "BUG: Dentry %p{i=%lx,n=%s}"
				       " still in use (%d)"
				       " [unmount of %s %s]\n",
				       dentry,
				       dentry->d_inode ?
				       dentry->d_inode->i_ino : 0UL,
				       dentry->d_name.name,
				       dentry->d_count,
				       dentry->d_sb->s_type->name,
				       dentry->d_sb->s_id);
				BUG();
			}

			if (IS_ROOT(dentry)) {
				parent = NULL;
				list_del(&dentry->d_u.d_child);
			} else {
				parent = dentry->d_parent;
				spin_lock(&parent->d_lock);
				parent->d_count--;
				list_del(&dentry->d_u.d_child);
				spin_unlock(&parent->d_lock);
			}

			detached++;

			inode = dentry->d_inode;
			if (inode) {
				dentry->d_inode = NULL;
				list_del_init(&dentry->d_alias);
				if (dentry->d_op && dentry->d_op->d_iput)
					dentry->d_op->d_iput(dentry, inode);
				else
					iput(inode);
			}

			d_free(dentry);

			/* finished when we fall off the top of the tree,
			 * otherwise we ascend to the parent and move to the
			 * next sibling if there is one */
			if (!parent)
				return;
			dentry = parent;
		} while (list_empty(&dentry->d_subdirs));

		dentry = list_entry(dentry->d_subdirs.next,
				    struct dentry, d_u.d_child);
	}
}

/*
 * destroy the dentries attached to a superblock on unmounting
 * - we don't need to use dentry->d_lock because:
 *   - the superblock is detached from all mountings and open files, so the
 *     dentry trees will not be rearranged by the VFS
 *   - s_umount is write-locked, so the memory pressure shrinker will ignore
 *     any dentries belonging to this superblock that it comes across
 *   - the filesystem itself is no longer permitted to rearrange the dentries
 *     in this superblock
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	if (down_read_trylock(&sb->s_umount))
		BUG();

	dentry = sb->s_root;
	sb->s_root = NULL;
	spin_lock(&dentry->d_lock);
	dentry->d_count--;
	spin_unlock(&dentry->d_lock);
	shrink_dcache_for_umount_subtree(dentry);

	while (!hlist_empty(&sb->s_anon)) {
		dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash);
		shrink_dcache_for_umount_subtree(dentry);
	}
}

/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */
int have_submounts(struct dentry *parent)
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq;
	int locked = 0;

	seq = read_seqbegin(&rename_lock);
again:
	this_parent = parent;

	if (d_mountpoint(parent))
		goto positive;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		/* Have we found a mount point ? */
		if (d_mountpoint(dentry)) {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&this_parent->d_lock);
			goto positive;
		}
		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *tmp;
		struct dentry *child;

		tmp = this_parent->d_parent;
		rcu_read_lock();
		spin_unlock(&this_parent->d_lock);
		child = this_parent;
		this_parent = tmp;
		spin_lock(&this_parent->d_lock);
		/* might go back up the wrong parent if we have had a rename
		 * or deletion */
		if (this_parent != child->d_parent ||
		    (!locked && read_seqretry(&rename_lock, seq))) {
			spin_unlock(&this_parent->d_lock);
			rcu_read_unlock();
			goto rename_retry;
		}
		rcu_read_unlock();
		next = child->d_u.d_child.next;
		goto resume;
	}
	spin_unlock(&this_parent->d_lock);
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return 0; /* No mount points found in tree */
positive:
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return 1;

rename_retry:
	locked = 1;
	write_seqlock(&rename_lock);
	goto again;
}
EXPORT_SYMBOL(have_submounts);
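
/*
 * The rename_retry scheme above is optimistic: the walk first runs
 * under the rename_lock read seqlock, and only if a concurrent rename
 * invalidates the sequence is it restarted holding the write side
 * (locked = 1), which excludes renames entirely and so guarantees the
 * retry terminates.
 */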

/*
 * Search the dentry child list for the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */
static int select_parent(struct dentry * parent)
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq;
	int found = 0;
	int locked = 0;

	seq = read_seqbegin(&rename_lock);
again:
	this_parent = parent;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		/*
		 * move only zero ref count dentries to the end
		 * of the unused list for prune_dcache
		 */
		if (!dentry->d_count) {
			dentry_lru_move_tail(dentry);
			found++;
		} else {
			dentry_lru_del(dentry);
		}

		/*
		 * We can return to the caller if we have found some (this
		 * ensures forward progress). We'll be coming back to find
		 * the rest.
		 */
		if (found && need_resched()) {
			spin_unlock(&dentry->d_lock);
			goto out;
		}

		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}

		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *tmp;
		struct dentry *child;

		tmp = this_parent->d_parent;
		rcu_read_lock();
		spin_unlock(&this_parent->d_lock);
		child = this_parent;
		this_parent = tmp;
		spin_lock(&this_parent->d_lock);
		/* might go back up the wrong parent if we have had a rename
		 * or deletion */
		if (this_parent != child->d_parent ||
		    (!locked && read_seqretry(&rename_lock, seq))) {
			spin_unlock(&this_parent->d_lock);
			rcu_read_unlock();
			goto rename_retry;
		}
		rcu_read_unlock();
		next = child->d_u.d_child.next;
		goto resume;
	}
out:
	spin_unlock(&this_parent->d_lock);
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return found;

rename_retry:
	if (found)
		return found;
	locked = 1;
	write_seqlock(&rename_lock);
	goto again;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */

void shrink_dcache_parent(struct dentry * parent)
{
	struct super_block *sb = parent->d_sb;
	int found;

	while ((found = select_parent(parent)) != 0)
		__shrink_dcache_sb(sb, &found, 0);
}
EXPORT_SYMBOL(shrink_dcache_parent);

/*
 * Scan `nr' dentries and return the number which remain.
 *
 * We need to avoid reentering the filesystem if the caller is performing a
 * GFP_NOFS allocation attempt. One example deadlock is:
 *
 * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache->
 * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode->
 * ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK.
 *
 * In this case we return -1 to tell the caller that we bailed.
 */
static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
	if (nr) {
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_dcache(nr);
	}

	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker dcache_shrinker = {
	.shrink = shrink_dcache_memory,
	.seeks = DEFAULT_SEEKS,
};

/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On success the dentry is returned. The name passed in is
 * copied and the copy may be reused after this call.
 */

struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	if (name->len > DNAME_INLINE_LEN-1) {
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else {
		dname = dentry->d_iname;
	}
	dentry->d_name.name = dname;

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	dentry->d_count = 1;
	dentry->d_flags = DCACHE_UNHASHED;
	spin_lock_init(&dentry->d_lock);
	dentry->d_inode = NULL;
	dentry->d_parent = NULL;
	dentry->d_sb = NULL;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	dentry->d_mounted = 0;
	INIT_HLIST_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);
	INIT_LIST_HEAD(&dentry->d_u.d_child);

	if (parent) {
		spin_lock(&parent->d_lock);
		/*
		 * don't need child lock because it is not subject
		 * to concurrency here
		 */
		__dget_dlock(parent);
		dentry->d_parent = parent;
		dentry->d_sb = parent->d_sb;
		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
		spin_unlock(&parent->d_lock);
	}

	this_cpu_inc(nr_dentry);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);

static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	spin_lock(&dentry->d_lock);
	if (inode)
		list_add(&dentry->d_alias, &inode->i_dentry);
	dentry->d_inode = inode;
	spin_unlock(&dentry->d_lock);
	fsnotify_d_instantiate(dentry, inode);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */

void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!list_empty(&entry->d_alias));
	spin_lock(&dcache_inode_lock);
	__d_instantiate(entry, inode);
	spin_unlock(&dcache_inode_lock);
	security_d_instantiate(entry, inode);
}
EXPORT_SYMBOL(d_instantiate);

/**
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to inode.
 *
 * Note that in order to avoid conflicts with rename() etc, the caller
 * had better be holding the parent directory semaphore.
 *
 * This also assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		__d_instantiate(entry, NULL);
		return NULL;
	}

	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct qstr *qstr = &alias->d_name;

		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (qstr->hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (qstr->len != len)
			continue;
		if (memcmp(qstr->name, name, len))
			continue;
		__dget(alias);
		return alias;
	}

	__d_instantiate(entry, inode);
	return NULL;
}

struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!list_empty(&entry->d_alias));

	spin_lock(&dcache_inode_lock);
	result = __d_instantiate_unique(entry, inode);
	spin_unlock(&dcache_inode_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}

EXPORT_SYMBOL(d_instantiate_unique);

/**
 * d_alloc_root - allocate root dentry
 * @root_inode: inode to allocate the root for
 *
 * Allocate a root ("/") dentry for the inode given. The inode is
 * instantiated and returned. %NULL is returned if there is insufficient
 * memory or the inode passed is %NULL.
 */

struct dentry * d_alloc_root(struct inode * root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		static const struct qstr name = { .name = "/", .len = 1 };

		res = d_alloc(NULL, &name);
		if (res) {
			res->d_sb = root_inode->i_sb;
			res->d_parent = res;
			d_instantiate(res, root_inode);
		}
	}
	return res;
}
EXPORT_SYMBOL(d_alloc_root);

static inline struct hlist_head *d_hash(struct dentry *parent,
					unsigned long hash)
{
	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}

/**
 * d_obtain_alias - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations. The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry. If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	static const struct qstr anonstring = { .name = "" };
	struct dentry *tmp;
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_alias(inode);
	if (res)
		goto out_iput;

	tmp = d_alloc(NULL, &anonstring);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}
	tmp->d_parent = tmp; /* make sure dput doesn't croak */

	spin_lock(&dcache_inode_lock);
	res = __d_find_alias(inode, 0);
	if (res) {
		spin_unlock(&dcache_inode_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	spin_lock(&tmp->d_lock);
	tmp->d_sb = inode->i_sb;
	tmp->d_inode = inode;
	tmp->d_flags |= DCACHE_DISCONNECTED;
	tmp->d_flags &= ~DCACHE_UNHASHED;
	list_add(&tmp->d_alias, &inode->i_dentry);
	spin_lock(&dcache_hash_lock);
	hlist_add_head(&tmp->d_hash, &inode->i_sb->s_anon);
	spin_unlock(&dcache_hash_lock);
	spin_unlock(&tmp->d_lock);
	spin_unlock(&dcache_inode_lock);

	return tmp;

out_iput:
	iput(inode);
	return res;
}
EXPORT_SYMBOL(d_obtain_alias);

/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
 * and return it, else simply d_add the inode to the dentry and return NULL.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned. Otherwise NULL
 * is returned. This matches the expected return value of ->lookup.
 *
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&dcache_inode_lock);
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
			spin_unlock(&dcache_inode_lock);
			security_d_instantiate(new, inode);
			d_move(new, dentry);
			iput(inode);
		} else {
			/* already taking dcache_inode_lock, so d_add() by hand */
			__d_instantiate(dentry, inode);
			spin_unlock(&dcache_inode_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else
		d_add(dentry, inode);
	return new;
}
EXPORT_SYMBOL(d_splice_alias);

/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode:  the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match, if the case-exact dentry
 * already exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	int error;
	struct dentry *found;
	struct dentry *new;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (!found) {
		new = d_alloc(dentry->d_parent, name);
		if (!new) {
			error = -ENOMEM;
			goto err_out;
		}

		found = d_splice_alias(inode, new);
		if (found) {
			dput(new);
			return found;
		}
		return new;
	}

	/*
	 * If a matching dentry exists, and it's not negative use it.
	 *
	 * Decrement the reference count to balance the iget() done
	 * earlier on.
	 */
	if (found->d_inode) {
		if (unlikely(found->d_inode != inode)) {
			/* This can't happen because bad inodes are unhashed. */
			BUG_ON(!is_bad_inode(inode));
			BUG_ON(!is_bad_inode(found->d_inode));
		}
		iput(inode);
		return found;
	}

	/*
	 * Negative dentry: instantiate it unless the inode is a directory and
	 * already has a dentry.
	 */
	spin_lock(&dcache_inode_lock);
	if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) {
		__d_instantiate(found, inode);
		spin_unlock(&dcache_inode_lock);
		security_d_instantiate(found, inode);
		return found;
	}

	/*
	 * In case a directory already has a (disconnected) entry grab a
	 * reference to it, move it in place and use it.
	 */
	new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
	__dget(new);
	spin_unlock(&dcache_inode_lock);
	security_d_instantiate(found, inode);
	d_move(new, found);
	iput(inode);
	dput(found);
	return new;

err_out:
	iput(inode);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(d_add_ci);

/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * d_lookup searches the children of the parent dentry for the name in
 * question. If the dentry is found its reference count is incremented and the
 * dentry is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned if the dentry does not exist.
 */
struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
{
	struct dentry * dentry = NULL;
	unsigned seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}
EXPORT_SYMBOL(d_lookup);

/*
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, eg. with a following d_lookup in
 * the case of failure.
 *
 * __d_lookup callers must be commented.
 */
struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_head *head = d_hash(parent,hash);
	struct dentry *found = NULL;
	struct hlist_node *node;
	struct dentry *dentry;

	/*
	 * The hash list is protected using RCU.
	 *
	 * Take d_lock when comparing a candidate dentry, to avoid races
	 * with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/dcache-locking.txt for more details.
	 */
	rcu_read_lock();

	hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
		struct qstr *qstr;

		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;

		spin_lock(&dentry->d_lock);

		/*
		 * Recheck the dentry after taking the lock - d_move may have
		 * changed things. Don't bother checking the hash because
		 * we're about to compare the whole name anyway.
		 */
		if (dentry->d_parent != parent)
			goto next;

		/* non-existing due to RCU? */
		if (d_unhashed(dentry))
			goto next;

		/*
		 * It is safe to compare names since d_move() cannot
		 * change the qstr (protected by d_lock).
		 */
		qstr = &dentry->d_name;
		if (parent->d_op && parent->d_op->d_compare) {
			if (parent->d_op->d_compare(parent, parent->d_inode,
						dentry, dentry->d_inode,
						qstr->len, qstr->name, name))
				goto next;
		} else {
			if (qstr->len != len)
				goto next;
			if (memcmp(qstr->name, str, len))
				goto next;
		}

		dentry->d_count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}
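
/*
 * An illustrative caller pattern for the racy variant (it mirrors what
 * d_lookup() itself does with the rename_lock seqlock):
 *
 *	dentry = __d_lookup(parent, name);
 *	if (!dentry)
 *		dentry = d_lookup(parent, name);
 *
 * so the rare false negative is simply retried under the seqlock.
 */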

/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On hash failure or on lookup failure NULL is returned.
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry = NULL;

	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
	if (dir->d_op && dir->d_op->d_hash) {
		if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0)
			goto out;
	}
	dentry = d_lookup(dir, name);
out:
	return dentry;
}

/**
 * d_validate - verify dentry provided from insecure source (deprecated)
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
 *
 * This function is slow for big directories, and deprecated, do not use it.
 */
int d_validate(struct dentry *dentry, struct dentry *dparent)
{
	struct dentry *child;

	spin_lock(&dparent->d_lock);
	list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
		if (dentry == child) {
			spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
			__dget_dlock(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dparent->d_lock);
			return 1;
		}
	}
	spin_unlock(&dparent->d_lock);

	return 0;
}
EXPORT_SYMBOL(d_validate);

/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later
 */

void d_delete(struct dentry * dentry)
{
	int isdir = 0;
	/*
	 * Are we the only user?
	 */
again:
	spin_lock(&dentry->d_lock);
	isdir = S_ISDIR(dentry->d_inode->i_mode);
	if (dentry->d_count == 1) {
		if (!spin_trylock(&dcache_inode_lock)) {
			spin_unlock(&dentry->d_lock);
			cpu_relax();
			goto again;
		}
		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
		dentry_iput(dentry);
		fsnotify_nameremove(dentry, isdir);
		return;
	}

	if (!d_unhashed(dentry))
		__d_drop(dentry);

	spin_unlock(&dentry->d_lock);

	fsnotify_nameremove(dentry, isdir);
}
EXPORT_SYMBOL(d_delete);

static void __d_rehash(struct dentry * entry, struct hlist_head *list)
{
	entry->d_flags &= ~DCACHE_UNHASHED;
	hlist_add_head_rcu(&entry->d_hash, list);
}

static void _d_rehash(struct dentry * entry)
{
	__d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
}

/**
 * d_rehash - add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */

void d_rehash(struct dentry * entry)
{
	spin_lock(&entry->d_lock);
	spin_lock(&dcache_hash_lock);
	_d_rehash(entry);
	spin_unlock(&dcache_hash_lock);
	spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);

/**
 * dentry_update_name_case - update case insensitive dentry with a new name
 * @dentry: dentry to be updated
 * @name: new name
 *
 * Update a case insensitive dentry with new case of name.
 *
 * dentry must have been returned by d_lookup with name @name. Old and new
 * name lengths must match (ie. no d_compare which allows mismatched name
 * lengths).
 *
 * Parent inode i_mutex must be held over d_lookup and into this call (to
 * keep renames and concurrent inserts, and readdir(2) away).
 */
void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
{
	BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
	BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */

	spin_lock(&dentry->d_lock);
	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(dentry_update_name_case);

static void switch_names(struct dentry *dentry, struct dentry *target)
{
	if (dname_external(target)) {
		if (dname_external(dentry)) {
			/*
			 * Both external: swap the pointers
			 */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external. Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (dname_external(dentry)) {
			/*
			 * dentry:external, target:internal. Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal. Just copy target to dentry
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			dentry->d_name.len = target->d_name.len;
			return;
		}
	}
	swap(dentry->d_name.len, target->d_name.len);
}

static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
{
	/*
	 * XXXX: do we really need to take target->d_lock?
	 */
	if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
		spin_lock(&target->d_parent->d_lock);
	else {
		if (d_ancestor(dentry->d_parent, target->d_parent)) {
			spin_lock(&dentry->d_parent->d_lock);
			spin_lock_nested(&target->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		} else {
			spin_lock(&target->d_parent->d_lock);
			spin_lock_nested(&dentry->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		}
	}
	if (target < dentry) {
		spin_lock_nested(&target->d_lock, 2);
		spin_lock_nested(&dentry->d_lock, 3);
	} else {
		spin_lock_nested(&dentry->d_lock, 2);
		spin_lock_nested(&target->d_lock, 3);
	}
}
1968
1969 static void dentry_unlock_parents_for_move(struct dentry *dentry,
1970 struct dentry *target)
1971 {
1972 if (target->d_parent != dentry->d_parent)
1973 spin_unlock(&dentry->d_parent->d_lock);
1974 if (target->d_parent != target)
1975 spin_unlock(&target->d_parent->d_lock);
1976 }
1977
1978 /*
1979 * When switching names, the actual string doesn't strictly have to
1980 * be preserved in the target - because we're dropping the target
1981 * anyway. As such, we can just do a simple memcpy() to copy over
1982 * the new name before we switch.
1983 *
1984 * Note that we have to be a lot more careful about getting the hash
1985 * switched - we have to switch the hash value properly even if it
1986 * then no longer matches the actual (corrupted) string of the target.
1987 * The hash value has to match the hash queue that the dentry is on..
1988 */
1989 /**
1990 * d_move - move a dentry
1991 * @dentry: entry to move
1992 * @target: new dentry
1993 *
1994 * Update the dcache to reflect the move of a file name. Negative
1995 * dcache entries should not be moved in this way.
1996 */
1997 void d_move(struct dentry * dentry, struct dentry * target)
1998 {
1999 if (!dentry->d_inode)
2000 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2001
2002 BUG_ON(d_ancestor(dentry, target));
2003 BUG_ON(d_ancestor(target, dentry));
2004
2005 write_seqlock(&rename_lock);
2006
2007 dentry_lock_for_move(dentry, target);
2008
2009 	/* Move the dentry to the target hash queue, if it is on a different bucket */
2010 spin_lock(&dcache_hash_lock);
2011 if (!d_unhashed(dentry))
2012 hlist_del_rcu(&dentry->d_hash);
2013 __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
2014 spin_unlock(&dcache_hash_lock);
2015
2016 /* Unhash the target: dput() will then get rid of it */
2017 __d_drop(target);
2018
2019 list_del(&dentry->d_u.d_child);
2020 list_del(&target->d_u.d_child);
2021
2022 	/* Switch the names... */
2023 switch_names(dentry, target);
2024 swap(dentry->d_name.hash, target->d_name.hash);
2025
2026 /* ... and switch the parents */
2027 if (IS_ROOT(dentry)) {
2028 dentry->d_parent = target->d_parent;
2029 target->d_parent = target;
2030 INIT_LIST_HEAD(&target->d_u.d_child);
2031 } else {
2032 swap(dentry->d_parent, target->d_parent);
2033
2034 /* And add them back to the (new) parent lists */
2035 list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
2036 }
2037
2038 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
2039
2040 dentry_unlock_parents_for_move(dentry, target);
2041 spin_unlock(&target->d_lock);
2042 fsnotify_d_move(dentry);
2043 spin_unlock(&dentry->d_lock);
2044 write_sequnlock(&rename_lock);
2045 }
2046 EXPORT_SYMBOL(d_move);
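
/*
 * Illustrative sketch, not part of this file: most filesystems never
 * call d_move() directly, as the VFS moves the dentry itself after a
 * successful ->rename(). A network filesystem doing its own dcache
 * update after a server-side rename would look roughly like this
 * ("example_server_rename" is hypothetical):
 *
 *	err = example_server_rename(old_dir, old_dentry,
 *				    new_dir, new_dentry);
 *	if (!err)
 *		d_move(old_dentry, new_dentry);
 */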
2047
2048 /**
2049 * d_ancestor - search for an ancestor
2050 * @p1: ancestor dentry
2051 * @p2: child dentry
2052 *
2053  * If p1 is an ancestor of p2, returns the ancestor of p2 that is a
2054  * direct child of p1; otherwise returns NULL.
2055 */
2056 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2057 {
2058 struct dentry *p;
2059
2060 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2061 if (p->d_parent == p1)
2062 return p;
2063 }
2064 return NULL;
2065 }
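
/*
 * Worked example: for a tree a/b/c, d_ancestor(a, c) walks up from c,
 * sees that b's parent is a, and returns b; d_ancestor(c, a) finds no
 * such link and returns NULL. d_move() above relies on a NULL result
 * in both directions to rule out loops.
 */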
2066
2067 /*
2068 * This helper attempts to cope with remotely renamed directories
2069 *
2070 * It assumes that the caller is already holding
2071 * dentry->d_parent->d_inode->i_mutex and the dcache_inode_lock
2072 *
2073 * Note: If ever the locking in lock_rename() changes, then please
2074 * remember to update this too...
2075 */
2076 static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias)
2077 __releases(dcache_inode_lock)
2078 {
2079 struct mutex *m1 = NULL, *m2 = NULL;
2080 struct dentry *ret;
2081
2082 /* If alias and dentry share a parent, then no extra locks required */
2083 if (alias->d_parent == dentry->d_parent)
2084 goto out_unalias;
2085
2086 /* Check for loops */
2087 ret = ERR_PTR(-ELOOP);
2088 if (d_ancestor(alias, dentry))
2089 goto out_err;
2090
2091 /* See lock_rename() */
2092 ret = ERR_PTR(-EBUSY);
2093 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2094 goto out_err;
2095 m1 = &dentry->d_sb->s_vfs_rename_mutex;
2096 if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
2097 goto out_err;
2098 m2 = &alias->d_parent->d_inode->i_mutex;
2099 out_unalias:
2100 d_move(alias, dentry);
2101 ret = alias;
2102 out_err:
2103 spin_unlock(&dcache_inode_lock);
2104 if (m2)
2105 mutex_unlock(m2);
2106 if (m1)
2107 mutex_unlock(m1);
2108 return ret;
2109 }
2110
2111 /*
2112 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
2113 * named dentry in place of the dentry to be replaced.
2114  * Returns with anon->d_lock held!
2115 */
2116 static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
2117 {
2118 struct dentry *dparent, *aparent;
2119
2120 dentry_lock_for_move(anon, dentry);
2121
2122 dparent = dentry->d_parent;
2123 aparent = anon->d_parent;
2124
2125 switch_names(dentry, anon);
2126 swap(dentry->d_name.hash, anon->d_name.hash);
2127
2128 dentry->d_parent = (aparent == anon) ? dentry : aparent;
2129 list_del(&dentry->d_u.d_child);
2130 if (!IS_ROOT(dentry))
2131 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
2132 else
2133 INIT_LIST_HEAD(&dentry->d_u.d_child);
2134
2135 anon->d_parent = (dparent == dentry) ? anon : dparent;
2136 list_del(&anon->d_u.d_child);
2137 if (!IS_ROOT(anon))
2138 list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
2139 else
2140 INIT_LIST_HEAD(&anon->d_u.d_child);
2141
2142 dentry_unlock_parents_for_move(anon, dentry);
2143 spin_unlock(&dentry->d_lock);
2144
2145 /* anon->d_lock still locked, returns locked */
2146 anon->d_flags &= ~DCACHE_DISCONNECTED;
2147 }
2148
2149 /**
2150 * d_materialise_unique - introduce an inode into the tree
2151 * @dentry: candidate dentry
2152 * @inode: inode to bind to the dentry, to which aliases may be attached
2153 *
2154  * Introduces a dentry into the tree, substituting an extant disconnected
2155  * root directory alias in its place if there is one.
2156 */
2157 struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
2158 {
2159 struct dentry *actual;
2160
2161 BUG_ON(!d_unhashed(dentry));
2162
2163 if (!inode) {
2164 actual = dentry;
2165 __d_instantiate(dentry, NULL);
2166 d_rehash(actual);
2167 goto out_nolock;
2168 }
2169
2170 spin_lock(&dcache_inode_lock);
2171
2172 if (S_ISDIR(inode->i_mode)) {
2173 struct dentry *alias;
2174
2175 /* Does an aliased dentry already exist? */
2176 alias = __d_find_alias(inode, 0);
2177 if (alias) {
2178 actual = alias;
2179 /* Is this an anonymous mountpoint that we could splice
2180 * into our tree? */
2181 if (IS_ROOT(alias)) {
2182 __d_materialise_dentry(dentry, alias);
2183 __d_drop(alias);
2184 goto found;
2185 }
2186 /* Nope, but we must(!) avoid directory aliasing */
2187 actual = __d_unalias(dentry, alias);
2188 if (IS_ERR(actual))
2189 dput(alias);
2190 goto out_nolock;
2191 }
2192 }
2193
2194 /* Add a unique reference */
2195 actual = __d_instantiate_unique(dentry, inode);
2196 if (!actual)
2197 actual = dentry;
2198 else
2199 BUG_ON(!d_unhashed(actual));
2200
2201 spin_lock(&actual->d_lock);
2202 found:
2203 spin_lock(&dcache_hash_lock);
2204 _d_rehash(actual);
2205 spin_unlock(&dcache_hash_lock);
2206 spin_unlock(&actual->d_lock);
2207 spin_unlock(&dcache_inode_lock);
2208 out_nolock:
2209 if (actual == dentry) {
2210 security_d_instantiate(dentry, inode);
2211 return NULL;
2212 }
2213
2214 iput(inode);
2215 return actual;
2216 }
2217 EXPORT_SYMBOL_GPL(d_materialise_unique);
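
/*
 * Illustrative sketch, not part of this file: a network filesystem's
 * ->lookup() would typically end like this, letting
 * d_materialise_unique() either bind @dentry to the inode or hand back
 * an already connected alias ("example_iget" and "handle" are
 * hypothetical):
 *
 *	inode = example_iget(dir->i_sb, handle);
 *	if (IS_ERR(inode))
 *		return ERR_CAST(inode);
 *	return d_materialise_unique(dentry, inode);
 */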
2218
2219 static int prepend(char **buffer, int *buflen, const char *str, int namelen)
2220 {
2221 *buflen -= namelen;
2222 if (*buflen < 0)
2223 return -ENAMETOOLONG;
2224 *buffer -= namelen;
2225 memcpy(*buffer, str, namelen);
2226 return 0;
2227 }
2228
2229 static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2230 {
2231 return prepend(buffer, buflen, name->name, name->len);
2232 }
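
/*
 * Worked example of the backwards buffer convention used here and by
 * the path builders below: with a 16 byte buffer buf, *buffer starting
 * at buf + 16 and *buflen 16,
 *
 *	prepend(&buffer, &buflen, "\0", 1);   buffer: "",     *buflen: 15
 *	prepend(&buffer, &buflen, "bar", 3);  buffer: "bar",  *buflen: 12
 *	prepend(&buffer, &buflen, "/", 1);    buffer: "/bar", *buflen: 11
 *
 * The path is assembled right to left, and the final value of *buffer,
 * not the start of buf, is what callers hand onwards.
 */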
2233
2234 /**
2235  * prepend_path - Prepend path string to a buffer
2236 *
2237 * @path: the dentry/vfsmount to report
2238 * @root: root vfsmnt/dentry (may be modified by this function)
2239 * @buffer: pointer to the end of the buffer
2240 * @buflen: pointer to buffer length
2241 *
2242 * Caller holds the rename_lock.
2243 *
2244 * If path is not reachable from the supplied root, then the value of
2245 * root is changed (without modifying refcounts).
2246 */
2247 static int prepend_path(const struct path *path, struct path *root,
2248 char **buffer, int *buflen)
2249 {
2250 struct dentry *dentry = path->dentry;
2251 struct vfsmount *vfsmnt = path->mnt;
2252 bool slash = false;
2253 int error = 0;
2254
2255 br_read_lock(vfsmount_lock);
2256 while (dentry != root->dentry || vfsmnt != root->mnt) {
2257 struct dentry * parent;
2258
2259 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
2260 /* Global root? */
2261 if (vfsmnt->mnt_parent == vfsmnt) {
2262 goto global_root;
2263 }
2264 dentry = vfsmnt->mnt_mountpoint;
2265 vfsmnt = vfsmnt->mnt_parent;
2266 continue;
2267 }
2268 parent = dentry->d_parent;
2269 prefetch(parent);
2270 spin_lock(&dentry->d_lock);
2271 error = prepend_name(buffer, buflen, &dentry->d_name);
2272 spin_unlock(&dentry->d_lock);
2273 if (!error)
2274 error = prepend(buffer, buflen, "/", 1);
2275 if (error)
2276 break;
2277
2278 slash = true;
2279 dentry = parent;
2280 }
2281
2282 out:
2283 if (!error && !slash)
2284 error = prepend(buffer, buflen, "/", 1);
2285
2286 br_read_unlock(vfsmount_lock);
2287 return error;
2288
2289 global_root:
2290 /*
2291 * Filesystems needing to implement special "root names"
2292 * should do so with ->d_dname()
2293 */
2294 if (IS_ROOT(dentry) &&
2295 (dentry->d_name.len != 1 || dentry->d_name.name[0] != '/')) {
2296 WARN(1, "Root dentry has weird name <%.*s>\n",
2297 (int) dentry->d_name.len, dentry->d_name.name);
2298 }
2299 root->mnt = vfsmnt;
2300 root->dentry = dentry;
2301 goto out;
2302 }
2303
2304 /**
2305 * __d_path - return the path of a dentry
2306 * @path: the dentry/vfsmount to report
2307 * @root: root vfsmnt/dentry (may be modified by this function)
2308 * @buf: buffer to return value in
2309 * @buflen: buffer length
2310 *
2311 * Convert a dentry into an ASCII path name.
2312 *
2313 * Returns a pointer into the buffer or an error code if the
2314 * path was too long.
2315 *
2316 * "buflen" should be positive.
2317 *
2318 * If path is not reachable from the supplied root, then the value of
2319 * root is changed (without modifying refcounts).
2320 */
2321 char *__d_path(const struct path *path, struct path *root,
2322 char *buf, int buflen)
2323 {
2324 char *res = buf + buflen;
2325 int error;
2326
2327 prepend(&res, &buflen, "\0", 1);
2328 write_seqlock(&rename_lock);
2329 error = prepend_path(path, root, &res, &buflen);
2330 write_sequnlock(&rename_lock);
2331
2332 if (error)
2333 return ERR_PTR(error);
2334 return res;
2335 }
2336
2337 /*
2338 * same as __d_path but appends "(deleted)" for unlinked files.
2339 */
2340 static int path_with_deleted(const struct path *path, struct path *root,
2341 char **buf, int *buflen)
2342 {
2343 prepend(buf, buflen, "\0", 1);
2344 if (d_unlinked(path->dentry)) {
2345 int error = prepend(buf, buflen, " (deleted)", 10);
2346 if (error)
2347 return error;
2348 }
2349
2350 return prepend_path(path, root, buf, buflen);
2351 }
2352
2353 static int prepend_unreachable(char **buffer, int *buflen)
2354 {
2355 return prepend(buffer, buflen, "(unreachable)", 13);
2356 }
2357
2358 /**
2359 * d_path - return the path of a dentry
2360 * @path: path to report
2361 * @buf: buffer to return value in
2362 * @buflen: buffer length
2363 *
2364 * Convert a dentry into an ASCII path name. If the entry has been deleted
2365 * the string " (deleted)" is appended. Note that this is ambiguous.
2366 *
2367 * Returns a pointer into the buffer or an error code if the path was
2368 * too long. Note: Callers should use the returned pointer, not the passed
2369 * in buffer, to use the name! The implementation often starts at an offset
2370 * into the buffer, and may leave 0 bytes at the start.
2371 *
2372 * "buflen" should be positive.
2373 */
2374 char *d_path(const struct path *path, char *buf, int buflen)
2375 {
2376 char *res = buf + buflen;
2377 struct path root;
2378 struct path tmp;
2379 int error;
2380
2381 /*
2382 * We have various synthetic filesystems that never get mounted. On
2383 * these filesystems dentries are never used for lookup purposes, and
2384 * thus don't need to be hashed. They also don't need a name until a
2385 * user wants to identify the object in /proc/pid/fd/. The little hack
2386 * below allows us to generate a name for these objects on demand:
2387 */
2388 if (path->dentry->d_op && path->dentry->d_op->d_dname)
2389 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
2390
2391 get_fs_root(current->fs, &root);
2392 write_seqlock(&rename_lock);
2393 tmp = root;
2394 error = path_with_deleted(path, &tmp, &res, &buflen);
2395 if (error)
2396 res = ERR_PTR(error);
2397 write_sequnlock(&rename_lock);
2398 path_put(&root);
2399 return res;
2400 }
2401 EXPORT_SYMBOL(d_path);
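
/*
 * Illustrative sketch, not part of this file: a typical caller must
 * use the returned pointer rather than the start of its buffer
 * ("file" is hypothetical):
 *
 *	char *buf = kmalloc(PATH_MAX, GFP_KERNEL);
 *	if (buf) {
 *		char *p = d_path(&file->f_path, buf, PATH_MAX);
 *		if (!IS_ERR(p))
 *			printk(KERN_DEBUG "path: %s\n", p);
 *		kfree(buf);
 *	}
 */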
2402
2403 /**
2404 * d_path_with_unreachable - return the path of a dentry
2405 * @path: path to report
2406 * @buf: buffer to return value in
2407 * @buflen: buffer length
2408 *
2409 * The difference from d_path() is that this prepends "(unreachable)"
2410 * to paths which are unreachable from the current process' root.
2411 */
2412 char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
2413 {
2414 char *res = buf + buflen;
2415 struct path root;
2416 struct path tmp;
2417 int error;
2418
2419 if (path->dentry->d_op && path->dentry->d_op->d_dname)
2420 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
2421
2422 get_fs_root(current->fs, &root);
2423 write_seqlock(&rename_lock);
2424 tmp = root;
2425 error = path_with_deleted(path, &tmp, &res, &buflen);
2426 if (!error && !path_equal(&tmp, &root))
2427 error = prepend_unreachable(&res, &buflen);
2428 write_sequnlock(&rename_lock);
2429 path_put(&root);
2430 if (error)
2431 res = ERR_PTR(error);
2432
2433 return res;
2434 }
2435
2436 /*
2437 * Helper function for dentry_operations.d_dname() members
2438 */
2439 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
2440 const char *fmt, ...)
2441 {
2442 va_list args;
2443 char temp[64];
2444 int sz;
2445
2446 va_start(args, fmt);
2447 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
2448 va_end(args);
2449
2450 if (sz > sizeof(temp) || sz > buflen)
2451 return ERR_PTR(-ENAMETOOLONG);
2452
2453 buffer += buflen - sz;
2454 return memcpy(buffer, temp, sz);
2455 }
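
/*
 * Illustrative sketch, not part of this file: a pipefs-style d_dname
 * method reduces to a one-liner ("example" is a hypothetical name):
 *
 *	static char *example_dname(struct dentry *dentry, char *buffer,
 *				   int buflen)
 *	{
 *		return dynamic_dname(dentry, buffer, buflen,
 *				"example:[%lu]", dentry->d_inode->i_ino);
 *	}
 */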
2456
2457 /*
2458 * Write full pathname from the root of the filesystem into the buffer.
2459 */
2460 static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
2461 {
2462 char *end = buf + buflen;
2463 char *retval;
2464
2465 prepend(&end, &buflen, "\0", 1);
2466 if (buflen < 1)
2467 goto Elong;
2468 /* Get '/' right */
2469 retval = end-1;
2470 *retval = '/';
2471
2472 while (!IS_ROOT(dentry)) {
2473 struct dentry *parent = dentry->d_parent;
2474 int error;
2475
2476 prefetch(parent);
2477 spin_lock(&dentry->d_lock);
2478 error = prepend_name(&end, &buflen, &dentry->d_name);
2479 spin_unlock(&dentry->d_lock);
2480 if (error != 0 || prepend(&end, &buflen, "/", 1) != 0)
2481 goto Elong;
2482
2483 retval = end;
2484 dentry = parent;
2485 }
2486 return retval;
2487 Elong:
2488 return ERR_PTR(-ENAMETOOLONG);
2489 }
2490
2491 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
2492 {
2493 char *retval;
2494
2495 write_seqlock(&rename_lock);
2496 retval = __dentry_path(dentry, buf, buflen);
2497 write_sequnlock(&rename_lock);
2498
2499 return retval;
2500 }
2501 EXPORT_SYMBOL(dentry_path_raw);
2502
2503 char *dentry_path(struct dentry *dentry, char *buf, int buflen)
2504 {
2505 char *p = NULL;
2506 char *retval;
2507
2508 write_seqlock(&rename_lock);
2509 if (d_unlinked(dentry)) {
2510 p = buf + buflen;
2511 if (prepend(&p, &buflen, "//deleted", 10) != 0)
2512 goto Elong;
2513 buflen++;
2514 }
2515 retval = __dentry_path(dentry, buf, buflen);
2516 write_sequnlock(&rename_lock);
2517 if (!IS_ERR(retval) && p)
2518 		*p = '/';	/* restore '/' overwritten with '\0' */
2519 return retval;
2520 Elong:
2521 return ERR_PTR(-ENAMETOOLONG);
2522 }
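
/*
 * Contrast with d_path(): dentry_path() knows nothing about mounts.
 * For a file reachable as /mnt/usb/dir/f, with the filesystem mounted
 * on /mnt/usb, d_path() yields "/mnt/usb/dir/f" while dentry_path()
 * yields "/dir/f", the path from the filesystem root.
 */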
2523
2524 /*
2525 * NOTE! The user-level library version returns a
2526 * character pointer. The kernel system call just
2527 * returns the length of the buffer filled (which
2528 * includes the ending '\0' character), or a negative
2529 * error value. So libc would do something like
2530 *
2531 * char *getcwd(char * buf, size_t size)
2532 * {
2533 * int retval;
2534 *
2535 * retval = sys_getcwd(buf, size);
2536 * if (retval >= 0)
2537 * return buf;
2538 * errno = -retval;
2539 * return NULL;
2540 * }
2541 */
2542 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
2543 {
2544 int error;
2545 struct path pwd, root;
2546 char *page = (char *) __get_free_page(GFP_USER);
2547
2548 if (!page)
2549 return -ENOMEM;
2550
2551 get_fs_root_and_pwd(current->fs, &root, &pwd);
2552
2553 error = -ENOENT;
2554 write_seqlock(&rename_lock);
2555 if (!d_unlinked(pwd.dentry)) {
2556 unsigned long len;
2557 struct path tmp = root;
2558 char *cwd = page + PAGE_SIZE;
2559 int buflen = PAGE_SIZE;
2560
2561 prepend(&cwd, &buflen, "\0", 1);
2562 error = prepend_path(&pwd, &tmp, &cwd, &buflen);
2563 write_sequnlock(&rename_lock);
2564
2565 if (error)
2566 goto out;
2567
2568 /* Unreachable from current root */
2569 if (!path_equal(&tmp, &root)) {
2570 error = prepend_unreachable(&cwd, &buflen);
2571 if (error)
2572 goto out;
2573 }
2574
2575 error = -ERANGE;
2576 len = PAGE_SIZE + page - cwd;
2577 if (len <= size) {
2578 error = len;
2579 if (copy_to_user(buf, cwd, len))
2580 error = -EFAULT;
2581 }
2582 } else {
2583 write_sequnlock(&rename_lock);
2584 }
2585
2586 out:
2587 path_put(&pwd);
2588 path_put(&root);
2589 free_page((unsigned long) page);
2590 return error;
2591 }
2592
2593 /*
2594 * Test whether new_dentry is a subdirectory of old_dentry.
2595 *
2596 * Trivially implemented using the dcache structure
2597 */
2598
2599 /**
2600 * is_subdir - is new dentry a subdirectory of old_dentry
2601 * @new_dentry: new dentry
2602 * @old_dentry: old dentry
2603 *
2604  * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth).
2605 * Returns 0 otherwise.
2606 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
2607 */
2608
2609 int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
2610 {
2611 int result;
2612 unsigned seq;
2613
2614 if (new_dentry == old_dentry)
2615 return 1;
2616
2617 do {
2618 /* for restarting inner loop in case of seq retry */
2619 seq = read_seqbegin(&rename_lock);
2620 /*
2621 		 * Need rcu_read_lock() to protect against the d_parent chain
2622 		 * being changed under us by a concurrent d_move()
2623 */
2624 rcu_read_lock();
2625 if (d_ancestor(old_dentry, new_dentry))
2626 result = 1;
2627 else
2628 result = 0;
2629 rcu_read_unlock();
2630 } while (read_seqretry(&rename_lock, seq));
2631
2632 return result;
2633 }
2634
2635 int path_is_under(struct path *path1, struct path *path2)
2636 {
2637 struct vfsmount *mnt = path1->mnt;
2638 struct dentry *dentry = path1->dentry;
2639 int res;
2640
2641 br_read_lock(vfsmount_lock);
2642 if (mnt != path2->mnt) {
2643 for (;;) {
2644 if (mnt->mnt_parent == mnt) {
2645 br_read_unlock(vfsmount_lock);
2646 return 0;
2647 }
2648 if (mnt->mnt_parent == path2->mnt)
2649 break;
2650 mnt = mnt->mnt_parent;
2651 }
2652 dentry = mnt->mnt_mountpoint;
2653 }
2654 res = is_subdir(dentry, path2->dentry);
2655 br_read_unlock(vfsmount_lock);
2656 return res;
2657 }
2658 EXPORT_SYMBOL(path_is_under);
2659
2660 void d_genocide(struct dentry *root)
2661 {
2662 struct dentry *this_parent;
2663 struct list_head *next;
2664 unsigned seq;
2665 int locked = 0;
2666
2667 seq = read_seqbegin(&rename_lock);
2668 again:
2669 this_parent = root;
2670 spin_lock(&this_parent->d_lock);
2671 repeat:
2672 next = this_parent->d_subdirs.next;
2673 resume:
2674 while (next != &this_parent->d_subdirs) {
2675 struct list_head *tmp = next;
2676 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
2677 next = tmp->next;
2678
2679 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2680 if (d_unhashed(dentry) || !dentry->d_inode) {
2681 spin_unlock(&dentry->d_lock);
2682 continue;
2683 }
2684 if (!list_empty(&dentry->d_subdirs)) {
2685 spin_unlock(&this_parent->d_lock);
2686 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
2687 this_parent = dentry;
2688 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
2689 goto repeat;
2690 }
2691 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
2692 dentry->d_flags |= DCACHE_GENOCIDE;
2693 dentry->d_count--;
2694 }
2695 spin_unlock(&dentry->d_lock);
2696 }
2697 if (this_parent != root) {
2698 struct dentry *tmp;
2699 struct dentry *child;
2700
2701 tmp = this_parent->d_parent;
2702 if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
2703 this_parent->d_flags |= DCACHE_GENOCIDE;
2704 this_parent->d_count--;
2705 }
2706 rcu_read_lock();
2707 spin_unlock(&this_parent->d_lock);
2708 child = this_parent;
2709 this_parent = tmp;
2710 spin_lock(&this_parent->d_lock);
2711 /* might go back up the wrong parent if we have had a rename
2712 * or deletion */
2713 if (this_parent != child->d_parent ||
2714 (!locked && read_seqretry(&rename_lock, seq))) {
2715 spin_unlock(&this_parent->d_lock);
2716 rcu_read_unlock();
2717 goto rename_retry;
2718 }
2719 rcu_read_unlock();
2720 next = child->d_u.d_child.next;
2721 goto resume;
2722 }
2723 spin_unlock(&this_parent->d_lock);
2724 if (!locked && read_seqretry(&rename_lock, seq))
2725 goto rename_retry;
2726 if (locked)
2727 write_sequnlock(&rename_lock);
2728 return;
2729
2730 rename_retry:
2731 locked = 1;
2732 write_seqlock(&rename_lock);
2733 goto again;
2734 }
2735
2736 /**
2737 * find_inode_number - check for dentry with name
2738 * @dir: directory to check
2739 * @name: Name to find.
2740 *
2741 * Check whether a dentry already exists for the given name,
2742 * and return the inode number if it has an inode. Otherwise
2743 * 0 is returned.
2744 *
2745 * This routine is used to post-process directory listings for
2746 * filesystems using synthetic inode numbers, and is necessary
2747 * to keep getcwd() working.
2748 */
2749
2750 ino_t find_inode_number(struct dentry *dir, struct qstr *name)
2751 {
2752 struct dentry * dentry;
2753 ino_t ino = 0;
2754
2755 dentry = d_hash_and_lookup(dir, name);
2756 if (dentry) {
2757 if (dentry->d_inode)
2758 ino = dentry->d_inode->i_ino;
2759 dput(dentry);
2760 }
2761 return ino;
2762 }
2763 EXPORT_SYMBOL(find_inode_number);
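
/*
 * Illustrative sketch, not part of this file: a filesystem inventing
 * inode numbers for readdir can prefer one the dcache already handed
 * out, falling back to a synthetic value ("example_synth_ino" is
 * hypothetical):
 *
 *	ino = find_inode_number(dir, &qname);
 *	if (!ino)
 *		ino = example_synth_ino(dir, &qname);
 */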
2764
2765 static __initdata unsigned long dhash_entries;
2766 static int __init set_dhash_entries(char *str)
2767 {
2768 if (!str)
2769 return 0;
2770 dhash_entries = simple_strtoul(str, &str, 0);
2771 return 1;
2772 }
2773 __setup("dhash_entries=", set_dhash_entries);
2774
2775 static void __init dcache_init_early(void)
2776 {
2777 int loop;
2778
2779 /* If hashes are distributed across NUMA nodes, defer
2780 * hash allocation until vmalloc space is available.
2781 */
2782 if (hashdist)
2783 return;
2784
2785 dentry_hashtable =
2786 alloc_large_system_hash("Dentry cache",
2787 sizeof(struct hlist_head),
2788 dhash_entries,
2789 13,
2790 HASH_EARLY,
2791 &d_hash_shift,
2792 &d_hash_mask,
2793 0);
2794
2795 for (loop = 0; loop < (1 << d_hash_shift); loop++)
2796 INIT_HLIST_HEAD(&dentry_hashtable[loop]);
2797 }
2798
2799 static void __init dcache_init(void)
2800 {
2801 int loop;
2802
2803 /*
2804 * A constructor could be added for stable state like the lists,
2805 * but it is probably not worth it because of the cache nature
2806 * of the dcache.
2807 */
2808 dentry_cache = KMEM_CACHE(dentry,
2809 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
2810
2811 register_shrinker(&dcache_shrinker);
2812
2813 /* Hash may have been set up in dcache_init_early */
2814 if (!hashdist)
2815 return;
2816
2817 dentry_hashtable =
2818 alloc_large_system_hash("Dentry cache",
2819 sizeof(struct hlist_head),
2820 dhash_entries,
2821 13,
2822 0,
2823 &d_hash_shift,
2824 &d_hash_mask,
2825 0);
2826
2827 for (loop = 0; loop < (1 << d_hash_shift); loop++)
2828 INIT_HLIST_HEAD(&dentry_hashtable[loop]);
2829 }
2830
2831 /* SLAB cache for __getname() consumers */
2832 struct kmem_cache *names_cachep __read_mostly;
2833 EXPORT_SYMBOL(names_cachep);
2834
2835 EXPORT_SYMBOL(d_genocide);
2836
2837 void __init vfs_caches_init_early(void)
2838 {
2839 dcache_init_early();
2840 inode_init_early();
2841 }
2842
2843 void __init vfs_caches_init(unsigned long mempages)
2844 {
2845 unsigned long reserve;
2846
2847 /* Base hash sizes on available memory, with a reserve equal to
2848 150% of current kernel size */
2849
2850 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
2851 mempages -= reserve;
2852
2853 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
2854 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2855
2856 dcache_init();
2857 inode_init();
2858 files_init(mempages);
2859 mnt_init();
2860 bdev_cache_init();
2861 chrdev_init();
2862 }