1 /*
2 * fs/dcache.c
3 *
4 * Complete reimplementation
5 * (C) 1997 Thomas Schoebel-Theuer,
6 * with heavy changes by Linus Torvalds
7 */
8
9 /*
10 * Notes on the allocation strategy:
11 *
12 * The dcache is a master of the icache - whenever a dcache entry
13 * exists, the inode will always exist. "iput()" is done either when
14 * the dcache entry is deleted or garbage collected.
15 */
16
17 #include <linux/syscalls.h>
18 #include <linux/string.h>
19 #include <linux/mm.h>
20 #include <linux/fs.h>
21 #include <linux/fsnotify.h>
22 #include <linux/slab.h>
23 #include <linux/init.h>
24 #include <linux/hash.h>
25 #include <linux/cache.h>
26 #include <linux/export.h>
27 #include <linux/mount.h>
28 #include <linux/file.h>
29 #include <asm/uaccess.h>
30 #include <linux/security.h>
31 #include <linux/seqlock.h>
32 #include <linux/swap.h>
33 #include <linux/bootmem.h>
34 #include <linux/fs_struct.h>
35 #include <linux/hardirq.h>
36 #include <linux/bit_spinlock.h>
37 #include <linux/rculist_bl.h>
38 #include <linux/prefetch.h>
39 #include <linux/ratelimit.h>
40 #include <linux/list_lru.h>
41 #include <linux/kasan.h>
42
43 #include "internal.h"
44 #include "mount.h"
45
46 /*
47 * Usage:
48 * dcache->d_inode->i_lock protects:
49 * - i_dentry, d_u.d_alias, d_inode of aliases
50 * dcache_hash_bucket lock protects:
51 * - the dcache hash table
52 * s_anon bl list spinlock protects:
53 * - the s_anon list (see __d_drop)
54 * dentry->d_sb->s_dentry_lru_lock protects:
55 * - the dcache lru lists and counters
56 * d_lock protects:
57 * - d_flags
58 * - d_name
59 * - d_lru
60 * - d_count
61 * - d_unhashed()
62 * - d_parent and d_subdirs
63 * - children's d_child and d_parent
64 * - d_u.d_alias, d_inode
65 *
66 * Ordering:
67 * dentry->d_inode->i_lock
68 * dentry->d_lock
69 * dentry->d_sb->s_dentry_lru_lock
70 * dcache_hash_bucket lock
71 * s_anon lock
72 *
73 * If there is an ancestor relationship:
74 * dentry->d_parent->...->d_parent->d_lock
75 * ...
76 * dentry->d_parent->d_lock
77 * dentry->d_lock
78 *
79 * If no ancestor relationship:
80 * if (dentry1 < dentry2)
81 * dentry1->d_lock
82 * dentry2->d_lock
83 */
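/*
 * Hedged illustration of the last rule above (hypothetical helper, not
 * used anywhere in this file): taking d_lock on two dentries with no
 * ancestor relationship follows the address-order convention, with the
 * usual nesting annotation for lockdep on the second lock.
 */
static inline void example_lock_unrelated_pair(struct dentry *d1,
					       struct dentry *d2)
{
	if (d1 > d2)
		swap(d1, d2);	/* lower address first */
	spin_lock(&d1->d_lock);
	spin_lock_nested(&d2->d_lock, DENTRY_D_LOCK_NESTED);
}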
84 int sysctl_vfs_cache_pressure __read_mostly = 100;
85 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
86
87 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
88
89 EXPORT_SYMBOL(rename_lock);
90
91 static struct kmem_cache *dentry_cache __read_mostly;
92
93 /*
94 * This is the single most critical data structure when it comes
95 * to the dcache: the hashtable for lookups. Somebody should try
96 * to make this good - I've just made it work.
97 *
98 * This hash-function tries to avoid losing too many bits of hash
99 * information, yet avoid using a prime hash-size or similar.
100 */
101
102 static unsigned int d_hash_mask __read_mostly;
103 static unsigned int d_hash_shift __read_mostly;
104
105 static struct hlist_bl_head *dentry_hashtable __read_mostly;
106
107 static inline struct hlist_bl_head *d_hash(unsigned int hash)
108 {
109 return dentry_hashtable + (hash >> (32 - d_hash_shift));
110 }
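/*
 * Hedged usage sketch (hypothetical helper): how a (parent, name) pair
 * maps to its hash chain. full_name_hash() salts the hash with the
 * parent pointer, and d_hash() above folds the high bits down into a
 * bucket index.
 */
static inline struct hlist_bl_head *example_name_to_bucket(
	const struct dentry *parent, const char *name, unsigned int len)
{
	return d_hash(full_name_hash(parent, name, len));
}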
111
112 #define IN_LOOKUP_SHIFT 10
113 static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];
114
115 static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
116 unsigned int hash)
117 {
118 hash += (unsigned long) parent / L1_CACHE_BYTES;
119 return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
120 }
121
122
123 /* Statistics gathering. */
124 struct dentry_stat_t dentry_stat = {
125 .age_limit = 45,
126 };
127
128 static DEFINE_PER_CPU(long, nr_dentry);
129 static DEFINE_PER_CPU(long, nr_dentry_unused);
130
131 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
132
133 /*
134 * Here we resort to our own counters instead of using generic per-cpu counters
135 * for consistency with what the vfs inode code does. We expect to get
136 * better code and performance by having our own specialized counters.
137 *
138 * Please note that the loop is done over all possible CPUs, not over all online
139 * CPUs. The reason for this is that we don't want to play games with CPUs going
140 * on and off. If one of them goes off, we will just keep their counters.
141 *
142 * glommer: See cffbc8a for details, and if you ever intend to change this,
143 * please update all vfs counters to match.
144 */
145 static long get_nr_dentry(void)
146 {
147 int i;
148 long sum = 0;
149 for_each_possible_cpu(i)
150 sum += per_cpu(nr_dentry, i);
151 return sum < 0 ? 0 : sum;
152 }
153
154 static long get_nr_dentry_unused(void)
155 {
156 int i;
157 long sum = 0;
158 for_each_possible_cpu(i)
159 sum += per_cpu(nr_dentry_unused, i);
160 return sum < 0 ? 0 : sum;
161 }
162
163 int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
164 size_t *lenp, loff_t *ppos)
165 {
166 dentry_stat.nr_dentry = get_nr_dentry();
167 dentry_stat.nr_unused = get_nr_dentry_unused();
168 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
169 }
170 #endif
171
172 /*
173 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
174 * The strings are both count bytes long, and count is non-zero.
175 */
176 #ifdef CONFIG_DCACHE_WORD_ACCESS
177
178 #include <asm/word-at-a-time.h>
179 /*
180 * NOTE! 'cs' comes from a dentry, so it has an aligned
181 * allocation for this particular component. We don't
182 * strictly need the load_unaligned_zeropad() safety, but it
183 * doesn't hurt either.
184 *
185 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
186 * need the careful unaligned handling.
187 */
188 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
189 {
190 unsigned long a,b,mask;
191
192 for (;;) {
193 a = *(unsigned long *)cs;
194 b = load_unaligned_zeropad(ct);
195 if (tcount < sizeof(unsigned long))
196 break;
197 if (unlikely(a != b))
198 return 1;
199 cs += sizeof(unsigned long);
200 ct += sizeof(unsigned long);
201 tcount -= sizeof(unsigned long);
202 if (!tcount)
203 return 0;
204 }
205 mask = bytemask_from_count(tcount);
206 return unlikely(!!((a ^ b) & mask));
207 }
208
209 #else
210
211 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
212 {
213 do {
214 if (*cs != *ct)
215 return 1;
216 cs++;
217 ct++;
218 tcount--;
219 } while (tcount);
220 return 0;
221 }
222
223 #endif
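/*
 * Hedged illustration (assumption: a little-endian machine, where
 * bytemask_from_count() keeps exactly the low 'tcount' bytes): the
 * final-word comparison in the word-at-a-time variant is equivalent
 * to this open-coded mask. For exposition only, never built.
 */
static inline int example_tail_differs(unsigned long a, unsigned long b,
				       unsigned tcount)
{
	/* valid for 0 < tcount < sizeof(unsigned long), as in the loop */
	unsigned long mask = ~0ul >> (8 * (sizeof(unsigned long) - tcount));

	return !!((a ^ b) & mask);
}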
224
225 static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
226 {
227 /*
228 * Be careful about RCU walk racing with rename:
229 * use 'lockless_dereference' to fetch the name pointer.
230 *
231 * NOTE! Even if a rename will mean that the length
232 * was not loaded atomically, we don't care. The
233 * RCU walk will check the sequence count eventually,
234 * and catch it. And we won't overrun the buffer,
235 * because we're reading the name pointer atomically,
236 * and a dentry name is guaranteed to be properly
237 * terminated with a NUL byte.
238 *
239 * End result: even if 'len' is wrong, we'll exit
240 * early because the data cannot match (there can
241 * be no NUL in the ct/tcount data)
242 */
243 const unsigned char *cs = lockless_dereference(dentry->d_name.name);
244
245 return dentry_string_cmp(cs, ct, tcount);
246 }
247
248 struct external_name {
249 union {
250 atomic_t count;
251 struct rcu_head head;
252 } u;
253 unsigned char name[];
254 };
255
256 static inline struct external_name *external_name(struct dentry *dentry)
257 {
258 return container_of(dentry->d_name.name, struct external_name, name[0]);
259 }
260
261 static void __d_free(struct rcu_head *head)
262 {
263 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
264
265 kmem_cache_free(dentry_cache, dentry);
266 }
267
268 static void __d_free_external(struct rcu_head *head)
269 {
270 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
271 kfree(external_name(dentry));
272 kmem_cache_free(dentry_cache, dentry);
273 }
274
275 static inline int dname_external(const struct dentry *dentry)
276 {
277 return dentry->d_name.name != dentry->d_iname;
278 }
279
280 static inline void __d_set_inode_and_type(struct dentry *dentry,
281 struct inode *inode,
282 unsigned type_flags)
283 {
284 unsigned flags;
285
286 dentry->d_inode = inode;
287 flags = READ_ONCE(dentry->d_flags);
288 flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
289 flags |= type_flags;
290 WRITE_ONCE(dentry->d_flags, flags);
291 }
292
293 static inline void __d_clear_type_and_inode(struct dentry *dentry)
294 {
295 unsigned flags = READ_ONCE(dentry->d_flags);
296
297 flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
298 WRITE_ONCE(dentry->d_flags, flags);
299 dentry->d_inode = NULL;
300 }
301
302 static void dentry_free(struct dentry *dentry)
303 {
304 WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
305 if (unlikely(dname_external(dentry))) {
306 struct external_name *p = external_name(dentry);
307 if (likely(atomic_dec_and_test(&p->u.count))) {
308 call_rcu(&dentry->d_u.d_rcu, __d_free_external);
309 return;
310 }
311 }
312 /* if dentry was never visible to RCU, immediate free is OK */
313 if (!(dentry->d_flags & DCACHE_RCUACCESS))
314 __d_free(&dentry->d_u.d_rcu);
315 else
316 call_rcu(&dentry->d_u.d_rcu, __d_free);
317 }
318
319 /*
320 * Release the dentry's inode, using the filesystem
321 * d_iput() operation if defined.
322 */
323 static void dentry_unlink_inode(struct dentry * dentry)
324 __releases(dentry->d_lock)
325 __releases(dentry->d_inode->i_lock)
326 {
327 struct inode *inode = dentry->d_inode;
328 bool hashed = !d_unhashed(dentry);
329
330 if (hashed)
331 raw_write_seqcount_begin(&dentry->d_seq);
332 __d_clear_type_and_inode(dentry);
333 hlist_del_init(&dentry->d_u.d_alias);
334 if (hashed)
335 raw_write_seqcount_end(&dentry->d_seq);
336 spin_unlock(&dentry->d_lock);
337 spin_unlock(&inode->i_lock);
338 if (!inode->i_nlink)
339 fsnotify_inoderemove(inode);
340 if (dentry->d_op && dentry->d_op->d_iput)
341 dentry->d_op->d_iput(dentry, inode);
342 else
343 iput(inode);
344 }
345
346 /*
347 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
348 * is in use - which includes both the "real" per-superblock
349 * LRU list _and_ the DCACHE_SHRINK_LIST use.
350 *
351 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
352 * on the shrink list (ie not on the superblock LRU list).
353 *
354 * The per-cpu "nr_dentry_unused" counters are updated with
355 * the DCACHE_LRU_LIST bit.
356 *
357 * These helper functions make sure we always follow the
358 * rules. d_lock must be held by the caller.
359 */
360 #define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
361 static void d_lru_add(struct dentry *dentry)
362 {
363 D_FLAG_VERIFY(dentry, 0);
364 dentry->d_flags |= DCACHE_LRU_LIST;
365 this_cpu_inc(nr_dentry_unused);
366 WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
367 }
368
369 static void d_lru_del(struct dentry *dentry)
370 {
371 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
372 dentry->d_flags &= ~DCACHE_LRU_LIST;
373 this_cpu_dec(nr_dentry_unused);
374 WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
375 }
376
377 static void d_shrink_del(struct dentry *dentry)
378 {
379 D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
380 list_del_init(&dentry->d_lru);
381 dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
382 this_cpu_dec(nr_dentry_unused);
383 }
384
385 static void d_shrink_add(struct dentry *dentry, struct list_head *list)
386 {
387 D_FLAG_VERIFY(dentry, 0);
388 list_add(&dentry->d_lru, list);
389 dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
390 this_cpu_inc(nr_dentry_unused);
391 }
392
393 /*
394 * These can only be called under the global LRU lock, ie during the
395 * callback for freeing the LRU list. "isolate" removes it from the
396 * LRU lists entirely, while shrink_move moves it to the indicated
397 * private list.
398 */
399 static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
400 {
401 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
402 dentry->d_flags &= ~DCACHE_LRU_LIST;
403 this_cpu_dec(nr_dentry_unused);
404 list_lru_isolate(lru, &dentry->d_lru);
405 }
406
407 static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
408 struct list_head *list)
409 {
410 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
411 dentry->d_flags |= DCACHE_SHRINK_LIST;
412 list_lru_isolate_move(lru, &dentry->d_lru, list);
413 }
414
415 /*
416 * dentry_lru_(add|del) must be called with d_lock held.
417 */
418 static void dentry_lru_add(struct dentry *dentry)
419 {
420 if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
421 d_lru_add(dentry);
422 }
423
424 /**
425 * d_drop - drop a dentry
426 * @dentry: dentry to drop
427 *
428 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
429 * be found through a VFS lookup any more. Note that this is different from
430 * deleting the dentry - d_delete will try to mark the dentry negative if
431 * possible, giving a successful _negative_ lookup, while d_drop will
432 * just make the cache lookup fail.
433 *
434 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
435 * reason (NFS timeouts or autofs deletes).
436 *
437 * __d_drop requires dentry->d_lock.
438 */
439 void __d_drop(struct dentry *dentry)
440 {
441 if (!d_unhashed(dentry)) {
442 struct hlist_bl_head *b;
443 /*
444 * Hashed dentries are normally on the dentry hashtable,
445 * with the exception of those newly allocated by
446 * d_obtain_alias, which are always IS_ROOT:
447 */
448 if (unlikely(IS_ROOT(dentry)))
449 b = &dentry->d_sb->s_anon;
450 else
451 b = d_hash(dentry->d_name.hash);
452
453 hlist_bl_lock(b);
454 __hlist_bl_del(&dentry->d_hash);
455 dentry->d_hash.pprev = NULL;
456 hlist_bl_unlock(b);
457 /* After this call, in-progress rcu-walk path lookup will fail. */
458 write_seqcount_invalidate(&dentry->d_seq);
459 }
460 }
461 EXPORT_SYMBOL(__d_drop);
462
463 void d_drop(struct dentry *dentry)
464 {
465 spin_lock(&dentry->d_lock);
466 __d_drop(dentry);
467 spin_unlock(&dentry->d_lock);
468 }
469 EXPORT_SYMBOL(d_drop);
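/*
 * Hedged usage sketch (hypothetical helper and deadline parameter,
 * modeled on the NFS-style timeouts mentioned above): once an entry is
 * deemed stale, dropping it simply makes future cache lookups miss.
 */
static void example_drop_if_stale(struct dentry *dentry,
				  unsigned long deadline)
{
	if (time_after(jiffies, deadline))
		d_drop(dentry);	/* lookup miss from now on, not a negative hit */
}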
470
471 static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
472 {
473 struct dentry *next;
474 /*
475 * Inform d_walk() and shrink_dentry_list() that we are no longer
476 * attached to the dentry tree
477 */
478 dentry->d_flags |= DCACHE_DENTRY_KILLED;
479 if (unlikely(list_empty(&dentry->d_child)))
480 return;
481 __list_del_entry(&dentry->d_child);
482 /*
483 * Cursors can move around the list of children. While we'd been
484 * a normal list member, it didn't matter - ->d_child.next would've
485 * been updated. However, from now on it won't be and for the
486 * things like d_walk() it might end up with a nasty surprise.
487 * Normally d_walk() doesn't care about cursors moving around -
488 * ->d_lock on parent prevents that and since a cursor has no children
489 * of its own, we get through it without ever unlocking the parent.
490 * There is one exception, though - if we ascend from a child that
491 * gets killed as soon as we unlock it, the next sibling is found
492 * using the value left in its ->d_child.next. And if _that_
493 * pointed to a cursor, and cursor got moved (e.g. by lseek())
494 * before d_walk() regains parent->d_lock, we'll end up skipping
495 * everything the cursor had been moved past.
496 *
497 * Solution: make sure that the pointer left behind in ->d_child.next
498 * points to something that won't be moving around. I.e. skip the
499 * cursors.
500 */
501 while (dentry->d_child.next != &parent->d_subdirs) {
502 next = list_entry(dentry->d_child.next, struct dentry, d_child);
503 if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
504 break;
505 dentry->d_child.next = next->d_child.next;
506 }
507 }
508
509 static void __dentry_kill(struct dentry *dentry)
510 {
511 struct dentry *parent = NULL;
512 bool can_free = true;
513 if (!IS_ROOT(dentry))
514 parent = dentry->d_parent;
515
516 /*
517 * The dentry is now unrecoverably dead to the world.
518 */
519 lockref_mark_dead(&dentry->d_lockref);
520
521 /*
522 * inform the fs via d_prune that this dentry is about to be
523 * unhashed and destroyed.
524 */
525 if (dentry->d_flags & DCACHE_OP_PRUNE)
526 dentry->d_op->d_prune(dentry);
527
528 if (dentry->d_flags & DCACHE_LRU_LIST) {
529 if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
530 d_lru_del(dentry);
531 }
532 /* if it was on the hash then remove it */
533 __d_drop(dentry);
534 dentry_unlist(dentry, parent);
535 if (parent)
536 spin_unlock(&parent->d_lock);
537 if (dentry->d_inode)
538 dentry_unlink_inode(dentry);
539 else
540 spin_unlock(&dentry->d_lock);
541 this_cpu_dec(nr_dentry);
542 if (dentry->d_op && dentry->d_op->d_release)
543 dentry->d_op->d_release(dentry);
544
545 spin_lock(&dentry->d_lock);
546 if (dentry->d_flags & DCACHE_SHRINK_LIST) {
547 dentry->d_flags |= DCACHE_MAY_FREE;
548 can_free = false;
549 }
550 spin_unlock(&dentry->d_lock);
551 if (likely(can_free))
552 dentry_free(dentry);
553 }
554
555 /*
556 * Finish off a dentry we've decided to kill.
557 * dentry->d_lock must be held, returns with it unlocked.
559 * Returns dentry requiring refcount drop, or NULL if we're done.
560 */
561 static struct dentry *dentry_kill(struct dentry *dentry)
562 __releases(dentry->d_lock)
563 {
564 struct inode *inode = dentry->d_inode;
565 struct dentry *parent = NULL;
566
567 if (inode && unlikely(!spin_trylock(&inode->i_lock)))
568 goto failed;
569
570 if (!IS_ROOT(dentry)) {
571 parent = dentry->d_parent;
572 if (unlikely(!spin_trylock(&parent->d_lock))) {
573 if (inode)
574 spin_unlock(&inode->i_lock);
575 goto failed;
576 }
577 }
578
579 __dentry_kill(dentry);
580 return parent;
581
582 failed:
583 spin_unlock(&dentry->d_lock);
584 return dentry; /* try again with same dentry */
585 }
586
587 static inline struct dentry *lock_parent(struct dentry *dentry)
588 {
589 struct dentry *parent = dentry->d_parent;
590 if (IS_ROOT(dentry))
591 return NULL;
592 if (unlikely(dentry->d_lockref.count < 0))
593 return NULL;
594 if (likely(spin_trylock(&parent->d_lock)))
595 return parent;
596 rcu_read_lock();
597 spin_unlock(&dentry->d_lock);
598 again:
599 parent = ACCESS_ONCE(dentry->d_parent);
600 spin_lock(&parent->d_lock);
601 /*
602 * We can't blindly lock dentry until we are sure
603 * that we won't violate the locking order.
604 * Any changes of dentry->d_parent must have
605 * been done with parent->d_lock held, so
606 * spin_lock() above is enough of a barrier
607 * for checking if it's still our child.
608 */
609 if (unlikely(parent != dentry->d_parent)) {
610 spin_unlock(&parent->d_lock);
611 goto again;
612 }
613 rcu_read_unlock();
614 if (parent != dentry)
615 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
616 else
617 parent = NULL;
618 return parent;
619 }
620
621 /*
622 * Try to do a lockless dput(), and return whether that was successful.
623 *
624 * If unsuccessful, we return false, having already taken the dentry lock.
625 *
626 * The caller needs to hold the RCU read lock, so that the dentry is
627 * guaranteed to stay around even if the refcount goes down to zero!
628 */
629 static inline bool fast_dput(struct dentry *dentry)
630 {
631 int ret;
632 unsigned int d_flags;
633
634 /*
635 * If we have a d_op->d_delete() operation, we should not
636 * let the dentry count go to zero, so use "put_or_lock".
637 */
638 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
639 return lockref_put_or_lock(&dentry->d_lockref);
640
641 /*
642 * .. otherwise, we can try to just decrement the
643 * lockref optimistically.
644 */
645 ret = lockref_put_return(&dentry->d_lockref);
646
647 /*
648 * If the lockref_put_return() failed due to the lock being held
649 * by somebody else, the fast path has failed. We will need to
650 * get the lock, and then check the count again.
651 */
652 if (unlikely(ret < 0)) {
653 spin_lock(&dentry->d_lock);
654 if (dentry->d_lockref.count > 1) {
655 dentry->d_lockref.count--;
656 spin_unlock(&dentry->d_lock);
657 return 1;
658 }
659 return 0;
660 }
661
662 /*
663 * If we weren't the last ref, we're done.
664 */
665 if (ret)
666 return 1;
667
668 /*
669 * Careful, careful. The reference count went down
670 * to zero, but we don't hold the dentry lock, so
671 * somebody else could get it again, and do another
672 * dput(), and we need to not race with that.
673 *
674 * However, there is a very special and common case
675 * where we don't care, because there is nothing to
676 * do: the dentry is still hashed, it does not have
677 * a 'delete' op, and it's referenced and already on
678 * the LRU list.
679 *
680 * NOTE! Since we aren't locked, these values are
681 * not "stable". However, it is sufficient that at
682 * some point after we dropped the reference the
683 * dentry was hashed and the flags had the proper
684 * value. Other dentry users may have re-gotten
685 * a reference to the dentry and change that, but
686 * our work is done - we can leave the dentry
687 * around with a zero refcount.
688 */
689 smp_rmb();
690 d_flags = ACCESS_ONCE(dentry->d_flags);
691 d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
692
693 /* Nothing to do? Dropping the reference was all we needed? */
694 if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
695 return 1;
696
697 /*
698 * Not the fast normal case? Get the lock. We've already decremented
699 * the refcount, but we'll need to re-check the situation after
700 * getting the lock.
701 */
702 spin_lock(&dentry->d_lock);
703
704 /*
705 * Did somebody else grab a reference to it in the meantime, and
706 * we're no longer the last user after all? Alternatively, somebody
707 * else could have killed it and marked it dead. Either way, we
708 * don't need to do anything else.
709 */
710 if (dentry->d_lockref.count) {
711 spin_unlock(&dentry->d_lock);
712 return 1;
713 }
714
715 /*
716 * Re-get the reference we optimistically dropped. We hold the
717 * lock, and we just tested that it was zero, so we can just
718 * set it to 1.
719 */
720 dentry->d_lockref.count = 1;
721 return 0;
722 }
723
724
725 /*
726 * This is dput
727 *
728 * This is complicated by the fact that we do not want to put
729 * dentries that are no longer on any hash chain on the unused
730 * list: we'd much rather just get rid of them immediately.
731 *
732 * However, that implies that we have to traverse the dentry
733 * tree upwards to the parents which might _also_ now be
734 * scheduled for deletion (it may have been only waiting for
735 * its last child to go away).
736 *
737 * This tail recursion is done by hand as we don't want to depend
738 * on the compiler to always get this right (gcc generally doesn't).
739 * Real recursion would eat up our stack space.
740 */
741
742 /*
743 * dput - release a dentry
744 * @dentry: dentry to release
745 *
746 * Release a dentry. This will drop the usage count and if appropriate
747 * call the dentry unlink method as well as removing it from the queues and
748 * releasing its resources. If the parent dentries were scheduled for release
749 * they too may now get deleted.
750 */
751 void dput(struct dentry *dentry)
752 {
753 if (unlikely(!dentry))
754 return;
755
756 repeat:
757 might_sleep();
758
759 rcu_read_lock();
760 if (likely(fast_dput(dentry))) {
761 rcu_read_unlock();
762 return;
763 }
764
765 /* Slow case: now with the dentry lock held */
766 rcu_read_unlock();
767
768 WARN_ON(d_in_lookup(dentry));
769
770 /* Unreachable? Get rid of it */
771 if (unlikely(d_unhashed(dentry)))
772 goto kill_it;
773
774 if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
775 goto kill_it;
776
777 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
778 if (dentry->d_op->d_delete(dentry))
779 goto kill_it;
780 }
781
782 if (!(dentry->d_flags & DCACHE_REFERENCED))
783 dentry->d_flags |= DCACHE_REFERENCED;
784 dentry_lru_add(dentry);
785
786 dentry->d_lockref.count--;
787 spin_unlock(&dentry->d_lock);
788 return;
789
790 kill_it:
791 dentry = dentry_kill(dentry);
792 if (dentry) {
793 cond_resched();
794 goto repeat;
795 }
796 }
797 EXPORT_SYMBOL(dput);
798
799
800 /* This must be called with d_lock held */
801 static inline void __dget_dlock(struct dentry *dentry)
802 {
803 dentry->d_lockref.count++;
804 }
805
806 static inline void __dget(struct dentry *dentry)
807 {
808 lockref_get(&dentry->d_lockref);
809 }
810
811 struct dentry *dget_parent(struct dentry *dentry)
812 {
813 int gotref;
814 struct dentry *ret;
815
816 /*
817 * Do optimistic parent lookup without any
818 * locking.
819 */
820 rcu_read_lock();
821 ret = ACCESS_ONCE(dentry->d_parent);
822 gotref = lockref_get_not_zero(&ret->d_lockref);
823 rcu_read_unlock();
824 if (likely(gotref)) {
825 if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
826 return ret;
827 dput(ret);
828 }
829
830 repeat:
831 /*
832 * Don't need rcu_dereference because we re-check it was correct under
833 * the lock.
834 */
835 rcu_read_lock();
836 ret = dentry->d_parent;
837 spin_lock(&ret->d_lock);
838 if (unlikely(ret != dentry->d_parent)) {
839 spin_unlock(&ret->d_lock);
840 rcu_read_unlock();
841 goto repeat;
842 }
843 rcu_read_unlock();
844 BUG_ON(!ret->d_lockref.count);
845 ret->d_lockref.count++;
846 spin_unlock(&ret->d_lock);
847 return ret;
848 }
849 EXPORT_SYMBOL(dget_parent);
850
851 /**
852 * d_find_alias - grab a hashed alias of inode
853 * @inode: inode in question
854 *
855 * If inode has a hashed alias, or is a directory and has any alias,
856 * acquire the reference to alias and return it. Otherwise return NULL.
857 * Notice that if inode is a directory there can be only one alias and
858 * it can be unhashed only if it has no children, or if it is the root
859 * of a filesystem, or if the directory was renamed and d_revalidate
860 * was the first vfs operation to notice.
861 *
862 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
863 * any other hashed alias over that one.
864 */
865 static struct dentry *__d_find_alias(struct inode *inode)
866 {
867 struct dentry *alias, *discon_alias;
868
869 again:
870 discon_alias = NULL;
871 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
872 spin_lock(&alias->d_lock);
873 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
874 if (IS_ROOT(alias) &&
875 (alias->d_flags & DCACHE_DISCONNECTED)) {
876 discon_alias = alias;
877 } else {
878 __dget_dlock(alias);
879 spin_unlock(&alias->d_lock);
880 return alias;
881 }
882 }
883 spin_unlock(&alias->d_lock);
884 }
885 if (discon_alias) {
886 alias = discon_alias;
887 spin_lock(&alias->d_lock);
888 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
889 __dget_dlock(alias);
890 spin_unlock(&alias->d_lock);
891 return alias;
892 }
893 spin_unlock(&alias->d_lock);
894 goto again;
895 }
896 return NULL;
897 }
898
899 struct dentry *d_find_alias(struct inode *inode)
900 {
901 struct dentry *de = NULL;
902
903 if (!hlist_empty(&inode->i_dentry)) {
904 spin_lock(&inode->i_lock);
905 de = __d_find_alias(inode);
906 spin_unlock(&inode->i_lock);
907 }
908 return de;
909 }
910 EXPORT_SYMBOL(d_find_alias);
911
912 /*
913 * Try to kill dentries associated with this inode.
914 * WARNING: you must own a reference to inode.
915 */
916 void d_prune_aliases(struct inode *inode)
917 {
918 struct dentry *dentry;
919 restart:
920 spin_lock(&inode->i_lock);
921 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
922 spin_lock(&dentry->d_lock);
923 if (!dentry->d_lockref.count) {
924 struct dentry *parent = lock_parent(dentry);
925 if (likely(!dentry->d_lockref.count)) {
926 __dentry_kill(dentry);
927 dput(parent);
928 goto restart;
929 }
930 if (parent)
931 spin_unlock(&parent->d_lock);
932 }
933 spin_unlock(&dentry->d_lock);
934 }
935 spin_unlock(&inode->i_lock);
936 }
937 EXPORT_SYMBOL(d_prune_aliases);
938
939 static void shrink_dentry_list(struct list_head *list)
940 {
941 struct dentry *dentry, *parent;
942
943 while (!list_empty(list)) {
944 struct inode *inode;
945 dentry = list_entry(list->prev, struct dentry, d_lru);
946 spin_lock(&dentry->d_lock);
947 parent = lock_parent(dentry);
948
949 /*
950 * The dispose list is isolated and dentries are not accounted
951 * to the LRU here, so we can simply remove it from the list
952 * here regardless of whether it is referenced or not.
953 */
954 d_shrink_del(dentry);
955
956 /*
957 * We found an inuse dentry which was not removed from
958 * the LRU because of laziness during lookup. Do not free it.
959 */
960 if (dentry->d_lockref.count > 0) {
961 spin_unlock(&dentry->d_lock);
962 if (parent)
963 spin_unlock(&parent->d_lock);
964 continue;
965 }
966
967
968 if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
969 bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
970 spin_unlock(&dentry->d_lock);
971 if (parent)
972 spin_unlock(&parent->d_lock);
973 if (can_free)
974 dentry_free(dentry);
975 continue;
976 }
977
978 inode = dentry->d_inode;
979 if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
980 d_shrink_add(dentry, list);
981 spin_unlock(&dentry->d_lock);
982 if (parent)
983 spin_unlock(&parent->d_lock);
984 continue;
985 }
986
987 __dentry_kill(dentry);
988
989 /*
990 * We need to prune ancestors too. This is necessary to prevent
991 * quadratic behavior of shrink_dcache_parent(), but is also
992 * expected to be beneficial in reducing dentry cache
993 * fragmentation.
994 */
995 dentry = parent;
996 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
997 parent = lock_parent(dentry);
998 if (dentry->d_lockref.count != 1) {
999 dentry->d_lockref.count--;
1000 spin_unlock(&dentry->d_lock);
1001 if (parent)
1002 spin_unlock(&parent->d_lock);
1003 break;
1004 }
1005 inode = dentry->d_inode; /* can't be NULL */
1006 if (unlikely(!spin_trylock(&inode->i_lock))) {
1007 spin_unlock(&dentry->d_lock);
1008 if (parent)
1009 spin_unlock(&parent->d_lock);
1010 cpu_relax();
1011 continue;
1012 }
1013 __dentry_kill(dentry);
1014 dentry = parent;
1015 }
1016 }
1017 }
1018
1019 static enum lru_status dentry_lru_isolate(struct list_head *item,
1020 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1021 {
1022 struct list_head *freeable = arg;
1023 struct dentry *dentry = container_of(item, struct dentry, d_lru);
1024
1025
1026 /*
1027 * we are inverting the lru lock/dentry->d_lock here,
1028 * so use a trylock. If we fail to get the lock, just skip
1029 * it
1030 */
1031 if (!spin_trylock(&dentry->d_lock))
1032 return LRU_SKIP;
1033
1034 /*
1035 * Referenced dentries are still in use. If they have active
1036 * counts, just remove them from the LRU. Otherwise give them
1037 * another pass through the LRU.
1038 */
1039 if (dentry->d_lockref.count) {
1040 d_lru_isolate(lru, dentry);
1041 spin_unlock(&dentry->d_lock);
1042 return LRU_REMOVED;
1043 }
1044
1045 if (dentry->d_flags & DCACHE_REFERENCED) {
1046 dentry->d_flags &= ~DCACHE_REFERENCED;
1047 spin_unlock(&dentry->d_lock);
1048
1049 /*
1050 * The list move itself will be made by the common LRU code. At
1051 * this point, we've dropped the dentry->d_lock but keep the
1052 * lru lock. This is safe to do, since every list movement is
1053 * protected by the lru lock even if both locks are held.
1054 *
1055 * This is guaranteed by the fact that all LRU management
1056 * functions are intermediated by the LRU API calls like
1057 * list_lru_add and list_lru_del. List movement in this file
1058 * only ever occurs through these functions or through callbacks
1059 * like this one, that are called from the LRU API.
1060 *
1061 * The only exceptions to this are functions like
1062 * shrink_dentry_list, and code that first checks for the
1063 * DCACHE_SHRINK_LIST flag. Those are guaranteed to be
1064 * operating only with stack provided lists after they are
1065 * properly isolated from the main list. It is thus always a
1066 * local access.
1067 */
1068 return LRU_ROTATE;
1069 }
1070
1071 d_lru_shrink_move(lru, dentry, freeable);
1072 spin_unlock(&dentry->d_lock);
1073
1074 return LRU_REMOVED;
1075 }
1076
1077 /**
1078 * prune_dcache_sb - shrink the dcache
1079 * @sb: superblock
1080 * @sc: shrink control, passed to list_lru_shrink_walk()
1081 *
1082 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
1083 * is done when we need more memory and called from the superblock shrinker
1084 * function.
1085 *
1086 * This function may fail to free any resources if all the dentries are in
1087 * use.
1088 */
1089 long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
1090 {
1091 LIST_HEAD(dispose);
1092 long freed;
1093
1094 freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
1095 dentry_lru_isolate, &dispose);
1096 shrink_dentry_list(&dispose);
1097 return freed;
1098 }
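/*
 * Hedged caller sketch (modeled on the superblock shrinker in
 * fs/super.c, simplified): the memory-management core hands down a
 * shrink_control, and prune_dcache_sb() reports how many dentries it
 * actually freed.
 */
static long example_dcache_scan(struct super_block *sb,
				struct shrink_control *sc)
{
	return prune_dcache_sb(sb, sc);
}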
1099
1100 static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1101 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1102 {
1103 struct list_head *freeable = arg;
1104 struct dentry *dentry = container_of(item, struct dentry, d_lru);
1105
1106 /*
1107 * we are inverting the lru lock/dentry->d_lock here,
1108 * so use a trylock. If we fail to get the lock, just skip
1109 * it
1110 */
1111 if (!spin_trylock(&dentry->d_lock))
1112 return LRU_SKIP;
1113
1114 d_lru_shrink_move(lru, dentry, freeable);
1115 spin_unlock(&dentry->d_lock);
1116
1117 return LRU_REMOVED;
1118 }
1119
1120
1121 /**
1122 * shrink_dcache_sb - shrink dcache for a superblock
1123 * @sb: superblock
1124 *
1125 * Shrink the dcache for the specified super block. This is used to free
1126 * the dcache before unmounting a file system.
1127 */
1128 void shrink_dcache_sb(struct super_block *sb)
1129 {
1130 long freed;
1131
1132 do {
1133 LIST_HEAD(dispose);
1134
1135 freed = list_lru_walk(&sb->s_dentry_lru,
1136 dentry_lru_isolate_shrink, &dispose, UINT_MAX);
1137
1138 this_cpu_sub(nr_dentry_unused, freed);
1139 shrink_dentry_list(&dispose);
1140 } while (freed > 0);
1141 }
1142 EXPORT_SYMBOL(shrink_dcache_sb);
1143
1144 /**
1145 * enum d_walk_ret - action to take during tree walk
1146 * @D_WALK_CONTINUE: continue walk
1147 * @D_WALK_QUIT: quit walk
1148 * @D_WALK_NORETRY: quit when retry is needed
1149 * @D_WALK_SKIP: skip this dentry and its children
1150 */
1151 enum d_walk_ret {
1152 D_WALK_CONTINUE,
1153 D_WALK_QUIT,
1154 D_WALK_NORETRY,
1155 D_WALK_SKIP,
1156 };
1157
1158 /**
1159 * d_walk - walk the dentry tree
1160 * @parent: start of walk
1161 * @data: data passed to @enter() and @finish()
1162 * @enter: callback when first entering the dentry
1163 * @finish: callback when successfully finished the walk
1164 *
1165 * The @enter() and @finish() callbacks are called with d_lock held.
1166 */
1167 static void d_walk(struct dentry *parent, void *data,
1168 enum d_walk_ret (*enter)(void *, struct dentry *),
1169 void (*finish)(void *))
1170 {
1171 struct dentry *this_parent;
1172 struct list_head *next;
1173 unsigned seq = 0;
1174 enum d_walk_ret ret;
1175 bool retry = true;
1176
1177 again:
1178 read_seqbegin_or_lock(&rename_lock, &seq);
1179 this_parent = parent;
1180 spin_lock(&this_parent->d_lock);
1181
1182 ret = enter(data, this_parent);
1183 switch (ret) {
1184 case D_WALK_CONTINUE:
1185 break;
1186 case D_WALK_QUIT:
1187 case D_WALK_SKIP:
1188 goto out_unlock;
1189 case D_WALK_NORETRY:
1190 retry = false;
1191 break;
1192 }
1193 repeat:
1194 next = this_parent->d_subdirs.next;
1195 resume:
1196 while (next != &this_parent->d_subdirs) {
1197 struct list_head *tmp = next;
1198 struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
1199 next = tmp->next;
1200
1201 if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
1202 continue;
1203
1204 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1205
1206 ret = enter(data, dentry);
1207 switch (ret) {
1208 case D_WALK_CONTINUE:
1209 break;
1210 case D_WALK_QUIT:
1211 spin_unlock(&dentry->d_lock);
1212 goto out_unlock;
1213 case D_WALK_NORETRY:
1214 retry = false;
1215 break;
1216 case D_WALK_SKIP:
1217 spin_unlock(&dentry->d_lock);
1218 continue;
1219 }
1220
1221 if (!list_empty(&dentry->d_subdirs)) {
1222 spin_unlock(&this_parent->d_lock);
1223 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1224 this_parent = dentry;
1225 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1226 goto repeat;
1227 }
1228 spin_unlock(&dentry->d_lock);
1229 }
1230 /*
1231 * All done at this level ... ascend and resume the search.
1232 */
1233 rcu_read_lock();
1234 ascend:
1235 if (this_parent != parent) {
1236 struct dentry *child = this_parent;
1237 this_parent = child->d_parent;
1238
1239 spin_unlock(&child->d_lock);
1240 spin_lock(&this_parent->d_lock);
1241
1242 /* might go back up the wrong parent if we have had a rename. */
1243 if (need_seqretry(&rename_lock, seq))
1244 goto rename_retry;
1245 /* go into the first sibling still alive */
1246 do {
1247 next = child->d_child.next;
1248 if (next == &this_parent->d_subdirs)
1249 goto ascend;
1250 child = list_entry(next, struct dentry, d_child);
1251 } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
1252 rcu_read_unlock();
1253 goto resume;
1254 }
1255 if (need_seqretry(&rename_lock, seq))
1256 goto rename_retry;
1257 rcu_read_unlock();
1258 if (finish)
1259 finish(data);
1260
1261 out_unlock:
1262 spin_unlock(&this_parent->d_lock);
1263 done_seqretry(&rename_lock, seq);
1264 return;
1265
1266 rename_retry:
1267 spin_unlock(&this_parent->d_lock);
1268 rcu_read_unlock();
1269 BUG_ON(seq & 1);
1270 if (!retry)
1271 return;
1272 seq = 1;
1273 goto again;
1274 }
1275
1276 /*
1277 * Search for at least 1 mount point in the dentry's subdirs.
1278 * We descend to the next level whenever the d_subdirs
1279 * list is non-empty and continue searching.
1280 */
1281
1282 static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
1283 {
1284 int *ret = data;
1285 if (d_mountpoint(dentry)) {
1286 *ret = 1;
1287 return D_WALK_QUIT;
1288 }
1289 return D_WALK_CONTINUE;
1290 }
1291
1292 /**
1293 * have_submounts - check for mounts over a dentry
1294 * @parent: dentry to check.
1295 *
1296 * Return true if the parent or its subdirectories contain
1297 * a mount point
1298 */
1299 int have_submounts(struct dentry *parent)
1300 {
1301 int ret = 0;
1302
1303 d_walk(parent, &ret, check_mount, NULL);
1304
1305 return ret;
1306 }
1307 EXPORT_SYMBOL(have_submounts);
1308
1309 /*
1310 * Called by mount code to set a mountpoint and check if the mountpoint is
1311 * reachable (e.g. NFS can unhash a directory dentry and then the complete
1312 * subtree can become unreachable).
1313 *
1314 * Only one of d_invalidate() and d_set_mounted() must succeed. For
1315 * this reason take rename_lock and d_lock on dentry and ancestors.
1316 */
1317 int d_set_mounted(struct dentry *dentry)
1318 {
1319 struct dentry *p;
1320 int ret = -ENOENT;
1321 write_seqlock(&rename_lock);
1322 for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1323 /* Need exclusion wrt. d_invalidate() */
1324 spin_lock(&p->d_lock);
1325 if (unlikely(d_unhashed(p))) {
1326 spin_unlock(&p->d_lock);
1327 goto out;
1328 }
1329 spin_unlock(&p->d_lock);
1330 }
1331 spin_lock(&dentry->d_lock);
1332 if (!d_unlinked(dentry)) {
1333 dentry->d_flags |= DCACHE_MOUNTED;
1334 ret = 0;
1335 }
1336 spin_unlock(&dentry->d_lock);
1337 out:
1338 write_sequnlock(&rename_lock);
1339 return ret;
1340 }
1341
1342 /*
1343 * Search the dentry child list of the specified parent,
1344 * and move any unused dentries to the end of the unused
1345 * list for prune_dcache(). We descend to the next level
1346 * whenever the d_subdirs list is non-empty and continue
1347 * searching.
1348 *
1349 * It returns zero iff there are no unused children,
1350 * otherwise it returns the number of children moved to
1351 * the end of the unused list. This may not be the total
1352 * number of unused children, because select_parent can
1353 * drop the lock and return early due to latency
1354 * constraints.
1355 */
1356
1357 struct select_data {
1358 struct dentry *start;
1359 struct list_head dispose;
1360 int found;
1361 };
1362
1363 static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1364 {
1365 struct select_data *data = _data;
1366 enum d_walk_ret ret = D_WALK_CONTINUE;
1367
1368 if (data->start == dentry)
1369 goto out;
1370
1371 if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1372 data->found++;
1373 } else {
1374 if (dentry->d_flags & DCACHE_LRU_LIST)
1375 d_lru_del(dentry);
1376 if (!dentry->d_lockref.count) {
1377 d_shrink_add(dentry, &data->dispose);
1378 data->found++;
1379 }
1380 }
1381 /*
1382 * We can return to the caller if we have found some (this
1383 * ensures forward progress). We'll be coming back to find
1384 * the rest.
1385 */
1386 if (!list_empty(&data->dispose))
1387 ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1388 out:
1389 return ret;
1390 }
1391
1392 /**
1393 * shrink_dcache_parent - prune dcache
1394 * @parent: parent of entries to prune
1395 *
1396 * Prune the dcache to remove unused children of the parent dentry.
1397 */
1398 void shrink_dcache_parent(struct dentry *parent)
1399 {
1400 for (;;) {
1401 struct select_data data;
1402
1403 INIT_LIST_HEAD(&data.dispose);
1404 data.start = parent;
1405 data.found = 0;
1406
1407 d_walk(parent, &data, select_collect, NULL);
1408 if (!data.found)
1409 break;
1410
1411 shrink_dentry_list(&data.dispose);
1412 cond_resched();
1413 }
1414 }
1415 EXPORT_SYMBOL(shrink_dcache_parent);
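/*
 * Hedged caller sketch (modeled on directory-removal paths such as
 * vfs_rmdir() in fs/namei.c): unused cached children are pruned before
 * the filesystem is asked to remove the directory itself.
 */
static void example_prepare_rmdir(struct dentry *dir)
{
	shrink_dcache_parent(dir);
	/* ->rmdir() and d_delete() would follow in the real caller */
}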
1416
1417 static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
1418 {
1419 /* it has busy descendants; complain about those instead */
1420 if (!list_empty(&dentry->d_subdirs))
1421 return D_WALK_CONTINUE;
1422
1423 /* root with refcount 1 is fine */
1424 if (dentry == _data && dentry->d_lockref.count == 1)
1425 return D_WALK_CONTINUE;
1426
1427 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
1428 " still in use (%d) [unmount of %s %s]\n",
1429 dentry,
1430 dentry->d_inode ?
1431 dentry->d_inode->i_ino : 0UL,
1432 dentry,
1433 dentry->d_lockref.count,
1434 dentry->d_sb->s_type->name,
1435 dentry->d_sb->s_id);
1436 WARN_ON(1);
1437 return D_WALK_CONTINUE;
1438 }
1439
1440 static void do_one_tree(struct dentry *dentry)
1441 {
1442 shrink_dcache_parent(dentry);
1443 d_walk(dentry, dentry, umount_check, NULL);
1444 d_drop(dentry);
1445 dput(dentry);
1446 }
1447
1448 /*
1449 * destroy the dentries attached to a superblock on unmounting
1450 */
1451 void shrink_dcache_for_umount(struct super_block *sb)
1452 {
1453 struct dentry *dentry;
1454
1455 WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
1456
1457 dentry = sb->s_root;
1458 sb->s_root = NULL;
1459 do_one_tree(dentry);
1460
1461 while (!hlist_bl_empty(&sb->s_anon)) {
1462 dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
1463 do_one_tree(dentry);
1464 }
1465 }
1466
1467 struct detach_data {
1468 struct select_data select;
1469 struct dentry *mountpoint;
1470 };
1471 static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
1472 {
1473 struct detach_data *data = _data;
1474
1475 if (d_mountpoint(dentry)) {
1476 __dget_dlock(dentry);
1477 data->mountpoint = dentry;
1478 return D_WALK_QUIT;
1479 }
1480
1481 return select_collect(&data->select, dentry);
1482 }
1483
1484 static void check_and_drop(void *_data)
1485 {
1486 struct detach_data *data = _data;
1487
1488 if (!data->mountpoint && !data->select.found)
1489 __d_drop(data->select.start);
1490 }
1491
1492 /**
1493 * d_invalidate - detach submounts, prune dcache, and drop
1494 * @dentry: dentry to invalidate (aka detach, prune and drop)
1495 *
1496 * no dcache lock.
1497 *
1498 * The final d_drop is done as an atomic operation relative to
1499 * rename_lock ensuring there are no races with d_set_mounted. This
1500 * ensures there are no unhashed dentries on the path to a mountpoint.
1501 */
1502 void d_invalidate(struct dentry *dentry)
1503 {
1504 /*
1505 * If it's already been dropped, return OK.
1506 */
1507 spin_lock(&dentry->d_lock);
1508 if (d_unhashed(dentry)) {
1509 spin_unlock(&dentry->d_lock);
1510 return;
1511 }
1512 spin_unlock(&dentry->d_lock);
1513
1514 /* Negative dentries can be dropped without further checks */
1515 if (!dentry->d_inode) {
1516 d_drop(dentry);
1517 return;
1518 }
1519
1520 for (;;) {
1521 struct detach_data data;
1522
1523 data.mountpoint = NULL;
1524 INIT_LIST_HEAD(&data.select.dispose);
1525 data.select.start = dentry;
1526 data.select.found = 0;
1527
1528 d_walk(dentry, &data, detach_and_collect, check_and_drop);
1529
1530 if (data.select.found)
1531 shrink_dentry_list(&data.select.dispose);
1532
1533 if (data.mountpoint) {
1534 detach_mounts(data.mountpoint);
1535 dput(data.mountpoint);
1536 }
1537
1538 if (!data.mountpoint && !data.select.found)
1539 break;
1540
1541 cond_resched();
1542 }
1543 }
1544 EXPORT_SYMBOL(d_invalidate);
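/*
 * Hedged caller sketch (modeled on lookup_dcache() in fs/namei.c,
 * heavily simplified): a dentry that fails ->d_revalidate() is
 * detached via d_invalidate() so a fresh lookup can take its place.
 */
static int example_revalidate(struct dentry *dentry, unsigned int flags)
{
	int status = dentry->d_op->d_revalidate(dentry, flags);

	if (!status)
		d_invalidate(dentry);
	return status;
}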
1545
1546 /**
1547 * __d_alloc - allocate a dcache entry
1548 * @sb: filesystem it will belong to
1549 * @name: qstr of the name
1550 *
1551 * Allocates a dentry. It returns %NULL if there is insufficient memory
1552 * available. On a success the dentry is returned. The name passed in is
1553 * copied and the copy passed in may be reused after this call.
1554 */
1555
1556 struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1557 {
1558 struct dentry *dentry;
1559 char *dname;
1560 int err;
1561
1562 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1563 if (!dentry)
1564 return NULL;
1565
1566 /*
1567 * We guarantee that the inline name is always NUL-terminated.
1568 * This way the memcpy() done by the name switching in rename
1569 * will still always have a NUL at the end, even if we might
1570 * be overwriting an internal NUL character
1571 */
1572 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1573 if (unlikely(!name)) {
1574 static const struct qstr anon = QSTR_INIT("/", 1);
1575 name = &anon;
1576 dname = dentry->d_iname;
1577 } else if (name->len > DNAME_INLINE_LEN-1) {
1578 size_t size = offsetof(struct external_name, name[1]);
1579 struct external_name *p = kmalloc(size + name->len,
1580 GFP_KERNEL_ACCOUNT);
1581 if (!p) {
1582 kmem_cache_free(dentry_cache, dentry);
1583 return NULL;
1584 }
1585 atomic_set(&p->u.count, 1);
1586 dname = p->name;
1587 if (IS_ENABLED(CONFIG_DCACHE_WORD_ACCESS))
1588 kasan_unpoison_shadow(dname,
1589 round_up(name->len + 1, sizeof(unsigned long)));
1590 } else {
1591 dname = dentry->d_iname;
1592 }
1593
1594 dentry->d_name.len = name->len;
1595 dentry->d_name.hash = name->hash;
1596 memcpy(dname, name->name, name->len);
1597 dname[name->len] = 0;
1598
1599 /* Make sure we always see the terminating NUL character */
1600 smp_wmb();
1601 dentry->d_name.name = dname;
1602
1603 dentry->d_lockref.count = 1;
1604 dentry->d_flags = 0;
1605 spin_lock_init(&dentry->d_lock);
1606 seqcount_init(&dentry->d_seq);
1607 dentry->d_inode = NULL;
1608 dentry->d_parent = dentry;
1609 dentry->d_sb = sb;
1610 dentry->d_op = NULL;
1611 dentry->d_fsdata = NULL;
1612 INIT_HLIST_BL_NODE(&dentry->d_hash);
1613 INIT_LIST_HEAD(&dentry->d_lru);
1614 INIT_LIST_HEAD(&dentry->d_subdirs);
1615 INIT_HLIST_NODE(&dentry->d_u.d_alias);
1616 INIT_LIST_HEAD(&dentry->d_child);
1617 d_set_d_op(dentry, dentry->d_sb->s_d_op);
1618
1619 if (dentry->d_op && dentry->d_op->d_init) {
1620 err = dentry->d_op->d_init(dentry);
1621 if (err) {
1622 if (dname_external(dentry))
1623 kfree(external_name(dentry));
1624 kmem_cache_free(dentry_cache, dentry);
1625 return NULL;
1626 }
1627 }
1628
1629 this_cpu_inc(nr_dentry);
1630
1631 return dentry;
1632 }
1633
1634 /**
1635 * d_alloc - allocate a dcache entry
1636 * @parent: parent of entry to allocate
1637 * @name: qstr of the name
1638 *
1639 * Allocates a dentry. It returns %NULL if there is insufficient memory
1640 * available. On a success the dentry is returned. The name passed in is
1641 * copied and the copy passed in may be reused after this call.
1642 */
1643 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1644 {
1645 struct dentry *dentry = __d_alloc(parent->d_sb, name);
1646 if (!dentry)
1647 return NULL;
1648 dentry->d_flags |= DCACHE_RCUACCESS;
1649 spin_lock(&parent->d_lock);
1650 /*
1651 * don't need child lock because it is not subject
1652 * to concurrency here
1653 */
1654 __dget_dlock(parent);
1655 dentry->d_parent = parent;
1656 list_add(&dentry->d_child, &parent->d_subdirs);
1657 spin_unlock(&parent->d_lock);
1658
1659 return dentry;
1660 }
1661 EXPORT_SYMBOL(d_alloc);
1662
1663 struct dentry *d_alloc_cursor(struct dentry * parent)
1664 {
1665 struct dentry *dentry = __d_alloc(parent->d_sb, NULL);
1666 if (dentry) {
1667 dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
1668 dentry->d_parent = dget(parent);
1669 }
1670 return dentry;
1671 }
1672
1673 /**
1674 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1675 * @sb: the superblock
1676 * @name: qstr of the name
1677 *
1678 * For a filesystem that just pins its dentries in memory and never
1679 * performs lookups at all, return an unhashed IS_ROOT dentry.
1680 */
1681 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1682 {
1683 return __d_alloc(sb, name);
1684 }
1685 EXPORT_SYMBOL(d_alloc_pseudo);
1686
1687 struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1688 {
1689 struct qstr q;
1690
1691 q.name = name;
1692 q.hash_len = hashlen_string(parent, name);
1693 return d_alloc(parent, &q);
1694 }
1695 EXPORT_SYMBOL(d_alloc_name);
1696
1697 void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1698 {
1699 WARN_ON_ONCE(dentry->d_op);
1700 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
1701 DCACHE_OP_COMPARE |
1702 DCACHE_OP_REVALIDATE |
1703 DCACHE_OP_WEAK_REVALIDATE |
1704 DCACHE_OP_DELETE |
1705 DCACHE_OP_REAL));
1706 dentry->d_op = op;
1707 if (!op)
1708 return;
1709 if (op->d_hash)
1710 dentry->d_flags |= DCACHE_OP_HASH;
1711 if (op->d_compare)
1712 dentry->d_flags |= DCACHE_OP_COMPARE;
1713 if (op->d_revalidate)
1714 dentry->d_flags |= DCACHE_OP_REVALIDATE;
1715 if (op->d_weak_revalidate)
1716 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1717 if (op->d_delete)
1718 dentry->d_flags |= DCACHE_OP_DELETE;
1719 if (op->d_prune)
1720 dentry->d_flags |= DCACHE_OP_PRUNE;
1721 if (op->d_real)
1722 dentry->d_flags |= DCACHE_OP_REAL;
1723
1724 }
1725 EXPORT_SYMBOL(d_set_d_op);
1726
1727
1728 /*
1729 * d_set_fallthru - Mark a dentry as falling through to a lower layer
1730 * @dentry - The dentry to mark
1731 *
1732 * Mark a dentry as falling through to the lower layer (as set with
1733 * d_pin_lower()). This flag may be recorded on the medium.
1734 */
1735 void d_set_fallthru(struct dentry *dentry)
1736 {
1737 spin_lock(&dentry->d_lock);
1738 dentry->d_flags |= DCACHE_FALLTHRU;
1739 spin_unlock(&dentry->d_lock);
1740 }
1741 EXPORT_SYMBOL(d_set_fallthru);
1742
1743 static unsigned d_flags_for_inode(struct inode *inode)
1744 {
1745 unsigned add_flags = DCACHE_REGULAR_TYPE;
1746
1747 if (!inode)
1748 return DCACHE_MISS_TYPE;
1749
1750 if (S_ISDIR(inode->i_mode)) {
1751 add_flags = DCACHE_DIRECTORY_TYPE;
1752 if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1753 if (unlikely(!inode->i_op->lookup))
1754 add_flags = DCACHE_AUTODIR_TYPE;
1755 else
1756 inode->i_opflags |= IOP_LOOKUP;
1757 }
1758 goto type_determined;
1759 }
1760
1761 if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1762 if (unlikely(inode->i_op->get_link)) {
1763 add_flags = DCACHE_SYMLINK_TYPE;
1764 goto type_determined;
1765 }
1766 inode->i_opflags |= IOP_NOFOLLOW;
1767 }
1768
1769 if (unlikely(!S_ISREG(inode->i_mode)))
1770 add_flags = DCACHE_SPECIAL_TYPE;
1771
1772 type_determined:
1773 if (unlikely(IS_AUTOMOUNT(inode)))
1774 add_flags |= DCACHE_NEED_AUTOMOUNT;
1775 return add_flags;
1776 }
1777
1778 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1779 {
1780 unsigned add_flags = d_flags_for_inode(inode);
1781 WARN_ON(d_in_lookup(dentry));
1782
1783 spin_lock(&dentry->d_lock);
1784 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1785 raw_write_seqcount_begin(&dentry->d_seq);
1786 __d_set_inode_and_type(dentry, inode, add_flags);
1787 raw_write_seqcount_end(&dentry->d_seq);
1788 fsnotify_update_flags(dentry);
1789 spin_unlock(&dentry->d_lock);
1790 }
1791
1792 /**
1793 * d_instantiate - fill in inode information for a dentry
1794 * @entry: dentry to complete
1795 * @inode: inode to attach to this dentry
1796 *
1797 * Fill in inode information in the entry.
1798 *
1799 * This turns negative dentries into productive full members
1800 * of society.
1801 *
1802 * NOTE! This assumes that the inode count has been incremented
1803 * (or otherwise set) by the caller to indicate that it is now
1804 * in use by the dcache.
1805 */
1806
1807 void d_instantiate(struct dentry *entry, struct inode * inode)
1808 {
1809 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1810 if (inode) {
1811 security_d_instantiate(entry, inode);
1812 spin_lock(&inode->i_lock);
1813 __d_instantiate(entry, inode);
1814 spin_unlock(&inode->i_lock);
1815 }
1816 }
1817 EXPORT_SYMBOL(d_instantiate);
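/*
 * Hedged usage sketch (ramfs-style ->create(); the inode setup is
 * heavily simplified and the names are placeholders): a new inode is
 * attached to the negative dentry handed in by the VFS, turning it
 * positive, and an extra reference pins it in the dcache.
 */
static int example_create(struct inode *dir, struct dentry *dentry,
			  umode_t mode, bool excl)
{
	struct inode *inode = new_inode(dir->i_sb);

	if (!inode)
		return -ENOSPC;
	inode->i_mode = mode;
	d_instantiate(dentry, inode);	/* dcache takes over the inode ref */
	dget(dentry);			/* extra pin, as ramfs does */
	return 0;
}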
1818
1819 /**
1820 * d_instantiate_no_diralias - instantiate a non-aliased dentry
1821 * @entry: dentry to complete
1822 * @inode: inode to attach to this dentry
1823 *
1824 * Fill in inode information in the entry. If a directory alias is found, then
1825 * return an error (and drop inode). Together with d_splice_alias() this
1826 * guarantees that a directory inode may never have more than one alias.
1827 */
1828 int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
1829 {
1830 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1831
1832 security_d_instantiate(entry, inode);
1833 spin_lock(&inode->i_lock);
1834 if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
1835 spin_unlock(&inode->i_lock);
1836 iput(inode);
1837 return -EBUSY;
1838 }
1839 __d_instantiate(entry, inode);
1840 spin_unlock(&inode->i_lock);
1841
1842 return 0;
1843 }
1844 EXPORT_SYMBOL(d_instantiate_no_diralias);
1845
1846 struct dentry *d_make_root(struct inode *root_inode)
1847 {
1848 struct dentry *res = NULL;
1849
1850 if (root_inode) {
1851 res = __d_alloc(root_inode->i_sb, NULL);
1852 if (res)
1853 d_instantiate(res, root_inode);
1854 else
1855 iput(root_inode);
1856 }
1857 return res;
1858 }
1859 EXPORT_SYMBOL(d_make_root);
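/*
 * Hedged usage sketch (typical ->fill_super() tail; inode setup
 * omitted): d_make_root() consumes the inode reference either way, so
 * the failure path only needs to report -ENOMEM.
 */
static int example_fill_super(struct super_block *sb)
{
	struct inode *root = new_inode(sb);

	if (!root)
		return -ENOMEM;
	root->i_mode = S_IFDIR | 0755;
	sb->s_root = d_make_root(root);
	return sb->s_root ? 0 : -ENOMEM;
}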
1860
1861 static struct dentry * __d_find_any_alias(struct inode *inode)
1862 {
1863 struct dentry *alias;
1864
1865 if (hlist_empty(&inode->i_dentry))
1866 return NULL;
1867 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
1868 __dget(alias);
1869 return alias;
1870 }
1871
1872 /**
1873 * d_find_any_alias - find any alias for a given inode
1874 * @inode: inode to find an alias for
1875 *
1876 * If any aliases exist for the given inode, take and return a
1877 * reference for one of them. If no aliases exist, return %NULL.
1878 */
1879 struct dentry *d_find_any_alias(struct inode *inode)
1880 {
1881 struct dentry *de;
1882
1883 spin_lock(&inode->i_lock);
1884 de = __d_find_any_alias(inode);
1885 spin_unlock(&inode->i_lock);
1886 return de;
1887 }
1888 EXPORT_SYMBOL(d_find_any_alias);
1889
1890 static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
1891 {
1892 struct dentry *tmp;
1893 struct dentry *res;
1894 unsigned add_flags;
1895
1896 if (!inode)
1897 return ERR_PTR(-ESTALE);
1898 if (IS_ERR(inode))
1899 return ERR_CAST(inode);
1900
1901 res = d_find_any_alias(inode);
1902 if (res)
1903 goto out_iput;
1904
1905 tmp = __d_alloc(inode->i_sb, NULL);
1906 if (!tmp) {
1907 res = ERR_PTR(-ENOMEM);
1908 goto out_iput;
1909 }
1910
1911 security_d_instantiate(tmp, inode);
1912 spin_lock(&inode->i_lock);
1913 res = __d_find_any_alias(inode);
1914 if (res) {
1915 spin_unlock(&inode->i_lock);
1916 dput(tmp);
1917 goto out_iput;
1918 }
1919
1920 /* attach a disconnected dentry */
1921 add_flags = d_flags_for_inode(inode);
1922
1923 if (disconnected)
1924 add_flags |= DCACHE_DISCONNECTED;
1925
1926 spin_lock(&tmp->d_lock);
1927 __d_set_inode_and_type(tmp, inode, add_flags);
1928 hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
1929 hlist_bl_lock(&tmp->d_sb->s_anon);
1930 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1931 hlist_bl_unlock(&tmp->d_sb->s_anon);
1932 spin_unlock(&tmp->d_lock);
1933 spin_unlock(&inode->i_lock);
1934
1935 return tmp;
1936
1937 out_iput:
1938 iput(inode);
1939 return res;
1940 }
1941
1942 /**
1943 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
1944 * @inode: inode to allocate the dentry for
1945 *
1946 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
1947 * similar open by handle operations. The returned dentry may be anonymous,
1948 * or may have a full name (if the inode was already in the cache).
1949 *
1950 * When called on a directory inode, we must ensure that the inode only ever
1951 * has one dentry. If a dentry is found, that is returned instead of
1952 * allocating a new one.
1953 *
1954 * On successful return, the reference to the inode has been transferred
1955 * to the dentry. In case of an error the reference on the inode is released.
1956 * To make it easier to use in export operations a %NULL or IS_ERR inode may
1957 * be passed in and the error will be propagated to the return value,
1958 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
1959 */
1960 struct dentry *d_obtain_alias(struct inode *inode)
1961 {
1962 return __d_obtain_alias(inode, 1);
1963 }
1964 EXPORT_SYMBOL(d_obtain_alias);
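
/*
 * Editor's sketch (not in the original source): how an exportfs
 * ->fh_to_dentry() typically uses d_obtain_alias(), in the style of
 * generic_fh_to_dentry(). myfs_iget() and the two-word filehandle
 * layout are assumptions; struct fid comes from <linux/exportfs.h>.
 */
static struct dentry *myfs_fh_to_dentry(struct super_block *sb,
					struct fid *fid, int fh_len,
					int fh_type)
{
	struct inode *inode;

	if (fh_len < 2)
		return NULL;
	inode = myfs_iget(sb, fid->i32.ino, fid->i32.gen);
	/* consumes the reference; NULL and ERR_PTR inodes are handled */
	return d_obtain_alias(inode);
}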
1965
1966 /**
1967 * d_obtain_root - find or allocate a dentry for a given inode
1968 * @inode: inode to allocate the dentry for
1969 *
1970 * Obtain an IS_ROOT dentry for the root of a filesystem.
1971 *
1972 * We must ensure that directory inodes only ever have one dentry. If a
1973 * dentry is found, that is returned instead of allocating a new one.
1974 *
1975 * On successful return, the reference to the inode has been transferred
1976 * to the dentry. In case of an error the reference on the inode is
1977 * released. A %NULL or IS_ERR inode may be passed in and the error
1978 * will be propagated to the return value, with a %NULL @inode
1979 * replaced by ERR_PTR(-ESTALE).
1980 */
1981 struct dentry *d_obtain_root(struct inode *inode)
1982 {
1983 return __d_obtain_alias(inode, 0);
1984 }
1985 EXPORT_SYMBOL(d_obtain_root);
1986
1987 /**
1988 * d_add_ci - lookup or allocate new dentry with case-exact name
1989 * @inode: the inode case-insensitive lookup has found
1990 * @dentry: the negative dentry that was passed to the parent's lookup func
1991 * @name: the case-exact name to be associated with the returned dentry
1992 *
1993 * This is to avoid filling the dcache with case-insensitive names for the
1994 * same inode; only the actual correct case is stored in the dcache for
1995 * case-insensitive filesystems.
1996 *
1997 * On a case-insensitive lookup match, if the case-exact dentry
1998 * already exists in the dcache, use it and return it.
1999 *
2000 * If no entry exists with the exact case name, allocate a new dentry with
2001 * the exact case, and return the spliced entry.
2002 */
2003 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
2004 struct qstr *name)
2005 {
2006 struct dentry *found, *res;
2007
2008 /*
2009 * First check if a dentry matching the name already exists,
2010 * if not go ahead and create it now.
2011 */
2012 found = d_hash_and_lookup(dentry->d_parent, name);
2013 if (found) {
2014 iput(inode);
2015 return found;
2016 }
2017 if (d_in_lookup(dentry)) {
2018 found = d_alloc_parallel(dentry->d_parent, name,
2019 dentry->d_wait);
2020 if (IS_ERR(found) || !d_in_lookup(found)) {
2021 iput(inode);
2022 return found;
2023 }
2024 } else {
2025 found = d_alloc(dentry->d_parent, name);
2026 if (!found) {
2027 iput(inode);
2028 return ERR_PTR(-ENOMEM);
2029 }
2030 }
2031 res = d_splice_alias(inode, found);
2032 if (res) {
2033 dput(found);
2034 return res;
2035 }
2036 return found;
2037 }
2038 EXPORT_SYMBOL(d_add_ci);
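
/*
 * Editor's sketch (not in the original source): a case-insensitive
 * ->lookup() built around d_add_ci(). myfs_find_ci() is hypothetical:
 * it resolves the name case-insensitively and reports the on-disk
 * spelling through @exact.
 */
static struct dentry *myfs_lookup_ci(struct inode *dir, struct dentry *dentry,
				     unsigned int flags)
{
	struct qstr exact;
	struct inode *inode = myfs_find_ci(dir, &dentry->d_name, &exact);

	if (IS_ERR(inode))
		return ERR_CAST(inode);
	/* d_add_ci() consumes the inode reference on every path */
	return d_add_ci(dentry, inode, &exact);
}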
2039
2040
2041 static inline bool d_same_name(const struct dentry *dentry,
2042 const struct dentry *parent,
2043 const struct qstr *name)
2044 {
2045 if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2046 if (dentry->d_name.len != name->len)
2047 return false;
2048 return dentry_cmp(dentry, name->name, name->len) == 0;
2049 }
2050 return parent->d_op->d_compare(dentry,
2051 dentry->d_name.len, dentry->d_name.name,
2052 name) == 0;
2053 }
2054
2055 /**
2056 * __d_lookup_rcu - search for a dentry (racy, store-free)
2057 * @parent: parent dentry
2058 * @name: qstr of name we wish to find
2059 * @seqp: returns d_seq value at the point where the dentry was found
2060 * Returns: dentry, or NULL
2061 *
2062 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2063 * resolution (store-free path walking) design described in
2064 * Documentation/filesystems/path-lookup.txt.
2065 *
2066 * This is not to be used outside core vfs.
2067 *
2068 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2069 * held, and rcu_read_lock held. The returned dentry must not be stored
2070 * anywhere without taking d_lock and checking its d_seq sequence count
2071 * against @seq returned here.
2072 *
2073 * A refcount may be taken on the found dentry with
2074 * lockref_get_not_dead(), as d_alloc_parallel() below does.
2075 *
2076 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2077 * the returned dentry, so long as its parent's seqlock is checked after the
2078 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2079 * is formed, giving integrity down the path walk.
2080 *
2081 * NOTE! The caller *has* to check the resulting dentry against the sequence
2082 * number we've returned before using any of the resulting dentry state!
2083 */
2084 struct dentry *__d_lookup_rcu(const struct dentry *parent,
2085 const struct qstr *name,
2086 unsigned *seqp)
2087 {
2088 u64 hashlen = name->hash_len;
2089 const unsigned char *str = name->name;
2090 struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
2091 struct hlist_bl_node *node;
2092 struct dentry *dentry;
2093
2094 /*
2095 * Note: There is significant duplication with __d_lookup which is
2096 * required to prevent single threaded performance regressions
2097 * especially on architectures where smp_rmb (in seqcounts) are costly.
2098 * Keep the two functions in sync.
2099 */
2100
2101 /*
2102 * The hash list is protected using RCU.
2103 *
2104 * Carefully use d_seq when comparing a candidate dentry, to avoid
2105 * races with d_move().
2106 *
2107 * It is possible that concurrent renames can mess up our list
2108 * walk here and make us miss our dentry, resulting in a
2109 * false-negative result. d_lookup() protects against concurrent
2110 * renames using rename_lock seqlock.
2111 *
2112 * See Documentation/filesystems/path-lookup.txt for more details.
2113 */
2114 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2115 unsigned seq;
2116
2117 seqretry:
2118 /*
2119 * The dentry sequence count protects us from concurrent
2120 * renames, and thus protects parent and name fields.
2121 *
2122 * The caller must perform a seqcount check in order
2123 * to do anything useful with the returned dentry.
2124 *
2125 * NOTE! We do a "raw" seqcount_begin here. That means that
2126 * we don't wait for the sequence count to stabilize if it
2127 * is in the middle of a sequence change. If we do the slow
2128 * dentry compare, we will do seqretries until it is stable,
2129 * and if we end up with a successful lookup, we actually
2130 * want to exit RCU lookup anyway.
2131 *
2132 * Note that raw_seqcount_begin still *does* smp_rmb(), so
2133 * we are still guaranteed NUL-termination of ->d_name.name.
2134 */
2135 seq = raw_seqcount_begin(&dentry->d_seq);
2136 if (dentry->d_parent != parent)
2137 continue;
2138 if (d_unhashed(dentry))
2139 continue;
2140
2141 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2142 int tlen;
2143 const char *tname;
2144 if (dentry->d_name.hash != hashlen_hash(hashlen))
2145 continue;
2146 tlen = dentry->d_name.len;
2147 tname = dentry->d_name.name;
2148 /* we want a consistent (name,len) pair */
2149 if (read_seqcount_retry(&dentry->d_seq, seq)) {
2150 cpu_relax();
2151 goto seqretry;
2152 }
2153 if (parent->d_op->d_compare(dentry,
2154 tlen, tname, name) != 0)
2155 continue;
2156 } else {
2157 if (dentry->d_name.hash_len != hashlen)
2158 continue;
2159 if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
2160 continue;
2161 }
2162 *seqp = seq;
2163 return dentry;
2164 }
2165 return NULL;
2166 }
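
/*
 * Editor's sketch (not in the original source): the caller-side seqcount
 * discipline, modelled on lookup_fast() in fs/namei.c. Purely
 * illustrative; as noted above, real rcu-walk callers live in core VFS.
 */
static struct dentry *example_rcu_child(struct dentry *parent,
					const struct qstr *name)
{
	unsigned seq;
	struct dentry *dentry;

	/* caller must be in rcu-walk mode with rcu_read_lock() held */
	dentry = __d_lookup_rcu(parent, name, &seq);
	if (!dentry)
		return NULL;
	/* nothing read from *dentry may be trusted until d_seq is checked */
	if (read_seqcount_retry(&dentry->d_seq, seq))
		return NULL;	/* raced with d_move(); caller must fall back */
	return dentry;
}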
2167
2168 /**
2169 * d_lookup - search for a dentry
2170 * @parent: parent dentry
2171 * @name: qstr of name we wish to find
2172 * Returns: dentry, or NULL
2173 *
2174 * d_lookup searches the children of the parent dentry for the name in
2175 * question. If the dentry is found its reference count is incremented and the
2176 * dentry is returned. The caller must use dput to free the entry when it has
2177 * finished using it. %NULL is returned if the dentry does not exist.
2178 */
2179 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2180 {
2181 struct dentry *dentry;
2182 unsigned seq;
2183
2184 do {
2185 seq = read_seqbegin(&rename_lock);
2186 dentry = __d_lookup(parent, name);
2187 if (dentry)
2188 break;
2189 } while (read_seqretry(&rename_lock, seq));
2190 return dentry;
2191 }
2192 EXPORT_SYMBOL(d_lookup);
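
/*
 * Editor's sketch (not in the original source): typical d_lookup()
 * usage. The helper name is hypothetical; the point is that the caller
 * owns the returned reference and must dput() it.
 */
static bool myfs_child_is_positive(struct dentry *parent,
				   const struct qstr *name)
{
	struct dentry *child = d_lookup(parent, name);
	bool positive = false;

	if (child) {
		positive = d_is_positive(child);
		dput(child);
	}
	return positive;
}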
2193
2194 /**
2195 * __d_lookup - search for a dentry (racy)
2196 * @parent: parent dentry
2197 * @name: qstr of name we wish to find
2198 * Returns: dentry, or NULL
2199 *
2200 * __d_lookup is like d_lookup, however it may (rarely) return a
2201 * false-negative result due to unrelated rename activity.
2202 *
2203 * __d_lookup is slightly faster because it avoids the rename_lock read
2204 * seqlock; however it must be used carefully, e.g. with a following
2205 * d_lookup in the case of failure.
2206 *
2207 * __d_lookup callers must be commented.
2208 */
2209 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2210 {
2211 unsigned int hash = name->hash;
2212 struct hlist_bl_head *b = d_hash(hash);
2213 struct hlist_bl_node *node;
2214 struct dentry *found = NULL;
2215 struct dentry *dentry;
2216
2217 /*
2218 * Note: There is significant duplication with __d_lookup_rcu which is
2219 * required to prevent single threaded performance regressions
2220 * especially on architectures where smp_rmb (in seqcounts) are costly.
2221 * Keep the two functions in sync.
2222 */
2223
2224 /*
2225 * The hash list is protected using RCU.
2226 *
2227 * Take d_lock when comparing a candidate dentry, to avoid races
2228 * with d_move().
2229 *
2230 * It is possible that concurrent renames can mess up our list
2231 * walk here and make us miss our dentry, resulting in a
2232 * false-negative result. d_lookup() protects against concurrent
2233 * renames using rename_lock seqlock.
2234 *
2235 * See Documentation/filesystems/path-lookup.txt for more details.
2236 */
2237 rcu_read_lock();
2238
2239 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2240
2241 if (dentry->d_name.hash != hash)
2242 continue;
2243
2244 spin_lock(&dentry->d_lock);
2245 if (dentry->d_parent != parent)
2246 goto next;
2247 if (d_unhashed(dentry))
2248 goto next;
2249
2250 if (!d_same_name(dentry, parent, name))
2251 goto next;
2252
2253 dentry->d_lockref.count++;
2254 found = dentry;
2255 spin_unlock(&dentry->d_lock);
2256 break;
2257 next:
2258 spin_unlock(&dentry->d_lock);
2259 }
2260 rcu_read_unlock();
2261
2262 return found;
2263 }
2264
2265 /**
2266 * d_hash_and_lookup - hash the qstr then search for a dentry
2267 * @dir: Directory to search in
2268 * @name: qstr of name we wish to find
2269 *
2270 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
2271 */
2272 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2273 {
2274 /*
2275 * Check for a fs-specific hash function. Note that we must
2276 * calculate the standard hash first, as the d_op->d_hash()
2277 * routine may choose to leave the hash value unchanged.
2278 */
2279 name->hash = full_name_hash(dir, name->name, name->len);
2280 if (dir->d_flags & DCACHE_OP_HASH) {
2281 int err = dir->d_op->d_hash(dir, name);
2282 if (unlikely(err < 0))
2283 return ERR_PTR(err);
2284 }
2285 return d_lookup(dir, name);
2286 }
2287 EXPORT_SYMBOL(d_hash_and_lookup);
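
/*
 * Editor's sketch (not in the original source): looking up a child by a
 * bare C string, the way procfs does it. QSTR_INIT() fills in name and
 * length; d_hash_and_lookup() supplies the hash.
 */
static struct dentry *myfs_lookup_name(struct dentry *dir, const char *s)
{
	struct qstr q = QSTR_INIT(s, strlen(s));

	/* NULL on lookup failure, ERR_PTR if ->d_hash() rejects the name */
	return d_hash_and_lookup(dir, &q);
}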
2288
2289 /*
2290 * When a file is deleted, we have two options:
2291 * - turn this dentry into a negative dentry
2292 * - unhash this dentry and free it.
2293 *
2294 * Usually, we want to just turn this into
2295 * a negative dentry, but if anybody else is
2296 * currently using the dentry or the inode
2297 * we can't do that and we fall back on removing
2298 * it from the hash queues and waiting for
2299 * it to be deleted later when it has no users.
2300 */
2301
2302 /**
2303 * d_delete - delete a dentry
2304 * @dentry: The dentry to delete
2305 *
2306 * Turn the dentry into a negative dentry if possible, otherwise
2307 * remove it from the hash queues so it can be deleted later
2308 */
2309
2310 void d_delete(struct dentry * dentry)
2311 {
2312 struct inode *inode;
2313 int isdir = 0;
2314 /*
2315 * Are we the only user?
2316 */
2317 again:
2318 spin_lock(&dentry->d_lock);
2319 inode = dentry->d_inode;
2320 isdir = S_ISDIR(inode->i_mode);
2321 if (dentry->d_lockref.count == 1) {
2322 if (!spin_trylock(&inode->i_lock)) {
2323 spin_unlock(&dentry->d_lock);
2324 cpu_relax();
2325 goto again;
2326 }
2327 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2328 dentry_unlink_inode(dentry);
2329 fsnotify_nameremove(dentry, isdir);
2330 return;
2331 }
2332
2333 if (!d_unhashed(dentry))
2334 __d_drop(dentry);
2335
2336 spin_unlock(&dentry->d_lock);
2337
2338 fsnotify_nameremove(dentry, isdir);
2339 }
2340 EXPORT_SYMBOL(d_delete);
2341
2342 static void __d_rehash(struct dentry *entry)
2343 {
2344 struct hlist_bl_head *b = d_hash(entry->d_name.hash);
2345 BUG_ON(!d_unhashed(entry));
2346 hlist_bl_lock(b);
2347 hlist_bl_add_head_rcu(&entry->d_hash, b);
2348 hlist_bl_unlock(b);
2349 }
2350
2351 /**
2352 * d_rehash - add an entry back to the hash
2353 * @entry: dentry to add to the hash
2354 *
2355 * Adds a dentry to the hash according to its name.
2356 */
2357
2358 void d_rehash(struct dentry * entry)
2359 {
2360 spin_lock(&entry->d_lock);
2361 __d_rehash(entry);
2362 spin_unlock(&entry->d_lock);
2363 }
2364 EXPORT_SYMBOL(d_rehash);
2365
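/*
 * Editor's note: i_dir_seq is a hand-rolled sequence count for directory
 * additions. start_dir_add() spins until it can move the count from even
 * to odd ("insertion in progress"); end_dir_add() publishes the next even
 * value with release semantics. d_alloc_parallel() samples the count with
 * smp_load_acquire(), masks off the low bit, and retries its lookup if
 * the directory changed underneath it.
 */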
2366 static inline unsigned start_dir_add(struct inode *dir)
2367 {
2368
2369 for (;;) {
2370 unsigned n = dir->i_dir_seq;
2371 if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
2372 return n;
2373 cpu_relax();
2374 }
2375 }
2376
2377 static inline void end_dir_add(struct inode *dir, unsigned n)
2378 {
2379 smp_store_release(&dir->i_dir_seq, n + 2);
2380 }
2381
2382 static void d_wait_lookup(struct dentry *dentry)
2383 {
2384 if (d_in_lookup(dentry)) {
2385 DECLARE_WAITQUEUE(wait, current);
2386 add_wait_queue(dentry->d_wait, &wait);
2387 do {
2388 set_current_state(TASK_UNINTERRUPTIBLE);
2389 spin_unlock(&dentry->d_lock);
2390 schedule();
2391 spin_lock(&dentry->d_lock);
2392 } while (d_in_lookup(dentry));
2393 }
2394 }
2395
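/*
 * Editor's note: d_alloc_parallel() either returns an existing dentry
 * (hashed, or one whose in-progress lookup it waits out) with a reference
 * held, or inserts a freshly allocated dentry into the in-lookup hash and
 * returns it with DCACHE_PAR_LOOKUP set. In the latter case the caller
 * owns the lookup and must finish it via d_lookup_done(), or via
 * d_add()/d_splice_alias(), which call __d_lookup_done() internally.
 */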
2396 struct dentry *d_alloc_parallel(struct dentry *parent,
2397 const struct qstr *name,
2398 wait_queue_head_t *wq)
2399 {
2400 unsigned int hash = name->hash;
2401 struct hlist_bl_head *b = in_lookup_hash(parent, hash);
2402 struct hlist_bl_node *node;
2403 struct dentry *new = d_alloc(parent, name);
2404 struct dentry *dentry;
2405 unsigned seq, r_seq, d_seq;
2406
2407 if (unlikely(!new))
2408 return ERR_PTR(-ENOMEM);
2409
2410 retry:
2411 rcu_read_lock();
2412 seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1;
2413 r_seq = read_seqbegin(&rename_lock);
2414 dentry = __d_lookup_rcu(parent, name, &d_seq);
2415 if (unlikely(dentry)) {
2416 if (!lockref_get_not_dead(&dentry->d_lockref)) {
2417 rcu_read_unlock();
2418 goto retry;
2419 }
2420 if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
2421 rcu_read_unlock();
2422 dput(dentry);
2423 goto retry;
2424 }
2425 rcu_read_unlock();
2426 dput(new);
2427 return dentry;
2428 }
2429 if (unlikely(read_seqretry(&rename_lock, r_seq))) {
2430 rcu_read_unlock();
2431 goto retry;
2432 }
2433 hlist_bl_lock(b);
2434 if (unlikely(parent->d_inode->i_dir_seq != seq)) {
2435 hlist_bl_unlock(b);
2436 rcu_read_unlock();
2437 goto retry;
2438 }
2439 /*
2440 * No changes for the parent since the beginning of d_lookup().
2441 * Since all removals from the chain happen with hlist_bl_lock(),
2442 * any potential in-lookup matches are going to stay here until
2443 * we unlock the chain. All fields are stable in everything
2444 * we encounter.
2445 */
2446 hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
2447 if (dentry->d_name.hash != hash)
2448 continue;
2449 if (dentry->d_parent != parent)
2450 continue;
2451 if (!d_same_name(dentry, parent, name))
2452 continue;
2453 hlist_bl_unlock(b);
2454 /* now we can try to grab a reference */
2455 if (!lockref_get_not_dead(&dentry->d_lockref)) {
2456 rcu_read_unlock();
2457 goto retry;
2458 }
2459
2460 rcu_read_unlock();
2461 /*
2462 * somebody is likely to be still doing lookup for it;
2463 * wait for them to finish
2464 */
2465 spin_lock(&dentry->d_lock);
2466 d_wait_lookup(dentry);
2467 /*
2468 * it's not in-lookup anymore; in principle we should repeat
2469 * everything from dcache lookup, but it's likely to be what
2470 * d_lookup() would've found anyway. If it is, just return it;
2471 * otherwise we really have to repeat the whole thing.
2472 */
2473 if (unlikely(dentry->d_name.hash != hash))
2474 goto mismatch;
2475 if (unlikely(dentry->d_parent != parent))
2476 goto mismatch;
2477 if (unlikely(d_unhashed(dentry)))
2478 goto mismatch;
2479 if (unlikely(!d_same_name(dentry, parent, name)))
2480 goto mismatch;
2481 /* OK, it *is* a hashed match; return it */
2482 spin_unlock(&dentry->d_lock);
2483 dput(new);
2484 return dentry;
2485 }
2486 rcu_read_unlock();
2487 /* we can't take ->d_lock here; it's OK, though. */
2488 new->d_flags |= DCACHE_PAR_LOOKUP;
2489 new->d_wait = wq;
2490 hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
2491 hlist_bl_unlock(b);
2492 return new;
2493 mismatch:
2494 spin_unlock(&dentry->d_lock);
2495 dput(dentry);
2496 goto retry;
2497 }
2498 EXPORT_SYMBOL(d_alloc_parallel);
2499
2500 void __d_lookup_done(struct dentry *dentry)
2501 {
2502 struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
2503 dentry->d_name.hash);
2504 hlist_bl_lock(b);
2505 dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
2506 __hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
2507 wake_up_all(dentry->d_wait);
2508 dentry->d_wait = NULL;
2509 hlist_bl_unlock(b);
2510 INIT_HLIST_NODE(&dentry->d_u.d_alias);
2511 INIT_LIST_HEAD(&dentry->d_lru);
2512 }
2513 EXPORT_SYMBOL(__d_lookup_done);
2514
2515 /* inode->i_lock held if inode is non-NULL */
2516
2517 static inline void __d_add(struct dentry *dentry, struct inode *inode)
2518 {
2519 struct inode *dir = NULL;
2520 unsigned n;
2521 spin_lock(&dentry->d_lock);
2522 if (unlikely(d_in_lookup(dentry))) {
2523 dir = dentry->d_parent->d_inode;
2524 n = start_dir_add(dir);
2525 __d_lookup_done(dentry);
2526 }
2527 if (inode) {
2528 unsigned add_flags = d_flags_for_inode(inode);
2529 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2530 raw_write_seqcount_begin(&dentry->d_seq);
2531 __d_set_inode_and_type(dentry, inode, add_flags);
2532 raw_write_seqcount_end(&dentry->d_seq);
2533 fsnotify_update_flags(dentry);
2534 }
2535 __d_rehash(dentry);
2536 if (dir)
2537 end_dir_add(dir, n);
2538 spin_unlock(&dentry->d_lock);
2539 if (inode)
2540 spin_unlock(&inode->i_lock);
2541 }
2542
2543 /**
2544 * d_add - add dentry to hash queues
2545 * @entry: dentry to add
2546 * @inode: The inode to attach to this dentry
2547 *
2548 * This adds the entry to the hash queues and attaches @inode to it.
2549 * The entry was actually filled in earlier during d_alloc().
2550 */
2551
2552 void d_add(struct dentry *entry, struct inode *inode)
2553 {
2554 if (inode) {
2555 security_d_instantiate(entry, inode);
2556 spin_lock(&inode->i_lock);
2557 }
2558 __d_add(entry, inode);
2559 }
2560 EXPORT_SYMBOL(d_add);
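
/*
 * Editor's sketch (not in the original source): the classic ->lookup()
 * shape around d_add(). A NULL inode makes the dentry negative, which
 * is how a miss gets cached. myfs_find_inode() is hypothetical.
 */
static struct dentry *myfs_lookup_classic(struct inode *dir,
					  struct dentry *dentry,
					  unsigned int flags)
{
	struct inode *inode = myfs_find_inode(dir, &dentry->d_name);

	if (IS_ERR(inode))
		return ERR_CAST(inode);
	d_add(dentry, inode);
	return NULL;
}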
2561
2562 /**
2563 * d_exact_alias - find and hash an exact unhashed alias
2564 * @entry: dentry to add
2565 * @inode: The inode to go with this dentry
2566 *
2567 * If an unhashed dentry with the same name/parent and desired
2568 * inode already exists, hash and return it. Otherwise, return
2569 * NULL.
2570 *
2571 * Parent directory should be locked.
2572 */
2573 struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
2574 {
2575 struct dentry *alias;
2576 unsigned int hash = entry->d_name.hash;
2577
2578 spin_lock(&inode->i_lock);
2579 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
2580 /*
2581 * Don't need alias->d_lock here, because aliases with
2582 * d_parent == entry->d_parent are not subject to name or
2583 * parent changes, because the parent inode i_mutex is held.
2584 */
2585 if (alias->d_name.hash != hash)
2586 continue;
2587 if (alias->d_parent != entry->d_parent)
2588 continue;
2589 if (!d_same_name(alias, entry->d_parent, &entry->d_name))
2590 continue;
2591 spin_lock(&alias->d_lock);
2592 if (!d_unhashed(alias)) {
2593 spin_unlock(&alias->d_lock);
2594 alias = NULL;
2595 } else {
2596 __dget_dlock(alias);
2597 __d_rehash(alias);
2598 spin_unlock(&alias->d_lock);
2599 }
2600 spin_unlock(&inode->i_lock);
2601 return alias;
2602 }
2603 spin_unlock(&inode->i_lock);
2604 return NULL;
2605 }
2606 EXPORT_SYMBOL(d_exact_alias);
2607
2608 /**
2609 * dentry_update_name_case - update case insensitive dentry with a new name
2610 * @dentry: dentry to be updated
2611 * @name: new name
2612 *
2613 * Update a case insensitive dentry with new case of name.
2614 *
2615 * dentry must have been returned by d_lookup with name @name. Old and new
2616 * name lengths must match (ie. no d_compare which allows mismatched name
2617 * lengths).
2618 *
2619 * Parent inode i_mutex must be held over d_lookup and into this call (to
2620 * keep renames and concurrent inserts, and readdir(2) away).
2621 */
2622 void dentry_update_name_case(struct dentry *dentry, const struct qstr *name)
2623 {
2624 BUG_ON(!inode_is_locked(dentry->d_parent->d_inode));
2625 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2626
2627 spin_lock(&dentry->d_lock);
2628 write_seqcount_begin(&dentry->d_seq);
2629 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2630 write_seqcount_end(&dentry->d_seq);
2631 spin_unlock(&dentry->d_lock);
2632 }
2633 EXPORT_SYMBOL(dentry_update_name_case);
2634
2635 static void swap_names(struct dentry *dentry, struct dentry *target)
2636 {
2637 if (unlikely(dname_external(target))) {
2638 if (unlikely(dname_external(dentry))) {
2639 /*
2640 * Both external: swap the pointers
2641 */
2642 swap(target->d_name.name, dentry->d_name.name);
2643 } else {
2644 /*
2645 * dentry:internal, target:external. Steal target's
2646 * storage and make target internal.
2647 */
2648 memcpy(target->d_iname, dentry->d_name.name,
2649 dentry->d_name.len + 1);
2650 dentry->d_name.name = target->d_name.name;
2651 target->d_name.name = target->d_iname;
2652 }
2653 } else {
2654 if (unlikely(dname_external(dentry))) {
2655 /*
2656 * dentry:external, target:internal. Give dentry's
2657 * storage to target and make dentry internal
2658 */
2659 memcpy(dentry->d_iname, target->d_name.name,
2660 target->d_name.len + 1);
2661 target->d_name.name = dentry->d_name.name;
2662 dentry->d_name.name = dentry->d_iname;
2663 } else {
2664 /*
2665 * Both are internal.
2666 */
2667 unsigned int i;
2668 BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2669 kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN);
2670 kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN);
2671 for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2672 swap(((long *) &dentry->d_iname)[i],
2673 ((long *) &target->d_iname)[i]);
2674 }
2675 }
2676 }
2677 swap(dentry->d_name.hash_len, target->d_name.hash_len);
2678 }
2679
2680 static void copy_name(struct dentry *dentry, struct dentry *target)
2681 {
2682 struct external_name *old_name = NULL;
2683 if (unlikely(dname_external(dentry)))
2684 old_name = external_name(dentry);
2685 if (unlikely(dname_external(target))) {
2686 atomic_inc(&external_name(target)->u.count);
2687 dentry->d_name = target->d_name;
2688 } else {
2689 memcpy(dentry->d_iname, target->d_name.name,
2690 target->d_name.len + 1);
2691 dentry->d_name.name = dentry->d_iname;
2692 dentry->d_name.hash_len = target->d_name.hash_len;
2693 }
2694 if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
2695 kfree_rcu(old_name, u.head);
2696 }
2697
2698 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2699 {
2700 /*
2701 * XXXX: do we really need to take target->d_lock?
2702 */
2703 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2704 spin_lock(&target->d_parent->d_lock);
2705 else {
2706 if (d_ancestor(dentry->d_parent, target->d_parent)) {
2707 spin_lock(&dentry->d_parent->d_lock);
2708 spin_lock_nested(&target->d_parent->d_lock,
2709 DENTRY_D_LOCK_NESTED);
2710 } else {
2711 spin_lock(&target->d_parent->d_lock);
2712 spin_lock_nested(&dentry->d_parent->d_lock,
2713 DENTRY_D_LOCK_NESTED);
2714 }
2715 }
2716 if (target < dentry) {
2717 spin_lock_nested(&target->d_lock, 2);
2718 spin_lock_nested(&dentry->d_lock, 3);
2719 } else {
2720 spin_lock_nested(&dentry->d_lock, 2);
2721 spin_lock_nested(&target->d_lock, 3);
2722 }
2723 }
2724
2725 static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target)
2726 {
2727 if (target->d_parent != dentry->d_parent)
2728 spin_unlock(&dentry->d_parent->d_lock);
2729 if (target->d_parent != target)
2730 spin_unlock(&target->d_parent->d_lock);
2731 spin_unlock(&target->d_lock);
2732 spin_unlock(&dentry->d_lock);
2733 }
2734
2735 /*
2736 * When switching names, the actual string doesn't strictly have to
2737 * be preserved in the target - because we're dropping the target
2738 * anyway. As such, we can just do a simple memcpy() to copy over
2739 * the new name before we switch, unless we are going to rehash
2740 * it. Note that if we *do* unhash the target, we are not allowed
2741 * to rehash it without giving it a new name/hash key - whether
2742 * we swap or overwrite the names here, the resulting name won't match
2743 * the reality in the filesystem; it's only there for d_path() purposes.
2744 * Note that all of this is happening under rename_lock, so any
2745 * hash lookup seeing it in the middle of manipulations will
2746 * be discarded anyway. So we do not care what happens to the hash
2747 * key in that case.
2748 */
2749 /*
2750 * __d_move - move a dentry
2751 * @dentry: entry to move
2752 * @target: new dentry
2753 * @exchange: exchange the two dentries
2754 *
2755 * Update the dcache to reflect the move of a file name. Negative
2756 * dcache entries should not be moved in this way. Caller must hold
2757 * rename_lock, the i_mutex of the source and target directories,
2758 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2759 */
2760 static void __d_move(struct dentry *dentry, struct dentry *target,
2761 bool exchange)
2762 {
2763 struct inode *dir = NULL;
2764 unsigned n;
2765 if (!dentry->d_inode)
2766 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2767
2768 BUG_ON(d_ancestor(dentry, target));
2769 BUG_ON(d_ancestor(target, dentry));
2770
2771 dentry_lock_for_move(dentry, target);
2772 if (unlikely(d_in_lookup(target))) {
2773 dir = target->d_parent->d_inode;
2774 n = start_dir_add(dir);
2775 __d_lookup_done(target);
2776 }
2777
2778 write_seqcount_begin(&dentry->d_seq);
2779 write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2780
2781 /* unhash both */
2782 /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
2783 __d_drop(dentry);
2784 __d_drop(target);
2785
2786 /* Switch the names.. */
2787 if (exchange)
2788 swap_names(dentry, target);
2789 else
2790 copy_name(dentry, target);
2791
2792 /* rehash in new place(s) */
2793 __d_rehash(dentry);
2794 if (exchange)
2795 __d_rehash(target);
2796
2797 /* ... and switch them in the tree */
2798 if (IS_ROOT(dentry)) {
2799 /* splicing a tree */
2800 dentry->d_flags |= DCACHE_RCUACCESS;
2801 dentry->d_parent = target->d_parent;
2802 target->d_parent = target;
2803 list_del_init(&target->d_child);
2804 list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2805 } else {
2806 /* swapping two dentries */
2807 swap(dentry->d_parent, target->d_parent);
2808 list_move(&target->d_child, &target->d_parent->d_subdirs);
2809 list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2810 if (exchange)
2811 fsnotify_update_flags(target);
2812 fsnotify_update_flags(dentry);
2813 }
2814
2815 write_seqcount_end(&target->d_seq);
2816 write_seqcount_end(&dentry->d_seq);
2817
2818 if (dir)
2819 end_dir_add(dir, n);
2820 dentry_unlock_for_move(dentry, target);
2821 }
2822
2823 /**
2824 * d_move - move a dentry
2825 * @dentry: entry to move
2826 * @target: new dentry
2827 *
2828 * Update the dcache to reflect the move of a file name. Negative
2829 * dcache entries should not be moved in this way. See the locking
2830 * requirements for __d_move.
2831 */
2832 void d_move(struct dentry *dentry, struct dentry *target)
2833 {
2834 write_seqlock(&rename_lock);
2835 __d_move(dentry, target, false);
2836 write_sequnlock(&rename_lock);
2837 }
2838 EXPORT_SYMBOL(d_move);
2839
2840 /**
2841 * d_exchange - exchange two dentries
2842 * @dentry1: first dentry
2843 * @dentry2: second dentry
2844 */
2845 void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2846 {
2847 write_seqlock(&rename_lock);
2848
2849 WARN_ON(!dentry1->d_inode);
2850 WARN_ON(!dentry2->d_inode);
2851 WARN_ON(IS_ROOT(dentry1));
2852 WARN_ON(IS_ROOT(dentry2));
2853
2854 __d_move(dentry1, dentry2, true);
2855
2856 write_sequnlock(&rename_lock);
2857 }
2858
2859 /**
2860 * d_ancestor - search for an ancestor
2861 * @p1: ancestor dentry
2862 * @p2: child dentry
2863 *
2864 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2865 * an ancestor of p2, else NULL.
2866 */
2867 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2868 {
2869 struct dentry *p;
2870
2871 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2872 if (p->d_parent == p1)
2873 return p;
2874 }
2875 return NULL;
2876 }
2877
2878 /*
2879 * This helper attempts to cope with remotely renamed directories
2880 *
2881 * It assumes that the caller is already holding
2882 * dentry->d_parent->d_inode->i_mutex, and rename_lock
2883 *
2884 * Note: If ever the locking in lock_rename() changes, then please
2885 * remember to update this too...
2886 */
2887 static int __d_unalias(struct inode *inode,
2888 struct dentry *dentry, struct dentry *alias)
2889 {
2890 struct mutex *m1 = NULL;
2891 struct rw_semaphore *m2 = NULL;
2892 int ret = -ESTALE;
2893
2894 /* If alias and dentry share a parent, then no extra locks required */
2895 if (alias->d_parent == dentry->d_parent)
2896 goto out_unalias;
2897
2898 /* See lock_rename() */
2899 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2900 goto out_err;
2901 m1 = &dentry->d_sb->s_vfs_rename_mutex;
2902 if (!inode_trylock_shared(alias->d_parent->d_inode))
2903 goto out_err;
2904 m2 = &alias->d_parent->d_inode->i_rwsem;
2905 out_unalias:
2906 __d_move(alias, dentry, false);
2907 ret = 0;
2908 out_err:
2909 if (m2)
2910 up_read(m2);
2911 if (m1)
2912 mutex_unlock(m1);
2913 return ret;
2914 }
2915
2916 /**
2917 * d_splice_alias - splice a disconnected dentry into the tree if one exists
2918 * @inode: the inode which may have a disconnected dentry
2919 * @dentry: a negative dentry which we want to point to the inode.
2920 *
2921 * If inode is a directory and has an IS_ROOT alias, then d_move that in
2922 * place of the given dentry and return it, else simply d_add the inode
2923 * to the dentry and return NULL.
2924 *
2925 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
2926 * we should error out: directories can't have multiple aliases.
2927 *
2928 * This is needed in the lookup routine of any filesystem that is exportable
2929 * (via knfsd) so that we can build dcache paths to directories effectively.
2930 *
2931 * If a dentry was found and moved, then it is returned. Otherwise NULL
2932 * is returned. This matches the expected return value of ->lookup.
2933 *
2934 * Cluster filesystems may call this function with a negative, hashed dentry.
2935 * In that case, we know that the inode will be a regular file, and also this
2936 * will only occur during atomic_open. So we need to check for the dentry
2937 * being already hashed only in the final case.
2938 */
2939 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
2940 {
2941 if (IS_ERR(inode))
2942 return ERR_CAST(inode);
2943
2944 BUG_ON(!d_unhashed(dentry));
2945
2946 if (!inode)
2947 goto out;
2948
2949 security_d_instantiate(dentry, inode);
2950 spin_lock(&inode->i_lock);
2951 if (S_ISDIR(inode->i_mode)) {
2952 struct dentry *new = __d_find_any_alias(inode);
2953 if (unlikely(new)) {
2954 /* The reference to new ensures it remains an alias */
2955 spin_unlock(&inode->i_lock);
2956 write_seqlock(&rename_lock);
2957 if (unlikely(d_ancestor(new, dentry))) {
2958 write_sequnlock(&rename_lock);
2959 dput(new);
2960 new = ERR_PTR(-ELOOP);
2961 pr_warn_ratelimited(
2962 "VFS: Lookup of '%s' in %s %s"
2963 " would have caused loop\n",
2964 dentry->d_name.name,
2965 inode->i_sb->s_type->name,
2966 inode->i_sb->s_id);
2967 } else if (!IS_ROOT(new)) {
2968 int err = __d_unalias(inode, dentry, new);
2969 write_sequnlock(&rename_lock);
2970 if (err) {
2971 dput(new);
2972 new = ERR_PTR(err);
2973 }
2974 } else {
2975 __d_move(new, dentry, false);
2976 write_sequnlock(&rename_lock);
2977 }
2978 iput(inode);
2979 return new;
2980 }
2981 }
2982 out:
2983 __d_add(dentry, inode);
2984 return NULL;
2985 }
2986 EXPORT_SYMBOL(d_splice_alias);
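
/*
 * Editor's sketch (not in the original source): the ->lookup() tail
 * recommended above for exportable filesystems. myfs_iget_by_name() is
 * hypothetical; a NULL inode means "no such entry" and yields a
 * negative dentry.
 */
static struct dentry *myfs_lookup_export(struct inode *dir,
					 struct dentry *dentry,
					 unsigned int flags)
{
	struct inode *inode = myfs_iget_by_name(dir, &dentry->d_name);

	/* handles ERR_PTR inodes and preexisting directory aliases */
	return d_splice_alias(inode, dentry);
}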
2987
2988 static int prepend(char **buffer, int *buflen, const char *str, int namelen)
2989 {
2990 *buflen -= namelen;
2991 if (*buflen < 0)
2992 return -ENAMETOOLONG;
2993 *buffer -= namelen;
2994 memcpy(*buffer, str, namelen);
2995 return 0;
2996 }
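
/*
 * Editor's sketch (not in the original source): prepend() fills the
 * buffer from the end backwards, which is why every d_path()-style
 * helper returns a pointer into the middle of the buffer rather than
 * the buffer start.
 */
static void prepend_demo(void)
{
	char buf[16];
	char *p = buf + sizeof(buf);
	int len = sizeof(buf);

	prepend(&p, &len, "\0", 1);	/* terminator goes in first */
	prepend(&p, &len, "bar", 3);
	prepend(&p, &len, "/", 1);
	prepend(&p, &len, "foo", 3);
	prepend(&p, &len, "/", 1);
	/* p now points at "/foo/bar"; buf[0..6] remain unused */
}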
2997
2998 /**
2999 * prepend_name - prepend a pathname in front of current buffer pointer
3000 * @buffer: buffer pointer
3001 * @buflen: allocated length of the buffer
3002 * @name: name string and length qstr structure
3003 *
3004 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
3005 * make sure that either the old or the new name pointer and length are
3006 * fetched. However, there may be a mismatch between length and pointer.
3007 * The length cannot be trusted; we need to copy the name byte by byte until
3008 * the length is reached or a null byte is found. It also prepends "/" at
3009 * the beginning of the name. The sequence number check at the caller will
3010 * retry it again when a d_move() does happen. So any garbage in the buffer
3011 * due to mismatched pointer and length will be discarded.
3012 *
3013 * Data dependency barrier is needed to make sure that we see that terminating
3014 * NUL. Alpha strikes again, film at 11...
3015 */
3016 static int prepend_name(char **buffer, int *buflen, const struct qstr *name)
3017 {
3018 const char *dname = ACCESS_ONCE(name->name);
3019 u32 dlen = ACCESS_ONCE(name->len);
3020 char *p;
3021
3022 smp_read_barrier_depends();
3023
3024 *buflen -= dlen + 1;
3025 if (*buflen < 0)
3026 return -ENAMETOOLONG;
3027 p = *buffer -= dlen + 1;
3028 *p++ = '/';
3029 while (dlen--) {
3030 char c = *dname++;
3031 if (!c)
3032 break;
3033 *p++ = c;
3034 }
3035 return 0;
3036 }
3037
3038 /**
3039 * prepend_path - Prepend path string to a buffer
3040 * @path: the dentry/vfsmount to report
3041 * @root: root vfsmnt/dentry
3042 * @buffer: pointer to the end of the buffer
3043 * @buflen: pointer to buffer length
3044 *
3045 * The function will first try to write out the pathname without taking any
3046 * lock other than the RCU read lock to make sure that dentries won't go away.
3047 * It only checks the sequence number of the global rename_lock as any change
3048 * in the dentry's d_seq will be preceded by changes in the rename_lock
3049 * sequence number. If the sequence number has changed, it will restart
3050 * the whole pathname back-tracing sequence again by taking the rename_lock.
3051 * In this case, there is no need to take the RCU read lock as the recursive
3052 * parent pointer references will keep the dentry chain alive as long as no
3053 * rename operation is performed.
3054 */
3055 static int prepend_path(const struct path *path,
3056 const struct path *root,
3057 char **buffer, int *buflen)
3058 {
3059 struct dentry *dentry;
3060 struct vfsmount *vfsmnt;
3061 struct mount *mnt;
3062 int error = 0;
3063 unsigned seq, m_seq = 0;
3064 char *bptr;
3065 int blen;
3066
3067 rcu_read_lock();
3068 restart_mnt:
3069 read_seqbegin_or_lock(&mount_lock, &m_seq);
3070 seq = 0;
3071 rcu_read_lock();
3072 restart:
3073 bptr = *buffer;
3074 blen = *buflen;
3075 error = 0;
3076 dentry = path->dentry;
3077 vfsmnt = path->mnt;
3078 mnt = real_mount(vfsmnt);
3079 read_seqbegin_or_lock(&rename_lock, &seq);
3080 while (dentry != root->dentry || vfsmnt != root->mnt) {
3081 struct dentry * parent;
3082
3083 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
3084 struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
3085 /* Escaped? */
3086 if (dentry != vfsmnt->mnt_root) {
3087 bptr = *buffer;
3088 blen = *buflen;
3089 error = 3;
3090 break;
3091 }
3092 /* Global root? */
3093 if (mnt != parent) {
3094 dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
3095 mnt = parent;
3096 vfsmnt = &mnt->mnt;
3097 continue;
3098 }
3099 if (!error)
3100 error = is_mounted(vfsmnt) ? 1 : 2;
3101 break;
3102 }
3103 parent = dentry->d_parent;
3104 prefetch(parent);
3105 error = prepend_name(&bptr, &blen, &dentry->d_name);
3106 if (error)
3107 break;
3108
3109 dentry = parent;
3110 }
3111 if (!(seq & 1))
3112 rcu_read_unlock();
3113 if (need_seqretry(&rename_lock, seq)) {
3114 seq = 1;
3115 goto restart;
3116 }
3117 done_seqretry(&rename_lock, seq);
3118
3119 if (!(m_seq & 1))
3120 rcu_read_unlock();
3121 if (need_seqretry(&mount_lock, m_seq)) {
3122 m_seq = 1;
3123 goto restart_mnt;
3124 }
3125 done_seqretry(&mount_lock, m_seq);
3126
3127 if (error >= 0 && bptr == *buffer) {
3128 if (--blen < 0)
3129 error = -ENAMETOOLONG;
3130 else
3131 *--bptr = '/';
3132 }
3133 *buffer = bptr;
3134 *buflen = blen;
3135 return error;
3136 }
3137
3138 /**
3139 * __d_path - return the path of a dentry
3140 * @path: the dentry/vfsmount to report
3141 * @root: root vfsmnt/dentry
3142 * @buf: buffer to return value in
3143 * @buflen: buffer length
3144 *
3145 * Convert a dentry into an ASCII path name.
3146 *
3147 * Returns a pointer into the buffer or an error code if the
3148 * path was too long.
3149 *
3150 * "buflen" should be positive.
3151 *
3152 * If the path is not reachable from the supplied root, return %NULL.
3153 */
3154 char *__d_path(const struct path *path,
3155 const struct path *root,
3156 char *buf, int buflen)
3157 {
3158 char *res = buf + buflen;
3159 int error;
3160
3161 prepend(&res, &buflen, "\0", 1);
3162 error = prepend_path(path, root, &res, &buflen);
3163
3164 if (error < 0)
3165 return ERR_PTR(error);
3166 if (error > 0)
3167 return NULL;
3168 return res;
3169 }
3170
3171 char *d_absolute_path(const struct path *path,
3172 char *buf, int buflen)
3173 {
3174 struct path root = {};
3175 char *res = buf + buflen;
3176 int error;
3177
3178 prepend(&res, &buflen, "\0", 1);
3179 error = prepend_path(path, &root, &res, &buflen);
3180
3181 if (error > 1)
3182 error = -EINVAL;
3183 if (error < 0)
3184 return ERR_PTR(error);
3185 return res;
3186 }
3187
3188 /*
3189 * same as __d_path but appends "(deleted)" for unlinked files.
3190 */
3191 static int path_with_deleted(const struct path *path,
3192 const struct path *root,
3193 char **buf, int *buflen)
3194 {
3195 prepend(buf, buflen, "\0", 1);
3196 if (d_unlinked(path->dentry)) {
3197 int error = prepend(buf, buflen, " (deleted)", 10);
3198 if (error)
3199 return error;
3200 }
3201
3202 return prepend_path(path, root, buf, buflen);
3203 }
3204
3205 static int prepend_unreachable(char **buffer, int *buflen)
3206 {
3207 return prepend(buffer, buflen, "(unreachable)", 13);
3208 }
3209
3210 static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
3211 {
3212 unsigned seq;
3213
3214 do {
3215 seq = read_seqcount_begin(&fs->seq);
3216 *root = fs->root;
3217 } while (read_seqcount_retry(&fs->seq, seq));
3218 }
3219
3220 /**
3221 * d_path - return the path of a dentry
3222 * @path: path to report
3223 * @buf: buffer to return value in
3224 * @buflen: buffer length
3225 *
3226 * Convert a dentry into an ASCII path name. If the entry has been deleted
3227 * the string " (deleted)" is appended. Note that this is ambiguous.
3228 *
3229 * Returns a pointer into the buffer or an error code if the path was
3230 * too long. Note: Callers should use the returned pointer, not the passed
3231 * in buffer, to use the name! The implementation often starts at an offset
3232 * into the buffer, and may leave 0 bytes at the start.
3233 *
3234 * "buflen" should be positive.
3235 */
3236 char *d_path(const struct path *path, char *buf, int buflen)
3237 {
3238 char *res = buf + buflen;
3239 struct path root;
3240 int error;
3241
3242 /*
3243 * We have various synthetic filesystems that never get mounted. On
3244 * these filesystems dentries are never used for lookup purposes, and
3245 * thus don't need to be hashed. They also don't need a name until a
3246 * user wants to identify the object in /proc/pid/fd/. The little hack
3247 * below allows us to generate a name for these objects on demand:
3248 *
3249 * Some pseudo inodes are mountable. When they are mounted
3250 * path->dentry == path->mnt->mnt_root. In that case don't call d_dname
3251 * and instead have d_path return the mounted path.
3252 */
3253 if (path->dentry->d_op && path->dentry->d_op->d_dname &&
3254 (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
3255 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
3256
3257 rcu_read_lock();
3258 get_fs_root_rcu(current->fs, &root);
3259 error = path_with_deleted(path, &root, &res, &buflen);
3260 rcu_read_unlock();
3261
3262 if (error < 0)
3263 res = ERR_PTR(error);
3264 return res;
3265 }
3266 EXPORT_SYMBOL(d_path);
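
/*
 * Editor's sketch (not in the original source): canonical d_path()
 * usage. The result pointer lands somewhere inside the buffer, not
 * necessarily at its start, as the comment above warns.
 */
static void myfs_log_path(const struct path *path)
{
	char *buf = (char *)__get_free_page(GFP_KERNEL);
	char *p;

	if (!buf)
		return;
	p = d_path(path, buf, PAGE_SIZE);
	if (!IS_ERR(p))
		printk(KERN_DEBUG "myfs: %s\n", p);
	free_page((unsigned long)buf);
}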
3267
3268 /*
3269 * Helper function for dentry_operations.d_dname() members
3270 */
3271 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
3272 const char *fmt, ...)
3273 {
3274 va_list args;
3275 char temp[64];
3276 int sz;
3277
3278 va_start(args, fmt);
3279 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
3280 va_end(args);
3281
3282 if (sz > sizeof(temp) || sz > buflen)
3283 return ERR_PTR(-ENAMETOOLONG);
3284
3285 buffer += buflen - sz;
3286 return memcpy(buffer, temp, sz);
3287 }
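
/*
 * Editor's sketch (not in the original source): a ->d_dname()
 * implementation in the style of pipefs, built on the helper above.
 */
static char *myfs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "myfs:[%lu]",
			     d_inode(dentry)->i_ino);
}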
3288
3289 char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
3290 {
3291 char *end = buffer + buflen;
3292 /* these dentries are never renamed, so d_lock is not needed */
3293 if (prepend(&end, &buflen, " (deleted)", 11) ||
3294 prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
3295 prepend(&end, &buflen, "/", 1))
3296 end = ERR_PTR(-ENAMETOOLONG);
3297 return end;
3298 }
3299 EXPORT_SYMBOL(simple_dname);
3300
3301 /*
3302 * Write full pathname from the root of the filesystem into the buffer.
3303 */
3304 static char *__dentry_path(struct dentry *d, char *buf, int buflen)
3305 {
3306 struct dentry *dentry;
3307 char *end, *retval;
3308 int len, seq = 0;
3309 int error = 0;
3310
3311 if (buflen < 2)
3312 goto Elong;
3313
3314 rcu_read_lock();
3315 restart:
3316 dentry = d;
3317 end = buf + buflen;
3318 len = buflen;
3319 prepend(&end, &len, "\0", 1);
3320 /* Get '/' right */
3321 retval = end-1;
3322 *retval = '/';
3323 read_seqbegin_or_lock(&rename_lock, &seq);
3324 while (!IS_ROOT(dentry)) {
3325 struct dentry *parent = dentry->d_parent;
3326
3327 prefetch(parent);
3328 error = prepend_name(&end, &len, &dentry->d_name);
3329 if (error)
3330 break;
3331
3332 retval = end;
3333 dentry = parent;
3334 }
3335 if (!(seq & 1))
3336 rcu_read_unlock();
3337 if (need_seqretry(&rename_lock, seq)) {
3338 seq = 1;
3339 goto restart;
3340 }
3341 done_seqretry(&rename_lock, seq);
3342 if (error)
3343 goto Elong;
3344 return retval;
3345 Elong:
3346 return ERR_PTR(-ENAMETOOLONG);
3347 }
3348
3349 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
3350 {
3351 return __dentry_path(dentry, buf, buflen);
3352 }
3353 EXPORT_SYMBOL(dentry_path_raw);
3354
3355 char *dentry_path(struct dentry *dentry, char *buf, int buflen)
3356 {
3357 char *p = NULL;
3358 char *retval;
3359
3360 if (d_unlinked(dentry)) {
3361 p = buf + buflen;
3362 if (prepend(&p, &buflen, "//deleted", 10) != 0)
3363 goto Elong;
3364 buflen++;
3365 }
3366 retval = __dentry_path(dentry, buf, buflen);
3367 if (!IS_ERR(retval) && p)
3368 		*p = '/';	/* restore '/' overridden with '\0' */
3369 return retval;
3370 Elong:
3371 return ERR_PTR(-ENAMETOOLONG);
3372 }
3373
3374 static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
3375 struct path *pwd)
3376 {
3377 unsigned seq;
3378
3379 do {
3380 seq = read_seqcount_begin(&fs->seq);
3381 *root = fs->root;
3382 *pwd = fs->pwd;
3383 } while (read_seqcount_retry(&fs->seq, seq));
3384 }
3385
3386 /*
3387 * NOTE! The user-level library version returns a
3388 * character pointer. The kernel system call just
3389 * returns the length of the buffer filled (which
3390 * includes the ending '\0' character), or a negative
3391 * error value. So libc would do something like
3392 *
3393 * char *getcwd(char * buf, size_t size)
3394 * {
3395 * int retval;
3396 *
3397 * retval = sys_getcwd(buf, size);
3398 * if (retval >= 0)
3399 * return buf;
3400 * errno = -retval;
3401 * return NULL;
3402 * }
3403 */
3404 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
3405 {
3406 int error;
3407 struct path pwd, root;
3408 char *page = __getname();
3409
3410 if (!page)
3411 return -ENOMEM;
3412
3413 rcu_read_lock();
3414 get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);
3415
3416 error = -ENOENT;
3417 if (!d_unlinked(pwd.dentry)) {
3418 unsigned long len;
3419 char *cwd = page + PATH_MAX;
3420 int buflen = PATH_MAX;
3421
3422 prepend(&cwd, &buflen, "\0", 1);
3423 error = prepend_path(&pwd, &root, &cwd, &buflen);
3424 rcu_read_unlock();
3425
3426 if (error < 0)
3427 goto out;
3428
3429 /* Unreachable from current root */
3430 if (error > 0) {
3431 error = prepend_unreachable(&cwd, &buflen);
3432 if (error)
3433 goto out;
3434 }
3435
3436 error = -ERANGE;
3437 len = PATH_MAX + page - cwd;
3438 if (len <= size) {
3439 error = len;
3440 if (copy_to_user(buf, cwd, len))
3441 error = -EFAULT;
3442 }
3443 } else {
3444 rcu_read_unlock();
3445 }
3446
3447 out:
3448 __putname(page);
3449 return error;
3450 }
3451
3452 /*
3453 * Test whether new_dentry is a subdirectory of old_dentry.
3454 *
3455 * Trivially implemented using the dcache structure
3456 */
3457
3458 /**
3459 * is_subdir - is new dentry a subdirectory of old_dentry
3460 * @new_dentry: new dentry
3461 * @old_dentry: old dentry
3462 *
3463 * Returns true if new_dentry is a subdirectory of old_dentry (at any depth).
3464 * Returns false otherwise.
3465 * Caller must ensure that "new_dentry" is pinned before calling is_subdir().
3466 */
3467
3468 bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3469 {
3470 bool result;
3471 unsigned seq;
3472
3473 if (new_dentry == old_dentry)
3474 return true;
3475
3476 do {
3477 /* for restarting inner loop in case of seq retry */
3478 seq = read_seqbegin(&rename_lock);
3479 /*
3480 * Need rcu_read_lock() to protect against d_parent being trashed
3481 * by a concurrent d_move()
3482 */
3483 rcu_read_lock();
3484 if (d_ancestor(old_dentry, new_dentry))
3485 result = true;
3486 else
3487 result = false;
3488 rcu_read_unlock();
3489 } while (read_seqretry(&rename_lock, seq));
3490
3491 return result;
3492 }
3493
3494 static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3495 {
3496 struct dentry *root = data;
3497 if (dentry != root) {
3498 if (d_unhashed(dentry) || !dentry->d_inode)
3499 return D_WALK_SKIP;
3500
3501 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3502 dentry->d_flags |= DCACHE_GENOCIDE;
3503 dentry->d_lockref.count--;
3504 }
3505 }
3506 return D_WALK_CONTINUE;
3507 }
3508
3509 void d_genocide(struct dentry *parent)
3510 {
3511 d_walk(parent, parent, d_genocide_kill, NULL);
3512 }
3513
3514 void d_tmpfile(struct dentry *dentry, struct inode *inode)
3515 {
3516 inode_dec_link_count(inode);
3517 BUG_ON(dentry->d_name.name != dentry->d_iname ||
3518 !hlist_unhashed(&dentry->d_u.d_alias) ||
3519 !d_unlinked(dentry));
3520 spin_lock(&dentry->d_parent->d_lock);
3521 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3522 dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3523 (unsigned long long)inode->i_ino);
3524 spin_unlock(&dentry->d_lock);
3525 spin_unlock(&dentry->d_parent->d_lock);
3526 d_instantiate(dentry, inode);
3527 }
3528 EXPORT_SYMBOL(d_tmpfile);
3529
3530 static __initdata unsigned long dhash_entries;
3531 static int __init set_dhash_entries(char *str)
3532 {
3533 if (!str)
3534 return 0;
3535 dhash_entries = simple_strtoul(str, &str, 0);
3536 return 1;
3537 }
3538 __setup("dhash_entries=", set_dhash_entries);
3539
3540 static void __init dcache_init_early(void)
3541 {
3542 unsigned int loop;
3543
3544 /* If hashes are distributed across NUMA nodes, defer
3545 * hash allocation until vmalloc space is available.
3546 */
3547 if (hashdist)
3548 return;
3549
3550 dentry_hashtable =
3551 alloc_large_system_hash("Dentry cache",
3552 sizeof(struct hlist_bl_head),
3553 dhash_entries,
3554 13,
3555 HASH_EARLY,
3556 &d_hash_shift,
3557 &d_hash_mask,
3558 0,
3559 0);
3560
3561 for (loop = 0; loop < (1U << d_hash_shift); loop++)
3562 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3563 }
3564
3565 static void __init dcache_init(void)
3566 {
3567 unsigned int loop;
3568
3569 /*
3570 * A constructor could be added for stable state like the lists,
3571 * but it is probably not worth it because of the cache nature
3572 * of the dcache.
3573 */
3574 dentry_cache = KMEM_CACHE(dentry,
3575 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT);
3576
3577 /* Hash may have been set up in dcache_init_early */
3578 if (!hashdist)
3579 return;
3580
3581 dentry_hashtable =
3582 alloc_large_system_hash("Dentry cache",
3583 sizeof(struct hlist_bl_head),
3584 dhash_entries,
3585 13,
3586 0,
3587 &d_hash_shift,
3588 &d_hash_mask,
3589 0,
3590 0);
3591
3592 for (loop = 0; loop < (1U << d_hash_shift); loop++)
3593 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3594 }
3595
3596 /* SLAB cache for __getname() consumers */
3597 struct kmem_cache *names_cachep __read_mostly;
3598 EXPORT_SYMBOL(names_cachep);
3599
3600 EXPORT_SYMBOL(d_genocide);
3601
3602 void __init vfs_caches_init_early(void)
3603 {
3604 dcache_init_early();
3605 inode_init_early();
3606 }
3607
3608 void __init vfs_caches_init(void)
3609 {
3610 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3611 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3612
3613 dcache_init();
3614 inode_init();
3615 files_init();
3616 files_maxfiles_init();
3617 mnt_init();
3618 bdev_cache_init();
3619 chrdev_init();
3620 }