/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <linux/kasan.h>

#include "internal.h"
#include "mount.h"

/*
 * Usage:
 * dentry->d_inode->i_lock protects:
 *   - i_dentry, d_u.d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_anon bl list spinlock protects:
 *   - the s_anon list (see __d_drop)
 * dentry->d_sb->s_dentry_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_count
 *   - d_unhashed()
 *   - d_parent and d_subdirs
 *   - children's d_child and d_parent
 *   - d_u.d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dentry->d_sb->s_dentry_lru_lock
 *     dcache_hash_bucket lock
 *     s_anon lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * if (dentry1 < dentry2)
 *   dentry1->d_lock
 *     dentry2->d_lock
 */
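
/*
 * Illustrative sketch only, not an API in this file: taking d_lock on
 * two dentries with no ancestor relationship, per the address-ordering
 * rule above. DENTRY_D_LOCK_NESTED is the lockdep subclass used below
 * for the second of two dentry locks:
 *
 *	static void lock_two_unrelated(struct dentry *d1, struct dentry *d2)
 *	{
 *		if (d1 > d2)
 *			swap(d1, d2);
 *		spin_lock(&d1->d_lock);
 *		spin_lock_nested(&d2->d_lock, DENTRY_D_LOCK_NESTED);
 *	}
 */
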
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoids using a prime hash-size or similar.
 */

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(unsigned int hash)
{
	return dentry_hashtable + (hash >> (32 - d_hash_shift));
}

#define IN_LOOKUP_SHIFT 10
static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];

static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
}

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Here we resort to our own counters instead of using generic per-cpu counters
 * for consistency with what the vfs inode code does. We are expected to get
 * better code and performance by having our own specialized counters.
 *
 * Please note that the loop is done over all possible CPUs, not over all online
 * CPUs. The reason for this is that we don't want to play games with CPUs going
 * on and off. If one of them goes off, we will just keep its counters.
 *
 * glommer: See cffbc8a for details, and if you ever intend to change this,
 * please update all vfs counters to match.
 */
static long get_nr_dentry(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_unused, i);
	return sum < 0 ? 0 : sum;
}

int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

/*
 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 * The strings are both count bytes long, and count is non-zero.
 */
#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>
/*
 * NOTE! 'cs' and 'scount' come from a dentry, so it has an
 * aligned allocation for this particular component. We don't
 * strictly need the load_unaligned_zeropad() safety, but it
 * doesn't hurt either.
 *
 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 * need the careful unaligned handling.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a, b, mask;

	for (;;) {
		a = *(unsigned long *)cs;
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}
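
/*
 * Worked example (illustrative, assumes a 64-bit little-endian build):
 * with tcount == 3, bytemask_from_count(3) evaluates to
 * 0x0000000000ffffff, so only the low three name bytes of (a ^ b)
 * participate in the final comparison and any bytes loaded beyond the
 * end of the name are ignored.
 */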

#else

static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	do {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}

#endif

static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	/*
	 * Be careful about RCU walk racing with rename:
	 * use 'lockless_dereference' to fetch the name pointer.
	 *
	 * NOTE! Even if a rename will mean that the length
	 * was not loaded atomically, we don't care. The
	 * RCU walk will check the sequence count eventually,
	 * and catch it. And we won't overrun the buffer,
	 * because we're reading the name pointer atomically,
	 * and a dentry name is guaranteed to be properly
	 * terminated with a NUL byte.
	 *
	 * End result: even if 'len' is wrong, we'll exit
	 * early because the data cannot match (there can
	 * be no NUL in the ct/tcount data)
	 */
	const unsigned char *cs = lockless_dereference(dentry->d_name.name);

	return dentry_string_cmp(cs, ct, tcount);
}

struct external_name {
	union {
		atomic_t count;
		struct rcu_head head;
	} u;
	unsigned char name[];
};

static inline struct external_name *external_name(struct dentry *dentry)
{
	return container_of(dentry->d_name.name, struct external_name, name[0]);
}

static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	kmem_cache_free(dentry_cache, dentry);
}

static void __d_free_external(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
	kfree(external_name(dentry));
	kmem_cache_free(dentry_cache, dentry);
}

static inline int dname_external(const struct dentry *dentry)
{
	return dentry->d_name.name != dentry->d_iname;
}

static inline void __d_set_inode_and_type(struct dentry *dentry,
					  struct inode *inode,
					  unsigned type_flags)
{
	unsigned flags;

	dentry->d_inode = inode;
	flags = READ_ONCE(dentry->d_flags);
	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	flags |= type_flags;
	WRITE_ONCE(dentry->d_flags, flags);
}

static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
	unsigned flags = READ_ONCE(dentry->d_flags);

	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	WRITE_ONCE(dentry->d_flags, flags);
	dentry->d_inode = NULL;
}

static void dentry_free(struct dentry *dentry)
{
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->u.count))) {
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
	}
	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}

/**
 * dentry_rcuwalk_invalidate - invalidate in-progress rcu-walk lookups
 * @dentry: the target dentry
 *
 * After this call, in-progress rcu-walk path lookup will fail. This
 * should be called after unhashing, and after changing d_inode (if
 * the dentry has not already been unhashed).
 */
static inline void dentry_rcuwalk_invalidate(struct dentry *dentry)
{
	lockdep_assert_held(&dentry->d_lock);
	/* Go through an invalidation barrier */
	write_seqcount_invalidate(&dentry->d_seq);
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 */
static void dentry_unlink_inode(struct dentry *dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	bool hashed = !d_unhashed(dentry);

	if (hashed)
		raw_write_seqcount_begin(&dentry->d_seq);
	__d_clear_type_and_inode(dentry);
	hlist_del_init(&dentry->d_u.d_alias);
	if (hashed)
		raw_write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}

/*
 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 * is in use - which includes both the "real" per-superblock
 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 *
 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 * on the shrink list (ie not on the superblock LRU list).
 *
 * The per-cpu "nr_dentry_unused" counters are updated with
 * the DCACHE_LRU_LIST bit.
 *
 * These helper functions make sure we always follow the
 * rules. d_lock must be held by the caller.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}

/*
 * These can only be called under the global LRU lock, ie during the
 * callback for freeing the LRU list. "isolate" removes it from the
 * LRU lists entirely, while shrink_move moves it to the indicated
 * private list.
 */
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	list_lru_isolate(lru, &dentry->d_lru);
}

static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
			      struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	list_lru_isolate_move(lru, &dentry->d_lru, list);
}

/*
 * dentry_lru_add() must be called with d_lock held.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
}

/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		/*
		 * Hashed dentries are normally on the dentry hashtable,
		 * with the exception of those newly allocated by
		 * d_obtain_alias, which are always IS_ROOT:
		 */
		if (unlikely(IS_ROOT(dentry)))
			b = &dentry->d_sb->s_anon;
		else
			b = d_hash(dentry->d_name.hash);

		hlist_bl_lock(b);
		__hlist_bl_del(&dentry->d_hash);
		dentry->d_hash.pprev = NULL;
		hlist_bl_unlock(b);
		dentry_rcuwalk_invalidate(dentry);
	}
}
EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
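
/*
 * Hedged usage sketch (examplefs_name_is_stale() is a made-up helper):
 * a filesystem that learns out of band that a cached name is stale can
 * unhash it so that later lookups miss the dcache:
 *
 *	spin_lock(&dentry->d_lock);
 *	if (examplefs_name_is_stale(dentry))
 *		__d_drop(dentry);
 *	spin_unlock(&dentry->d_lock);
 *
 * or simply d_drop(dentry) when d_lock is not already held.
 */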

static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
{
	struct dentry *next;
	/*
	 * Inform d_walk() and shrink_dentry_list() that we are no longer
	 * attached to the dentry tree
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (unlikely(list_empty(&dentry->d_child)))
		return;
	__list_del_entry(&dentry->d_child);
	/*
	 * Cursors can move around the list of children. While we'd been
	 * a normal list member, it didn't matter - ->d_child.next would've
	 * been updated. However, from now on it won't be and for the
	 * things like d_walk() it might end up with a nasty surprise.
	 * Normally d_walk() doesn't care about cursors moving around -
	 * ->d_lock on parent prevents that and since a cursor has no children
	 * of its own, we get through it without ever unlocking the parent.
	 * There is one exception, though - if we ascend from a child that
	 * gets killed as soon as we unlock it, the next sibling is found
	 * using the value left in its ->d_child.next. And if _that_
	 * pointed to a cursor, and cursor got moved (e.g. by lseek())
	 * before d_walk() regains parent->d_lock, we'll end up skipping
	 * everything the cursor had been moved past.
	 *
	 * Solution: make sure that the pointer left behind in ->d_child.next
	 * points to something that won't be moving around. I.e. skip the
	 * cursors.
	 */
	while (dentry->d_child.next != &parent->d_subdirs) {
		next = list_entry(dentry->d_child.next, struct dentry, d_child);
		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
			break;
		dentry->d_child.next = next->d_child.next;
	}
}

static void __dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/*
	 * The dentry is now unrecoverably dead to the world.
	 */
	lockref_mark_dead(&dentry->d_lockref);

	/*
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	if (dentry->d_flags & DCACHE_OP_PRUNE)
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	dentry_unlist(dentry, parent);
	if (parent)
		spin_unlock(&parent->d_lock);
	if (dentry->d_inode)
		dentry_unlink_inode(dentry);
	else
		spin_unlock(&dentry->d_lock);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
		can_free = false;
	}
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
}

/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto failed;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			if (inode)
				spin_unlock(&inode->i_lock);
			goto failed;
		}
	}

	__dentry_kill(dentry);
	return parent;

failed:
	spin_unlock(&dentry->d_lock);
	return dentry; /* try again with same dentry */
}

static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (unlikely(dentry->d_lockref.count < 0))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = ACCESS_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * We can't blindly lock dentry until we are sure
	 * that we won't violate the locking order.
	 * Any changes of dentry->d_parent must have
	 * been done with parent->d_lock held, so
	 * spin_lock() above is enough of a barrier
	 * for checking if it's still our child.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	else
		parent = NULL;
	return parent;
}

/*
 * Try to do a lockless dput(), and return whether that was successful.
 *
 * If unsuccessful, we return false, having already taken the dentry lock.
 *
 * The caller needs to hold the RCU read lock, so that the dentry is
 * guaranteed to stay around even if the refcount goes down to zero!
 */
static inline bool fast_dput(struct dentry *dentry)
{
	int ret;
	unsigned int d_flags;

	/*
	 * If we have a d_op->d_delete() operation, we should not
	 * let the dentry count go to zero, so use "put_or_lock".
	 */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
		return lockref_put_or_lock(&dentry->d_lockref);

	/*
	 * .. otherwise, we can try to just decrement the
	 * lockref optimistically.
	 */
	ret = lockref_put_return(&dentry->d_lockref);

	/*
	 * If the lockref_put_return() failed due to the lock being held
	 * by somebody else, the fast path has failed. We will need to
	 * get the lock, and then check the count again.
	 */
	if (unlikely(ret < 0)) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_lockref.count > 1) {
			dentry->d_lockref.count--;
			spin_unlock(&dentry->d_lock);
			return 1;
		}
		return 0;
	}

	/*
	 * If we weren't the last ref, we're done.
	 */
	if (ret)
		return 1;

	/*
	 * Careful, careful. The reference count went down
	 * to zero, but we don't hold the dentry lock, so
	 * somebody else could get it again, and do another
	 * dput(), and we need to not race with that.
	 *
	 * However, there is a very special and common case
	 * where we don't care, because there is nothing to
	 * do: the dentry is still hashed, it does not have
	 * a 'delete' op, and it's referenced and already on
	 * the LRU list.
	 *
	 * NOTE! Since we aren't locked, these values are
	 * not "stable". However, it is sufficient that at
	 * some point after we dropped the reference the
	 * dentry was hashed and the flags had the proper
	 * value. Other dentry users may have re-gotten
	 * a reference to the dentry and changed that, but
	 * our work is done - we can leave the dentry
	 * around with a zero refcount.
	 */
	smp_rmb();
	d_flags = ACCESS_ONCE(dentry->d_flags);
	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;

	/* Nothing to do? Dropping the reference was all we needed? */
	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
		return 1;

	/*
	 * Not the fast normal case? Get the lock. We've already decremented
	 * the refcount, but we'll need to re-check the situation after
	 * getting the lock.
	 */
	spin_lock(&dentry->d_lock);

	/*
	 * Did somebody else grab a reference to it in the meantime, and
	 * we're no longer the last user after all? Alternatively, somebody
	 * else could have killed it and marked it dead. Either way, we
	 * don't need to do anything else.
	 */
	if (dentry->d_lockref.count) {
		spin_unlock(&dentry->d_lock);
		return 1;
	}

	/*
	 * Re-get the reference we optimistically dropped. We hold the
	 * lock, and we just tested that it was zero, so we can just
	 * set it to 1.
	 */
	dentry->d_lockref.count = 1;
	return 0;
}

/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	if (unlikely(!dentry))
		return;

repeat:
	might_sleep();

	rcu_read_lock();
	if (likely(fast_dput(dentry))) {
		rcu_read_unlock();
		return;
	}

	/* Slow case: now with the dentry lock held */
	rcu_read_unlock();

	WARN_ON(d_in_lookup(dentry));

	/* Unreachable? Get rid of it */
	if (unlikely(d_unhashed(dentry)))
		goto kill_it;

	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
		goto kill_it;

	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	if (!(dentry->d_flags & DCACHE_REFERENCED))
		dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_lockref.count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	dentry = dentry_kill(dentry);
	if (dentry) {
		cond_resched();
		goto repeat;
	}
}
EXPORT_SYMBOL(dput);
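
/*
 * Hedged usage sketch: every reference taken with dget(), dget_parent()
 * or a successful lookup must be balanced by exactly one dput(). For
 * example (examplefs_do_something() is a made-up helper):
 *
 *	struct dentry *parent = dget_parent(dentry);
 *	examplefs_do_something(parent);
 *	dput(parent);
 *
 * Note that dput(NULL) is a no-op, which simplifies error paths.
 */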

/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}

struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;

	/*
	 * Do optimistic parent lookup without any
	 * locking.
	 */
	rcu_read_lock();
	ret = ACCESS_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
			return ret;
		dput(ret);
	}

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem, or if the directory was renamed and d_revalidate
 * was the first vfs operation to notice.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one.
 */
static struct dentry *__d_find_alias(struct inode *inode)
{
	struct dentry *alias, *discon_alias;

again:
	discon_alias = NULL;
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;
			} else {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			__dget_dlock(alias);
			spin_unlock(&alias->d_lock);
			return alias;
		}
		spin_unlock(&alias->d_lock);
		goto again;
	}
	return NULL;
}

struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);

/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			struct dentry *parent = lock_parent(dentry);
			if (likely(!dentry->d_lockref.count)) {
				__dentry_kill(dentry);
				dput(parent);
				goto restart;
			}
			if (parent)
				spin_unlock(&parent->d_lock);
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);

static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry, *parent;

	while (!list_empty(list)) {
		struct inode *inode;
		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		parent = lock_parent(dentry);

		/*
		 * The dispose list is isolated and dentries are not accounted
		 * to the LRU here, so we can simply remove it from the list
		 * here regardless of whether it is referenced or not.
		 */
		d_shrink_del(dentry);

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free it.
		 */
		if (dentry->d_lockref.count > 0) {
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			continue;
		}

		if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
			bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			if (can_free)
				dentry_free(dentry);
			continue;
		}

		inode = dentry->d_inode;
		if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
			d_shrink_add(dentry, list);
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			continue;
		}

		__dentry_kill(dentry);

		/*
		 * We need to prune ancestors too. This is necessary to prevent
		 * quadratic behavior of shrink_dcache_parent(), but is also
		 * expected to be beneficial in reducing dentry cache
		 * fragmentation.
		 */
		dentry = parent;
		while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
			parent = lock_parent(dentry);
			if (dentry->d_lockref.count != 1) {
				dentry->d_lockref.count--;
				spin_unlock(&dentry->d_lock);
				if (parent)
					spin_unlock(&parent->d_lock);
				break;
			}
			inode = dentry->d_inode; /* can't be NULL */
			if (unlikely(!spin_trylock(&inode->i_lock))) {
				spin_unlock(&dentry->d_lock);
				if (parent)
					spin_unlock(&parent->d_lock);
				cpu_relax();
				continue;
			}
			__dentry_kill(dentry);
			dentry = parent;
		}
	}
}

static enum lru_status dentry_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry *dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries are still in use. If they have active
	 * counts, just remove them from the LRU. Otherwise give them
	 * another pass through the LRU.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(lru, dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

		/*
		 * The list move itself will be made by the common LRU code. At
		 * this point, we've dropped the dentry->d_lock but keep the
		 * lru lock. This is safe to do, since every list movement is
		 * protected by the lru lock even if both locks are held.
		 *
		 * This is guaranteed by the fact that all LRU management
		 * functions are intermediated by the LRU API calls like
		 * list_lru_add and list_lru_del. List movement in this file
		 * only ever occurs through these functions or through
		 * callbacks like this one, that are called from the LRU API.
		 *
		 * The only exceptions to this are functions like
		 * shrink_dentry_list, and code that first checks for the
		 * DCACHE_SHRINK_LIST flag. Those are guaranteed to be
		 * operating only with stack-provided lists after they are
		 * properly isolated from the main list. It is thus always a
		 * local access.
		 */
		return LRU_ROTATE;
	}

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}

/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @sc: shrink control, passed to list_lru_shrink_walk()
 *
 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
 * is done when we need more memory and called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
				     dentry_lru_isolate, &dispose);
	shrink_dentry_list(&dispose);
	return freed;
}

static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry *dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	long freed;

	do {
		LIST_HEAD(dispose);

		freed = list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, UINT_MAX);

		this_cpu_sub(nr_dentry_unused, freed);
		shrink_dentry_list(&dispose);
	} while (freed > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);

/**
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE:	continue walk
 * @D_WALK_QUIT:	quit walk
 * @D_WALK_NORETRY:	quit when retry is needed
 * @D_WALK_SKIP:	skip this dentry and its children
 */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};

/**
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter() and @finish()
 * @enter:	callback when first entering the dentry
 * @finish:	callback when successfully finished the walk
 *
 * The @enter() and @finish() callbacks are called with d_lock held.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *),
		   void (*finish)(void *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;

		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
			continue;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	rcu_read_lock();
ascend:
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = child->d_parent;

		spin_unlock(&child->d_lock);
		spin_lock(&this_parent->d_lock);

		/* might go back up the wrong parent if we have had a rename. */
		if (need_seqretry(&rename_lock, seq))
			goto rename_retry;
		/* go into the first sibling still alive */
		do {
			next = child->d_child.next;
			if (next == &this_parent->d_subdirs)
				goto ascend;
			child = list_entry(next, struct dentry, d_child);
		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
		rcu_read_unlock();
		goto resume;
	}
	if (need_seqretry(&rename_lock, seq))
		goto rename_retry;
	rcu_read_unlock();
	if (finish)
		finish(data);

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	spin_unlock(&this_parent->d_lock);
	rcu_read_unlock();
	BUG_ON(seq & 1);
	if (!retry)
		return;
	seq = 1;
	goto again;
}

/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
{
	int *ret = data;
	if (d_mountpoint(dentry)) {
		*ret = 1;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */
int have_submounts(struct dentry *parent)
{
	int ret = 0;

	d_walk(parent, &ret, check_mount, NULL);

	return ret;
}
EXPORT_SYMBOL(have_submounts);

/*
 * Called by mount code to set a mountpoint and check if the mountpoint is
 * reachable (e.g. NFS can unhash a directory dentry and then the complete
 * subtree can become unreachable).
 *
 * Only one of d_invalidate() and d_set_mounted() must succeed. For
 * this reason take rename_lock and d_lock on dentry and ancestors.
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. d_invalidate() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		dentry->d_flags |= DCACHE_MOUNTED;
		ret = 0;
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}

/*
 * Search the dentry child list of the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */

struct select_data {
	struct dentry *start;
	struct list_head dispose;
	int found;
};

static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		data->found++;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count) {
			d_shrink_add(dentry, &data->dispose);
			data->found++;
		}
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data;

		INIT_LIST_HEAD(&data.dispose);
		data.start = parent;
		data.found = 0;

		d_walk(parent, &data, select_collect, NULL);
		if (!data.found)
			break;

		shrink_dentry_list(&data.dispose);
		cond_resched();
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);

static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
	/* it has busy descendants; complain about those instead */
	if (!list_empty(&dentry->d_subdirs))
		return D_WALK_CONTINUE;

	/* root with refcount 1 is fine */
	if (dentry == _data && dentry->d_lockref.count == 1)
		return D_WALK_CONTINUE;

	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
			"still in use (%d) [unmount of %s %s]\n",
		       dentry,
		       dentry->d_inode ?
		       dentry->d_inode->i_ino : 0UL,
		       dentry,
		       dentry->d_lockref.count,
		       dentry->d_sb->s_type->name,
		       dentry->d_sb->s_id);
	WARN_ON(1);
	return D_WALK_CONTINUE;
}

static void do_one_tree(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	d_walk(dentry, dentry, umount_check, NULL);
	d_drop(dentry);
	dput(dentry);
}

/*
 * destroy the dentries attached to a superblock on unmounting
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");

	dentry = sb->s_root;
	sb->s_root = NULL;
	do_one_tree(dentry);

	while (!hlist_bl_empty(&sb->s_anon)) {
		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
		do_one_tree(dentry);
	}
}

struct detach_data {
	struct select_data select;
	struct dentry *mountpoint;
};
static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
{
	struct detach_data *data = _data;

	if (d_mountpoint(dentry)) {
		__dget_dlock(dentry);
		data->mountpoint = dentry;
		return D_WALK_QUIT;
	}

	return select_collect(&data->select, dentry);
}

static void check_and_drop(void *_data)
{
	struct detach_data *data = _data;

	if (!data->mountpoint && !data->select.found)
		__d_drop(data->select.start);
}

/**
 * d_invalidate - detach submounts, prune dcache, and drop
 * @dentry: dentry to invalidate (aka detach, prune and drop)
 *
 * no dcache lock.
 *
 * The final d_drop is done as an atomic operation relative to
 * rename_lock ensuring there are no races with d_set_mounted. This
 * ensures there are no unhashed dentries on the path to a mountpoint.
 */
void d_invalidate(struct dentry *dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return;
	}
	spin_unlock(&dentry->d_lock);

	/* Negative dentries can be dropped without further checks */
	if (!dentry->d_inode) {
		d_drop(dentry);
		return;
	}

	for (;;) {
		struct detach_data data;

		data.mountpoint = NULL;
		INIT_LIST_HEAD(&data.select.dispose);
		data.select.start = dentry;
		data.select.found = 0;

		d_walk(dentry, &data, detach_and_collect, check_and_drop);

		if (data.select.found)
			shrink_dentry_list(&data.select.dispose);

		if (data.mountpoint) {
			detach_mounts(data.mountpoint);
			dput(data.mountpoint);
		}

		if (!data.mountpoint && !data.select.found)
			break;

		cond_resched();
	}
}
EXPORT_SYMBOL(d_invalidate);

/**
 * __d_alloc - allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */

struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;
	int err;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * We guarantee that the inline name is always NUL-terminated.
	 * This way the memcpy() done by the name switching in rename
	 * will still always have a NUL at the end, even if we might
	 * be overwriting an internal NUL character
	 */
	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
	if (unlikely(!name)) {
		static const struct qstr anon = QSTR_INIT("/", 1);
		name = &anon;
		dname = dentry->d_iname;
	} else if (name->len > DNAME_INLINE_LEN-1) {
		size_t size = offsetof(struct external_name, name[1]);
		struct external_name *p = kmalloc(size + name->len,
						  GFP_KERNEL_ACCOUNT);
		if (!p) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
		atomic_set(&p->u.count, 1);
		dname = p->name;
		if (IS_ENABLED(CONFIG_DCACHE_WORD_ACCESS))
			kasan_unpoison_shadow(dname,
				round_up(name->len + 1, sizeof(unsigned long)));
	} else {
		dname = dentry->d_iname;
	}

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/* Make sure we always see the terminating NUL character */
	smp_wmb();
	dentry->d_name.name = dname;

	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	if (dentry->d_op && dentry->d_op->d_init) {
		err = dentry->d_op->d_init(dentry);
		if (err) {
			if (dname_external(dentry))
				kfree(external_name(dentry));
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	}

	this_cpu_inc(nr_dentry);

	return dentry;
}

/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry *parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;
	dentry->d_flags |= DCACHE_RCUACCESS;
	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);

struct dentry *d_alloc_cursor(struct dentry *parent)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, NULL);
	if (dentry) {
		dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
		dentry->d_parent = dget(parent);
	}
	return dentry;
}

/**
 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
 * @sb: the superblock
 * @name: qstr of the name
 *
 * For a filesystem that just pins its dentries in memory and never
 * performs lookups at all, return an unhashed IS_ROOT dentry.
 */
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	return __d_alloc(sb, name);
}
EXPORT_SYMBOL(d_alloc_pseudo);

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.hash_len = hashlen_string(parent, name);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);

void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
				DCACHE_OP_COMPARE |
				DCACHE_OP_REVALIDATE |
				DCACHE_OP_WEAK_REVALIDATE |
				DCACHE_OP_DELETE |
				DCACHE_OP_REAL));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_weak_revalidate)
		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;
	if (op->d_real)
		dentry->d_flags |= DCACHE_OP_REAL;
}
EXPORT_SYMBOL(d_set_d_op);
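
/*
 * Illustrative sketch ("examplefs" is hypothetical): a filesystem
 * normally sets its dentry_operations once on the superblock, and
 * __d_alloc() then applies them to every new dentry via
 * d_set_d_op(dentry, sb->s_d_op):
 *
 *	static const struct dentry_operations examplefs_dentry_ops = {
 *		.d_revalidate	= examplefs_d_revalidate,
 *		.d_delete	= always_delete_dentry,
 *	};
 *
 *	sb->s_d_op = &examplefs_dentry_ops;	// in examplefs_fill_super()
 */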

/*
 * d_set_fallthru - Mark a dentry as falling through to a lower layer
 * @dentry - The dentry to mark
 *
 * Mark a dentry as falling through to the lower layer (as set with
 * d_pin_lower()). This flag may be recorded on the medium.
 */
void d_set_fallthru(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_FALLTHRU;
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_set_fallthru);

static unsigned d_flags_for_inode(struct inode *inode)
{
	unsigned add_flags = DCACHE_REGULAR_TYPE;

	if (!inode)
		return DCACHE_MISS_TYPE;

	if (S_ISDIR(inode->i_mode)) {
		add_flags = DCACHE_DIRECTORY_TYPE;
		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
			if (unlikely(!inode->i_op->lookup))
				add_flags = DCACHE_AUTODIR_TYPE;
			else
				inode->i_opflags |= IOP_LOOKUP;
		}
		goto type_determined;
	}

	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
		if (unlikely(inode->i_op->get_link)) {
			add_flags = DCACHE_SYMLINK_TYPE;
			goto type_determined;
		}
		inode->i_opflags |= IOP_NOFOLLOW;
	}

	if (unlikely(!S_ISREG(inode->i_mode)))
		add_flags = DCACHE_SPECIAL_TYPE;

type_determined:
	if (unlikely(IS_AUTOMOUNT(inode)))
		add_flags |= DCACHE_NEED_AUTOMOUNT;
	return add_flags;
}

static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	unsigned add_flags = d_flags_for_inode(inode);
	WARN_ON(d_in_lookup(dentry));

	spin_lock(&dentry->d_lock);
	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	raw_write_seqcount_begin(&dentry->d_seq);
	__d_set_inode_and_type(dentry, inode, add_flags);
	raw_write_seqcount_end(&dentry->d_seq);
	fsnotify_update_flags(dentry);
	spin_unlock(&dentry->d_lock);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */

void d_instantiate(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
		__d_instantiate(entry, inode);
		spin_unlock(&inode->i_lock);
	}
}
EXPORT_SYMBOL(d_instantiate);
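
/*
 * Typical pattern (sketch; examplefs_new_inode() is a made-up helper):
 * a filesystem ->create() method allocates an inode and attaches it to
 * the negative dentry handed in by the VFS:
 *
 *	struct inode *inode = examplefs_new_inode(dir, mode);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	d_instantiate(dentry, inode);	// dcache now owns that inode ref
 *	return 0;
 */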

/**
 * d_instantiate_no_diralias - instantiate a non-aliased dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. If a directory alias is found, then
 * return an error (and drop inode). Together with d_materialise_unique() this
 * guarantees that a directory inode may never have more than one alias.
 */
int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));

	security_d_instantiate(entry, inode);
	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
		spin_unlock(&inode->i_lock);
		iput(inode);
		return -EBUSY;
	}
	__d_instantiate(entry, inode);
	spin_unlock(&inode->i_lock);

	return 0;
}
EXPORT_SYMBOL(d_instantiate_no_diralias);

struct dentry *d_make_root(struct inode *root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		res = __d_alloc(root_inode->i_sb, NULL);
		if (res)
			d_instantiate(res, root_inode);
		else
			iput(root_inode);
	}
	return res;
}
EXPORT_SYMBOL(d_make_root);

static struct dentry *__d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (hlist_empty(&inode->i_dentry))
		return NULL;
	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	__dget(alias);
	return alias;
}

/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them. If no aliases exist, return %NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}
EXPORT_SYMBOL(d_find_any_alias);

static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
{
	struct dentry *tmp;
	struct dentry *res;
	unsigned add_flags;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = __d_alloc(inode->i_sb, NULL);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	security_d_instantiate(tmp, inode);
	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	add_flags = d_flags_for_inode(inode);

	if (disconnected)
		add_flags |= DCACHE_DISCONNECTED;

	spin_lock(&tmp->d_lock);
	__d_set_inode_and_type(tmp, inode, add_flags);
	hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
	hlist_bl_lock(&tmp->d_sb->s_anon);
	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
	hlist_bl_unlock(&tmp->d_sb->s_anon);
	spin_unlock(&tmp->d_lock);
	spin_unlock(&inode->i_lock);

	return tmp;

out_iput:
	iput(inode);
	return res;
}
1954
1955 /**
1956 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
1957 * @inode: inode to allocate the dentry for
1958 *
1959 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
1960 * similar open by handle operations. The returned dentry may be anonymous,
1961 * or may have a full name (if the inode was already in the cache).
1962 *
1963 * When called on a directory inode, we must ensure that the inode only ever
1964 * has one dentry. If a dentry is found, that is returned instead of
1965 * allocating a new one.
1966 *
1967 * On successful return, the reference to the inode has been transferred
1968 * to the dentry. In case of an error the reference on the inode is released.
1969 * To make it easier to use in export operations a %NULL or IS_ERR inode may
1970 * be passed in and the error will be propagated to the return value,
1971 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
1972 */
1973 struct dentry *d_obtain_alias(struct inode *inode)
1974 {
1975 return __d_obtain_alias(inode, 1);
1976 }
1977 EXPORT_SYMBOL(d_obtain_alias);
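
/*
 * Sketch of an export_operations ->fh_to_dentry built on d_obtain_alias();
 * "myfs_nfs_get_inode" is a hypothetical filehandle-to-inode helper. A
 * %NULL or IS_ERR inode can be passed straight through:
 *
 *	static struct dentry *myfs_fh_to_dentry(struct super_block *sb,
 *				struct fid *fid, int fh_len, int fh_type)
 *	{
 *		return d_obtain_alias(myfs_nfs_get_inode(sb, fid->i32.ino,
 *							 fid->i32.gen));
 *	}
 */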
1978
1979 /**
1980 * d_obtain_root - find or allocate a dentry for a given inode
1981 * @inode: inode to allocate the dentry for
1982 *
1983 * Obtain an IS_ROOT dentry for the root of a filesystem.
1984 *
1985 * We must ensure that directory inodes only ever have one dentry. If a
1986 * dentry is found, that is returned instead of allocating a new one.
1987 *
1988 * On successful return, the reference to the inode has been transferred
1989 * to the dentry. In case of an error the reference on the inode is
1990 * released. A %NULL or IS_ERR inode may be passed in and the error
1991 * will be propagated to the return value, with a %NULL @inode
1992 * replaced by ERR_PTR(-ESTALE).
1993 */
1994 struct dentry *d_obtain_root(struct inode *inode)
1995 {
1996 return __d_obtain_alias(inode, 0);
1997 }
1998 EXPORT_SYMBOL(d_obtain_root);
1999
2000 /**
2001 * d_add_ci - lookup or allocate new dentry with case-exact name
2002 * @inode: the inode case-insensitive lookup has found
2003 * @dentry: the negative dentry that was passed to the parent's lookup func
2004 * @name: the case-exact name to be associated with the returned dentry
2005 *
2006 * This is to avoid filling the dcache with case-insensitive names to the
2007 * same inode; only the actual correct case is stored in the dcache for
2008 * case-insensitive filesystems.
2009 *
2010 * For a case-insensitive lookup match, if the case-exact dentry
2011 * already exists in the dcache, use it and return it.
2012 *
2013 * If no entry exists with the exact case name, allocate new dentry with
2014 * the exact case, and return the spliced entry.
2015 */
2016 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
2017 struct qstr *name)
2018 {
2019 struct dentry *found, *res;
2020
2021 /*
2022 * First check if a dentry matching the name already exists,
2023 * if not go ahead and create it now.
2024 */
2025 found = d_hash_and_lookup(dentry->d_parent, name);
2026 if (found) {
2027 iput(inode);
2028 return found;
2029 }
2030 if (d_in_lookup(dentry)) {
2031 found = d_alloc_parallel(dentry->d_parent, name,
2032 dentry->d_wait);
2033 if (IS_ERR(found) || !d_in_lookup(found)) {
2034 iput(inode);
2035 return found;
2036 }
2037 } else {
2038 found = d_alloc(dentry->d_parent, name);
2039 if (!found) {
2040 iput(inode);
2041 return ERR_PTR(-ENOMEM);
2042 }
2043 }
2044 res = d_splice_alias(inode, found);
2045 if (res) {
2046 dput(found);
2047 return res;
2048 }
2049 return found;
2050 }
2051 EXPORT_SYMBOL(d_add_ci);
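
/*
 * Sketch of a case-insensitive ->lookup() built on d_add_ci();
 * "myfs_find_exact" is a hypothetical directory search that returns the
 * inode plus the on-disk (case-exact) name:
 *
 *	static struct dentry *myfs_ci_lookup(struct inode *dir,
 *				struct dentry *dentry, unsigned int flags)
 *	{
 *		struct qstr exact;
 *		struct inode *inode = myfs_find_exact(dir, &dentry->d_name,
 *						      &exact);
 *
 *		if (!inode) {
 *			d_add(dentry, NULL);	/* negative dentry */
 *			return NULL;
 *		}
 *		return d_add_ci(dentry, inode, &exact);
 *	}
 */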
2052
2053
2054 static inline bool d_same_name(const struct dentry *dentry,
2055 const struct dentry *parent,
2056 const struct qstr *name)
2057 {
2058 if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2059 if (dentry->d_name.len != name->len)
2060 return false;
2061 return dentry_cmp(dentry, name->name, name->len) == 0;
2062 }
2063 return parent->d_op->d_compare(parent, dentry,
2064 dentry->d_name.len, dentry->d_name.name,
2065 name) == 0;
2066 }
2067
2068 /**
2069 * __d_lookup_rcu - search for a dentry (racy, store-free)
2070 * @parent: parent dentry
2071 * @name: qstr of name we wish to find
2072 * @seqp: returns d_seq value at the point where the dentry was found
2073 * Returns: dentry, or NULL
2074 *
2075 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2076 * resolution (store-free path walking) design described in
2077 * Documentation/filesystems/path-lookup.txt.
2078 *
2079 * This is not to be used outside core vfs.
2080 *
2081 * __d_lookup_rcu must only be used in rcu-walk mode, i.e. with vfsmount lock
2082 * held, and rcu_read_lock held. The returned dentry must not be stored
2083 * anywhere without first taking d_lock and checking the d_seq sequence
2084 * count against @seq returned here.
2085 *
2086 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
2087 * function.
2088 *
2089 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2090 * the returned dentry, so long as its parent's seqlock is checked after the
2091 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2092 * is formed, giving integrity down the path walk.
2093 *
2094 * NOTE! The caller *has* to check the resulting dentry against the sequence
2095 * number we've returned before using any of the resulting dentry state!
2096 */
2097 struct dentry *__d_lookup_rcu(const struct dentry *parent,
2098 const struct qstr *name,
2099 unsigned *seqp)
2100 {
2101 u64 hashlen = name->hash_len;
2102 const unsigned char *str = name->name;
2103 struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
2104 struct hlist_bl_node *node;
2105 struct dentry *dentry;
2106
2107 /*
2108 * Note: There is significant duplication with __d_lookup which is
2109 * required to prevent single threaded performance regressions
2110 * especially on architectures where smp_rmb (in seqcounts) are costly.
2111 * Keep the two functions in sync.
2112 */
2113
2114 /*
2115 * The hash list is protected using RCU.
2116 *
2117 * Carefully use d_seq when comparing a candidate dentry, to avoid
2118 * races with d_move().
2119 *
2120 * It is possible that concurrent renames can mess up our list
2121 * walk here and result in missing our dentry, resulting in the
2122 * false-negative result. d_lookup() protects against concurrent
2123 * renames using rename_lock seqlock.
2124 *
2125 * See Documentation/filesystems/path-lookup.txt for more details.
2126 */
2127 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2128 unsigned seq;
2129
2130 seqretry:
2131 /*
2132 * The dentry sequence count protects us from concurrent
2133 * renames, and thus protects parent and name fields.
2134 *
2135 * The caller must perform a seqcount check in order
2136 * to do anything useful with the returned dentry.
2137 *
2138 * NOTE! We do a "raw" seqcount_begin here. That means that
2139 * we don't wait for the sequence count to stabilize if it
2140 * is in the middle of a sequence change. If we do the slow
2141 * dentry compare, we will do seqretries until it is stable,
2142 * and if we end up with a successful lookup, we actually
2143 * want to exit RCU lookup anyway.
2144 *
2145 * Note that raw_seqcount_begin still *does* smp_rmb(), so
2146 * we are still guaranteed NUL-termination of ->d_name.name.
2147 */
2148 seq = raw_seqcount_begin(&dentry->d_seq);
2149 if (dentry->d_parent != parent)
2150 continue;
2151 if (d_unhashed(dentry))
2152 continue;
2153
2154 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2155 int tlen;
2156 const char *tname;
2157 if (dentry->d_name.hash != hashlen_hash(hashlen))
2158 continue;
2159 tlen = dentry->d_name.len;
2160 tname = dentry->d_name.name;
2161 /* we want a consistent (name,len) pair */
2162 if (read_seqcount_retry(&dentry->d_seq, seq)) {
2163 cpu_relax();
2164 goto seqretry;
2165 }
2166 if (parent->d_op->d_compare(parent, dentry,
2167 tlen, tname, name) != 0)
2168 continue;
2169 } else {
2170 if (dentry->d_name.hash_len != hashlen)
2171 continue;
2172 if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
2173 continue;
2174 }
2175 *seqp = seq;
2176 return dentry;
2177 }
2178 return NULL;
2179 }
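
/*
 * Sketch of the validation the kernel-doc above demands of callers
 * (simplified from lookup_fast() in fs/namei.c); nothing read from the
 * returned dentry may be trusted until d_seq has been rechecked:
 *
 *	unsigned seq;
 *	struct dentry *dentry = __d_lookup_rcu(parent, name, &seq);
 *
 *	if (dentry) {
 *		struct inode *inode = d_backing_inode(dentry);
 *
 *		if (read_seqcount_retry(&dentry->d_seq, seq))
 *			return -ECHILD;	/* raced; leave rcu-walk mode */
 *		/* dentry, inode and name are now consistent */
 *	}
 */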
2180
2181 /**
2182 * d_lookup - search for a dentry
2183 * @parent: parent dentry
2184 * @name: qstr of name we wish to find
2185 * Returns: dentry, or NULL
2186 *
2187 * d_lookup searches the children of the parent dentry for the name in
2188 * question. If the dentry is found its reference count is incremented and the
2189 * dentry is returned. The caller must use dput to free the entry when it has
2190 * finished using it. %NULL is returned if the dentry does not exist.
2191 */
2192 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2193 {
2194 struct dentry *dentry;
2195 unsigned seq;
2196
2197 do {
2198 seq = read_seqbegin(&rename_lock);
2199 dentry = __d_lookup(parent, name);
2200 if (dentry)
2201 break;
2202 } while (read_seqretry(&rename_lock, seq));
2203 return dentry;
2204 }
2205 EXPORT_SYMBOL(d_lookup);
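
/*
 * A minimal d_lookup() usage sketch; the qstr must already be hashed
 * (see d_hash_and_lookup() below for the combined helper):
 *
 *	struct qstr q = QSTR_INIT("foo", 3);
 *	struct dentry *child;
 *
 *	q.hash = full_name_hash(parent, q.name, q.len);
 *	child = d_lookup(parent, &q);
 *	if (child) {
 *		/* ... use child ... */
 *		dput(child);	/* d_lookup() took a reference */
 *	}
 */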
2206
2207 /**
2208 * __d_lookup - search for a dentry (racy)
2209 * @parent: parent dentry
2210 * @name: qstr of name we wish to find
2211 * Returns: dentry, or NULL
2212 *
2213 * __d_lookup is like d_lookup, however it may (rarely) return a
2214 * false-negative result due to unrelated rename activity.
2215 *
2216 * __d_lookup is slightly faster by avoiding the rename_lock read seqlock;
2217 * however, it must be used carefully, e.g. with a following d_lookup in
2218 * the case of failure.
2219 *
2220 * __d_lookup callers must be commented.
2221 */
2222 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2223 {
2224 unsigned int hash = name->hash;
2225 struct hlist_bl_head *b = d_hash(hash);
2226 struct hlist_bl_node *node;
2227 struct dentry *found = NULL;
2228 struct dentry *dentry;
2229
2230 /*
2231 * Note: There is significant duplication with __d_lookup_rcu which is
2232 * required to prevent single threaded performance regressions
2233 * especially on architectures where smp_rmb (in seqcounts) are costly.
2234 * Keep the two functions in sync.
2235 */
2236
2237 /*
2238 * The hash list is protected using RCU.
2239 *
2240 * Take d_lock when comparing a candidate dentry, to avoid races
2241 * with d_move().
2242 *
2243 * It is possible that concurrent renames can mess up our list
2244 * walk here and result in missing our dentry, resulting in the
2245 * false-negative result. d_lookup() protects against concurrent
2246 * renames using rename_lock seqlock.
2247 *
2248 * See Documentation/filesystems/path-lookup.txt for more details.
2249 */
2250 rcu_read_lock();
2251
2252 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2253
2254 if (dentry->d_name.hash != hash)
2255 continue;
2256
2257 spin_lock(&dentry->d_lock);
2258 if (dentry->d_parent != parent)
2259 goto next;
2260 if (d_unhashed(dentry))
2261 goto next;
2262
2263 if (!d_same_name(dentry, parent, name))
2264 goto next;
2265
2266 dentry->d_lockref.count++;
2267 found = dentry;
2268 spin_unlock(&dentry->d_lock);
2269 break;
2270 next:
2271 spin_unlock(&dentry->d_lock);
2272 }
2273 rcu_read_unlock();
2274
2275 return found;
2276 }
2277
2278 /**
2279 * d_hash_and_lookup - hash the qstr then search for a dentry
2280 * @dir: Directory to search in
2281 * @name: qstr of name we wish to find
2282 *
2283 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
2284 */
2285 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2286 {
2287 /*
2288 * Check for a fs-specific hash function. Note that we must
2289 * calculate the standard hash first, as the d_op->d_hash()
2290 * routine may choose to leave the hash value unchanged.
2291 */
2292 name->hash = full_name_hash(dir, name->name, name->len);
2293 if (dir->d_flags & DCACHE_OP_HASH) {
2294 int err = dir->d_op->d_hash(dir, name);
2295 if (unlikely(err < 0))
2296 return ERR_PTR(err);
2297 }
2298 return d_lookup(dir, name);
2299 }
2300 EXPORT_SYMBOL(d_hash_and_lookup);
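
/*
 * Typical use, e.g. probing whether a child is already cached
 * (cf. proc_fill_cache() in fs/proc/base.c):
 *
 *	struct qstr this = QSTR_INIT(name, len);
 *	struct dentry *child = d_hash_and_lookup(dir, &this);
 *
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);	/* ->d_hash() rejected the name */
 *	if (child) {
 *		/* ... */
 *		dput(child);
 *	}
 */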
2301
2302 /*
2303 * When a file is deleted, we have two options:
2304 * - turn this dentry into a negative dentry
2305 * - unhash this dentry and free it.
2306 *
2307 * Usually, we want to just turn this into
2308 * a negative dentry, but if anybody else is
2309 * currently using the dentry or the inode
2310 * we can't do that; instead we fall back on removing
2311 * it from the hash queues and waiting for
2312 * it to be deleted later when it has no users.
2313 */
2314
2315 /**
2316 * d_delete - delete a dentry
2317 * @dentry: The dentry to delete
2318 *
2319 * Turn the dentry into a negative dentry if possible, otherwise
2320 * remove it from the hash queues so it can be deleted later
2321 */
2322
2323 void d_delete(struct dentry * dentry)
2324 {
2325 struct inode *inode;
2326 int isdir = 0;
2327 /*
2328 * Are we the only user?
2329 */
2330 again:
2331 spin_lock(&dentry->d_lock);
2332 inode = dentry->d_inode;
2333 isdir = S_ISDIR(inode->i_mode);
2334 if (dentry->d_lockref.count == 1) {
2335 if (!spin_trylock(&inode->i_lock)) {
2336 spin_unlock(&dentry->d_lock);
2337 cpu_relax();
2338 goto again;
2339 }
2340 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2341 dentry_unlink_inode(dentry);
2342 fsnotify_nameremove(dentry, isdir);
2343 return;
2344 }
2345
2346 if (!d_unhashed(dentry))
2347 __d_drop(dentry);
2348
2349 spin_unlock(&dentry->d_lock);
2350
2351 fsnotify_nameremove(dentry, isdir);
2352 }
2353 EXPORT_SYMBOL(d_delete);
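
/*
 * d_delete() is normally invoked by the VFS rather than by filesystems;
 * after a successful ->unlink() the caller does roughly this (simplified
 * from vfs_unlink() in fs/namei.c):
 *
 *	struct inode *target = dentry->d_inode;
 *
 *	error = dir->i_op->unlink(dir, dentry);
 *	if (!error) {
 *		fsnotify_link_count(target);
 *		d_delete(dentry);
 *	}
 */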
2354
2355 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2356 {
2357 BUG_ON(!d_unhashed(entry));
2358 hlist_bl_lock(b);
2359 hlist_bl_add_head_rcu(&entry->d_hash, b);
2360 hlist_bl_unlock(b);
2361 }
2362
2363 static void _d_rehash(struct dentry * entry)
2364 {
2365 __d_rehash(entry, d_hash(entry->d_name.hash));
2366 }
2367
2368 /**
2369 * d_rehash - add an entry back to the hash
2370 * @entry: dentry to add to the hash
2371 *
2372 * Adds a dentry to the hash according to its name.
2373 */
2374
2375 void d_rehash(struct dentry * entry)
2376 {
2377 spin_lock(&entry->d_lock);
2378 _d_rehash(entry);
2379 spin_unlock(&entry->d_lock);
2380 }
2381 EXPORT_SYMBOL(d_rehash);
2382
2383 static inline unsigned start_dir_add(struct inode *dir)
2384 {
2385
2386 for (;;) {
2387 unsigned n = dir->i_dir_seq;
2388 if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
2389 return n;
2390 cpu_relax();
2391 }
2392 }
2393
2394 static inline void end_dir_add(struct inode *dir, unsigned n)
2395 {
2396 smp_store_release(&dir->i_dir_seq, n + 2);
2397 }
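
/*
 * i_dir_seq is an open-coded seqcount over directory additions:
 * start_dir_add() makes it odd (writer active), end_dir_add() makes it
 * even again. Lookup-side readers snapshot an even value and recheck it,
 * simplified from d_alloc_parallel() below:
 *
 *	seq = smp_load_acquire(&dir->i_dir_seq) & ~1;
 *	/* ... racy lookup work ... */
 *	if (dir->i_dir_seq != seq)
 *		goto retry;	/* an addition slipped in under us */
 */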
2398
2399 static void d_wait_lookup(struct dentry *dentry)
2400 {
2401 if (d_in_lookup(dentry)) {
2402 DECLARE_WAITQUEUE(wait, current);
2403 add_wait_queue(dentry->d_wait, &wait);
2404 do {
2405 set_current_state(TASK_UNINTERRUPTIBLE);
2406 spin_unlock(&dentry->d_lock);
2407 schedule();
2408 spin_lock(&dentry->d_lock);
2409 } while (d_in_lookup(dentry));
2410 }
2411 }
2412
2413 struct dentry *d_alloc_parallel(struct dentry *parent,
2414 const struct qstr *name,
2415 wait_queue_head_t *wq)
2416 {
2417 unsigned int hash = name->hash;
2418 struct hlist_bl_head *b = in_lookup_hash(parent, hash);
2419 struct hlist_bl_node *node;
2420 struct dentry *new = d_alloc(parent, name);
2421 struct dentry *dentry;
2422 unsigned seq, r_seq, d_seq;
2423
2424 if (unlikely(!new))
2425 return ERR_PTR(-ENOMEM);
2426
2427 retry:
2428 rcu_read_lock();
2429 seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1;
2430 r_seq = read_seqbegin(&rename_lock);
2431 dentry = __d_lookup_rcu(parent, name, &d_seq);
2432 if (unlikely(dentry)) {
2433 if (!lockref_get_not_dead(&dentry->d_lockref)) {
2434 rcu_read_unlock();
2435 goto retry;
2436 }
2437 if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
2438 rcu_read_unlock();
2439 dput(dentry);
2440 goto retry;
2441 }
2442 rcu_read_unlock();
2443 dput(new);
2444 return dentry;
2445 }
2446 if (unlikely(read_seqretry(&rename_lock, r_seq))) {
2447 rcu_read_unlock();
2448 goto retry;
2449 }
2450 hlist_bl_lock(b);
2451 if (unlikely(parent->d_inode->i_dir_seq != seq)) {
2452 hlist_bl_unlock(b);
2453 rcu_read_unlock();
2454 goto retry;
2455 }
2456 /*
2457 * No changes for the parent since the beginning of d_lookup().
2458 * Since all removals from the chain happen with hlist_bl_lock(),
2459 * any potential in-lookup matches are going to stay here until
2460 * we unlock the chain. All fields are stable in everything
2461 * we encounter.
2462 */
2463 hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
2464 if (dentry->d_name.hash != hash)
2465 continue;
2466 if (dentry->d_parent != parent)
2467 continue;
2468 if (!d_same_name(dentry, parent, name))
2469 continue;
2470 hlist_bl_unlock(b);
2471 /* now we can try to grab a reference */
2472 if (!lockref_get_not_dead(&dentry->d_lockref)) {
2473 rcu_read_unlock();
2474 goto retry;
2475 }
2476
2477 rcu_read_unlock();
2478 /*
2479 * somebody is likely to be still doing lookup for it;
2480 * wait for them to finish
2481 */
2482 spin_lock(&dentry->d_lock);
2483 d_wait_lookup(dentry);
2484 /*
2485 * it's not in-lookup anymore; in principle we should repeat
2486 * everything from dcache lookup, but it's likely to be what
2487 * d_lookup() would've found anyway. If it is, just return it;
2488 * otherwise we really have to repeat the whole thing.
2489 */
2490 if (unlikely(dentry->d_name.hash != hash))
2491 goto mismatch;
2492 if (unlikely(dentry->d_parent != parent))
2493 goto mismatch;
2494 if (unlikely(d_unhashed(dentry)))
2495 goto mismatch;
2496 if (unlikely(!d_same_name(dentry, parent, name)))
2497 goto mismatch;
2498 /* OK, it *is* a hashed match; return it */
2499 spin_unlock(&dentry->d_lock);
2500 dput(new);
2501 return dentry;
2502 }
2503 rcu_read_unlock();
2504 /* we can't take ->d_lock here; it's OK, though. */
2505 new->d_flags |= DCACHE_PAR_LOOKUP;
2506 new->d_wait = wq;
2507 hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
2508 hlist_bl_unlock(b);
2509 return new;
2510 mismatch:
2511 spin_unlock(&dentry->d_lock);
2512 dput(dentry);
2513 goto retry;
2514 }
2515 EXPORT_SYMBOL(d_alloc_parallel);
2516
2517 void __d_lookup_done(struct dentry *dentry)
2518 {
2519 struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
2520 dentry->d_name.hash);
2521 hlist_bl_lock(b);
2522 dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
2523 __hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
2524 wake_up_all(dentry->d_wait);
2525 dentry->d_wait = NULL;
2526 hlist_bl_unlock(b);
2527 INIT_HLIST_NODE(&dentry->d_u.d_alias);
2528 INIT_LIST_HEAD(&dentry->d_lru);
2529 }
2530 EXPORT_SYMBOL(__d_lookup_done);
2531
2532 /* inode->i_lock held if inode is non-NULL */
2533
2534 static inline void __d_add(struct dentry *dentry, struct inode *inode)
2535 {
2536 struct inode *dir = NULL;
2537 unsigned n;
2538 spin_lock(&dentry->d_lock);
2539 if (unlikely(d_in_lookup(dentry))) {
2540 dir = dentry->d_parent->d_inode;
2541 n = start_dir_add(dir);
2542 __d_lookup_done(dentry);
2543 }
2544 if (inode) {
2545 unsigned add_flags = d_flags_for_inode(inode);
2546 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2547 raw_write_seqcount_begin(&dentry->d_seq);
2548 __d_set_inode_and_type(dentry, inode, add_flags);
2549 raw_write_seqcount_end(&dentry->d_seq);
2550 fsnotify_update_flags(dentry);
2551 }
2552 _d_rehash(dentry);
2553 if (dir)
2554 end_dir_add(dir, n);
2555 spin_unlock(&dentry->d_lock);
2556 if (inode)
2557 spin_unlock(&inode->i_lock);
2558 }
2559
2560 /**
2561 * d_add - add dentry to hash queues
2562 * @entry: dentry to add
2563 * @inode: The inode to attach to this dentry
2564 *
2565 * This adds the entry to the hash queues and initializes it with @inode.
2566 * The entry was actually filled in earlier during d_alloc().
2567 */
2568
2569 void d_add(struct dentry *entry, struct inode *inode)
2570 {
2571 if (inode) {
2572 security_d_instantiate(entry, inode);
2573 spin_lock(&inode->i_lock);
2574 }
2575 __d_add(entry, inode);
2576 }
2577 EXPORT_SYMBOL(d_add);
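
/*
 * A minimal in-memory filesystem ->lookup() built on d_add();
 * "myfs_find_inode" is a hypothetical helper. Exportable filesystems
 * should prefer d_splice_alias() (see below):
 *
 *	static struct dentry *myfs_lookup(struct inode *dir,
 *				struct dentry *dentry, unsigned int flags)
 *	{
 *		struct inode *inode = myfs_find_inode(dir, &dentry->d_name);
 *
 *		d_add(dentry, inode);	/* NULL inode => negative dentry */
 *		return NULL;
 *	}
 */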
2578
2579 /**
2580 * d_exact_alias - find and hash an exact unhashed alias
2581 * @entry: dentry to add
2582 * @inode: The inode to go with this dentry
2583 *
2584 * If an unhashed dentry with the same name/parent and desired
2585 * inode already exists, hash and return it. Otherwise, return
2586 * NULL.
2587 *
2588 * Parent directory should be locked.
2589 */
2590 struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
2591 {
2592 struct dentry *alias;
2593 unsigned int hash = entry->d_name.hash;
2594
2595 spin_lock(&inode->i_lock);
2596 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
2597 /*
2598 * Don't need alias->d_lock here, because aliases with
2599 * d_parent == entry->d_parent are not subject to name or
2600 * parent changes, because the parent inode i_mutex is held.
2601 */
2602 if (alias->d_name.hash != hash)
2603 continue;
2604 if (alias->d_parent != entry->d_parent)
2605 continue;
2606 if (!d_same_name(alias, entry->d_parent, &entry->d_name))
2607 continue;
2608 spin_lock(&alias->d_lock);
2609 if (!d_unhashed(alias)) {
2610 spin_unlock(&alias->d_lock);
2611 alias = NULL;
2612 } else {
2613 __dget_dlock(alias);
2614 _d_rehash(alias);
2615 spin_unlock(&alias->d_lock);
2616 }
2617 spin_unlock(&inode->i_lock);
2618 return alias;
2619 }
2620 spin_unlock(&inode->i_lock);
2621 return NULL;
2622 }
2623 EXPORT_SYMBOL(d_exact_alias);
2624
2625 /**
2626 * dentry_update_name_case - update case insensitive dentry with a new name
2627 * @dentry: dentry to be updated
2628 * @name: new name
2629 *
2630 * Update a case insensitive dentry with the new case of the name.
2631 *
2632 * dentry must have been returned by d_lookup with name @name. Old and new
2633 * name lengths must match (ie. no d_compare which allows mismatched name
2634 * lengths).
2635 *
2636 * Parent inode i_mutex must be held over d_lookup and into this call (to
2637 * keep renames and concurrent inserts, and readdir(2) away).
2638 */
2639 void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2640 {
2641 BUG_ON(!inode_is_locked(dentry->d_parent->d_inode));
2642 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2643
2644 spin_lock(&dentry->d_lock);
2645 write_seqcount_begin(&dentry->d_seq);
2646 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2647 write_seqcount_end(&dentry->d_seq);
2648 spin_unlock(&dentry->d_lock);
2649 }
2650 EXPORT_SYMBOL(dentry_update_name_case);
2651
2652 static void swap_names(struct dentry *dentry, struct dentry *target)
2653 {
2654 if (unlikely(dname_external(target))) {
2655 if (unlikely(dname_external(dentry))) {
2656 /*
2657 * Both external: swap the pointers
2658 */
2659 swap(target->d_name.name, dentry->d_name.name);
2660 } else {
2661 /*
2662 * dentry:internal, target:external. Steal target's
2663 * storage and make target internal.
2664 */
2665 memcpy(target->d_iname, dentry->d_name.name,
2666 dentry->d_name.len + 1);
2667 dentry->d_name.name = target->d_name.name;
2668 target->d_name.name = target->d_iname;
2669 }
2670 } else {
2671 if (unlikely(dname_external(dentry))) {
2672 /*
2673 * dentry:external, target:internal. Give dentry's
2674 * storage to target and make dentry internal
2675 */
2676 memcpy(dentry->d_iname, target->d_name.name,
2677 target->d_name.len + 1);
2678 target->d_name.name = dentry->d_name.name;
2679 dentry->d_name.name = dentry->d_iname;
2680 } else {
2681 /*
2682 * Both are internal.
2683 */
2684 unsigned int i;
2685 BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2686 kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN);
2687 kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN);
2688 for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2689 swap(((long *) &dentry->d_iname)[i],
2690 ((long *) &target->d_iname)[i]);
2691 }
2692 }
2693 }
2694 swap(dentry->d_name.hash_len, target->d_name.hash_len);
2695 }
2696
2697 static void copy_name(struct dentry *dentry, struct dentry *target)
2698 {
2699 struct external_name *old_name = NULL;
2700 if (unlikely(dname_external(dentry)))
2701 old_name = external_name(dentry);
2702 if (unlikely(dname_external(target))) {
2703 atomic_inc(&external_name(target)->u.count);
2704 dentry->d_name = target->d_name;
2705 } else {
2706 memcpy(dentry->d_iname, target->d_name.name,
2707 target->d_name.len + 1);
2708 dentry->d_name.name = dentry->d_iname;
2709 dentry->d_name.hash_len = target->d_name.hash_len;
2710 }
2711 if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
2712 kfree_rcu(old_name, u.head);
2713 }
2714
2715 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2716 {
2717 /*
2718 * XXXX: do we really need to take target->d_lock?
2719 */
2720 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2721 spin_lock(&target->d_parent->d_lock);
2722 else {
2723 if (d_ancestor(dentry->d_parent, target->d_parent)) {
2724 spin_lock(&dentry->d_parent->d_lock);
2725 spin_lock_nested(&target->d_parent->d_lock,
2726 DENTRY_D_LOCK_NESTED);
2727 } else {
2728 spin_lock(&target->d_parent->d_lock);
2729 spin_lock_nested(&dentry->d_parent->d_lock,
2730 DENTRY_D_LOCK_NESTED);
2731 }
2732 }
2733 if (target < dentry) {
2734 spin_lock_nested(&target->d_lock, 2);
2735 spin_lock_nested(&dentry->d_lock, 3);
2736 } else {
2737 spin_lock_nested(&dentry->d_lock, 2);
2738 spin_lock_nested(&target->d_lock, 3);
2739 }
2740 }
2741
2742 static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target)
2743 {
2744 if (target->d_parent != dentry->d_parent)
2745 spin_unlock(&dentry->d_parent->d_lock);
2746 if (target->d_parent != target)
2747 spin_unlock(&target->d_parent->d_lock);
2748 spin_unlock(&target->d_lock);
2749 spin_unlock(&dentry->d_lock);
2750 }
2751
2752 /*
2753 * When switching names, the actual string doesn't strictly have to
2754 * be preserved in the target - because we're dropping the target
2755 * anyway. As such, we can just do a simple memcpy() to copy over
2756 * the new name before we switch, unless we are going to rehash
2757 * it. Note that if we *do* unhash the target, we are not allowed
2758 * to rehash it without giving it a new name/hash key - whether
2759 * we swap or overwrite the names here, the resulting name won't match
2760 * the reality in the filesystem; it's only there for d_path() purposes.
2761 * Note that all of this is happening under rename_lock, so any
2762 * hash lookup seeing it in the middle of manipulations will be
2763 * discarded anyway. So we do not care what happens to the hash
2764 * key in that case.
2765 */
2766 /*
2767 * __d_move - move a dentry
2768 * @dentry: entry to move
2769 * @target: new dentry
2770 * @exchange: exchange the two dentries
2771 *
2772 * Update the dcache to reflect the move of a file name. Negative
2773 * dcache entries should not be moved in this way. Caller must hold
2774 * rename_lock, the i_mutex of the source and target directories,
2775 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2776 */
2777 static void __d_move(struct dentry *dentry, struct dentry *target,
2778 bool exchange)
2779 {
2780 struct inode *dir = NULL;
2781 unsigned n;
2782 if (!dentry->d_inode)
2783 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2784
2785 BUG_ON(d_ancestor(dentry, target));
2786 BUG_ON(d_ancestor(target, dentry));
2787
2788 dentry_lock_for_move(dentry, target);
2789 if (unlikely(d_in_lookup(target))) {
2790 dir = target->d_parent->d_inode;
2791 n = start_dir_add(dir);
2792 __d_lookup_done(target);
2793 }
2794
2795 write_seqcount_begin(&dentry->d_seq);
2796 write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2797
2798 /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
2799
2800 /*
2801 * Move the dentry to the target hash queue. Don't bother checking
2802 * for the same hash queue because of how unlikely it is.
2803 */
2804 __d_drop(dentry);
2805 __d_rehash(dentry, d_hash(target->d_name.hash));
2806
2807 /*
2808 * Unhash the target (d_delete() is not usable here). If exchanging
2809 * the two dentries, then rehash onto the other's hash queue.
2810 */
2811 __d_drop(target);
2812 if (exchange) {
2813 __d_rehash(target, d_hash(dentry->d_name.hash));
2814 }
2815
2816 /* Switch the names.. */
2817 if (exchange)
2818 swap_names(dentry, target);
2819 else
2820 copy_name(dentry, target);
2821
2822 /* ... and switch them in the tree */
2823 if (IS_ROOT(dentry)) {
2824 /* splicing a tree */
2825 dentry->d_flags |= DCACHE_RCUACCESS;
2826 dentry->d_parent = target->d_parent;
2827 target->d_parent = target;
2828 list_del_init(&target->d_child);
2829 list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2830 } else {
2831 /* swapping two dentries */
2832 swap(dentry->d_parent, target->d_parent);
2833 list_move(&target->d_child, &target->d_parent->d_subdirs);
2834 list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2835 if (exchange)
2836 fsnotify_update_flags(target);
2837 fsnotify_update_flags(dentry);
2838 }
2839
2840 write_seqcount_end(&target->d_seq);
2841 write_seqcount_end(&dentry->d_seq);
2842
2843 if (dir)
2844 end_dir_add(dir, n);
2845 dentry_unlock_for_move(dentry, target);
2846 }
2847
2848 /*
2849 * d_move - move a dentry
2850 * @dentry: entry to move
2851 * @target: new dentry
2852 *
2853 * Update the dcache to reflect the move of a file name. Negative
2854 * dcache entries should not be moved in this way. See the locking
2855 * requirements for __d_move.
2856 */
2857 void d_move(struct dentry *dentry, struct dentry *target)
2858 {
2859 write_seqlock(&rename_lock);
2860 __d_move(dentry, target, false);
2861 write_sequnlock(&rename_lock);
2862 }
2863 EXPORT_SYMBOL(d_move);
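
/*
 * Most filesystems never call d_move() themselves: vfs_rename() does it
 * after a successful ->rename(). A by-hand rename in a pseudo filesystem
 * looks roughly like this (simplified from debugfs_rename()):
 *
 *	dentry = lookup_one_len(new_name, new_dir, strlen(new_name));
 *	if (!IS_ERR(dentry) && d_really_is_negative(dentry))
 *		d_move(old_dentry, dentry);
 */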
2864
2865 /*
2866 * d_exchange - exchange two dentries
2867 * @dentry1: first dentry
2868 * @dentry2: second dentry
2869 */
2870 void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2871 {
2872 write_seqlock(&rename_lock);
2873
2874 WARN_ON(!dentry1->d_inode);
2875 WARN_ON(!dentry2->d_inode);
2876 WARN_ON(IS_ROOT(dentry1));
2877 WARN_ON(IS_ROOT(dentry2));
2878
2879 __d_move(dentry1, dentry2, true);
2880
2881 write_sequnlock(&rename_lock);
2882 }
2883
2884 /**
2885 * d_ancestor - search for an ancestor
2886 * @p1: ancestor dentry
2887 * @p2: child dentry
2888 *
2889 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2890 * an ancestor of p2, else NULL.
2891 */
2892 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2893 {
2894 struct dentry *p;
2895
2896 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2897 if (p->d_parent == p1)
2898 return p;
2899 }
2900 return NULL;
2901 }
2902
2903 /*
2904 * This helper attempts to cope with remotely renamed directories
2905 *
2906 * It assumes that the caller is already holding
2907 * dentry->d_parent->d_inode->i_mutex, and rename_lock
2908 *
2909 * Note: If ever the locking in lock_rename() changes, then please
2910 * remember to update this too...
2911 */
2912 static int __d_unalias(struct inode *inode,
2913 struct dentry *dentry, struct dentry *alias)
2914 {
2915 struct mutex *m1 = NULL;
2916 struct rw_semaphore *m2 = NULL;
2917 int ret = -ESTALE;
2918
2919 /* If alias and dentry share a parent, then no extra locks required */
2920 if (alias->d_parent == dentry->d_parent)
2921 goto out_unalias;
2922
2923 /* See lock_rename() */
2924 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2925 goto out_err;
2926 m1 = &dentry->d_sb->s_vfs_rename_mutex;
2927 if (!inode_trylock_shared(alias->d_parent->d_inode))
2928 goto out_err;
2929 m2 = &alias->d_parent->d_inode->i_rwsem;
2930 out_unalias:
2931 __d_move(alias, dentry, false);
2932 ret = 0;
2933 out_err:
2934 if (m2)
2935 up_read(m2);
2936 if (m1)
2937 mutex_unlock(m1);
2938 return ret;
2939 }
2940
2941 /**
2942 * d_splice_alias - splice a disconnected dentry into the tree if one exists
2943 * @inode: the inode which may have a disconnected dentry
2944 * @dentry: a negative dentry which we want to point to the inode.
2945 *
2946 * If inode is a directory and has an IS_ROOT alias, then d_move that in
2947 * place of the given dentry and return it, else simply d_add the inode
2948 * to the dentry and return NULL.
2949 *
2950 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
2951 * we should error out: directories can't have multiple aliases.
2952 *
2953 * This is needed in the lookup routine of any filesystem that is exportable
2954 * (via knfsd) so that we can build dcache paths to directories effectively.
2955 *
2956 * If a dentry was found and moved, then it is returned. Otherwise NULL
2957 * is returned. This matches the expected return value of ->lookup.
2958 *
2959 * Cluster filesystems may call this function with a negative, hashed dentry.
2960 * In that case, we know that the inode will be a regular file, and also this
2961 * will only occur during atomic_open. So we need to check for the dentry
2962 * being already hashed only in the final case.
2963 */
2964 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
2965 {
2966 if (IS_ERR(inode))
2967 return ERR_CAST(inode);
2968
2969 BUG_ON(!d_unhashed(dentry));
2970
2971 if (!inode)
2972 goto out;
2973
2974 security_d_instantiate(dentry, inode);
2975 spin_lock(&inode->i_lock);
2976 if (S_ISDIR(inode->i_mode)) {
2977 struct dentry *new = __d_find_any_alias(inode);
2978 if (unlikely(new)) {
2979 /* The reference to new ensures it remains an alias */
2980 spin_unlock(&inode->i_lock);
2981 write_seqlock(&rename_lock);
2982 if (unlikely(d_ancestor(new, dentry))) {
2983 write_sequnlock(&rename_lock);
2984 dput(new);
2985 new = ERR_PTR(-ELOOP);
2986 pr_warn_ratelimited(
2987 "VFS: Lookup of '%s' in %s %s"
2988 " would have caused loop\n",
2989 dentry->d_name.name,
2990 inode->i_sb->s_type->name,
2991 inode->i_sb->s_id);
2992 } else if (!IS_ROOT(new)) {
2993 int err = __d_unalias(inode, dentry, new);
2994 write_sequnlock(&rename_lock);
2995 if (err) {
2996 dput(new);
2997 new = ERR_PTR(err);
2998 }
2999 } else {
3000 __d_move(new, dentry, false);
3001 write_sequnlock(&rename_lock);
3002 }
3003 iput(inode);
3004 return new;
3005 }
3006 }
3007 out:
3008 __d_add(dentry, inode);
3009 return NULL;
3010 }
3011 EXPORT_SYMBOL(d_splice_alias);
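
/*
 * The canonical ->lookup() for an on-disk, exportable filesystem, in the
 * style of ext4_lookup(); "myfs_inode_by_name" and "myfs_iget" are
 * hypothetical helpers:
 *
 *	static struct dentry *myfs_disk_lookup(struct inode *dir,
 *				struct dentry *dentry, unsigned int flags)
 *	{
 *		struct inode *inode = NULL;
 *		ino_t ino = myfs_inode_by_name(dir, &dentry->d_name);
 *
 *		if (ino)
 *			inode = myfs_iget(dir->i_sb, ino);
 *		return d_splice_alias(inode, dentry); /* copes with NULL/IS_ERR */
 *	}
 */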
3012
3013 static int prepend(char **buffer, int *buflen, const char *str, int namelen)
3014 {
3015 *buflen -= namelen;
3016 if (*buflen < 0)
3017 return -ENAMETOOLONG;
3018 *buffer -= namelen;
3019 memcpy(*buffer, str, namelen);
3020 return 0;
3021 }
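
/*
 * prepend() builds strings backwards from the end of the buffer, so
 * callers seed the buffer with a terminating NUL first, e.g.:
 *
 *	char buf[64];
 *	char *p = buf + sizeof(buf);
 *	int len = sizeof(buf);
 *
 *	prepend(&p, &len, "\0", 1);
 *	prepend(&p, &len, "bar", 3);
 *	prepend(&p, &len, "/", 1);	/* p now points at "/bar" */
 */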
3022
3023 /**
3024 * prepend_name - prepend a pathname in front of current buffer pointer
3025 * @buffer: buffer pointer
3026 * @buflen: allocated length of the buffer
3027 * @name: name string and length qstr structure
3028 *
3029 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
3030 * make sure that either the old or the new name pointer and length are
3031 * fetched. However, there may be a mismatch between length and pointer.
3032 * The length cannot be trusted; we need to copy it byte-by-byte until
3033 * the length is reached or a null byte is found. It also prepends "/" at
3034 * the beginning of the name. The sequence number check at the caller will
3035 * retry it again when a d_move() does happen. So any garbage in the buffer
3036 * due to mismatched pointer and length will be discarded.
3037 *
3038 * Data dependency barrier is needed to make sure that we see that terminating
3039 * NUL. Alpha strikes again, film at 11...
3040 */
3041 static int prepend_name(char **buffer, int *buflen, struct qstr *name)
3042 {
3043 const char *dname = ACCESS_ONCE(name->name);
3044 u32 dlen = ACCESS_ONCE(name->len);
3045 char *p;
3046
3047 smp_read_barrier_depends();
3048
3049 *buflen -= dlen + 1;
3050 if (*buflen < 0)
3051 return -ENAMETOOLONG;
3052 p = *buffer -= dlen + 1;
3053 *p++ = '/';
3054 while (dlen--) {
3055 char c = *dname++;
3056 if (!c)
3057 break;
3058 *p++ = c;
3059 }
3060 return 0;
3061 }
3062
3063 /**
3064 * prepend_path - Prepend path string to a buffer
3065 * @path: the dentry/vfsmount to report
3066 * @root: root vfsmnt/dentry
3067 * @buffer: pointer to the end of the buffer
3068 * @buflen: pointer to buffer length
3069 *
3070 * The function will first try to write out the pathname without taking any
3071 * lock other than the RCU read lock to make sure that dentries won't go away.
3072 * It only checks the sequence number of the global rename_lock as any change
3073 * in the dentry's d_seq will be preceded by changes in the rename_lock
3074 * sequence number. If the sequence number has been changed, it will restart
3075 * the whole pathname back-tracing sequence again by taking the rename_lock.
3076 * In this case, there is no need to take the RCU read lock as the recursive
3077 * parent pointer references will keep the dentry chain alive as long as no
3078 * rename operation is performed.
3079 */
3080 static int prepend_path(const struct path *path,
3081 const struct path *root,
3082 char **buffer, int *buflen)
3083 {
3084 struct dentry *dentry;
3085 struct vfsmount *vfsmnt;
3086 struct mount *mnt;
3087 int error = 0;
3088 unsigned seq, m_seq = 0;
3089 char *bptr;
3090 int blen;
3091
3092 rcu_read_lock();
3093 restart_mnt:
3094 read_seqbegin_or_lock(&mount_lock, &m_seq);
3095 seq = 0;
3096 rcu_read_lock();
3097 restart:
3098 bptr = *buffer;
3099 blen = *buflen;
3100 error = 0;
3101 dentry = path->dentry;
3102 vfsmnt = path->mnt;
3103 mnt = real_mount(vfsmnt);
3104 read_seqbegin_or_lock(&rename_lock, &seq);
3105 while (dentry != root->dentry || vfsmnt != root->mnt) {
3106 struct dentry * parent;
3107
3108 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
3109 struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
3110 /* Escaped? */
3111 if (dentry != vfsmnt->mnt_root) {
3112 bptr = *buffer;
3113 blen = *buflen;
3114 error = 3;
3115 break;
3116 }
3117 /* Global root? */
3118 if (mnt != parent) {
3119 dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
3120 mnt = parent;
3121 vfsmnt = &mnt->mnt;
3122 continue;
3123 }
3124 if (!error)
3125 error = is_mounted(vfsmnt) ? 1 : 2;
3126 break;
3127 }
3128 parent = dentry->d_parent;
3129 prefetch(parent);
3130 error = prepend_name(&bptr, &blen, &dentry->d_name);
3131 if (error)
3132 break;
3133
3134 dentry = parent;
3135 }
3136 if (!(seq & 1))
3137 rcu_read_unlock();
3138 if (need_seqretry(&rename_lock, seq)) {
3139 seq = 1;
3140 goto restart;
3141 }
3142 done_seqretry(&rename_lock, seq);
3143
3144 if (!(m_seq & 1))
3145 rcu_read_unlock();
3146 if (need_seqretry(&mount_lock, m_seq)) {
3147 m_seq = 1;
3148 goto restart_mnt;
3149 }
3150 done_seqretry(&mount_lock, m_seq);
3151
3152 if (error >= 0 && bptr == *buffer) {
3153 if (--blen < 0)
3154 error = -ENAMETOOLONG;
3155 else
3156 *--bptr = '/';
3157 }
3158 *buffer = bptr;
3159 *buflen = blen;
3160 return error;
3161 }
3162
3163 /**
3164 * __d_path - return the path of a dentry
3165 * @path: the dentry/vfsmount to report
3166 * @root: root vfsmnt/dentry
3167 * @buf: buffer to return value in
3168 * @buflen: buffer length
3169 *
3170 * Convert a dentry into an ASCII path name.
3171 *
3172 * Returns a pointer into the buffer or an error code if the
3173 * path was too long.
3174 *
3175 * "buflen" should be positive.
3176 *
3177 * If the path is not reachable from the supplied root, return %NULL.
3178 */
3179 char *__d_path(const struct path *path,
3180 const struct path *root,
3181 char *buf, int buflen)
3182 {
3183 char *res = buf + buflen;
3184 int error;
3185
3186 prepend(&res, &buflen, "\0", 1);
3187 error = prepend_path(path, root, &res, &buflen);
3188
3189 if (error < 0)
3190 return ERR_PTR(error);
3191 if (error > 0)
3192 return NULL;
3193 return res;
3194 }
3195
3196 char *d_absolute_path(const struct path *path,
3197 char *buf, int buflen)
3198 {
3199 struct path root = {};
3200 char *res = buf + buflen;
3201 int error;
3202
3203 prepend(&res, &buflen, "\0", 1);
3204 error = prepend_path(path, &root, &res, &buflen);
3205
3206 if (error > 1)
3207 error = -EINVAL;
3208 if (error < 0)
3209 return ERR_PTR(error);
3210 return res;
3211 }
3212
3213 /*
3214 * same as __d_path but appends "(deleted)" for unlinked files.
3215 */
3216 static int path_with_deleted(const struct path *path,
3217 const struct path *root,
3218 char **buf, int *buflen)
3219 {
3220 prepend(buf, buflen, "\0", 1);
3221 if (d_unlinked(path->dentry)) {
3222 int error = prepend(buf, buflen, " (deleted)", 10);
3223 if (error)
3224 return error;
3225 }
3226
3227 return prepend_path(path, root, buf, buflen);
3228 }
3229
3230 static int prepend_unreachable(char **buffer, int *buflen)
3231 {
3232 return prepend(buffer, buflen, "(unreachable)", 13);
3233 }
3234
3235 static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
3236 {
3237 unsigned seq;
3238
3239 do {
3240 seq = read_seqcount_begin(&fs->seq);
3241 *root = fs->root;
3242 } while (read_seqcount_retry(&fs->seq, seq));
3243 }
3244
3245 /**
3246 * d_path - return the path of a dentry
3247 * @path: path to report
3248 * @buf: buffer to return value in
3249 * @buflen: buffer length
3250 *
3251 * Convert a dentry into an ASCII path name. If the entry has been deleted
3252 * the string " (deleted)" is appended. Note that this is ambiguous.
3253 *
3254 * Returns a pointer into the buffer or an error code if the path was
3255 * too long. Note: Callers should use the returned pointer, not the passed
3256 * in buffer, to use the name! The implementation often starts at an offset
3257 * into the buffer, and may leave 0 bytes at the start.
3258 *
3259 * "buflen" should be positive.
3260 */
3261 char *d_path(const struct path *path, char *buf, int buflen)
3262 {
3263 char *res = buf + buflen;
3264 struct path root;
3265 int error;
3266
3267 /*
3268 * We have various synthetic filesystems that never get mounted. On
3269 * these filesystems dentries are never used for lookup purposes, and
3270 * thus don't need to be hashed. They also don't need a name until a
3271 * user wants to identify the object in /proc/pid/fd/. The little hack
3272 * below allows us to generate a name for these objects on demand:
3273 *
3274 * Some pseudo inodes are mountable. When they are mounted
3275 * path->dentry == path->mnt->mnt_root. In that case don't call d_dname
3276 * and instead have d_path return the mounted path.
3277 */
3278 if (path->dentry->d_op && path->dentry->d_op->d_dname &&
3279 (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
3280 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
3281
3282 rcu_read_lock();
3283 get_fs_root_rcu(current->fs, &root);
3284 error = path_with_deleted(path, &root, &res, &buflen);
3285 rcu_read_unlock();
3286
3287 if (error < 0)
3288 res = ERR_PTR(error);
3289 return res;
3290 }
3291 EXPORT_SYMBOL(d_path);
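
/*
 * Typical d_path() call; the result points somewhere inside the passed
 * buffer, not necessarily at its start:
 *
 *	char *buf = kmalloc(PATH_MAX, GFP_KERNEL);
 *	char *p = buf ? d_path(&file->f_path, buf, PATH_MAX) : NULL;
 *
 *	if (p && !IS_ERR(p))
 *		pr_debug("opened %s\n", p);
 *	kfree(buf);
 */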
3292
3293 /*
3294 * Helper function for dentry_operations.d_dname() members
3295 */
3296 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
3297 const char *fmt, ...)
3298 {
3299 va_list args;
3300 char temp[64];
3301 int sz;
3302
3303 va_start(args, fmt);
3304 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
3305 va_end(args);
3306
3307 if (sz > sizeof(temp) || sz > buflen)
3308 return ERR_PTR(-ENAMETOOLONG);
3309
3310 buffer += buflen - sz;
3311 return memcpy(buffer, temp, sz);
3312 }
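
/*
 * Sketch of a ->d_dname implementation in the style of pipefs's
 * pipefs_dname():
 *
 *	static char *myfs_dname(struct dentry *dentry, char *buffer,
 *				int buflen)
 *	{
 *		return dynamic_dname(dentry, buffer, buflen, "myfs:[%lu]",
 *				d_inode(dentry)->i_ino);
 *	}
 */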
3313
3314 char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
3315 {
3316 char *end = buffer + buflen;
3317 /* these dentries are never renamed, so d_lock is not needed */
3318 if (prepend(&end, &buflen, " (deleted)", 11) ||
3319 prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
3320 prepend(&end, &buflen, "/", 1))
3321 end = ERR_PTR(-ENAMETOOLONG);
3322 return end;
3323 }
3324 EXPORT_SYMBOL(simple_dname);
3325
3326 /*
3327 * Write full pathname from the root of the filesystem into the buffer.
3328 */
3329 static char *__dentry_path(struct dentry *d, char *buf, int buflen)
3330 {
3331 struct dentry *dentry;
3332 char *end, *retval;
3333 int len, seq = 0;
3334 int error = 0;
3335
3336 if (buflen < 2)
3337 goto Elong;
3338
3339 rcu_read_lock();
3340 restart:
3341 dentry = d;
3342 end = buf + buflen;
3343 len = buflen;
3344 prepend(&end, &len, "\0", 1);
3345 /* Get '/' right */
3346 retval = end-1;
3347 *retval = '/';
3348 read_seqbegin_or_lock(&rename_lock, &seq);
3349 while (!IS_ROOT(dentry)) {
3350 struct dentry *parent = dentry->d_parent;
3351
3352 prefetch(parent);
3353 error = prepend_name(&end, &len, &dentry->d_name);
3354 if (error)
3355 break;
3356
3357 retval = end;
3358 dentry = parent;
3359 }
3360 if (!(seq & 1))
3361 rcu_read_unlock();
3362 if (need_seqretry(&rename_lock, seq)) {
3363 seq = 1;
3364 goto restart;
3365 }
3366 done_seqretry(&rename_lock, seq);
3367 if (error)
3368 goto Elong;
3369 return retval;
3370 Elong:
3371 return ERR_PTR(-ENAMETOOLONG);
3372 }
3373
3374 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
3375 {
3376 return __dentry_path(dentry, buf, buflen);
3377 }
3378 EXPORT_SYMBOL(dentry_path_raw);
3379
3380 char *dentry_path(struct dentry *dentry, char *buf, int buflen)
3381 {
3382 char *p = NULL;
3383 char *retval;
3384
3385 if (d_unlinked(dentry)) {
3386 p = buf + buflen;
3387 if (prepend(&p, &buflen, "//deleted", 10) != 0)
3388 goto Elong;
3389 buflen++;
3390 }
3391 retval = __dentry_path(dentry, buf, buflen);
3392 if (!IS_ERR(retval) && p)
3393 *p = '/'; /* restore '/' overridden with '\0' */
3394 return retval;
3395 Elong:
3396 return ERR_PTR(-ENAMETOOLONG);
3397 }
3398
3399 static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
3400 struct path *pwd)
3401 {
3402 unsigned seq;
3403
3404 do {
3405 seq = read_seqcount_begin(&fs->seq);
3406 *root = fs->root;
3407 *pwd = fs->pwd;
3408 } while (read_seqcount_retry(&fs->seq, seq));
3409 }
3410
3411 /*
3412 * NOTE! The user-level library version returns a
3413 * character pointer. The kernel system call just
3414 * returns the length of the buffer filled (which
3415 * includes the ending '\0' character), or a negative
3416 * error value. So libc would do something like
3417 *
3418 * char *getcwd(char * buf, size_t size)
3419 * {
3420 * int retval;
3421 *
3422 * retval = sys_getcwd(buf, size);
3423 * if (retval >= 0)
3424 * return buf;
3425 * errno = -retval;
3426 * return NULL;
3427 * }
3428 */
3429 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
3430 {
3431 int error;
3432 struct path pwd, root;
3433 char *page = __getname();
3434
3435 if (!page)
3436 return -ENOMEM;
3437
3438 rcu_read_lock();
3439 get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);
3440
3441 error = -ENOENT;
3442 if (!d_unlinked(pwd.dentry)) {
3443 unsigned long len;
3444 char *cwd = page + PATH_MAX;
3445 int buflen = PATH_MAX;
3446
3447 prepend(&cwd, &buflen, "\0", 1);
3448 error = prepend_path(&pwd, &root, &cwd, &buflen);
3449 rcu_read_unlock();
3450
3451 if (error < 0)
3452 goto out;
3453
3454 /* Unreachable from current root */
3455 if (error > 0) {
3456 error = prepend_unreachable(&cwd, &buflen);
3457 if (error)
3458 goto out;
3459 }
3460
3461 error = -ERANGE;
3462 len = PATH_MAX + page - cwd;
3463 if (len <= size) {
3464 error = len;
3465 if (copy_to_user(buf, cwd, len))
3466 error = -EFAULT;
3467 }
3468 } else {
3469 rcu_read_unlock();
3470 }
3471
3472 out:
3473 __putname(page);
3474 return error;
3475 }
3476
3477 /*
3478 * Test whether new_dentry is a subdirectory of old_dentry.
3479 *
3480 * Trivially implemented using the dcache structure
3481 */
3482
3483 /**
3484 * is_subdir - is new dentry a subdirectory of old_dentry
3485 * @new_dentry: new dentry
3486 * @old_dentry: old dentry
3487 *
3488 * Returns true if new_dentry is a subdirectory of old_dentry (at any depth).
3489 * Returns false otherwise.
3490 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
3491 */
3492
3493 bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3494 {
3495 bool result;
3496 unsigned seq;
3497
3498 if (new_dentry == old_dentry)
3499 return true;
3500
3501 do {
3502 /* for restarting inner loop in case of seq retry */
3503 seq = read_seqbegin(&rename_lock);
3504 /*
3505 * Need rcu_read_lock() to protect against d_parent being
3506 * changed under us by d_move()
3507 */
3508 rcu_read_lock();
3509 if (d_ancestor(old_dentry, new_dentry))
3510 result = true;
3511 else
3512 result = false;
3513 rcu_read_unlock();
3514 } while (read_seqretry(&rename_lock, seq));
3515
3516 return result;
3517 }
3518
3519 static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3520 {
3521 struct dentry *root = data;
3522 if (dentry != root) {
3523 if (d_unhashed(dentry) || !dentry->d_inode)
3524 return D_WALK_SKIP;
3525
3526 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3527 dentry->d_flags |= DCACHE_GENOCIDE;
3528 dentry->d_lockref.count--;
3529 }
3530 }
3531 return D_WALK_CONTINUE;
3532 }
3533
3534 void d_genocide(struct dentry *parent)
3535 {
3536 d_walk(parent, parent, d_genocide_kill, NULL);
3537 }
3538
3539 void d_tmpfile(struct dentry *dentry, struct inode *inode)
3540 {
3541 inode_dec_link_count(inode);
3542 BUG_ON(dentry->d_name.name != dentry->d_iname ||
3543 !hlist_unhashed(&dentry->d_u.d_alias) ||
3544 !d_unlinked(dentry));
3545 spin_lock(&dentry->d_parent->d_lock);
3546 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3547 dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3548 (unsigned long long)inode->i_ino);
3549 spin_unlock(&dentry->d_lock);
3550 spin_unlock(&dentry->d_parent->d_lock);
3551 d_instantiate(dentry, inode);
3552 }
3553 EXPORT_SYMBOL(d_tmpfile);
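
/*
 * Sketch of a filesystem ->tmpfile() using d_tmpfile(), in the style of
 * ext4_tmpfile(); "myfs_new_inode" is the same hypothetical helper as in
 * the d_instantiate() example above:
 *
 *	static int myfs_tmpfile(struct inode *dir, struct dentry *dentry,
 *				umode_t mode)
 *	{
 *		struct inode *inode = myfs_new_inode(dir->i_sb, mode);
 *
 *		if (!inode)
 *			return -ENOSPC;
 *		d_tmpfile(dentry, inode);
 *		return 0;
 *	}
 */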
3554
3555 static __initdata unsigned long dhash_entries;
3556 static int __init set_dhash_entries(char *str)
3557 {
3558 if (!str)
3559 return 0;
3560 dhash_entries = simple_strtoul(str, &str, 0);
3561 return 1;
3562 }
3563 __setup("dhash_entries=", set_dhash_entries);
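
/*
 * Example: booting with "dhash_entries=1048576" on the kernel command
 * line pre-sizes the dentry hash table instead of letting
 * alloc_large_system_hash() scale it from available memory.
 */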
3564
3565 static void __init dcache_init_early(void)
3566 {
3567 unsigned int loop;
3568
3569 /* If hashes are distributed across NUMA nodes, defer
3570 * hash allocation until vmalloc space is available.
3571 */
3572 if (hashdist)
3573 return;
3574
3575 dentry_hashtable =
3576 alloc_large_system_hash("Dentry cache",
3577 sizeof(struct hlist_bl_head),
3578 dhash_entries,
3579 13,
3580 HASH_EARLY,
3581 &d_hash_shift,
3582 &d_hash_mask,
3583 0,
3584 0);
3585
3586 for (loop = 0; loop < (1U << d_hash_shift); loop++)
3587 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3588 }
3589
3590 static void __init dcache_init(void)
3591 {
3592 unsigned int loop;
3593
3594 /*
3595 * A constructor could be added for stable state like the lists,
3596 * but it is probably not worth it because of the cache nature
3597 * of the dcache.
3598 */
3599 dentry_cache = KMEM_CACHE(dentry,
3600 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT);
3601
3602 /* Hash may have been set up in dcache_init_early */
3603 if (!hashdist)
3604 return;
3605
3606 dentry_hashtable =
3607 alloc_large_system_hash("Dentry cache",
3608 sizeof(struct hlist_bl_head),
3609 dhash_entries,
3610 13,
3611 0,
3612 &d_hash_shift,
3613 &d_hash_mask,
3614 0,
3615 0);
3616
3617 for (loop = 0; loop < (1U << d_hash_shift); loop++)
3618 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3619 }
3620
3621 /* SLAB cache for __getname() consumers */
3622 struct kmem_cache *names_cachep __read_mostly;
3623 EXPORT_SYMBOL(names_cachep);
3624
3625 EXPORT_SYMBOL(d_genocide);
3626
3627 void __init vfs_caches_init_early(void)
3628 {
3629 dcache_init_early();
3630 inode_init_early();
3631 }
3632
3633 void __init vfs_caches_init(void)
3634 {
3635 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3636 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3637
3638 dcache_init();
3639 inode_init();
3640 files_init();
3641 files_maxfiles_init();
3642 mnt_init();
3643 bdev_cache_init();
3644 chrdev_init();
3645 }