#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */
const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;
/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}
struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (!IS_ROOT(dentry)) {
		inode = dentry->d_parent->d_inode;
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}
/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}

static int fpos_cmp(loff_t l, loff_t r)
{
	int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
	if (v)
		return v;
	return (int)(fpos_off(l) - fpos_off(r));
}
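/*
 * For reference only: the inverse helper ceph_make_fpos() used further
 * down is not defined in this file (it presumably lives in super.h).  A
 * minimal sketch, assuming it simply packs the frag into the high 32 bits
 * and the offset into the low 32 bits to mirror fpos_frag()/fpos_off():
 *
 *	static inline loff_t ceph_make_fpos(unsigned frag, unsigned off)
 *	{
 *		return ((loff_t)frag << 32) | (loff_t)off;
 *	}
 */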
/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *file, struct dir_context *ctx,
			    u32 shared_gen)
{
	struct ceph_file_info *fi = file->private_data;
	struct dentry *parent = file->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p v%u at %llu (last %p)\n",
	     dir, shared_gen, ctx->pos, last);

	spin_lock(&parent->d_lock);

	/* start at beginning? */
	if (ctx->pos == 2 || last == NULL ||
	    fpos_cmp(ctx->pos, ceph_dentry(last)->offset) < 0) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->flags |= CEPH_F_ATEND;
			goto out_unlock;
		}
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (di->lease_shared_gen == shared_gen &&
		    !d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    fpos_cmp(ctx->pos, di->offset) <= 0)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     ctx->pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		spin_unlock(&dentry->d_lock);
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);

	/* make sure a dentry wasn't dropped while we didn't have parent lock */
	if (!ceph_dir_is_complete_ordered(dir)) {
		dout(" lost dir complete on %p; falling back to mds\n", dir);
		dput(dentry);
		err = -EAGAIN;
		goto out;
	}

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	if (!dir_emit(ctx, dentry->d_name.name, dentry->d_name.len,
		      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
		      dentry->d_inode->i_mode >> 12)) {
		if (last) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = fpos_off(di->offset);
		}
		dput(dentry);
		return 0;
	}

	ctx->pos = di->offset + 1;

	if (last)
		dput(last);
	last = dentry;

	spin_lock(&parent->d_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&parent->d_lock);
out:
	if (last)
		dput(last);
	return err;
}
/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;

	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}
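/*
 * The saved name is handed to the MDS as req->r_path2 on the next
 * CEPH_MDS_OP_READDIR request (see ceph_readdir() below), so the server
 * can resume the listing just after this entry.
 */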
static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(ctx->pos);
	int off = fpos_off(ctx->pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;

	dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (ctx->pos == 0) {
		dout("readdir off 0 -> '.'\n");
		if (!dir_emit(ctx, ".", 1,
			      ceph_translate_ino(inode->i_sb, inode->i_ino),
			      inode->i_mode >> 12))
			return 0;
		ctx->pos = 1;
		off = 1;
	}
	if (ctx->pos == 1) {
		ino_t ino = parent_ino(file->f_dentry);
		dout("readdir off 1 -> '..'\n");
		if (!dir_emit(ctx, "..", 2,
			      ceph_translate_ino(inode->i_sb, ino),
			      inode->i_mode >> 12))
			return 0;
		ctx->pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if ((ctx->pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    __ceph_dir_is_complete_ordered(ci) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		u32 shared_gen = ci->i_shared_gen;
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(file, ctx, shared_gen);
		if (err != -EAGAIN)
			return err;
		frag = fpos_frag(ctx->pos);
		off = fpos_off(ctx->pos);
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

	if (ctx->pos == 2) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = atomic_read(&ci->i_release_count);
		fi->dir_ordered_count = ci->i_ordered_count;
	}

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		err = ceph_alloc_readdir_reply_buffer(req, inode);
		if (err) {
			ceph_mdsc_put_request(req);
			return err;
		}
		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(file->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			/* preclude from marking dir complete */
			fi->dir_release_count--;
		}

		/* note next offset and last dentry name */
		rinfo = &req->r_reply_info;
		if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
			frag = le32_to_cpu(rinfo->dir_dir->frag);
			if (ceph_frag_is_leftmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
			off = fi->next_offset;
		}
		fi->frag = frag;
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);

	ctx->pos = ceph_make_fpos(frag, off);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, ctx->pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (!dir_emit(ctx,
			      rinfo->dir_dname[off - fi->offset],
			      rinfo->dir_dname_len[off - fi->offset],
			      ceph_translate_ino(inode->i_sb, ino), ftype)) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		ctx->pos++;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		ctx->pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
		if (ci->i_ordered_count == fi->dir_ordered_count)
			dout(" marking %p complete and ordered\n", inode);
		else
			dout(" marking %p complete\n", inode);
		__ceph_dir_set_complete(ci, fi->dir_release_count,
					fi->dir_ordered_count);
	}
	spin_unlock(&ci->i_ceph_lock);

	dout("readdir %p file %p done.\n", inode, file);
	return 0;
}
static void reset_readdir(struct ceph_file_info *fi, unsigned frag)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	if (ceph_frag_is_leftmost(frag))
		fi->next_offset = 2;  /* compensate for . and .. */
	else
		fi->next_offset = 0;
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->flags &= ~CEPH_F_ATEND;
}
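/*
 * Why 2 above: ceph_readdir() synthesizes "." and ".." itself at
 * positions 0 and 1, so within the leftmost frag the first real entry
 * from the MDS lands at offset 2; non-leftmost frags start at 0.
 */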
static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = ceph_make_fpos(fi->frag, fi->next_offset);
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_END:
		offset += inode->i_size + 2;   /* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	case SEEK_SET:
		break;
	default:
		goto out;
	}

	if (offset >= 0) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fi->frag ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi, fpos_frag(offset));
		}

		/* bump dir_release_count if we did a forward seek */
		if (fpos_cmp(offset, old_offset) > 0)
			fi->dir_release_count--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */

	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}
/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);	/* we got spliced */
	else
		dentry = NULL;
	return dentry;
}
static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}
/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    __ceph_dir_is_complete(ci) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}
/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}
static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		return err;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(dentry->d_inode, &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}
static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}
static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err)
		d_drop(dentry);
	return err;
}
static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}

	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		goto out;

	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(dentry->d_inode, &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}
static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(old_dentry->d_inode);
		d_instantiate(dentry, old_dentry->d_inode);
	}
	ceph_mdsc_put_request(req);
	return err;
}
/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}
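/*
 * Worked example (illustrative only; the cap names are chosen for the
 * example, not taken from any particular caller): if the client still
 * wants CEPH_CAP_FILE_CACHE on a file whose link count is about to hit
 * zero, the mask above becomes LINK_SHARED | LINK_EXCL plus every cap
 * bit outside (FILE_CACHE | PIN), i.e. everything except PIN and the
 * caps we still explicitly want.
 */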
/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = S_ISDIR(dentry->d_inode->i_mode) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}
static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = old_dir;
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */
		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);

		/* d_move screws up sibling dentries' offsets */
		ceph_dir_clear_complete(old_dir);
		ceph_dir_clear_complete(new_dir);
	}
	ceph_mdsc_put_request(req);
	return err;
}
/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}
/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}
/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}
/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct inode *dir;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
	     ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
		valid = 1;
	} else if (dentry->d_inode &&
		   ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		if (dentry->d_inode)
			valid = ceph_is_any_caps(dentry->d_inode);
		else
			valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid)
		ceph_dentry_lru_touch(dentry);
	else
		ceph_dir_clear_complete(dir);
	iput(dir);
	return valid;
}
/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);
	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
	dentry->d_fsdata = NULL;
}
static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     unsigned int flags)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}
/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect dir's completeness */
	if (d_unhashed(dentry))
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	ceph_dir_clear_complete(dentry->d_parent->d_inode);
}
/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				 "entries:   %20lld\n"
				 " files:    %20lld\n"
				 " subdirs:  %20lld\n"
				 "rentries:  %20lld\n"
				 " rfiles:   %20lld\n"
				 " rsubdirs: %20lld\n"
				 "rbytes:    %20lld\n"
				 "rctime:    %10ld.%09ld\n",
				 ci->i_files + ci->i_subdirs,
				 ci->i_files,
				 ci->i_subdirs,
				 ci->i_rfiles + ci->i_rsubdirs,
				 ci->i_rfiles,
				 ci->i_rsubdirs,
				 ci->i_rbytes,
				 (long)ci->i_rctime.tv_sec,
				 (long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}
/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);

		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;  /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		ceph_mdsc_put_request(req);

		spin_lock(&ci->i_unsafe_lock);
		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	mutex_unlock(&inode->i_mutex);

	return ret;
}
/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
	     dn->d_name.len, dn->d_name.name, di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}
/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}
const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};