ceph: add missing init_acl() for mkdir() and atomic_open()
fs/ceph/dir.c
#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}

struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (!IS_ROOT(dentry)) {
		inode = dentry->d_parent->d_inode;
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}


/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}
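/*
 * Worked example: frag 0x1000 at offset 2 is encoded as
 * ((loff_t)0x1000 << 32) | 2 == 0x0000100000000002, and the two
 * helpers above recover 0x1000 and 2 from that f_pos value.
 */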

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct dentry *parent = file->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p at %llu (last %p)\n", dir, ctx->pos,
	     last);

	spin_lock(&parent->d_lock);

	/* start at beginning? */
	if (ctx->pos == 2 || last == NULL ||
	    ctx->pos < ceph_dentry(last)->offset) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->flags |= CEPH_F_ATEND;
			goto out_unlock;
		}
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (!d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    ctx->pos <= di->offset)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     ctx->pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		spin_unlock(&dentry->d_lock);
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	ctx->pos = di->offset;
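	/* (i_mode >> 12) yields the DT_* file type that dir_emit() expects */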
	if (!dir_emit(ctx, dentry->d_name.name,
		      dentry->d_name.len,
		      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
		      dentry->d_inode->i_mode >> 12)) {
		if (last) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = di->offset;
		}
		dput(dentry);
		return 0;
	}

	if (last)
		dput(last);
	last = dentry;

	ctx->pos++;

	/* make sure a dentry wasn't dropped while we didn't have parent lock */
	if (!ceph_dir_is_complete(dir)) {
		dout(" lost dir complete on %p; falling back to mds\n", dir);
		err = -EAGAIN;
		goto out;
	}

	spin_lock(&parent->d_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&parent->d_lock);
out:
	if (last)
		dput(last);
	return err;
}

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}

static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(ctx->pos);
	int off = fpos_off(ctx->pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;
	const int max_entries = fsc->mount_options->max_readdir;
	const int max_bytes = fsc->mount_options->max_readdir_bytes;

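	/* ctx->pos packs (frag << 32) | off; see fpos_frag()/fpos_off() */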
	dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (ctx->pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = atomic_read(&ci->i_release_count);

		dout("readdir off 0 -> '.'\n");
		if (!dir_emit(ctx, ".", 1,
			    ceph_translate_ino(inode->i_sb, inode->i_ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 1;
		off = 1;
	}
	if (ctx->pos == 1) {
		ino_t ino = parent_ino(file->f_dentry);
		dout("readdir off 1 -> '..'\n");
		if (!dir_emit(ctx, "..", 2,
			    ceph_translate_ino(inode->i_sb, ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if ((ctx->pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    __ceph_dir_is_complete(ci) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(file, ctx);
		if (err != -EAGAIN)
			return err;
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		/* requery frag tree, as the frag topology may have changed */
		frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(file->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
		req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
		req->r_num_caps = max_entries + 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			/* prevent marking the dir complete */
			fi->dir_release_count--;
		}

		/* note next offset and last dentry name */
		rinfo = &req->r_reply_info;
		if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
			frag = le32_to_cpu(rinfo->dir_dir->frag);
			if (ceph_frag_is_leftmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
			off = fi->next_offset;
		}
		fi->offset = fi->next_offset;
		fi->last_readdir = req;
		fi->frag = frag;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);

	ctx->pos = ceph_make_fpos(frag, off);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, ctx->pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (!dir_emit(ctx,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    ceph_translate_ino(inode->i_sb, ino), ftype)) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		ctx->pos++;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		ctx->pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
		dout(" marking %p complete\n", inode);
		__ceph_dir_set_complete(ci, fi->dir_release_count);
		ci->i_max_offset = ctx->pos;
	}
	spin_unlock(&ci->i_ceph_lock);

	dout("readdir %p file %p done.\n", inode, file);
	return 0;
}

static void reset_readdir(struct ceph_file_info *fi)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->next_offset = 2;  /* compensate for . and .. */
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->flags &= ~CEPH_F_ATEND;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = offset;
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_END:
		offset += inode->i_size + 2;   /* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
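		/* fall through */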
	case SEEK_SET:
		break;
	default:
		goto out;
	}

	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fpos_frag(old_offset) ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi);
		}

		/* decrement dir_release_count on a forward seek so
		 * readdir won't later mark the dir complete */
		if (offset > old_offset)
			fi->dir_release_count--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);   /* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

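/* note: the strncmp() below matches any name that begins with ".ceph" */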
static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    __ceph_dir_is_complete(ci) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);

	if (!err)
		ceph_init_acl(dentry, dentry->d_inode, dir);
	else
		d_drop(dentry);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (!err)
		ceph_init_acl(dentry, dentry->d_inode, dir);
	else
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
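	/* the previously missing init_acl() for mkdir() noted in the
	 * commit title: set up the new inode's initial ACLs */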
	if (!err)
		ceph_init_acl(dentry, dentry->d_inode, dir);
	else
		d_drop(dentry);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
	req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(old_dentry->d_inode);
		d_instantiate(dentry, old_dentry->d_inode);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}
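
/* callers feed the returned mask to req->r_inode_drop; see ceph_unlink()
 * and ceph_rename() below */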

/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = S_ISDIR(dentry->d_inode->i_mode) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up d_subdirs order */
		ceph_dir_clear_complete(new_dir);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct inode *dir;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
	     ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
		valid = 1;
	} else if (dentry->d_inode &&
		   ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		if (dentry->d_inode)
			valid = ceph_is_any_caps(dentry->d_inode);
		else
			valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid) {
		ceph_dentry_lru_touch(dentry);
	} else {
		ceph_dir_clear_complete(dir);
		d_drop(dentry);
	}
	iput(dir);
	return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);
	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
	dentry->d_fsdata = NULL;
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     unsigned int flags)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect dir's completeness */
	if (d_unhashed(dentry))
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	ceph_dir_clear_complete(dentry->d_parent->d_inode);
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				 "entries:   %20lld\n"
				 " files:    %20lld\n"
				 " subdirs:  %20lld\n"
				 "rentries:  %20lld\n"
				 " rfiles:   %20lld\n"
				 " rsubdirs: %20lld\n"
				 "rbytes:    %20lld\n"
				 "rctime:    %10ld.%09ld\n",
				 ci->i_files + ci->i_subdirs,
				 ci->i_files,
				 ci->i_subdirs,
				 ci->i_rfiles + ci->i_rsubdirs,
				 ci->i_rfiles,
				 ci->i_rsubdirs,
				 ci->i_rbytes,
				 (long)ci->i_rctime.tv_sec,
				 (long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}

/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);

		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;  /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		ceph_mdsc_put_request(req);

		spin_lock(&ci->i_unsafe_lock);
		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	mutex_unlock(&inode->i_mutex);

	return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
	     dn->d_name.len, dn->d_name.name, di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};