#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/pagelist.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/decode.h>

#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/slab.h>

#define XATTR_CEPH_PREFIX "ceph."
#define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)
static int __remove_xattr(struct ceph_inode_info *ci,
			  struct ceph_inode_xattr *xattr);
/*
 * List of handlers for synthetic system.* attributes.  Other
 * attributes are handled directly.
 */
const struct xattr_handler *ceph_xattr_handlers[] = {
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	NULL,
};
static bool ceph_is_valid_xattr(const char *name)
{
	return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
	       !strncmp(name, XATTR_SECURITY_PREFIX,
			XATTR_SECURITY_PREFIX_LEN) ||
	       !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
	       !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
	       !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}
/*
 * These define virtual xattrs exposing the recursive directory
 * statistics and layout metadata.
 */
struct ceph_vxattr {
	char *name;
	size_t name_size;	/* strlen(name) + 1 (for '\0') */
	size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
			      size_t size);
	bool readonly, hidden;
	bool (*exists_cb)(struct ceph_inode_info *ci);
};
static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
{
	size_t s;
	char *p = (char *)&ci->i_layout;

	/* a layout has been set if any byte of ci->i_layout is non-zero */
	for (s = 0; s < sizeof(ci->i_layout); s++, p++)
		if (*p)
			return true;
	return false;
}
static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
				   size_t size)
{
	int ret;
	struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	s64 pool = ceph_file_layout_pg_pool(ci->i_layout);
	const char *pool_name;
	char buf[128];

	dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
	down_read(&osdc->map_sem);
	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
	if (pool_name) {
		size_t len = strlen(pool_name);

		ret = snprintf(buf, sizeof(buf),
		"stripe_unit=%lld stripe_count=%lld object_size=%lld pool=",
		(unsigned long long)ceph_file_layout_su(ci->i_layout),
		(unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
		(unsigned long long)ceph_file_layout_object_size(ci->i_layout));
		if (!size) {
			/* size == 0: caller only wants the length */
			ret += len;
		} else if (ret + len > size) {
			ret = -ERANGE;
		} else {
			memcpy(val, buf, ret);
			memcpy(val + ret, pool_name, len);
			ret += len;
		}
	} else {
		ret = snprintf(buf, sizeof(buf),
		"stripe_unit=%lld stripe_count=%lld object_size=%lld pool=%lld",
		(unsigned long long)ceph_file_layout_su(ci->i_layout),
		(unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
		(unsigned long long)ceph_file_layout_object_size(ci->i_layout),
		(unsigned long long)pool);
		if (size) {
			if (ret <= size)
				memcpy(val, buf, ret);
			else
				ret = -ERANGE;
		}
	}
	up_read(&osdc->map_sem);
	return ret;
}
static size_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci,
					       char *val, size_t size)
{
	return snprintf(val, size, "%lld",
			(unsigned long long)ceph_file_layout_su(ci->i_layout));
}

static size_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci,
						char *val, size_t size)
{
	return snprintf(val, size, "%lld",
		(unsigned long long)ceph_file_layout_stripe_count(ci->i_layout));
}

static size_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci,
					       char *val, size_t size)
{
	return snprintf(val, size, "%lld",
		(unsigned long long)ceph_file_layout_object_size(ci->i_layout));
}
static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
					char *val, size_t size)
{
	int ret;
	struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	s64 pool = ceph_file_layout_pg_pool(ci->i_layout);
	const char *pool_name;

	down_read(&osdc->map_sem);
	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
	if (pool_name)
		ret = snprintf(val, size, "%s", pool_name);
	else
		ret = snprintf(val, size, "%lld", (unsigned long long)pool);
	up_read(&osdc->map_sem);
	return ret;
}
static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
}

static size_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val,
				      size_t size)
{
	return snprintf(val, size, "%lld", ci->i_files);
}

static size_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	return snprintf(val, size, "%lld", ci->i_subdirs);
}

static size_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val,
					 size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val,
				       size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rfiles);
}

static size_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val,
					 size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
				       size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rbytes);
}

static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
				       size_t size)
{
	/* seconds.nanoseconds, nanoseconds zero-padded to nine digits */
	return snprintf(val, size, "%ld.%09ld", (long)ci->i_rctime.tv_sec,
			(long)ci->i_rctime.tv_nsec);
}
#define CEPH_XATTR_NAME(_type, _name)	XATTR_CEPH_PREFIX #_type "." #_name
#define CEPH_XATTR_NAME2(_type, _name, _name2)	\
	XATTR_CEPH_PREFIX #_type "." #_name "." #_name2

#define XATTR_NAME_CEPH(_type, _name)					\
	{								\
		.name = CEPH_XATTR_NAME(_type, _name),			\
		.name_size = sizeof (CEPH_XATTR_NAME(_type, _name)),	\
		.getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name,	\
		.readonly = true,					\
		.hidden = false,					\
		.exists_cb = NULL,					\
	}
#define XATTR_LAYOUT_FIELD(_type, _name, _field)			\
	{								\
		.name = CEPH_XATTR_NAME2(_type, _name, _field),		\
		.name_size = sizeof (CEPH_XATTR_NAME2(_type, _name, _field)), \
		.getxattr_cb = ceph_vxattrcb_ ## _name ## _ ## _field,	\
		.readonly = false,					\
		.hidden = true,						\
		.exists_cb = ceph_vxattrcb_layout_exists,		\
	}
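/*
 * Per-file-type tables of virtual xattrs.  Each table must end with a
 * NULL-named terminator entry.  The layout entries use
 * ceph_vxattrcb_layout_exists as their exists_cb, so they are only
 * reported when a layout has actually been set on the inode.
 */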
static struct ceph_vxattr ceph_dir_vxattrs[] = {
	{
		.name = "ceph.dir.layout",
		.name_size = sizeof("ceph.dir.layout"),
		.getxattr_cb = ceph_vxattrcb_layout,
		.readonly = false,
		.hidden = true,
		.exists_cb = ceph_vxattrcb_layout_exists,
	},
	XATTR_LAYOUT_FIELD(dir, layout, stripe_unit),
	XATTR_LAYOUT_FIELD(dir, layout, stripe_count),
	XATTR_LAYOUT_FIELD(dir, layout, object_size),
	XATTR_LAYOUT_FIELD(dir, layout, pool),
	XATTR_NAME_CEPH(dir, entries),
	XATTR_NAME_CEPH(dir, files),
	XATTR_NAME_CEPH(dir, subdirs),
	XATTR_NAME_CEPH(dir, rentries),
	XATTR_NAME_CEPH(dir, rfiles),
	XATTR_NAME_CEPH(dir, rsubdirs),
	XATTR_NAME_CEPH(dir, rbytes),
	XATTR_NAME_CEPH(dir, rctime),
	{ .name = NULL, 0 }	/* Required table terminator */
};
static size_t ceph_dir_vxattrs_name_size;	/* total size of all names */
static struct ceph_vxattr ceph_file_vxattrs[] = {
	{
		.name = "ceph.file.layout",
		.name_size = sizeof("ceph.file.layout"),
		.getxattr_cb = ceph_vxattrcb_layout,
		.readonly = false,
		.hidden = true,
		.exists_cb = ceph_vxattrcb_layout_exists,
	},
	XATTR_LAYOUT_FIELD(file, layout, stripe_unit),
	XATTR_LAYOUT_FIELD(file, layout, stripe_count),
	XATTR_LAYOUT_FIELD(file, layout, object_size),
	XATTR_LAYOUT_FIELD(file, layout, pool),
	{ .name = NULL, 0 }	/* Required table terminator */
};
static size_t ceph_file_vxattrs_name_size;	/* total size of all names */
static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode))
		return ceph_dir_vxattrs;
	else if (S_ISREG(inode->i_mode))
		return ceph_file_vxattrs;
	return NULL;
}

static size_t ceph_vxattrs_name_size(struct ceph_vxattr *vxattrs)
{
	if (vxattrs == ceph_dir_vxattrs)
		return ceph_dir_vxattrs_name_size;
	if (vxattrs == ceph_file_vxattrs)
		return ceph_file_vxattrs_name_size;
	return 0;
}
/*
 * Compute the aggregate size (including terminating '\0') of all
 * virtual extended attribute names in the given vxattr table.
 */
static size_t __init vxattrs_name_size(struct ceph_vxattr *vxattrs)
{
	struct ceph_vxattr *vxattr;
	size_t size = 0;

	for (vxattr = vxattrs; vxattr->name; vxattr++)
		if (!vxattr->hidden)
			size += vxattr->name_size;

	return size;
}

/* Routines called at initialization and exit time */

void __init ceph_xattr_init(void)
{
	ceph_dir_vxattrs_name_size = vxattrs_name_size(ceph_dir_vxattrs);
	ceph_file_vxattrs_name_size = vxattrs_name_size(ceph_file_vxattrs);
}

void ceph_xattr_exit(void)
{
	ceph_dir_vxattrs_name_size = 0;
	ceph_file_vxattrs_name_size = 0;
}
static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
					     const char *name)
{
	struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode);

	if (vxattr) {
		while (vxattr->name) {
			if (!strcmp(vxattr->name, name))
				return vxattr;
			vxattr++;
		}
	}

	return NULL;
}
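/*
 * Insert or update an xattr in the per-inode rbtree (ci->i_xattrs.index).
 * update_xattr is 0 when (re)building from the MDS blob, positive for a
 * local set, and negative for a local removal; *newxattr is a
 * preallocated node used when the name is not already present.
 * Caller must hold i_ceph_lock.
 */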
static int __set_xattr(struct ceph_inode_info *ci,
		       const char *name, int name_len,
		       const char *val, int val_len,
		       int flags, int update_xattr,
		       struct ceph_inode_xattr **newxattr)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int c;
	int new = 0;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			if (name_len == xattr->name_len)
				break;
			else if (name_len < xattr->name_len)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
		xattr = NULL;
	}

	if (update_xattr) {
		int err = 0;

		if (xattr && (flags & XATTR_CREATE))
			err = -EEXIST;
		else if (!xattr && (flags & XATTR_REPLACE))
			err = -ENODATA;
		if (err) {
			kfree(name);
			kfree(val);
			kfree(*newxattr);
			return err;
		}
		if (update_xattr < 0) {
			if (xattr)
				__remove_xattr(ci, xattr);
			kfree(name);
			kfree(*newxattr);
			return 0;
		}
	}

	if (!xattr) {
		new = 1;
		xattr = *newxattr;
		xattr->name = name;
		xattr->name_len = name_len;
		xattr->should_free_name = update_xattr;

		ci->i_xattrs.count++;
		dout("__set_xattr count=%d\n", ci->i_xattrs.count);
	} else {
		kfree(*newxattr);
		*newxattr = NULL;
		if (xattr->should_free_val)
			kfree((void *)xattr->val);

		if (update_xattr) {
			kfree((void *)name);
			name = xattr->name;
		}
		ci->i_xattrs.names_size -= xattr->name_len;
		ci->i_xattrs.vals_size -= xattr->val_len;
	}
	ci->i_xattrs.names_size += name_len;
	ci->i_xattrs.vals_size += val_len;
	if (val)
		xattr->val = val;
	else
		xattr->val = "";

	xattr->val_len = val_len;
	xattr->dirty = update_xattr;
	xattr->should_free_val = (val && update_xattr);

	if (new) {
		rb_link_node(&xattr->node, parent, p);
		rb_insert_color(&xattr->node, &ci->i_xattrs.index);
		dout("__set_xattr_val p=%p\n", p);
	}

	dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
	     ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);

	return 0;
}
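/*
 * Look up an xattr by name in ci->i_xattrs.index.  Returns the node, or
 * NULL if the name is not present.  Caller must hold i_ceph_lock.
 */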
static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
					    const char *name)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int name_len = strlen(name);
	int c;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		c = strncmp(name, xattr->name, xattr->name_len);
		if (c == 0 && name_len > xattr->name_len)
			c = 1;
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			dout("__get_xattr %s: found %.*s\n", name,
			     xattr->val_len, xattr->val);
			return xattr;
		}
	}

	dout("__get_xattr %s: not found\n", name);

	return NULL;
}
static void __free_xattr(struct ceph_inode_xattr *xattr)
{
	if (xattr->should_free_name)
		kfree((void *)xattr->name);
	if (xattr->should_free_val)
		kfree((void *)xattr->val);

	kfree(xattr);
}
static int __remove_xattr(struct ceph_inode_info *ci,
			  struct ceph_inode_xattr *xattr)
{
	if (!xattr)
		return -ENODATA;

	rb_erase(&xattr->node, &ci->i_xattrs.index);

	if (xattr->should_free_name)
		kfree((void *)xattr->name);
	if (xattr->should_free_val)
		kfree((void *)xattr->val);

	ci->i_xattrs.names_size -= xattr->name_len;
	ci->i_xattrs.vals_size -= xattr->val_len;
	ci->i_xattrs.count--;
	kfree(xattr);

	return 0;
}
static int __remove_xattr_by_name(struct ceph_inode_info *ci,
				  const char *name)
{
	struct rb_node **p;
	struct ceph_inode_xattr *xattr;
	int err;

	p = &ci->i_xattrs.index.rb_node;
	xattr = __get_xattr(ci, name);
	err = __remove_xattr(ci, xattr);
	return err;
}
static char *__copy_xattr_names(struct ceph_inode_info *ci,
				char *dest)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;

	p = rb_first(&ci->i_xattrs.index);
	dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);

	while (p) {
		xattr = rb_entry(p, struct ceph_inode_xattr, node);
		memcpy(dest, xattr->name, xattr->name_len);
		dest[xattr->name_len] = '\0';

		dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
		     xattr->name_len, ci->i_xattrs.names_size);

		dest += xattr->name_len + 1;
		p = rb_next(p);
	}

	return dest;
}
void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
{
	struct rb_node *p, *tmp;
	struct ceph_inode_xattr *xattr = NULL;

	p = rb_first(&ci->i_xattrs.index);

	dout("__ceph_destroy_xattrs p=%p\n", p);

	while (p) {
		xattr = rb_entry(p, struct ceph_inode_xattr, node);
		tmp = p;
		p = rb_next(tmp);
		dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
		     xattr->name_len, xattr->name);
		rb_erase(tmp, &ci->i_xattrs.index);

		__free_xattr(xattr);
	}

	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.index_version = 0;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.index = RB_ROOT;
}
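/*
 * Decode the xattr blob received from the MDS (ci->i_xattrs.blob) into
 * the in-memory rbtree.  i_ceph_lock is dropped while the nodes are
 * allocated, so the version is rechecked afterwards and the decode is
 * restarted if the blob changed in the meantime.
 */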
static int __build_xattrs(struct inode *inode)
	__releases(ci->i_ceph_lock)
	__acquires(ci->i_ceph_lock)
{
	u32 namelen;
	u32 numattr = 0;
	void *p, *end;
	u32 len;
	const char *name, *val;
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 xattr_version;
	struct ceph_inode_xattr **xattrs = NULL;
	int err = 0;
	int i;

	dout("__build_xattrs() len=%d\n",
	     ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);

	if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
		return 0; /* already built */

	__ceph_destroy_xattrs(ci);

start:
	/* updated internal xattr rb tree */
	if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
		p = ci->i_xattrs.blob->vec.iov_base;
		end = p + ci->i_xattrs.blob->vec.iov_len;
		ceph_decode_32_safe(&p, end, numattr, bad);
		xattr_version = ci->i_xattrs.version;
		spin_unlock(&ci->i_ceph_lock);

		xattrs = kcalloc(numattr, sizeof(struct ceph_inode_xattr *),
				 GFP_NOFS);
		err = -ENOMEM;
		if (!xattrs)
			goto bad_lock;

		for (i = 0; i < numattr; i++) {
			xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
					    GFP_NOFS);
			if (!xattrs[i])
				goto bad_lock;
		}

		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.version != xattr_version) {
			/* lost a race, retry */
			for (i = 0; i < numattr; i++)
				kfree(xattrs[i]);
			kfree(xattrs);
			xattrs = NULL;
			goto start;
		}
		err = -EIO;
		while (numattr--) {
			ceph_decode_32_safe(&p, end, len, bad);
			namelen = len;
			name = p;
			p += len;
			ceph_decode_32_safe(&p, end, len, bad);
			val = p;
			p += len;

			err = __set_xattr(ci, name, namelen, val, len,
					  0, 0, &xattrs[numattr]);
			if (err < 0)
				goto bad;
		}
		kfree(xattrs);
	}
	ci->i_xattrs.index_version = ci->i_xattrs.version;
	ci->i_xattrs.dirty = false;

	return err;
bad_lock:
	spin_lock(&ci->i_ceph_lock);
bad:
	if (xattrs) {
		for (i = 0; i < numattr; i++)
			kfree(xattrs[i]);
		kfree(xattrs);
	}
	ci->i_xattrs.names_size = 0;
	return err;
}
static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
				    int val_size)
{
	/*
	 * 4 bytes for the count, plus 4 length bytes per xattr name and
	 * 4 per value.
	 */
	int size = 4 + ci->i_xattrs.count*(4 + 4) +
		     ci->i_xattrs.names_size +
		     ci->i_xattrs.vals_size;
	dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
	     ci->i_xattrs.count, ci->i_xattrs.names_size,
	     ci->i_xattrs.vals_size);

	if (name_size)
		size += 4 + 4 + name_size + val_size;

	return size;
}
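/*
 * Illustrative (hypothetical) example of the formula above: an inode
 * already holding two xattrs, "user.a" (6-byte name, 3-byte value) and
 * "user.b" (6-byte name, 5-byte value), has count=2, names_size=12 and
 * vals_size=8, so the existing entries need
 *	4 + 2*(4 + 4) + 12 + 8 = 40
 * bytes of blob space, before any extra room for a new name/value pair.
 */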
/*
 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
 * and swap into place.
 */
void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;
	void *dest;

	dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
	if (ci->i_xattrs.dirty) {
		int need = __get_required_blob_size(ci, 0, 0);

		BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);

		p = rb_first(&ci->i_xattrs.index);
		dest = ci->i_xattrs.prealloc_blob->vec.iov_base;

		ceph_encode_32(&dest, ci->i_xattrs.count);
		while (p) {
			xattr = rb_entry(p, struct ceph_inode_xattr, node);

			ceph_encode_32(&dest, xattr->name_len);
			memcpy(dest, xattr->name, xattr->name_len);
			dest += xattr->name_len;
			ceph_encode_32(&dest, xattr->val_len);
			memcpy(dest, xattr->val, xattr->val_len);
			dest += xattr->val_len;

			p = rb_next(p);
		}

		/* adjust buffer len; it may be larger than we need */
		ci->i_xattrs.prealloc_blob->vec.iov_len =
			dest - ci->i_xattrs.prealloc_blob->vec.iov_base;

		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
		ci->i_xattrs.prealloc_blob = NULL;
		ci->i_xattrs.dirty = false;
		ci->i_xattrs.version++;
	}
}
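/*
 * If the current task is in the middle of filling an MDS reply trace
 * (current->journal_info points at the request), return the getattr/open
 * mask that request carried so the caller can tell whether the reply
 * already included the xattr blob.  Returns 0 otherwise.
 */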
static inline int __get_request_mask(struct inode *in) {
	struct ceph_mds_request *req = current->journal_info;
	int mask = 0;

	if (req && req->r_target_inode == in) {
		if (req->r_op == CEPH_MDS_OP_LOOKUP ||
		    req->r_op == CEPH_MDS_OP_LOOKUPINO ||
		    req->r_op == CEPH_MDS_OP_LOOKUPPARENT ||
		    req->r_op == CEPH_MDS_OP_GETATTR) {
			mask = le32_to_cpu(req->r_args.getattr.mask);
		} else if (req->r_op == CEPH_MDS_OP_OPEN ||
			   req->r_op == CEPH_MDS_OP_CREATE) {
			mask = le32_to_cpu(req->r_args.open.mask);
		}
	}
	return mask;
}
ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
			size_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_xattr *xattr;
	struct ceph_vxattr *vxattr = NULL;
	int req_mask;
	int err;

	if (!ceph_is_valid_xattr(name))
		return -ENODATA;

	/* let's see if a virtual xattr was requested */
	vxattr = ceph_match_vxattr(inode, name);
	if (vxattr) {
		err = -ENODATA;
		if (!(vxattr->exists_cb && !vxattr->exists_cb(ci)))
			err = vxattr->getxattr_cb(ci, value, size);
		return err;
	}

	req_mask = __get_request_mask(inode);

	spin_lock(&ci->i_ceph_lock);
	dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (ci->i_xattrs.version == 0 ||
	    !((req_mask & CEPH_CAP_XATTR_SHARED) ||
	      __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1))) {
		spin_unlock(&ci->i_ceph_lock);

		/* security module gets xattr while filling trace */
		if (current->journal_info != NULL) {
			pr_warn_ratelimited("sync getxattr %p "
					    "during filling trace\n", inode);
			return -EBUSY;
		}

		/* get xattrs from mds (if we don't already have them) */
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
		if (err)
			return err;
		spin_lock(&ci->i_ceph_lock);
	}

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

	err = -ENODATA;  /* == ENOATTR */
	xattr = __get_xattr(ci, name);
	if (!xattr)
		goto out;

	err = -ERANGE;
	if (size && size < xattr->val_len)
		goto out;

	err = xattr->val_len;
	if (size == 0)
		goto out;

	memcpy(value, xattr->val, xattr->val_len);

	if (current->journal_info != NULL &&
	    !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN))
		ci->i_ceph_flags |= CEPH_I_SEC_INITED;
out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}
ssize_t ceph_getxattr(struct dentry *dentry, struct inode *inode,
		      const char *name, void *value, size_t size)
{
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_getxattr(dentry, inode, name, value, size);

	return __ceph_getxattr(inode, name, value, size);
}
ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
	struct inode *inode = d_inode(dentry);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode);
	u32 vir_namelen = 0;
	u32 namelen;
	int err;
	u32 len;
	int i;

	spin_lock(&ci->i_ceph_lock);
	dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (ci->i_xattrs.version == 0 ||
	    !__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1)) {
		spin_unlock(&ci->i_ceph_lock);
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
		if (err)
			return err;
		spin_lock(&ci->i_ceph_lock);
	}

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;
	/*
	 * Start with virtual dir xattr names (if any) (including
	 * terminating '\0' characters for each).
	 */
	vir_namelen = ceph_vxattrs_name_size(vxattrs);

	/* add 1 byte per name for the trailing '\0' */
	namelen = ci->i_xattrs.names_size + ci->i_xattrs.count;
	err = -ERANGE;
	if (size && vir_namelen + namelen > size)
		goto out;

	err = namelen + vir_namelen;
	if (size == 0)
		goto out;

	names = __copy_xattr_names(ci, names);

	/* virtual xattr names, too */
	err = namelen;
	if (vxattrs) {
		for (i = 0; vxattrs[i].name; i++) {
			if (!vxattrs[i].hidden &&
			    !(vxattrs[i].exists_cb &&
			      !vxattrs[i].exists_cb(ci))) {
				len = sprintf(names, "%s", vxattrs[i].name);
				names += len + 1;
				err += len + 1;
			}
		}
	}

out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}
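/*
 * Synchronously set an xattr on the MDS: the value is shipped in a
 * pagelist attached to a SETXATTR request, and CEPH_CAP_XATTR_SHARED is
 * dropped on the inode as part of the request.
 */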
static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
			      const char *value, size_t size, int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *inode = d_inode(dentry);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_pagelist *pagelist = NULL;
	int err;

	if (size > 0) {
		/* copy value into pagelist */
		pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
		if (!pagelist)
			return -ENOMEM;

		ceph_pagelist_init(pagelist);
		err = ceph_pagelist_append(pagelist, value, size);
		if (err)
			goto out;
	} else if (!value) {
		flags |= CEPH_XATTR_REMOVE;
	}

	dout("setxattr value=%.*s\n", (int)size, value);

	/* do request */
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_args.setxattr.flags = cpu_to_le32(flags);
	req->r_path2 = kstrdup(name, GFP_NOFS);
	if (!req->r_path2) {
		ceph_mdsc_put_request(req);
		err = -ENOMEM;
		goto out;
	}

	req->r_pagelist = pagelist;
	pagelist = NULL;

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_inode_drop = CEPH_CAP_XATTR_SHARED;

	dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);

out:
	if (pagelist)
		ceph_pagelist_release(pagelist);
	return err;
}
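/*
 * Set an xattr.  If we hold CEPH_CAP_XATTR_EXCL the update is applied to
 * the local rbtree and marked dirty; otherwise (or if anything on the
 * local path fails) we fall back to a synchronous MDS request via
 * ceph_sync_setxattr().
 */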
int __ceph_setxattr(struct dentry *dentry, const char *name,
		    const void *value, size_t size, int flags)
{
	struct inode *inode = d_inode(dentry);
	struct ceph_vxattr *vxattr;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
	struct ceph_cap_flush *prealloc_cf = NULL;
	int issued;
	int err;
	int dirty = 0;
	int name_len = strlen(name);
	int val_len = size;
	char *newname = NULL;
	char *newval = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int required_blob_size;
	bool lock_snap_rwsem = false;

	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;

	vxattr = ceph_match_vxattr(inode, name);
	if (vxattr && vxattr->readonly)
		return -EOPNOTSUPP;

	/* pass any unhandled ceph.* xattrs through to the MDS */
	if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
		goto do_sync_unlocked;

	/* preallocate memory for xattr name, value, index node */
	err = -ENOMEM;
	newname = kmemdup(name, name_len + 1, GFP_NOFS);
	if (!newname)
		goto out;

	if (val_len) {
		newval = kmemdup(value, val_len, GFP_NOFS);
		if (!newval)
			goto out;
	}

	xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
	if (!xattr)
		goto out;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		goto out;

	spin_lock(&ci->i_ceph_lock);
retry:
	issued = __ceph_caps_issued(ci, NULL);
	if (ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))
		goto do_sync;

	if (!lock_snap_rwsem && !ci->i_head_snapc) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);
			goto retry;
		}
	}

	dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
	__build_xattrs(inode);

	required_blob_size = __get_required_blob_size(ci, name_len, val_len);

	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob;

		spin_unlock(&ci->i_ceph_lock);
		dout(" preallocating new blob size=%d\n", required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
		if (!blob)
			goto do_sync_unlocked;
		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.prealloc_blob)
			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
		ci->i_xattrs.prealloc_blob = blob;
		goto retry;
	}

	err = __set_xattr(ci, newname, name_len, newval, val_len,
			  flags, value ? 1 : -1, &xattr);

	if (!err) {
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL,
					       &prealloc_cf);
		ci->i_xattrs.dirty = true;
		inode->i_ctime = current_fs_time(inode->i_sb);
	}

	spin_unlock(&ci->i_ceph_lock);
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	ceph_free_cap_flush(prealloc_cf);
	return err;

do_sync:
	spin_unlock(&ci->i_ceph_lock);
do_sync_unlocked:
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);

	/* security module set xattr while filling trace */
	if (current->journal_info != NULL) {
		pr_warn_ratelimited("sync setxattr %p "
				    "during filling trace\n", inode);
		err = -EBUSY;
	} else {
		err = ceph_sync_setxattr(dentry, name, value, size, flags);
	}
out:
	ceph_free_cap_flush(prealloc_cf);
	kfree(newname);
	kfree(newval);
	kfree(xattr);
	return err;
}
int ceph_setxattr(struct dentry *dentry, const char *name,
		  const void *value, size_t size, int flags)
{
	if (ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
		return -EROFS;

	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_setxattr(dentry, name, value, size, flags);

	if (size == 0)
		value = "";	/* empty EA, do not remove */

	return __ceph_setxattr(dentry, name, value, size, flags);
}
static int ceph_send_removexattr(struct dentry *dentry, const char *name)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = d_inode(dentry);
	struct ceph_mds_request *req;
	int err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_path2 = kstrdup(name, GFP_NOFS);
	if (!req->r_path2) {
		ceph_mdsc_put_request(req);
		return -ENOMEM;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	return err;
}
int __ceph_removexattr(struct dentry *dentry, const char *name)
{
	struct inode *inode = d_inode(dentry);
	struct ceph_vxattr *vxattr;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
	struct ceph_cap_flush *prealloc_cf = NULL;
	int issued;
	int err;
	int required_blob_size;
	int dirty;
	bool lock_snap_rwsem = false;

	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;

	vxattr = ceph_match_vxattr(inode, name);
	if (vxattr && vxattr->readonly)
		return -EOPNOTSUPP;

	/* pass any unhandled ceph.* xattrs through to the MDS */
	if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
		goto do_sync_unlocked;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	spin_lock(&ci->i_ceph_lock);
retry:
	issued = __ceph_caps_issued(ci, NULL);
	if (ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))
		goto do_sync;

	if (!lock_snap_rwsem && !ci->i_head_snapc) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);
			goto retry;
		}
	}

	dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));

	__build_xattrs(inode);

	required_blob_size = __get_required_blob_size(ci, 0, 0);

	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob;

		spin_unlock(&ci->i_ceph_lock);
		dout(" preallocating new blob size=%d\n", required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
		if (!blob)
			goto do_sync_unlocked;
		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.prealloc_blob)
			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
		ci->i_xattrs.prealloc_blob = blob;
		goto retry;
	}

	err = __remove_xattr_by_name(ceph_inode(inode), name);

	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL,
				       &prealloc_cf);
	ci->i_xattrs.dirty = true;
	inode->i_ctime = current_fs_time(inode->i_sb);
	spin_unlock(&ci->i_ceph_lock);
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	ceph_free_cap_flush(prealloc_cf);
	return err;
do_sync:
	spin_unlock(&ci->i_ceph_lock);
do_sync_unlocked:
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
	ceph_free_cap_flush(prealloc_cf);
	err = ceph_send_removexattr(dentry, name);
	return err;
}
int ceph_removexattr(struct dentry *dentry, const char *name)
{
	if (ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
		return -EROFS;

	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_removexattr(dentry, name);

	return __ceph_removexattr(dentry, name);
}
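/*
 * Helpers for the security xattr path: ceph_security_xattr_wanted()
 * reports whether the LSM may ask for xattrs on this inode at all, and
 * ceph_security_xattr_deadlock() detects the case where fetching a
 * security xattr would need a synchronous MDS getattr (no xattr blob or
 * SHARED cap in hand), which is not safe while a reply trace is being
 * filled.
 */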
#ifdef CONFIG_SECURITY
bool ceph_security_xattr_wanted(struct inode *in)
{
	return in->i_security != NULL;
}

bool ceph_security_xattr_deadlock(struct inode *in)
{
	struct ceph_inode_info *ci;
	bool ret;

	if (in->i_security == NULL)
		return false;
	ci = ceph_inode(in);
	spin_lock(&ci->i_ceph_lock);
	ret = !(ci->i_ceph_flags & CEPH_I_SEC_INITED) &&
	      !(ci->i_xattrs.version > 0 &&
		__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0));
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}
#endif