ceph: use empty snap context for uninline_data and get_pool_perm
authorYan, Zheng <zyan@redhat.com>
Fri, 1 May 2015 02:03:40 +0000 (10:03 +0800)
committerIlya Dryomov <idryomov@gmail.com>
Thu, 25 Jun 2015 08:49:28 +0000 (11:49 +0300)
Cached_context in ceph_snap_realm is directly accessed by
uninline_data() and get_pool_perm(). This is racy in theory.
Both uninline_data() and get_pool_perm() do not modify existing
objects; they only create new objects. So we can pass the empty
snap context to them. Unlike cached_context in ceph_snap_realm,
we do not need to protect the empty snap context.

Signed-off-by: Yan, Zheng <zyan@redhat.com>
fs/ceph/addr.c
fs/ceph/snap.c
fs/ceph/super.h

index b960277272482697a2e215bf7ce7e6e80209c3e1..ccc4325aa5c5793cc8c06e6a22c34643a6c63c0b 100644 (file)
@@ -1510,8 +1510,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
                                    ceph_vino(inode), 0, &len, 0, 1,
                                    CEPH_OSD_OP_CREATE,
                                    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
-                                   ci->i_snap_realm->cached_context,
-                                   0, 0, false);
+                                   ceph_empty_snapc, 0, 0, false);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
@@ -1529,7 +1528,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
                                    ceph_vino(inode), 0, &len, 1, 3,
                                    CEPH_OSD_OP_WRITE,
                                    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
-                                   ci->i_snap_realm->cached_context,
+                                   ceph_empty_snapc,
                                    ci->i_truncate_seq, ci->i_truncate_size,
                                    false);
        if (IS_ERR(req)) {
@@ -1653,7 +1652,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
        }
 
        rd_req = ceph_osdc_alloc_request(&fsc->client->osdc,
-                                        ci->i_snap_realm->cached_context,
+                                        ceph_empty_snapc,
                                         1, false, GFP_NOFS);
        if (!rd_req) {
                err = -ENOMEM;
@@ -1668,7 +1667,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
        rd_req->r_base_oid.name_len = strlen(rd_req->r_base_oid.name);
 
        wr_req = ceph_osdc_alloc_request(&fsc->client->osdc,
-                                        ci->i_snap_realm->cached_context,
+                                        ceph_empty_snapc,
                                         1, false, GFP_NOFS);
        if (!wr_req) {
                err = -ENOMEM;
index a97e39f09ba683349bb5f97e44f0d229b3a88936..b2a945345d2b97633d1c0ec78adb48c68fdcb21b 100644 (file)
@@ -296,7 +296,7 @@ static int cmpu64_rev(const void *a, const void *b)
 }
 
 
-static struct ceph_snap_context *empty_snapc;
+struct ceph_snap_context *ceph_empty_snapc;
 
 /*
  * build the snap context for a given realm.
@@ -338,9 +338,9 @@ static int build_snap_context(struct ceph_snap_realm *realm)
                return 0;
        }
 
-       if (num == 0 && realm->seq == empty_snapc->seq) {
-               ceph_get_snap_context(empty_snapc);
-               snapc = empty_snapc;
+       if (num == 0 && realm->seq == ceph_empty_snapc->seq) {
+               ceph_get_snap_context(ceph_empty_snapc);
+               snapc = ceph_empty_snapc;
                goto done;
        }
 
@@ -482,7 +482,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
                   cap_snap.  lucky us. */
                dout("queue_cap_snap %p already pending\n", inode);
                kfree(capsnap);
-       } else if (ci->i_snap_realm->cached_context == empty_snapc) {
+       } else if (ci->i_snap_realm->cached_context == ceph_empty_snapc) {
                dout("queue_cap_snap %p empty snapc\n", inode);
                kfree(capsnap);
        } else if (dirty & (CEPH_CAP_AUTH_EXCL|CEPH_CAP_XATTR_EXCL|
@@ -964,14 +964,14 @@ out:
 
 int __init ceph_snap_init(void)
 {
-       empty_snapc = ceph_create_snap_context(0, GFP_NOFS);
-       if (!empty_snapc)
+       ceph_empty_snapc = ceph_create_snap_context(0, GFP_NOFS);
+       if (!ceph_empty_snapc)
                return -ENOMEM;
-       empty_snapc->seq = 1;
+       ceph_empty_snapc->seq = 1;
        return 0;
 }
 
 void ceph_snap_exit(void)
 {
-       ceph_put_snap_context(empty_snapc);
+       ceph_put_snap_context(ceph_empty_snapc);
 }
index 18b917c7feb41885ec222d223c37e918fa7e84eb..b182fd7499d9b3aadb1ef2a976938ec810e7b744 100644 (file)
@@ -692,6 +692,7 @@ static inline int default_congestion_kb(void)
 
 
 /* snap.c */
+extern struct ceph_snap_context *ceph_empty_snapc;
 struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
                                               u64 ino);
 extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
This page took 0.029428 seconds and 5 git commands to generate.