Merge remote-tracking branch 'staging/staging-next'
[deliverable/linux.git] / drivers/staging/lustre/lustre/mdc/mdc_request.c
index 542801f04b0d8d61637394897c2c855d3c636411..5bf95f964438e1a05a7e0f426bd83589c599ddd8 100644 (file)
@@ -39,7 +39,9 @@
 # include <linux/utsname.h>
 
 #include "../include/lustre_acl.h"
+#include "../include/lustre/lustre_ioctl.h"
 #include "../include/obd_class.h"
+#include "../include/lustre_lmv.h"
 #include "../include/lustre_fid.h"
 #include "../include/lprocfs_status.h"
 #include "../include/lustre_param.h"
@@ -57,16 +59,16 @@ static inline int mdc_queue_wait(struct ptlrpc_request *req)
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
        int rc;
 
-       /* mdc_enter_request() ensures that this client has no more
+       /* obd_get_request_slot() ensures that this client has no more
        * than cl_max_rpcs_in_flight RPCs simultaneously in flight
         * against an MDT.
         */
-       rc = mdc_enter_request(cli);
+       rc = obd_get_request_slot(cli);
        if (rc != 0)
                return rc;
 
        rc = ptlrpc_queue_wait(req);
-       mdc_exit_request(cli);
+       obd_put_request_slot(cli);
 
        return rc;
 }
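
The renamed helpers generalize what mdc_enter_request()/mdc_exit_request() did privately in the MDC: cap the number of metadata RPCs a client keeps in flight against one MDT. A simplified, hypothetical sketch of the slot pattern follows; the real obd_get_request_slot()/obd_put_request_slot() live in obdclass and sleep on a waiter list rather than spinning, but the field names below match struct client_obd.

static int demo_get_request_slot(struct client_obd *cli)
{
	spin_lock(&cli->cl_loi_list_lock);
	while (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
		/* the real code queues on a waitqueue instead of spinning */
		spin_unlock(&cli->cl_loi_list_lock);
		cond_resched();
		spin_lock(&cli->cl_loi_list_lock);
	}
	cli->cl_r_in_flight++;
	spin_unlock(&cli->cl_loi_list_lock);
	return 0;
}

static void demo_put_request_slot(struct client_obd *cli)
{
	spin_lock(&cli->cl_loi_list_lock);
	cli->cl_r_in_flight--;
	spin_unlock(&cli->cl_loi_list_lock);
	/* the real code also wakes the next queued waiter here */
}
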
@@ -98,7 +100,7 @@ static int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid)
                goto out;
        }
 
-       *rootfid = body->fid1;
+       *rootfid = body->mbo_fid1;
        CDEBUG(D_NET,
               "root fid="DFID", last_committed=%llu\n",
               PFID(rootfid),
@@ -136,12 +138,12 @@ static int mdc_getattr_common(struct obd_export *exp,
        if (!body)
                return -EPROTO;
 
-       CDEBUG(D_NET, "mode: %o\n", body->mode);
+       CDEBUG(D_NET, "mode: %o\n", body->mbo_mode);
 
        mdc_update_max_ea_from_body(exp, body);
-       if (body->eadatasize != 0) {
+       if (body->mbo_eadatasize != 0) {
                eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
-                                                     body->eadatasize);
+                                                     body->mbo_eadatasize);
                if (!eadata)
                        return -EPROTO;
        }
@@ -230,32 +232,6 @@ static int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
        return rc;
 }
 
-static int mdc_is_subdir(struct obd_export *exp,
-                        const struct lu_fid *pfid,
-                        const struct lu_fid *cfid,
-                        struct ptlrpc_request **request)
-{
-       struct ptlrpc_request  *req;
-       int                  rc;
-
-       *request = NULL;
-       req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
-                                       &RQF_MDS_IS_SUBDIR, LUSTRE_MDS_VERSION,
-                                       MDS_IS_SUBDIR);
-       if (!req)
-               return -ENOMEM;
-
-       mdc_is_subdir_pack(req, pfid, cfid, 0);
-       ptlrpc_request_set_replen(req);
-
-       rc = ptlrpc_queue_wait(req);
-       if (rc && rc != -EREMOTE)
-               ptlrpc_req_finished(req);
-       else
-               *request = req;
-       return rc;
-}
-
 static int mdc_xattr_common(struct obd_export *exp,
                            const struct req_format *fmt,
                            const struct lu_fid *fid,
@@ -397,15 +373,15 @@ static int mdc_unpack_acl(struct ptlrpc_request *req, struct lustre_md *md)
        void               *buf;
        int                  rc;
 
-       if (!body->aclsize)
+       if (!body->mbo_aclsize)
                return 0;
 
-       buf = req_capsule_server_sized_get(pill, &RMF_ACL, body->aclsize);
+       buf = req_capsule_server_sized_get(pill, &RMF_ACL, body->mbo_aclsize);
 
        if (!buf)
                return -EPROTO;
 
-       acl = posix_acl_from_xattr(&init_user_ns, buf, body->aclsize);
+       acl = posix_acl_from_xattr(&init_user_ns, buf, body->mbo_aclsize);
        if (!acl)
                return 0;
 
@@ -443,24 +419,24 @@ static int mdc_get_lustre_md(struct obd_export *exp,
 
        md->body = req_capsule_server_get(pill, &RMF_MDT_BODY);
 
-       if (md->body->valid & OBD_MD_FLEASIZE) {
+       if (md->body->mbo_valid & OBD_MD_FLEASIZE) {
                int lmmsize;
                struct lov_mds_md *lmm;
 
-               if (!S_ISREG(md->body->mode)) {
+               if (!S_ISREG(md->body->mbo_mode)) {
                        CDEBUG(D_INFO,
                               "OBD_MD_FLEASIZE set, should be a regular file, but is not\n");
                        rc = -EPROTO;
                        goto out;
                }
 
-               if (md->body->eadatasize == 0) {
+               if (md->body->mbo_eadatasize == 0) {
                        CDEBUG(D_INFO,
                               "OBD_MD_FLEASIZE set, but eadatasize 0\n");
                        rc = -EPROTO;
                        goto out;
                }
-               lmmsize = md->body->eadatasize;
+               lmmsize = md->body->mbo_eadatasize;
                lmm = req_capsule_server_sized_get(pill, &RMF_MDT_MD, lmmsize);
                if (!lmm) {
                        rc = -EPROTO;
@@ -479,24 +455,24 @@ static int mdc_get_lustre_md(struct obd_export *exp,
                        goto out;
                }
 
-       } else if (md->body->valid & OBD_MD_FLDIREA) {
+       } else if (md->body->mbo_valid & OBD_MD_FLDIREA) {
                int lmvsize;
                struct lov_mds_md *lmv;
 
-               if (!S_ISDIR(md->body->mode)) {
+               if (!S_ISDIR(md->body->mbo_mode)) {
                        CDEBUG(D_INFO,
                               "OBD_MD_FLDIREA set, should be a directory, but is not\n");
                        rc = -EPROTO;
                        goto out;
                }
 
-               if (md->body->eadatasize == 0) {
+               if (md->body->mbo_eadatasize == 0) {
                        CDEBUG(D_INFO,
                               "OBD_MD_FLDIREA is set, but eadatasize 0\n");
                        return -EPROTO;
                }
-               if (md->body->valid & OBD_MD_MEA) {
-                       lmvsize = md->body->eadatasize;
+               if (md->body->mbo_valid & OBD_MD_MEA) {
+                       lmvsize = md->body->mbo_eadatasize;
                        lmv = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
                                                           lmvsize);
                        if (!lmv) {
@@ -504,15 +480,15 @@ static int mdc_get_lustre_md(struct obd_export *exp,
                                goto out;
                        }
 
-                       rc = obd_unpackmd(md_exp, (void *)&md->mea, lmv,
+                       rc = obd_unpackmd(md_exp, (void *)&md->lmv, lmv,
                                          lmvsize);
                        if (rc < 0)
                                goto out;
 
-                       if (rc < sizeof(*md->mea)) {
+                       if (rc < sizeof(*md->lmv)) {
                                CDEBUG(D_INFO,
-                                      "size too small: rc < sizeof(*md->mea) (%d < %d)\n",
-                                       rc, (int)sizeof(*md->mea));
+                                      "size too small: rc < sizeof(*md->lmv) (%d < %d)\n",
+                                       rc, (int)sizeof(*md->lmv));
                                rc = -EPROTO;
                                goto out;
                        }
@@ -520,12 +496,12 @@ static int mdc_get_lustre_md(struct obd_export *exp,
        }
        rc = 0;
 
-       if (md->body->valid & OBD_MD_FLACL) {
+       if (md->body->mbo_valid & OBD_MD_FLACL) {
                /* for ACL, it's possible that FLACL is set but aclsize is zero.
                 * only when aclsize != 0 there's an actual segment for ACL
                 * in reply buffer.
                 */
-               if (md->body->aclsize) {
+               if (md->body->mbo_aclsize) {
                        rc = mdc_unpack_acl(req, md);
                        if (rc)
                                goto out;
@@ -580,9 +556,9 @@ void mdc_replay_open(struct ptlrpc_request *req)
 
                file_fh = &och->och_fh;
                CDEBUG(D_HA, "updating handle from %#llx to %#llx\n",
-                      file_fh->cookie, body->handle.cookie);
+                      file_fh->cookie, body->mbo_handle.cookie);
                old = *file_fh;
-               *file_fh = body->handle;
+               *file_fh = body->mbo_handle;
        }
        close_req = mod->mod_close_req;
        if (close_req) {
@@ -597,7 +573,7 @@ void mdc_replay_open(struct ptlrpc_request *req)
                if (och)
                        LASSERT(!memcmp(&old, &epoch->handle, sizeof(old)));
                DEBUG_REQ(D_HA, close_req, "updating close body with new fh");
-               epoch->handle = body->handle;
+               epoch->handle = body->mbo_handle;
        }
 }
 
@@ -679,11 +655,11 @@ int mdc_set_open_replay_data(struct obd_export *exp,
                spin_unlock(&open_req->rq_lock);
        }
 
-       rec->cr_fid2 = body->fid1;
-       rec->cr_ioepoch = body->ioepoch;
-       rec->cr_old_handle.cookie = body->handle.cookie;
+       rec->cr_fid2 = body->mbo_fid1;
+       rec->cr_ioepoch = body->mbo_ioepoch;
+       rec->cr_old_handle.cookie = body->mbo_handle.cookie;
        open_req->rq_replay_cb = mdc_replay_open;
-       if (!fid_is_sane(&body->fid1)) {
+       if (!fid_is_sane(&body->mbo_fid1)) {
                DEBUG_REQ(D_ERROR, open_req,
                          "Saving replay request with insane fid");
                LBUG();
@@ -701,9 +677,15 @@ static void mdc_free_open(struct md_open_data *mod)
            imp_connect_disp_stripe(mod->mod_open_req->rq_import))
                committed = 1;
 
-       LASSERT(mod->mod_open_req->rq_replay == 0);
-
-       DEBUG_REQ(D_RPCTRACE, mod->mod_open_req, "free open request\n");
+       /*
+        * No reason to assert here if the open request has
+        * rq_replay == 1. It means that mdc_close() failed, and the
+        * close request wasn't sent. It is not fatal to the client.
+        * The worst case is eviction if the client holds an open lock.
+        */
+       DEBUG_REQ(D_RPCTRACE, mod->mod_open_req,
+                 "free open request rq_replay = %d\n",
+                 mod->mod_open_req->rq_replay);
 
        ptlrpc_request_committed(mod->mod_open_req, committed);
        if (mod->mod_close_req)
@@ -744,7 +726,7 @@ static void mdc_close_handle_reply(struct ptlrpc_request *req,
                epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
 
                epoch->flags |= MF_SOM_AU;
-               if (repbody->valid & OBD_MD_FLGETATTRLOCK)
+               if (repbody->mbo_valid & OBD_MD_FLGETATTRLOCK)
                        op_data->op_flags |= MF_GETATTR_LOCK;
        }
 }
@@ -763,7 +745,7 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
                req_fmt = &RQF_MDS_RELEASE_CLOSE;
 
                /* allocate a FID for volatile file */
-               rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
+               rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
                if (rc < 0) {
                        CERROR("%s: "DFID" failed to allocate FID: %d\n",
                               obd->obd_name, PFID(&op_data->op_fid1), rc);
@@ -773,22 +755,10 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
        }
 
        *request = NULL;
-       req = ptlrpc_request_alloc(class_exp2cliimp(exp), req_fmt);
-       if (!req)
-               return -ENOMEM;
-
-       rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_CLOSE);
-       if (rc) {
-               ptlrpc_request_free(req);
-               return rc;
-       }
-
-       /* To avoid a livelock (bug 7034), we need to send CLOSE RPCs to a
-        * portal whose threads are not taking any DLM locks and are therefore
-        * always progressing
-        */
-       req->rq_request_portal = MDS_READPAGE_PORTAL;
-       ptlrpc_at_set_req_timeout(req);
+       if (OBD_FAIL_CHECK(OBD_FAIL_MDC_CLOSE))
+               req = NULL;
+       else
+               req = ptlrpc_request_alloc(class_exp2cliimp(exp), req_fmt);
 
        /* Ensure that this close's handle is fixed up during replay. */
        if (likely(mod)) {
@@ -809,6 +779,29 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
                 CDEBUG(D_HA,
                        "couldn't find open req; expecting close error\n");
        }
+       if (!req) {
+               /*
+                * TODO: repeat close after errors
+                */
+               CWARN("%s: close of FID "DFID" failed, file reference will be dropped when this client unmounts or is evicted\n",
+                     obd->obd_name, PFID(&op_data->op_fid1));
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_CLOSE);
+       if (rc) {
+               ptlrpc_request_free(req);
+               goto out;
+       }
+
+       /*
+        * To avoid a livelock (bug 7034), we need to send CLOSE RPCs to a
+        * portal whose threads are not taking any DLM locks and are therefore
+        * always progressing
+        */
+       req->rq_request_portal = MDS_READPAGE_PORTAL;
+       ptlrpc_at_set_req_timeout(req);
 
        mdc_close_pack(req, op_data);
 
@@ -854,6 +847,7 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
                }
        }
 
+out:
        if (mod) {
                if (rc != 0)
                        mod->mod_close_req = NULL;
@@ -936,16 +930,17 @@ static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
        return rc;
 }
 
-static int mdc_readpage(struct obd_export *exp, struct md_op_data *op_data,
-                       struct page **pages, struct ptlrpc_request **request)
+static int mdc_getpage(struct obd_export *exp, const struct lu_fid *fid,
+                      u64 offset, struct page **pages, int npages,
+                      struct ptlrpc_request **request)
 {
-       struct ptlrpc_request   *req;
        struct ptlrpc_bulk_desc *desc;
-       int                   i;
-       wait_queue_head_t             waitq;
-       int                   resends = 0;
-       struct l_wait_info       lwi;
-       int                   rc;
+       struct ptlrpc_request *req;
+       wait_queue_head_t waitq;
+       struct l_wait_info lwi;
+       int resends = 0;
+       int rc;
+       int i;
 
        *request = NULL;
        init_waitqueue_head(&waitq);
@@ -964,7 +959,7 @@ restart_bulk:
        req->rq_request_portal = MDS_READPAGE_PORTAL;
        ptlrpc_at_set_req_timeout(req);
 
-       desc = ptlrpc_prep_bulk_imp(req, op_data->op_npages, 1, BULK_PUT_SINK,
+       desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_PUT_SINK,
                                    MDS_BULK_PORTAL);
        if (!desc) {
                ptlrpc_request_free(req);
@@ -972,12 +967,10 @@ restart_bulk:
        }
 
        /* NB req now owns desc and will free it when it gets freed */
-       for (i = 0; i < op_data->op_npages; i++)
+       for (i = 0; i < npages; i++)
                ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
 
-       mdc_readdir_pack(req, op_data->op_offset,
-                        PAGE_SIZE * op_data->op_npages,
-                        &op_data->op_fid1);
+       mdc_readdir_pack(req, offset, PAGE_SIZE * npages, fid);
 
        ptlrpc_request_set_replen(req);
        rc = ptlrpc_queue_wait(req);
@@ -988,11 +981,12 @@ restart_bulk:
 
                resends++;
                if (!client_should_resend(resends, &exp->exp_obd->u.cli)) {
-                       CERROR("too many resend retries, returning error\n");
+                       CERROR("%s: too many resend retries: rc = %d\n",
+                              exp->exp_obd->obd_name, -EIO);
                        return -EIO;
                }
-               lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends),
-                                      NULL, NULL, NULL);
+               lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends), NULL, NULL,
+                                      NULL);
                l_wait_event(waitq, 0, &lwi);
 
                goto restart_bulk;
@@ -1006,9 +1000,9 @@ restart_bulk:
        }
 
        if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
-               CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
-                      req->rq_bulk->bd_nob_transferred,
-                      PAGE_SIZE * op_data->op_npages);
+               CERROR("%s: unexpected bytes transferred: %d (%ld expected)\n",
+                      exp->exp_obd->obd_name, req->rq_bulk->bd_nob_transferred,
+                      PAGE_SIZE * npages);
                ptlrpc_req_finished(req);
                return -EPROTO;
        }
@@ -1017,6 +1011,454 @@ restart_bulk:
        return 0;
 }
 
+static void mdc_release_page(struct page *page, int remove)
+{
+       if (remove) {
+               lock_page(page);
+               if (likely(page->mapping))
+                       truncate_complete_page(page->mapping, page);
+               unlock_page(page);
+       }
+       put_page(page);
+}
+
+static struct page *mdc_page_locate(struct address_space *mapping, __u64 *hash,
+                                   __u64 *start, __u64 *end, int hash64)
+{
+       /*
+        * Complement of hash is used as an index so that
+        * radix_tree_gang_lookup() can be used to find a page with starting
+        * hash _smaller_ than the one we are looking for.
+        */
+       unsigned long offset = hash_x_index(*hash, hash64);
+       struct page *page;
+       int found;
+
+       spin_lock_irq(&mapping->tree_lock);
+       found = radix_tree_gang_lookup(&mapping->page_tree,
+                                      (void **)&page, offset, 1);
+       if (found > 0 && !radix_tree_exceptional_entry(page)) {
+               struct lu_dirpage *dp;
+
+               get_page(page);
+               spin_unlock_irq(&mapping->tree_lock);
+               /*
+                * In contrast to find_lock_page() we are sure that directory
+                * page cannot be truncated (while DLM lock is held) and,
+                * hence, can avoid restart.
+                *
+                * In fact, page cannot be locked here at all, because
+                * mdc_read_page_remote() does synchronous I/O.
+                */
+               wait_on_page_locked(page);
+               if (PageUptodate(page)) {
+                       dp = kmap(page);
+                       if (BITS_PER_LONG == 32 && hash64) {
+                               *start = le64_to_cpu(dp->ldp_hash_start) >> 32;
+                               *end   = le64_to_cpu(dp->ldp_hash_end) >> 32;
+                               *hash  = *hash >> 32;
+                       } else {
+                               *start = le64_to_cpu(dp->ldp_hash_start);
+                               *end   = le64_to_cpu(dp->ldp_hash_end);
+                       }
+                       if (unlikely(*start == 1 && *hash == 0))
+                               *hash = *start;
+                       else
+                               LASSERTF(*start <= *hash, "start = %#llx, end = %#llx, hash = %#llx\n",
+                                        *start, *end, *hash);
+                       CDEBUG(D_VFSTRACE, "offset %lx [%#llx %#llx], hash %#llx\n",
+                              offset, *start, *end, *hash);
+                       if (*hash > *end) {
+                               kunmap(page);
+                               mdc_release_page(page, 0);
+                               page = NULL;
+                       } else if (*end != *start && *hash == *end) {
+                               /*
+                                * upon hash collision, remove this page,
+                                * otherwise put the page reference, and
+                                * mdc_read_page_remote() will issue an RPC to
+                                * fetch the page we want.
+                                */
+                               kunmap(page);
+                               mdc_release_page(page,
+                                                le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
+                               page = NULL;
+                       }
+               } else {
+                       put_page(page);
+                       page = ERR_PTR(-EIO);
+               }
+       } else {
+               spin_unlock_irq(&mapping->tree_lock);
+               page = NULL;
+       }
+       return page;
+}
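
mdc_page_locate() depends on the complement-of-hash page index described in the comment above; the mapping itself is done by hash_x_index() from the llite headers. A hedged sketch of the idea only, not the exact implementation:

static inline unsigned long demo_hash_x_index(__u64 hash, int hash64)
{
	/* on 32-bit hosts only the top half of a 64-bit hash fits */
	if (BITS_PER_LONG == 32 && hash64)
		hash >>= 32;
	/*
	 * Store the complement: a larger hash becomes a smaller index, so
	 * radix_tree_gang_lookup(), which scans indices upward, returns the
	 * cached page whose starting hash is the largest one still <= the
	 * hash being looked up.
	 */
	return ~0UL - (unsigned long)hash;
}
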
+
+/*
+ * Adjust a set of pages, each page containing an array of lu_dirpages,
+ * so that each page can be used as a single logical lu_dirpage.
+ *
+ * A lu_dirpage is laid out as follows, where s = ldp_hash_start,
+ * e = ldp_hash_end, f = ldp_flags, p = padding, and each "ent" is a
+ * struct lu_dirent.  It has size up to LU_PAGE_SIZE. The ldp_hash_end
+ * value is used as a cookie to request the next lu_dirpage in a
+ * directory listing that spans multiple pages (two in this example):
+ *   ________
+ *  |        |
+ * .|--------v-------   -----.
+ * |s|e|f|p|ent|ent| ... |ent|
+ * '--|--------------   -----'   Each PAGE contains a single
+ *    '------.                   lu_dirpage.
+ * .---------v-------   -----.
+ * |s|e|f|p|ent| 0 | ... | 0 |
+ * '-----------------   -----'
+ *
+ * However, on hosts where the native VM page size (PAGE_SIZE) is
+ * larger than LU_PAGE_SIZE, a single host page may contain multiple
+ * lu_dirpages. After reading the lu_dirpages from the MDS, the
+ * ldp_hash_end of the first lu_dirpage refers to the one immediately
+ * after it in the same PAGE (arrows simplified for brevity, but
+ * in general e0==s1, e1==s2, etc.):
+ *
+ * .--------------------   -----.
+ * |s0|e0|f0|p|ent|ent| ... |ent|
+ * |---v----------------   -----|
+ * |s1|e1|f1|p|ent|ent| ... |ent|
+ * |---v----------------   -----|  Here, each PAGE contains
+ *             ...                 multiple lu_dirpages.
+ * |---v----------------   -----|
+ * |s'|e'|f'|p|ent|ent| ... |ent|
+ * '---|----------------   -----'
+ *     v
+ * .----------------------------.
+ * |        next PAGE           |
+ *
+ * This structure is transformed into a single logical lu_dirpage as follows:
+ *
+ * - Replace e0 with e' so the request for the next lu_dirpage gets the page
+ *   labeled 'next PAGE'.
+ *
+ * - Copy the LDF_COLLIDE flag from f' to f0 to correctly reflect whether
+ *   a hash collision with the next page exists.
+ *
+ * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
+ *   to the first entry of the next lu_dirpage.
+ */
+#if PAGE_SIZE > LU_PAGE_SIZE
+static void mdc_adjust_dirpages(struct page **pages, int cfs_pgs, int lu_pgs)
+{
+       int i;
+
+       for (i = 0; i < cfs_pgs; i++) {
+               struct lu_dirpage *dp = kmap(pages[i]);
+               __u64 hash_end = le64_to_cpu(dp->ldp_hash_end);
+               __u32 flags = le32_to_cpu(dp->ldp_flags);
+               struct lu_dirpage *first = dp;
+               struct lu_dirent *end_dirent = NULL;
+               struct lu_dirent *ent;
+
+               while (--lu_pgs > 0) {
+                       ent = lu_dirent_start(dp);
+                       for (end_dirent = ent; ent;
+                            end_dirent = ent, ent = lu_dirent_next(ent));
+
+                       /* Advance dp to next lu_dirpage. */
+                       dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE);
+
+                       /* Check if we've reached the end of the CFS_PAGE. */
+                       if (!((unsigned long)dp & ~PAGE_MASK))
+                               break;
+
+                       /* Save the hash and flags of this lu_dirpage. */
+                       hash_end = le64_to_cpu(dp->ldp_hash_end);
+                       flags = le32_to_cpu(dp->ldp_flags);
+
+                       /* Check if lu_dirpage contains no entries. */
+                       if (!end_dirent)
+                               break;
+
+                       /*
+                        * Enlarge the end entry's lde_reclen from 0 so
+                        * that it spans to the first entry of the next
+                        * lu_dirpage.
+                        */
+                       LASSERT(!le16_to_cpu(end_dirent->lde_reclen));
+                       end_dirent->lde_reclen =
+                               cpu_to_le16((char *)(dp->ldp_entries) -
+                                           (char *)end_dirent);
+               }
+
+               first->ldp_hash_end = hash_end;
+               first->ldp_flags &= ~cpu_to_le32(LDF_COLLIDE);
+               first->ldp_flags |= flags & cpu_to_le32(LDF_COLLIDE);
+
+               kunmap(pages[i]);
+       }
+       LASSERTF(lu_pgs == 0, "left = %d", lu_pgs);
+}
+#else
+#define mdc_adjust_dirpages(pages, cfs_pgs, lu_pgs) do {} while (0)
+#endif  /* PAGE_SIZE > LU_PAGE_SIZE */
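
For a concrete feel of the counts involved: on a host with 64 KiB VM pages and the usual 4 KiB LU_PAGE_SIZE, one host page carries 16 lu_dirpages, so a 128 KiB reply is 2 host pages but 32 lu_dirpages. mdc_read_page_remote() below derives both counts from bd_nob_transferred; a small sketch of that arithmetic, assuming LU_PAGE_SHIFT is 12:

/* illustration only: nob is the byte count reported by the bulk transfer */
static inline void demo_page_counts(int nob, int *rd_pgs, int *lu_pgs)
{
	*rd_pgs = (nob + PAGE_SIZE - 1) >> PAGE_SHIFT;	/* host pages */
	*lu_pgs = nob >> LU_PAGE_SHIFT;			/* lu_dirpages */
	/* with 64 KiB pages, nob == 131072 gives *rd_pgs == 2, *lu_pgs == 32 */
}
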
+
+/* parameters for readdir page */
+struct readpage_param {
+       struct md_op_data       *rp_mod;
+       __u64                   rp_off;
+       int                     rp_hash64;
+       struct obd_export       *rp_exp;
+       struct md_callback      *rp_cb;
+};
+
+/**
+ * Read pages from the server.
+ *
+ * Each page in an MDS_READPAGE RPC is packed in units of LU_PAGE_SIZE, and
+ * each unit carries a lu_dirpage header that describes its start/end hash
+ * and whether the unit is empty (contains no dir entry) or hash-collides
+ * with the next one.  After the client receives the reply, the units are
+ * integrated into a single dir page of PAGE_SIZE (if PAGE_SIZE is greater
+ * than LU_PAGE_SIZE), and the lu_dirpage of that integrated page is
+ * adjusted accordingly.
+ */
+static int mdc_read_page_remote(void *data, struct page *page0)
+{
+       struct readpage_param *rp = data;
+       struct page **page_pool;
+       struct page *page;
+       struct lu_dirpage *dp;
+       int rd_pgs = 0; /* number of pages read actually */
+       int npages;
+       struct md_op_data *op_data = rp->rp_mod;
+       struct ptlrpc_request *req;
+       int max_pages = op_data->op_max_pages;
+       struct inode *inode;
+       struct lu_fid *fid;
+       int i;
+       int rc;
+
+       LASSERT(max_pages > 0 && max_pages <= PTLRPC_MAX_BRW_PAGES);
+       inode = op_data->op_data;
+       fid = &op_data->op_fid1;
+       LASSERT(inode);
+
+       page_pool = kcalloc(max_pages, sizeof(page), GFP_NOFS);
+       if (page_pool) {
+               page_pool[0] = page0;
+       } else {
+               page_pool = &page0;
+               max_pages = 1;
+       }
+
+       for (npages = 1; npages < max_pages; npages++) {
+               page = page_cache_alloc_cold(inode->i_mapping);
+               if (!page)
+                       break;
+               page_pool[npages] = page;
+       }
+
+       rc = mdc_getpage(rp->rp_exp, fid, rp->rp_off, page_pool, npages, &req);
+       if (!rc) {
+               int lu_pgs = req->rq_bulk->bd_nob_transferred;
+
+               rd_pgs = (req->rq_bulk->bd_nob_transferred +
+                         PAGE_SIZE - 1) >> PAGE_SHIFT;
+               lu_pgs >>= LU_PAGE_SHIFT;
+               LASSERT(!(req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
+
+               CDEBUG(D_INODE, "read %d(%d)/%d pages\n", rd_pgs, lu_pgs,
+                      op_data->op_npages);
+
+               mdc_adjust_dirpages(page_pool, rd_pgs, lu_pgs);
+
+               SetPageUptodate(page0);
+       }
+
+       unlock_page(page0);
+       ptlrpc_req_finished(req);
+       CDEBUG(D_CACHE, "read %d/%d pages\n", rd_pgs, npages);
+       for (i = 1; i < npages; i++) {
+               unsigned long offset;
+               __u64 hash;
+               int ret;
+
+               page = page_pool[i];
+
+               if (rc < 0 || i >= rd_pgs) {
+                       put_page(page);
+                       continue;
+               }
+
+               SetPageUptodate(page);
+
+               dp = kmap(page);
+               hash = le64_to_cpu(dp->ldp_hash_start);
+               kunmap(page);
+
+               offset = hash_x_index(hash, rp->rp_hash64);
+
+               prefetchw(&page->flags);
+               ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
+                                           GFP_KERNEL);
+               if (!ret)
+                       unlock_page(page);
+               else
+                       CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: rc = %d\n",
+                              offset, ret);
+               put_page(page);
+       }
+
+       if (page_pool != &page0)
+               kfree(page_pool);
+
+       return rc;
+}
+
+/**
+ * Read a dir page from the cache first; if it is not found there, read it
+ * from the server and add it to the cache.
+ *
+ * \param[in] exp      MDC export
+ * \param[in] op_data  client MD stack parameters, transferring parameters
+ *                     between different layers on client MD stack.
+ * \param[in] cb_op    callback required for ldlm lock enqueue during
+ *                     read page
+ * \param[in] hash_offset the hash offset of the page to be read
+ * \param[out] ppage   the page that was read
+ *
+ * \retval             0 if the page was read successfully
+ * \retval             negative errno if reading the page failed
+ */
+static int mdc_read_page(struct obd_export *exp, struct md_op_data *op_data,
+                        struct md_callback *cb_op, __u64 hash_offset,
+                        struct page **ppage)
+{
+       struct lookup_intent it = { .it_op = IT_READDIR };
+       struct page *page;
+       struct inode *dir = op_data->op_data;
+       struct address_space *mapping;
+       struct lu_dirpage *dp;
+       __u64 start = 0;
+       __u64 end = 0;
+       struct lustre_handle lockh;
+       struct ptlrpc_request *enq_req = NULL;
+       struct readpage_param rp_param;
+       int rc;
+
+       *ppage = NULL;
+
+       LASSERT(dir);
+       mapping = dir->i_mapping;
+
+       rc = mdc_intent_lock(exp, op_data, &it, &enq_req,
+                            cb_op->md_blocking_ast, 0);
+       if (enq_req)
+               ptlrpc_req_finished(enq_req);
+
+       if (rc < 0) {
+               CERROR("%s: "DFID" lock enqueue fails: rc = %d\n",
+                      exp->exp_obd->obd_name, PFID(&op_data->op_fid1), rc);
+               return rc;
+       }
+
+       rc = 0;
+       lockh.cookie = it.it_lock_handle;
+       mdc_set_lock_data(exp, &lockh, dir, NULL);
+
+       rp_param.rp_off = hash_offset;
+       rp_param.rp_hash64 = op_data->op_cli_flags & CLI_HASH64;
+       page = mdc_page_locate(mapping, &rp_param.rp_off, &start, &end,
+                              rp_param.rp_hash64);
+       if (IS_ERR(page)) {
+               CERROR("%s: dir page locate: "DFID" at %llu: rc %ld\n",
+                      exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
+                      rp_param.rp_off, PTR_ERR(page));
+               rc = PTR_ERR(page);
+               goto out_unlock;
+       } else if (page) {
+               /*
+                * XXX nikita: not entirely correct handling of a corner case:
+                * suppose hash chain of entries with hash value HASH crosses
+                * border between pages P0 and P1. First both P0 and P1 are
+                * cached, seekdir() is called for some entry from the P0 part
+                * of the chain. Later P0 goes out of cache. telldir(HASH)
+                * happens and finds P1, as it starts with matching hash
+                * value. Remaining entries from P0 part of the chain are
+                * skipped. (Is that really a bug?)
+                *
+                * Possible solutions: 0. don't cache P1 in such a case, handle
+                * it as an "overflow" page. 1. invalidate all pages at
+                * once. 2. use HASH|1 as an index for P1.
+                */
+               goto hash_collision;
+       }
+
+       rp_param.rp_exp = exp;
+       rp_param.rp_mod = op_data;
+       page = read_cache_page(mapping,
+                              hash_x_index(rp_param.rp_off,
+                                           rp_param.rp_hash64),
+                              mdc_read_page_remote, &rp_param);
+       if (IS_ERR(page)) {
+               CERROR("%s: read cache page: "DFID" at %llu: rc %ld\n",
+                      exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
+                      rp_param.rp_off, PTR_ERR(page));
+               rc = PTR_ERR(page);
+               goto out_unlock;
+       }
+
+       wait_on_page_locked(page);
+       (void)kmap(page);
+       if (!PageUptodate(page)) {
+               CERROR("%s: page not updated: "DFID" at %llu: rc %d\n",
+                      exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
+                      rp_param.rp_off, -5);
+               goto fail;
+       }
+       if (!PageChecked(page))
+               SetPageChecked(page);
+       if (PageError(page)) {
+               CERROR("%s: page error: "DFID" at %llu: rc %d\n",
+                      exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
+                      rp_param.rp_off, -5);
+               goto fail;
+       }
+
+hash_collision:
+       dp = page_address(page);
+       if (BITS_PER_LONG == 32 && rp_param.rp_hash64) {
+               start = le64_to_cpu(dp->ldp_hash_start) >> 32;
+               end = le64_to_cpu(dp->ldp_hash_end) >> 32;
+               rp_param.rp_off = hash_offset >> 32;
+       } else {
+               start = le64_to_cpu(dp->ldp_hash_start);
+               end = le64_to_cpu(dp->ldp_hash_end);
+               rp_param.rp_off = hash_offset;
+       }
+       if (end == start) {
+               LASSERT(start == rp_param.rp_off);
+               CWARN("Page-wide hash collision: %#lx\n", (unsigned long)end);
+#if BITS_PER_LONG == 32
+               CWARN("Real page-wide hash collision at [%llu %llu] with hash %llu\n",
+                     le64_to_cpu(dp->ldp_hash_start),
+                     le64_to_cpu(dp->ldp_hash_end), hash_offset);
+#endif
+               /*
+                * Fetch whole overflow chain...
+                *
+                * XXX not yet.
+                */
+               goto fail;
+       }
+       *ppage = page;
+out_unlock:
+       ldlm_lock_decref(&lockh, it.it_lock_mode);
+       return rc;
+fail:
+       kunmap(page);
+       mdc_release_page(page, 1);
+       rc = -EIO;
+       goto out_unlock;
+}
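
Since mdc_read_page() is static, outside callers reach it through the new md_ops .read_page hook (see the ops table below). A hypothetical caller sketch, with demo_walk_dir_page() and the entry-iteration step as illustrative assumptions; note the page comes back kmap()ed, so the caller must kunmap() and release it:

static int demo_walk_dir_page(struct obd_export *exp,
			      struct md_op_data *op_data,
			      struct md_callback *cb, __u64 hash)
{
	struct lu_dirpage *dp;
	struct page *page = NULL;
	int rc;

	rc = mdc_read_page(exp, op_data, cb, hash, &page);
	if (rc)
		return rc;

	dp = page_address(page);	/* valid: page is still kmap()ed */
	/* ... walk lu_dirent entries between ldp_hash_start and ldp_hash_end ... */

	kunmap(page);
	mdc_release_page(page, 0);	/* drop the reference, keep it cached */
	return 0;
}
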
+
 static int mdc_statfs(const struct lu_env *env,
                      struct obd_export *exp, struct obd_statfs *osfs,
                      __u64 max_age, __u32 flags)
@@ -1669,9 +2111,11 @@ static int mdc_ioc_swap_layouts(struct obd_export *exp,
         * with the request RPC to avoid extra RPC round trips
         */
        count = mdc_resource_get_unused(exp, &op_data->op_fid1, &cancels,
-                                       LCK_CR, MDS_INODELOCK_LAYOUT);
+                                       LCK_CR, MDS_INODELOCK_LAYOUT |
+                                       MDS_INODELOCK_XATTR);
        count += mdc_resource_get_unused(exp, &op_data->op_fid2, &cancels,
-                                        LCK_CR, MDS_INODELOCK_LAYOUT);
+                                        LCK_CR, MDS_INODELOCK_LAYOUT |
+                                        MDS_INODELOCK_XATTR);
 
        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_MDS_SWAP_LAYOUTS);
@@ -2199,13 +2643,13 @@ static int mdc_import_event(struct obd_device *obd, struct obd_import *imp,
        return rc;
 }
 
-int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
-                 struct md_op_data *op_data)
+int mdc_fid_alloc(const struct lu_env *env, struct obd_export *exp,
+                 struct lu_fid *fid, struct md_op_data *op_data)
 {
        struct client_obd *cli = &exp->exp_obd->u.cli;
        struct lu_client_seq *seq = cli->cl_seq;
 
-       return seq_client_alloc_fid(NULL, seq, fid);
+       return seq_client_alloc_fid(env, seq, fid);
 }
 
 static struct obd_uuid *mdc_get_uuid(struct obd_export *exp)
@@ -2430,7 +2874,6 @@ static struct obd_ops mdc_obd_ops = {
 static struct md_ops mdc_md_ops = {
        .getstatus              = mdc_getstatus,
        .null_inode             = mdc_null_inode,
-       .find_cbdata            = mdc_find_cbdata,
        .close                  = mdc_close,
        .create                 = mdc_create,
        .done_writing           = mdc_done_writing,
@@ -2439,13 +2882,12 @@ static struct md_ops mdc_md_ops = {
        .getattr_name           = mdc_getattr_name,
        .intent_lock            = mdc_intent_lock,
        .link                   = mdc_link,
-       .is_subdir              = mdc_is_subdir,
        .rename                 = mdc_rename,
        .setattr                = mdc_setattr,
        .setxattr               = mdc_setxattr,
        .getxattr               = mdc_getxattr,
        .sync                   = mdc_sync,
-       .readpage               = mdc_readpage,
+       .read_page              = mdc_read_page,
        .unlink                 = mdc_unlink,
        .cancel_unused          = mdc_cancel_unused,
        .init_ea_size           = mdc_init_ea_size,