Merge remote-tracking branch 'staging/staging-next'
[deliverable/linux.git] / drivers/staging/lustre/lustre/osc/osc_page.c
index 355f496a2093a5b957c12147ca2bd7695ec1dd4b..c8889eabc402f17adb647ee08e07b59f018d78ba 100644
@@ -323,32 +323,6 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
        return result;
 }
 
-int osc_over_unstable_soft_limit(struct client_obd *cli)
-{
-       long obd_upages, obd_dpages, osc_upages;
-
-       /* Can't check cli->cl_unstable_count, therefore, no soft limit */
-       if (!cli)
-               return 0;
-
-       obd_upages = atomic_read(&obd_unstable_pages);
-       obd_dpages = atomic_read(&obd_dirty_pages);
-
-       osc_upages = atomic_read(&cli->cl_unstable_count);
-
-       /*
-        * obd_max_dirty_pages is the max number of (dirty + unstable)
-        * pages allowed at any given time. To simulate an unstable page
-        * only limit, we subtract the current number of dirty pages
-        * from this max. This difference is roughly the amount of pages
-        * currently available for unstable pages. Thus, the soft limit
-        * is half of that difference. Check osc_upages to ensure we don't
-        * set SOFT_SYNC for OSCs without any outstanding unstable pages.
-        */
-       return osc_upages &&
-              obd_upages >= (obd_max_dirty_pages - obd_dpages) / 2;
-}
-
 /**
  * Helper function called by osc_io_submit() for every page in an immediate
  * transfer (i.e., transferred synchronously).
@@ -368,9 +342,6 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
        oap->oap_count = opg->ops_to - opg->ops_from;
        oap->oap_brw_flags = brw_flags | OBD_BRW_SYNC;
 
-       if (osc_over_unstable_soft_limit(oap->oap_cli))
-               oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
-
        if (capable(CFS_CAP_SYS_RESOURCE)) {
                oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
                oap->oap_cmd |= OBD_BRW_NOQUOTA;
@@ -539,6 +510,28 @@ static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
        }
 }
 
+/**
+ * Check if a cl_page can be released, i.e., it is not being used.
+ *
+ * If unstable accounting is turned on, a bulk transfer may hold an extra
+ * refcount for recovery, so we need to check the vmpage refcount as well;
+ * otherwise we could destroy the cl_page but not reuse its vmpage.
+ */
+static inline bool lru_page_busy(struct client_obd *cli, struct cl_page *page)
+{
+       if (cl_page_in_use_noref(page))
+               return true;
+
+       if (cli->cl_cache->ccc_unstable_check) {
+               struct page *vmpage = cl_page_vmpage(page);
+
+               /* vmpage has two known users: the cl_page and the VM page cache */
+               if (page_count(vmpage) - page_mapcount(vmpage) > 2)
+                       return true;
+       }
+       return false;
+}
+
 /**
  * Drop @target of pages from LRU at most.
  */
@@ -584,7 +577,7 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
                        break;
 
                page = opg->ops_cl.cpl_page;
-               if (cl_page_in_use_noref(page)) {
+               if (lru_page_busy(cli, page)) {
                        list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
                        continue;
                }
@@ -620,7 +613,7 @@ int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
                }
 
                if (cl_page_own_try(env, io, page) == 0) {
-                       if (!cl_page_in_use_noref(page)) {
+                       if (!lru_page_busy(cli, page)) {
                                /* remove it from lru list earlier to avoid
                                 * lock contention
                                 */
@@ -742,6 +735,13 @@ out:
        return rc;
 }
 
+/**
+ * osc_lru_reserve() is called to reserve an LRU slot for a cl_page.
+ *
+ * Usually the LRU slots are reserved in osc_io_iter_rw_init(), which
+ * should already have reserved enough slots for the whole IO; only when
+ * the LRU slots are in extreme shortage does this per-page path matter.
+ */
 static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
                           struct osc_page *opg)
 {
@@ -787,4 +787,150 @@ out:
        return rc;
 }
 
+/**
+ * Atomic operations are expensive. We batch the accounting for pages
+ * sharing the same pgdat to get better performance.
+ * In practice this works pretty well because the pages in one RPC are
+ * likely to come from the same page zone.
+ */
+static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
+                                           int factor)
+{
+       int page_count = desc->bd_iov_count;
+       pg_data_t *last = NULL;
+       int count = 0;
+       int i;
+
+       for (i = 0; i < page_count; i++) {
+               pg_data_t *pgdat = page_pgdat(desc->bd_iov[i].bv_page);
+
+               if (likely(pgdat == last)) {
+                       ++count;
+                       continue;
+               }
+
+               if (count > 0) {
+                       mod_node_page_state(pgdat, NR_UNSTABLE_NFS,
+                                           factor * count);
+                       count = 0;
+               }
+               last = pgdat;
+               ++count;
+       }
+       if (count > 0)
+               mod_node_page_state(last, NR_UNSTABLE_NFS, factor * count);
+}
+
+static inline void add_unstable_page_accounting(struct ptlrpc_bulk_desc *desc)
+{
+       unstable_page_accounting(desc, 1);
+}
+
+static inline void dec_unstable_page_accounting(struct ptlrpc_bulk_desc *desc)
+{
+       unstable_page_accounting(desc, -1);
+}
+
+/**
+ * Performs "unstable" page accounting. This function balances the
+ * increment operations performed in osc_inc_unstable_pages. It is
+ * registered as the RPC request callback, and is executed when the
+ * bulk RPC is committed on the server. Thus at this point, the pages
+ * involved in the bulk transfer are no longer considered unstable.
+ *
+ * If this function is called, the request should have been committed
+ * or req::rq_unstable must have been set; either implies that the
+ * unstable page statistics have already been added.
+ */
+void osc_dec_unstable_pages(struct ptlrpc_request *req)
+{
+       struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+       struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+       int page_count = desc->bd_iov_count;
+       int unstable_count;
+
+       LASSERT(page_count >= 0);
+       dec_unstable_page_accounting(desc);
+
+       unstable_count = atomic_sub_return(page_count, &cli->cl_unstable_count);
+       LASSERT(unstable_count >= 0);
+
+       unstable_count = atomic_sub_return(page_count,
+                                          &cli->cl_cache->ccc_unstable_nr);
+       LASSERT(unstable_count >= 0);
+       if (!unstable_count)
+               wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
+
+       if (osc_cache_too_much(cli))
+               (void)ptlrpcd_queue_work(cli->cl_lru_work);
+}
+
+/**
+ * "unstable" page accounting. See: osc_dec_unstable_pages.
+ */
+void osc_inc_unstable_pages(struct ptlrpc_request *req)
+{
+       struct client_obd *cli  = &req->rq_import->imp_obd->u.cli;
+       struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+       int page_count = desc->bd_iov_count;
+
+       /* No unstable page tracking */
+       if (!cli->cl_cache || !cli->cl_cache->ccc_unstable_check)
+               return;
+
+       add_unstable_page_accounting(desc);
+       atomic_add(page_count, &cli->cl_unstable_count);
+       atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
+
+       /*
+        * If the request has already been committed (i.e. brw_commit
+        * called via rq_commit_cb), we need to undo the unstable page
+        * increments we just performed because rq_commit_cb wont be
+        * called again.
+        */
+       spin_lock(&req->rq_lock);
+       if (unlikely(req->rq_committed)) {
+               spin_unlock(&req->rq_lock);
+
+               osc_dec_unstable_pages(req);
+       } else {
+               req->rq_unstable = 1;
+               spin_unlock(&req->rq_lock);
+       }
+}
+
+/**
+ * Check whether a SOFT_SYNC flag should be piggybacked to the OST from
+ * this OSC. This function is called for every BRW RPC, so it is critical
+ * to keep it fast.
+ */
+bool osc_over_unstable_soft_limit(struct client_obd *cli)
+{
+       long unstable_nr, osc_unstable_count;
+
+       /* No unstable page tracking, therefore no soft limit to check */
+       if (!cli->cl_cache || !cli->cl_cache->ccc_unstable_check)
+               return false;
+
+       osc_unstable_count = atomic_read(&cli->cl_unstable_count);
+       unstable_nr = atomic_read(&cli->cl_cache->ccc_unstable_nr);
+
+       CDEBUG(D_CACHE,
+              "%s: cli: %p unstable pages: %lu, osc unstable pages: %lu\n",
+              cli->cl_import->imp_obd->obd_name, cli,
+              unstable_nr, osc_unstable_count);
+
+       /*
+        * If the LRU slots are in shortage (unstable pages pin more than
+        * 25% of the LRU budget) AND this OSC has a full RPC window of
+        * unstable pages, it is a good time to piggyback a SOFT_SYNC flag.
+        * Note that the OST won't respond to the SOFT_SYNC request
+        * immediately, so active OSCs will have more chances to carry the
+        * flag, which is reasonable.
+        */
+       return unstable_nr > cli->cl_cache->ccc_lru_max >> 2 &&
+              osc_unstable_count > cli->cl_max_pages_per_rpc *
+                                   cli->cl_max_rpcs_in_flight;
+}
+
 /** @} osc */
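The commit-side counterpart of osc_inc_unstable_pages() is not part of this file. The comment in that function names brw_commit(), called via rq_commit_cb, as the callback that either consumes the rq_unstable mark or records rq_committed so that a racing increment undoes itself. A minimal sketch of what such a callback could look like, under that pairing assumption, is shown below; the body is an illustration, not code taken from this commit.

/*
 * Hypothetical commit callback (assumed, not part of this diff): if the
 * unstable accounting was already added (rq_unstable set), drop it here;
 * otherwise record rq_committed so that a later osc_inc_unstable_pages()
 * undoes its own increment.
 */
static void brw_commit(struct ptlrpc_request *req)
{
	/* The bulk transfer is committed on the OST; its pages are stable. */
	spin_lock(&req->rq_lock);
	if (req->rq_unstable) {
		req->rq_unstable = 0;
		spin_unlock(&req->rq_lock);
		osc_dec_unstable_pages(req);
	} else {
		req->rq_committed = 1;
		spin_unlock(&req->rq_lock);
	}
}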