writeback: scale writeback chunk size to device bandwidth; avoid busy-retrying unpinnable superblocks (fs/fs-writeback.c)
[deliverable/linux.git] / fs / fs-writeback.c
index 2c947da39f6ea276e47aa59037ba7ecfb7642bfb..04cf3b91e5016a1f7e3ceef734c7d85574ec296d 100644 (file)
 #include <linux/tracepoint.h>
 #include "internal.h"
 
-/*
- * The maximum number of pages to writeout in a single bdi flush/kupdate
- * operation.  We do this so we don't hold I_SYNC against an inode for
- * enormous amounts of time, which would block a userspace task which has
- * been forced to throttle against that inode.  Also, the code reevaluates
- * the dirty each time it has written this many pages.
- */
-#define MAX_WRITEBACK_PAGES     1024L
-
 /*
  * Passed into wb_writeback(), essentially a subset of writeback_control
  */
@@ -489,33 +480,8 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
        return ret;
 }
 
-/*
- * For background writeback the caller does not have the sb pinned
- * before calling writeback. So make sure that we do pin it, so it doesn't
- * go away while we are writing inodes from it.
- *
- * On success returns true with sb->s_count raised and sb->s_umount held
- * for read; on failure returns false with no references held.
- */
-static bool pin_sb_for_writeback(struct super_block *sb)
-{
-       spin_lock(&sb_lock);
-       /* Off the s_instances list means the sb is already being torn down. */
-       if (list_empty(&sb->s_instances)) {
-               spin_unlock(&sb_lock);
-               return false;
-       }
-
-       /* Passive reference: keeps the sb structure from being freed. */
-       sb->s_count++;
-       spin_unlock(&sb_lock);
-
-       /*
-        * Only trylock s_umount: the holder may be an umount in progress,
-        * and the flusher must not block waiting on it.
-        * NOTE(review): rationale inferred from the trylock — confirm
-        * against the writeback callers.
-        */
-       if (down_read_trylock(&sb->s_umount)) {
-               /* A NULL s_root presumably means we raced with umount —
-                * verify; only a live sb is returned pinned. */
-               if (sb->s_root)
-                       return true;
-               up_read(&sb->s_umount);
-       }
-
-       /* Drop the passive reference taken above. */
-       put_super(sb);
-       return false;
-}
-
-static long writeback_chunk_size(struct wb_writeback_work *work)
+/*
+ * How many pages to write out in one go for this work item.
+ * Sized from the device's measured write bandwidth (see else branch)
+ * instead of a fixed MAX_WRITEBACK_PAGES cap.
+ */
+static long writeback_chunk_size(struct backing_dev_info *bdi,
+                                struct wb_writeback_work *work)
 {
        long pages;
 
@@ -534,8 +500,13 @@ static long writeback_chunk_size(struct wb_writeback_work *work)
         */
        if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
                pages = LONG_MAX;
-       else
-               pages = min(MAX_WRITEBACK_PAGES, work->nr_pages);
+       else {
+               /*
+                * Cap the chunk at half the device's average write
+                * bandwidth (presumably pages/s, i.e. ~0.5s of IO —
+                * confirm units) and at the per-bdi share of the global
+                * dirty limit, never exceeding what the caller asked for.
+                */
+               pages = min(bdi->avg_write_bandwidth / 2,
+                           global_dirty_limit / DIRTY_SCOPE);
+               pages = min(pages, work->nr_pages);
+               /*
+                * Round down to a multiple of MIN_WRITEBACK_PAGES; the
+                * added MIN_WRITEBACK_PAGES keeps the result non-zero.
+                */
+               pages = round_down(pages + MIN_WRITEBACK_PAGES,
+                                  MIN_WRITEBACK_PAGES);
+       }
 
        return pages;
 }
@@ -596,11 +567,11 @@ static long writeback_sb_inodes(struct super_block *sb,
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
-                       requeue_io(inode, wb);
+                       redirty_tail(inode, wb);
                        continue;
                }
                __iget(inode);
-               write_chunk = writeback_chunk_size(work);
+               write_chunk = writeback_chunk_size(wb->bdi, work);
                wbc.nr_to_write = write_chunk;
                wbc.pages_skipped = 0;
 
@@ -646,8 +617,13 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
                struct inode *inode = wb_inode(wb->b_io.prev);
                struct super_block *sb = inode->i_sb;
 
-               if (!pin_sb_for_writeback(sb)) {
-                       requeue_io(inode, wb);
+               if (!grab_super_passive(sb)) {
+                       /*
+                        * grab_super_passive() may fail consistently due to
+                        * s_umount being grabbed by someone else. Don't use
+                        * requeue_io() to avoid busy retrying the inode/sb.
+                        */
+                       redirty_tail(inode, wb);
                        continue;
                }
                wrote += writeback_sb_inodes(sb, wb, work);
@@ -692,6 +668,16 @@ static inline bool over_bground_thresh(void)
                global_page_state(NR_UNSTABLE_NFS) > background_thresh);
 }
 
+/*
+ * Called under wb->list_lock. If there are multiple wb per bdi,
+ * only the flusher working on the first wb should do it.
+ *
+ * Refreshes the bdi's bandwidth estimate based on progress since
+ * @start_time.
+ */
+static void wb_update_bandwidth(struct bdi_writeback *wb,
+                               unsigned long start_time)
+{
+       /*
+        * The four zeros stand in for dirty/limit/dirtied/written inputs
+        * that are irrelevant on this path.
+        * NOTE(review): inferred from the call shape — confirm against
+        * __bdi_update_bandwidth()'s parameter list.
+        */
+       __bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, start_time);
+}
+
 /*
  * Explicit flushing or periodic writeback of "old" data.
  *
@@ -710,6 +696,7 @@ static inline bool over_bground_thresh(void)
 static long wb_writeback(struct bdi_writeback *wb,
                         struct wb_writeback_work *work)
 {
+       unsigned long wb_start = jiffies;
        long nr_pages = work->nr_pages;
        unsigned long oldest_jif;
        struct inode *inode;
@@ -758,6 +745,8 @@ static long wb_writeback(struct bdi_writeback *wb,
                        progress = __writeback_inodes_wb(wb, work);
                trace_writeback_written(wb->bdi, work);
 
+               wb_update_bandwidth(wb, wb_start);
+
                /*
                 * Did we write something? Try for more
                 *
This page took 0.025573 seconds and 5 git commands to generate.