Merge branch 'for-linus' of git://git.kernel.dk/linux-block
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 6 May 2016 20:08:35 +0000 (13:08 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 6 May 2016 20:08:35 +0000 (13:08 -0700)
Pull writeback fix from Jens Axboe:
 "Just a single fix for domain aware writeback, fixing a regression that
  can cause balance_dirty_pages() to keep looping while not getting any
  work done"

* 'for-linus' of git://git.kernel.dk/linux-block:
  writeback: Fix performance regression in wb_over_bg_thresh()

1  2 
mm/page-writeback.c

diff --combined mm/page-writeback.c
index 999792d35ccc0faee6c4f85d70b6e31876855284,eeaa431ee4ec034563239b9a285253f7291ae0db..bc5149d5ec38016da91a8b1c85aeca0193143f0c
@@@ -1910,7 -1910,8 +1910,8 @@@ bool wb_over_bg_thresh(struct bdi_write
        if (gdtc->dirty > gdtc->bg_thresh)
                return true;
  
-       if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc))
+       if (wb_stat(wb, WB_RECLAIMABLE) >
+           wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
                return true;
  
        if (mdtc) {
                if (mdtc->dirty > mdtc->bg_thresh)
                        return true;
  
-               if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc))
+               if (wb_stat(wb, WB_RECLAIMABLE) >
+                   wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
                        return true;
        }
  
@@@ -2176,8 -2178,8 +2178,8 @@@ int write_cache_pages(struct address_sp
                        cycled = 0;
                end = -1;
        } else {
 -              index = wbc->range_start >> PAGE_CACHE_SHIFT;
 -              end = wbc->range_end >> PAGE_CACHE_SHIFT;
 +              index = wbc->range_start >> PAGE_SHIFT;
 +              end = wbc->range_end >> PAGE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                cycled = 1; /* ignore range_cyclic tests */
@@@ -2382,14 -2384,14 +2384,14 @@@ int write_one_page(struct page *page, i
                wait_on_page_writeback(page);
  
        if (clear_page_dirty_for_io(page)) {
 -              page_cache_get(page);
 +              get_page(page);
                ret = mapping->a_ops->writepage(page, &wbc);
                if (ret == 0 && wait) {
                        wait_on_page_writeback(page);
                        if (PageError(page))
                                ret = -EIO;
                }
 -              page_cache_release(page);
 +              put_page(page);
        } else {
                unlock_page(page);
        }
@@@ -2431,7 -2433,7 +2433,7 @@@ void account_page_dirtied(struct page *
                __inc_zone_page_state(page, NR_DIRTIED);
                __inc_wb_stat(wb, WB_RECLAIMABLE);
                __inc_wb_stat(wb, WB_DIRTIED);
 -              task_io_account_write(PAGE_CACHE_SIZE);
 +              task_io_account_write(PAGE_SIZE);
                current->nr_dirtied++;
                this_cpu_inc(bdp_ratelimits);
        }
@@@ -2450,7 -2452,7 +2452,7 @@@ void account_page_cleaned(struct page *
                mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
                dec_zone_page_state(page, NR_FILE_DIRTY);
                dec_wb_stat(wb, WB_RECLAIMABLE);
 -              task_io_account_cancelled_write(PAGE_CACHE_SIZE);
 +              task_io_account_cancelled_write(PAGE_SIZE);
        }
  }
  
This page took 0.029343 seconds and 5 git commands to generate.