f2fs: increase pages_skipped when skipping writepages
[deliverable/linux.git] / fs / f2fs / checkpoint.c
index 293d0486a40f7e8560b25d012a7a2398ef4e955c..aef32f36e2f3f7ce12c5578b65cd6e34ca202416 100644 (file)
@@ -75,23 +75,102 @@ out:
        return page;
 }
 
+inline int get_max_meta_blks(struct f2fs_sb_info *sbi, int type)
+{
+       switch (type) {
+       case META_NAT:
+               return NM_I(sbi)->max_nid / NAT_ENTRY_PER_BLOCK;
+       case META_SIT:
+               return SIT_BLK_CNT(sbi);
+       case META_SSA:
+       case META_CP:
+               return 0;
+       default:
+               BUG();
+       }
+}
+
+/*
+ * Readahead CP/NAT/SIT/SSA pages
+ */
+int ra_meta_pages(struct f2fs_sb_info *sbi, int start, int nrpages, int type)
+{
+       block_t prev_blk_addr = 0;
+       struct page *page;
+       int blkno = start;
+       int max_blks = get_max_meta_blks(sbi, type);
+
+       struct f2fs_io_info fio = {
+               .type = META,
+               .rw = READ_SYNC | REQ_META | REQ_PRIO
+       };
+
+       for (; nrpages-- > 0; blkno++) {
+               block_t blk_addr;
+
+               switch (type) {
+               case META_NAT:
+                       /* get nat block addr */
+                       if (unlikely(blkno >= max_blks))
+                               blkno = 0;
+                       blk_addr = current_nat_addr(sbi,
+                                       blkno * NAT_ENTRY_PER_BLOCK);
+                       break;
+               case META_SIT:
+                       /* get sit block addr */
+                       if (unlikely(blkno >= max_blks))
+                               goto out;
+                       blk_addr = current_sit_addr(sbi,
+                                       blkno * SIT_ENTRY_PER_BLOCK);
+                       if (blkno != start && prev_blk_addr + 1 != blk_addr)
+                               goto out;
+                       prev_blk_addr = blk_addr;
+                       break;
+               case META_SSA:
+               case META_CP:
+                       /* get ssa/cp block addr */
+                       blk_addr = blkno;
+                       break;
+               default:
+                       BUG();
+               }
+
+               page = grab_cache_page(META_MAPPING(sbi), blk_addr);
+               if (!page)
+                       continue;
+               if (PageUptodate(page)) {
+                       mark_page_accessed(page);
+                       f2fs_put_page(page, 1);
+                       continue;
+               }
+
+               f2fs_submit_page_mbio(sbi, page, blk_addr, &fio);
+               mark_page_accessed(page);
+               f2fs_put_page(page, 0);
+       }
+out:
+       f2fs_submit_merged_bio(sbi, META, READ);
+       return blkno - start;
+}
+
 static int f2fs_write_meta_page(struct page *page,
                                struct writeback_control *wbc)
 {
        struct inode *inode = page->mapping->host;
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 
-       /* Should not write any meta pages, if any IO error was occurred */
-       if (unlikely(sbi->por_doing ||
-                       is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)))
+       if (unlikely(sbi->por_doing))
                goto redirty_out;
-
        if (wbc->for_reclaim)
                goto redirty_out;
 
-       wait_on_page_writeback(page);
+       /* Should not write any meta pages, if any IO error has occurred */
+       if (unlikely(is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)))
+               goto no_write;
 
+       wait_on_page_writeback(page);
        write_meta_page(sbi, page);
+no_write:
        dec_page_count(sbi, F2FS_DIRTY_META);
        unlock_page(page);
        return 0;
@@ -99,6 +178,7 @@ static int f2fs_write_meta_page(struct page *page,
 redirty_out:
        dec_page_count(sbi, F2FS_DIRTY_META);
        wbc->pages_skipped++;
+       account_page_redirty(page);
        set_page_dirty(page);
        return AOP_WRITEPAGE_ACTIVATE;
 }
@@ -107,15 +187,12 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
                                struct writeback_control *wbc)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
-       int nrpages = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
+       int nrpages = nr_pages_to_skip(sbi, META);
        long written;
 
-       if (wbc->for_kupdate)
-               return 0;
-
        /* collect a number of dirty meta pages and write together */
-       if (get_pages(sbi, F2FS_DIRTY_META) < nrpages)
-               return 0;
+       if (wbc->for_kupdate || get_pages(sbi, F2FS_DIRTY_META) < nrpages)
+               goto skip_write;
 
        /* if mounting is failed, skip writing node pages */
        mutex_lock(&sbi->cp_mutex);
@@ -123,6 +200,10 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
        mutex_unlock(&sbi->cp_mutex);
        wbc->nr_to_write -= written;
        return 0;
+
+skip_write:
+       wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
+       return 0;
 }
 
 long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
@@ -148,10 +229,22 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
 
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
+
                        lock_page(page);
-                       f2fs_bug_on(page->mapping != mapping);
-                       f2fs_bug_on(!PageDirty(page));
-                       clear_page_dirty_for_io(page);
+
+                       if (unlikely(page->mapping != mapping)) {
+continue_unlock:
+                               unlock_page(page);
+                               continue;
+                       }
+                       if (!PageDirty(page)) {
+                               /* someone wrote it for us */
+                               goto continue_unlock;
+                       }
+
+                       if (!clear_page_dirty_for_io(page))
+                               goto continue_unlock;
+
                        if (f2fs_write_meta_page(page, &wbc)) {
                                unlock_page(page);
                                break;
@@ -285,6 +378,8 @@ void recover_orphan_inodes(struct f2fs_sb_info *sbi)
        start_blk = __start_cp_addr(sbi) + 1;
        orphan_blkaddr = __start_sum_addr(sbi) - 1;
 
+       ra_meta_pages(sbi, start_blk, orphan_blkaddr, META_CP);
+
        for (i = 0; i < orphan_blkaddr; i++) {
                struct page *page = get_meta_page(sbi, start_blk + i);
                struct f2fs_orphan_block *orphan_blk;
@@ -495,7 +590,6 @@ void set_dirty_dir_page(struct inode *inode, struct page *page)
        if (__add_dirty_inode(inode, new))
                kmem_cache_free(inode_entry_slab, new);
 
-       inc_page_count(sbi, F2FS_DIRTY_DENTS);
        inode_inc_dirty_dents(inode);
        SetPagePrivate(page);
        spin_unlock(&sbi->dir_inode_lock);
@@ -526,7 +620,7 @@ void remove_dirty_dir_inode(struct inode *inode)
                return;
 
        spin_lock(&sbi->dir_inode_lock);
-       if (atomic_read(&F2FS_I(inode)->dirty_dents)) {
+       if (get_dirty_dents(inode)) {
                spin_unlock(&sbi->dir_inode_lock);
                return;
        }
@@ -589,7 +683,7 @@ retry:
        inode = igrab(entry->inode);
        spin_unlock(&sbi->dir_inode_lock);
        if (inode) {
-               filemap_flush(inode->i_mapping);
+               filemap_fdatawrite(inode->i_mapping);
                iput(inode);
        } else {
                /*
@@ -824,6 +918,7 @@ void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
        unblock_operations(sbi);
        mutex_unlock(&sbi->cp_mutex);
 
+       stat_inc_cp_count(sbi->stat_info);
        trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish checkpoint");
 }
 
@@ -845,11 +940,11 @@ void init_orphan_info(struct f2fs_sb_info *sbi)
 int __init create_checkpoint_caches(void)
 {
        orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry",
-                       sizeof(struct orphan_inode_entry), NULL);
+                       sizeof(struct orphan_inode_entry));
        if (!orphan_entry_slab)
                return -ENOMEM;
        inode_entry_slab = f2fs_kmem_cache_create("f2fs_dirty_dir_entry",
-                       sizeof(struct dir_inode_entry), NULL);
+                       sizeof(struct dir_inode_entry));
        if (!inode_entry_slab) {
                kmem_cache_destroy(orphan_entry_slab);
                return -ENOMEM;
This page took 0.032716 seconds and 5 git commands to generate.