memcg, vmscan: Fix forced scan of anonymous pages
diff --git a/mm/shmem.c b/mm/shmem.c
index 1140f49b6ded6f7a72d89d2e89f9fce0df1a940d..302d1cf7ad07c385ebfeb381dd42af542b4787a5 100644
@@ -85,7 +85,7 @@ static struct vfsmount *shm_mnt;
  * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
-       int     mode;           /* FALLOC_FL mode currently operating */
+       wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
        pgoff_t start;          /* start of range currently being fallocated */
        pgoff_t next;           /* the next page offset to be fallocated */
        pgoff_t nr_falloced;    /* how many new pages have been fallocated */
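Pulling the pieces together, the struct after this change looks roughly like the sketch below. Only the first four fields are visible in this hunk; nr_unswapped is inferred from the shmem_writepage() hunk further down, and its type and comment here are assumptions:

	struct shmem_falloc {
		wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
		pgoff_t start;		/* start of range currently being fallocated */
		pgoff_t next;		/* the next page offset to be fallocated */
		pgoff_t nr_falloced;	/* how many new pages have been fallocated */
		pgoff_t nr_unswapped;	/* bumped by writepage; type assumed */
	};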
@@ -149,6 +149,19 @@ static inline void shmem_unacct_size(unsigned long flags, loff_t size)
                vm_unacct_memory(VM_ACCT(size));
 }
 
+static inline int shmem_reacct_size(unsigned long flags,
+               loff_t oldsize, loff_t newsize)
+{
+       if (!(flags & VM_NORESERVE)) {
+               if (VM_ACCT(newsize) > VM_ACCT(oldsize))
+                       return security_vm_enough_memory_mm(current->mm,
+                                       VM_ACCT(newsize) - VM_ACCT(oldsize));
+               else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
+                       vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
+       }
+       return 0;
+}
+
 /*
  * ... whereas tmpfs objects are accounted incrementally as
  * pages are allocated, in order to allow huge sparse files.
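The new shmem_reacct_size() charges or releases only the delta between the page-rounded reservations: assuming VM_ACCT() rounds a byte count up to whole pages (consistent with the accounting comments in this file), growing a tmpfs file from 3000 to 9000 bytes on a 4K-page system charges 3 - 1 = 2 pages, a shrink releases the difference, and only a grow can fail. A hypothetical caller, mirroring the shmem_setattr() hunk below:

	error = shmem_reacct_size(SHMEM_I(inode)->flags, oldsize, newsize);
	if (error)
		return error;	/* grow refused: old reservation left intact */
	i_size_write(inode, newsize);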
@@ -280,7 +293,7 @@ static bool shmem_confirm_swap(struct address_space *mapping,
  */
 static int shmem_add_to_page_cache(struct page *page,
                                   struct address_space *mapping,
-                                  pgoff_t index, gfp_t gfp, void *expected)
+                                  pgoff_t index, void *expected)
 {
        int error;
 
@@ -468,23 +481,20 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                return;
 
        index = start;
-       for ( ; ; ) {
+       while (index < end) {
                cond_resched();
 
                pvec.nr = find_get_entries(mapping, index,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE),
                                pvec.pages, indices);
                if (!pvec.nr) {
-                       if (index == start || unfalloc)
+                       /* If all gone or hole-punch or unfalloc, we're done */
+                       if (index == start || end != -1)
                                break;
+                       /* But if truncating, restart to make sure all gone */
                        index = start;
                        continue;
                }
-               if ((index == start || unfalloc) && indices[0] >= end) {
-                       pagevec_remove_exceptionals(&pvec);
-                       pagevec_release(&pvec);
-                       break;
-               }
                mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
@@ -496,8 +506,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                        if (radix_tree_exceptional_entry(page)) {
                                if (unfalloc)
                                        continue;
-                               nr_swaps_freed += !shmem_free_swap(mapping,
-                                                               index, page);
+                               if (shmem_free_swap(mapping, index, page)) {
+                                       /* Swap was replaced by page: retry */
+                                       index--;
+                                       break;
+                               }
+                               nr_swaps_freed++;
                                continue;
                        }
 
@@ -506,6 +520,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                                if (page->mapping == mapping) {
                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
                                        truncate_inode_page(mapping, page);
+                               } else {
+                                       /* Page was replaced by swap: retry */
+                                       unlock_page(page);
+                                       index--;
+                                       break;
                                }
                        }
                        unlock_page(page);
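Both new retry paths above use the same idiom: if the slot changed under us (a swap entry replaced by a page, or a page replaced by swap), step index back and break out of the pagevec loop, so the outer while (index < end) loop refetches the entries and reconsiders that slot. A condensed sketch of the pattern, with slot_changed_under_us as a stand-in predicate rather than a real helper:

	while (index < end) {
		pvec.nr = find_get_entries(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				pvec.pages, indices);
		for (i = 0; i < pagevec_count(&pvec); i++) {
			index = indices[i];
			if (slot_changed_under_us(pvec.pages[i])) {
				index--;	/* revisit this slot */
				break;		/* drop pagevec, refetch */
			}
			/* free the swap entry or truncate the page */
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;	/* advance past the last slot handled */
	}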
@@ -543,6 +562,10 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
                loff_t newsize = attr->ia_size;
 
                if (newsize != oldsize) {
+                       error = shmem_reacct_size(SHMEM_I(inode)->flags,
+                                       oldsize, newsize);
+                       if (error)
+                               return error;
                        i_size_write(inode, newsize);
                        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
                }
@@ -643,7 +666,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
         */
        if (!error)
                error = shmem_add_to_page_cache(*pagep, mapping, index,
-                                               GFP_NOWAIT, radswap);
+                                               radswap);
        if (error != -ENOMEM) {
                /*
                 * Truncation and eviction use free_swap_and_cache(), which
@@ -760,7 +783,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
                        spin_lock(&inode->i_lock);
                        shmem_falloc = inode->i_private;
                        if (shmem_falloc &&
-                           !shmem_falloc->mode &&
+                           !shmem_falloc->waitq &&
                            index >= shmem_falloc->start &&
                            index < shmem_falloc->next)
                                shmem_falloc->nr_unswapped++;
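With the mode field gone, a non-NULL waitq is what now identifies a hole-punch in progress (the shmem_fallocate() hunks below set waitq for FALLOC_FL_PUNCH_HOLE and NULL it for preallocation), so !shmem_falloc->waitq here still means what !shmem_falloc->mode used to: a preallocation, not a punch, covers this index.

	/* invariant established by the shmem_fallocate() hunks below:
	 *   shmem_falloc->waitq != NULL  =>  hole-punch in progress
	 *   shmem_falloc->waitq == NULL  =>  preallocation in progress
	 */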
@@ -1089,7 +1112,7 @@ repeat:
                                                gfp & GFP_RECLAIM_MASK);
                if (!error) {
                        error = shmem_add_to_page_cache(page, mapping, index,
-                                               gfp, swp_to_radix_entry(swap));
+                                               swp_to_radix_entry(swap));
                        /*
                         * We already confirmed swap under page lock, and make
                         * no memory allocation here, so usually no possibility
@@ -1143,7 +1166,7 @@ repeat:
                __SetPageSwapBacked(page);
                __set_page_locked(page);
                if (sgp == SGP_WRITE)
-                       init_page_accessed(page);
+                       __SetPageReferenced(page);
 
                error = mem_cgroup_charge_file(page, current->mm,
                                                gfp & GFP_RECLAIM_MASK);
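Note that __SetPageReferenced() is the non-atomic bit setter, shaving an atomic operation; that is safe here only because the page is freshly allocated and not yet visible to any other context, being added to the page cache further down:

	__SetPageSwapBacked(page);
	__set_page_locked(page);
	if (sgp == SGP_WRITE)
		__SetPageReferenced(page); /* non-atomic OK: page not yet published */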
@@ -1152,7 +1175,7 @@ repeat:
                error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
                if (!error) {
                        error = shmem_add_to_page_cache(page, mapping, index,
-                                                       gfp, NULL);
+                                                       NULL);
                        radix_tree_preload_end();
                }
                if (error) {
@@ -1248,38 +1271,58 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         * Trinity finds that probing a hole which tmpfs is punching can
         * prevent the hole-punch from ever completing: which in turn
         * locks writers out with its hold on i_mutex.  So refrain from
-        * faulting pages into the hole while it's being punched, and
-        * wait on i_mutex to be released if vmf->flags permits.
+        * faulting pages into the hole while it's being punched.  Although
+        * shmem_undo_range() does remove the additions, it may be unable to
+        * keep up, as each new page needs its own unmap_mapping_range() call,
+        * and the i_mmap tree grows ever slower to scan if new vmas are added.
+        *
+        * It does not matter if we sometimes reach this check just before the
+        * hole-punch begins, so that one fault then races with the punch:
+        * we just need to make racing faults a rare case.
+        *
+        * The implementation below would be much simpler if we just used a
+        * standard mutex or completion: but we cannot take i_mutex in fault,
+        * and bloating every shmem inode for this unlikely case would be sad.
         */
        if (unlikely(inode->i_private)) {
                struct shmem_falloc *shmem_falloc;
 
                spin_lock(&inode->i_lock);
                shmem_falloc = inode->i_private;
-               if (!shmem_falloc ||
-                   shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
-                   vmf->pgoff < shmem_falloc->start ||
-                   vmf->pgoff >= shmem_falloc->next)
-                       shmem_falloc = NULL;
-               spin_unlock(&inode->i_lock);
-               /*
-                * i_lock has protected us from taking shmem_falloc seriously
-                * once return from shmem_fallocate() went back up that stack.
-                * i_lock does not serialize with i_mutex at all, but it does
-                * not matter if sometimes we wait unnecessarily, or sometimes
-                * miss out on waiting: we just need to make those cases rare.
-                */
-               if (shmem_falloc) {
+               if (shmem_falloc &&
+                   shmem_falloc->waitq &&
+                   vmf->pgoff >= shmem_falloc->start &&
+                   vmf->pgoff < shmem_falloc->next) {
+                       wait_queue_head_t *shmem_falloc_waitq;
+                       DEFINE_WAIT(shmem_fault_wait);
+
+                       ret = VM_FAULT_NOPAGE;
                        if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
                           !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+                               /* It's polite to up mmap_sem if we can */
                                up_read(&vma->vm_mm->mmap_sem);
-                               mutex_lock(&inode->i_mutex);
-                               mutex_unlock(&inode->i_mutex);
-                               return VM_FAULT_RETRY;
+                               ret = VM_FAULT_RETRY;
                        }
-                       /* cond_resched? Leave that to GUP or return to user */
-                       return VM_FAULT_NOPAGE;
+
+                       shmem_falloc_waitq = shmem_falloc->waitq;
+                       prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+                                       TASK_UNINTERRUPTIBLE);
+                       spin_unlock(&inode->i_lock);
+                       schedule();
+
+                       /*
+                        * shmem_falloc_waitq points into the shmem_fallocate()
+                        * stack of the hole-punching task: shmem_falloc_waitq
+                        * is usually invalid by the time we reach here, but
+                        * finish_wait() does not dereference it in that case;
+                        * though i_lock needed lest racing with wake_up_all().
+                        */
+                       spin_lock(&inode->i_lock);
+                       finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+                       spin_unlock(&inode->i_lock);
+                       return ret;
                }
+               spin_unlock(&inode->i_lock);
        }
 
        error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
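Putting the fault side together: a fault that lands in the hole parks itself on the punching task's on-stack waitqueue while still holding i_lock, sleeps until wake_up_all(), and re-takes i_lock around finish_wait() because the waitqueue head may already be out of scope in shmem_fallocate(). A condensed sketch of the handshake (fault side only; falls_in_hole is a stand-in for the range test above, and the RETRY/NOPAGE choice is elided):

	spin_lock(&inode->i_lock);
	shmem_falloc = inode->i_private;
	if (shmem_falloc && falls_in_hole(shmem_falloc, vmf->pgoff)) {
		wait_queue_head_t *waitq = shmem_falloc->waitq;
		DEFINE_WAIT(wait);

		prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&inode->i_lock);
		schedule();	/* until wake_up_all() in shmem_fallocate() */

		/*
		 * waitq may now point into a dead stack frame, but
		 * finish_wait() does not dereference it once we have
		 * been woken; i_lock serializes us against the waker.
		 */
		spin_lock(&inode->i_lock);
		finish_wait(waitq, &wait);
		spin_unlock(&inode->i_lock);
		return ret;
	}
	spin_unlock(&inode->i_lock);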
@@ -1774,13 +1817,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 
        mutex_lock(&inode->i_mutex);
 
-       shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
-
        if (mode & FALLOC_FL_PUNCH_HOLE) {
                struct address_space *mapping = file->f_mapping;
                loff_t unmap_start = round_up(offset, PAGE_SIZE);
                loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+               DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
 
+               shmem_falloc.waitq = &shmem_falloc_waitq;
                shmem_falloc.start = unmap_start >> PAGE_SHIFT;
                shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
                spin_lock(&inode->i_lock);
@@ -1792,8 +1835,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                                            1 + unmap_end - unmap_start, 0);
                shmem_truncate_range(inode, offset, offset + len - 1);
                /* No need to unmap again: hole-punching leaves COWed pages */
+
+               spin_lock(&inode->i_lock);
+               inode->i_private = NULL;
+               wake_up_all(&shmem_falloc_waitq);
+               spin_unlock(&inode->i_lock);
                error = 0;
-               goto undone;
+               goto out;
        }
 
        /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
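And the punch side: the waitqueue head lives on shmem_fallocate()'s stack and is published through inode->i_private (the assignment itself sits just outside the shown hunk, under the spin_lock at its edge); after the truncate, the pointer is cleared and all waiters are woken, both under i_lock so that no fault can enqueue itself on a head that is about to go out of scope. Condensed sketch:

	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);

	shmem_falloc.waitq = &shmem_falloc_waitq;
	spin_lock(&inode->i_lock);
	inode->i_private = &shmem_falloc;	/* faults in range now wait */
	spin_unlock(&inode->i_lock);

	/* unmap_mapping_range() + shmem_truncate_range() punch the hole */

	spin_lock(&inode->i_lock);
	inode->i_private = NULL;		/* unpublish before stack unwinds */
	wake_up_all(&shmem_falloc_waitq);	/* release every blocked faulter */
	spin_unlock(&inode->i_lock);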
@@ -1809,6 +1857,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                goto out;
        }
 
+       shmem_falloc.waitq = NULL;
        shmem_falloc.start = start;
        shmem_falloc.next  = start;
        shmem_falloc.nr_falloced = 0;
@@ -2900,16 +2949,16 @@ static struct file *__shmem_file_setup(const char *name, loff_t size,
        this.len = strlen(name);
        this.hash = 0; /* will go */
        sb = shm_mnt->mnt_sb;
+       path.mnt = mntget(shm_mnt);
        path.dentry = d_alloc_pseudo(sb, &this);
        if (!path.dentry)
                goto put_memory;
        d_set_d_op(path.dentry, &anon_ops);
-       path.mnt = mntget(shm_mnt);
 
        res = ERR_PTR(-ENOSPC);
        inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
        if (!inode)
-               goto put_dentry;
+               goto put_memory;
 
        inode->i_flags |= i_flags;
        d_instantiate(path.dentry, inode);
@@ -2917,19 +2966,19 @@ static struct file *__shmem_file_setup(const char *name, loff_t size,
        clear_nlink(inode);     /* It is unlinked */
        res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
        if (IS_ERR(res))
-               goto put_dentry;
+               goto put_path;
 
        res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
                  &shmem_file_operations);
        if (IS_ERR(res))
-               goto put_dentry;
+               goto put_path;
 
        return res;
 
-put_dentry:
-       path_put(&path);
 put_memory:
        shmem_unacct_size(flags, size);
+put_path:
+       path_put(&path);
        return res;
 }
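The reordering above also straightens out the error unwinding: path.mnt is taken before d_alloc_pseudo(), so every failure can safely path_put(), and the labels are swapped so each exit releases exactly what was set up. In particular, once the inode has been instantiated, failures jump to put_path only, because dropping the dentry evicts the inode and shmem_evict_inode() (outside this diff) unaccounts the size itself; the old put_dentry path followed by put_memory could unaccount it twice. Schematically:

	/* unwind pairing after this change:
	 *   d_alloc_pseudo() fails      -> put_memory: unacct size, then path_put
	 *                                  (dput(NULL) inside path_put is a no-op)
	 *   shmem_get_inode() fails     -> put_memory: unacct size, then path_put
	 *   failures after d_instantiate -> put_path: path_put only; inode
	 *                                  eviction unaccounts the size once
	 */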
 