mm: use clear_page_mlock() in page_remove_rmap()
diff --git a/mm/memory.c b/mm/memory.c
index 57361708d1a57d7bc11c8f34d269a35c50317dbb..5f5d1f039bf4df66b4f4cdb80fac36b83fdd30bf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1047,7 +1047,8 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
         * readonly mappings. The tradeoff is that copy_page_range is more
         * efficient than faulting.
         */
-       if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
+       if (!(vma->vm_flags & (VM_HUGETLB | VM_NONLINEAR |
+                              VM_PFNMAP | VM_MIXEDMAP))) {
                if (!vma->anon_vma)
                        return 0;
        }
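
The hunk above swaps the removed VM_INSERTPAGE flag for VM_MIXEDMAP in the "can fork() skip copying page tables for this vma?" test. A minimal sketch of that decision, under the 3.6-era flag set and with a hypothetical helper name (can_skip_copy_at_fork is not in the patch):

/*
 * Sketch only: page tables must be copied at fork() when the child
 * could not simply re-fault the contents, i.e. for hugetlb, nonlinear,
 * pfnmap or mixedmap vmas, or when the vma holds anonymous pages.
 * VM_MIXEDMAP now stands in for the removed VM_INSERTPAGE.
 */
static bool can_skip_copy_at_fork(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_HUGETLB | VM_NONLINEAR |
			     VM_PFNMAP | VM_MIXEDMAP))
		return false;			/* special mapping: copy */
	return vma->anon_vma == NULL;		/* pure file-backed: re-fault */
}
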
@@ -1055,12 +1056,12 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        if (is_vm_hugetlb_page(vma))
                return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
-       if (unlikely(is_pfn_mapping(vma))) {
+       if (unlikely(vma->vm_flags & VM_PFNMAP)) {
                /*
                 * We do not free on error cases below as remove_vma
                 * gets called on error from higher level routine
                 */
-               ret = track_pfn_vma_copy(vma);
+               ret = track_pfn_copy(vma);
                if (ret)
                        return ret;
        }
@@ -1327,8 +1328,8 @@ static void unmap_single_vma(struct mmu_gather *tlb,
        if (vma->vm_file)
                uprobe_munmap(vma, start, end);
 
-       if (unlikely(is_pfn_mapping(vma)))
-               untrack_pfn_vma(vma, 0, 0);
+       if (unlikely(vma->vm_flags & VM_PFNMAP))
+               untrack_pfn(vma, 0, 0);
 
        if (start != end) {
                if (unlikely(is_vm_hugetlb_page(vma))) {
@@ -1576,12 +1577,12 @@ split_fallthrough:
                if (page->mapping && trylock_page(page)) {
                        lru_add_drain();  /* push cached pages to LRU */
                        /*
-                        * Because we lock page here and migration is
-                        * blocked by the pte's page reference, we need
-                        * only check for file-cache page truncation.
+                        * Because we lock page here, and migration is
+                        * blocked by the pte's page reference, and we
+                        * know the page is still mapped, we don't even
+                        * need to check for file-cache page truncation.
                         */
-                       if (page->mapping)
-                               mlock_vma_page(page);
+                       mlock_vma_page(page);
                        unlock_page(page);
                }
        }
@@ -2085,6 +2086,11 @@ out:
  * ask for a shared writable mapping!
  *
  * The page does not need to be reserved.
+ *
+ * Usually this function is called from f_op->mmap() handler
+ * under mm->mmap_sem write-lock, so it can change vma->vm_flags.
+ * Caller must set VM_MIXEDMAP on vma if it wants to call this
+ * function from other places, for example from page-fault handler.
  */
 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
                        struct page *page)
@@ -2093,7 +2099,11 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
                return -EFAULT;
        if (!page_count(page))
                return -EINVAL;
-       vma->vm_flags |= VM_INSERTPAGE;
+       if (!(vma->vm_flags & VM_MIXEDMAP)) {
+               BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
+               BUG_ON(vma->vm_flags & VM_PFNMAP);
+               vma->vm_flags |= VM_MIXEDMAP;
+       }
        return insert_page(vma, addr, page, vma->vm_page_prot);
 }
 EXPORT_SYMBOL(vm_insert_page);
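
The new kernel-doc above documents the locking rule that vm_insert_page() now asserts with BUG_ON(): VM_MIXEDMAP may only be set while mm->mmap_sem is held for writing, which is the normal f_op->mmap() situation. A hedged sketch of that usual call site, a driver mapping a single kernel page (my_mmap and my_page are illustrative names, not from the patch):

#include <linux/mm.h>

static struct page *my_page;	/* assume allocated elsewhere with alloc_page() */

/* Runs from mmap(2) with mm->mmap_sem held for writing, so
 * vm_insert_page() may set VM_MIXEDMAP on the vma itself. */
static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	return vm_insert_page(vma, vma->vm_start, my_page);
}
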
@@ -2162,14 +2172,11 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return -EFAULT;
-       if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
+       if (track_pfn_insert(vma, &pgprot, pfn))
                return -EINVAL;
 
        ret = insert_pfn(vma, addr, pfn, pgprot);
 
-       if (ret)
-               untrack_pfn_vma(vma, pfn, PAGE_SIZE);
-
        return ret;
 }
 EXPORT_SYMBOL(vm_insert_pfn);
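
track_pfn_insert() replaces track_pfn_vma_new() here because vm_insert_pfn() inserts one pfn at a time, typically from a page-fault handler rather than at mmap time. A hedged sketch of such a handler for a VM_PFNMAP vma, using the 3.6-era fault signature (my_fault and my_base_pfn are illustrative):

#include <linux/mm.h>

static unsigned long my_base_pfn;	/* assume set up when the device was probed */

static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long addr = (unsigned long)vmf->virtual_address;

	/* Insert a single raw pfn; PAT tracking happens via track_pfn_insert(). */
	if (vm_insert_pfn(vma, addr, my_base_pfn + vmf->pgoff))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}
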
@@ -2290,37 +2297,30 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
         * rest of the world about it:
         *   VM_IO tells people not to look at these pages
         *      (accesses can have side effects).
-        *   VM_RESERVED is specified all over the place, because
-        *      in 2.4 it kept swapout's vma scan off this vma; but
-        *      in 2.6 the LRU scan won't even find its pages, so this
-        *      flag means no more than count its pages in reserved_vm,
-        *      and omit it from core dump, even when VM_IO turned off.
         *   VM_PFNMAP tells the core MM that the base pages are just
         *      raw PFN mappings, and do not have a "struct page" associated
         *      with them.
+        *   VM_DONTEXPAND
+        *      Disable vma merging and expanding with mremap().
+        *   VM_DONTDUMP
+        *      Omit vma from core dump, even when VM_IO turned off.
         *
         * There's a horrible special case to handle copy-on-write
         * behaviour that some programs depend on. We mark the "original"
         * un-COW'ed pages by matching them up with "vma->vm_pgoff".
+        * See vm_normal_page() for details.
         */
-       if (addr == vma->vm_start && end == vma->vm_end) {
+       if (is_cow_mapping(vma->vm_flags)) {
+               if (addr != vma->vm_start || end != vma->vm_end)
+                       return -EINVAL;
                vma->vm_pgoff = pfn;
-               vma->vm_flags |= VM_PFN_AT_MMAP;
-       } else if (is_cow_mapping(vma->vm_flags))
-               return -EINVAL;
-
-       vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+       }
 
-       err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
-       if (err) {
-               /*
-                * To indicate that track_pfn related cleanup is not
-                * needed from higher level routine calling unmap_vmas
-                */
-               vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
-               vma->vm_flags &= ~VM_PFN_AT_MMAP;
+       err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
+       if (err)
                return -EINVAL;
-       }
+
+       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
 
        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
@@ -2335,7 +2335,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
        } while (pgd++, addr = next, addr != end);
 
        if (err)
-               untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
+               untrack_pfn(vma, pfn, PAGE_ALIGN(size));
 
        return err;
 }
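
With VM_RESERVED gone, remap_pfn_range() now sets VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP itself, and only after track_pfn_remap() has succeeded, so the error path no longer has to clear flags by hand. A hedged sketch of the classic caller, a driver mapping an MMIO region from its mmap handler (my_mmio_phys, MY_MMIO_SIZE and my_mmio_mmap are illustrative):

#include <linux/mm.h>

#define MY_MMIO_SIZE	0x1000UL
static phys_addr_t my_mmio_phys;	/* assume read from the device's BAR */

static int my_mmio_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > MY_MMIO_SIZE)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	/* remap_pfn_range() marks the vma VM_IO|VM_PFNMAP|VM_DONTEXPAND|VM_DONTDUMP. */
	return remap_pfn_range(vma, vma->vm_start,
			       my_mmio_phys >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}
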
@@ -2801,14 +2801,13 @@ static void unmap_mapping_range_vma(struct vm_area_struct *vma,
        zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
 }
 
-static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
+static inline void unmap_mapping_range_tree(struct rb_root *root,
                                            struct zap_details *details)
 {
        struct vm_area_struct *vma;
-       struct prio_tree_iter iter;
        pgoff_t vba, vea, zba, zea;
 
-       vma_prio_tree_foreach(vma, &iter, root,
+       vma_interval_tree_foreach(vma, root,
                        details->first_index, details->last_index) {
 
                vba = vma->vm_pgoff;
@@ -2839,7 +2838,7 @@ static inline void unmap_mapping_range_list(struct list_head *head,
         * across *all* the pages in each nonlinear VMA, not just the pages
         * whose virtual address lies outside the file truncation point.
         */
-       list_for_each_entry(vma, head, shared.vm_set.list) {
+       list_for_each_entry(vma, head, shared.nonlinear) {
                details->nonlinear_vma = vma;
                unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details);
        }
@@ -2883,7 +2882,7 @@ void unmap_mapping_range(struct address_space *mapping,
 
 
        mutex_lock(&mapping->i_mmap_mutex);
-       if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
+       if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
                unmap_mapping_range_tree(&mapping->i_mmap, &details);
        if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
                unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
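
unmap_mapping_range() is the exported entry point a filesystem uses when truncating or hole-punching, and it now walks the interval tree plus the nonlinear list shown above. A hedged sketch of a truncate-style caller (my_truncate_mappings is illustrative; real filesystems go through truncate_pagecache()):

#include <linux/fs.h>
#include <linux/mm.h>

/* Zap every user mapping of file data beyond newsize before the blocks
 * are freed.  holelen == 0 means "to end of file"; even_cows == 1 also
 * removes private COW copies of those pages.
 */
static void my_truncate_mappings(struct inode *inode, loff_t newsize)
{
	unmap_mapping_range(inode->i_mapping,
			    round_up(newsize, PAGE_SIZE), 0, 1);
}
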