Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
diff --git a/mm/memory.c b/mm/memory.c
index 6359a4f80c4a951dd1eaeb2ebbb007fde4924db6..b1443ac07c00a4f1a46de6bb260d00e8f52f99db 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -108,6 +108,18 @@ static int __init disable_randmaps(char *s)
 }
 __setup("norandmaps", disable_randmaps);
 
+unsigned long zero_pfn __read_mostly;
+unsigned long highest_memmap_pfn __read_mostly;
+
+/*
+ * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
+ */
+static int __init init_zero_pfn(void)
+{
+       zero_pfn = page_to_pfn(ZERO_PAGE(0));
+       return 0;
+}
+core_initcall(init_zero_pfn);
 
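
The cached zero_pfn only becomes valid once the architecture's paging_init() has
set up ZERO_PAGE, which is why the assignment is deferred to an initcall rather
than done statically.  A hypothetical sanity check built on the same ordering
(not part of the patch, and assuming the zero page sits in lowmem so that
page_address() works) might look like:

	/* illustrative only: verify the page behind zero_pfn really is zeroed */
	static int __init check_zero_pfn(void)
	{
		char *p = page_address(pfn_to_page(zero_pfn));
		int i;

		for (i = 0; i < PAGE_SIZE; i++)
			WARN_ON(p[i] != 0);
		return 0;
	}
	postcore_initcall(check_zero_pfn);	/* runs after the core_initcall above */
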
 /*
  * If a p?d_bad entry is found while walking page tables, report
@@ -444,6 +456,20 @@ static inline int is_cow_mapping(unsigned int flags)
        return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 }
 
+#ifndef is_zero_pfn
+static inline int is_zero_pfn(unsigned long pfn)
+{
+       return pfn == zero_pfn;
+}
+#endif
+
+#ifndef my_zero_pfn
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+       return zero_pfn;
+}
+#endif
+
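
The #ifndef guards above exist so that an architecture header can provide its
own versions of these helpers.  As a sketch only (not taken from this patch),
an architecture that keeps a block of zero pages to avoid virtual cache
aliasing, roughly in the way MIPS does, could override them along these lines;
zero_page_mask is an assumed arch-provided mask covering that block:

	/* in the arch's asm/pgtable.h -- illustrative, not from this commit */
	#define is_zero_pfn is_zero_pfn
	static inline int is_zero_pfn(unsigned long pfn)
	{
		extern unsigned long zero_pfn;
		return (pfn - zero_pfn) <= (zero_page_mask >> PAGE_SHIFT);
	}

	#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))
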
 /*
  * vm_normal_page -- This function gets the "struct page" associated with a pte.
  *
@@ -499,7 +525,9 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
        if (HAVE_PTE_SPECIAL) {
                if (likely(!pte_special(pte)))
                        goto check_pfn;
-               if (!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)))
+               if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+                       return NULL;
+               if (!is_zero_pfn(pfn))
                        print_bad_pte(vma, addr, pte, NULL);
                return NULL;
        }
@@ -521,6 +549,8 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                }
        }
 
+       if (is_zero_pfn(pfn))
+               return NULL;
 check_pfn:
        if (unlikely(pfn > highest_memmap_pfn)) {
                print_bad_pte(vma, addr, pte, NULL);
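
With the two checks above, vm_normal_page() now returns NULL for a zero-page
pte on both the pte_special path and the pfn_valid path, so callers never get
hold of its struct page.  A caller in this file that needs to tell the two
NULL cases apart can do so from the pte itself; a sketch of the expected
pattern:

	/* sketch of a vm_normal_page() caller, not part of the patch */
	struct page *page = vm_normal_page(vma, addr, pte);
	if (!page) {
		if (is_zero_pfn(pte_pfn(pte))) {
			/* shared zero page: read-only, no rss accounting, no rmap */
		} else {
			/* raw VM_PFNMAP/VM_MIXEDMAP pfn: no struct page to touch */
		}
	}
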
@@ -1144,9 +1174,14 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                goto no_page;
        if ((flags & FOLL_WRITE) && !pte_write(pte))
                goto unlock;
+
        page = vm_normal_page(vma, address, pte);
-       if (unlikely(!page))
-               goto bad_page;
+       if (unlikely(!page)) {
+               if ((flags & FOLL_DUMP) ||
+                   !is_zero_pfn(pte_pfn(pte)))
+                       goto bad_page;
+               page = pte_page(pte);
+       }
 
        if (flags & FOLL_GET)
                get_page(page);
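
follow_page() thus keeps two behaviours for a zero-page pte: a caller passing
FOLL_DUMP (the coredump path) is sent to bad_page and can emit a hole instead,
while any other caller gets the shared zero page back via pte_page().  Roughly:

	/* illustrative only */
	page = follow_page(vma, addr, FOLL_GET);		/* zero page, with a reference */
	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);	/* fails, so the dump skips it */
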
@@ -1191,27 +1226,29 @@ no_page_table:
 }
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-                    unsigned long start, int nr_pages, int flags,
+                    unsigned long start, int nr_pages, unsigned int gup_flags,
                     struct page **pages, struct vm_area_struct **vmas)
 {
        int i;
-       unsigned int vm_flags = 0;
-       int write = !!(flags & GUP_FLAGS_WRITE);
-       int force = !!(flags & GUP_FLAGS_FORCE);
+       unsigned long vm_flags;
 
        if (nr_pages <= 0)
                return 0;
+
+       VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
+
        /* 
         * Require read or write permissions.
-        * If 'force' is set, we only require the "MAY" flags.
+        * If FOLL_FORCE is set, we only require the "MAY" flags.
         */
-       vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-       vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+       vm_flags  = (gup_flags & FOLL_WRITE) ?
+                       (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+       vm_flags &= (gup_flags & FOLL_FORCE) ?
+                       (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
        i = 0;
 
        do {
                struct vm_area_struct *vma;
-               unsigned int foll_flags;
 
                vma = find_extend_vma(mm, start);
                if (!vma && in_gate_area(tsk, start)) {
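
The new VM_BUG_ON pins down the calling convention: a pages array may only be
passed together with FOLL_GET, and vice versa, now that FOLL_GET is set by the
callers rather than inside the loop.  A sketch of two calls that satisfy the
assertion (__get_user_pages is internal to mm/, so such callers would live
there):

	/* sketches of valid calls under the VM_BUG_ON above */
	struct page *pages[1];

	/* wants the struct pages back: must also take a reference on each */
	ret = __get_user_pages(tsk, mm, start, 1,
			       FOLL_TOUCH | FOLL_GET, pages, NULL);

	/* only faulting pages in: no array, no FOLL_GET */
	ret = __get_user_pages(tsk, mm, start, 1, FOLL_TOUCH, NULL, NULL);
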
@@ -1223,7 +1260,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        pte_t *pte;
 
                        /* user gate pages are read-only */
-                       if (write)
+                       if (gup_flags & FOLL_WRITE)
                                return i ? : -EFAULT;
                        if (pg > TASK_SIZE)
                                pgd = pgd_offset_k(pg);
@@ -1260,22 +1297,15 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                    !(vm_flags & vma->vm_flags))
                        return i ? : -EFAULT;
 
-               foll_flags = FOLL_TOUCH;
-               if (pages)
-                       foll_flags |= FOLL_GET;
-               if (flags & GUP_FLAGS_DUMP)
-                       foll_flags |= FOLL_DUMP;
-               if (write)
-                       foll_flags |= FOLL_WRITE;
-
                if (is_vm_hugetlb_page(vma)) {
                        i = follow_hugetlb_page(mm, vma, pages, vmas,
-                                       &start, &nr_pages, i, foll_flags);
+                                       &start, &nr_pages, i, gup_flags);
                        continue;
                }
 
                do {
                        struct page *page;
+                       unsigned int foll_flags = gup_flags;
 
                        /*
                         * If we have a pending SIGKILL, don't keep faulting
@@ -1284,9 +1314,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        if (unlikely(fatal_signal_pending(current)))
                                return i ? i : -ERESTARTSYS;
 
-                       if (write)
-                               foll_flags |= FOLL_WRITE;
-
                        cond_resched();
                        while (!(page = follow_page(vma, start, foll_flags))) {
                                int ret;
@@ -1397,12 +1424,14 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                unsigned long start, int nr_pages, int write, int force,
                struct page **pages, struct vm_area_struct **vmas)
 {
-       int flags = 0;
+       int flags = FOLL_TOUCH;
 
+       if (pages)
+               flags |= FOLL_GET;
        if (write)
-               flags |= GUP_FLAGS_WRITE;
+               flags |= FOLL_WRITE;
        if (force)
-               flags |= GUP_FLAGS_FORCE;
+               flags |= FOLL_FORCE;
 
        return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
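
get_user_pages() itself keeps its write/force int arguments; they are folded
into FOLL_* bits here, with FOLL_TOUCH and FOLL_GET now set by the wrapper
instead of inside the core loop.  A sketch of an ordinary caller, which is
unaffected by the conversion (uaddr is an assumed user virtual address):

	struct mm_struct *mm = current->mm;
	struct page *page;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, uaddr & PAGE_MASK, 1,
			     1 /* write */, 0 /* force */, &page, NULL);
	up_read(&mm->mmap_sem);
	if (ret == 1) {
		/* ... access the page ... */
		page_cache_release(page);
	}
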
@@ -1429,12 +1458,8 @@ struct page *get_dump_page(unsigned long addr)
        struct page *page;
 
        if (__get_user_pages(current, current->mm, addr, 1,
-                       GUP_FLAGS_FORCE | GUP_FLAGS_DUMP, &page, &vma) < 1)
-               return NULL;
-       if (page == ZERO_PAGE(0)) {
-               page_cache_release(page);
+                       FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1)
                return NULL;
-       }
        flush_cache_page(vma, addr, page_to_pfn(page));
        return page;
 }
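
Because FOLL_DUMP makes follow_page() fail on the zero page, get_dump_page()
no longer needs the explicit ZERO_PAGE(0) comparison: a NULL return now covers
holes, zero pages and anything else not worth writing out.  Roughly how a
coredump writer consumes it (a sketch in the style of the ELF dumper, not code
from this file):

	page = get_dump_page(addr);
	if (page) {
		void *kaddr = kmap(page);
		stop = !dump_write(file, kaddr, PAGE_SIZE);
		kunmap(page);
		page_cache_release(page);
	} else
		stop = !dump_seek(file, PAGE_SIZE);
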
@@ -1617,7 +1642,8 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
         * If we don't have pte special, then we have to use the pfn_valid()
         * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
         * refcount the page if pfn_valid is true (hence insert_page rather
-        * than insert_pfn).
+        * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
+        * without pte special, it would there be refcounted as a normal page.
         */
        if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
                struct page *page;
@@ -2084,10 +2110,19 @@ gotten:
 
        if (unlikely(anon_vma_prepare(vma)))
                goto oom;
-       VM_BUG_ON(old_page == ZERO_PAGE(0));
-       new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
-       if (!new_page)
-               goto oom;
+
+       if (is_zero_pfn(pte_pfn(orig_pte))) {
+               new_page = alloc_zeroed_user_highpage_movable(vma, address);
+               if (!new_page)
+                       goto oom;
+       } else {
+               new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+               if (!new_page)
+                       goto oom;
+               cow_user_page(new_page, old_page, address, vma);
+       }
+       __SetPageUptodate(new_page);
+
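
When the write fault finds the zero page behind orig_pte there is nothing to
copy, so the cow_user_page() step is skipped and a freshly zeroed movable page
is allocated instead; __SetPageUptodate() now covers both branches, which is
why it and the copy moved up from further down the function.  On
configurations without an arch-specific helper this is roughly equivalent to
(sketch only; the real helper can clear the page through a user-address-aware
routine for cache flushing/colouring):

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, address);
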
        /*
         * Don't let another task, with possibly unlocked vma,
         * keep the mlocked page.
@@ -2097,8 +2132,6 @@ gotten:
                clear_page_mlock(old_page);
                unlock_page(old_page);
        }
-       cow_user_page(new_page, old_page, address, vma);
-       __SetPageUptodate(new_page);
 
        if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
                goto oom_free_new;
@@ -2639,6 +2672,16 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        spinlock_t *ptl;
        pte_t entry;
 
+       if (!(flags & FAULT_FLAG_WRITE)) {
+               entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+                                               vma->vm_page_prot));
+               ptl = pte_lockptr(mm, pmd);
+               spin_lock(ptl);
+               if (!pte_none(*page_table))
+                       goto unlock;
+               goto setpte;
+       }
+
        /* Allocate our own private page. */
        pte_unmap(page_table);
 
@@ -2653,7 +2696,8 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto oom_free_page;
 
        entry = mk_pte(page, vma->vm_page_prot);
-       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+       if (vma->vm_flags & VM_WRITE)
+               entry = pte_mkwrite(pte_mkdirty(entry));
 
        page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (!pte_none(*page_table))
@@ -2661,6 +2705,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        inc_mm_counter(mm, anon_rss);
        page_add_new_anon_rmap(page, vma, address);
+setpte:
        set_pte_at(mm, address, page_table, entry);
 
        /* No need to invalidate - it was non-present before */
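
Taken together with the do_wp_page() change above, a read fault on private
anonymous memory now installs a pte_special mapping of the zero page (no page
allocated, no rss accounting, no rmap), and the first write replaces it with a
private zeroed page.  Seen from userspace (illustrative test program, not
kernel code):

	#include <assert.h>
	#include <sys/mman.h>

	int main(void)
	{
		char *p = mmap(0, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		assert(p != MAP_FAILED);
		assert(p[0] == 0);	/* read fault: maps the shared zero page */
		p[0] = 1;		/* write fault: do_wp_page() installs a   */
					/* freshly zeroed private page, no copy   */
		return 0;
	}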