mm: remove unused GUP flags
diff --git a/mm/memory.c b/mm/memory.c
index e8f63d9961ea58f5c9643c111859911bc91dc675..4b5200f5f35ac96f176a75482fee45ca995ce8d5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -45,6 +45,7 @@
 #include <linux/swap.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
+#include <linux/ksm.h>
 #include <linux/rmap.h>
 #include <linux/module.h>
 #include <linux/delayacct.h>
@@ -597,8 +598,8 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        page = vm_normal_page(vma, addr, pte);
        if (page) {
                get_page(page);
-               page_dup_rmap(page, vma, addr);
-               rss[!!PageAnon(page)]++;
+               page_dup_rmap(page);
+               rss[PageAnon(page)]++;
        }
 
 out_set_pte:
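
The new rss[PageAnon(page)]++ relies on PageAnon() returning exactly 0 or 1 (it tests the PAGE_MAPPING_ANON bit and normalizes the result), so the old !! was redundant: rss[0] counts file-backed pages, rss[1] anonymous ones. The two-argument page_dup_rmap() likewise loses its CONFIG_DEBUG_VM argument checking; a minimal sketch of what the simplified helper amounts to, quoted from memory rather than from this patch:

	static inline void page_dup_rmap(struct page *page)
	{
		/* one more pte maps this page; no vma/address bookkeeping */
		atomic_inc(&page->_mapcount);
	}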
@@ -1216,8 +1217,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
        unsigned int vm_flags = 0;
        int write = !!(flags & GUP_FLAGS_WRITE);
        int force = !!(flags & GUP_FLAGS_FORCE);
-       int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
-       int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
 
        if (nr_pages <= 0)
                return 0;
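
For context, the write/force bits decoded above feed the vm_flags mask that the !(vm_flags & vma->vm_flags) test below matches against. A sketch of that setup as __get_user_pages() computed it in this era (reproduced from memory, not part of this diff):

	/*
	 * A write request demands VM_WRITE; "force" relaxes the check to
	 * the VM_MAY* bits, so e.g. ptrace can poke a read-only vma.
	 */
	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);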
@@ -1243,7 +1242,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        pte_t *pte;
 
                        /* user gate pages are read-only */
-                       if (!ignore && write)
+                       if (write)
                                return i ? : -EFAULT;
                        if (pg > TASK_SIZE)
                                pgd = pgd_offset_k(pg);
@@ -1277,7 +1276,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
                if (!vma ||
                    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
-                   (!ignore && !(vm_flags & vma->vm_flags)))
+                   !(vm_flags & vma->vm_flags))
                        return i ? : -EFAULT;
 
                if (is_vm_hugetlb_page(vma)) {
@@ -1297,13 +1296,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
                        /*
                         * If we have a pending SIGKILL, don't keep faulting
-                        * pages and potentially allocating memory, unless
-                        * current is handling munlock--e.g., on exit. In
-                        * that case, we are not allocating memory.  Rather,
-                        * we're only unlocking already resident/mapped pages.
+                        * pages and potentially allocating memory.
                         */
-                       if (unlikely(!ignore_sigkill &&
-                                       fatal_signal_pending(current)))
+                       if (unlikely(fatal_signal_pending(current)))
                                return i ? i : -ERESTARTSYS;
 
                        if (write)
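
Note the two return conventions above: "return i ? : -EFAULT" hands back the count of pages already pinned if there are any, and the SIGKILL path does the same with -ERESTARTSYS. Callers must therefore handle a short count as well as an error; a hypothetical caller pattern (the cleanup policy here is illustrative, not from this patch):

	ret = get_user_pages(tsk, mm, start, nr_pages, write, force,
			     pages, NULL);
	if (ret < 0)
		return ret;		/* -EFAULT or -ERESTARTSYS */
	if (ret < nr_pages) {
		/* short pin: drop what we got before failing */
		while (ret--)
			put_page(pages[ret]);
		return -EFAULT;
	}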
@@ -1974,7 +1969,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
         * Take out anonymous pages first, anonymous shared vmas are
         * not dirty accountable.
         */
-       if (PageAnon(old_page)) {
+       if (PageAnon(old_page) && !PageKsm(old_page)) {
                if (!trylock_page(old_page)) {
                        page_cache_get(old_page);
                        pte_unmap_unlock(page_table, ptl);
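
The added !PageKsm() test keeps KSM's merged pages out of the anonymous-reuse fast path, so do_wp_page() breaks COW on them with a fresh copy instead of handing the shared page back writable. KSM pages of this vintage are recognizable as PageAnon pages with no anon_vma; a sketch of the test as include/linux/ksm.h defined it around this time (quoted from memory, so indicative only):

	static inline int PageKsm(struct page *page)
	{
		/* anon-flagged mapping word with a NULL anon_vma pointer */
		return ((unsigned long)page->mapping == PAGE_MAPPING_ANON);
	}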
@@ -2115,9 +2110,14 @@ gotten:
                 * seen in the presence of one thread doing SMC and another
                 * thread doing COW.
                 */
-               ptep_clear_flush_notify(vma, address, page_table);
+               ptep_clear_flush(vma, address, page_table);
                page_add_new_anon_rmap(new_page, vma, address);
-               set_pte_at(mm, address, page_table, entry);
+               /*
+                * We call the notify macro here because, when using secondary
+                * mmu page tables (such as kvm shadow page tables), we want the
+                * new page to be mapped directly into the secondary page table.
+                */
+               set_pte_at_notify(mm, address, page_table, entry);
                update_mmu_cache(vma, address, entry);
                if (old_page) {
                        /*
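
set_pte_at_notify() exists so that a registered change_pte MMU-notifier user (KVM's shadow page tables being the motivating case) learns about the replacement mapping immediately, instead of taking a secondary-MMU fault on the next access. A sketch of the macro along the lines of include/linux/mmu_notifier.h, reproduced from memory and hedged accordingly:

	#define set_pte_at_notify(__mm, __address, __ptep, __pte)	\
	({								\
		struct mm_struct *___mm = __mm;				\
		unsigned long ___address = __address;			\
		pte_t ___pte = __pte;					\
									\
		set_pte_at(___mm, ___address, __ptep, ___pte);		\
		mmu_notifier_change_pte(___mm, ___address, ___pte);	\
	})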
@@ -2644,6 +2644,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (!pte_none(*page_table))
                goto release;
+
        inc_mm_counter(mm, anon_rss);
        page_add_new_anon_rmap(page, vma, address);
        set_pte_at(mm, address, page_table, entry);