mm: update min_free_kbytes from khugepaged after core initialization
diff --git a/mm/nommu.c b/mm/nommu.c
index 6402f2715d4842096dea6035e719b58ed487fcea..c8bd59a03c71563b73c146d35e00550b8119d4c0 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -139,7 +139,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                if (pages) {
                        pages[i] = virt_to_page(start);
                        if (pages[i])
-                               page_cache_get(pages[i]);
+                               get_page(pages[i]);
                }
                if (vmas)
                        vmas[i] = vma;
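
The first hunk is mechanical: page_cache_get() was a thin alias for get_page() (with page_cache_release() as the matching alias for put_page()), so dropping the wrapper changes no reference-counting behaviour. A sketch of the old aliases as they appeared in include/linux/pagemap.h before their removal (from memory, so treat the exact form as approximate):

	#define page_cache_get(page)		get_page(page)
	#define page_cache_release(page)	put_page(page)
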
@@ -159,8 +159,7 @@ finish_or_fault:
  *   slab page or a secondary page from a compound page
  * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
-long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-                   unsigned long start, unsigned long nr_pages,
+long get_user_pages(unsigned long start, unsigned long nr_pages,
                    int write, int force, struct page **pages,
                    struct vm_area_struct **vmas)
 {
@@ -171,18 +170,16 @@ long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
        if (force)
                flags |= FOLL_FORCE;
 
-       return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
-                               NULL);
+       return __get_user_pages(current, current->mm, start, nr_pages, flags,
+                               pages, vmas, NULL);
 }
 EXPORT_SYMBOL(get_user_pages);
 
-long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
-                          unsigned long start, unsigned long nr_pages,
-                          int write, int force, struct page **pages,
-                          int *locked)
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
+                           int write, int force, struct page **pages,
+                           int *locked)
 {
-       return get_user_pages(tsk, mm, start, nr_pages, write, force,
-                             pages, NULL);
+       return get_user_pages(start, nr_pages, write, force, pages, NULL);
 }
 EXPORT_SYMBOL(get_user_pages_locked);
 
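The two hunks above drop the explicit task/mm parameters from get_user_pages() and get_user_pages_locked(); both helpers now operate implicitly on current and current->mm. A minimal before/after sketch of a caller pinning one page of its own address space (addr and pages are illustrative names; the caller still takes mmap_sem for read, as before):

	struct page *pages[1];
	long ret;

	down_read(&current->mm->mmap_sem);

	/* Old calling convention: task and mm named explicitly. */
	ret = get_user_pages(current, current->mm, addr, 1,
			     1 /* write */, 0 /* force */, pages, NULL);

	/* New calling convention: current/current->mm are implied. */
	ret = get_user_pages(addr, 1, 1 /* write */, 0 /* force */,
			     pages, NULL);

	up_read(&current->mm->mmap_sem);
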
@@ -193,19 +190,18 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 {
        long ret;
        down_read(&mm->mmap_sem);
-       ret = get_user_pages(tsk, mm, start, nr_pages, write, force,
-                            pages, NULL);
+       ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags, pages,
+                               NULL, NULL);
        up_read(&mm->mmap_sem);
        return ret;
 }
 EXPORT_SYMBOL(__get_user_pages_unlocked);
 
-long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
-                            unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                             int write, int force, struct page **pages)
 {
-       return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
-                                        force, pages, 0);
+       return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
+                                        write, force, pages, 0);
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);
 
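Here get_user_pages_unlocked() likewise loses tsk/mm, while __get_user_pages_unlocked() now passes its gup_flags straight down to __get_user_pages() instead of round-tripping through get_user_pages(), which had no way to carry them. The write/force booleans of the public API translate into FOLL_* bits as in the earlier hunk; a sketch of that translation, assuming the write branch mirrors the force branch visible above:

	int flags = 0;

	if (write)
		flags |= FOLL_WRITE;	/* caller wants write access */
	if (force)
		flags |= FOLL_FORCE;	/* override VMA protections */
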
@@ -1061,7 +1057,7 @@ static unsigned long determine_vm_flags(struct file *file,
 {
        unsigned long vm_flags;
 
-       vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
+       vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);
        /* vm_flags |= mm->def_flags; */
 
        if (!(capabilities & NOMMU_MAP_DIRECT)) {
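
The final hunk tracks an interface change in calc_vm_prot_bits(), which gained a second argument for a memory protection key (pkey); nommu kernels have no use for protection keys, so 0 is passed. Roughly, the helper in include/linux/mman.h translates PROT_* bits into VM_* bits and lets the architecture fold in any pkey-derived bits (a from-memory sketch, not a verbatim copy):

	static inline unsigned long
	calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
	{
		return _calc_vm_trans(prot, PROT_READ,  VM_READ) |
		       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
		       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
		       arch_calc_vm_prot_bits(prot, pkey);
	}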