#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "internal.h"

static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

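/*
 * pte-level worker for follow_page_mask(): look the address up under the
 * pte lock, honouring FOLL_MIGRATION, FOLL_NUMA and FOLL_WRITE, and apply
 * the FOLL_GET/FOLL_TOUCH/FOLL_MLOCK side effects before returning the page.
 */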
static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			int ret;

			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
		int ret;
		get_page(page);
		pte_unmap_unlock(ptep, ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		if (ret)
			return ERR_PTR(ret);
		goto retry;
	}

	if (flags & FOLL_GET)
		get_page(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return no_page_table(vma, flags);
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
		return no_page_table(vma, flags);
	if (likely(!pmd_trans_huge(*pmd)))
		return follow_page_pte(vma, address, pmd, flags);

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags);
	}
	if (flags & FOLL_SPLIT) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
		} else {
			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags);
	}

	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	*page_mask = HPAGE_PMD_NR - 1;
	return page;
}
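/*
 * Illustrative sketch (not part of the kernel sources): a caller is expected
 * to triage the return value roughly the way __get_user_pages() below does;
 * "foll_flags" and the ellipses are placeholders:
 *
 *	page = follow_page_mask(vma, address, foll_flags, &page_mask);
 *	if (!page)
 *		... fault the page in and retry the lookup ...
 *	else if (PTR_ERR(page) == -EEXIST)
 *		... valid pfn but no struct page: skip this address ...
 *	else if (IS_ERR(page))
 *		... hard error, propagate PTR_ERR(page) ...
 *	else
 *		... use page; page_mask says how many following addresses
 *		    fall into the same (huge) page ...
 */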

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned int fault_flags = 0;
	int ret;

	/* mlock all present pages, but do not fault in new pages */
	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
		return -ENOENT;
	/* For mm_populate(), just skip the stack guard page. */
	if ((*flags & FOLL_POPULATE) &&
			(stack_guard_page_start(vma, address) ||
			 stack_guard_page_end(vma, address + PAGE_SIZE)))
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
			return -EFAULT;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking)
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags &= ~FOLL_WRITE;
	return 0;
}

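/*
 * Check whether the vma allows the requested gup_flags at all: reject
 * VM_IO/VM_PFNMAP mappings outright, and only let FOLL_FORCE override a
 * missing VM_WRITE on private COW mappings (or a missing VM_READ where
 * VM_MAYREAD is still set).
 */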
static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_WRITE) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags)) {
				WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
				return -EFAULT;
			}
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * this case.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long i = 0;
	unsigned int page_mask;
	struct vm_area_struct *vma = NULL;

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				int ret;
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					return i ? : ret;
				page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags))
				return i ? : -EFAULT;
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (unlikely(fatal_signal_pending(current)))
			return i ? i : -ERESTARTSYS;
		cond_resched();
		page = follow_page_mask(vma, start, foll_flags, &page_mask);
		if (!page) {
			int ret;
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				return i ? i : ret;
			case -EBUSY:
				return i;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page.
			 */
			goto next_page;
		} else if (IS_ERR(page)) {
			return i ? i : PTR_ERR(page);
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
	return i;
}
EXPORT_SYMBOL(__get_user_pages);

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * This is meant to be called in the specific scenario where for locking
 * reasons we try to access user memory in atomic context (within a
 * pagefault_disable() section), the access returns -EFAULT, and we want to
 * resolve the user fault before trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This has the same semantics wrt the @mm->mmap_sem as does filemap_fault().
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags)
{
	struct vm_area_struct *vma;
	vm_flags_t vm_flags;
	int ret;

	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
	if (!(vm_flags & vma->vm_flags))
		return -EFAULT;

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return -EHWPOISON;
		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
			return -EFAULT;
		BUG();
	}
	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
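/*
 * Illustrative sketch (not part of the kernel sources), modelled on the
 * futex usage mentioned above: resolve a fault taken with page faults
 * disabled and then retry the atomic access.  "uaddr" and "val" are
 * placeholders of the example; pass FAULT_FLAG_WRITE instead of 0 when the
 * retried access writes, as the futex code does.
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(&val, uaddr, sizeof(val));
 *	pagefault_enable();
 *	if (ret) {
 *		down_read(&mm->mmap_sem);
 *		ret = fixup_user_fault(current, mm, (unsigned long)uaddr, 0);
 *		up_read(&mm->mmap_sem);
 *		if (!ret)
 *			... retry the pagefault_disable()d access ...
 *	}
 */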
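/*
 * Common worker for get_user_pages(), get_user_pages_locked() and
 * get_user_pages_unlocked(): calls __get_user_pages() and, when @locked is
 * non-NULL and the fault handler dropped mmap_sem (VM_FAULT_RETRY), re-takes
 * the semaphore and retries the faulting address with FAULT_FLAG_TRIED until
 * all requested pages have been handled.
 */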
static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						int write, int force,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked, bool notify_drop,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (!pages)
			/* If it's a prefault don't insist harder */
			return ret;

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/* VM_FAULT_RETRY didn't trigger */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
		pages += ret;
		start += ret << PAGE_SHIFT;

		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * without FAULT_FLAG_ALLOW_RETRY but with
		 * FAULT_FLAG_TRIED.
		 */
		*locked = 1;
		lock_dropped = true;
		down_read(&mm->mmap_sem);
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, NULL);
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		pages++;
		start += PAGE_SIZE;
	}
	if (notify_drop && lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  to:
 *
 *      int locked = 1;
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
 *      if (locked)
 *          up_read(&mm->mmap_sem);
 */
long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   int write, int force, struct page **pages,
			   int *locked)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
				       pages, NULL, locked, true, FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);

/*
 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows the
 * caller to pass additional gup_flags as the last parameter (like
 * FOLL_HWPOISON).
 *
 * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
 * caller if required (just like with __get_user_pages). "FOLL_GET",
 * "FOLL_WRITE" and "FOLL_FORCE" are set implicitly as needed
 * according to the parameters "pages", "write", "force"
 * respectively.
 */
__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
					       unsigned long start, unsigned long nr_pages,
					       int write, int force, struct page **pages,
					       unsigned int gup_flags)
{
	long ret;
	int locked = 1;
	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
				      pages, NULL, &locked, false, gup_flags);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(__get_user_pages_unlocked);

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  with:
 *
 *      get_user_pages_unlocked(tsk, mm, ..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast so
 * get_user_pages_fast should be used instead, if the two parameters
 * "tsk" and "mm" are respectively equal to current and current->mm,
 * or if "force" shall be set to 1 (get_user_pages_fast misses the
 * "force" parameter).
 */
long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
			     unsigned long start, unsigned long nr_pages,
			     int write, int force, struct page **pages)
{
	return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
					 force, pages, FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_unlocked);

/*
 * get_user_pages() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to by the caller
 * @force:	whether to force access even when user mapping is currently
 *		protected (but never forces write access to shared mapping).
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If write=0, the page must not be written to. If the page is written to,
 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 * after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 *
 * get_user_pages should be phased out in favor of
 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 * should use get_user_pages because it cannot pass
 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages, int write,
		int force, struct page **pages, struct vm_area_struct **vmas)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
				       pages, vmas, NULL, false, FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
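/*
 * Illustrative sketch (not part of the kernel sources) of the pin, use,
 * dirty and release pattern described above; "uaddr" is a placeholder for
 * some user virtual address:
 *
 *	struct page *page;
 *	long got;
 *
 *	down_read(&current->mm->mmap_sem);
 *	got = get_user_pages(current, current->mm, uaddr, 1, 1, 0,
 *			     &page, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (got == 1) {
 *		... read or write the data through kmap(page) ...
 *		set_page_dirty_lock(page);
 *		put_page(page);
 *	}
 */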

/**
 * populate_vma_page_range() - populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @nonblocking: if non-NULL, may be cleared when mmap_sem is released
 *		(see below)
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held.
 *
 * If @nonblocking is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @nonblocking is non-NULL, it must be held for read only and may be
 * released.  If it's released, *@nonblocking will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end & ~PAGE_MASK);
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end > vma->vm_end, vma);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);

	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
	if (vma->vm_flags & VM_LOCKONFAULT)
		gup_flags &= ~FOLL_POPULATE;
	if (vma->vm_flags & VM_LOCKED)
		gup_flags |= FOLL_SPLIT;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by page_cache_release() or put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */

/*
 * Generic RCU Fast GUP
 *
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks. Thus the walker needs to be
 * protected from page table pages being freed from under it, and should
 * block any THP splits.
 *
 * One way to achieve this is to have the walker disable interrupts, and
 * rely on IPIs from the TLB flushing code blocking before the page table
 * pages are freed. This is unsuitable for architectures that do not need
 * to broadcast an IPI when invalidating TLBs.
 *
 * Another way to achieve this is to batch up page table containing pages
 * belonging to more than one mm_user, then rcu_sched a callback to free those
 * pages. Disabling interrupts will allow the fast_gup walker to both block
 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
 * (which is a relatively rare event). The code below adopts this strategy.
 *
 * Before activating this code, please be aware that the following assumptions
 * are currently made:
 *
 *  *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free
 *     pages containing page tables.
 *
 *  *) ptes can be read atomically by the architecture.
 *
 *  *) access_ok is sufficient to validate userspace address ranges.
 *
 * The last two assumptions can be relaxed by the addition of helper functions.
 *
 * This code is based heavily on the PowerPC implementation by Nick Piggin.
 */
#ifdef CONFIG_HAVE_GENERIC_RCU_GUP

#ifdef __HAVE_ARCH_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	pte_t *ptep, *ptem;
	int ret = 0;

	ptem = ptep = pte_offset_map(&pmd, addr);
	do {
		/*
		 * In the line below we are assuming that the pte can be read
		 * atomically. If this is not the case for your architecture,
		 * please wrap this in a helper function!
		 *
		 * for an example see gup_get_pte in arch/x86/mm/gup.c
		 */
		pte_t pte = READ_ONCE(*ptep);
		struct page *head, *page;

		/*
		 * Similar to the PMD case below, NUMA hinting must take slow
		 * path using the pte_protnone check.
		 */
		if (!pte_present(pte) || pte_special(pte) ||
		    pte_protnone(pte) || (write && !pte_write(pte)))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		head = compound_head(page);

		if (!page_cache_get_speculative(head))
			goto pte_unmap;

		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			goto pte_unmap;
		}

		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	pte_unmap(ptem);
	return ret;
}
#else

/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * __get_user_pages_fast implementation that can pin pages. Thus it's still
 * useful to have gup_huge_pmd even if we can't operate on ptes.
 */
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	return 0;
}
#endif /* __HAVE_ARCH_PTE_SPECIAL */

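/*
 * The gup_huge_{pmd,pud,pgd} helpers below share one lockless pattern:
 * record the pages covered by [addr, end), take that many speculative
 * references on the head page, then re-check that the page table entry
 * has not changed under us and drop the references again if it has.
 */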
static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (write && !pmd_write(orig))
		return 0;

	refs = 0;
	head = pmd_page(orig);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (write && !pud_write(orig))
		return 0;

	refs = 0;
	head = pud_page(orig);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
			unsigned long end, int write,
			struct page **pages, int *nr)
{
	int refs;
	struct page *head, *page;

	if (write && !pgd_write(orig))
		return 0;

	refs = 0;
	head = pgd_page(orig);
	page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;

		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;

			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
				pages, nr))
				return 0;

		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
			/*
			 * architectures can have different formats for the
			 * hugetlbfs pmd and the THP pmd
			 */
			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
					 PMD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_huge(pud))) {
			if (!gup_huge_pud(pud, pudp, addr, next, write,
					  pages, nr))
				return 0;
		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
					 PUD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
 * the regular GUP. It will only return non-negative values.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					start, len)))
		return 0;

	/*
	 * Disable interrupts.  We use the nested form as we can already have
	 * interrupts disabled by get_futex_key.
	 *
	 * With interrupts disabled, we block page table pages from being
	 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
	 * for more details.
	 *
	 * We do not adopt an rcu_read_lock(.) here as we also want to
	 * block IPIs that come from THPs splitting.
	 */

	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = READ_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (unlikely(pgd_huge(pgd))) {
			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
					  pages, &nr))
				break;
		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
					 PGDIR_SHIFT, next, write, pages, &nr))
				break;
		} else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int nr, ret;

	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	ret = nr;

	if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(current, mm, start,
					      nr_pages - nr, write, 0, pages);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}

#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
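/*
 * Illustrative sketch (not part of the kernel sources): a direct-IO style
 * user of get_user_pages_fast() above might pin a user buffer like this;
 * "buf" and "npages" are placeholders:
 *
 *	struct page **pages = kmalloc(npages * sizeof(*pages), GFP_KERNEL);
 *	int pinned;
 *
 *	pinned = get_user_pages_fast((unsigned long)buf, npages, 1, pages);
 *	... program DMA for the first "pinned" pages ...
 *	while (pinned-- > 0)
 *		put_page(pages[pinned]);
 *	kfree(pages);
 *
 * A negative return means no pages were pinned; a short count means only
 * that many leading pages may be used and must be released.
 */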