/*
 *	linux/mm/mlock.c
 *
 *	(C) Copyright 1995 Linus Torvalds
 *	(C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>

#include "internal.h"

int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);

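/*
 * Illustration (not part of this file): can_do_mlock() is what makes
 * mlock(2)/mlockall(2) fail with EPERM for an unprivileged task whose
 * RLIMIT_MEMLOCK soft limit is zero.  A hedged userspace sketch, assuming
 * a process without CAP_IPC_LOCK:
 *
 *	#include <sys/mman.h>
 *	#include <sys/resource.h>
 *
 *	static char buf[4096];
 *
 *	int main(void)
 *	{
 *		struct rlimit rl = { 0, 0 };
 *
 *		setrlimit(RLIMIT_MEMLOCK, &rl);	// no pages may be locked
 *		return mlock(buf, sizeof(buf));	// fails: can_do_mlock() == 0 -> EPERM
 *	}
 */
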
#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked.  So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

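/*
 * Hedged sketch of the rule above for lazy mlockers (e.g. the rmap walkers
 * used by vmscan): only treat a page as mlocked after taking mmap_sem for
 * read and re-checking VM_LOCKED under it.  This illustrates the locking
 * pattern; it is not a quote of the rmap code:
 *
 *	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
 *		if (vma->vm_flags & VM_LOCKED)
 *			mlock_vma_page(page);	// caller holds the page lock
 *		up_read(&vma->vm_mm->mmap_sem);
 *	}
 */
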
/*
 * LRU accounting for clear_page_mlock()
 */
void __clear_page_mlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));

	if (!page->mapping) {	/* truncated ? */
		return;
	}

	dec_zone_page_state(page, NR_MLOCK);
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * Page not on the LRU yet.  Flush all pagevecs and retry.
		 */
		lru_add_drain_all();
		if (!isolate_lru_page(page))
			putback_lru_page(page);
		else if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}

/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

/*
 * called from munlock()/munmap() path with page supposedly on the LRU.
 *
 * Note: unlike mlock_vma_page(), we can't just clear the PageMlocked
 * [in try_to_munlock()] and then attempt to isolate the page.  We must
 * isolate the page to keep others from messing with its unevictable
 * and mlocked state while trying to munlock.  However, we pre-clear the
 * mlocked state anyway as we might lose the isolation race and we might
 * not get another chance to clear PageMlocked.  If we successfully
 * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
 * mapping the page, it will restore the PageMlocked state, unless the page
 * is mapped in a non-linear vma.  So, we go ahead and SetPageMlocked(),
 * perhaps redundantly.
 * If we lose the isolation race, and the page is mapped by other VM_LOCKED
 * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
 * either of which will restore the PageMlocked state by calling
 * mlock_vma_page() above, if it can grab the vma's mmap sem.
 */
static void munlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (TestClearPageMlocked(page)) {
		dec_zone_page_state(page, NR_MLOCK);
		if (!isolate_lru_page(page)) {
			int ret = try_to_munlock(page);
			/*
			 * did try_to_munlock() succeed or punt?
			 */
			if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);

			putback_lru_page(page);
		} else {
			/*
			 * We lost the race.  Let try_to_unmap() deal
			 * with it.  At least we get the page state and
			 * mlock stats right.  However, page is still on
			 * the noreclaim list.  We'll fix that up when
			 * the page is eventually freed or we scan the
			 * noreclaim list.
			 */
			if (PageUnevictable(page))
				count_vm_event(UNEVICTABLE_PGSTRANDED);
			else
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
		}
	}
}

/**
 * __mlock_vma_pages_range() - mlock/munlock a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @mlock: 0 indicates munlock, otherwise mlock.
 *
 * If @mlock == 0, unlock an mlocked range;
 * else mlock the range of pages.  This takes care of making the pages present,
 * too.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   int mlock)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start;
	struct page *pages[16];		/* 16 gives a reasonable batch */
	int nr_pages = (end - start) / PAGE_SIZE;
	int ret = 0;
	int gup_flags = 0;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON(start < vma->vm_start);
	VM_BUG_ON(end   > vma->vm_end);
	VM_BUG_ON((!rwsem_is_locked(&mm->mmap_sem)) &&
		  (atomic_read(&mm->mm_users) != 0));

	/*
	 * mlock:   don't page populate if the vma has PROT_NONE permission.
	 * munlock: always munlock the pages even though the vma has
	 *          PROT_NONE permission.
	 */
	if (!mlock)
		gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS;

	if (vma->vm_flags & VM_WRITE)
		gup_flags |= GUP_FLAGS_WRITE;

	lru_add_drain_all();	/* push cached pages to LRU */

	while (nr_pages > 0) {
		int i;

		cond_resched();

		/*
		 * get_user_pages makes pages present if we are
		 * setting mlock, and this extra reference count will
		 * disable migration of this page.  However, page may
		 * still be truncated out from under us.
		 */
		ret = __get_user_pages(current, mm, addr,
				min_t(int, nr_pages, ARRAY_SIZE(pages)),
				gup_flags, pages, NULL);
		/*
		 * This can happen for, e.g., VM_NONLINEAR regions before
		 * a page has been allocated and mapped at a given offset,
		 * or for addresses that map beyond end of a file.
		 * We'll mlock the pages if/when they get faulted in.
		 */
		if (ret < 0)
			break;
		if (ret == 0) {
			/*
			 * We know the vma is there, so the only time
			 * we cannot get a single page should be an
			 * error (ret < 0) case.
			 */
			WARN_ON(1);
			break;
		}

		lru_add_drain();	/* push cached pages to LRU */

		for (i = 0; i < ret; i++) {
			struct page *page = pages[i];

			lock_page(page);
			/*
			 * Because we lock page here and migration is blocked
			 * by the elevated reference, we need only check for
			 * page truncation (file-cache only).
			 */
			if (page->mapping) {
				if (mlock)
					mlock_vma_page(page);
				else
					munlock_vma_page(page);
			}
			unlock_page(page);
			put_page(page);		/* ref from get_user_pages() */

			/*
			 * here we assume that get_user_pages() has given us
			 * a list of virtually contiguous pages.
			 */
			addr += PAGE_SIZE;	/* for next get_user_pages() */
			nr_pages--;
		}
	}

	lru_add_drain_all();	/* to update stats */

	return 0;	/* count entire vma as locked_vm */
}

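/*
 * Worked example of the batching above (illustrative numbers): with 4 KiB
 * pages, mlocking a 1 MiB range gives nr_pages = 256, so the loop makes
 * sixteen __get_user_pages() calls of at most ARRAY_SIZE(pages) == 16
 * pages each, locking each returned page before checking page->mapping.
 */
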
#else /* CONFIG_UNEVICTABLE_LRU */

/*
 * Just make pages present if VM_LOCKED.  No-op if unlocking.
 */
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   int mlock)
{
	if (mlock && (vma->vm_flags & VM_LOCKED))
		make_pages_present(start, end);
	return 0;
}
#endif /* CONFIG_UNEVICTABLE_LRU */

/**
 * mlock_vma_pages_range() - mlock pages in specified vma range.
 * @vma - the vma containing the specified address range
 * @start - starting address in @vma to mlock
 * @end - end address [+1] in @vma to mlock
 *
 * For mmap()/mremap()/expansion of mlocked vma.
 *
 * return 0 on success for "normal" vmas.
 *
 * return number of pages [> 0] to be removed from locked_vm on success
 * of "special" vmas.
 *
 * return negative error if the vma spanning @start-@end disappears while
 * the mmap semaphore is dropped.  Unlikely?
 */
long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int nr_pages = (end - start) / PAGE_SIZE;
	BUG_ON(!(vma->vm_flags & VM_LOCKED));

	/*
	 * filter unlockable vmas
	 */
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		goto no_mlock;

	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current))) {
		long error;
		downgrade_write(&mm->mmap_sem);

		error = __mlock_vma_pages_range(vma, start, end, 1);

		up_read(&mm->mmap_sem);
		/* vma can change or disappear */
		down_write(&mm->mmap_sem);
		vma = find_vma(mm, start);
		/* non-NULL vma must contain @start, but need to check @end */
		if (!vma || end > vma->vm_end)
			return -EAGAIN;

		return 0;	/* hide other errors from mmap(), et al */
	}

	/*
	 * User mapped kernel pages or huge pages:
	 * make these pages present to populate the ptes, but
	 * fall thru' to reset VM_LOCKED--no need to unlock, and
	 * return nr_pages so these don't get counted against task's
	 * locked limit.  huge pages are already counted against
	 * locked vm limit.
	 */
	make_pages_present(start, end);

no_mlock:
	vma->vm_flags &= ~VM_LOCKED;	/* and don't come back! */
	return nr_pages;		/* error or pages NOT mlocked */
}

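/*
 * Hedged sketch (not a quote of mm/mmap.c): a caller such as mmap_region()
 * handling MAP_LOCKED is expected to consume the return value roughly like
 * this, counting only "normal" vma pages toward locked_vm:
 *
 *	long nr_pages = mlock_vma_pages_range(vma, addr, addr + len);
 *	if (nr_pages < 0)
 *		return nr_pages;	// vma vanished while mmap_sem was dropped
 *	mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages;
 */
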
/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			   unsigned long start, unsigned long end)
{
	vma->vm_flags &= ~VM_LOCKED;
	__mlock_vma_pages_range(vma, start, end, 0);
}

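/*
 * Illustration (an assumption about the callers, not code from this file):
 * the munmap()/exit() paths are expected to invoke the routine above for
 * every VM_LOCKED vma before the vma is torn down, e.g. via a helper along
 * the lines of:
 *
 *	static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 *	{
 *		munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
 *	}
 */
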
/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes via make_pages_present().
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = newflags & VM_LOCKED;

	if (newflags == vma->vm_flags ||
			(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out;	/* don't set VM_LOCKED, don't count */

	if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current)) {
		if (lock)
			make_pages_present(start, end);
		goto out;	/* don't set VM_LOCKED, don't count */
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
	 */
	vma->vm_flags = newflags;

	if (lock) {
		/*
		 * mmap_sem is currently held for write.  Downgrade the write
		 * lock to a read lock so that other faults, mmap scans, etc.
		 * can proceed while we fault in all pages.
		 */
		downgrade_write(&mm->mmap_sem);

		ret = __mlock_vma_pages_range(vma, start, end, 1);
		if (ret > 0) {
			mm->locked_vm -= ret;
			ret = 0;
		}
		/*
		 * Need to reacquire mmap sem in write mode, as our callers
		 * expect this.  We have no support for atomically upgrading
		 * a sem to write, so we need to check for ranges while sem
		 * is unlocked.
		 */
		up_read(&mm->mmap_sem);
		/* vma can change or disappear */
		down_write(&mm->mmap_sem);
		*prev = find_vma(mm, start);
		/* non-NULL *prev must contain @start, but need to check @end */
		if (!(*prev) || end > (*prev)->vm_end)
			ret = -EAGAIN;
	} else {
		/*
		 * TODO: for unlocking, pages will already be resident, so
		 * we don't need to wait for allocations/reclaim/pagein, ...
		 * However, unlocking a very large region can still take a
		 * while.  Should we downgrade the semaphore for both lock
		 * AND unlock ?
		 */
		__mlock_vma_pages_range(vma, start, end, 0);
	}

out:
	*prev = vma;
	return ret;
}

static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma_prev(current->mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

asmlinkage long sys_mlock(unsigned long start, size_t len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	return error;
}

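/*
 * Worked example of the limit check above (illustrative numbers): with
 * 4 KiB pages and RLIMIT_MEMLOCK = 64 KiB, lock_limit becomes 16 pages.
 * A task that already has 10 pages in locked_vm and asks to mlock() a
 * further 32 KiB (8 pages) computes locked = 18 > 16, so the call fails
 * with ENOMEM unless the task has CAP_IPC_LOCK.
 */
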
asmlinkage long sys_munlock(unsigned long start, size_t len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

static int do_mlockall(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}

asmlinkage long sys_mlockall(int flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	down_write(&current->mm->mmap_sem);

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}

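/*
 * Illustration (not part of this file): a typical latency-sensitive
 * userspace caller pins everything it has mapped, and everything it will
 * map, with a single call.  A hedged sketch:
 *
 *	#include <sys/mman.h>
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
 *		perror("mlockall");	// EPERM/ENOMEM per the checks above
 */
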
asmlinkage long sys_munlockall(void)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

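/*
 * Worked example of the rounding above (illustrative numbers): with 4 KiB
 * pages, an SHM_LOCK of a 10000-byte segment charges
 * (10000 + 4095) >> 12 = 3 pages to user->locked_shm.  Userspace reaches
 * this path via something like:
 *
 *	#include <sys/shm.h>
 *
 *	shmctl(shmid, SHM_LOCK, NULL);	// accounted per-user, not per-task
 */
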
void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}