/*
 *	linux/mm/msync.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * The msync() system call.
 */
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/writeback.h>
#include <linux/file.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
static unsigned long msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;
	int progress = 0;
	unsigned long ret = 0;

again:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;

		if (progress >= 64) {
			progress = 0;
			if (need_resched() || need_lockbreak(ptl))
				break;
		}
		progress++;
		if (!pte_present(*pte))
			continue;
		if (!pte_maybe_dirty(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		if (ptep_clear_flush_dirty(vma, addr, pte) ||
				page_test_and_clear_dirty(page))
			ret += set_page_dirty(page);
		progress += 3;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	if (addr != end)
		goto again;
	return ret;
}
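
/*
 * The helpers below are a standard top-down page table walk:
 * msync_page_range starts at the pgd level, and msync_pud_range and
 * msync_pmd_range split the range at pud/pmd boundaries, until
 * msync_pte_range (above) does the real work on the leaf ptes.
 * Each level passes back the number of pages it freshly dirtied.
 */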
static inline unsigned long msync_pmd_range(struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		ret += msync_pte_range(vma, pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
	return ret;
}
static inline unsigned long msync_pud_range(struct vm_area_struct *vma,
			pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;
	unsigned long ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		ret += msync_pmd_range(vma, pud, addr, next);
	} while (pud++, addr = next, addr != end);
	return ret;
}
static unsigned long msync_page_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long ret = 0;

	/* For hugepages we can't go walking the page table normally,
	 * but that's ok, hugetlbfs is memory based, so we don't need
	 * to do anything more on an msync().
	 */
	if (vma->vm_flags & VM_HUGETLB)
		return 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(vma->vm_mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		ret += msync_pud_range(vma, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	return ret;
}
/*
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC does not start I/O (it used to, up to 2.5.67).  Instead, it just
 * marks the relevant pages dirty.  The application may now run fsync() to
 * write out the dirty pages and wait on the writeout and check the result.
 * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
 * async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
 * applications.
 */
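
/*
 * A minimal userspace sketch of the pattern described above (illustrative
 * only, not part of this file; addr, length and fd are assumed to come
 * from an earlier mmap()/open()):
 *
 *	msync(addr, length, MS_ASYNC);	<- just marks the pages dirty
 *	...
 *	fsync(fd);			<- starts writeout and waits on it
 *
 * or, to kick off async writeout immediately instead:
 *
 *	msync(addr, length, MS_ASYNC);
 *	posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
 */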
static int msync_interval(struct vm_area_struct *vma, unsigned long addr,
			unsigned long end, int flags,
			unsigned long *nr_pages_dirtied)
{
	int ret = 0;
	struct file *file = vma->vm_file;

	if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
		return -EBUSY;

	if (file && (vma->vm_flags & VM_SHARED)) {
		*nr_pages_dirtied = msync_page_range(vma, addr, end);

		if (flags & MS_SYNC) {
			struct address_space *mapping = file->f_mapping;
			int err;

			ret = filemap_fdatawrite(mapping);
			if (file->f_op && file->f_op->fsync) {
				/*
				 * We don't take i_mutex here because mmap_sem
				 * is already held.
				 */
				err = file->f_op->fsync(file,file->f_dentry,1);
				if (err && !ret)
					ret = err;
			}
			err = filemap_fdatawait(mapping);
			if (err && !ret)
				ret = err;
		}
	}
	return ret;
}
asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
{
	unsigned long end;
	struct vm_area_struct *vma;
	int unmapped_error, error = -EINVAL;
	int done = 0;

	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
		goto out;
	if (start & ~PAGE_MASK)
		goto out;
	if ((flags & MS_ASYNC) && (flags & MS_SYNC))
		goto out;
	error = -ENOMEM;
	len = (len + ~PAGE_MASK) & PAGE_MASK;
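	/*
	 * (The rounding above: with 4 KB pages, ~PAGE_MASK is 4095, so
	 * e.g. len = 1 becomes 4096 and len = 8192 is unchanged - len
	 * is rounded up to a whole number of pages.)
	 */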
	end = start + len;
	if (end < start)
		goto out;
	error = 0;
	if (end == start)
		goto out;
	/*
	 * If the interval [start,end) covers some unmapped address ranges,
	 * just ignore them, but return -ENOMEM at the end.
	 */
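	/*
	 * (E.g. a range laid out as vma / hole / vma still gets both
	 * mapped pieces synced; the hole is reported by the -ENOMEM.)
	 */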
	down_read(&current->mm->mmap_sem);
	if (flags & MS_SYNC)
		current->flags |= PF_SYNCWRITE;
	vma = find_vma(current->mm, start);
	unmapped_error = 0;
	do {
		unsigned long nr_pages_dirtied = 0;
		struct file *file;

		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out_unlock;
		/* Here start < vma->vm_end. */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
		}
		/* Here vma->vm_start <= start < vma->vm_end. */
		if (end <= vma->vm_end) {
			if (start < end) {
				error = msync_interval(vma, start, end, flags,
							&nr_pages_dirtied);
				if (error)
					goto out_unlock;
			}
			error = unmapped_error;
			done = 1;
		} else {
			/* Here vma->vm_start <= start < vma->vm_end < end. */
			error = msync_interval(vma, start, vma->vm_end, flags,
						&nr_pages_dirtied);
			if (error)
				goto out_unlock;
		}
		file = vma->vm_file;
		start = vma->vm_end;
		if ((flags & MS_ASYNC) && file && nr_pages_dirtied) {
			get_file(file);
			up_read(&current->mm->mmap_sem);
			balance_dirty_pages_ratelimited_nr(file->f_mapping,
							nr_pages_dirtied);
			fput(file);
			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm, start);
		} else
			vma = vma->vm_next;
	} while (!done);
out_unlock:
	current->flags &= ~PF_SYNCWRITE;
	up_read(&current->mm->mmap_sem);
out:
	return error;
}