[PATCH] msync(): perform dirty page levelling
mm/msync.c
/*
 *	linux/mm/msync.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * The msync() system call.
 */
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/writeback.h>
#include <linux/file.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

static unsigned long msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;
	int progress = 0;
	unsigned long ret = 0;

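	/*
	 * The pte walk below drops the page-table lock periodically: once
	 * "progress" reaches 64 (dirty ptes are weighted more heavily) and
	 * need_resched() or need_lockbreak() says so, we break out, unlock,
	 * cond_resched() and restart at the current address via "again".
	 */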
again:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;

		if (progress >= 64) {
			progress = 0;
			if (need_resched() || need_lockbreak(ptl))
				break;
		}
		progress++;
		if (!pte_present(*pte))
			continue;
		if (!pte_maybe_dirty(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		if (ptep_clear_flush_dirty(vma, addr, pte) ||
				page_test_and_clear_dirty(page))
			ret += set_page_dirty(page);
		progress += 3;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	if (addr != end)
		goto again;
	return ret;
}

static inline unsigned long msync_pmd_range(struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		ret += msync_pte_range(vma, pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
	return ret;
}

static inline unsigned long msync_pud_range(struct vm_area_struct *vma,
			pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;
	unsigned long ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		ret += msync_pmd_range(vma, pud, addr, next);
	} while (pud++, addr = next, addr != end);
	return ret;
}

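/*
 * Walk the page tables for [addr, end) in the usual pgd/pud/pmd/pte
 * nesting, propagating hardware dirty bits into the page cache via
 * set_page_dirty().  Returns the number of pages newly marked dirty.
 */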
static unsigned long msync_page_range(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long ret = 0;

	/* For hugepages we can't go walking the page table normally,
	 * but that's ok, hugetlbfs is memory based, so we don't need
	 * to do anything more on an msync().
	 */
	if (vma->vm_flags & VM_HUGETLB)
		return 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(vma->vm_mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		ret += msync_pud_range(vma, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	return ret;
}

/*
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC does not start I/O (it used to, up to 2.5.67).  Instead, it just
 * marks the relevant pages dirty.  The application may then run fsync() to
 * write out the dirty pages, wait on the writeout and check the result.
 * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
 * async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
 * applications.
 * (A userspace sketch of this pattern follows the listing below.)
 */
static int msync_interval(struct vm_area_struct *vma, unsigned long addr,
			unsigned long end, int flags,
			unsigned long *nr_pages_dirtied)
{
	int ret = 0;
	struct file *file = vma->vm_file;

	if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
		return -EBUSY;

	if (file && (vma->vm_flags & VM_SHARED)) {
		*nr_pages_dirtied = msync_page_range(vma, addr, end);

		if (flags & MS_SYNC) {
			struct address_space *mapping = file->f_mapping;
			int err;

			ret = filemap_fdatawrite(mapping);
			if (file->f_op && file->f_op->fsync) {
				/*
				 * We don't take i_mutex here because mmap_sem
				 * is already held.
				 */
				err = file->f_op->fsync(file, file->f_dentry, 1);
				if (err && !ret)
					ret = err;
			}
			err = filemap_fdatawait(mapping);
			if (!ret)
				ret = err;
		}
	}
	return ret;
}

asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
{
	unsigned long end;
	struct vm_area_struct *vma;
	int unmapped_error, error = -EINVAL;
	int done = 0;

	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
		goto out;
	if (start & ~PAGE_MASK)
		goto out;
	if ((flags & MS_ASYNC) && (flags & MS_SYNC))
		goto out;
	error = -ENOMEM;
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)
		goto out;
	error = 0;
	if (end == start)
		goto out;
	/*
	 * If the interval [start,end) covers some unmapped address ranges,
	 * just ignore them, but return -ENOMEM at the end.
	 */
	down_read(&current->mm->mmap_sem);
	if (flags & MS_SYNC)
		current->flags |= PF_SYNCWRITE;
	vma = find_vma(current->mm, start);
	unmapped_error = 0;
	do {
		unsigned long nr_pages_dirtied = 0;
		struct file *file;

		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out_unlock;
		/* Here start < vma->vm_end. */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
		}
		/* Here vma->vm_start <= start < vma->vm_end. */
		if (end <= vma->vm_end) {
			if (start < end) {
				error = msync_interval(vma, start, end, flags,
							&nr_pages_dirtied);
				if (error)
					goto out_unlock;
			}
			error = unmapped_error;
			done = 1;
		} else {
			/* Here vma->vm_start <= start < vma->vm_end < end. */
			error = msync_interval(vma, start, vma->vm_end, flags,
						&nr_pages_dirtied);
			if (error)
				goto out_unlock;
		}
		file = vma->vm_file;
		start = vma->vm_end;
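		/*
		 * Dirty page levelling: if MS_ASYNC just dirtied pages in a
		 * file-backed mapping, throttle this caller with
		 * balance_dirty_pages_ratelimited_nr().  That call may block,
		 * so pin the file and drop mmap_sem around it, then re-find
		 * the vma at the new start address.
		 */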
		if ((flags & MS_ASYNC) && file && nr_pages_dirtied) {
			get_file(file);
			up_read(&current->mm->mmap_sem);
			balance_dirty_pages_ratelimited_nr(file->f_mapping,
							nr_pages_dirtied);
			fput(file);
			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm, start);
		} else {
			vma = vma->vm_next;
		}
	} while (!done);
out_unlock:
	current->flags &= ~PF_SYNCWRITE;
	up_read(&current->mm->mmap_sem);
out:
	return error;
}
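
For illustration only, and not part of the patch above: a minimal userspace sketch of the MS_ASYNC-then-fsync() pattern described in the comment above msync_interval(). The file name data.bin and the one-page length are assumptions made for this example.

/* userspace sketch, not kernel code: msync(MS_ASYNC) followed by fsync() */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4096;			/* assume at least one page of file data */
	int fd = open("data.bin", O_RDWR);	/* hypothetical existing file */
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	char *map = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	memset(map, 'x', len);			/* dirty the shared mapping */

	/* MS_ASYNC: mark the pages dirty, start no I/O */
	if (msync(map, len, MS_ASYNC) < 0)
		perror("msync");

	/* write out the dirty pages, wait, and check the result */
	if (fsync(fd) < 0)
		perror("fsync");

	munmap(map, len);
	close(fd);
	return EXIT_SUCCESS;
}

Alternatively, as the in-file comment notes, posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED) can be used instead of fsync() to kick off asynchronous writeout without waiting on it.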