/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@redhat.com>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

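/*
 * Bottom level of the page-table walk: rewrite every pte in [addr, end)
 * under the pte lock.  Present ptes are given the new protections; with
 * CONFIG_MIGRATION, writable migration entries are downgraded to
 * read-only ones, since a proper protection check is awkward at this
 * point.
 */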
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;

			/*
			 * Avoid an SMP race with hardware-updated dirty/clean
			 * bits by wiping the pte and then setting the new pte
			 * into place.
			 */
			ptent = pte_modify(ptep_get_and_clear(mm, addr, pte), newprot);
			set_pte_at(mm, addr, pte, ptent);
			lazy_mmu_prot_update(ptent);
#ifdef CONFIG_MIGRATION
		} else if (!pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult here, so
				 * just be safe and disable write access.
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
						swp_entry_to_pte(entry));
			}
#endif
		}

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}

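/*
 * Middle levels of the four-level walk: iterate over the pmd and pud
 * entries covering [addr, end), skip holes, and recurse one level down
 * for each populated entry.
 */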
static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		change_pte_range(mm, pmd, addr, next, newprot);
	} while (pmd++, addr = next, addr != end);
}

static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		change_pmd_range(mm, pud, addr, next, newprot);
	} while (pud++, addr = next, addr != end);
}

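/*
 * Top of the walk: flush the cache for the whole range up front, rewrite
 * protections pgd entry by pgd entry, then flush the TLB once for the
 * range rather than once per page.
 */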
static void change_protection(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		change_pud_range(mm, pgd, addr, next, newprot);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_range(vma, start, end);
}

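/*
 * Apply newflags to the stretch [start, end) of vma: account for newly
 * writable private pages, try to merge with the neighbouring vmas, split
 * off partial vmas at either end if merging fails, then rewrite the page
 * tables (via the hugetlb helper where applicable) and fix up the per-mm
 * statistics.
 */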
static int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgprot_t newprot;
	pgoff_t pgoff;
	int error;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.
	 *
	 * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting
	 * a MAP_NORESERVE private mapping to writable will now reserve.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) {
			charged = nrpages;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	newprot = protection_map[newflags & 0xf];

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = newprot;
	if (is_vm_hugetlb_page(vma))
		hugetlb_change_protection(vma, start, end, newprot);
	else
		change_protection(vma, start, end, newprot);
	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

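/*
 * The mprotect(2) entry point: validate the arguments, widen the range
 * for PROT_GROWSDOWN/PROT_GROWSUP mappings, then walk the vmas covering
 * [start, end) and apply mprotect_fixup() to each in turn, failing with
 * -ENOMEM if the range contains a hole.
 */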
asmlinkage long
sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP))	/* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma_prev(current->mm, start, &prev);
	error = -ENOMEM;
	if (!vma)
		goto out;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/*
		 * newflags >> 4 shifts the VM_MAY* bits into the VM_*
		 * positions, so this rejects any requested permission
		 * whose VM_MAY* counterpart is not set.
		 */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
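
A minimal userspace sketch of how this syscall is typically reached, via
the mprotect(2) libc wrapper. This example is illustrative only and is
not part of the file above; it assumes a Linux system where <sys/mman.h>
exposes MAP_ANONYMOUS and the page size is available via sysconf().

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);

		/* One anonymous, private, read-write page. */
		char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return 1;

		strcpy(p, "hello");	/* still writable */

		/* Drop write permission; this lands in sys_mprotect(). */
		if (mprotect(p, page, PROT_READ) != 0)
			return 1;

		printf("%s\n", p);	/* reads still succeed */
		/* *p = 'x'; here would now fault with SIGSEGV. */

		munmap(p, page);
		return 0;
	}

Note that the kernel rounds len up to a page boundary and requires start
itself to be page-aligned, mirroring the PAGE_ALIGN() and PAGE_MASK
checks at the top of sys_mprotect() above.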