hugetlb: modular state for hugetlb page size
arch/x86/mm/hugetlbpage.c
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

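/*
 * Huge pages on x86 are mapped by PMD entries, and the PMD page holding
 * them can itself be shared: when several mms map the same hugetlbfs file
 * with VM_MAYSHARE VMAs that fully cover an aligned PUD_SIZE region, their
 * PUD entries may point at one shared PMD page, reference-counted through
 * its struct page.  The helpers below implement that sharing.
 */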
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/*
	 * match the virtual addresses, permission and the alignment of the
	 * page table page.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vma->vm_flags != svma->vm_flags ||
	    sbase < svma->vm_start || svma->vm_end < s_end)
		return 0;

	return saddr;
}

static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long base = addr & PUD_MASK;
	unsigned long end = base + PUD_SIZE;

	/*
	 * check on proper vm_flags and page table alignment
	 */
	if (vma->vm_flags & VM_MAYSHARE &&
	    vma->vm_start <= base && end <= vma->vm_end)
		return 1;
	return 0;
}

/*
 * search for a shareable pmd page for hugetlb.
 */
static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	struct vm_area_struct *vma = find_vma(mm, addr);
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct prio_tree_iter iter;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;

	if (!vma_shareable(vma, addr))
		return;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = huge_pte_offset(svma->vm_mm, saddr);
			if (spte) {
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	spin_lock(&mm->page_table_lock);
	if (pud_none(*pud))
		pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK));
	else
		put_page(virt_to_page(spte));
	spin_unlock(&mm->page_table_lock);
out:
	spin_unlock(&mapping->i_mmap_lock);
}

/*
 * unmap huge page backed by shared pte.
 *
 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
 * indicated by page_count > 1, unmap is achieved by clearing pud and
 * decrementing the ref count. If count == 1, the pte page is not shared.
 *
 * called with vma->vm_mm->page_table_lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, *addr);
	pud_t *pud = pud_offset(pgd, *addr);

	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
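	/*
	 * Clearing the PUD above dropped every mapping covered by the shared
	 * PMD page for this mm, so push the caller's address to the last
	 * huge page of that region; the caller's loop then adds HPAGE_SIZE
	 * and resumes at the next PUD boundary.
	 */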
	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	return 1;
}

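/*
 * On x86 a huge page is mapped by a single PMD entry, so the "pte"
 * returned here is really the PMD entry covering @addr, allocated on
 * demand and, where possible, shared with other mappings of the same
 * file via huge_pmd_share().
 */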
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		if (pud_none(*pud))
			huge_pmd_share(mm, addr, pud);
		pte = (pte_t *) pmd_alloc(mm, pud, addr);
	}
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud))
			pmd = pmd_offset(pud, addr);
	}
	return (pte_t *) pmd;
}

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long start = address;
	unsigned long vpfn = address / PAGE_SIZE;	/* virtual pfn of the address */
	int length = 1;
	int nr;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}

#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

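/*
 * A PMD entry maps a huge page when its PSE (page size extension) bit is
 * set; that is exactly what pmd_huge() tests.
 */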
int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
#endif

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
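/*
 * The two search helpers below walk the mm's VMA list looking for a
 * HPAGE_SIZE-aligned hole of the requested length, using
 * mm->free_area_cache and mm->cached_hole_size as hints so that repeated
 * searches do not rescan the whole address space.
 */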
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > mm->cached_hole_size) {
		start_addr = mm->free_area_cache;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	addr = ALIGN(start_addr, HPAGE_SIZE);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
	}
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev_vma;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & HPAGE_MASK;
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr + len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			return (mm->free_area_cache = addr);
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & HPAGE_MASK;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
			len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

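/*
 * Top-level entry point: reject lengths that are not huge-page aligned,
 * honor MAP_FIXED and usable address hints, and otherwise dispatch to the
 * bottom-up or top-down search to match the mm's normal mmap layout.
 */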
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */