/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * TILE Huge TLB Page Support for Kernel.
 * Taken from i386 hugetlb implementation:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>

#ifdef CONFIG_HUGETLB_SUPER_PAGES

/*
 * Provide an additional huge page size (in addition to the regular default
 * huge page size) if no "hugepagesz" arguments are specified.
 * Note that it must be smaller than the default huge page size so
 * that it's possible to allocate them on demand from the buddy allocator.
 * You can change this to 64K (on a 16K build), 256K, 1M, or 4M,
 * or not define it at all.
 */
#define ADDITIONAL_HUGE_SIZE (1024 * 1024UL)

/* "Extra" page-size multipliers, one per level of the page table. */
int huge_shift[HUGE_SHIFT_ENTRIES] = {
#ifdef ADDITIONAL_HUGE_SIZE
#define ADDITIONAL_HUGE_SHIFT __builtin_ctzl(ADDITIONAL_HUGE_SIZE / PAGE_SIZE)
	[HUGE_SHIFT_PAGE] = ADDITIONAL_HUGE_SHIFT
#endif
};
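
/*
 * Worked example for ADDITIONAL_HUGE_SHIFT (illustrative arithmetic, not
 * from the original source): on a build with 64KB base pages,
 * ADDITIONAL_HUGE_SIZE / PAGE_SIZE == 1MB / 64KB == 16, so
 * __builtin_ctzl(16) == 4 and each "additional" huge page spans
 * 2^4 == 16 consecutive level-2 PTEs.
 */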

#endif

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;

	addr &= -sz;   /* Mask off any low bits in the address. */

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);

#ifdef CONFIG_HUGETLB_SUPER_PAGES
	if (sz >= PGDIR_SIZE) {
		BUG_ON(sz != PGDIR_SIZE &&
		       sz != PGDIR_SIZE << huge_shift[HUGE_SHIFT_PGDIR]);
		return (pte_t *)pud;
	} else {
		pmd_t *pmd = pmd_alloc(mm, pud, addr);
		if (sz >= PMD_SIZE) {
			BUG_ON(sz != PMD_SIZE &&
			       sz != (PMD_SIZE << huge_shift[HUGE_SHIFT_PMD]));
			return (pte_t *)pmd;
		} else {
			if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
				panic("Unexpected page size %#lx\n", sz);
			return pte_alloc_map(mm, NULL, pmd, addr);
		}
	}
#else
	BUG_ON(sz != PMD_SIZE);
	return (pte_t *) pmd_alloc(mm, pud, addr);
#endif
}
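
/*
 * Sizing example for huge_pte_alloc() above (illustrative values, not
 * from the original source): with a 4MB PMD_SIZE and
 * huge_shift[HUGE_SHIFT_PMD] == 2, a request for sz == 16MB
 * (PMD_SIZE << 2) takes the middle branch and returns the pmd slot cast
 * to a pte_t *, exactly as a plain 4MB request would.
 */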

static pte_t *get_pte(pte_t *base, int index, int level)
{
	pte_t *ptep = base + index;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
	if (!pte_present(*ptep) && huge_shift[level] != 0) {
		unsigned long mask = -1UL << huge_shift[level];
		pte_t *super_ptep = base + (index & mask);
		pte_t pte = *super_ptep;
		if (pte_present(pte) && pte_super(pte))
			ptep = super_ptep;
	}
#endif
	return ptep;
}
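
/*
 * Example of the super-page redirection above (illustrative values):
 * with huge_shift[level] == 2, mask == -1UL << 2 == ~3UL, so indices
 * 4 through 7 all collapse to the "super" PTE at index 4.  If the PTE
 * at index 6 is not present but index 4 is present and marked super,
 * the caller is handed index 4 instead.
 */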

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
	pte_t *pte;
#endif

	/* Get the top-level page table entry. */
	pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);

	/* We don't have four levels. */
	pud = pud_offset(pgd, addr);
#ifndef __PAGETABLE_PUD_FOLDED
# error support fourth page table level
#endif
	if (!pud_present(*pud))
		return NULL;

	/* Check for an L0 huge PTE, if we have three levels. */
#ifndef __PAGETABLE_PMD_FOLDED
	if (pud_huge(*pud))
		return (pte_t *)pud;

	pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
			       pmd_index(addr), 1);
	if (!pmd_present(*pmd))
		return NULL;
#else
	pmd = pmd_offset(pud, addr);
#endif

	/* Check for an L1 huge PTE. */
	if (pmd_huge(*pmd))
		return (pte_t *)pmd;

#ifdef CONFIG_HUGETLB_SUPER_PAGES
	/* Check for an L2 huge PTE. */
	pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
	if (!pte_present(*pte))
		return NULL;
	if (pte_super(*pte))
		return pte;
#endif

	return NULL;
}
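
/*
 * Note on the walk above (explanatory, not from the original source):
 * PGDIR- and PMD-level huge mappings are caught by pud_huge() and
 * pmd_huge(), while a PAGE-level "super" page only shows up at L2,
 * where get_pte() has already redirected the lookup to the leading PTE
 * of its group; a present L2 PTE without the super bit is an ordinary
 * page, so the function falls through and returns NULL.
 */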

struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
	return page;
}

struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pud);
	if (page)
		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
	return page;
}
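
/*
 * Offset arithmetic above, worked through (illustrative values): with
 * 64KB base pages and a 4MB PMD-level huge page, an address with
 * (address & ~PMD_MASK) == 0x150000 lies 0x150000 >> 16 == 21 base
 * pages into the huge page, so the head struct page pointer is advanced
 * by 21 to reach the page for that 64KB piece.
 */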

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
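
/*
 * Returning 0 unconditionally from huge_pmd_unshare() means tile does
 * not implement hugetlb PMD sharing; the generic code is always told
 * that no PMD was unshared (an explanatory note, not from the original
 * source).
 */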

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
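
/*
 * In both helpers above, align_mask = PAGE_MASK & ~huge_page_mask(h)
 * keeps exactly the address bits between PAGE_SHIFT and the huge page
 * shift, which forces vm_unmapped_area() to return a huge-page-aligned
 * address.  For example (illustrative, with 64KB base pages and 16MB
 * huge pages), align_mask == 0xff0000.
 */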

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (current->mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_SUPER_PAGES
static __init int __setup_hugepagesz(unsigned long ps)
{
	int log_ps = __builtin_ctzl(ps);
	int level, base_shift;

	if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
		pr_warn("Not enabling %ld byte huge pages; must be a power of four.\n",
			ps);
		return -EINVAL;
	}

	if (ps > 64*1024*1024*1024UL) {
		pr_warn("Not enabling %ld MB huge pages; largest legal value is 64 GB.\n",
			ps >> 20);
		return -EINVAL;
	} else if (ps >= PUD_SIZE) {
		static long hv_jpage_size;
		if (hv_jpage_size == 0)
			hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
		if (hv_jpage_size != PUD_SIZE) {
			pr_warn("Not enabling >= %ld MB huge pages: hypervisor reports size %ld\n",
				PUD_SIZE >> 20, hv_jpage_size);
			return -EINVAL;
		}
		level = 0;
		base_shift = PUD_SHIFT;
	} else if (ps >= PMD_SIZE) {
		level = 1;
		base_shift = PMD_SHIFT;
	} else if (ps > PAGE_SIZE) {
		level = 2;
		base_shift = PAGE_SHIFT;
	} else {
		pr_err("hugepagesz: huge page size %ld too small\n", ps);
		return -EINVAL;
	}

	if (log_ps != base_shift) {
		int shift_val = log_ps - base_shift;
		if (huge_shift[level] != 0) {
			int old_shift = base_shift + huge_shift[level];
			pr_warn("Not enabling %ld MB huge pages; already have size %ld MB.\n",
				ps >> 20, (1UL << old_shift) >> 20);
			return -EINVAL;
		}
		if (hv_set_pte_super_shift(level, shift_val) != 0) {
			pr_warn("Not enabling %ld MB huge pages; no hypervisor support.\n",
				ps >> 20);
			return -EINVAL;
		}
		printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
		huge_shift[level] = shift_val;
	}

	hugetlb_add_hstate(log_ps - PAGE_SHIFT);

	return 0;
}
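
/*
 * Worked example for __setup_hugepagesz() (illustrative values, assuming
 * 64KB base pages and PMD_SHIFT == 22, i.e. a 4MB PMD_SIZE): booting with
 * "hugepagesz=16M" gives ps == 1 << 24, so log_ps == 24 (even, and
 * 1UL << 24 == ps, so a power of four).  16MB is below PUD_SIZE but at
 * least PMD_SIZE, so level == 1 and base_shift == 22.  Since log_ps !=
 * base_shift, shift_val == 2 and hv_set_pte_super_shift(1, 2) tells the
 * hypervisor that level-1 super pages span 4 consecutive PMD entries.
 */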

static bool saw_hugepagesz;

static __init int setup_hugepagesz(char *opt)
{
	if (!saw_hugepagesz) {
		saw_hugepagesz = true;
		memset(huge_shift, 0, sizeof(huge_shift));
	}
	return __setup_hugepagesz(memparse(opt, NULL));
}
__setup("hugepagesz=", setup_hugepagesz);

#ifdef ADDITIONAL_HUGE_SIZE
/*
 * Provide an additional huge page size if no "hugepagesz" args are given.
 * In that case, all the cores have properly set up their hv super_shift
 * already, but we need to notify the hugetlb code to enable the
 * new huge page size from the Linux point of view.
 */
static __init int add_default_hugepagesz(void)
{
	if (!saw_hugepagesz) {
		BUILD_BUG_ON(ADDITIONAL_HUGE_SIZE >= PMD_SIZE ||
			     ADDITIONAL_HUGE_SIZE <= PAGE_SIZE);
		BUILD_BUG_ON((PAGE_SIZE << ADDITIONAL_HUGE_SHIFT) !=
			     ADDITIONAL_HUGE_SIZE);
		BUILD_BUG_ON(ADDITIONAL_HUGE_SHIFT & 1);
		hugetlb_add_hstate(ADDITIONAL_HUGE_SHIFT);
	}
	return 0;
}
arch_initcall(add_default_hugepagesz);
#endif

#endif /* CONFIG_HUGETLB_SUPER_PAGES */