#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages, used_hpages;
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

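/*
 * Illustrative sketch (assumed caller, not part of this header): a resv_map
 * is reference counted through 'refs', so a typical owner drops its
 * reference with resv_map_release() as the kref release callback.
 *
 *	struct resv_map *resv = resv_map_alloc();
 *	if (!resv)
 *		return -ENOMEM;
 *	...
 *	kref_put(&resv->refs, resv_map_release);
 */
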
extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

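/*
 * Illustrative sketch: for_each_hstate() walks every registered huge page
 * size. The 'free' accumulator below is hypothetical; the hstate fields it
 * reads are declared later in this header.
 *
 *	struct hstate *h;
 *	unsigned long free = 0;
 *
 *	spin_lock(&hugetlb_lock);
 *	for_each_hstate(h)
 *		free += h->free_huge_pages * pages_per_huge_page(h);
 *	spin_unlock(&hugetlb_lock);
 */
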
struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
void hugepage_put_subpool(struct hugepage_subpool *spool);

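/*
 * Illustrative lifecycle sketch (assumed caller, e.g. a mount path): a
 * subpool caps the number of huge pages a particular user may consume.
 *
 *	struct hugepage_subpool *spool = hugepage_new_subpool(max_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */
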
int PageHuge(struct page *page);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
				     void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		  unsigned long address, unsigned int flags);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			  struct vm_area_struct *vma,
			  vm_flags_t vm_flags);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
bool is_hugepage_active(struct page *page);
void free_huge_page(struct page *page);

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
#endif

extern unsigned long hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int write);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

#else /* !CONFIG_HUGETLB_PAGE */

static inline int PageHuge(struct page *page)
{
	return 0;
}

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, write)	NULL
#define follow_huge_pud(mm, addr, pud, write)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling)	({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define huge_pte_offset(mm, address)	0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
	return 0;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)
#define is_hugepage_active(x)	false

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an architecture supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
	BUG();
	return 0;
}
#endif

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format to support
 * multiple hugepage sizes. For example,
 * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
 * introduced this on powerpc, allowing a more flexible hugepage
 * pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif

#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline int is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return 1;
	if (is_file_shm_hugepages(file))
		return 1;

	return 0;
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			0
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};

struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				   unsigned long addr, int avoid_reserve);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	struct hugetlbfs_sb_info *hsb;
	hsb = HUGETLBFS_SB(i->i_sb);
	return hsb->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

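/*
 * Illustrative sketch: page_size_log is log2 of the requested huge page
 * size, with 0 meaning "use the default hstate". A caller decoding the
 * mmap()-style flag encoding (the MAP_HUGE_SHIFT/MAP_HUGE_MASK use below
 * is an assumption about that caller, not something defined here) might do
 * the following; a NULL return means no hstate is registered for that size.
 *
 *	struct hstate *h;
 *
 *	h = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
 *	if (!h)
 *		return -EINVAL;
 */
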
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

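/*
 * Worked example (x86-64 with 4KB base pages and 2MB huge pages, so
 * h->order == 9): huge_page_size() is 2MB, huge_page_shift() is 21,
 * pages_per_huge_page() is 512 and blocks_per_huge_page() is 4096
 * 512-byte sectors.
 */
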
#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

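/*
 * Example (assumed values): for the third base page of a 2MB huge page
 * whose head page has index 0 (x86-64, 4KB PAGE_SIZE), basepage_index()
 * returns 2, i.e. the offset expressed in base pages rather than in
 * huge pages.
 */
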
extern void dissolve_free_huge_pages(unsigned long start_pfn,
				     unsigned long end_pfn);
static inline int hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	return huge_page_shift(h) == PMD_SHIFT;
#else
	return 0;
#endif
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

static inline bool hugepages_supported(void)
{
	/*
	 * Some platforms decide whether they support huge pages at boot
	 * time. On those (such as powerpc), HPAGE_SHIFT is set to 0 when
	 * there is no such support.
	 */
	return HPAGE_SHIFT != 0;
}

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}
#define dissolve_free_huge_pages(s, e)	do {} while (0)
#define hugepage_migration_supported(h)	0

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

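/*
 * Illustrative sketch of the expected usage (caller-side names are
 * hypothetical): take the per-PTE or per-mm lock, examine or update the
 * huge PTE, then unlock.
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *	... examine or update the huge pte at ptep ...
 *	spin_unlock(ptl);
 */
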
#endif /* _LINUX_HUGETLB_H */