#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern pgtable_t get_pmd_huge_pte(struct mm_struct *mm);
extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot);

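/*
 * Bit numbers for transparent_hugepage_flags, toggled through the
 * sysfs files under /sys/kernel/mm/transparent_hugepage/: "always"
 * sets the base flag, "madvise" sets the _REQ_MADV variant.
 */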
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

enum page_check_address_pmd_flag {
	PAGE_CHECK_ADDRESS_PMD_FLAG,
	PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
	PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
};
extern pmd_t *page_check_address_pmd(struct page *page,
				     struct mm_struct *mm,
				     unsigned long address,
				     enum page_check_address_pmd_flag flag);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT HPAGE_SHIFT
#define HPAGE_PMD_MASK HPAGE_MASK
#define HPAGE_PMD_SIZE HPAGE_SIZE

#define transparent_hugepage_enabled(__vma)				\
	(transparent_hugepage_flags & (1<<TRANSPARENT_HUGEPAGE_FLAG) ||  \
	 (transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	  (__vma)->vm_flags & VM_HUGEPAGE))
#define transparent_hugepage_defrag(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||			\
	 (transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&		\
	  (__vma)->vm_flags & VM_HUGEPAGE))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;
extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			  pmd_t *dst_pmd, pmd_t *src_pmd,
			  struct vm_area_struct *vma,
			  unsigned long addr, unsigned long end);
extern int handle_pte_fault(struct mm_struct *mm,
			    struct vm_area_struct *vma, unsigned long address,
			    pte_t *pte, pmd_t *pmd, unsigned int flags);
extern int split_huge_page(struct page *page);
extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
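/*
 * Only take the __split_huge_page_pmd() slow path if the pmd actually
 * maps a transparent hugepage; for regular pmds this is a single
 * inlined branch with no function call.
 */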
#define split_huge_page_pmd(__mm, __pmd)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (unlikely(pmd_trans_huge(*____pmd)))			\
			__split_huge_page_pmd(__mm, ____pmd);		\
	} while (0)
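/*
 * The splitter holds the anon_vma root lock while a pmd is marked as
 * splitting, so once that lock is observed unlocked the split must
 * have completed; the smp_mb() in the macro orders the re-check of
 * the pmd against the lock wait.
 */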
#define wait_split_huge_page(__anon_vma, __pmd)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		spin_unlock_wait(&(__anon_vma)->root->lock);		\
		/*							\
		 * spin_unlock_wait() is just a loop in C and so the	\
		 * CPU can reorder anything around it.			\
		 */							\
		smp_mb();						\
		BUG_ON(pmd_trans_splitting(*____pmd) ||			\
		       pmd_trans_huge(*____pmd));			\
	} while (0)
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
#if HPAGE_PMD_ORDER > MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
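/*
 * Called from madvise(MADV_HUGEPAGE) to set VM_HUGEPAGE in the vma
 * flags, making the vma eligible under the "madvise" checks above.
 */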
extern int hugepage_madvise(unsigned long *vm_flags);
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
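/*
 * With THP compiled out, huge pmds never exist: the split/wait
 * helpers compile to nothing and the HPAGE_PMD_* constants must
 * never be evaluated, hence the BUG()s.
 */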
#define HPAGE_PMD_SHIFT ({ BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUG(); 0; })

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int split_huge_page(struct page *page)
{
	return 0;
}
#define split_huge_page_pmd(__mm, __pmd)	\
	do { } while (0)
#define wait_split_huge_page(__anon_vma, __pmd)	\
	do { } while (0)
static inline int hugepage_madvise(unsigned long *vm_flags)
{
	BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */