#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmd,
				  pmd_t orig_pmd, int dirty);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma,
			 struct vm_area_struct *new_vma,
			 unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
			pfn_t pfn, bool write);
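
/*
 * Flag bits kept in transparent_hugepage_flags.  They record whether THP is
 * enabled system-wide or only for madvise(MADV_HUGEPAGE) regions, the defrag
 * policy, and whether the huge zero page may be used.
 */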
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

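/* Order and number of base pages making up one PMD-sized huge page. */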
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);

#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

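/*
 * A VMA may use transparent huge pages when THP is enabled system-wide, or
 * when it is enabled for madvise() and the VMA is marked VM_HUGEPAGE, and
 * never when the VMA is marked VM_NOHUGEPAGE or is a temporary stack.
 */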
#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow()	0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze);

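/* Split the huge pmd mapping into normal ptes if *pmd is huge or devmap. */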
#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (pmd_trans_huge(*____pmd)				\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false);			\
	} while (0)


void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

#if HPAGE_PMD_ORDER >= MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
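/*
 * Returns the page table lock, held, when *pmd is a transparent huge or
 * devmap entry; otherwise returns NULL without holding the lock.
 */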
/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
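/* Number of base pages covered by @page: HPAGE_PMD_NR for a THP, else 1. */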
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pmd_t pmd, pmd_t *pmdp);

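/*
 * huge_zero_page is the single, global PMD-sized page of zeroes used to back
 * read-only anonymous huge page faults.
 */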
extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

struct page *get_huge_zero_page(void);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
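/* BUILD_BUG() constants and no-op stubs so callers compile with THP disabled. */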
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */