mm: Some arch may want to use HPAGE_PMD related values as variables
include/linux/huge_mm.h

#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmd,
				  pmd_t orig_pmd, int dirty);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int madvise_free_huge_pmd(struct mmu_gather *tlb,
				 struct vm_area_struct *vma,
				 pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			    unsigned long addr, unsigned long end,
			    unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma,
			  struct vm_area_struct *new_vma,
			  unsigned long old_addr,
			  unsigned long new_addr, unsigned long old_end,
			  pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			   unsigned long addr, pgprot_t newprot,
			   int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
		       pfn_t pfn, bool write);
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);

#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

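/*
 * Worked example (illustrative values, not defined here): with 4K base
 * pages (PAGE_SHIFT == 12) and PMD_SHIFT == 21, as on x86-64:
 *
 *	HPAGE_PMD_SHIFT = 21		-> HPAGE_PMD_SIZE = 2MB
 *	HPAGE_PMD_ORDER = 21 - 12 = 9	-> HPAGE_PMD_NR   = 512 base pages
 */
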
extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
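
/*
 * Usage sketch (illustrative, not part of this header): fault paths
 * typically gate the huge-page case on this predicate, roughly
 *
 *	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
 *		ret = do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags);
 *
 * i.e. THP applies when enabled system-wide, or enabled for madvise() with
 * the VMA marked VM_HUGEPAGE, and the VMA has not opted out via
 * VM_NOHUGEPAGE or by being a temporary stack.
 */
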
#define transparent_hugepage_defrag(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||			\
	 (transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&		\
	  (__vma)->vm_flags & VM_HUGEPAGE))
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
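
/*
 * Usage note (a sketch based on callers elsewhere, not spelled out here):
 * the caller is expected to hold a reference on @page and to have it
 * locked; 0 is returned on a successful split.
 *
 *	lock_page(page);
 *	if (!split_huge_page(page))
 *		... @page is now a base page ...
 *	unlock_page(page);
 */
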
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (pmd_trans_huge(*____pmd)				\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address);	\
	} while (0)
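
/*
 * Usage sketch (illustrative): callers that must operate on individual
 * ptes demote any huge pmd covering the range first, e.g.
 *
 *	split_huge_pmd(vma, pmd, addr);
 *
 * The macro is a no-op when *pmd is neither huge nor devmap, so it is
 * safe to call unconditionally.
 */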

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
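
/*
 * Usage sketch (illustrative): when a non-NULL ptl is returned, the pmd
 * is known to stay huge (or devmap) until the lock is dropped:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the huge pmd ...
 *		spin_unlock(ptl);
 *	}
 */
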
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
				 unsigned long addr, pmd_t pmd, pmd_t *pmdp);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

struct page *get_huge_zero_page(void);
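
/*
 * Illustrative use (an assumption drawn from the anonymous fault path,
 * not stated here): a read fault may map the shared huge zero page
 * instead of allocating, when transparent_hugepage_use_zero_page() is set:
 *
 *	zero_page = get_huge_zero_page();
 *	if (zero_page)
 *		... install it read-only at the faulting pmd ...
 */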

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)
static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */