#ifndef _LINUX_KHUGEPAGED_H
#define _LINUX_KHUGEPAGED_H

#include <linux/sched.h> /* MMF_VM_HUGEPAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
				      unsigned long vm_flags);

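/*
 * These helpers test bits in transparent_hugepage_flags, which are set via
 * the transparent_hugepage sysfs interface: khugepaged_enabled() is true in
 * both "always" and "madvise" mode, khugepaged_always()/khugepaged_req_madv()
 * distinguish the two, and khugepaged_defrag() reflects whether khugepaged
 * may use direct reclaim when allocating huge pages.
 */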
#define khugepaged_enabled()					\
	(transparent_hugepage_flags &				\
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |			\
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define khugepaged_always()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
#define khugepaged_req_madv()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
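
/*
 * Called from the fork path: if the parent mm was registered with
 * khugepaged, register the child mm as well.
 */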
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
		return __khugepaged_enter(mm);
	return 0;
}
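
/*
 * Called when an mm is torn down: drop it from khugepaged's scan list
 * if it was registered.
 */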
static inline void khugepaged_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
		__khugepaged_exit(mm);
}
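
/*
 * Register the mm with khugepaged when a VMA becomes THP-eligible, i.e.
 * THP is enabled system-wide or requested with MADV_HUGEPAGE, and not
 * excluded by MADV_NOHUGEPAGE.
 */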
static inline int khugepaged_enter(struct vm_area_struct *vma,
				   unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
		if ((khugepaged_always() ||
		     (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
		    !(vm_flags & VM_NOHUGEPAGE))
			if (__khugepaged_enter(vma->vm_mm))
				return -ENOMEM;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
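/* khugepaged is not built; all hooks become no-ops. */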
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
static inline int khugepaged_enter(struct vm_area_struct *vma,
				   unsigned long vm_flags)
{
	return 0;
}
static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
					     unsigned long vm_flags)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_KHUGEPAGED_H */