mm: numa: Add THP migration for the NUMA working set scanning fault case build fix
[deliverable/linux.git] / include / linux / migrate.h
1 #ifndef _LINUX_MIGRATE_H
2 #define _LINUX_MIGRATE_H
3
4 #include <linux/mm.h>
5 #include <linux/mempolicy.h>
6 #include <linux/migrate_mode.h>
7
/*
 * Allocation callback used by the migration core: given the page being
 * migrated and the caller-supplied @private cookie, return a newly
 * allocated target page (or NULL on failure).
 */
typedef struct page *new_page_t(struct page *, unsigned long private, int **);

/*
 * Why a page migration was requested; passed to migrate_pages() as
 * @reason, presumably for tracing/accounting — confirm against callers.
 */
enum migrate_reason {
	MR_COMPACTION,		/* memory compaction */
	MR_MEMORY_FAILURE,	/* hwpoison / memory-failure handling */
	MR_MEMORY_HOTPLUG,	/* memory offlining */
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,	/* mbind() policy enforcement */
	MR_NUMA_MISPLACED,	/* automatic NUMA balancing */
	MR_CMA			/* contiguous memory allocator */
};
19
#ifdef CONFIG_MIGRATION

/* Put pages on list @l (e.g. failed migrations) back on the LRU lists. */
extern void putback_lru_pages(struct list_head *l);
/*
 * Common migration of a single page-cache page to @newpage; suitable as
 * an address_space ->migratepage method for simple mappings.
 */
extern int migrate_page(struct address_space *,
			struct page *, struct page *, enum migrate_mode);
/*
 * Migrate all pages on list @l, allocating each target via @x(page,
 * private). @reason is an enum migrate_reason value recording why the
 * migration was requested. Returns 0 on success or an error/residual
 * count — see the definition in mm/migrate.c for exact semantics.
 */
extern int migrate_pages(struct list_head *l, new_page_t x,
			unsigned long private, bool offlining,
			enum migrate_mode mode, int reason);
/* As migrate_pages(), but for a single huge page. */
extern int migrate_huge_page(struct page *, new_page_t x,
			unsigned long private, bool offlining,
			enum migrate_mode mode);

/* ->migratepage method that always fails (for unmigratable mappings). */
extern int fail_migrate_page(struct address_space *,
			struct page *, struct page *);

/* One-time preparation before a migration pass (system-wide). */
extern int migrate_prep(void);
/* As migrate_prep(), but local-CPU scope only. */
extern int migrate_prep_local(void);
/* Notify @mm's vmas that pages are moving between nodesets @from/@to. */
extern int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags);
/* Copy contents and relevant state from @page into @newpage. */
extern void migrate_page_copy(struct page *newpage, struct page *page);
/* Replace @page with @newpage in @mapping's page tree (huge pages). */
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page);
#else
/*
 * CONFIG_MIGRATION=n: no-op/-ENOSYS stubs so callers compile without
 * ifdefs; any attempted migration reports "not implemented".
 */

static inline void putback_lru_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, bool offlining,
		enum migrate_mode mode, int reason) { return -ENOSYS; }
static inline int migrate_huge_page(struct page *page, new_page_t x,
		unsigned long private, bool offlining,
		enum migrate_mode mode) { return -ENOSYS; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags)
{
	return -ENOSYS;
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

/* Possible settings for the migrate_page() method in address_operations */
#define migrate_page NULL
#define fail_migrate_page NULL

#endif /* CONFIG_MIGRATION */
77
78 #ifdef CONFIG_NUMA_BALANCING
79 extern int migrate_misplaced_page(struct page *page, int node);
80 extern int migrate_misplaced_page(struct page *page, int node);
81 extern bool migrate_ratelimited(int node);
82 #else
83 static inline int migrate_misplaced_page(struct page *page, int node)
84 {
85 return -EAGAIN; /* can't migrate now */
86 }
87 static inline bool migrate_ratelimited(int node)
88 {
89 return false;
90 }
91 #endif /* CONFIG_NUMA_BALANCING */
92
#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * Attempt to migrate the transparent huge page @page (mapped at
 * @address in @vma by the pmd @entry at @pmd) to NUMA node @node,
 * following a NUMA hinting fault. Definition lives in mm/migrate.c.
 */
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
/*
 * Without both NUMA balancing and THP, huge-page NUMA migration is
 * unavailable: report -EAGAIN so the fault path falls back gracefully.
 */
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/
109
110 #endif /* _LINUX_MIGRATE_H */
This page took 0.044759 seconds and 6 git commands to generate.