#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

struct page *kmap_to_page(void *addr);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
/*
 * Without CONFIG_HIGHMEM every page is permanently mapped, so the kmap
 * family collapses to page_address() plus the usual context bookkeeping.
 */
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
	preempt_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */

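/*
 * Illustrative usage (editor's sketch, not part of the original header; the
 * helper copy_out() and its arguments are made up). kmap() may sleep and is
 * paired with kunmap(); with CONFIG_HIGHMEM the architecture supplies real
 * implementations, without it the trivial fallbacks above apply.
 *
 *	static void copy_out(struct page *page, void *buf, size_t len)
 *	{
 *		char *vaddr = kmap(page);	// may sleep
 *		memcpy(buf, vaddr, len);
 *		kunmap(page);			// takes the page, not the address
 *	}
 */
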
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

/* Per-CPU stack of atomic-kmap slots; nesting depth is bounded by KM_TYPE_NR. */
static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif

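/*
 * Illustrative sketch (editor's addition, loosely modeled on the 32-bit x86
 * implementation in arch/x86/mm/highmem_32.c): an architecture's
 * kmap_atomic() typically pushes an index here to pick a per-CPU fixmap
 * slot for the temporary mapping, and pops it again on unmap.
 *
 *	type = kmap_atomic_idx_push();
 *	idx = type + KM_TYPE_NR * smp_processor_id();
 *	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *	set_pte(kmap_pte - idx, mk_pte(page, prot));
 *	...
 *	kmap_atomic_idx_pop();	// in the matching __kunmap_atomic()
 */
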
/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic(addr)					\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	__kunmap_atomic(addr);					\
} while (0)

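/*
 * Illustrative usage (editor's sketch, not part of the original header; the
 * helper fill_page() is made up). kunmap_atomic() takes the address returned
 * by kmap_atomic(), which is exactly what the BUILD_BUG_ON above enforces.
 *
 *	static void fill_page(struct page *page, int c)
 *	{
 *		void *vaddr = kmap_atomic(page);	// no sleeping until unmapped
 *		memset(vaddr, c, PAGE_SIZE);
 *		kunmap_atomic(vaddr);			// the address, not the page
 *	}
 */
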
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

141 | |
142 | #ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE | |
769848c0 MG |
143 | /** |
144 | * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags | |
145 | * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE | |
146 | * @vma: The VMA the page is to be allocated for | |
147 | * @vaddr: The virtual address the page will be inserted into | |
148 | * | |
149 | * This function will allocate a page for a VMA but the caller is expected | |
150 | * to specify via movableflags whether the page will be movable in the | |
151 | * future or not | |
152 | * | |
153 | * An architecture may override this function by defining | |
154 | * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own | |
155 | * implementation. | |
156 | */ | |
1da177e4 | 157 | static inline struct page * |
769848c0 MG |
158 | __alloc_zeroed_user_highpage(gfp_t movableflags, |
159 | struct vm_area_struct *vma, | |
160 | unsigned long vaddr) | |
1da177e4 | 161 | { |
769848c0 MG |
162 | struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags, |
163 | vma, vaddr); | |
1da177e4 LT |
164 | |
165 | if (page) | |
166 | clear_user_highpage(page, vaddr); | |
167 | ||
168 | return page; | |
169 | } | |
170 | #endif | |
171 | ||
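/*
 * Illustrative sketch of an architecture override (editor's addition, loosely
 * modeled on the x86 definition): an arch that can get a zeroed page cheaply
 * at allocation time defines the guard and supplies its own version, e.g.:
 *
 *	#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 *	#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
 *		alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 */
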
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}

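/*
 * Illustrative usage (editor's sketch): the typical caller is an anonymous
 * page-fault path that wants a zeroed, migratable page for user space,
 * roughly as mm/memory.c does:
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		goto oom;
 *	// then map `page` into the faulting VMA at `address`
 */
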
static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

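/*
 * Illustrative usage (editor's sketch): filesystems commonly use these to
 * zero the part of a page beyond a new end-of-file so stale data is not
 * exposed, e.g.:
 *
 *	unsigned offset = newsize & (PAGE_SIZE - 1);
 *	if (offset)
 *		zero_user_segment(page, offset, PAGE_SIZE);
 */
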
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif /* _LINUX_HIGHMEM_H */