#ifndef _ASM_X86_PGTABLE_DEFS_H
#define _ASM_X86_PGTABLE_DEFS_H

#include <linux/const.h>
#include <asm/page_types.h>

#define FIRST_USER_ADDRESS	0UL

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_SOFTW1	9	/* available for programmer */
#define _PAGE_BIT_SOFTW2	10	/* " */
#define _PAGE_BIT_SOFTW3	11	/* " */
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL	_PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_SOFTW1
#define _PAGE_BIT_HIDDEN	_PAGE_BIT_SOFTW3 /* hidden by kmemcheck */
#define _PAGE_BIT_SOFT_DIRTY	_PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE	_PAGE_BIT_GLOBAL

#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_SOFTW1	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
#define _PAGE_SOFTW2	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE	(_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define __HAVE_ARCH_PTE_SPECIAL
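
/*
 * Illustrative sketch, not part of this header's API: individual bits
 * are tested by masking the raw entry value, e.g. (assumed caller code)
 *
 *	pteval_t val = native_pte_val(pte);
 *	int writable = !!(val & _PAGE_RW);
 *	int present  = !!(val & _PAGE_PRESENT);
 */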

#ifdef CONFIG_KMEMCHECK
#define _PAGE_HIDDEN	(_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
#else
#define _PAGE_HIDDEN	(_AT(pteval_t, 0))
#endif

/*
 * The same hidden bit is used by kmemcheck, but since kmemcheck
 * works on kernel pages while the soft-dirty engine works on user
 * space, the two users do not conflict with each other.
 */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 0))
#endif

/*
 * Tracking the soft dirty bit when a page goes to swap is tricky.
 * We need a bit which can be stored in the pte _and_ does not conflict
 * with the swap entry format. On x86 bits 6 and 7 are *not* involved
 * in swap entry computation, but bit 6 is used for nonlinear
 * file mapping, so we borrow bit 7 for soft dirty tracking.
 *
 * Please note that this bit must be treated as a swap dirty page
 * mark if and only if the PTE has the present bit clear!
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	_PAGE_PSE
#else
#define _PAGE_SWP_SOFT_DIRTY	(_AT(pteval_t, 0))
#endif
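
/*
 * Illustrative sketch, assumed caller code: because _PAGE_SWP_SOFT_DIRTY
 * aliases _PAGE_PSE, it is only meaningful once the present bit has been
 * checked:
 *
 *	if (!(val & _PAGE_PRESENT) && (val & _PAGE_SWP_SOFT_DIRTY))
 *		the swapped-out page was soft-dirty
 */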

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#endif

#define _PAGE_PROTNONE	(_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
			 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |	\
			 _PAGE_SOFT_DIRTY)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
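
/*
 * Illustrative sketch of how this mask is consumed, simplified from the
 * real pte_modify() implementation (assumed here): the pfn and the
 * "sticky" bits survive, everything else comes from the new protection.
 *
 *	pteval_t val = native_pte_val(pte);
 *	val &= _PAGE_CHG_MASK;
 *	val |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;
 *	return native_make_pte(val);
 */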

/*
 * The cache modes defined here are used to translate between pure SW usage
 * and the HW defined cache mode bits and/or PAT entries.
 *
 * The resulting bits for PWT, PCD and PAT should be chosen in a way
 * to have the WB mode at index 0 (all bits clear). This is the default
 * right now and likely would break too much if changed.
 */
#ifndef __ASSEMBLY__
enum page_cache_mode {
	_PAGE_CACHE_MODE_WB = 0,
	_PAGE_CACHE_MODE_WC = 1,
	_PAGE_CACHE_MODE_UC_MINUS = 2,
	_PAGE_CACHE_MODE_UC = 3,
	_PAGE_CACHE_MODE_WT = 4,
	_PAGE_CACHE_MODE_WP = 5,
	_PAGE_CACHE_MODE_NUM = 8
};
#endif

#define _PAGE_CACHE_MASK	(_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)
#define _PAGE_NOCACHE		(cachemode2protval(_PAGE_CACHE_MODE_UC))
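
/*
 * Illustrative sketch, assumed usage: a caller wanting an uncached
 * kernel mapping combines the base protections with a cache mode, e.g.
 *
 *	pgprot_t prot = __pgprot(__PAGE_KERNEL |
 *				 cachemode2protval(_PAGE_CACHE_MODE_UC));
 *
 * which is exactly how __PAGE_KERNEL_NOCACHE below is built.
 */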

#define PAGE_NONE		__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED		__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
					 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)

#define __PAGE_KERNEL_EXEC						\
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO	(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX	(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE	(__PAGE_KERNEL | _PAGE_NOCACHE)
#define __PAGE_KERNEL_VSYSCALL	(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VVAR	(__PAGE_KERNEL_RO | _PAGE_USER)
#define __PAGE_KERNEL_LARGE	(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define __PAGE_KERNEL_IO		(__PAGE_KERNEL)
#define __PAGE_KERNEL_IO_NOCACHE	(__PAGE_KERNEL_NOCACHE)

#define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VVAR		__pgprot(__PAGE_KERNEL_VVAR)

#define PAGE_KERNEL_IO			__pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE		__pgprot(__PAGE_KERNEL_IO_NOCACHE)

/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
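
/*
 * Illustrative note: generic mm code indexes these tables with the low
 * three mmap prot bits (xwr). A private PROT_READ|PROT_WRITE mapping,
 * for instance, resolves to __P011 == PAGE_COPY, i.e. it stays
 * read-only until the first write triggers copy-on-write.
 */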

/*
 * early identity mapping pte attrib macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
#else
#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	 0x063		/* PRESENT+RW+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
#endif

#ifdef CONFIG_X86_32
# include <asm/pgtable_32_types.h>
#else
# include <asm/pgtable_64_types.h>
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>

/* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
#define PTE_PFN_MASK		((pteval_t)PHYSICAL_PAGE_MASK)

/* Extracts the flags from a (pte|pmd|pud|pgd)val_t of a 4KB page */
#define PTE_FLAGS_MASK		(~PTE_PFN_MASK)
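
/*
 * Illustrative sketch, assumed caller code: splitting a 4KB pte value
 * into its physical address and its flag bits:
 *
 *	pteval_t val      = native_pte_val(pte);
 *	phys_addr_t phys  = val & PTE_PFN_MASK;
 *	pteval_t flags    = val & PTE_FLAGS_MASK;
 *	unsigned long pfn = phys >> PAGE_SHIFT;
 */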

typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

typedef struct { pgdval_t pgd; } pgd_t;

static inline pgd_t native_make_pgd(pgdval_t val)
{
	return (pgd_t) { val };
}

static inline pgdval_t native_pgd_val(pgd_t pgd)
{
	return pgd.pgd;
}

static inline pgdval_t pgd_flags(pgd_t pgd)
{
	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}

#if CONFIG_PGTABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;

static inline pud_t native_make_pud(pudval_t val)
{
	return (pud_t) { val };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return pud.pud;
}
#else
#include <asm-generic/pgtable-nopud.h>

static inline pudval_t native_pud_val(pud_t pud)
{
	return native_pgd_val(pud.pgd);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { val };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}
#else
#include <asm-generic/pgtable-nopmd.h>

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return native_pgd_val(pmd.pud.pgd);
}
#endif

static inline pudval_t pud_pfn_mask(pud_t pud)
{
	if (native_pud_val(pud) & _PAGE_PSE)
		return PHYSICAL_PUD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
}

static inline pudval_t pud_flags_mask(pud_t pud)
{
	return ~pud_pfn_mask(pud);
}

static inline pudval_t pud_flags(pud_t pud)
{
	return native_pud_val(pud) & pud_flags_mask(pud);
}

static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
{
	if (native_pmd_val(pmd) & _PAGE_PSE)
		return PHYSICAL_PMD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
}

static inline pmdval_t pmd_flags_mask(pmd_t pmd)
{
	return ~pmd_pfn_mask(pmd);
}

static inline pmdval_t pmd_flags(pmd_t pmd)
{
	return native_pmd_val(pmd) & pmd_flags_mask(pmd);
}
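
/*
 * Illustrative sketch: for a 2MB page (PSE set) the pfn mask excludes
 * the low 21 bits, so (assumed caller code)
 *
 *	phys_addr_t phys = native_pmd_val(pmd) & pmd_pfn_mask(pmd);
 *
 * yields a 2MB-aligned physical address, while for a pmd pointing at a
 * page table it yields the 4KB-aligned address of that table.
 */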

static inline pte_t native_make_pte(pteval_t val)
{
	return (pte_t) { .pte = val };
}

static inline pteval_t native_pte_val(pte_t pte)
{
	return pte.pte;
}

static inline pteval_t pte_flags(pte_t pte)
{
	return native_pte_val(pte) & PTE_FLAGS_MASK;
}

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )

extern uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM];
extern uint8_t __pte2cachemode_tbl[8];

#define __pte2cm_idx(cb)				\
	((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) |		\
	 (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) |		\
	 (((cb) >> _PAGE_BIT_PWT) & 1))
#define __cm_idx2pte(i)					\
	((((i) & 4) << (_PAGE_BIT_PAT - 2)) |		\
	 (((i) & 2) << (_PAGE_BIT_PCD - 1)) |		\
	 (((i) & 1) << _PAGE_BIT_PWT))
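
/*
 * Worked example (illustrative): _PAGE_CACHE_MODE_UC is index 3, i.e.
 * PCD and PWT set, PAT clear, so
 *
 *	__cm_idx2pte(3) == _PAGE_PCD | _PAGE_PWT == 0x18
 *
 * and __pte2cm_idx(0x18) maps back to 3.
 */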

static inline unsigned long cachemode2protval(enum page_cache_mode pcm)
{
	if (likely(pcm == 0))
		return 0;
	return __cachemode2pte_tbl[pcm];
}

static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
{
	return __pgprot(cachemode2protval(pcm));
}

static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
{
	unsigned long masked;

	masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
	if (likely(masked == 0))
		return 0;
	return __pte2cachemode_tbl[__pte2cm_idx(masked)];
}
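
/*
 * Illustrative sketch, assumed usage: for the modes the hardware PAT is
 * actually programmed with, the two tables are intended to be inverses
 * of each other, e.g.
 *
 *	pgprot_t wc = cachemode2pgprot(_PAGE_CACHE_MODE_WC);
 *	WARN_ON(pgprot2cachemode(wc) != _PAGE_CACHE_MODE_WC);
 */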

static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
{
	pgprot_t new;
	unsigned long val;

	val = pgprot_val(pgprot);
	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
	return new;
}

static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
{
	pgprot_t new;
	unsigned long val;

	val = pgprot_val(pgprot);
	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT_LARGE) >>
		 (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
	return new;
}
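
/*
 * Illustrative note: on 4KB ptes the PAT bit is bit 7, which doubles as
 * PSE in large entries, so large mappings carry PAT at bit 12 instead.
 * These helpers move the bit between the two positions, e.g. a 4KB
 * protection with _PAGE_PAT (1 << 7) set comes back from
 * pgprot_4k_2_large() with _PAGE_PAT_LARGE (1 << 12) set instead.
 */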

typedef struct page *pgtable_t;

extern pteval_t __supported_pte_mask;
extern void set_nx(void);
extern int nx_enabled;

#define pgprot_writecombine	pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough	pgprot_writethrough
extern pgprot_t pgprot_writethrough(pgprot_t prot);

/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot);

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_init(void);
#else
#define native_pagetable_init	paging_init
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

enum pg_level {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
				    unsigned int *level);
extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
				   unsigned numpages, unsigned long page_flags);
void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
			       unsigned numpages);
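
/*
 * Illustrative sketch, assumed caller code: querying which mapping size
 * backs a kernel virtual address:
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && level == PG_LEVEL_2M)
 *		the address is covered by a 2MB mapping
 */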

#endif	/* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_DEFS_H */