Commit | Line | Data |
---|---|---|
18aecc2b CM |
1 | /* |
2 | * Copyright 2011 Tilera Corporation. All Rights Reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU General Public License | |
6 | * as published by the Free Software Foundation, version 2. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, but | |
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | |
11 | * NON INFRINGEMENT. See the GNU General Public License for | |
12 | * more details. | |
13 | * | |
14 | */ | |
15 | ||
#ifndef _ASM_TILE_PGTABLE_64_H
#define _ASM_TILE_PGTABLE_64_H

/* The level-0 page table breaks the address space into 32-bit chunks. */
#define PGDIR_SHIFT HV_LOG2_L1_SPAN
#define PGDIR_SIZE HV_L1_SPAN
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD HV_L0_ENTRIES
#define PGD_INDEX(va) HV_L0_INDEX(va)
#define SIZEOF_PGD HV_L0_SIZE

/*
 * The level-1 index is defined by the huge page size. A PMD is composed
 * of PTRS_PER_PMD pgd_t's and is the middle level of the page table.
 * Geometry is derived from HPAGE_SHIFT via the hypervisor's _HV_L1_*
 * helper macros.
 */
#define PMD_SHIFT HPAGE_SHIFT
#define PMD_SIZE HPAGE_SIZE
#define PMD_MASK (~(PMD_SIZE-1))
#define PTRS_PER_PMD _HV_L1_ENTRIES(HPAGE_SHIFT)
#define PMD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT)
#define SIZEOF_PMD _HV_L1_SIZE(HPAGE_SHIFT)

/*
 * The level-2 index is defined by the difference between the huge
 * page size and the normal page size. A PTE is composed of
 * PTRS_PER_PTE pte_t's and is the bottom level of the page table.
 * Note that the hypervisor docs use PTE for what we call pte_t, so
 * this nomenclature is somewhat confusing.
 */
#define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
#define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
#define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)

/*
 * Align the vmalloc area to an L2 page table. Omit guard pages at
 * the beginning and end for simplicity (particularly in the per-cpu
 * memory allocation code). The vmalloc code puts in an internal
 * guard page between each allocation.
 */
#define _VMALLOC_END MEM_SV_START
#define VMALLOC_END _VMALLOC_END
#define VMALLOC_START _VMALLOC_START
18aecc2b CM |
#ifndef __ASSEMBLY__

/* We have no pud since we are a three-level page table. */
#include <asm-generic/pgtable-nopud.h>

/*
 * pmds are the same as pgds and ptes, so converting is a no-op.
 */
#define pmd_pte(pmd) (pmd)
#define pmdp_ptep(pmdp) (pmdp)
#define pte_pmd(pte) (pte)

/* A pud wraps a pgd entry; expose the underlying entry as a pte. */
#define pud_pte(pud) ((pud).pgd)
18aecc2b CM |
73 | static inline int pud_none(pud_t pud) |
74 | { | |
75 | return pud_val(pud) == 0; | |
76 | } | |
77 | ||
/*
 * Test the PRESENT bit of a pud entry.  Note that the raw masked bit
 * is returned, not a normalized 0/1.
 */
static inline int pud_present(pud_t pud)
{
	return pud_val(pud) & _PAGE_PRESENT;
}
82 | ||
a718e10c CM |
/*
 * Test the HUGE_PAGE bit of a pud entry.  As with pud_present(), the
 * raw masked bit is returned, not a normalized 0/1.
 */
static inline int pud_huge_page(pud_t pud)
{
	return pud_val(pud) & _PAGE_HUGE_PAGE;
}
87 | ||
18aecc2b CM |
/* Report a corrupt pmd entry, printing its full 64-bit value. */
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e))
90 | ||
/* Clear a pud entry via the underlying pgd/pte clear helper. */
static inline void pud_clear(pud_t *pudp)
{
	__pte_clear(&pudp->pgd);
}
95 | ||
/*
 * A pud is "bad" if, after masking with _PAGE_ALL, its flag bits are
 * not exactly those of a valid page-table pointer (_PAGE_TABLE).
 */
static inline int pud_bad(pud_t pud)
{
	return ((pud_val(pud) & _PAGE_ALL) != _PAGE_TABLE);
}
100 | ||
/* Return the page-table frame number (ptfn) that a pud_t points at. */
#define pud_ptfn(pud) hv_pte_get_ptfn((pud).pgd)

/* Return the page frame number (pfn) that a pud_t points at. */
#define pud_pfn(pud) pte_pfn(pud_pte(pud))

/*
 * A given kernel pud_t maps to a kernel pmd_t table at a specific
 * virtual address. Since kernel pmd_t tables can be aligned at
 * sub-page granularity, this macro can return non-page-aligned
 * pointers, despite its name.
 */
#define pud_page_vaddr(pud) \
	(__va((phys_addr_t)pud_ptfn(pud) << HV_LOG2_PAGE_TABLE_ALIGN))

/*
 * A pud_t points to a pmd_t array. Since we can have multiple per
 * page, we don't have a one-to-one mapping of pud_t's to pages.
 * Convert ptfn -> client physical address -> pfn -> struct page.
 */
#define pud_page(pud) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pud_ptfn(pud))))
121 | |
122 | static inline unsigned long pud_index(unsigned long address) | |
123 | { | |
124 | return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); | |
125 | } | |
126 | ||
/* Pointer to the pmd entry covering "address" within the given pud. */
#define pmd_offset(pud, address) \
	((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address))
129 | ||
18aecc2b CM |
/* Normalize an address to having the correct high bits set. */
#define pgd_addr_normalize pgd_addr_normalize
static inline unsigned long pgd_addr_normalize(unsigned long addr)
{
	/*
	 * Sign-extend from the chip's VA width to the full word width:
	 * shift the VA's top bit up to the word's sign bit, then
	 * arithmetic-shift (via the signed cast) back down so the high
	 * bits replicate the VA's top bit.
	 */
	return ((long)addr << (CHIP_WORD_SIZE() - CHIP_VA_WIDTH())) >>
		(CHIP_WORD_SIZE() - CHIP_VA_WIDTH());
}
137 | ||
/* We don't define any pgds for these addresses. */
static inline int pgd_addr_invalid(unsigned long addr)
{
	/*
	 * Invalid if above the top of the kernel mapping, or if the
	 * high bits are not a proper extension of the VA's top bit.
	 */
	return addr >= KERNEL_HIGH_VADDR || addr != pgd_addr_normalize(addr);
}
143 | ||
/*
 * Use atomic instructions to provide atomicity against the hypervisor.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	/*
	 * Atomically clear the ACCESSED bit with fetch-and-AND, and
	 * return the bit's previous value (1 if it had been set).
	 */
	return (__insn_fetchand(&ptep->val, ~HV_PTE_ACCESSED) >>
		HV_PTE_INDEX_ACCESSED) & 0x1;
}
154 | ||
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	/* Atomically clear the WRITABLE bit; the old value is unused. */
	__insn_fetchand(&ptep->val, ~HV_PTE_WRITABLE);
}
161 | ||
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	/* Atomically exchange in a zero pte and return the old contents. */
	return hv_pte(__insn_exch(&ptep->val, 0UL));
}
168 | ||
169 | #endif /* __ASSEMBLY__ */ | |
170 | ||
171 | #endif /* _ASM_TILE_PGTABLE_64_H */ |