/*
 *  arch/arm/include/asm/page.h
 *
 *  Copyright (C) 1995-2003 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PAGE_H
#define _ASMARM_PAGE_H

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE-1))

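/*
 * Illustrative note (not part of the original header): PAGE_SHIFT == 12
 * gives 4KiB pages, so on 32-bit ARM PAGE_SIZE == 0x1000 and
 * PAGE_MASK == 0xfffff000.  Typical uses of these constants:
 *
 *	addr & PAGE_MASK		- round addr down to its page base
 *	addr & ~PAGE_MASK		- offset of addr within its page
 *	(addr + PAGE_SIZE - 1) & PAGE_MASK - round addr up to a page
 */
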
#ifndef __ASSEMBLY__

#ifndef CONFIG_MMU

#include "page-nommu.h"

#else

#include <asm/glue.h>

/*
 *	User Space Model
 *	================
 *
 *	This section selects the correct set of functions for dealing with
 *	page-based copying and clearing for user space for the particular
 *	processor(s) we're building for.
 *
 *	We have the following to choose from:
 *	  v3		- ARMv3
 *	  v4wt		- ARMv4 with writethrough cache, without minicache
 *	  v4wb		- ARMv4 with writeback cache, without minicache
 *	  v4_mc		- ARMv4 with minicache
 *	  xscale	- XScale
 *	  xsc3		- XScale v3
 */
#undef _USER
#undef MULTI_USER

#ifdef CONFIG_CPU_COPY_V3
# ifdef _USER
#  define MULTI_USER 1
# else
#  define _USER v3
# endif
#endif

#ifdef CONFIG_CPU_COPY_V4WT
# ifdef _USER
#  define MULTI_USER 1
# else
#  define _USER v4wt
# endif
#endif

#ifdef CONFIG_CPU_COPY_V4WB
# ifdef _USER
#  define MULTI_USER 1
# else
#  define _USER v4wb
# endif
#endif

#ifdef CONFIG_CPU_COPY_FEROCEON
# ifdef _USER
#  define MULTI_USER 1
# else
#  define _USER feroceon
# endif
#endif

#ifdef CONFIG_CPU_SA1100
# ifdef _USER
#  define MULTI_USER 1
# else
#  define _USER v4_mc
# endif
#endif

#ifdef CONFIG_CPU_XSCALE
# ifdef _USER
#  define MULTI_USER 1
# else
#  define _USER xscale_mc
# endif
#endif

#ifdef CONFIG_CPU_XSC3
# ifdef _USER
#  define MULTI_USER 1
# else
#  define _USER xsc3_mc
# endif
#endif

#ifdef CONFIG_CPU_COPY_V6
# define MULTI_USER 1
#endif

#if !defined(_USER) && !defined(MULTI_USER)
#error Unknown user operations model
#endif

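/*
 * Example of how the selection above plays out (illustrative, not part
 * of the original header): a kernel built for a single v4wb processor
 * (only CONFIG_CPU_COPY_V4WB set) leaves MULTI_USER undefined and sets
 * _USER to v4wb, so the calls below bind directly to the v4wb routines.
 * A kernel built for both a v4wb processor and an XScale processor hits
 * the "# ifdef _USER" arm the second time through, defines MULTI_USER,
 * and so dispatches indirectly through the cpu_user function table.
 */
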
struct page;

struct cpu_user_fns {
	void (*cpu_clear_user_page)(void *p, unsigned long user);
	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
			unsigned long vaddr);
};

#ifdef MULTI_USER
extern struct cpu_user_fns cpu_user;

#define __cpu_clear_user_page		cpu_user.cpu_clear_user_page
#define __cpu_copy_user_highpage	cpu_user.cpu_copy_user_highpage

#else

#define __cpu_clear_user_page		__glue(_USER,_clear_user_page)
#define __cpu_copy_user_highpage	__glue(_USER,_copy_user_highpage)

extern void __cpu_clear_user_page(void *p, unsigned long user);
extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr);
#endif

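/*
 * Illustrative expansion (not part of the original header): in the
 * single-processor case, __glue() from <asm/glue.h> pastes _USER onto
 * the function suffix, so with _USER == v4wb,
 *
 *	__cpu_copy_user_highpage(to, from, vaddr)
 *
 * becomes a direct call to v4wb_copy_user_highpage().  In the
 * MULTI_USER case the same expression instead reads through the
 * cpu_user table, which the kernel's processor setup fills in at boot.
 */
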
#define clear_user_page(addr,vaddr,pg)	 __cpu_clear_user_page(addr, vaddr)

#define __HAVE_ARCH_COPY_USER_HIGHPAGE
#define copy_user_highpage(to,from,vaddr,vma)	\
	__cpu_copy_user_highpage(to, from, vaddr)

#define clear_page(page)	memzero((void *)(page), PAGE_SIZE)
extern void copy_page(void *to, const void *from);

#undef STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd[2]; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define pgd_val(x)	((x).pgd[0])
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

#else
/*
 * .. while these make it easier on the compiler
 */
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t[2];
typedef unsigned long pgprot_t;

#define pte_val(x)	(x)
#define pmd_val(x)	(x)
#define pgd_val(x)	((x)[0])
#define pgprot_val(x)	(x)

#define __pte(x)	(x)
#define __pmd(x)	(x)
#define __pgprot(x)	(x)

#endif /* STRICT_MM_TYPECHECKS */

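/*
 * Illustrative example (not part of the original header): the struct
 * wrappers exist purely so the compiler can catch type mixups.  With
 * STRICT_MM_TYPECHECKS defined,
 *
 *	pte_t pte = __pte(0);
 *	pmd_t pmd = pte;	<-- compile error: distinct struct types
 *
 * fails to build, whereas with the plain unsigned long variants both
 * sides are the same scalar type and the mistake compiles silently.
 * The scalar variants are kept because, as the comment above notes,
 * they are easier on the compiler.
 */
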
#endif /* CONFIG_MMU */

typedef struct page *pgtable_t;

#include <asm/memory.h>

#endif /* !__ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS \
	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

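/*
 * Illustrative note (not part of the original header): data mappings
 * are always readable and writable; VM_EXEC is added only when the
 * task's personality has READ_IMPLIES_EXEC set, as the ELF loader does
 * for legacy binaries without a PT_GNU_STACK header, so old executables
 * that assume executable data/bss keep working.
 */
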
/*
 * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
 */
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define ARCH_SLAB_MINALIGN 8
#endif

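/*
 * Illustrative note (not part of the original header): the EABI aligns
 * 64-bit types (long long, double) to 8 bytes, and ARMv5's ldrd/strd
 * instructions are not guaranteed to work on misaligned addresses, so
 * objects returned by the slab allocators must be at least 8-byte
 * aligned; ARCH_SLAB_MINALIGN enforces that minimum.
 */
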
#include <asm-generic/page.h>

#endif