Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/arm/mm/proc-v6.S | |
3 | * | |
4 | * Copyright (C) 2001 Deep Blue Solutions Ltd. | |
d090ddda | 5 | * Modified by Catalin Marinas for noMMU support |
1da177e4 LT |
6 | * |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License version 2 as | |
9 | * published by the Free Software Foundation. | |
10 | * | |
11 | * This is the "shell" of the ARMv6 processor support. | |
12 | */ | |
13 | #include <linux/linkage.h> | |
14 | #include <asm/assembler.h> | |
e6ae744d | 15 | #include <asm/asm-offsets.h> |
862184fe | 16 | #include <asm/hardware/arm_scu.h> |
1da177e4 | 17 | #include <asm/procinfo.h> |
74945c86 | 18 | #include <asm/pgtable-hwdef.h> |
1da177e4 LT |
19 | #include <asm/pgtable.h> |
20 | ||
21 | #include "proc-macros.S" | |
22 | ||
23 | #define D_CACHE_LINE_SIZE 32 | |
24 | ||
3747b36e RK |
25 | #define TTB_C (1 << 0) |
26 | #define TTB_S (1 << 1) | |
27 | #define TTB_IMP (1 << 2) | |
28 | #define TTB_RGN_NC (0 << 3) | |
29 | #define TTB_RGN_WBWA (1 << 3) | |
30 | #define TTB_RGN_WT (2 << 3) | |
31 | #define TTB_RGN_WB (3 << 3) | |
32 | ||
1da177e4 LT |
33 | ENTRY(cpu_v6_proc_init) |
34 | mov pc, lr | |
35 | ||
36 | ENTRY(cpu_v6_proc_fin) | |
67c5587a TL |
37 | stmfd sp!, {lr} |
38 | cpsid if @ disable interrupts | |
39 | bl v6_flush_kern_cache_all | |
40 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | |
41 | bic r0, r0, #0x1000 @ ...i............ | |
42 | bic r0, r0, #0x0006 @ .............ca. | |
43 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | |
44 | ldmfd sp!, {pc} | |
1da177e4 LT |
45 | |
46 | /* | |
47 | * cpu_v6_reset(loc) | |
48 | * | |
49 | * Perform a soft reset of the system. Put the CPU into the | |
50 | * same state as it would be if it had been reset, and branch | |
51 | * to what would be the reset vector. | |
52 | * | |
53 | * - loc - location to jump to for soft reset | |
54 | * | |
55 | * It is assumed that: | |
56 | */ | |
57 | .align 5 | |
58 | ENTRY(cpu_v6_reset) | |
59 | mov pc, r0 | |
60 | ||
61 | /* | |
62 | * cpu_v6_do_idle() | |
63 | * | |
64 | * Idle the processor (eg, wait for interrupt). | |
65 | * | |
66 | * IRQs are already disabled. | |
67 | */ | |
68 | ENTRY(cpu_v6_do_idle) | |
69 | mcr p15, 0, r1, c7, c0, 4 @ wait for interrupt | |
70 | mov pc, lr | |
71 | ||
72 | ENTRY(cpu_v6_dcache_clean_area) | |
73 | #ifndef TLB_CAN_READ_FROM_L1_CACHE | |
74 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | |
75 | add r0, r0, #D_CACHE_LINE_SIZE | |
76 | subs r1, r1, #D_CACHE_LINE_SIZE | |
77 | bhi 1b | |
78 | #endif | |
79 | mov pc, lr | |
80 | ||
81 | /* | |
82 | * cpu_v6_switch_mm(pgd_phys, tsk) | |
83 | * | |
84 | * Set the translation table base pointer to be pgd_phys | |
85 | * | |
86 | * - pgd_phys - physical address of new TTB | |
87 | * | |
88 | * It is assumed that: | |
89 | * - we are not using split page tables | |
90 | */ | |
91 | ENTRY(cpu_v6_switch_mm) | |
d090ddda | 92 | #ifdef CONFIG_MMU |
1da177e4 LT |
93 | mov r2, #0 |
94 | ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id | |
cd03adb0 | 95 | #ifdef CONFIG_SMP |
3747b36e | 96 | orr r0, r0, #TTB_RGN_WBWA|TTB_S @ mark PTWs shared, outer cacheable |
cd03adb0 | 97 | #endif |
d93742f5 | 98 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB |
1da177e4 LT |
99 | mcr p15, 0, r2, c7, c10, 4 @ drain write buffer |
100 | mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 | |
101 | mcr p15, 0, r1, c13, c0, 1 @ set context ID | |
d090ddda | 102 | #endif |
1da177e4 LT |
103 | mov pc, lr |
104 | ||
1da177e4 LT |
105 | /* |
106 | * cpu_v6_set_pte(ptep, pte) | |
107 | * | |
108 | * Set a level 2 translation table entry. | |
109 | * | |
110 | * - ptep - pointer to level 2 translation table entry | |
111 | * (hardware version is stored at -2048 bytes) | |
112 | * - pte - PTE value to store | |
113 | * | |
114 | * Permissions: | |
115 | * YUWD APX AP1 AP0 SVC User | |
116 | * 0xxx 0 0 0 no acc no acc | |
117 | * 100x 1 0 1 r/o no acc | |
118 | * 10x0 1 0 1 r/o no acc | |
119 | * 1011 0 0 1 r/w no acc | |
79042f08 CM |
120 | * 110x 0 1 0 r/w r/o |
121 | * 11x0 0 1 0 r/w r/o | |
1da177e4 LT |
122 | * 1111 0 1 1 r/w r/w |
123 | */ | |
124 | ENTRY(cpu_v6_set_pte) | |
d090ddda | 125 | #ifdef CONFIG_MMU |
1da177e4 LT |
126 | str r1, [r0], #-2048 @ linux version |
127 | ||
cd03adb0 | 128 | bic r2, r1, #0x000003f0 |
1da177e4 | 129 | bic r2, r2, #0x00000003 |
1b9749e7 | 130 | orr r2, r2, #PTE_EXT_AP0 | 2 |
1da177e4 LT |
131 | |
132 | tst r1, #L_PTE_WRITE | |
133 | tstne r1, #L_PTE_DIRTY | |
1b9749e7 | 134 | orreq r2, r2, #PTE_EXT_APX |
1da177e4 LT |
135 | |
136 | tst r1, #L_PTE_USER | |
6626a707 | 137 | orrne r2, r2, #PTE_EXT_AP1 |
1b9749e7 RK |
138 | tstne r2, #PTE_EXT_APX |
139 | bicne r2, r2, #PTE_EXT_APX | PTE_EXT_AP0 | |
1da177e4 LT |
140 | |
141 | tst r1, #L_PTE_YOUNG | |
1b9749e7 | 142 | biceq r2, r2, #PTE_EXT_APX | PTE_EXT_AP_MASK |
1da177e4 | 143 | |
3747b36e RK |
144 | tst r1, #L_PTE_EXEC |
145 | orreq r2, r2, #PTE_EXT_XN | |
1da177e4 LT |
146 | |
147 | tst r1, #L_PTE_PRESENT | |
148 | moveq r2, #0 | |
149 | ||
150 | str r2, [r0] | |
151 | mcr p15, 0, r0, c7, c10, 1 @ flush_pte | |
d090ddda | 152 | #endif |
1da177e4 LT |
153 | mov pc, lr |
154 | ||
155 | ||
156 | ||
157 | ||
158 | cpu_v6_name: | |
159 | .asciz "Some Random V6 Processor" | |
160 | .align | |
161 | ||
162 | .section ".text.init", #alloc, #execinstr | |
163 | ||
164 | /* | |
165 | * __v6_setup | |
166 | * | |
167 | * Initialise TLB, Caches, and MMU state ready to switch the MMU | |
168 | * on. Return in r0 the new CP15 C1 control register setting. | |
169 | * | |
170 | * We automatically detect if we have a Harvard cache, and use the | |
171 | * Harvard cache control instructions instead of the unified cache | |
172 | * control instructions. | |
173 | * | |
174 | * This should be able to cover all ARMv6 cores. | |
175 | * | |
176 | * It is assumed that: | |
177 | * - cache type register is implemented | |
178 | */ | |
179 | __v6_setup: | |
862184fe RK |
180 | #ifdef CONFIG_SMP |
181 | /* Set up the SCU on core 0 only */ | |
182 | mrc p15, 0, r0, c0, c0, 5 @ CPU core number | |
183 | ands r0, r0, #15 | |
184 | moveq r0, #0x10000000 @ SCU_BASE | |
185 | orreq r0, r0, #0x00100000 | |
186 | ldreq r5, [r0, #SCU_CTRL] | |
187 | orreq r5, r5, #1 | |
188 | streq r5, [r0, #SCU_CTRL] | |
189 | ||
190 | #ifndef CONFIG_CPU_DCACHE_DISABLE | |
191 | mrc p15, 0, r0, c1, c0, 1 @ Enable SMP/nAMP mode | |
192 | orr r0, r0, #0x20 | |
193 | mcr p15, 0, r0, c1, c0, 1 | |
194 | #endif | |
195 | #endif | |
196 | ||
1da177e4 LT |
197 | mov r0, #0 |
198 | mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache | |
199 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | |
200 | mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache | |
201 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer | |
d090ddda | 202 | #ifdef CONFIG_MMU |
1da177e4 LT |
203 | mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs |
204 | mcr p15, 0, r0, c2, c0, 2 @ TTB control register | |
cd03adb0 | 205 | #ifdef CONFIG_SMP |
3747b36e | 206 | orr r4, r4, #TTB_RGN_WBWA|TTB_S @ mark PTWs shared, outer cacheable |
cd03adb0 | 207 | #endif |
1da177e4 | 208 | mcr p15, 0, r4, c2, c0, 1 @ load TTB1 |
d090ddda | 209 | #endif /* CONFIG_MMU */ |
1da177e4 LT |
210 | #ifdef CONFIG_VFP |
211 | mrc p15, 0, r0, c1, c0, 2 | |
d1d890ed | 212 | orr r0, r0, #(0xf << 20) |
1da177e4 LT |
213 | mcr p15, 0, r0, c1, c0, 2 @ Enable full access to VFP |
214 | #endif | |
215 | mrc p15, 0, r0, c1, c0, 0 @ read control register | |
216 | ldr r5, v6_cr1_clear @ get mask for bits to clear | |
217 | bic r0, r0, r5 @ clear the masked bits | |
218 | ldr r5, v6_cr1_set @ get mask for bits to set | |
219 | orr r0, r0, r5 @ set them | |
220 | mov pc, lr @ return to head.S:__ret | |
221 | ||
222 | /* | |
223 | * V X F I D LR | |
224 | * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM | |
225 | * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced | |
226 | * 0 110 0011 1.00 .111 1101 < we want | |
227 | */ | |
228 | .type v6_cr1_clear, #object | |
229 | .type v6_cr1_set, #object | |
230 | v6_cr1_clear: | |
231 | .word 0x01e0fb7f | |
232 | v6_cr1_set: | |
233 | .word 0x00c0387d | |
234 | ||
235 | .type v6_processor_functions, #object | |
236 | ENTRY(v6_processor_functions) | |
237 | .word v6_early_abort | |
238 | .word cpu_v6_proc_init | |
239 | .word cpu_v6_proc_fin | |
240 | .word cpu_v6_reset | |
241 | .word cpu_v6_do_idle | |
242 | .word cpu_v6_dcache_clean_area | |
243 | .word cpu_v6_switch_mm | |
244 | .word cpu_v6_set_pte | |
245 | .size v6_processor_functions, . - v6_processor_functions | |
246 | ||
247 | .type cpu_arch_name, #object | |
248 | cpu_arch_name: | |
249 | .asciz "armv6" | |
250 | .size cpu_arch_name, . - cpu_arch_name | |
251 | ||
252 | .type cpu_elf_name, #object | |
253 | cpu_elf_name: | |
254 | .asciz "v6" | |
255 | .size cpu_elf_name, . - cpu_elf_name | |
256 | .align | |
257 | ||
02b7dd12 | 258 | .section ".proc.info.init", #alloc, #execinstr |
1da177e4 LT |
259 | |
260 | /* | |
261 | * Match any ARMv6 processor core. | |
262 | */ | |
263 | .type __v6_proc_info, #object | |
264 | __v6_proc_info: | |
265 | .long 0x0007b000 | |
266 | .long 0x0007f000 | |
267 | .long PMD_TYPE_SECT | \ | |
268 | PMD_SECT_BUFFERABLE | \ | |
269 | PMD_SECT_CACHEABLE | \ | |
270 | PMD_SECT_AP_WRITE | \ | |
271 | PMD_SECT_AP_READ | |
272 | b __v6_setup | |
273 | .long cpu_arch_name | |
274 | .long cpu_elf_name | |
275 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_VFP|HWCAP_EDSP|HWCAP_JAVA | |
276 | .long cpu_v6_name | |
277 | .long v6_processor_functions | |
278 | .long v6wbi_tlb_fns | |
279 | .long v6_user_fns | |
280 | .long v6_cache_fns | |
281 | .size __v6_proc_info, . - __v6_proc_info |