/*
 * This file contains low level CPU setup functions.
 * Kumar Gala <galak@kernel.crashing.org>
 * Copyright 2009 Freescale Semiconductor, Inc.
 *
 * Based on cpu_setup_6xx code by
 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/mmu-book3e.h>
#include <asm/asm-offsets.h>

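/*
 * Enable the L1 instruction cache: flash-invalidate it and clear any
 * line locks first, then turn it on (CPE also enables parity checking).
 * Returns immediately if the cache is already enabled.
 */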
_GLOBAL(__e500_icache_setup)
        mfspr r0, SPRN_L1CSR1
        andi. r3, r0, L1CSR1_ICE
        bnelr                           /* Already enabled */
        oris r0, r0, L1CSR1_CPE@h
        ori r0, r0, (L1CSR1_ICFI | L1CSR1_ICLFR | L1CSR1_ICE)
        mtspr SPRN_L1CSR1, r0           /* Enable I-Cache */
        isync
        blr

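/*
 * Enable the L1 data cache: disable it, flash-invalidate it and clear
 * the line locks, wait for the flash-clear to finish, then re-enable
 * it with parity checking (CPE).  Returns immediately if the cache is
 * already enabled.
 */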
_GLOBAL(__e500_dcache_setup)
        mfspr r0, SPRN_L1CSR0
        andi. r3, r0, L1CSR0_DCE
        bnelr                           /* Already enabled */
        msync
        isync
        li r0, 0
        mtspr SPRN_L1CSR0, r0           /* Disable */
        msync
        isync
        li r0, (L1CSR0_DCFI | L1CSR0_CLFC)
        mtspr SPRN_L1CSR0, r0           /* Invalidate */
        isync
1:      mfspr r0, SPRN_L1CSR0
        andi. r3, r0, L1CSR0_CLFC
        bne+ 1b                         /* Wait for lock bits reset */
        oris r0, r0, L1CSR0_CPE@h
        ori r0, r0, L1CSR0_DCE
        msync
        isync
        mtspr SPRN_L1CSR0, r0           /* Enable */
        isync
        blr

/*
 * FIXME - we haven't yet done testing to determine a reasonable default
 * value for PW20_WAIT_IDLE_BIT.
 */
#define PW20_WAIT_IDLE_BIT      50 /* 1ms, TB frequency is 41.66 MHz */
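/*
 * Enable the automatic PW20 power-saving state and program the idle
 * count (PWRMGTCR0[PW20_ENT]) after which an idle core enters PW20.
 */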
_GLOBAL(setup_pw20_idle)
        mfspr r3, SPRN_PWRMGTCR0

        /* Set PW20_WAIT bit, enable pw20 state */
        ori r3, r3, PWRMGTCR0_PW20_WAIT
        li r11, PW20_WAIT_IDLE_BIT

        /* Set Automatic PW20 Core Idle Count */
        rlwimi r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT

        mtspr SPRN_PWRMGTCR0, r3

        blr

/*
 * FIXME - we haven't yet done testing to determine a reasonable default
 * value for AV_WAIT_IDLE_BIT.
 */
#define AV_WAIT_IDLE_BIT        50 /* 1ms, TB frequency is 41.66 MHz */
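/*
 * Enable automatic AltiVec unit power-down and program the idle count
 * (PWRMGTCR0[AV_IDLE_CNT]) after which an idle AltiVec unit is powered
 * down.
 */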
_GLOBAL(setup_altivec_idle)
        mfspr r3, SPRN_PWRMGTCR0

        /* Enable AltiVec Idle */
        oris r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h
        li r11, AV_WAIT_IDLE_BIT

        /* Set Automatic AltiVec Idle Count */
        rlwimi r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT

        mtspr SPRN_PWRMGTCR0, r3

        blr

#ifdef CONFIG_PPC_E500MC
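/*
 * e6500: install the AltiVec IVORs (and the LRAT IVOR when the core
 * implements category E.HV), configure the PW20 and AltiVec idle
 * states, then run the common e5500 setup.
 */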
_GLOBAL(__setup_cpu_e6500)
        mflr r6
#ifdef CONFIG_PPC64
        bl setup_altivec_ivors
        /* Touch IVOR42 only if the CPU supports the E.HV category */
        mfspr r10,SPRN_MMUCFG
        rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
        beq 1f
        bl setup_lrat_ivor
1:
#endif
        bl setup_pw20_idle
        bl setup_altivec_idle
        bl __setup_cpu_e5500
        mtlr r6
        blr
#endif /* CONFIG_PPC_E500MC */

#ifdef CONFIG_PPC32
#ifdef CONFIG_E200
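/*
 * e200: enable the Debug APU, then branch (not call) to the IVOR setup,
 * so __setup_e200_ivors returns directly to our caller.
 */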
_GLOBAL(__setup_cpu_e200)
        /* enable dedicated debug exception handling resources (Debug APU) */
        mfspr r3,SPRN_HID0
        ori r3,r3,HID0_DAPUEN@l
        mtspr SPRN_HID0,r3
        b __setup_e200_ivors
#endif /* CONFIG_E200 */

#ifdef CONFIG_E500
#ifndef CONFIG_PPC_E500MC
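/*
 * e500v1/e500v2: enable the L1 caches and install the e500 IVORs.
 * When RapidIO or PCI support is built in, HID1[RFXE] is set as well,
 * presumably so that error responses on those buses are reported as
 * machine checks.
 */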
_GLOBAL(__setup_cpu_e500v1)
_GLOBAL(__setup_cpu_e500v2)
        mflr r4
        bl __e500_icache_setup
        bl __e500_dcache_setup
        bl __setup_e500_ivors
#if defined(CONFIG_FSL_RIO) || defined(CONFIG_FSL_PCI)
        /* Ensure that RFXE is set */
        mfspr r3,SPRN_HID1
        oris r3,r3,HID1_RFXE@h
        mtspr SPRN_HID1,r3
#endif
        mtlr r4
        blr
#else /* CONFIG_PPC_E500MC */
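/*
 * e500mc/e5500 (32-bit): enable the L1 caches and install the e500mc
 * IVORs.  The embedded-hypervisor IVORs (IVOR38-41) are only written
 * on E.HV-capable parts; otherwise CPU_FTR_EMB_HV is cleared in the
 * cpu_spec that r4 points to.
 */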
_GLOBAL(__setup_cpu_e500mc)
_GLOBAL(__setup_cpu_e5500)
        mflr r5
        bl __e500_icache_setup
        bl __e500_dcache_setup
        bl __setup_e500mc_ivors
        /*
         * We only want to touch IVOR38-41 if we're running on hardware
         * that supports category E.HV. The architectural way to determine
         * this is MMUCFG[LPIDSIZE].
         */
        mfspr r3, SPRN_MMUCFG
        rlwinm. r3, r3, 0, MMUCFG_LPIDSIZE
        beq 1f
        bl __setup_ehv_ivors
        b 2f
1:
        lwz r3, CPU_SPEC_FEATURES(r4)
        /* We need this check as cpu_setup is also called for
         * the secondary cores. So, if we have already cleared
         * the feature on the primary core, avoid doing it on the
         * secondary core.
         */
        andis. r6, r3, CPU_FTR_EMB_HV@h
        beq 2f
        rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV
        stw r3, CPU_SPEC_FEATURES(r4)
2:
        mtlr r5
        blr
#endif /* CONFIG_PPC_E500MC */
#endif /* CONFIG_E500 */
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC_BOOK3E_64
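/*
 * Restore-path variant of the e6500 setup, typically run when a core
 * is brought (back) up; it redoes the IVOR and idle-state programming
 * but never touches the cpu_spec feature bits.
 */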
_GLOBAL(__restore_cpu_e6500)
        mflr r5
        bl setup_altivec_ivors
        /* Touch IVOR42 only if the CPU supports the E.HV category */
        mfspr r10,SPRN_MMUCFG
        rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
        beq 1f
        bl setup_lrat_ivor
1:
        bl setup_pw20_idle
        bl setup_altivec_idle
        bl __restore_cpu_e5500
        mtlr r5
        blr

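/*
 * Restore-path setup for e5500: same cache and IVOR programming as
 * __setup_cpu_e5500 below, but the cpu_spec feature bits are left
 * alone (presumably already fixed up by the boot-time setup).
 */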
_GLOBAL(__restore_cpu_e5500)
        mflr r4
        bl __e500_icache_setup
        bl __e500_dcache_setup
        bl __setup_base_ivors
        bl setup_perfmon_ivor
        bl setup_doorbell_ivors
        /*
         * We only want to touch IVOR38-41 if we're running on hardware
         * that supports category E.HV. The architectural way to determine
         * this is MMUCFG[LPIDSIZE].
         */
        mfspr r10,SPRN_MMUCFG
        rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
        beq 1f
        bl setup_ehv_ivors
1:
        mtlr r4
        blr

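/*
 * Boot-time setup for e5500 (64-bit): as the restore path above, but
 * additionally clears CPU_FTR_EMB_HV from the cpu_spec (r4) when the
 * hardware does not implement category E.HV.
 */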
_GLOBAL(__setup_cpu_e5500)
        mflr r5
        bl __e500_icache_setup
        bl __e500_dcache_setup
        bl __setup_base_ivors
        bl setup_perfmon_ivor
        bl setup_doorbell_ivors
        /*
         * We only want to touch IVOR38-41 if we're running on hardware
         * that supports category E.HV. The architectural way to determine
         * this is MMUCFG[LPIDSIZE].
         */
        mfspr r10,SPRN_MMUCFG
        rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
        beq 1f
        bl setup_ehv_ivors
        b 2f
1:
        ld r10,CPU_SPEC_FEATURES(r4)
        LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV)
        andc r10,r10,r9
        std r10,CPU_SPEC_FEATURES(r4)
2:
        mtlr r5
        blr
#endif /* CONFIG_PPC_BOOK3E_64 */