/*
 * This file contains low level CPU setup functions.
 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>
#include <asm/offsets.h>
#include <asm/cache.h>

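/* CPU setup entry points.  Judging from the code below, each
 * __setup_cpu_* routine is entered with r5 pointing at the matching
 * cpu_spec entry (see the CPU_SPEC_FEATURES accesses further down),
 * and r4 is free scratch used to preserve LR across the nested bl calls.
 */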
_GLOBAL(__setup_cpu_601)
	blr
_GLOBAL(__setup_cpu_603)
	b	setup_common_caches
_GLOBAL(__setup_cpu_604)
	mflr	r4
	bl	setup_common_caches
	bl	setup_604_hid0
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_750)
	mflr	r4
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_750cx)
	mflr	r4
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	bl	setup_750cx
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_750fx)
	mflr	r4
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	bl	setup_750fx
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_7400)
	mflr	r4
	bl	setup_7400_workarounds
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_7410)
	mflr	r4
	bl	setup_7410_workarounds
	bl	setup_common_caches
	bl	setup_750_7400_hid0
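	/* 7410 only: make sure the extra L2 control register
	 * (L2CR2) starts out clear */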
	li	r3,0
	mtspr	SPRN_L2CR2,r3
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_745x)
	mflr	r4
	bl	setup_common_caches
	bl	setup_745x_specifics
	mtlr	r4
	blr

/* Enable caches for 603s, 604, 750 & 7400 */
setup_common_caches:
	mfspr	r11,SPRN_HID0
	andi.	r0,r11,HID0_DCE
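	/* cr0.eq is set here iff the D-cache is currently disabled */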
	ori	r11,r11,HID0_ICE|HID0_DCE
	ori	r8,r11,HID0_ICFI
	bne	1f			/* don't invalidate the D-cache */
	ori	r8,r8,HID0_DCI		/* unless it wasn't enabled */
1:	sync
	mtspr	SPRN_HID0,r8		/* enable and invalidate caches */
	sync
	mtspr	SPRN_HID0,r11		/* enable caches */
	sync
	isync
	blr

/* 604, 604e, 604ev, ...
 * Enable superscalar execution & branch history table
 */
setup_604_hid0:
	mfspr	r11,SPRN_HID0
	ori	r11,r11,HID0_SIED|HID0_BHTE
	ori	r8,r11,HID0_BTCD
	sync
	mtspr	SPRN_HID0,r8	/* flush branch target address cache */
	sync			/* on 604e/604r */
	mtspr	SPRN_HID0,r11
	sync
	isync
	blr

/* 7400 revisions <= 2.7 and 7410 rev 1.0 suffer from some
 * errata that we work around here.
 * Moto MPC710CE.pdf describes them; they are errata
 * #3, #4 and #5.
 * Note that we assume the firmware didn't choose to
 * apply other workarounds (there are other ones documented
 * in the .pdf). It appears that Apple's firmware only works
 * around #3, with the same fix we use. We may want to
 * check whether the CPU is using 60x bus mode, in which case
 * the workaround for errata #4 is useless. Also, we may
 * want to explicitly clear HID0_NOPDST as it is not
 * needed once we have applied workaround #5 (though it's
 * not set by Apple's firmware, at least).
 */
setup_7400_workarounds:
	mfpvr	r3
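	/* keep the low 12 bits of the PVR: the processor revision
	 * (e.g. 0x0207 for rev 2.7) */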
	rlwinm	r3,r3,0,20,31
	cmpwi	0,r3,0x0207
	ble	1f
	blr
setup_7410_workarounds:
	mfpvr	r3
	rlwinm	r3,r3,0,20,31
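	/* revision again: only rev 1.0 (0x0100) needs the fixes,
	 * any other revision returns via the bnelr */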
	cmpwi	0,r3,0x0100
	bnelr
1:
	mfspr	r11,SPRN_MSSSR0
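	/* each fix below clears the relevant MSSSR0 field with rlwinm,
	 * then ORs in the new value with oris */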
	/* Errata #3: Set L1OPQ_SIZE to 0x10 */
	rlwinm	r11,r11,0,9,6
	oris	r11,r11,0x0100
	/* Errata #4: Set L2MQ_SIZE to 1 (check for MPX mode first ?) */
	oris	r11,r11,0x0002
	/* Errata #5: Set DRLT_SIZE to 0x01 */
	rlwinm	r11,r11,0,5,2
	oris	r11,r11,0x0800
	sync
	mtspr	SPRN_MSSSR0,r11
	sync
	isync
	blr

/* 740/750/7400/7410
 * Enable Store Gathering (SGE), Address Broadcast (ABE),
 * Branch History Table (BHTE), Branch Target ICache (BTIC)
 * Dynamic Power Management (DPM), Speculative (SPD)
 * Clear Instruction cache throttling (ICTC)
 */
setup_750_7400_hid0:
	mfspr	r11,SPRN_HID0
	ori	r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
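	/* feature section: the fixup code nops out the DPM enable at
	 * boot on CPUs whose cputable entry sets CPU_FTR_NO_DPM */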
BEGIN_FTR_SECTION
	oris	r11,r11,HID0_DPM@h	/* enable dynamic power mgmt */
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
	li	r3,HID0_SPD
	andc	r11,r11,r3		/* clear SPD: enable speculative */
	li	r3,0
	mtspr	SPRN_ICTC,r3		/* Instruction Cache Throttling off */
	isync
	mtspr	SPRN_HID0,r11
	sync
	isync
	blr

/* 750cx specific
 * Looks like we have to disable the NAP feature for some PLL settings...
 * (waiting for confirmation)
 */
setup_750cx:
	mfspr	r10, SPRN_HID1
	rlwinm	r10,r10,4,28,31
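	/* r10 now holds HID1[0:3], the PLL configuration; NAP must stay
	 * disabled when it is 7, 9 or 11, hence the compares below */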
	cmpwi	cr0,r10,7
	cmpwi	cr1,r10,9
	cmpwi	cr2,r10,11
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr2+eq
	bnelr
	lwz	r6,CPU_SPEC_FEATURES(r5)
	li	r7,CPU_FTR_CAN_NAP
	andc	r6,r6,r7
	stw	r6,CPU_SPEC_FEATURES(r5)
	blr

/* 750fx specific
 */
setup_750fx:
	blr

/* MPC 745x
 * Enable Store Gathering (SGE), Branch Folding (FOLD)
 * Branch History Table (BHTE), Branch Target ICache (BTIC)
 * Dynamic Power Management (DPM), Speculative (SPD)
 * Ensure our data cache instructions really operate.
 * Timebase has to be running or we wouldn't have made it here,
 * just ensure we don't disable it.
 * Clear Instruction cache throttling (ICTC)
 * Enable L2 HW prefetch
 */
setup_745x_specifics:
	/* We check for the presence of an L3 cache setup by
	 * the firmware. If one is present, we disable the NAP
	 * capability, as NAP is known to be bogus on rev 2.1
	 * and earlier.
	 */
	mfspr	r11,SPRN_L3CR
	andis.	r11,r11,L3CR_L3E@h
	beq	1f
	lwz	r6,CPU_SPEC_FEATURES(r5)
	andi.	r0,r6,CPU_FTR_L3_DISABLE_NAP
	beq	1f
	li	r7,CPU_FTR_CAN_NAP
	andc	r6,r6,r7
	stw	r6,CPU_SPEC_FEATURES(r5)
1:
	mfspr	r11,SPRN_HID0

	/* All of the bits we have to set...
	 */
	ori	r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_LRSTK | HID0_BTIC
BEGIN_FTR_SECTION
	xori	r11,r11,HID0_BTIC
END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
BEGIN_FTR_SECTION
	oris	r11,r11,HID0_DPM@h	/* enable dynamic power mgmt */
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)

	/* All of the bits we have to clear...
	 */
	li	r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI
	andc	r11,r11,r3	/* clear SPD (enable speculative), NOPDST and NOPTI */
	li	r3,0

	mtspr	SPRN_ICTC,r3		/* Instruction Cache Throttling off */
	isync
	mtspr	SPRN_HID0,r11
	sync
	isync

	/* Enable L2 HW prefetch
	 */
	mfspr	r3,SPRN_MSSCR0
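	/* the low two bits of MSSCR0 are the L2 hardware prefetch
	 * enables */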
	ori	r3,r3,3
	sync
	mtspr	SPRN_MSSCR0,r3
	sync
	isync
	blr

/* Definitions for the table used to save CPU state */
#define CS_HID0		0
#define CS_HID1		4
#define CS_HID2		8
#define CS_MSSCR0	12
#define CS_MSSSR0	16
#define CS_ICTRL	20
#define CS_LDSTCR	24
#define CS_LDSTDB	28
#define CS_SIZE		32

	.data
	.balign	L1_CACHE_LINE_SIZE
cpu_state_storage:
	.space	CS_SIZE
	.balign	L1_CACHE_LINE_SIZE,0
	.text

/* Called in normal context to back up CPU 0 state. This
 * function is also called for machine sleep. It does not
 * back up the cache settings, nor the MMU setup, BATs, etc...
 * but rather the "special" registers like HID0, HID1,
 * MSSCR0, etc...
 */
_GLOBAL(__save_cpu_setup)
	/* Some CR fields are volatile, we back them all up */
	mfcr	r7

	/* Get storage ptr */
	lis	r5,cpu_state_storage@h
	ori	r5,r5,cpu_state_storage@l

	/* Save HID0 (common to all CONFIG_6xx cpus) */
	mfspr	r3,SPRN_HID0
	stw	r3,CS_HID0(r5)

	/* Now deal with CPU type dependent registers */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
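	/* r3 = PVR[0:15], the processor version, compared below */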
	cmplwi	cr0,r3,0x8000	/* 7450 */
	cmplwi	cr1,r3,0x000c	/* 7400 */
	cmplwi	cr2,r3,0x800c	/* 7410 */
	cmplwi	cr3,r3,0x8001	/* 7455 */
	cmplwi	cr4,r3,0x8002	/* 7457 */
	cmplwi	cr5,r3,0x8003	/* 7447A */
	cmplwi	cr6,r3,0x7000	/* 750FX */
	/* cr1 is 7400 || 7410 */
	cror	4*cr1+eq,4*cr1+eq,4*cr2+eq
	/* cr0 is 74xx */
	cror	4*cr0+eq,4*cr0+eq,4*cr3+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr4+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr5+eq
	bne	1f
	/* Backup 74xx specific regs */
	mfspr	r4,SPRN_MSSCR0
	stw	r4,CS_MSSCR0(r5)
	mfspr	r4,SPRN_MSSSR0
	stw	r4,CS_MSSSR0(r5)
	beq	cr1,1f
	/* Backup 745x specific registers */
	mfspr	r4,SPRN_HID1
	stw	r4,CS_HID1(r5)
	mfspr	r4,SPRN_ICTRL
	stw	r4,CS_ICTRL(r5)
	mfspr	r4,SPRN_LDSTCR
	stw	r4,CS_LDSTCR(r5)
	mfspr	r4,SPRN_LDSTDB
	stw	r4,CS_LDSTDB(r5)
1:
	bne	cr6,1f
	/* Backup 750FX specific registers */
	mfspr	r4,SPRN_HID1
	stw	r4,CS_HID1(r5)
	/* If rev 2.x, backup HID2 */
	mfspr	r3,SPRN_PVR
	andi.	r3,r3,0xff00
	cmpwi	cr0,r3,0x0200
	bne	1f
	mfspr	r4,SPRN_HID2
	stw	r4,CS_HID2(r5)
1:
	mtcr	r7
	blr

/* Called with no MMU context (typically MSR:IR/DR off) to
 * restore CPU state as backed up by the previous
 * function. This does not include cache settings.
 */
_GLOBAL(__restore_cpu_setup)
	/* Some CR fields are volatile, we back them all up */
	mfcr	r7

	/* Get storage ptr */
	lis	r5,(cpu_state_storage-KERNELBASE)@h
	ori	r5,r5,cpu_state_storage@l
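	/* translation is off here, so use the physical address; only
	 * the high half needs the -KERNELBASE adjustment since
	 * KERNELBASE's low 16 bits are zero */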

	/* Restore HID0 */
	lwz	r3,CS_HID0(r5)
	sync
	isync
	mtspr	SPRN_HID0,r3
	sync
	isync

	/* Now deal with CPU type dependent registers */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,0x8000	/* 7450 */
	cmplwi	cr1,r3,0x000c	/* 7400 */
	cmplwi	cr2,r3,0x800c	/* 7410 */
	cmplwi	cr3,r3,0x8001	/* 7455 */
	cmplwi	cr4,r3,0x8002	/* 7457 */
	cmplwi	cr5,r3,0x8003	/* 7447A */
	cmplwi	cr6,r3,0x7000	/* 750FX */
	/* cr1 is 7400 || 7410 */
	cror	4*cr1+eq,4*cr1+eq,4*cr2+eq
	/* cr0 is 74xx */
	cror	4*cr0+eq,4*cr0+eq,4*cr3+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr4+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr5+eq
	bne	2f
	/* Restore 74xx specific regs */
	lwz	r4,CS_MSSCR0(r5)
	sync
	mtspr	SPRN_MSSCR0,r4
	sync
	isync
	lwz	r4,CS_MSSSR0(r5)
	sync
	mtspr	SPRN_MSSSR0,r4
	sync
	isync
	bne	cr2,1f
	/* Clear 7410 L2CR2 */
	li	r4,0
	mtspr	SPRN_L2CR2,r4
1:	beq	cr1,2f
	/* Restore 745x specific registers */
	lwz	r4,CS_HID1(r5)
	sync
	mtspr	SPRN_HID1,r4
	isync
	sync
	lwz	r4,CS_ICTRL(r5)
	sync
	mtspr	SPRN_ICTRL,r4
	isync
	sync
	lwz	r4,CS_LDSTCR(r5)
	sync
	mtspr	SPRN_LDSTCR,r4
	isync
	sync
	lwz	r4,CS_LDSTDB(r5)
	sync
	mtspr	SPRN_LDSTDB,r4
	isync
	sync
2:	bne	cr6,1f
	/* Restore 750FX specific registers:
	 * restore HID2 on rev 2.x, and restore the PLL config
	 * and switch to PLL 0 on all revisions.
	 */
	/* If rev 2.x, restore HID2 with low voltage bit cleared */
	mfspr	r3,SPRN_PVR
	andi.	r3,r3,0xff00
	cmpwi	cr0,r3,0x0200
	bne	4f
	lwz	r4,CS_HID2(r5)
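	/* the rlwinm clears HID2 bit 18, the low voltage enable bit */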
	rlwinm	r4,r4,0,19,17
	mtspr	SPRN_HID2,r4
	sync
4:
	lwz	r4,CS_HID1(r5)
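	/* clear HID1 bit 15 (PLL select) so we run from PLL 0 while
	 * the saved PLL configuration locks */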
	rlwinm	r5,r4,0,16,14
	mtspr	SPRN_HID1,r5
	/* Wait for PLL to stabilize */
	mftbl	r5
3:	mftbl	r6
	sub	r6,r6,r5
	cmplwi	cr0,r6,10000
	ble	3b
	/* Setup final PLL */
	mtspr	SPRN_HID1,r4
1:
	mtcr	r7
	blr
