/*
 * Low-level PXA250/210 sleep/wakeup support
 *
 * Initial SA1110 code:
 * Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
 *
 * Adapted for PXA by Nicolas Pitre:
 * Copyright (c) 2002 Monta Vista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/arch/hardware.h>

#include <asm/arch/pxa-regs.h>
#include <asm/arch/pxa2xx-regs.h>

#define MDREFR_KDIV	0x200a4000	// all banks
#define CCCR_SLEEP	0x00000107	// L=7 2N=2 A=0 PPDIS=0 CPDIS=0

	.text

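/*
 * pxa_cpu_save_cp
 *
 * Save the coprocessor state that must survive sleep (clock config,
 * CP access, PID, domain ID, translation table base, auxiliary and
 * main control regs) plus the current virtual stack pointer on the
 * stack.  Clobbers r3 - r10.
 */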
pxa_cpu_save_cp:
	@ get coprocessor registers
	mrc p14, 0, r3, c6, c0, 0	@ clock configuration, for turbo mode
	mrc p15, 0, r4, c15, c1, 0	@ CP access reg
	mrc p15, 0, r5, c13, c0, 0	@ PID
	mrc p15, 0, r6, c3, c0, 0	@ domain ID
	mrc p15, 0, r7, c2, c0, 0	@ translation table base addr
	mrc p15, 0, r8, c1, c1, 0	@ auxiliary control reg
	mrc p15, 0, r9, c1, c0, 0	@ control reg

	bic r3, r3, #2			@ clear frequency change bit

	@ store them plus current virtual stack ptr on stack
	mov r10, sp
	stmfd sp!, {r3 - r10}

	mov pc, lr

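/*
 * pxa_cpu_save_sp
 *
 * Compute the physical address of the current stack pointer via
 * sleep_phys_sp() and store it in sleep_save_sp, where the resume
 * code can find it before the MMU is turned back on.
 */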
pxa_cpu_save_sp:
	@ preserve phys address of stack
	mov r0, sp
	str lr, [sp, #-4]!
	bl sleep_phys_sp
	ldr r1, =sleep_save_sp
	str r0, [r1]
	ldr pc, [sp], #4

#ifdef CONFIG_PXA3xx
/*
 * pxa3xx_cpu_suspend() - forces CPU into sleep state (S2D3C4)
 *
 * NOTE: unfortunately, pxa_cpu_save_cp can not be reused here since
 * the auxiliary control register address is different between pxa3xx
 * and pxa{25x,27x}
 */

ENTRY(pxa3xx_cpu_suspend)

#ifndef CONFIG_IWMMXT
	mra r2, r3, acc0
#endif
	stmfd sp!, {r2 - r12, lr}	@ save registers on stack

	mrc p14, 0, r3, c6, c0, 0	@ clock configuration, for turbo mode
	mrc p15, 0, r4, c15, c1, 0	@ CP access reg
	mrc p15, 0, r5, c13, c0, 0	@ PID
	mrc p15, 0, r6, c3, c0, 0	@ domain ID
	mrc p15, 0, r7, c2, c0, 0	@ translation table base addr
	mrc p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mrc p15, 0, r9, c1, c0, 0	@ control reg

	bic r3, r3, #2			@ clear frequency change bit

	@ store them plus current virtual stack ptr on stack
	mov r10, sp
	stmfd sp!, {r3 - r10}

	@ store physical address of stack pointer
	mov r0, sp
	bl sleep_phys_sp
	ldr r1, =sleep_save_sp
	str r0, [r1]

	@ clean data cache
	bl xsc3_flush_kern_cache_all

	mov r0, #0x06			@ S2D3C4 mode
	mcr p14, 0, r0, c7, c0, 0	@ enter sleep

20:	b 20b				@ waiting for sleep

	.data
	.align 5
/*
 * pxa3xx_cpu_resume
 */

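@ Resume entry point: runs with the MMU off, restores the coprocessor
@ state saved by pxa3xx_cpu_suspend, temporarily maps resume_turn_on_mmu
@ into the saved page table and then branches there to re-enable the MMU.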
ENTRY(pxa3xx_cpu_resume)

	mov r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE	@ set SVC, irqs off
	msr cpsr_c, r0

	ldr r0, sleep_save_sp		@ stack phys addr
	ldmfd r0, {r3 - r9, sp}		@ CP regs + virt stack ptr

	mov r1, #0
	mcr p15, 0, r1, c7, c7, 0	@ invalidate I & D caches, BTB
	mcr p15, 0, r1, c7, c10, 4	@ drain write (&fill) buffer
	mcr p15, 0, r1, c7, c5, 4	@ flush prefetch buffer
	mcr p15, 0, r1, c8, c7, 0	@ invalidate I & D TLBs

	mcr p14, 0, r3, c6, c0, 0	@ clock configuration, turbo mode.
	mcr p15, 0, r4, c15, c1, 0	@ CP access reg
	mcr p15, 0, r5, c13, c0, 0	@ PID
	mcr p15, 0, r6, c3, c0, 0	@ domain ID
	mcr p15, 0, r7, c2, c0, 0	@ translation table base addr
	mcr p15, 0, r8, c1, c0, 1	@ auxiliary control reg

	@ temporarily map resume_turn_on_mmu into the page table,
	@ otherwise prefetch abort occurs after MMU is turned on
	mov r1, r7
	bic r1, r1, #0x00ff
	bic r1, r1, #0x3f00
	ldr r2, =0x542e

	adr r3, resume_turn_on_mmu
	mov r3, r3, lsr #20
	orr r4, r2, r3, lsl #20
	ldr r5, [r1, r3, lsl #2]
	str r4, [r1, r3, lsl #2]

	@ also map the page table itself, so the temporary entries can
	@ still be restored once the MMU is turned on
	mov r6, r1, lsr #20
	orr r7, r2, r6, lsl #20
	ldr r8, [r1, r6, lsl #2]
	str r7, [r1, r6, lsl #2]

	ldr r2, =pxa3xx_resume_after_mmu	@ absolute virtual address
	b resume_turn_on_mmu			@ cache align execution

	.text
pxa3xx_resume_after_mmu:
	/* restore the temporary mapping */
	str r5, [r1, r3, lsl #2]
	str r8, [r1, r6, lsl #2]
	b resume_after_mmu

#endif /* CONFIG_PXA3xx */

#ifdef CONFIG_PXA27x
/*
 * pxa27x_cpu_suspend()
 *
 * Forces CPU into sleep state.
 *
 * r0 = value for PWRMODE M field for desired sleep state
 */

ENTRY(pxa27x_cpu_suspend)

#ifndef CONFIG_IWMMXT
	mra r2, r3, acc0
#endif
	stmfd sp!, {r2 - r12, lr}	@ save registers on stack

	bl pxa_cpu_save_cp

	mov r5, r0			@ save sleep mode
	bl pxa_cpu_save_sp

	@ clean data cache
	bl xscale_flush_kern_cache_all

	@ Put the processor to sleep
	@ (also workaround for sighting 28071)

	@ prepare value for sleep mode
	mov r1, r5			@ sleep mode

	@ prepare pointer to physical address 0 (virtual mapping in generic.c)
	mov r2, #UNCACHED_PHYS_0

	@ prepare SDRAM refresh settings
	ldr r4, =MDREFR
	ldr r5, [r4]

	@ enable SDRAM self-refresh mode
	orr r5, r5, #MDREFR_SLFRSH

	@ set SDCLKx divide-by-2 bits (this is part of a workaround for Errata 50)
	ldr r6, =MDREFR_KDIV
	orr r5, r5, r6

	@ Intel PXA270 Specification Update notes problems sleeping
	@ with core operating above 91 MHz
	@ (see Errata 50, ...processor does not exit from sleep...)

	ldr r6, =CCCR
	ldr r8, [r6]			@ keep original value for resume

	ldr r7, =CCCR_SLEEP		@ prepare CCCR sleep value
	mov r0, #0x2			@ prepare value for CLKCFG

	@ align execution to a cache line
	b pxa_cpu_do_suspend
#endif

#ifdef CONFIG_PXA25x
/*
 * pxa25x_cpu_suspend()
 *
 * Forces CPU into sleep state.
 *
 * r0 = value for PWRMODE M field for desired sleep state
 */

ENTRY(pxa25x_cpu_suspend)
	stmfd sp!, {r2 - r12, lr}	@ save registers on stack

	bl pxa_cpu_save_cp

	mov r5, r0			@ save sleep mode
	bl pxa_cpu_save_sp

	@ clean data cache
	bl xscale_flush_kern_cache_all

	@ prepare value for sleep mode
	mov r1, r5			@ sleep mode

	@ prepare pointer to physical address 0 (virtual mapping in generic.c)
	mov r2, #UNCACHED_PHYS_0

	@ prepare SDRAM refresh settings
	ldr r4, =MDREFR
	ldr r5, [r4]

	@ enable SDRAM self-refresh mode
	orr r5, r5, #MDREFR_SLFRSH

	@ Intel PXA255 Specification Update notes problems
	@ about suspending with PXBus operating above 133MHz
	@ (see Errata 31, GPIO output signals, ... unpredictable in sleep)
	@
	@ We keep the change-down as close to the actual suspend of SDRAM
	@ as possible, to avoid messing about with the refresh clock, since
	@ the system will restore the original speed settings on resume.
	@
	@ Ben Dooks, 13-Sep-2004

	ldr r6, =CCCR
	ldr r8, [r6]			@ keep original value for resume

	@ ensure x1 for run and turbo mode with memory clock
	bic r7, r8, #CCCR_M_MASK | CCCR_N_MASK
	orr r7, r7, #(1<<5) | (2<<7)

	@ check that the memory frequency is within limits
	and r14, r7, #CCCR_L_MASK
	teq r14, #1
	bicne r7, r7, #CCCR_L_MASK
	orrne r7, r7, #1		@ 99.53MHz

	@ get ready for the change

	@ note, turbo is not preserved over sleep so there is no
	@ point in preserving it here. we save it on the stack with the
	@ other CP registers instead.
	mov r0, #0
	mcr p14, 0, r0, c6, c0, 0
	orr r0, r0, #2			@ initiate change bit
	b pxa_cpu_do_suspend
#endif

	.ltorg
	.align 5
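/*
 * pxa_cpu_do_suspend
 *
 * Final, cache-aligned suspend sequence, entered from pxa25x_cpu_suspend
 * and pxa27x_cpu_suspend with:
 *   r0 = CLKCFG value, r1 = PWRMODE value, r2 = UNCACHED_PHYS_0,
 *   r4 = &MDREFR, r5 = MDREFR sleep value,
 *   r6 = &CCCR, r7 = CCCR sleep value, r8 = original CCCR value
 */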
pxa_cpu_do_suspend:

	@ All needed values are now in registers.
	@ These last instructions should be in cache

	@ initiate the frequency change...
	str r7, [r6]
	mcr p14, 0, r0, c6, c0, 0

	@ restore the original cpu speed value for resume
	str r8, [r6]

	@ need 6 13-MHz cycles before changing PWRMODE
	@ just set frequency to 91-MHz... 6*91/13 = 42

	mov r0, #42
10:	subs r0, r0, #1
	bne 10b

	@ Do not reorder...
	@ Intel PXA270 Specification Update notes problems performing
	@ external accesses after SDRAM is put in self-refresh mode
	@ (see Errata 39 ...hangs when entering self-refresh mode)

	@ force address lines low by reading at physical address 0
	ldr r3, [r2]

	@ put SDRAM into self-refresh
	str r5, [r4]

	@ enter sleep mode
	mcr p14, 0, r1, c7, c0, 0	@ PWRMODE

20:	b 20b				@ loop waiting for sleep

/*
 * pxa_cpu_resume()
 *
 * entry point from bootloader into kernel during resume
 *
 * Note: Yes, part of the following code is located in the .data section.
 * This is to allow sleep_save_sp to be accessed with a relative load
 * while we can't rely on any MMU translation.  We could have put
 * sleep_save_sp in the .text section as well, but some setups might
 * insist on .text being truly read-only.
 */

	.data
	.align 5
ENTRY(pxa_cpu_resume)
	mov r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE	@ set SVC, irqs off
	msr cpsr_c, r0

	ldr r0, sleep_save_sp		@ stack phys addr
	ldr r2, =resume_after_mmu	@ its absolute virtual address
	ldmfd r0, {r3 - r9, sp}		@ CP regs + virt stack ptr

	mov r1, #0
	mcr p15, 0, r1, c8, c7, 0	@ invalidate I & D TLBs
	mcr p15, 0, r1, c7, c7, 0	@ invalidate I & D caches, BTB

#ifdef CONFIG_XSCALE_CACHE_ERRATA
	bic r9, r9, #0x0004		@ see cpu_xscale_proc_init
#endif

	mcr p14, 0, r3, c6, c0, 0	@ clock configuration, turbo mode.
	mcr p15, 0, r4, c15, c1, 0	@ CP access reg
	mcr p15, 0, r5, c13, c0, 0	@ PID
	mcr p15, 0, r6, c3, c0, 0	@ domain ID
	mcr p15, 0, r7, c2, c0, 0	@ translation table base addr
	mcr p15, 0, r8, c1, c1, 0	@ auxiliary control reg
	b resume_turn_on_mmu		@ cache align execution

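	@ resume_turn_on_mmu expects r9 = saved CP15 control register value
	@ and r2 = virtual address to continue at once the MMU is enabled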
	.align 5
resume_turn_on_mmu:
	mcr p15, 0, r9, c1, c0, 0	@ turn on MMU, caches, etc.

	@ Let us ensure we jump to resume_after_mmu only when the mcr above
	@ actually took effect. They call it the "cpwait" operation.
	mrc p15, 0, r1, c2, c0, 0	@ queue a dependency on CP15
	sub pc, r2, r1, lsr #32		@ jump to virtual addr
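	@ (r1, lsr #32 is always 0, so this simply jumps to r2; reading r1
	@  stalls the branch until the mrc result is available, i.e. until
	@  the CP15 write above has taken effect)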
	nop
	nop
	nop

sleep_save_sp:
	.word 0				@ preserve stack phys ptr here

	.text
resume_after_mmu:
#ifdef CONFIG_XSCALE_CACHE_ERRATA
	bl cpu_xscale_proc_init
#endif
	ldmfd sp!, {r2, r3}
#ifndef CONFIG_IWMMXT
	mar acc0, r2, r3
#endif
	ldmfd sp!, {r4 - r12, pc}	@ return to caller