/*
 * arch/arm/mach-omap2/sleep34xx.S
 *
 * (C) Copyright 2007
 * Texas Instruments
 * Karthik Dasu <karthik-dp@ti.com>
 *
 * (C) Copyright 2004
 * Texas Instruments, <www.ti.com>
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */
#include <linux/linkage.h>

#include <asm/assembler.h>

#include "omap34xx.h"
#include "iomap.h"
#include "cm3xxx.h"
#include "prm3xxx.h"
#include "sdrc.h"
#include "sram.h"
#include "control.h"

/*
 * Register access definitions
 */
#define SDRC_SCRATCHPAD_SEM_OFFS	0xc
#define SDRC_SCRATCHPAD_SEM_V	OMAP343X_SCRATCHPAD_REGADDR\
					(SDRC_SCRATCHPAD_SEM_OFFS)
#define PM_PREPWSTST_CORE_P	OMAP3430_PRM_BASE + CORE_MOD +\
					OMAP3430_PM_PREPWSTST
#define PM_PWSTCTRL_MPU_P	OMAP3430_PRM_BASE + MPU_MOD + OMAP2_PM_PWSTCTRL
#define CM_IDLEST1_CORE_V	OMAP34XX_CM_REGADDR(CORE_MOD, CM_IDLEST1)
#define CM_IDLEST_CKGEN_V	OMAP34XX_CM_REGADDR(PLL_MOD, CM_IDLEST)
#define SRAM_BASE_P		OMAP3_SRAM_PA
#define CONTROL_STAT		OMAP343X_CTRL_BASE + OMAP343X_CONTROL_STATUS
#define CONTROL_MEM_RTA_CTRL	(OMAP343X_CTRL_BASE +\
					OMAP36XX_CONTROL_MEM_RTA_CTRL)

/* Move this when a correct place is available */
#define SCRATCHPAD_MEM_OFFS	0x310
#define SCRATCHPAD_BASE_P	(OMAP343X_CTRL_BASE +\
					OMAP343X_CONTROL_MEM_WKUP +\
					SCRATCHPAD_MEM_OFFS)
#define SDRC_POWER_V		OMAP34XX_SDRC_REGADDR(SDRC_POWER)
#define SDRC_SYSCONFIG_P	(OMAP343X_SDRC_BASE + SDRC_SYSCONFIG)
#define SDRC_MR_0_P		(OMAP343X_SDRC_BASE + SDRC_MR_0)
#define SDRC_EMR2_0_P		(OMAP343X_SDRC_BASE + SDRC_EMR2_0)
#define SDRC_MANUAL_0_P		(OMAP343X_SDRC_BASE + SDRC_MANUAL_0)
#define SDRC_MR_1_P		(OMAP343X_SDRC_BASE + SDRC_MR_1)
#define SDRC_EMR2_1_P		(OMAP343X_SDRC_BASE + SDRC_EMR2_1)
#define SDRC_MANUAL_1_P		(OMAP343X_SDRC_BASE + SDRC_MANUAL_1)
#define SDRC_DLLA_STATUS_V	OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS)
#define SDRC_DLLA_CTRL_V	OMAP34XX_SDRC_REGADDR(SDRC_DLLA_CTRL)

/*
 * This file needs to be built unconditionally as ARM to interoperate correctly
 * with non-Thumb-2-capable firmware.
 */
	.arm

/*
 * API functions
 */

	.text
/*
 * L2 cache needs to be toggled for stable OFF mode functionality on 3630.
 * This function sets up a flag that will allow for this toggling to take
 * place on 3630. Hopefully some version in the future may not need this.
 */
ENTRY(enable_omap3630_toggle_l2_on_restore)
	stmfd	sp!, {lr}	@ save registers on stack
	/* Set up so that we will disable and enable L2 */
	mov	r1, #0x1
	adrl	r2, l2dis_3630	@ may be too distant for plain adr
	str	r1, [r2]
	ldmfd	sp!, {pc}	@ restore regs and return
ENDPROC(enable_omap3630_toggle_l2_on_restore)

	.text
/* Function to call ROM code to save the secure RAM context */
	.align	3
ENTRY(save_secure_ram_context)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack
	adr	r3, api_params		@ r3 points to parameters
	str	r0, [r3,#0x4]		@ r0 has sdram address
	ldr	r12, high_mask
	and	r3, r3, r12
	ldr	r12, sram_phy_addr_mask
	orr	r3, r3, r12
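	/*
	 * The masking above presumably converts the run-time address of
	 * api_params into its SRAM physical alias: the low 16 bits are kept
	 * (high_mask) and the SRAM physical base (sram_phy_addr_mask) is
	 * OR-ed in before the pointer is handed to the ROM code.
	 */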
	mov	r0, #25			@ set service ID for PPA
	mov	r12, r0			@ copy secure service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	dsb				@ data synchronization barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
	nop
	nop
	nop
	nop
	ldmfd	sp!, {r4 - r11, pc}
	.align
sram_phy_addr_mask:
	.word	SRAM_BASE_P
high_mask:
	.word	0xffff
api_params:
	.word	0x4, 0x0, 0x0, 0x1, 0x1
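	/*
	 * api_params is the argument block passed to PPA service 25 above:
	 * the second word is overwritten at run time with the SDRAM buffer
	 * address supplied in r0 (see "str r0, [r3,#0x4]" above); the
	 * remaining constant words are assumed to be fixed by the PPA
	 * service interface.
	 */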
ENDPROC(save_secure_ram_context)
ENTRY(save_secure_ram_context_sz)
	.word	. - save_secure_ram_context

/*
 * ======================
 * == Idle entry point ==
 * ======================
 */

/*
 * Forces OMAP into idle state
 *
 * omap34xx_cpu_suspend() - This bit of code saves the CPU context if needed
 * and executes the WFI instruction. Calling WFI effectively changes the
 * power domain states to the desired target power states.
 *
 * Notes:
 * - only the minimum set of functions gets copied to internal SRAM at boot
 *   and after wake-up from OFF mode, cf. omap_push_sram_idle. The function
 *   pointers in SDRAM or SRAM are called depending on the desired low power
 *   target state.
 * - when the OMAP wakes up it continues at different execution points
 *   depending on the low power mode (non-OFF vs OFF modes),
 *   cf. 'Resume path for xxx mode' comments.
 */
	.align	3
ENTRY(omap34xx_cpu_suspend)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack

	/*
	 * r0 contains information about saving context:
	 *   0 - No context lost
	 *   1 - Only L1 and logic lost
	 *   2 - Only L2 lost (even though L1 is retained, we clean it along with L2)
	 *   3 - Both L1 and L2 lost and logic lost
	 */

	/*
	 * For OFF mode: save context and jump to WFI in SDRAM (omap3_do_wfi)
	 * For non-OFF modes: jump to the WFI code in SRAM (omap3_do_wfi_sram)
	 */
	ldr	r4, omap3_do_wfi_sram_addr
	ldr	r5, [r4]
	cmp	r0, #0x0		@ If no context save required,
	bxeq	r5			@ jump to the WFI code in SRAM

	/* Otherwise fall through to the save context code */
save_context_wfi:
	/*
	 * Jump out to the kernel flush routine:
	 *  - reusing that code is better
	 *  - it executes in a cached space so is faster than refetching per block
	 *  - it should be faster and will track kernel changes
	 *  - we 'might' have to copy the address, load it and jump to it
	 * Flush all data from the L1 data cache before disabling the
	 * SCTLR.C bit.
	 */
	ldr	r1, kernel_flush
	mov	lr, pc
	bx	r1

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C makes all data accesses
	 * strongly ordered, so they no longer hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)	@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Invalidate the L1 data cache. Even though only an invalidate is
	 * necessary, the exported flush API is used here. Doing a clean
	 * on an already clean cache is almost a NOP.
	 */
	ldr	r1, kernel_flush
	blx	r1
	/*
	 * The kernel doesn't interwork: v7_flush_dcache_all in particular will
	 * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
	 * This sequence switches back to ARM. Note that .align may insert a
	 * nop: bx pc needs to be word-aligned in order to work.
	 */
 THUMB(	.thumb	)
 THUMB(	.align	)
 THUMB(	bx	pc	)
 THUMB(	nop	)
	.arm

	b	omap3_do_wfi

/*
 * Local variables
 */
omap3_do_wfi_sram_addr:
	.word	omap3_do_wfi_sram
kernel_flush:
	.word	v7_flush_dcache_all

/* ===================================
 * == WFI instruction => Enter idle ==
 * ===================================
 */

/*
 * Do WFI instruction
 * Includes the resume path for non-OFF modes
 *
 * This code gets copied to internal SRAM and is accessible
 * from both SDRAM and SRAM:
 * - executed from SRAM for non-off modes (omap3_do_wfi_sram),
 * - executed from SDRAM for OFF mode (omap3_do_wfi).
 */
	.align	3
ENTRY(omap3_do_wfi)
	ldr	r4, sdrc_power		@ read the SDRC_POWER register
	ldr	r5, [r4]		@ read the contents of SDRC_POWER
	orr	r5, r5, #0x40		@ enable self refresh on idle req
	str	r5, [r4]		@ write back to SDRC_POWER register

	/* Data memory barrier and Data sync barrier */
	dsb
	dmb

/*
 * ===================================
 * == WFI instruction => Enter idle ==
 * ===================================
 */
	wfi				@ wait for interrupt

/*
 * ===================================
 * == Resume path for non-OFF modes ==
 * ===================================
 */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

/*
 * This function implements the erratum ID i581 WA:
 * SDRC state restore before accessing the SDRAM
 *
 * Only used at return from non-OFF mode. For OFF
 * mode the ROM code configures the SDRC and
 * the DPLL before calling the restore code directly
 * from DDR.
 */

/* Make sure SDRC accesses are ok */
wait_sdrc_ok:

/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
	ldr	r4, cm_idlest_ckgen
wait_dpll3_lock:
	ldr	r5, [r4]
	tst	r5, #1
	beq	wait_dpll3_lock

	ldr	r4, cm_idlest1_core
wait_sdrc_ready:
	ldr	r5, [r4]
	tst	r5, #0x2
	bne	wait_sdrc_ready
	/* allow DLL powerdown upon hw idle req */
	ldr	r4, sdrc_power
	ldr	r5, [r4]
	bic	r5, r5, #0x40
	str	r5, [r4]
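	/*
	 * Note: the bic above clears the same SDRC_POWER bit (0x40) that was
	 * set just before the WFI earlier in this routine.
	 */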

	/*
	 * PC-relative stores lead to undefined behaviour in Thumb-2: use r7 as a
	 * base instead.
	 * Be careful not to clobber r7 when maintaining this code.
	 */

is_dll_in_lock_mode:
	/* Is dll in lock mode? */
	ldr	r4, sdrc_dlla_ctrl
	ldr	r5, [r4]
	tst	r5, #0x4
	bne	exit_nonoff_modes	@ Return if locked
	/* wait till dll locks */
	adr	r7, kick_counter
wait_dll_lock_timed:
	ldr	r4, wait_dll_lock_counter
	add	r4, r4, #1
	str	r4, [r7, #wait_dll_lock_counter - kick_counter]
	ldr	r4, sdrc_dlla_status
	/* Wait 20us for lock */
	mov	r6, #8
wait_dll_lock:
	subs	r6, r6, #0x1
	beq	kick_dll
	ldr	r5, [r4]
	and	r5, r5, #0x4
	cmp	r5, #0x4
	bne	wait_dll_lock
	b	exit_nonoff_modes	@ Return when locked

	/* Disable/re-enable the DLL if it is not locked */
kick_dll:
	ldr	r4, sdrc_dlla_ctrl
	ldr	r5, [r4]
	mov	r6, r5
	bic	r6, #(1<<3)		@ disable dll
	str	r6, [r4]
	dsb
	orr	r6, r6, #(1<<3)		@ enable dll
	str	r6, [r4]
	dsb
	ldr	r4, kick_counter
	add	r4, r4, #1
	str	r4, [r7]		@ kick_counter
	b	wait_dll_lock_timed
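	/*
	 * To summarise the loop above: wait_dll_lock polls bit 2 of
	 * SDRC_DLLA_STATUS for at most 8 iterations (the ~20us window noted
	 * above); if the DLL still has not locked, kick_dll toggles the DLL
	 * enable bit (bit 3) in SDRC_DLLA_CTRL off and back on, bumps
	 * kick_counter and retries. wait_dll_lock_counter counts the polling
	 * rounds; both counters are kept at the end of this SRAM blob so
	 * they can be read back later (see the comment at their definitions
	 * below).
	 */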

exit_nonoff_modes:
	/* Re-enable C-bit if needed */
	mrc	p15, 0, r0, c1, c0, 0
	tst	r0, #(1 << 2)		@ Check C bit enabled?
	orreq	r0, r0, #(1 << 2)	@ Enable the C bit if cleared
	mcreq	p15, 0, r0, c1, c0, 0
	isb

/*
 * ===================================
 * == Exit point from non-OFF modes ==
 * ===================================
 */
	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return

/*
 * Local variables
 */
sdrc_power:
	.word	SDRC_POWER_V
cm_idlest1_core:
	.word	CM_IDLEST1_CORE_V
cm_idlest_ckgen:
	.word	CM_IDLEST_CKGEN_V
sdrc_dlla_status:
	.word	SDRC_DLLA_STATUS_V
sdrc_dlla_ctrl:
	.word	SDRC_DLLA_CTRL_V
/*
 * When exporting to userspace while the counters are in SRAM,
 * these 2 words need to be at the end to facilitate retrieval!
 */
kick_counter:
	.word	0
wait_dll_lock_counter:
	.word	0

ENTRY(omap3_do_wfi_sz)
	.word	. - omap3_do_wfi

/*
 * ==============================
 * == Resume path for OFF mode ==
 * ==============================
 */

/*
 * The restore_* functions are called by the ROM code
 * when back from WFI in OFF mode.
 * Cf. the get_*restore_pointer functions.
 *
 * restore_es3: applies to 34xx >= ES3.0
 * restore_3630: applies to 36xx
 * restore: common code for 3xxx
 *
 * Note: when back from CORE and MPU OFF mode we are running
 * from SDRAM, with the MMU, caches and branch prediction disabled.
 * Also the SRAM content has been cleared.
 */
ENTRY(omap3_restore_es3)
	ldr	r5, pm_prepwstst_core_p
	ldr	r4, [r5]
	and	r4, r4, #0x3
	cmp	r4, #0x0	@ Check if previous power state of CORE is OFF
	bne	omap3_restore	@ Fall through to OMAP3 common code
	adr	r0, es3_sdrc_fix
	ldr	r1, sram_base
	ldr	r2, es3_sdrc_fix_sz
	mov	r2, r2, ror #2
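	/*
	 * es3_sdrc_fix_sz is a byte count; since it is word-aligned, the
	 * rotate-right by 2 above is equivalent to a shift right by 2 and
	 * yields the number of words copied by the loop below.
	 */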
copy_to_sram:
	ldmia	r0!, {r3}	@ val = *src
	stmia	r1!, {r3}	@ *dst = val
	subs	r2, r2, #0x1	@ num_words--
	bne	copy_to_sram
	ldr	r1, sram_base
	blx	r1
	b	omap3_restore	@ Fall through to OMAP3 common code
ENDPROC(omap3_restore_es3)

ENTRY(omap3_restore_3630)
	ldr	r1, pm_prepwstst_core_p
	ldr	r2, [r1]
	and	r2, r2, #0x3
	cmp	r2, #0x0	@ Check if previous power state of CORE is OFF
	bne	omap3_restore	@ Fall through to OMAP3 common code
	/* Disable RTA before giving control */
	ldr	r1, control_mem_rta
	mov	r2, #OMAP36XX_RTA_DISABLE
	str	r2, [r1]
ENDPROC(omap3_restore_3630)

	/* Fall through to common code for the remaining logic */

ENTRY(omap3_restore)
	/*
	 * Read the pwstctrl register to check the reason for mpu reset.
	 * This tells us what was lost.
	 */
	ldr	r1, pm_pwstctrl_mpu
	ldr	r2, [r1]
	and	r2, r2, #0x3
	cmp	r2, #0x0	@ Check if target power state was OFF or RET
	bne	logic_l1_restore

	ldr	r0, l2dis_3630
	cmp	r0, #0x1	@ should we disable L2 on 3630?
	bne	skipl2dis
	mrc	p15, 0, r0, c1, c0, 1
	bic	r0, r0, #2	@ disable L2 cache
	mcr	p15, 0, r0, c1, c0, 1
skipl2dis:
	ldr	r0, control_stat
	ldr	r1, [r0]
	and	r1, #0x700
	cmp	r1, #0x300
	beq	l2_inv_gp
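	/*
	 * The check above reads the device-type field of CONTROL_STATUS
	 * (bits 10:8, per the 0x700 mask); 0x300 is assumed to identify a
	 * general-purpose (GP) device, which is handled via the ROM monitor
	 * services at l2_inv_gp below instead of the secure PPA calls that
	 * follow.
	 */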
	mov	r0, #40			@ set service ID for PPA
	mov	r12, r0			@ copy secure Service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	adr	r3, l2_inv_api_params	@ r3 points to dummy parameters
	dsb				@ data synchronization barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
	/* Write to Aux control register to set some bits */
	mov	r0, #42			@ set service ID for PPA
	mov	r12, r0			@ copy secure Service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]		@ r3 points to parameters
	dsb				@ data synchronization barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)

#ifdef CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE
	/* Restore L2 aux control register */
	@ set service ID for PPA
	mov	r0, #CONFIG_OMAP3_L2_AUX_SECURE_SERVICE_SET_ID
	mov	r12, r0			@ copy service ID in r12
	mov	r1, #0			@ set task ID for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]
	adds	r3, r3, #8		@ r3 points to parameters
	dsb				@ data synchronization barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
#endif
	b	logic_l1_restore

	.align
l2_inv_api_params:
	.word	0x1, 0x00
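/*
 * l2_inv_api_params is the dummy argument block for the PPA L2-invalidate
 * call above (service ID 40): presumably a single argument with value 0,
 * mirroring the count-then-arguments layout used by api_params earlier in
 * this file.
 */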
l2_inv_gp:
	/* Execute smi to invalidate L2 cache */
	mov	r12, #0x1		@ set up to invalidate L2
	smc	#0			@ Call SMI monitor (smieq)
	/* Write to Aux control register to set some bits */
	ldr	r4, scratchpad_base
	ldr	r3, [r4,#0xBC]
	ldr	r0, [r3,#4]
	mov	r12, #0x3
	smc	#0			@ Call SMI monitor (smieq)
	ldr	r4, scratchpad_base
	ldr	r3, [r4,#0xBC]
	ldr	r0, [r3,#12]
	mov	r12, #0x2
	smc	#0			@ Call SMI monitor (smieq)
logic_l1_restore:
	ldr	r1, l2dis_3630
	cmp	r1, #0x1		@ Test if L2 re-enable needed on 3630
	bne	skipl2reen
	mrc	p15, 0, r1, c1, c0, 1
	orr	r1, r1, #2		@ re-enable L2 cache
	mcr	p15, 0, r1, c1, c0, 1
skipl2reen:

	/* Now branch to the common CPU resume function */
	b	cpu_resume
ENDPROC(omap3_restore)

	.ltorg

/*
 * Local variables
 */
pm_prepwstst_core_p:
	.word	PM_PREPWSTST_CORE_P
pm_pwstctrl_mpu:
	.word	PM_PWSTCTRL_MPU_P
scratchpad_base:
	.word	SCRATCHPAD_BASE_P
sram_base:
	.word	SRAM_BASE_P + 0x8000
control_stat:
	.word	CONTROL_STAT
control_mem_rta:
	.word	CONTROL_MEM_RTA_CTRL
l2dis_3630:
	.word	0
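/*
 * l2dis_3630 is the flag set to 1 by enable_omap3630_toggle_l2_on_restore()
 * at the top of this file; the restore path above checks it to decide
 * whether L2 must be disabled and later re-enabled on 3630.
 */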

/*
 * Internal functions
 */

/*
 * This function implements the erratum ID i443 workaround; it applies to
 * 34xx >= ES3.0.
 * Copied to and run from SRAM in order to reconfigure the SDRC parameters.
 */
	.text
	.align	3
ENTRY(es3_sdrc_fix)
	ldr	r4, sdrc_syscfg		@ get config addr
	ldr	r5, [r4]		@ get value
	tst	r5, #0x100		@ is part access blocked
	it	eq
	biceq	r5, r5, #0x100		@ clear bit if set
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_mr_0		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_emr2_0		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_manual_0	@ get config addr
	mov	r5, #0x2		@ autorefresh command
	str	r5, [r4]		@ kick off refreshes
	ldr	r4, sdrc_mr_1		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_emr2_1		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_manual_1	@ get config addr
	mov	r5, #0x2		@ autorefresh command
	str	r5, [r4]		@ kick off refreshes
	bx	lr
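	/*
	 * In short: the sequence above clears the "part access blocked" bit
	 * (0x100) in SDRC_SYSCONFIG if it is set, rewrites the MR and EMR2
	 * registers of both chip-selects with their current values, and
	 * issues a manual autorefresh command (0x2) to each, as required by
	 * the i443 workaround described above.
	 */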

/*
 * Local variables
 */
	.align
sdrc_syscfg:
	.word	SDRC_SYSCONFIG_P
sdrc_mr_0:
	.word	SDRC_MR_0_P
sdrc_emr2_0:
	.word	SDRC_EMR2_0_P
sdrc_manual_0:
	.word	SDRC_MANUAL_0_P
sdrc_mr_1:
	.word	SDRC_MR_1_P
sdrc_emr2_1:
	.word	SDRC_EMR2_1_P
sdrc_manual_1:
	.word	SDRC_MANUAL_1_P
ENDPROC(es3_sdrc_fix)
ENTRY(es3_sdrc_fix_sz)
	.word	. - es3_sdrc_fix