OMAP3: PM: Allow the cache clean when L1 is lost.
arch/arm/mach-omap2/sleep34xx.S
/*
 * (C) Copyright 2007
 * Texas Instruments
 * Karthik Dasu <karthik-dp@ti.com>
 *
 * (C) Copyright 2004
 * Texas Instruments, <www.ti.com>
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <plat/sram.h>
#include <mach/io.h>

#include "cm2xxx_3xxx.h"
#include "prm2xxx_3xxx.h"
#include "sdrc.h"
#include "control.h"

/*
 * Register access definitions
 */
#define SDRC_SCRATCHPAD_SEM_OFFS	0xc
#define SDRC_SCRATCHPAD_SEM_V	OMAP343X_SCRATCHPAD_REGADDR\
					(SDRC_SCRATCHPAD_SEM_OFFS)
#define PM_PREPWSTST_CORE_P	OMAP3430_PRM_BASE + CORE_MOD +\
					OMAP3430_PM_PREPWSTST
#define PM_PWSTCTRL_MPU_P	OMAP3430_PRM_BASE + MPU_MOD + OMAP2_PM_PWSTCTRL
#define CM_IDLEST1_CORE_V	OMAP34XX_CM_REGADDR(CORE_MOD, CM_IDLEST1)
#define CM_IDLEST_CKGEN_V	OMAP34XX_CM_REGADDR(PLL_MOD, CM_IDLEST)
#define SRAM_BASE_P		OMAP3_SRAM_PA
#define CONTROL_STAT		OMAP343X_CTRL_BASE + OMAP343X_CONTROL_STATUS
#define CONTROL_MEM_RTA_CTRL	(OMAP343X_CTRL_BASE +\
					OMAP36XX_CONTROL_MEM_RTA_CTRL)

/* Move this when a correct place is available */
#define SCRATCHPAD_MEM_OFFS	0x310
#define SCRATCHPAD_BASE_P	(OMAP343X_CTRL_BASE +\
					OMAP343X_CONTROL_MEM_WKUP +\
					SCRATCHPAD_MEM_OFFS)
#define SDRC_POWER_V		OMAP34XX_SDRC_REGADDR(SDRC_POWER)
#define SDRC_SYSCONFIG_P	(OMAP343X_SDRC_BASE + SDRC_SYSCONFIG)
#define SDRC_MR_0_P		(OMAP343X_SDRC_BASE + SDRC_MR_0)
#define SDRC_EMR2_0_P		(OMAP343X_SDRC_BASE + SDRC_EMR2_0)
#define SDRC_MANUAL_0_P		(OMAP343X_SDRC_BASE + SDRC_MANUAL_0)
#define SDRC_MR_1_P		(OMAP343X_SDRC_BASE + SDRC_MR_1)
#define SDRC_EMR2_1_P		(OMAP343X_SDRC_BASE + SDRC_EMR2_1)
#define SDRC_MANUAL_1_P		(OMAP343X_SDRC_BASE + SDRC_MANUAL_1)
#define SDRC_DLLA_STATUS_V	OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS)
#define SDRC_DLLA_CTRL_V	OMAP34XX_SDRC_REGADDR(SDRC_DLLA_CTRL)

/*
 * This file needs to be built unconditionally as ARM to interoperate correctly
 * with non-Thumb-2-capable firmware.
 */
	.arm

/*
 * API functions
 */

/*
 * The "get_*restore_pointer" functions are used to provide a
 * physical restore address where the ROM code jumps while waking
 * up from MPU OFF/OSWR state.
 * The restore pointer is stored into the scratchpad.
 */

	.text
/* Function call to get the restore pointer for resume from OFF */
ENTRY(get_restore_pointer)
	stmfd	sp!, {lr}	@ save registers on stack
	adr	r0, restore
	ldmfd	sp!, {pc}	@ restore regs and return
ENDPROC(get_restore_pointer)
	.align
ENTRY(get_restore_pointer_sz)
	.word	. - get_restore_pointer

	.text
/* Function call to get the restore pointer for 3630 resume from OFF */
ENTRY(get_omap3630_restore_pointer)
	stmfd	sp!, {lr}	@ save registers on stack
	adr	r0, restore_3630
	ldmfd	sp!, {pc}	@ restore regs and return
ENDPROC(get_omap3630_restore_pointer)
	.align
ENTRY(get_omap3630_restore_pointer_sz)
	.word	. - get_omap3630_restore_pointer

	.text
/* Function call to get the restore pointer for ES3 to resume from OFF */
ENTRY(get_es3_restore_pointer)
	stmfd	sp!, {lr}	@ save registers on stack
	adr	r0, restore_es3
	ldmfd	sp!, {pc}	@ restore regs and return
ENDPROC(get_es3_restore_pointer)
	.align
ENTRY(get_es3_restore_pointer_sz)
	.word	. - get_es3_restore_pointer
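
/*
 * Hedged usage sketch: the scratchpad save path (outside this file) is
 * expected to pick the variant matching the silicon and publish its
 * physical address for the ROM code. The exact guards below are
 * assumptions about that caller, not something defined here:
 *
 *	u32 restore_pa;
 *
 *	if (cpu_is_omap3630())
 *		restore_pa = virt_to_phys(get_omap3630_restore_pointer());
 *	else if (omap_rev() >= OMAP3430_REV_ES3_0)
 *		restore_pa = virt_to_phys(get_es3_restore_pointer());
 *	else
 *		restore_pa = virt_to_phys(get_restore_pointer());
 *	// restore_pa is then written into the scratchpad (see above)
 */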

	.text
/*
 * L2 cache needs to be toggled for stable OFF mode functionality on 3630.
 * This function sets up a flag that will allow for this toggling to take
 * place on 3630. Hopefully some version in the future may not need this.
 */
ENTRY(enable_omap3630_toggle_l2_on_restore)
	stmfd	sp!, {lr}	@ save registers on stack
	/* Setup so that we will disable and enable l2 */
	mov	r1, #0x1
	adrl	r2, l2dis_3630	@ may be too distant for plain adr
	str	r1, [r2]
	ldmfd	sp!, {pc}	@ restore regs and return
ENDPROC(enable_omap3630_toggle_l2_on_restore)
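
/*
 * Hedged caller sketch: the PM init code is expected to call this once on
 * affected 3630 parts; the guard shown is an assumption about that caller:
 *
 *	if (cpu_is_omap3630())
 *		enable_omap3630_toggle_l2_on_restore();
 */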

	.text
/* Function to call rom code to save secure ram context */
ENTRY(save_secure_ram_context)
	stmfd	sp!, {r1-r12, lr}	@ save registers on stack
	adr	r3, api_params		@ r3 points to parameters
	str	r0, [r3,#0x4]		@ r0 has sdram address
	ldr	r12, high_mask
	and	r3, r3, r12
	ldr	r12, sram_phy_addr_mask
	orr	r3, r3, r12
	mov	r0, #25			@ set service ID for PPA
	mov	r12, r0			@ copy secure service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	dsb				@ data write barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
	nop
	nop
	nop
	nop
	ldmfd	sp!, {r1-r12, pc}
	.align
sram_phy_addr_mask:
	.word	SRAM_BASE_P
high_mask:
	.word	0xffff
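	/*
	 * Assumed layout (not documented here): word 0 looks like a
	 * parameter count, and word 1 is patched at run time with the
	 * SDRAM buffer address via "str r0, [r3, #0x4]" above.
	 */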
api_params:
	.word	0x4, 0x0, 0x0, 0x1, 0x1
ENDPROC(save_secure_ram_context)
ENTRY(save_secure_ram_context_sz)
	.word	. - save_secure_ram_context
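
/*
 * Hedged caller sketch: like the suspend path below, this routine is
 * first pushed into SRAM with omap_sram_push() (<plat/sram.h>) and then
 * invoked with the physical address of a DDR buffer in r0; the buffer
 * name is an assumption about the caller:
 *
 *	static u32 (*_omap_save_secure_sram)(u32 *addr);
 *
 *	_omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
 *						save_secure_ram_context_sz);
 *	_omap_save_secure_sram((u32 *) __pa(secure_ram_storage));
 */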

/*
 * ======================
 * == Idle entry point ==
 * ======================
 */

/*
 * Forces OMAP into idle state
 *
 * omap34xx_cpu_suspend() - This bit of code saves the CPU context if needed
 * and executes the WFI instruction. Calling WFI effectively changes the
 * power domain states to the desired target power states.
 *
 * Notes:
 * - this code gets copied to internal SRAM at boot and after wake-up
 *   from OFF mode. The execution pointer in SRAM is _omap_sram_idle.
 * - when the OMAP wakes up it continues at different execution points
 *   depending on the low power mode (non-OFF vs OFF modes),
 *   cf. 'Resume path for xxx mode' comments.
 */
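
/*
 * Hedged usage sketch based on the r0/r1 contract documented below: the
 * PM code copies this routine into SRAM with omap_sram_push()
 * (<plat/sram.h>) and calls the SRAM copy; the function pointer and
 * argument names here are illustrative:
 *
 *	static void (*_omap_sram_idle)(u32 *addr, int save_state);
 *
 *	_omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
 *					 omap34xx_cpu_suspend_sz);
 *	...
 *	_omap_sram_idle(context_save_area, save_state);	// save_state: 0..3
 */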
ENTRY(omap34xx_cpu_suspend)
	stmfd	sp!, {r0-r12, lr}	@ save registers on stack

	/*
	 * r0 contains CPU context save/restore pointer in sdram
	 * r1 contains information about saving context:
	 *	0 - No context lost
	 *	1 - Only L1 and logic lost
	 *	2 - Only L2 lost (even if L1 is retained, we clean it along with L2)
	 *	3 - Both L1 and L2 lost and logic lost
	 */

	/* Directly jump to WFI if context save is not required */
	cmp	r1, #0x0
	beq	omap3_do_wfi

	/* Otherwise fall through to the save context code */
save_context_wfi:
	mov	r8, r0			@ Store SDRAM address in r8
	mrc	p15, 0, r5, c1, c0, 1	@ Read Auxiliary Control Register
	mov	r4, #0x1		@ Number of parameters for restore call
	stmia	r8!, {r4-r5}		@ Push parameters for restore call
	mrc	p15, 1, r5, c9, c0, 2	@ Read L2 AUX ctrl register
	stmia	r8!, {r4-r5}		@ Push parameters for restore call

	/* Check what the target sleep state is from r1 */
	cmp	r1, #0x2		@ Only L2 lost, no need to save context
	beq	clean_caches

l1_logic_lost:
	/* Store sp and spsr to SDRAM */
	mov	r4, sp
	mrs	r5, spsr
	mov	r6, lr
	stmia	r8!, {r4-r6}
	/* Save all ARM registers */
	/* Coprocessor access control register */
	mrc	p15, 0, r6, c1, c0, 2
	stmia	r8!, {r6}
	/* TTBR0, TTBR1 and Translation table base control */
	mrc	p15, 0, r4, c2, c0, 0
	mrc	p15, 0, r5, c2, c0, 1
	mrc	p15, 0, r6, c2, c0, 2
	stmia	r8!, {r4-r6}
	/*
	 * Domain access control register, data fault status register,
	 * and instruction fault status register
	 */
	mrc	p15, 0, r4, c3, c0, 0
	mrc	p15, 0, r5, c5, c0, 0
	mrc	p15, 0, r6, c5, c0, 1
	stmia	r8!, {r4-r6}
	/*
	 * Data aux fault status register, instruction aux fault status,
	 * data fault address register and instruction fault address register
	 */
	mrc	p15, 0, r4, c5, c1, 0
	mrc	p15, 0, r5, c5, c1, 1
	mrc	p15, 0, r6, c6, c0, 0
	mrc	p15, 0, r7, c6, c0, 2
	stmia	r8!, {r4-r7}
	/*
	 * user r/w thread and process ID, user r/o thread and process ID,
	 * priv only thread and process ID, cache size selection
	 */
	mrc	p15, 0, r4, c13, c0, 2
	mrc	p15, 0, r5, c13, c0, 3
	mrc	p15, 0, r6, c13, c0, 4
	mrc	p15, 2, r7, c0, c0, 0
	stmia	r8!, {r4-r7}
	/* Data TLB lockdown, instruction TLB lockdown registers */
	mrc	p15, 0, r5, c10, c0, 0
	mrc	p15, 0, r6, c10, c0, 1
	stmia	r8!, {r5-r6}
	/* Secure or non secure vector base address, FCSE PID, Context PID */
	mrc	p15, 0, r4, c12, c0, 0
	mrc	p15, 0, r5, c13, c0, 0
	mrc	p15, 0, r6, c13, c0, 1
	stmia	r8!, {r4-r6}
	/* Primary remap, normal remap registers */
	mrc	p15, 0, r4, c10, c2, 0
	mrc	p15, 0, r5, c10, c2, 1
	stmia	r8!, {r4-r5}

	/* Store current cpsr */
	mrs	r2, cpsr
	stmia	r8!, {r2}

	mrc	p15, 0, r4, c1, c0, 0
	/* save control register */
	stmia	r8!, {r4}

clean_caches:
	/*
	 * Jump out to the kernel flush routine
	 * - reusing that code is better
	 * - it executes in a cached space so is faster than refetch per-block
	 * - should be faster and will change with kernel
	 * - 'might' have to copy address, load and jump to it
	 */
	ldr	r1, kernel_flush
	blx	r1
	/*
	 * The kernel doesn't interwork: v7_flush_dcache_all in particular will
	 * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
	 * This sequence switches back to ARM. Note that .align may insert a
	 * nop: bx pc needs to be word-aligned in order to work.
	 */
THUMB(	.thumb		)
THUMB(	.align		)
THUMB(	bx	pc	)
THUMB(	nop		)
	.arm

omap3_do_wfi:
	ldr	r4, sdrc_power		@ read the SDRC_POWER register
	ldr	r5, [r4]		@ read the contents of SDRC_POWER
	orr	r5, r5, #0x40		@ enable self refresh on idle req
	str	r5, [r4]		@ write back to SDRC_POWER register

	/* Data memory barrier and Data sync barrier */
	dsb
	dmb

	/*
	 * ===================================
	 * == WFI instruction => Enter idle ==
	 * ===================================
	 */
	wfi				@ wait for interrupt

	/*
	 * ===================================
	 * == Resume path for non-OFF modes ==
	 * ===================================
	 */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	bl	wait_sdrc_ok

	/*
	 * ===================================
	 * == Exit point from non-OFF modes ==
	 * ===================================
	 */
	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return


/*
 * ==============================
 * == Resume path for OFF mode ==
 * ==============================
 */

/*
 * The restore_* functions are called by the ROM code
 * when back from WFI in OFF mode.
 * Cf. the get_*restore_pointer functions.
 *
 * restore_es3: applies to 34xx >= ES3.0
 * restore_3630: applies to 36xx
 * restore: common code for 3xxx
 */
restore_es3:
	ldr	r5, pm_prepwstst_core_p
	ldr	r4, [r5]
	and	r4, r4, #0x3
	cmp	r4, #0x0	@ Check if previous power state of CORE is OFF
	bne	restore
	adr	r0, es3_sdrc_fix
	ldr	r1, sram_base
	ldr	r2, es3_sdrc_fix_sz
	mov	r2, r2, ror #2	@ bytes -> words (size is word-aligned)
copy_to_sram:
	ldmia	r0!, {r3}	@ val = *src
	stmia	r1!, {r3}	@ *dst = val
	subs	r2, r2, #0x1	@ num_words--
	bne	copy_to_sram
	ldr	r1, sram_base
	blx	r1
	b	restore

restore_3630:
	ldr	r1, pm_prepwstst_core_p
	ldr	r2, [r1]
	and	r2, r2, #0x3
	cmp	r2, #0x0	@ Check if previous power state of CORE is OFF
	bne	restore
	/* Disable RTA before giving control */
	ldr	r1, control_mem_rta
	mov	r2, #OMAP36XX_RTA_DISABLE
	str	r2, [r1]

	/* Fall through to common code for the remaining logic */

restore:
	/*
	 * Check what was the reason for mpu reset and store the reason in r9:
	 *	0 - No context lost
	 *	1 - Only L1 and logic lost
	 *	2 - Only L2 lost - In this case, we won't be here
	 *	3 - Both L1 and L2 lost
	 */
	ldr	r1, pm_pwstctrl_mpu
	ldr	r2, [r1]
	and	r2, r2, #0x3
	cmp	r2, #0x0	@ Check if target power state was OFF or RET
	moveq	r9, #0x3	@ MPU OFF => L1 and L2 lost
	movne	r9, #0x1	@ Only L1 and logic lost => avoid L2 invalidation
	bne	logic_l1_restore

	ldr	r0, l2dis_3630
	cmp	r0, #0x1	@ should we disable L2 on 3630?
	bne	skipl2dis
	mrc	p15, 0, r0, c1, c0, 1
	bic	r0, r0, #2	@ disable L2 cache
	mcr	p15, 0, r0, c1, c0, 1
skipl2dis:
	ldr	r0, control_stat
	ldr	r1, [r0]
	and	r1, #0x700
	cmp	r1, #0x300
	beq	l2_inv_gp
	mov	r0, #40			@ set service ID for PPA
	mov	r12, r0			@ copy secure Service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	adr	r3, l2_inv_api_params	@ r3 points to dummy parameters
	dsb				@ data write barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
	/* Write to Aux control register to set some bits */
	mov	r0, #42			@ set service ID for PPA
	mov	r12, r0			@ copy secure Service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]		@ r3 points to parameters
	dsb				@ data write barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)

#ifdef CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE
	/* Restore L2 aux control register */
	@ set service ID for PPA
	mov	r0, #CONFIG_OMAP3_L2_AUX_SECURE_SERVICE_SET_ID
	mov	r12, r0			@ copy service ID in r12
	mov	r1, #0			@ set task ID for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]
	adds	r3, r3, #8		@ r3 points to parameters
	dsb				@ data write barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
#endif
	b	logic_l1_restore

	.align
l2_inv_api_params:
	.word	0x1, 0x00
l2_inv_gp:
	/* Execute smi to invalidate L2 cache */
	mov	r12, #0x1		@ set up to invalidate L2
	smc	#0			@ Call SMI monitor (smieq)
	/* Write to Aux control register to set some bits */
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]
	ldr	r0, [r3, #4]
	mov	r12, #0x3
	smc	#0			@ Call SMI monitor (smieq)
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]
	ldr	r0, [r3, #12]
	mov	r12, #0x2
	smc	#0			@ Call SMI monitor (smieq)
logic_l1_restore:
	ldr	r1, l2dis_3630
	cmp	r1, #0x1		@ Test if L2 re-enable needed on 3630
	bne	skipl2reen
	mrc	p15, 0, r1, c1, c0, 1
	orr	r1, r1, #2		@ re-enable L2 cache
	mcr	p15, 0, r1, c1, c0, 1
skipl2reen:
	mov	r1, #0
	/*
	 * Invalidate all instruction caches to PoU
	 * and flush branch target cache
	 */
	mcr	p15, 0, r1, c7, c5, 0

	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]
	adds	r3, r3, #16
	ldmia	r3!, {r4-r6}
	mov	sp, r4
	msr	spsr_cxsf, r5
	mov	lr, r6

	ldmia	r3!, {r4-r9}
	/* Coprocessor access Control Register */
	mcr	p15, 0, r4, c1, c0, 2

	/* TTBR0 */
	MCR	p15, 0, r5, c2, c0, 0
	/* TTBR1 */
	MCR	p15, 0, r6, c2, c0, 1
	/* Translation table base control register */
	MCR	p15, 0, r7, c2, c0, 2
	/* Domain access Control Register */
	MCR	p15, 0, r8, c3, c0, 0
	/* Data fault status Register */
	MCR	p15, 0, r9, c5, c0, 0

	ldmia	r3!, {r4-r8}
	/* Instruction fault status Register */
	MCR	p15, 0, r4, c5, c0, 1
	/* Data Auxiliary Fault Status Register */
	MCR	p15, 0, r5, c5, c1, 0
	/* Instruction Auxiliary Fault Status Register */
	MCR	p15, 0, r6, c5, c1, 1
	/* Data Fault Address Register */
	MCR	p15, 0, r7, c6, c0, 0
	/* Instruction Fault Address Register */
	MCR	p15, 0, r8, c6, c0, 2
	ldmia	r3!, {r4-r7}

	/* User r/w thread and process ID */
	MCR	p15, 0, r4, c13, c0, 2
	/* User ro thread and process ID */
	MCR	p15, 0, r5, c13, c0, 3
	/* Privileged only thread and process ID */
	MCR	p15, 0, r6, c13, c0, 4
	/* Cache size selection */
	MCR	p15, 2, r7, c0, c0, 0
	ldmia	r3!, {r4-r8}
	/* Data TLB lockdown registers */
	MCR	p15, 0, r4, c10, c0, 0
	/* Instruction TLB lockdown registers */
	MCR	p15, 0, r5, c10, c0, 1
	/* Secure or Nonsecure Vector Base Address */
	MCR	p15, 0, r6, c12, c0, 0
	/* FCSE PID */
	MCR	p15, 0, r7, c13, c0, 0
	/* Context PID */
	MCR	p15, 0, r8, c13, c0, 1

	ldmia	r3!, {r4-r5}
	/* Primary memory remap register */
	MCR	p15, 0, r4, c10, c2, 0
	/* Normal memory remap register */
	MCR	p15, 0, r5, c10, c2, 1

	/* Restore cpsr */
	ldmia	r3!, {r4}	@ load CPSR from SDRAM
	msr	cpsr, r4	@ restore cpsr

	/* Enabling MMU here */
	mrc	p15, 0, r7, c2, c0, 2	@ Read TTBRControl
	/* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
	and	r7, #0x7
	cmp	r7, #0x0
	beq	usettbr0
ttbr_error:
	/*
	 * More work needs to be done to support an N[0:2] value other
	 * than 0, so we loop here so that the error can be detected
	 */
	b	ttbr_error
usettbr0:
	mrc	p15, 0, r2, c2, c0, 0
	ldr	r5, ttbrbit_mask
	and	r2, r5
	mov	r4, pc
	ldr	r5, table_index_mask
	and	r4, r5			@ r4 = 31 to 20 bits of pc
	/* Extract the value to be written to the table entry */
	ldr	r1, table_entry
	/* r1 has the value to be written to the table entry */
	add	r1, r1, r4
	/* Getting the address of the table entry to modify */
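	/*
	 * Each 1MB section is described by a 4-byte first-level
	 * descriptor, so the descriptor offset within the table is
	 * (VA >> 20) * 4 == VA >> 18.
	 */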
	lsr	r4, #18
	/* r2 has the location which needs to be modified */
	add	r2, r4
	/* Storing previous entry of location being modified */
	ldr	r5, scratchpad_base
	ldr	r4, [r2]
	str	r4, [r5, #0xC0]
	/* Modify the table entry */
	str	r1, [r2]
	/*
	 * Storing address of entry being modified
	 * - will be restored after enabling MMU
	 */
	ldr	r5, scratchpad_base
	str	r2, [r5, #0xC4]

	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 4	@ Flush prefetch buffer
	mcr	p15, 0, r0, c7, c5, 6	@ Invalidate branch predictor array
	mcr	p15, 0, r0, c8, c5, 0	@ Invalidate instruction TLB
	mcr	p15, 0, r0, c8, c6, 0	@ Invalidate data TLB
	/*
	 * Restore control register. This enables the MMU.
	 * The caches and prediction are not enabled here, they
	 * will be enabled after restoring the MMU table entry.
	 */
	ldmia	r3!, {r4}
	/* Store previous value of control register in scratchpad */
	str	r4, [r5, #0xC8]
	ldr	r2, cache_pred_disable_mask
	and	r4, r2
	mcr	p15, 0, r4, c1, c0, 0
	dsb
	isb
	ldr	r0, =restoremmu_on
	bx	r0

/*
 * ==============================
 * == Exit point from OFF mode ==
 * ==============================
 */
restoremmu_on:
	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return


/*
 * Internal functions
 */

/* This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0 */
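/*
 * Note: restore_es3 above copies this routine into internal SRAM
 * (sram_base) and executes it there, since it reprograms the SDRC that
 * the DDR-resident code depends on.
 */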
	.text
	.align	3
ENTRY(es3_sdrc_fix)
	ldr	r4, sdrc_syscfg		@ get config addr
	ldr	r5, [r4]		@ get value
	tst	r5, #0x100		@ is part access blocked
	it	eq
	biceq	r5, r5, #0x100		@ clear bit if set
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_mr_0		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_emr2_0		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_manual_0	@ get config addr
	mov	r5, #0x2		@ autorefresh command
	str	r5, [r4]		@ kick off refreshes
	ldr	r4, sdrc_mr_1		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_emr2_1		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_manual_1	@ get config addr
	mov	r5, #0x2		@ autorefresh command
	str	r5, [r4]		@ kick off refreshes
	bx	lr

	.align
sdrc_syscfg:
	.word	SDRC_SYSCONFIG_P
sdrc_mr_0:
	.word	SDRC_MR_0_P
sdrc_emr2_0:
	.word	SDRC_EMR2_0_P
sdrc_manual_0:
	.word	SDRC_MANUAL_0_P
sdrc_mr_1:
	.word	SDRC_MR_1_P
sdrc_emr2_1:
	.word	SDRC_EMR2_1_P
sdrc_manual_1:
	.word	SDRC_MANUAL_1_P
ENDPROC(es3_sdrc_fix)
ENTRY(es3_sdrc_fix_sz)
	.word	. - es3_sdrc_fix

/*
 * This function implements the erratum ID i581 WA:
 * SDRC state restore before accessing the SDRAM
 *
 * Only used at return from non-OFF mode. For OFF
 * mode the ROM code configures the SDRC and
 * the DPLL before calling the restore code directly
 * from DDR.
 */

/* Make sure SDRC accesses are ok */
wait_sdrc_ok:

/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
	ldr	r4, cm_idlest_ckgen
wait_dpll3_lock:
	ldr	r5, [r4]
	tst	r5, #1
	beq	wait_dpll3_lock

	ldr	r4, cm_idlest1_core
wait_sdrc_ready:
	ldr	r5, [r4]
	tst	r5, #0x2
	bne	wait_sdrc_ready
	/* allow DLL powerdown upon hw idle req */
	ldr	r4, sdrc_power
	ldr	r5, [r4]
	bic	r5, r5, #0x40
	str	r5, [r4]

/*
 * PC-relative stores lead to undefined behaviour in Thumb-2: use r7 as a
 * base instead.
 * Be careful not to clobber r7 when maintaining this code.
 */

is_dll_in_lock_mode:
	/* Is dll in lock mode? */
	ldr	r4, sdrc_dlla_ctrl
	ldr	r5, [r4]
	tst	r5, #0x4
	bxne	lr			@ Return if locked
	/* wait till dll locks */
	adr	r7, kick_counter
wait_dll_lock_timed:
	ldr	r4, wait_dll_lock_counter
	add	r4, r4, #1
	str	r4, [r7, #wait_dll_lock_counter - kick_counter]
	ldr	r4, sdrc_dlla_status
	/* Wait 20uS for lock */
	mov	r6, #8
wait_dll_lock:
	subs	r6, r6, #0x1
	beq	kick_dll
	ldr	r5, [r4]
	and	r5, r5, #0x4
	cmp	r5, #0x4
	bne	wait_dll_lock
	bx	lr			@ Return when locked

	/* disable/reenable DLL if not locked */
kick_dll:
	ldr	r4, sdrc_dlla_ctrl
	ldr	r5, [r4]
	mov	r6, r5
	bic	r6, #(1<<3)		@ disable dll
	str	r6, [r4]
	dsb
	orr	r6, r6, #(1<<3)		@ enable dll
	str	r6, [r4]
	dsb
	ldr	r4, kick_counter
	add	r4, r4, #1
	str	r4, [r7]		@ kick_counter
	b	wait_dll_lock_timed

	.align
cm_idlest1_core:
	.word	CM_IDLEST1_CORE_V
cm_idlest_ckgen:
	.word	CM_IDLEST_CKGEN_V
sdrc_dlla_status:
	.word	SDRC_DLLA_STATUS_V
sdrc_dlla_ctrl:
	.word	SDRC_DLLA_CTRL_V
pm_prepwstst_core_p:
	.word	PM_PREPWSTST_CORE_P
pm_pwstctrl_mpu:
	.word	PM_PWSTCTRL_MPU_P
scratchpad_base:
	.word	SCRATCHPAD_BASE_P
sram_base:
	.word	SRAM_BASE_P + 0x8000
sdrc_power:
	.word	SDRC_POWER_V
ttbrbit_mask:
	.word	0xFFFFC000
table_index_mask:
	.word	0xFFF00000
table_entry:
	.word	0x00000C02
cache_pred_disable_mask:
	.word	0xFFFFE7FB
control_stat:
	.word	CONTROL_STAT
control_mem_rta:
	.word	CONTROL_MEM_RTA_CTRL
kernel_flush:
	.word	v7_flush_dcache_all
l2dis_3630:
	.word	0
/*
 * When exporting to userspace while the counters are in SRAM,
 * these 2 words need to be at the end to facilitate retrieval!
 */
kick_counter:
	.word	0
wait_dll_lock_counter:
	.word	0
ENDPROC(omap34xx_cpu_suspend)

ENTRY(omap34xx_cpu_suspend_sz)
	.word	. - omap34xx_cpu_suspend