/*
 * This file contains idle entry/exit functions for POWER7,
 * POWER8 and POWER9 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>

#undef DEBUG

/*
 * Use unused space in the interrupt stack to save and restore
 * registers for winkle support.
 */
#define _SDR1	GPR3
#define _RPR	GPR4
#define _SPURR	GPR5
#define _PURR	GPR6
#define _TSCR	GPR7
#define _DSCR	GPR8
#define _AMOR	GPR9
#define _WORT	GPR10
#define _WORC	GPR11
#define _PTCR	GPR12

#define PSSCR_HV_TEMPLATE	PSSCR_ESL | PSSCR_EC | \
				PSSCR_PSLL_MASK | PSSCR_TR_MASK | \
				PSSCR_MTL_MASK

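/*
 * PSSCR_HV_TEMPLATE ORs together PSSCR_ESL, PSSCR_EC and the full
 * PSLL, TR and MTL field masks; power9_idle_stop (below) ORs the
 * requested stop state from r3 into this template before writing
 * SPRN_PSSCR.
 */
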
/* Idle state entry routines */

#define	IDLE_STATE_ENTER_SEQ(IDLE_INST)				\
	/* Magic NAP/SLEEP/WINKLE mode enter sequence */	\
	std	r0,0(r1);					\
	ptesync;						\
	ld	r0,0(r1);					\
1:	cmp	cr0,r0,r0;					\
	bne	1b;						\
	IDLE_INST;						\
	b	.

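/*
 * The "magic" sequence above works as follows: the store, ptesync and
 * dependent load ensure outstanding stores are complete before the
 * power-saving instruction executes; "cmp cr0,r0,r0" always compares
 * equal, so "bne 1b" never loops; and the trailing "b ." catches an
 * unexpected fall-through past IDLE_INST, since wakeup normally
 * resumes at the system reset vector rather than here.
 */
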
	.text

/*
 * Used by threads before entering deep idle states. Saves SPRs
 * in the interrupt stack frame.
 */
save_sprs_to_stack:
	/*
	 * Note: all registers, whether per-core, per-subcore or
	 * per-thread, are saved here since any thread in the core
	 * might wake up first.
	 */
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_PTCR
	std	r3,_PTCR(r1)
	/*
	 * Note - SDR1 is dropped in Power ISA v3. Hence not saving
	 * SDR1 here.
	 */
FTR_SECTION_ELSE
	mfspr	r3,SPRN_SDR1
	std	r3,_SDR1(r1)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mfspr	r3,SPRN_RPR
	std	r3,_RPR(r1)
	mfspr	r3,SPRN_SPURR
	std	r3,_SPURR(r1)
	mfspr	r3,SPRN_PURR
	std	r3,_PURR(r1)
	mfspr	r3,SPRN_TSCR
	std	r3,_TSCR(r1)
	mfspr	r3,SPRN_DSCR
	std	r3,_DSCR(r1)
	mfspr	r3,SPRN_AMOR
	std	r3,_AMOR(r1)
	mfspr	r3,SPRN_WORT
	std	r3,_WORT(r1)
	mfspr	r3,SPRN_WORC
	std	r3,_WORC(r1)

	blr

/*
 * Used by threads when the lock bit of core_idle_state is set.
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
 */
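/*
 * Once the lock bit drops, core_idle_state is reloaded with lwarx so
 * that the caller returns holding a fresh reservation and value for
 * its subsequent stwcx.
 */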

core_idle_lock_held:
	HMT_LOW
3:	lwz	r15,0(r14)
	andi.	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	bne	3b
	HMT_MEDIUM
	lwarx	r15,0,r14
	blr

/*
 * Pass requested state in r3:
 *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE on POWER8
 *	   - requested STOP state on POWER9
 *
 * Whether to check IRQ_HAPPENED, in r4:
 *	0 - don't check
 *	1 - check
 *
 * Address to 'rfid' to, in r5
 */
_GLOBAL(pnv_powersave_common)
	/* Use r3 to pass state nap/sleep/winkle */
	/*
	 * NAP is a state loss, so we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-INT_FRAME_SIZE(r1)
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)

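	/*
	 * The rotate/mask pair below clears only MSR_EE in r9, so the
	 * rest of the current MSR value is preserved in r9 and is
	 * later saved into the frame as _MSR.
	 */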
	/* Hard disable interrupts */
	mfmsr	r9
	rldicl	r9,r9,48,1
	rotldi	r9,r9,16
	mtmsrd	r9,1			/* hard-disable interrupts */

	/* Check if something happened while soft-disabled */
	lbz	r0,PACAIRQHAPPENED(r13)
	andi.	r0,r0,~PACA_IRQ_HARD_DIS@l
	beq	1f
	cmpwi	cr0,r4,0
	beq	1f
	addi	r1,r1,INT_FRAME_SIZE
	ld	r0,16(r1)
	li	r3,0			/* Return 0 (no nap) */
	mtlr	r0
	blr

1:	/*
	 * We mark irqs hard disabled as this is the state we'll
	 * be in when returning and we need to tell
	 * arch_local_irq_restore() about it.
	 */
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r4
	std	r4,_CCR(r1)
	std	r9,_MSR(r1)
	std	r1,PACAR1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle */
	li	r4,KVM_HWTHREAD_IN_IDLE
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif

	/*
	 * Go to real mode to do the nap, as required by the architecture.
	 * Also, we need to be in real mode before setting hwthread_state,
	 * because as soon as we do that, another thread can switch
	 * the MMU context to the guest.
	 */
	LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
	li	r6, MSR_RI
	andc	r6, r9, r6
	mtmsrd	r6, 1		/* clear RI before setting SRR0/1 */
	mtspr	SPRN_SRR0, r5
	mtspr	SPRN_SRR1, r7
	rfid

	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
	stb	r3,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr3,r3,PNV_THREAD_SLEEP
	bge	cr3,2f
	IDLE_STATE_ENTER_SEQ(PPC_NAP)
	/* No return */
2:
	/* Sleep or winkle */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop1:
	lwarx	r15,0,r14

	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel	core_idle_lock_held

	andc	r15,r15,r7		/* Clear thread bit */

	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS

	/*
	 * If cr0 = 0, then the current thread is the last thread of the
	 * core entering sleep. The last thread needs to execute the
	 * hardware bug workaround code if required by the platform.
	 * Make the workaround call unconditionally here. The branch below
	 * is patched out during idle-state discovery if the platform does
	 * not require the workaround.
	 */
.global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
	beq	fastsleep_workaround_at_entry

	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

common_enter: /* common code for all the threads entering sleep or winkle */
	bgt	cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ(PPC_SLEEP)

fastsleep_workaround_at_entry:
	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx.	r15,0,r14
	bne-	lwarx_loop1
	isync

	/* Fast sleep workaround */
	li	r3,1
	li	r4,1
	bl	opal_rm_config_cpu_idle_state

	/* Clear Lock bit */
	li	r0,0
	lwsync
	stw	r0,0(r14)
	b	common_enter

enter_winkle:
	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ(PPC_WINKLE)

/*
 * r3 - requested stop state
 */
power_enter_stop:
	/*
	 * Check if the requested state is a deep idle state.
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
	cmpd	r3,r4
	bge	2f
	IDLE_STATE_ENTER_SEQ(PPC_STOP)
2:
	/*
	 * Entering deep idle state.
	 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
	 * stack and enter stop
	 */
	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)

lwarx_loop_stop:
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	bnel	core_idle_lock_held
	andc	r15,r15,r7		/* Clear thread bit */

	stwcx.	r15,0,r14
	bne-	lwarx_loop_stop
	isync

	bl	save_sprs_to_stack

	IDLE_STATE_ENTER_SEQ(PPC_STOP)

_GLOBAL(power7_idle)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDRBASE(r3,powersave_nap)
	lwz	r4,ADDROFF(powersave_nap)(r3)
	cmpwi	0,r4,0
	beqlr
	li	r3, 1
	/* fall through */

_GLOBAL(power7_nap)
	mr	r4,r3
	li	r3,PNV_THREAD_NAP
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

_GLOBAL(power7_sleep)
	li	r3,PNV_THREAD_SLEEP
	li	r4,1
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

_GLOBAL(power7_winkle)
	li	r3,PNV_THREAD_WINKLE
	li	r4,1
	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
	b	pnv_powersave_common
	/* No return */

#define CHECK_HMI_INTERRUPT						\
	mfspr	r0,SPRN_SRR1;						\
BEGIN_FTR_SECTION_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xf;  /* extract wake reason field (P8) */	\
FTR_SECTION_ELSE_NESTED(66);						\
	rlwinm	r0,r0,45-31,0xe;  /* P7 wake reason field is 3 bits */	\
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
	cmpwi	r0,0xa;			/* Hypervisor maintenance ? */	\
	bne	20f;							\
	/* Invoke opal call to handle hmi */				\
	ld	r2,PACATOC(r13);					\
	ld	r1,PACAR1(r13);						\
	std	r3,ORIG_GPR3(r1);	/* Save original r3 */		\
	li	r3,0;			/* NULL argument */		\
	bl	hmi_exception_realmode;					\
	nop;								\
	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
20:	nop;
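/*
 * r3 is preserved across hmi_exception_realmode() because on the
 * wakeup paths it still carries state the caller needs (for example
 * the value pnv_wakeup_loss eventually returns to the caller of
 * power7_nap).
 */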

/*
 * r3 - requested stop state
 */
_GLOBAL(power9_idle_stop)
	LOAD_REG_IMMEDIATE(r4, PSSCR_HV_TEMPLATE)
	or	r4,r4,r3
	mtspr	SPRN_PSSCR, r4
	li	r4, 1
	LOAD_REG_ADDR(r5,power_enter_stop)
	b	pnv_powersave_common
	/* No return */
/*
 * Called from reset vector. Check whether we have woken up with
 * hypervisor state loss. If yes, restore hypervisor state and return
 * back to the reset vector.
 *
 * r13 - Contents of HSPRG0
 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
 */
_GLOBAL(pnv_restore_hyp_resource)
	ld	r2,PACATOC(r13);
BEGIN_FTR_SECTION
	/*
	 * POWER ISA 3. Use PSSCR to determine if we
	 * are waking up from a deep idle state.
	 */
	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)

	mfspr	r5,SPRN_PSSCR
	/*
	 * Bits 0-3 correspond to the Power-Saving Level Status,
	 * which indicates the idle state we are waking up from.
	 */
	rldicl	r5,r5,4,60
	cmpd	cr4,r5,r4
	bge	cr4,pnv_wakeup_tb_loss
	/*
	 * Waking up without hypervisor state loss. Return to
	 * reset vector.
	 */
	blr

END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

/*
 * POWER ISA 2.07 or less.
 * Check if the last bit of HSPRG0 is set. This indicates whether we
 * are waking up from winkle.
 */
	clrldi	r5,r13,63
	clrrdi	r13,r13,1
	cmpwi	cr4,r5,1
	mtspr	SPRN_HSPRG0,r13

	lbz	r0,PACA_THREAD_IDLE_STATE(r13)
	cmpwi	cr2,r0,PNV_THREAD_NAP
	bgt	cr2,pnv_wakeup_tb_loss	/* Either sleep or Winkle */

	/*
	 * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
	 * up from nap. At this stage CR3 shouldn't contain 'gt' since that
	 * indicates we are waking with hypervisor state loss from nap.
	 */
	bgt	cr3,.

	blr	/* Return back to System Reset vector from where
		   pnv_restore_hyp_resource was invoked */

/*
 * Called if waking up from an idle state which can cause either partial or
 * complete hyp state loss.
 * In POWER8, called if waking up from fastsleep or winkle
 * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state
 *
 * r13 - PACA
 * cr3 - gt if waking up with partial/complete hypervisor state loss
 * cr4 - eq if waking up from complete hypervisor state loss.
 */
_GLOBAL(pnv_wakeup_tb_loss)
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack
	 * and they are restored before switching to the process context. Hence
	 * until they are restored, they are free to be used.
	 *
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
	 * to determine the wakeup reason if we branch to kvm_start_guest. LR
	 * is required to return back to reset vector after hypervisor state
	 * restore is complete.
	 */
	mflr	r17
	mfspr	r16,SPRN_SRR1
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

	lbz	r7,PACA_THREAD_MASK(r13)
	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop2:
	lwarx	r15,0,r14
	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
	/*
	 * Lock bit is set in one of the 2 cases:
	 * a. In the sleep/winkle enter path, the last thread is executing
	 *    the fastsleep workaround code.
	 * b. In the wake up path, another thread is executing the fastsleep
	 *    workaround undo code, resyncing the timebase or restoring context.
	 * In either case loop until the lock bit is cleared.
	 */
	bnel	core_idle_lock_held

	cmpwi	cr2,r15,0

	/*
	 * At this stage
	 * cr2 - eq if first thread to wake up in core
	 * cr3 - gt if waking up with partial/complete hypervisor state loss
	 * cr4 - eq if waking up from complete hypervisor state loss.
	 */

	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx.	r15,0,r14
	bne-	lwarx_loop2
	isync

BEGIN_FTR_SECTION
	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
	and	r4,r4,r15
	cmpwi	r4,0			/* Check if first in subcore */

	or	r15,r15,r7		/* Set thread bit */
	beq	first_thread_in_subcore
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	or	r15,r15,r7		/* Set thread bit */
	beq	cr2,first_thread_in_core

	/* Not first thread in core or subcore to wake up */
	b	clear_lock

first_thread_in_subcore:
	/*
	 * If waking up from sleep, subcore state is not lost. Hence
	 * skip subcore state restore
	 */
	bne	cr4,subcore_state_restored

	/* Restore per-subcore state */
	ld	r4,_SDR1(r1)
	mtspr	SPRN_SDR1,r4

	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
	ld	r4,_AMOR(r1)
	mtspr	SPRN_AMOR,r4

subcore_state_restored:
	/*
	 * Check if the thread is also the first thread in the core. If not,
	 * skip to clear_lock.
	 */
	bne	cr2,clear_lock

first_thread_in_core:

	/*
	 * First thread in the core waking up from any state which can cause
	 * partial or complete hypervisor state loss. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The branch instruction below will
	 * be patched out if the platform does not have fastsleep or does not
	 * require the workaround. Patching will be performed during the
	 * discovery of idle-states.
	 */
.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
	b	fastsleep_workaround_at_exit

timebase_resync:
	/*
	 * Use cr3, which indicates that we are waking up with at least partial
	 * hypervisor state loss, to determine if TIMEBASE RESYNC is needed.
	 */
	ble	cr3,clear_lock
	/* Time base re-sync */
	bl	opal_rm_resync_timebase;
	/*
	 * If waking up from sleep, per core state is not lost, skip to
	 * clear_lock.
	 */
	bne	cr4,clear_lock

	/*
	 * First thread in the core to wake up, and it is waking up with
	 * complete hypervisor state loss. Restore per core hypervisor
	 * state.
	 */
BEGIN_FTR_SECTION
	ld	r4,_PTCR(r1)
	mtspr	SPRN_PTCR,r4
	ld	r4,_RPR(r1)
	mtspr	SPRN_RPR,r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4,_TSCR(r1)
	mtspr	SPRN_TSCR,r4
	ld	r4,_WORC(r1)
	mtspr	SPRN_WORC,r4

clear_lock:
	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS
	lwsync
	stw	r15,0(r14)

common_exit:
	/*
	 * Common to all threads.
	 *
	 * If waking up from sleep, hypervisor state is not lost. Hence
	 * skip hypervisor state restore.
	 */
	bne	cr4,hypervisor_state_restored

	/* Waking up from winkle */

BEGIN_MMU_FTR_SECTION
	b	no_segments
END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
	/* Restore SLB from PACA */
	ld	r8,PACA_SLBSHADOWPTR(r13)

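	/*
	 * Each SLB shadow save area entry is an ESID/VSID pair stored
	 * big-endian; only entries with SLB_ESID_V set are reinstalled
	 * with slbmte.
	 */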
	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
no_segments:

	/* Restore per thread state */

	ld	r4,_SPURR(r1)
	mtspr	SPRN_SPURR,r4
	ld	r4,_PURR(r1)
	mtspr	SPRN_PURR,r4
	ld	r4,_DSCR(r1)
	mtspr	SPRN_DSCR,r4
	ld	r4,_WORT(r1)
	mtspr	SPRN_WORT,r4

	/* Call cur_cpu_spec->cpu_restore() */
	LOAD_REG_ADDR(r4, cur_cpu_spec)
	ld	r4,0(r4)
	ld	r12,CPU_SPEC_RESTORE(r4)
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl

hypervisor_state_restored:

	mtspr	SPRN_SRR1,r16
	mtlr	r17
	blr	/* Return back to System Reset vector from where
		   pnv_restore_hyp_resource was invoked */

fastsleep_workaround_at_exit:
	li	r3,1
	li	r4,0
	bl	opal_rm_config_cpu_idle_state
	b	timebase_resync

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
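/*
 * The _NIP and _MSR values saved by pnv_powersave_common are used to
 * rfid straight back to the caller of power7_nap()/power7_sleep(),
 * with external interrupts still hard-disabled (MSR_EE was cleared
 * before the frame was saved).
 */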
_GLOBAL(pnv_wakeup_loss)
	ld	r1,PACAR1(r13)
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(pnv_wakeup_noloss)
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	pnv_wakeup_loss
BEGIN_FTR_SECTION
	CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r1,PACAR1(r13)
	ld	r6,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE
	mtcr	r6
	mtspr	SPRN_SRR1,r4
	mtspr	SPRN_SRR0,r5
	rfid