Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	 11
-rw-r--r--	arch/powerpc/kernel/exceptions-64s.S	 35
-rw-r--r--	arch/powerpc/kernel/idle_power7.S	344
-rw-r--r--	arch/powerpc/kernel/smp.c	  9
4 files changed, 343 insertions, 56 deletions
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 24d78e1871c9..e624f9646350 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -726,5 +726,16 @@ int main(void)
 					arch.timing_last_enter.tv32.tbl));
 #endif
 
+#ifdef CONFIG_PPC_POWERNV
+	DEFINE(PACA_CORE_IDLE_STATE_PTR,
+			offsetof(struct paca_struct, core_idle_state_ptr));
+	DEFINE(PACA_THREAD_IDLE_STATE,
+			offsetof(struct paca_struct, thread_idle_state));
+	DEFINE(PACA_THREAD_MASK,
+			offsetof(struct paca_struct, thread_mask));
+	DEFINE(PACA_SUBCORE_SIBLING_MASK,
+			offsetof(struct paca_struct, subcore_sibling_mask));
+#endif
+
 	return 0;
 }
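A note for readers: asm-offsets.c is never linked into the kernel; the build compiles it and scrapes the DEFINE() output into a generated header, so the assembly below can address the new paca fields by constant offset (e.g. stb r0,PACA_THREAD_IDLE_STATE(r13)). Here is a minimal user-space sketch of that mechanism, using a stand-in structure, since the real paca_struct layout is not part of this diff:

#include <stdio.h>
#include <stddef.h>

struct paca_struct_sketch {			/* stand-in, not the real paca_struct */
	unsigned long *core_idle_state_ptr;	/* shared per-core state word */
	unsigned char thread_idle_state;	/* PNV_THREAD_RUNNING/NAP/... */
	unsigned char thread_mask;		/* this thread's bit in the core state */
	unsigned char subcore_sibling_mask;	/* threads in the same subcore */
};

/* asm-offsets-style emission; the kernel parses special asm output
 * rather than printing, but the contract is the same. */
#define DEFINE(sym, val) \
	printf("#define %-28s %ld\n", #sym, (long)(val))

int main(void)
{
	DEFINE(PACA_CORE_IDLE_STATE_PTR,
	       offsetof(struct paca_struct_sketch, core_idle_state_ptr));
	DEFINE(PACA_THREAD_IDLE_STATE,
	       offsetof(struct paca_struct_sketch, thread_idle_state));
	DEFINE(PACA_THREAD_MASK,
	       offsetof(struct paca_struct_sketch, thread_mask));
	DEFINE(PACA_SUBCORE_SIBLING_MASK,
	       offsetof(struct paca_struct_sketch, subcore_sibling_mask));
	return 0;
}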
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index db08382e19f1..c2df8150bd7a 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -15,6 +15,7 @@
 #include <asm/hw_irq.h>
 #include <asm/exception-64s.h>
 #include <asm/ptrace.h>
+#include <asm/cpuidle.h>
 
 /*
  * We layout physical memory as follows:
@@ -101,23 +102,34 @@ system_reset_pSeries:
 #ifdef CONFIG_PPC_P7_NAP
 BEGIN_FTR_SECTION
 	/* Running native on arch 2.06 or later, check if we are
-	 * waking up from nap. We only handle no state loss and
-	 * supervisor state loss. We do -not- handle hypervisor
-	 * state loss at this time.
+	 * waking up from nap/sleep/winkle.
 	 */
 	mfspr	r13,SPRN_SRR1
 	rlwinm.	r13,r13,47-31,30,31
 	beq	9f
 
-	/* waking up from powersave (nap) state */
-	cmpwi	cr1,r13,2
-	/* Total loss of HV state is fatal, we could try to use the
-	 * PIR to locate a PACA, then use an emergency stack etc...
-	 * OPAL v3 based powernv platforms have new idle states
-	 * which fall in this catagory.
+	cmpwi	cr3,r13,2
+
+	/*
+	 * Check if last bit of HSPRG0 is set. This indicates whether we are
+	 * waking up from winkle.
 	 */
-	bgt	cr1,8f
 	GET_PACA(r13)
+	clrldi	r5,r13,63
+	clrrdi	r13,r13,1
+	cmpwi	cr4,r5,1
+	mtspr	SPRN_HSPRG0,r13
+
+	lbz	r0,PACA_THREAD_IDLE_STATE(r13)
+	cmpwi	cr2,r0,PNV_THREAD_NAP
+	bgt	cr2,8f				/* Either sleep or Winkle */
+
+	/* Waking up from nap should not cause hypervisor state loss */
+	bgt	cr3,.
+
+	/* Waking up from nap */
+	li	r0,PNV_THREAD_RUNNING
+	stb	r0,PACA_THREAD_IDLE_STATE(r13)	/* Clear thread state */
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	li	r0,KVM_HWTHREAD_IN_KERNEL
@@ -133,7 +145,7 @@ BEGIN_FTR_SECTION
 
 	/* Return SRR1 from power7_nap() */
 	mfspr	r3,SPRN_SRR1
-	beq	cr1,2f
+	beq	cr3,2f
 	b	power7_wakeup_noloss
 2:	b	power7_wakeup_loss
 
@@ -1382,6 +1394,7 @@ machine_check_handle_early:
 	MACHINE_CHECK_HANDLER_WINDUP
 	GET_PACA(r13)
 	ld	r1,PACAR1(r13)
+	li	r3,PNV_THREAD_NAP
 	b	power7_enter_nap_mode
 4:
 #endif
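For readability, the reset-vector logic above can be summarised in C. This is a hedged sketch, not kernel code: the SRR1 wake-state field decode and the PNV_THREAD_* values follow my reading of the asm/cpuidle.h introduced by this series, and the helper name is invented for illustration.

enum pnv_thread_state {			/* assumed from asm/cpuidle.h */
	PNV_THREAD_RUNNING = 0,
	PNV_THREAD_NAP     = 1,
	PNV_THREAD_SLEEP   = 2,
	PNV_THREAD_WINKLE  = 3,
};

/*
 * srr1_wake: the 2-bit field extracted by "rlwinm. r13,r13,47-31,30,31";
 * zero means this reset is not an idle wakeup, and 3 (cr3 "gt" after the
 * compare with 2) means hypervisor state was lost.
 * hsprg0_low_bit: the flag the winkle entry path hides in HSPRG0.
 */
static int classify_wakeup(unsigned int srr1_wake,
			   unsigned char thread_idle_state,
			   unsigned int hsprg0_low_bit)
{
	if (srr1_wake == 0)
		return PNV_THREAD_RUNNING;	/* ordinary system reset */
	if (thread_idle_state > PNV_THREAD_NAP)	/* sleep or winkle path */
		return hsprg0_low_bit ? PNV_THREAD_WINKLE : PNV_THREAD_SLEEP;
	/* nap: hypervisor state loss would be a bug ("bgt cr3,." spins) */
	return PNV_THREAD_NAP;
}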
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 18c0687e5ab3..05adc8bbdef8 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -18,9 +18,25 @@
 #include <asm/hw_irq.h>
 #include <asm/kvm_book3s_asm.h>
 #include <asm/opal.h>
+#include <asm/cpuidle.h>
+#include <asm/mmu-hash64.h>
 
 #undef DEBUG
 
+/*
+ * Use unused space in the interrupt stack to save and restore
+ * registers for winkle support.
+ */
+#define _SDR1	GPR3
+#define _RPR	GPR4
+#define _SPURR	GPR5
+#define _PURR	GPR6
+#define _TSCR	GPR7
+#define _DSCR	GPR8
+#define _AMOR	GPR9
+#define _WORT	GPR10
+#define _WORC	GPR11
+
 /* Idle state entry routines */
 
 #define IDLE_STATE_ENTER_SEQ(IDLE_INST)				\
@@ -37,8 +53,7 @@
 
 /*
  * Pass requested state in r3:
- *	0 - nap
- *	1 - sleep
+ *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE
  *
  * To check IRQ_HAPPENED in r4
  *	0 - don't check
@@ -101,18 +116,105 @@ _GLOBAL(power7_powersave_common)
 	std	r9,_MSR(r1)
 	std	r1,PACAR1(r13)
 
-_GLOBAL(power7_enter_nap_mode)
+	/*
+	 * Go to real mode to do the nap, as required by the architecture.
+	 * Also, we need to be in real mode before setting hwthread_state,
+	 * because as soon as we do that, another thread can switch
+	 * the MMU context to the guest.
+	 */
+	LOAD_REG_IMMEDIATE(r5, MSR_IDLE)
+	li	r6, MSR_RI
+	andc	r6, r9, r6
+	LOAD_REG_ADDR(r7, power7_enter_nap_mode)
+	mtmsrd	r6, 1		/* clear RI before setting SRR0/1 */
+	mtspr	SPRN_SRR0, r7
+	mtspr	SPRN_SRR1, r5
+	rfid
+
+	.globl	power7_enter_nap_mode
+power7_enter_nap_mode:
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	/* Tell KVM we're napping */
 	li	r4,KVM_HWTHREAD_IN_NAP
 	stb	r4,HSTATE_HWTHREAD_STATE(r13)
 #endif
-	cmpwi	cr0,r3,1
-	beq	2f
+	stb	r3,PACA_THREAD_IDLE_STATE(r13)
+	cmpwi	cr3,r3,PNV_THREAD_SLEEP
+	bge	cr3,2f
 	IDLE_STATE_ENTER_SEQ(PPC_NAP)
 	/* No return */
-2:	IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
-	/* No return */
+2:
+	/* Sleep or winkle */
+	lbz	r7,PACA_THREAD_MASK(r13)
+	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
+lwarx_loop1:
+	lwarx	r15,0,r14
+	andc	r15,r15,r7			/* Clear thread bit */
+
+	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS
+
+	/*
+	 * If the result in cr0 is zero, the current thread is the last
+	 * thread of the core entering sleep. The last thread needs to
+	 * execute the hardware bug workaround code if the platform
+	 * requires it. Make the workaround call unconditionally here; the
+	 * branch below is patched out when the idle states are discovered
+	 * if the platform does not require the workaround.
+	 */
+.global pnv_fastsleep_workaround_at_entry
+pnv_fastsleep_workaround_at_entry:
+	beq	fastsleep_workaround_at_entry
+
+	stwcx.	r15,0,r14
+	bne-	lwarx_loop1
+	isync
+
+common_enter: /* common code for all the threads entering sleep or winkle */
+	bgt	cr3,enter_winkle
+	IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
+
+fastsleep_workaround_at_entry:
+	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
+	stwcx.	r15,0,r14
+	bne-	lwarx_loop1
+	isync
+
+	/* Fast sleep workaround */
+	li	r3,1
+	li	r4,1
+	li	r0,OPAL_CONFIG_CPU_IDLE_STATE
+	bl	opal_call_realmode
+
+	/* Clear Lock bit */
+	li	r0,0
+	lwsync
+	stw	r0,0(r14)
+	b	common_enter
+
+enter_winkle:
+	/*
+	 * Note: all registers, whether per-core, per-subcore or per-thread,
+	 * are saved here, since any thread in the core might wake up first.
+	 */
+	mfspr	r3,SPRN_SDR1
+	std	r3,_SDR1(r1)
+	mfspr	r3,SPRN_RPR
+	std	r3,_RPR(r1)
+	mfspr	r3,SPRN_SPURR
+	std	r3,_SPURR(r1)
+	mfspr	r3,SPRN_PURR
+	std	r3,_PURR(r1)
+	mfspr	r3,SPRN_TSCR
+	std	r3,_TSCR(r1)
+	mfspr	r3,SPRN_DSCR
+	std	r3,_DSCR(r1)
+	mfspr	r3,SPRN_AMOR
+	std	r3,_AMOR(r1)
+	mfspr	r3,SPRN_WORT
+	std	r3,_WORT(r1)
+	mfspr	r3,SPRN_WORC
+	std	r3,_WORC(r1)
+	IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
 
 _GLOBAL(power7_idle)
 	/* Now check if user or arch enabled NAP mode */
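The sleep/winkle entry path above maintains one word per core: a bit per thread plus a lock bit, updated under lwarx/stwcx.; the last thread to clear its bit applies the fastsleep workaround while holding the lock. A sketch of the same protocol in portable C11 atomics, assuming the PNV_CORE_IDLE_* values from this series' asm/cpuidle.h; the workaround helper is an empty stand-in for the OPAL_CONFIG_CPU_IDLE_STATE call:

#include <stdatomic.h>
#include <stdbool.h>

#define PNV_CORE_IDLE_LOCK_BIT		0x100
#define PNV_CORE_IDLE_THREAD_BITS	0x0ff

static void fastsleep_workaround_apply(void) { }	/* OPAL call stand-in */

static void idle_entry(atomic_uint *core_state, unsigned int thread_mask,
		       bool workaround_needed)
{
	unsigned int old, new;

	do {
		old = atomic_load_explicit(core_state, memory_order_relaxed);
		new = old & ~thread_mask;	/* clear our thread bit */
		/* last thread down takes the lock bit, like lwarx_loop1 */
		if (workaround_needed &&
		    (new & PNV_CORE_IDLE_THREAD_BITS) == 0)
			new |= PNV_CORE_IDLE_LOCK_BIT;
	} while (!atomic_compare_exchange_weak(core_state, &old, new));

	if (new & PNV_CORE_IDLE_LOCK_BIT) {
		fastsleep_workaround_apply();
		atomic_fetch_and_explicit(core_state,
					  ~PNV_CORE_IDLE_LOCK_BIT,
					  memory_order_release);
	}
	/* ...then execute PPC_SLEEP or PPC_WINKLE */
}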
@@ -125,48 +227,21 @@ _GLOBAL(power7_idle)
 
 _GLOBAL(power7_nap)
 	mr	r4,r3
-	li	r3,0
+	li	r3,PNV_THREAD_NAP
 	b	power7_powersave_common
 	/* No return */
 
 _GLOBAL(power7_sleep)
-	li	r3,1
+	li	r3,PNV_THREAD_SLEEP
 	li	r4,1
 	b	power7_powersave_common
 	/* No return */
 
-/*
- * Make opal call in realmode. This is a generic function to be called
- * from realmode from reset vector. It handles endianess.
- *
- * r13 - paca pointer
- * r1 - stack pointer
- * r3 - opal token
- */
-opal_call_realmode:
-	mflr	r12
-	std	r12,_LINK(r1)
-	ld	r2,PACATOC(r13)
-	/* Set opal return address */
-	LOAD_REG_ADDR(r0,return_from_opal_call)
-	mtlr	r0
-	/* Handle endian-ness */
-	li	r0,MSR_LE
-	mfmsr	r12
-	andc	r12,r12,r0
-	mtspr	SPRN_HSRR1,r12
-	mr	r0,r3			/* Move opal token to r0 */
-	LOAD_REG_ADDR(r11,opal)
-	ld	r12,8(r11)
-	ld	r2,0(r11)
-	mtspr	SPRN_HSRR0,r12
-	hrfid
-
-return_from_opal_call:
-	FIXUP_ENDIAN
-	ld	r0,_LINK(r1)
-	mtlr	r0
-	blr
+_GLOBAL(power7_winkle)
+	li	r3,PNV_THREAD_WINKLE
+	li	r4,1
+	b	power7_powersave_common
+	/* No return */
 
 #define CHECK_HMI_INTERRUPT					\
 	mfspr	r0,SPRN_SRR1;					\
@@ -181,7 +256,7 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);	\
 	ld	r2,PACATOC(r13);				\
 	ld	r1,PACAR1(r13);					\
 	std	r3,ORIG_GPR3(r1);	/* Save original r3 */	\
-	li	r3,OPAL_HANDLE_HMI;	/* Pass opal token argument*/ \
+	li	r0,OPAL_HANDLE_HMI;	/* Pass opal token argument*/ \
 	bl	opal_call_realmode;				\
 	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
 20:	nop;
@@ -190,16 +265,190 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);	\
 _GLOBAL(power7_wakeup_tb_loss)
 	ld	r2,PACATOC(r13);
 	ld	r1,PACAR1(r13)
+	/*
+	 * Before entering any idle state, the NVGPRs are saved in the stack
+	 * and they are restored before switching to the process context. Hence
+	 * until they are restored, they are free to be used.
+	 *
+	 * Save SRR1 in a NVGPR as it might be clobbered in opal_call_realmode
+	 * (called in CHECK_HMI_INTERRUPT). SRR1 is required to determine the
+	 * wakeup reason if we branch to kvm_start_guest.
+	 */
 
+	mfspr	r16,SPRN_SRR1
 BEGIN_FTR_SECTION
 	CHECK_HMI_INTERRUPT
 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+
+	lbz	r7,PACA_THREAD_MASK(r13)
+	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
+lwarx_loop2:
+	lwarx	r15,0,r14
+	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
+	/*
+	 * The lock bit is set in one of two cases:
+	 * a. In the sleep/winkle enter path, the last thread is executing
+	 *    the fastsleep workaround code.
+	 * b. In the wake-up path, another thread is executing the fastsleep
+	 *    workaround undo code, resyncing the timebase or restoring
+	 *    context.
+	 * In either case, loop until the lock bit is cleared.
+	 */
+	bne	core_idle_lock_held
+
+	cmpwi	cr2,r15,0
+	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
+	and	r4,r4,r15
+	cmpwi	cr1,r4,0	/* Check if first in subcore */
+
+	/*
+	 * At this stage:
+	 * cr1 - 0b0100 if first thread to wake up in the subcore
+	 * cr2 - 0b0100 if first thread to wake up in the core
+	 * cr3 - 0b0010 if waking up from sleep or winkle
+	 * cr4 - 0b0100 if waking up from winkle
+	 */
+
+	or	r15,r15,r7		/* Set thread bit */
+
+	beq	cr1,first_thread_in_subcore
+
+	/* Not first thread in subcore to wake up */
+	stwcx.	r15,0,r14
+	bne-	lwarx_loop2
+	isync
+	b	common_exit
+
+core_idle_lock_held:
+	HMT_LOW
+core_idle_lock_loop:
+	lwz	r15,0(r14)
+	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
+	bne	core_idle_lock_loop
+	HMT_MEDIUM
+	b	lwarx_loop2
+
+first_thread_in_subcore:
+	/* First thread in subcore to wake up */
+	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
+	stwcx.	r15,0,r14
+	bne-	lwarx_loop2
+	isync
+
+	/*
+	 * If waking up from sleep, subcore state is not lost. Hence
+	 * skip subcore state restore
+	 */
+	bne	cr4,subcore_state_restored
+
+	/* Restore per-subcore state */
+	ld	r4,_SDR1(r1)
+	mtspr	SPRN_SDR1,r4
+	ld	r4,_RPR(r1)
+	mtspr	SPRN_RPR,r4
+	ld	r4,_AMOR(r1)
+	mtspr	SPRN_AMOR,r4
+
+subcore_state_restored:
+	/*
+	 * Check if the thread is also the first thread in the core. If not,
+	 * skip to clear_lock.
+	 */
+	bne	cr2,clear_lock
+
+first_thread_in_core:
+
+	/*
+	 * First thread in the core waking up from fastsleep. It needs to
+	 * call the fastsleep workaround code if the platform requires it.
+	 * Call it unconditionally here. The branch instruction below will
+	 * be patched out when the idle states are discovered if the
+	 * platform does not require the workaround.
+	 */
+.global pnv_fastsleep_workaround_at_exit
+pnv_fastsleep_workaround_at_exit:
+	b	fastsleep_workaround_at_exit
+
+timebase_resync:
+	/* Do timebase resync if we are waking up from sleep. Use the cr3
+	 * value set in exceptions-64s.S. */
+	ble	cr3,clear_lock
 	/* Time base re-sync */
-	li	r3,OPAL_RESYNC_TIMEBASE
+	li	r0,OPAL_RESYNC_TIMEBASE
 	bl	opal_call_realmode;
-
 	/* TODO: Check r3 for failure */
 
+	/*
+	 * If waking up from sleep, per-core state is not lost, skip to
+	 * clear_lock.
+	 */
+	bne	cr4,clear_lock
+
+	/* Restore per-core state */
+	ld	r4,_TSCR(r1)
+	mtspr	SPRN_TSCR,r4
+	ld	r4,_WORC(r1)
+	mtspr	SPRN_WORC,r4
+
+clear_lock:
+	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS
+	lwsync
+	stw	r15,0(r14)
+
+common_exit:
+	/*
+	 * Common to all threads.
+	 *
+	 * If waking up from sleep, hypervisor state is not lost. Hence
+	 * skip hypervisor state restore.
+	 */
+	bne	cr4,hypervisor_state_restored
+
+	/* Waking up from winkle */
+
+	/* Restore per-thread state */
+	bl	__restore_cpu_power8
+
+	/* Restore SLB from PACA */
+	ld	r8,PACA_SLBSHADOWPTR(r13)
+
+	.rept	SLB_NUM_BOLTED
+	li	r3, SLBSHADOW_SAVEAREA
+	LDX_BE	r5, r8, r3
+	addi	r3, r3, 8
+	LDX_BE	r6, r8, r3
+	andis.	r7,r5,SLB_ESID_V@h
+	beq	1f
+	slbmte	r6,r5
+1:	addi	r8,r8,16
+	.endr
+
+	ld	r4,_SPURR(r1)
+	mtspr	SPRN_SPURR,r4
+	ld	r4,_PURR(r1)
+	mtspr	SPRN_PURR,r4
+	ld	r4,_DSCR(r1)
+	mtspr	SPRN_DSCR,r4
+	ld	r4,_WORT(r1)
+	mtspr	SPRN_WORT,r4
+
+hypervisor_state_restored:
+
+	li	r5,PNV_THREAD_RUNNING
+	stb	r5,PACA_THREAD_IDLE_STATE(r13)
+
+	mtspr	SPRN_SRR1,r16
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	li	r0,KVM_HWTHREAD_IN_KERNEL
+	stb	r0,HSTATE_HWTHREAD_STATE(r13)
+	/* Order setting hwthread_state vs. testing hwthread_req */
+	sync
+	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
+	cmpwi	r0,0
+	beq	6f
+	b	kvm_start_guest
+6:
+#endif
+
 	REST_NVGPRS(r1)
 	REST_GPR(2, r1)
 	ld	r3,_CCR(r1)
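On the wakeup side, the same per-core word tells each thread what to restore: the first thread awake in a subcore takes the lock bit and restores per-subcore state, the first in the whole core additionally undoes the workaround and resyncs the timebase, and every thread restores its own per-thread state after winkle. A C sketch of that classification, reusing the constants from the entry sketch above; the restore helpers are empty stand-ins:

static void restore_per_subcore_state(void) { }	/* SDR1, RPR, AMOR */
static void restore_per_core_state(void) { }	/* TSCR, WORC */
static void restore_per_thread_state(void) { }	/* SLB, SPURR, PURR, DSCR, WORT */
static void fastsleep_workaround_undo(void) { }
static void resync_timebase(void) { }		/* OPAL_RESYNC_TIMEBASE */

static void idle_exit(atomic_uint *core_state, unsigned int thread_mask,
		      unsigned int subcore_sibling_mask, bool from_winkle)
{
	unsigned int old, new;

	for (;;) {
		old = atomic_load_explicit(core_state, memory_order_relaxed);
		if (old & PNV_CORE_IDLE_LOCK_BIT)
			continue;		/* core_idle_lock_held loop */
		new = old | thread_mask;	/* set our thread bit */
		if ((old & subcore_sibling_mask) == 0)	/* first in subcore */
			new |= PNV_CORE_IDLE_LOCK_BIT;
		if (atomic_compare_exchange_weak(core_state, &old, new))
			break;
	}

	if (new & PNV_CORE_IDLE_LOCK_BIT) {
		if (from_winkle)
			restore_per_subcore_state();
		if ((old & PNV_CORE_IDLE_THREAD_BITS) == 0) {
			/* first thread in the whole core */
			fastsleep_workaround_undo();
			resync_timebase();
			if (from_winkle)
				restore_per_core_state();
		}
		/* clear_lock: drop the lock bit, keep the thread bits */
		atomic_fetch_and_explicit(core_state,
					  ~PNV_CORE_IDLE_LOCK_BIT,
					  memory_order_release);
	}
	if (from_winkle)
		restore_per_thread_state();
}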
@@ -212,6 +461,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 	mtspr	SPRN_SRR0,r5
 	rfid
 
+fastsleep_workaround_at_exit:
+	li	r3,1
+	li	r4,0
+	li	r0,OPAL_CONFIG_CPU_IDLE_STATE
+	bl	opal_call_realmode
+	b	timebase_resync
+
 /*
  * R3 here contains the value that will be returned to the caller
  * of power7_nap.
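Note the pairing: the entry path calls OPAL_CONFIG_CPU_IDLE_STATE with r3=1, r4=1 to apply the fastsleep workaround, and fastsleep_workaround_at_exit above undoes it with r3=1, r4=0, in both cases while holding the lock bit. Expressed as C stubs; the opal_config_cpu_idle_state prototype follows my reading of the OPAL wrapper added by this series, so treat the name and argument meanings as assumptions:

long opal_config_cpu_idle_state(unsigned long state, unsigned long flag);

static void fastsleep_workaround_apply(void)
{
	opal_config_cpu_idle_state(1, 1);	/* entry: apply workaround */
}

static void fastsleep_workaround_undo(void)
{
	opal_config_cpu_idle_state(1, 0);	/* exit: undo workaround */
}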
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8b2d2dc8ef10..8ec017cb4446 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -700,7 +700,6 @@ void start_secondary(void *unused)
 	smp_store_cpu_info(cpu);
 	set_dec(tb_ticks_per_jiffy);
 	preempt_disable();
-	cpu_callin_map[cpu] = 1;
 
 	if (smp_ops->setup_cpu)
 		smp_ops->setup_cpu(cpu);
@@ -739,6 +738,14 @@ void start_secondary(void *unused)
 	notify_cpu_starting(cpu);
 	set_cpu_online(cpu, true);
 
+	/*
+	 * CPU must be marked active and online before we signal back to the
+	 * master, because the scheduler needs to see the cpu_online and
+	 * cpu_active bits set.
+	 */
+	smp_wmb();
+	cpu_callin_map[cpu] = 1;
+
 	local_irq_enable();
 
 	cpu_startup_entry(CPUHP_ONLINE);
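The smp.c hunks fix an ordering problem: the callin signal used to be published before set_cpu_online(), so the boot CPU could finish __cpu_up() and let the scheduler touch a CPU whose online/active bits were not yet visible. The smp_wmb()/store pair is the classic publish pattern; a hedged user-space rendering with C11 atomics (the kernel uses smp_wmb() plus a plain store, and the master-side spin lives in __cpu_up()):

#include <stdatomic.h>
#include <stdbool.h>

static bool cpu_online_bit;		/* stands in for the cpu_online mask bit */
static atomic_int cpu_callin;		/* stands in for cpu_callin_map[cpu] */

static void secondary_publish(void)	/* start_secondary() side */
{
	cpu_online_bit = true;
	/* release ordering plays the role of smp_wmb() in the patch */
	atomic_store_explicit(&cpu_callin, 1, memory_order_release);
}

static bool master_wait(void)		/* __cpu_up() side */
{
	while (!atomic_load_explicit(&cpu_callin, memory_order_acquire))
		;			/* spin until the secondary checks in */
	return cpu_online_bit;		/* guaranteed observed as true */
}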