author	Shreyas B. Prabhu <shreyas@linux.vnet.ibm.com>	2016-07-08 02:20:49 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2016-07-15 06:18:41 -0400
commit	bcef83a00dc44ee25ff4d6e078cf6432ddf74dec (patch)
tree	80976e43a34d585460eb806ba5eb7ed6f2b2f465
parent	0dfffb48cecd8f84c6e649baee9bacd9be925734 (diff)
powerpc/powernv: Add platform support for stop instruction
POWER ISA v3 defines a new idle processor core mechanism. In summary,
 a) new instruction named stop is added. This instruction replaces
    instructions like nap, sleep, rvwinkle.
 b) new per thread SPR named Processor Stop Status and Control Register
    (PSSCR) is added which controls the behavior of stop instruction.

PSSCR layout:
----------------------------------------------------------
| PLS | /// | SD | ESL | EC | PSLL | /// | TR | MTL | RL |
----------------------------------------------------------
0      4     41   42    43   44     48    54   56    60

PSSCR key fields:
	Bits 0:3 - Power-Saving Level Status. This field indicates the lowest
	power-saving state the thread entered since stop instruction was last
	executed.

	Bit 42 - Enable State Loss
	0 - No state is lost irrespective of other fields
	1 - Allows state loss

	Bits 44:47 - Power-Saving Level Limit
	This limits the power-saving level that can be entered into.

	Bits 60:63 - Requested Level
	Used to specify which power-saving level must be entered on executing
	stop instruction

This patch adds support for stop instruction and PSSCR handling.

Reviewed-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
Signed-off-by: Shreyas B. Prabhu <shreyas@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
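[Reader's note, not part of the patch: the PSSCR value written by the new
power9_idle_stop() routine is simply the requested level OR-ed into a
hypervisor template that sets ESL, EC and the PSLL/TR/MTL limit fields,
then written with mtspr SPRN_PSSCR. A minimal stand-alone C sketch of that
composition is below; the constants mirror the ones this patch adds to
arch/powerpc/include/asm/reg.h and idle_book3s.S, and the main()/printf
wrapper is purely illustrative.]

/* Illustrative sketch only -- mirrors how power9_idle_stop() builds the
 * PSSCR image. Constants are copied from the values this patch adds;
 * the user-space wrapper is not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define PSSCR_RL_MASK	0x0000000F	/* Requested Level */
#define PSSCR_MTL_MASK	0x000000F0	/* Maximum Transition Level */
#define PSSCR_TR_MASK	0x00000300	/* Transition State */
#define PSSCR_PSLL_MASK	0x000F0000	/* Power-Saving Level Limit */
#define PSSCR_EC	0x00100000	/* Exit Criterion */
#define PSSCR_ESL	0x00200000	/* Enable State Loss */

/* Template used by the patch: allow state loss, exit controlled by LPCR
 * PECE bits, and leave PSLL/TR/MTL wide open so only RL selects the
 * stop level. */
#define PSSCR_HV_TEMPLATE	(PSSCR_ESL | PSSCR_EC | \
				 PSSCR_PSLL_MASK | PSSCR_TR_MASK | \
				 PSSCR_MTL_MASK)

int main(void)
{
	uint64_t stop_level = 0;	/* stop0, as requested by power9_idle() */
	/* The kernel asm simply ORs the requested level into the template. */
	uint64_t psscr = PSSCR_HV_TEMPLATE | stop_level;

	printf("PSSCR image for stop%llu: 0x%08llx\n",
	       (unsigned long long)stop_level,
	       (unsigned long long)psscr);
	return 0;
}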
-rw-r--r--	arch/powerpc/include/asm/cpuidle.h	  2
-rw-r--r--	arch/powerpc/include/asm/kvm_book3s_asm.h	  2
-rw-r--r--	arch/powerpc/include/asm/opal-api.h	 11
-rw-r--r--	arch/powerpc/include/asm/ppc-opcode.h	  4
-rw-r--r--	arch/powerpc/include/asm/processor.h	  2
-rw-r--r--	arch/powerpc/include/asm/reg.h	 10
-rw-r--r--	arch/powerpc/kernel/idle_book3s.S	193
-rw-r--r--	arch/powerpc/platforms/powernv/idle.c	174
8 files changed, 332 insertions, 66 deletions
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index d2f99ca1e3a6..3d7fc06532a1 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -13,6 +13,8 @@
 #ifndef __ASSEMBLY__
 extern u32 pnv_fastsleep_workaround_at_entry[];
 extern u32 pnv_fastsleep_workaround_at_exit[];
+
+extern u64 pnv_first_deep_stop_state;
 #endif
 
 #endif
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 72b6225aca73..d318d432caa9 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -162,7 +162,7 @@ struct kvmppc_book3s_shadow_vcpu {
 
 /* Values for kvm_state */
 #define KVM_HWTHREAD_IN_KERNEL	0
-#define KVM_HWTHREAD_IN_NAP	1
+#define KVM_HWTHREAD_IN_IDLE	1
 #define KVM_HWTHREAD_IN_KVM	2
 
 #endif /* __ASM_KVM_BOOK3S_ASM_H__ */
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index 72b5f27cd0b8..6de1e4e272f9 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -166,13 +166,20 @@
 
 /* Device tree flags */
 
-/* Flags set in power-mgmt nodes in device tree if
- * respective idle states are supported in the platform.
+/*
+ * Flags set in power-mgmt nodes in device tree describing
+ * idle states that are supported in the platform.
  */
+
+#define OPAL_PM_TIMEBASE_STOP		0x00000002
+#define OPAL_PM_LOSE_HYP_CONTEXT	0x00002000
+#define OPAL_PM_LOSE_FULL_CONTEXT	0x00004000
 #define OPAL_PM_NAP_ENABLED		0x00010000
 #define OPAL_PM_SLEEP_ENABLED		0x00020000
 #define OPAL_PM_WINKLE_ENABLED		0x00040000
 #define OPAL_PM_SLEEP_ENABLED_ER1	0x00080000 /* with workaround */
+#define OPAL_PM_STOP_INST_FAST		0x00100000
+#define OPAL_PM_STOP_INST_DEEP		0x00200000
 
 /*
  * OPAL_CONFIG_CPU_IDLE_STATE parameters
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 9de9df14a8d9..81657a1e03fe 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -205,6 +205,8 @@
 #define PPC_INST_SLEEP			0x4c0003a4
 #define PPC_INST_WINKLE			0x4c0003e4
 
+#define PPC_INST_STOP			0x4c0002e4
+
 /* A2 specific instructions */
 #define PPC_INST_ERATWE			0x7c0001a6
 #define PPC_INST_ERATRE			0x7c000166
@@ -394,6 +396,8 @@
 #define PPC_SLEEP		stringify_in_c(.long PPC_INST_SLEEP)
 #define PPC_WINKLE		stringify_in_c(.long PPC_INST_WINKLE)
 
+#define PPC_STOP		stringify_in_c(.long PPC_INST_STOP)
+
 /* BHRB instructions */
 #define PPC_CLRBHRB		stringify_in_c(.long PPC_INST_CLRBHRB)
 #define PPC_MFBHRBE(r, n)	stringify_in_c(.long PPC_INST_BHRBE | \
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index b5925d5d4985..68e3bf57b027 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -460,6 +460,8 @@ extern int powersave_nap; /* set if nap mode can be used in idle loop */
 extern unsigned long power7_nap(int check_irq);
 extern unsigned long power7_sleep(void);
 extern unsigned long power7_winkle(void);
+extern unsigned long power9_idle_stop(unsigned long stop_level);
+
 extern void flush_instruction_cache(void);
 extern void hard_reset_now(void);
 extern void poweroff_now(void);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index ac4be83f8fdc..c0263a2d1008 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -145,6 +145,15 @@
 #define MSR_64BIT	0
 #endif
 
+/* Power Management - Processor Stop Status and Control Register Fields */
+#define PSSCR_RL_MASK		0x0000000F /* Requested Level */
+#define PSSCR_MTL_MASK		0x000000F0 /* Maximum Transition Level */
+#define PSSCR_TR_MASK		0x00000300 /* Transition State */
+#define PSSCR_PSLL_MASK		0x000F0000 /* Power-Saving Level Limit */
+#define PSSCR_EC		0x00100000 /* Exit Criterion */
+#define PSSCR_ESL		0x00200000 /* Enable State Loss */
+#define PSSCR_SD		0x00400000 /* Status Disable */
+
 /* Floating Point Status and Control Register (FPSCR) Fields */
 #define FPSCR_FX	0x80000000	/* FPU exception summary */
 #define FPSCR_FEX	0x40000000	/* FPU enabled exception summary */
@@ -291,6 +300,7 @@
 #define SPRN_PMICR	0x354	/* Power Management Idle Control Reg */
 #define SPRN_PMSR	0x355	/* Power Management Status Reg */
 #define SPRN_PMMAR	0x356	/* Power Management Memory Activity Register */
+#define SPRN_PSSCR	0x357	/* Processor Stop Status and Control Register (ISA 3.0) */
 #define SPRN_PMCR	0x374	/* Power Management Control Register */
 
 /* HFSCR and FSCR bit numbers are the same */
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 2f909a12c76c..1f564eb409c3 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -1,6 +1,6 @@
 /*
- * This file contains idle entry/exit functions for POWER7 and
- * POWER8 CPUs.
+ * This file contains idle entry/exit functions for POWER7,
+ * POWER8 and POWER9 CPUs.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -21,6 +21,7 @@
 #include <asm/opal.h>
 #include <asm/cpuidle.h>
 #include <asm/book3s/64/mmu-hash.h>
+#include <asm/mmu.h>
 
 #undef DEBUG
 
@@ -37,6 +38,11 @@
 #define _AMOR	GPR9
 #define _WORT	GPR10
 #define _WORC	GPR11
+#define _PTCR	GPR12
+
+#define PSSCR_HV_TEMPLATE	PSSCR_ESL | PSSCR_EC | \
+				PSSCR_PSLL_MASK | PSSCR_TR_MASK | \
+				PSSCR_MTL_MASK
 
 /* Idle state entry routines */
 
@@ -61,8 +67,17 @@ save_sprs_to_stack:
  * Note all register i.e per-core, per-subcore or per-thread is saved
  * here since any thread in the core might wake up first
  */
+BEGIN_FTR_SECTION
+	mfspr	r3,SPRN_PTCR
+	std	r3,_PTCR(r1)
+	/*
+	 * Note - SDR1 is dropped in Power ISA v3. Hence not restoring
+	 * SDR1 here
+	 */
+FTR_SECTION_ELSE
 	mfspr	r3,SPRN_SDR1
 	std	r3,_SDR1(r1)
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
 	mfspr	r3,SPRN_RPR
 	std	r3,_RPR(r1)
 	mfspr	r3,SPRN_SPURR
@@ -100,7 +115,8 @@ core_idle_lock_held:
 
 /*
  * Pass requested state in r3:
- * r3 - PNV_THREAD_NAP/SLEEP/WINKLE
+ * r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
+ *	- Requested STOP state in POWER9
  *
  * To check IRQ_HAPPENED in r4
  *	0 - don't check
@@ -161,7 +177,7 @@ _GLOBAL(pnv_powersave_common)
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	/* Tell KVM we're entering idle */
-	li	r4,KVM_HWTHREAD_IN_NAP
+	li	r4,KVM_HWTHREAD_IN_IDLE
 	stb	r4,HSTATE_HWTHREAD_STATE(r13)
 #endif
 
@@ -243,6 +259,41 @@ enter_winkle:
 
 	IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
 
+/*
+ * r3 - requested stop state
+ */
+power_enter_stop:
+/*
+ * Check if the requested state is a deep idle state.
+ */
+	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
+	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
+	cmpd	r3,r4
+	bge	2f
+	IDLE_STATE_ENTER_SEQ(PPC_STOP)
+2:
+/*
+ * Entering deep idle state.
+ * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
+ * stack and enter stop
+ */
+	lbz	r7,PACA_THREAD_MASK(r13)
+	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
+
+lwarx_loop_stop:
+	lwarx	r15,0,r14
+	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
+	bnel	core_idle_lock_held
+	andc	r15,r15,r7			/* Clear thread bit */
+
+	stwcx.	r15,0,r14
+	bne-	lwarx_loop_stop
+	isync
+
+	bl	save_sprs_to_stack
+
+	IDLE_STATE_ENTER_SEQ(PPC_STOP)
+
 _GLOBAL(power7_idle)
 	/* Now check if user or arch enabled NAP mode */
 	LOAD_REG_ADDRBASE(r3,powersave_nap)
@@ -293,6 +344,17 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66); \
 
 
 /*
+ * r3 - requested stop state
+ */
+_GLOBAL(power9_idle_stop)
+	LOAD_REG_IMMEDIATE(r4, PSSCR_HV_TEMPLATE)
+	or	r4,r4,r3
+	mtspr	SPRN_PSSCR, r4
+	li	r4, 1
+	LOAD_REG_ADDR(r5,power_enter_stop)
+	b	pnv_powersave_common
+	/* No return */
+/*
  * Called from reset vector. Check whether we have woken up with
  * hypervisor state loss. If yes, restore hypervisor state and return
  * back to reset vector.
@@ -301,7 +363,33 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66); \
  * cr3 - set to gt if waking up with partial/complete hypervisor state loss
  */
 _GLOBAL(pnv_restore_hyp_resource)
+	ld	r2,PACATOC(r13);
+BEGIN_FTR_SECTION
+	/*
+	 * POWER ISA 3. Use PSSCR to determine if we
+	 * are waking up from deep idle state
+	 */
+	LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
+	ld	r4,ADDROFF(pnv_first_deep_stop_state)(r5)
+
+	mfspr	r5,SPRN_PSSCR
 	/*
+	 * 0-3 bits correspond to Power-Saving Level Status
+	 * which indicates the idle state we are waking up from
+	 */
+	rldicl	r5,r5,4,60
+	cmpd	cr4,r5,r4
+	bge	cr4,pnv_wakeup_tb_loss
+	/*
+	 * Waking up without hypervisor state loss. Return to
+	 * reset vector
+	 */
+	blr
+
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
+	/*
+	 * POWER ISA 2.07 or less.
 	 * Check if last bit of HSPGR0 is set. This indicates whether we are
 	 * waking up from winkle.
 	 */
@@ -324,9 +412,17 @@ _GLOBAL(pnv_restore_hyp_resource)
 	blr	/* Return back to System Reset vector from where
 		   pnv_restore_hyp_resource was invoked */
 
-
+/*
+ * Called if waking up from idle state which can cause either partial or
+ * complete hyp state loss.
+ * In POWER8, called if waking up from fastsleep or winkle
+ * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state
+ *
+ * r13 - PACA
+ * cr3 - gt if waking up with partial/complete hypervisor state loss
+ * cr4 - eq if waking up from complete hypervisor state loss.
+ */
 _GLOBAL(pnv_wakeup_tb_loss)
-	ld	r2,PACATOC(r13);
 	ld	r1,PACAR1(r13)
 	/*
 	 * Before entering any idle state, the NVGPRs are saved in the stack
@@ -361,35 +457,35 @@ lwarx_loop2:
 	bnel	core_idle_lock_held
 
 	cmpwi	cr2,r15,0
-	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
-	and	r4,r4,r15
-	cmpwi	cr1,r4,0	/* Check if first in subcore */
 
 	/*
 	 * At this stage
-	 * cr1 - 0b0100 if first thread to wakeup in subcore
-	 * cr2 - 0b0100 if first thread to wakeup in core
-	 * cr3-  0b0010 if waking up from sleep or winkle
-	 * cr4 - 0b0100 if waking up from winkle
+	 * cr2 - eq if first thread to wakeup in core
+	 * cr3-  gt if waking up with partial/complete hypervisor state loss
+	 * cr4 - eq if waking up from complete hypervisor state loss.
 	 */
 
-	or	r15,r15,r7		/* Set thread bit */
-
-	beq	cr1,first_thread_in_subcore
-
-	/* Not first thread in subcore to wake up */
-	stwcx.	r15,0,r14
-	bne-	lwarx_loop2
-	isync
-	b	common_exit
-
-first_thread_in_subcore:
-	/* First thread in subcore to wakeup */
 	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
 	stwcx.	r15,0,r14
 	bne-	lwarx_loop2
 	isync
 
+BEGIN_FTR_SECTION
+	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
+	and	r4,r4,r15
+	cmpwi	r4,0	/* Check if first in subcore */
+
+	or	r15,r15,r7		/* Set thread bit */
+	beq	first_thread_in_subcore
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+
+	or	r15,r15,r7		/* Set thread bit */
+	beq	cr2,first_thread_in_core
+
+	/* Not first thread in core or subcore to wake up */
+	b	clear_lock
+
+first_thread_in_subcore:
 	/*
 	 * If waking up from sleep, subcore state is not lost. Hence
 	 * skip subcore state restore
@@ -399,6 +495,7 @@ first_thread_in_subcore:
 	/* Restore per-subcore state */
 	ld	r4,_SDR1(r1)
 	mtspr	SPRN_SDR1,r4
+
 	ld	r4,_RPR(r1)
 	mtspr	SPRN_RPR,r4
 	ld	r4,_AMOR(r1)
@@ -414,19 +511,23 @@ subcore_state_restored:
 first_thread_in_core:
 
 	/*
-	 * First thread in the core waking up from fastsleep. It needs to
+	 * First thread in the core waking up from any state which can cause
+	 * partial or complete hypervisor state loss. It needs to
 	 * call the fastsleep workaround code if the platform requires it.
 	 * Call it unconditionally here. The below branch instruction will
-	 * be patched out when the idle states are discovered if platform
-	 * does not require workaround.
+	 * be patched out if the platform does not have fastsleep or does not
+	 * require the workaround. Patching will be performed during the
+	 * discovery of idle-states.
 	 */
 .global pnv_fastsleep_workaround_at_exit
 pnv_fastsleep_workaround_at_exit:
 	b	fastsleep_workaround_at_exit
 
 timebase_resync:
-	/* Do timebase resync if we are waking up from sleep. Use cr3 value
-	 * set in exceptions-64s.S */
+	/*
+	 * Use cr3 which indicates that we are waking up with atleast partial
+	 * hypervisor state loss to determine if TIMEBASE RESYNC is needed.
+	 */
 	ble	cr3,clear_lock
 	/* Time base re-sync */
 	li	r0,OPAL_RESYNC_TIMEBASE
@@ -439,7 +540,18 @@ timebase_resync:
 	 */
 	bne	cr4,clear_lock
 
-	/* Restore per core state */
+	/*
+	 * First thread in the core to wake up and its waking up with
+	 * complete hypervisor state loss. Restore per core hypervisor
+	 * state.
+	 */
+BEGIN_FTR_SECTION
+	ld	r4,_PTCR(r1)
+	mtspr	SPRN_PTCR,r4
+	ld	r4,_RPR(r1)
+	mtspr	SPRN_RPR,r4
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
 	ld	r4,_TSCR(r1)
 	mtspr	SPRN_TSCR,r4
 	ld	r4,_WORC(r1)
@@ -461,9 +573,9 @@ common_exit:
 
 	/* Waking up from winkle */
 
-	/* Restore per thread state */
-	bl	__restore_cpu_power8
-
+BEGIN_MMU_FTR_SECTION
+	b	no_segments
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
 	/* Restore SLB from PACA */
 	ld	r8,PACA_SLBSHADOWPTR(r13)
 
@@ -477,6 +589,9 @@ common_exit:
 		slbmte	r6,r5
 1:	addi	r8,r8,16
 	.endr
+no_segments:
+
+	/* Restore per thread state */
 
 	ld	r4,_SPURR(r1)
 	mtspr	SPRN_SPURR,r4
@@ -487,6 +602,16 @@ common_exit:
 	ld	r4,_WORT(r1)
 	mtspr	SPRN_WORT,r4
 
+	/* Call cur_cpu_spec->cpu_restore() */
+	LOAD_REG_ADDR(r4, cur_cpu_spec)
+	ld	r4,0(r4)
+	ld	r12,CPU_SPEC_RESTORE(r4)
+#ifdef PPC64_ELF_ABI_v1
+	ld	r12,0(r12)
+#endif
+	mtctr	r12
+	bctrl
+
 hypervisor_state_restored:
 
 	mtspr	SPRN_SRR1,r16
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 8a77f5c4159e..8219e22c2b91 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -27,9 +27,12 @@
 #include "powernv.h"
 #include "subcore.h"
 
+/* Power ISA 3.0 allows for stop states 0x0 - 0xF */
+#define MAX_STOP_STATE	0xF
+
 static u32 supported_cpuidle_states;
 
-static int pnv_save_sprs_for_winkle(void)
+static int pnv_save_sprs_for_deep_states(void)
 {
 	int cpu;
 	int rc;
@@ -50,15 +53,19 @@ static int pnv_save_sprs_for_winkle(void)
 		uint64_t pir = get_hard_smp_processor_id(cpu);
 		uint64_t hsprg0_val = (uint64_t)&paca[cpu];
 
-		/*
-		 * HSPRG0 is used to store the cpu's pointer to paca. Hence last
-		 * 3 bits are guaranteed to be 0. Program slw to restore HSPRG0
-		 * with 63rd bit set, so that when a thread wakes up at 0x100 we
-		 * can use this bit to distinguish between fastsleep and
-		 * deep winkle.
-		 */
-		hsprg0_val |= 1;
-
+		if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+			/*
+			 * HSPRG0 is used to store the cpu's pointer to paca.
+			 * Hence last 3 bits are guaranteed to be 0. Program
+			 * slw to restore HSPRG0 with 63rd bit set, so that
+			 * when a thread wakes up at 0x100 we can use this bit
+			 * to distinguish between fastsleep and deep winkle.
+			 * This is not necessary with stop/psscr since PLS
+			 * field of psscr indicates which state we are waking
+			 * up from.
+			 */
+			hsprg0_val |= 1;
+		}
 		rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
 		if (rc != 0)
 			return rc;
@@ -130,8 +137,8 @@ static void pnv_alloc_idle_core_states(void)
 
 	update_subcore_sibling_mask();
 
-	if (supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED)
-		pnv_save_sprs_for_winkle();
+	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
+		pnv_save_sprs_for_deep_states();
 }
 
 u32 pnv_get_supported_cpuidle_states(void)
@@ -230,43 +237,151 @@ static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
 			show_fastsleep_workaround_applyonce,
 			store_fastsleep_workaround_applyonce);
 
-static int __init pnv_init_idle_states(void)
+
+/*
+ * Used for ppc_md.power_save which needs a function with no parameters
+ */
+static void power9_idle(void)
 {
-	struct device_node *power_mgt;
-	int dt_idle_states;
-	u32 *flags;
-	int i;
+	/* Requesting stop state 0 */
+	power9_idle_stop(0);
+}
+/*
+ * First deep stop state. Used to figure out when to save/restore
+ * hypervisor context.
+ */
+u64 pnv_first_deep_stop_state = MAX_STOP_STATE;
 
-	supported_cpuidle_states = 0;
+/*
+ * Power ISA 3.0 idle initialization.
+ *
+ * POWER ISA 3.0 defines a new SPR Processor stop Status and Control
+ * Register (PSSCR) to control idle behavior.
+ *
+ * PSSCR layout:
+ * ----------------------------------------------------------
+ * | PLS | /// | SD | ESL | EC | PSLL | /// | TR | MTL | RL |
+ * ----------------------------------------------------------
+ * 0      4     41   42    43   44     48    54   56    60
+ *
+ * PSSCR key fields:
+ *	Bits 0:3  - Power-Saving Level Status (PLS). This field indicates the
+ *	lowest power-saving state the thread entered since stop instruction was
+ *	last executed.
+ *
+ *	Bit 41 - Status Disable(SD)
+ *	0 - Shows PLS entries
+ *	1 - PLS entries are all 0
+ *
+ *	Bit 42 - Enable State Loss
+ *	0 - No state is lost irrespective of other fields
+ *	1 - Allows state loss
+ *
+ *	Bit 43 - Exit Criterion
+ *	0 - Exit from power-save mode on any interrupt
+ *	1 - Exit from power-save mode controlled by LPCR's PECE bits
+ *
+ *	Bits 44:47 - Power-Saving Level Limit
+ *	This limits the power-saving level that can be entered into.
+ *
+ *	Bits 60:63 - Requested Level
+ *	Used to specify which power-saving level must be entered on executing
+ *	stop instruction
+ *
+ * @np: /ibm,opal/power-mgt device node
+ * @flags: cpu-idle-state-flags array
+ * @dt_idle_states: Number of idle state entries
+ * Returns 0 on success
+ */
+static int __init pnv_arch300_idle_init(struct device_node *np, u32 *flags,
+					int dt_idle_states)
+{
+	u64 *psscr_val = NULL;
+	int rc = 0, i;
 
-	if (cpuidle_disable != IDLE_NO_OVERRIDE)
+	psscr_val = kcalloc(dt_idle_states, sizeof(*psscr_val),
+				GFP_KERNEL);
+	if (!psscr_val) {
+		rc = -1;
 		goto out;
-
-	if (!firmware_has_feature(FW_FEATURE_OPAL))
+	}
+	if (of_property_read_u64_array(np,
+			"ibm,cpu-idle-state-psscr",
+			psscr_val, dt_idle_states)) {
+		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-states-psscr in DT\n");
+		rc = -1;
 		goto out;
+	}
 
-	power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
-	if (!power_mgt) {
+	/*
+	 * Set pnv_first_deep_stop_state to the first stop level
+	 * to cause hypervisor state loss
+	 */
+	pnv_first_deep_stop_state = MAX_STOP_STATE;
+	for (i = 0; i < dt_idle_states; i++) {
+		u64 psscr_rl = psscr_val[i] & PSSCR_RL_MASK;
+
+		if ((flags[i] & OPAL_PM_LOSE_FULL_CONTEXT) &&
+		     (pnv_first_deep_stop_state > psscr_rl))
+			pnv_first_deep_stop_state = psscr_rl;
+	}
+
+out:
+	kfree(psscr_val);
+	return rc;
+}
+
+/*
+ * Probe device tree for supported idle states
+ */
+static void __init pnv_probe_idle_states(void)
+{
+	struct device_node *np;
+	int dt_idle_states;
+	u32 *flags = NULL;
+	int i;
+
+	np = of_find_node_by_path("/ibm,opal/power-mgt");
+	if (!np) {
 		pr_warn("opal: PowerMgmt Node not found\n");
 		goto out;
 	}
-	dt_idle_states = of_property_count_u32_elems(power_mgt,
+	dt_idle_states = of_property_count_u32_elems(np,
 			"ibm,cpu-idle-state-flags");
 	if (dt_idle_states < 0) {
 		pr_warn("cpuidle-powernv: no idle states found in the DT\n");
 		goto out;
 	}
 
-	flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
-	if (of_property_read_u32_array(power_mgt,
+	flags = kcalloc(dt_idle_states, sizeof(*flags), GFP_KERNEL);
+
+	if (of_property_read_u32_array(np,
 			"ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
 		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
-		goto out_free;
+		goto out;
+	}
+
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		if (pnv_arch300_idle_init(np, flags, dt_idle_states))
+			goto out;
 	}
 
 	for (i = 0; i < dt_idle_states; i++)
 		supported_cpuidle_states |= flags[i];
 
+out:
+	kfree(flags);
+}
+static int __init pnv_init_idle_states(void)
+{
+
+	supported_cpuidle_states = 0;
+
+	if (cpuidle_disable != IDLE_NO_OVERRIDE)
+		goto out;
+
+	pnv_probe_idle_states();
+
 	if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
 		patch_instruction(
 			(unsigned int *)pnv_fastsleep_workaround_at_entry,
@@ -288,8 +403,9 @@ static int __init pnv_init_idle_states(void)
 
 	if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED)
 		ppc_md.power_save = power7_idle;
-out_free:
-	kfree(flags);
+	else if (supported_cpuidle_states & OPAL_PM_STOP_INST_FAST)
+		ppc_md.power_save = power9_idle;
+
 out:
 	return 0;
 }