 arch/powerpc/include/asm/opal.h                |   3
 arch/powerpc/include/asm/paca.h                |   2
 arch/powerpc/include/asm/ppc-opcode.h          |   2
 arch/powerpc/include/asm/processor.h           |   1
 arch/powerpc/include/asm/reg.h                 |   2
 arch/powerpc/kernel/asm-offsets.c              |   2
 arch/powerpc/kernel/exceptions-64s.S           |  13
 arch/powerpc/kernel/idle_power7.S              | 145
 arch/powerpc/platforms/powernv/opal-wrappers.S |   1
 arch/powerpc/platforms/powernv/setup.c         |  72
 arch/powerpc/platforms/powernv/smp.c           |   7
 arch/powerpc/platforms/powernv/subcore.c       |  34
 arch/powerpc/platforms/powernv/subcore.h       |   9
 13 files changed, 281 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 3dea31c1080c..eb95b675109b 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -161,6 +161,7 @@ struct opal_sg_list {
 #define OPAL_PCI_EEH_FREEZE_SET 97
 #define OPAL_HANDLE_HMI 98
 #define OPAL_CONFIG_CPU_IDLE_STATE 99
+#define OPAL_SLW_SET_REG 100
 #define OPAL_REGISTER_DUMP_REGION 101
 #define OPAL_UNREGISTER_DUMP_REGION 102
 #define OPAL_WRITE_TPO 103
@@ -176,6 +177,7 @@ struct opal_sg_list {
  */
 #define OPAL_PM_NAP_ENABLED 0x00010000
 #define OPAL_PM_SLEEP_ENABLED 0x00020000
+#define OPAL_PM_WINKLE_ENABLED 0x00040000
 #define OPAL_PM_SLEEP_ENABLED_ER1 0x00080000

 #ifndef __ASSEMBLY__
@@ -913,6 +915,7 @@ int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
 int64_t opal_handle_hmi(void);
 int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end);
 int64_t opal_unregister_dump_region(uint32_t id);
+int64_t opal_slw_set_reg(uint64_t cpu_pir, uint64_t sprn, uint64_t val);
 int64_t opal_pci_set_phb_cxl_mode(uint64_t phb_id, uint64_t mode, uint64_t pe_number);
 int64_t opal_ipmi_send(uint64_t interface, struct opal_ipmi_msg *msg,
		uint64_t msg_len);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index a0a16847bd40..e5f22c6c4bf9 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -158,6 +158,8 @@ struct paca_struct {
	u8 thread_idle_state; /* PNV_THREAD_RUNNING/NAP/SLEEP */
	/* Mask to indicate thread id in core */
	u8 thread_mask;
+	/* Mask to denote subcore sibling threads */
+	u8 subcore_sibling_mask;
 #endif

 #ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 6f8536208049..5155be7c0d48 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -194,6 +194,7 @@

 #define PPC_INST_NAP 0x4c000364
 #define PPC_INST_SLEEP 0x4c0003a4
+#define PPC_INST_WINKLE 0x4c0003e4

 /* A2 specific instructions */
 #define PPC_INST_ERATWE 0x7c0001a6
@@ -374,6 +375,7 @@

 #define PPC_NAP stringify_in_c(.long PPC_INST_NAP)
 #define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP)
+#define PPC_WINKLE stringify_in_c(.long PPC_INST_WINKLE)

 /* BHRB instructions */
 #define PPC_CLRBHRB stringify_in_c(.long PPC_INST_CLRBHRB)
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index f5c45b37c0d4..bf117d8fb45f 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -453,6 +453,7 @@ enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
 extern int powersave_nap; /* set if nap mode can be used in idle loop */
 extern unsigned long power7_nap(int check_irq);
 extern unsigned long power7_sleep(void);
+extern unsigned long power7_winkle(void);
 extern void flush_instruction_cache(void);
 extern void hard_reset_now(void);
 extern void poweroff_now(void);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a68ee15964b3..1c874fb533bb 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -373,6 +373,7 @@
 #define SPRN_DBAT7L 0x23F /* Data BAT 7 Lower Register */
 #define SPRN_DBAT7U 0x23E /* Data BAT 7 Upper Register */
 #define SPRN_PPR 0x380 /* SMT Thread status Register */
+#define SPRN_TSCR 0x399 /* Thread Switch Control Register */

 #define SPRN_DEC 0x016 /* Decrement Register */
 #define SPRN_DER 0x095 /* Debug Enable Regsiter */
@@ -730,6 +731,7 @@
 #define SPRN_BESCR 806 /* Branch event status and control register */
 #define BESCR_GE 0x8000000000000000ULL /* Global Enable */
 #define SPRN_WORT 895 /* Workload optimization register - thread */
+#define SPRN_WORC 863 /* Workload optimization register - core */

 #define SPRN_PMC1 787
 #define SPRN_PMC2 788
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index bbd27fe0c039..f68de7a73faa 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -733,6 +733,8 @@ int main(void)
			offsetof(struct paca_struct, thread_idle_state));
	DEFINE(PACA_THREAD_MASK,
			offsetof(struct paca_struct, thread_mask));
+	DEFINE(PACA_SUBCORE_SIBLING_MASK,
+			offsetof(struct paca_struct, subcore_sibling_mask));
 #endif

	return 0;
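For readers unfamiliar with asm-offsets.c: each DEFINE() entry is turned by the build into a plain numeric constant in the generated asm-offsets.h, which is how idle_power7.S can later do "lbz r4,PACA_SUBCORE_SIBLING_MASK(r13)" against the new paca field. Below is a stripped-down, self-contained sketch of that idea; struct fake_paca and the printf-based DEFINE are illustrative stand-ins, not the kernel's actual mechanism (which emits the values through inline asm and a build-time script).

#include <stddef.h>
#include <stdio.h>

/* Cut-down stand-in for struct paca_struct (illustrative only). */
struct fake_paca {
	unsigned char thread_idle_state;
	unsigned char thread_mask;
	unsigned char subcore_sibling_mask;
};

/* In the real asm-offsets.c each DEFINE() ends up as a
 * "#define <SYM> <offset>" line usable from assembly. */
#define DEFINE(sym, val) printf("#define %s %zu\n", #sym, (size_t)(val))

int main(void)
{
	DEFINE(PACA_THREAD_IDLE_STATE, offsetof(struct fake_paca, thread_idle_state));
	DEFINE(PACA_THREAD_MASK, offsetof(struct fake_paca, thread_mask));
	DEFINE(PACA_SUBCORE_SIBLING_MASK, offsetof(struct fake_paca, subcore_sibling_mask));
	return 0;
}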
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 289fe718ecd4..c2df8150bd7a 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -102,9 +102,7 @@ system_reset_pSeries:
 #ifdef CONFIG_PPC_P7_NAP
 BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
-	 * waking up from nap. We only handle no state loss and
-	 * supervisor state loss. We do -not- handle hypervisor
-	 * state loss at this time.
+	 * waking up from nap/sleep/winkle.
	 */
	mfspr r13,SPRN_SRR1
	rlwinm. r13,r13,47-31,30,31
@@ -112,7 +110,16 @@ BEGIN_FTR_SECTION

	cmpwi cr3,r13,2

+	/*
+	 * Check if last bit of HSPRG0 is set. This indicates whether we are
+	 * waking up from winkle.
+	 */
	GET_PACA(r13)
+	clrldi r5,r13,63
+	clrrdi r13,r13,1
+	cmpwi cr4,r5,1
+	mtspr SPRN_HSPRG0,r13
+
	lbz r0,PACA_THREAD_IDLE_STATE(r13)
	cmpwi cr2,r0,PNV_THREAD_NAP
	bgt cr2,8f /* Either sleep or Winkle */
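The clrldi/clrrdi pair above works because HSPRG0 normally holds the PACA pointer, which is at least 8-byte aligned, so its lowest bit is free to serve as a "we came out of winkle" flag (set up later by pnv_save_sprs_for_winkle() in setup.c). A minimal user-space C sketch of the same pointer-tagging trick; tag_winkle(), untag() and fake_paca are illustrative names, not kernel code.

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_paca;	/* stand-in for this cpu's struct paca_struct */

/* What the SLW engine is asked to restore into HSPRG0: paca pointer | 1. */
static uint64_t tag_winkle(const void *paca)
{
	return (uint64_t)(uintptr_t)paca | 1;
}

/* On wakeup at 0x100: split the flag bit from the pointer again. */
static void untag(uint64_t hsprg0, uint64_t *paca, int *from_winkle)
{
	*from_winkle = hsprg0 & 1;		/* corresponds to "clrldi r5,r13,63" */
	*paca = hsprg0 & ~(uint64_t)1;		/* corresponds to "clrrdi r13,r13,1" */
}

int main(void)
{
	uint64_t paca, hsprg0 = tag_winkle(&fake_paca);
	int from_winkle;

	untag(hsprg0, &paca, &from_winkle);
	printf("paca=%#llx from_winkle=%d\n",
	       (unsigned long long)paca, from_winkle);
	return 0;
}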
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 0f2c113c8ca5..05adc8bbdef8 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -19,9 +19,24 @@
 #include <asm/kvm_book3s_asm.h>
 #include <asm/opal.h>
 #include <asm/cpuidle.h>
+#include <asm/mmu-hash64.h>

 #undef DEBUG

+/*
+ * Use unused space in the interrupt stack to save and restore
+ * registers for winkle support.
+ */
+#define _SDR1 GPR3
+#define _RPR GPR4
+#define _SPURR GPR5
+#define _PURR GPR6
+#define _TSCR GPR7
+#define _DSCR GPR8
+#define _AMOR GPR9
+#define _WORT GPR10
+#define _WORC GPR11
+
 /* Idle state entry routines */

 #define IDLE_STATE_ENTER_SEQ(IDLE_INST) \
@@ -124,8 +139,8 @@ power7_enter_nap_mode:
	stb r4,HSTATE_HWTHREAD_STATE(r13)
 #endif
	stb r3,PACA_THREAD_IDLE_STATE(r13)
-	cmpwi cr1,r3,PNV_THREAD_SLEEP
-	bge cr1,2f
+	cmpwi cr3,r3,PNV_THREAD_SLEEP
+	bge cr3,2f
	IDLE_STATE_ENTER_SEQ(PPC_NAP)
	/* No return */
 2:
@@ -154,7 +169,8 @@ pnv_fastsleep_workaround_at_entry:
	bne- lwarx_loop1
	isync

-common_enter: /* common code for all the threads entering sleep */
+common_enter: /* common code for all the threads entering sleep or winkle */
+	bgt cr3,enter_winkle
	IDLE_STATE_ENTER_SEQ(PPC_SLEEP)

 fastsleep_workaround_at_entry:
@@ -175,6 +191,30 @@ fastsleep_workaround_at_entry:
	stw r0,0(r14)
	b common_enter

+enter_winkle:
+	/*
+	 * Note that all registers (per-core, per-subcore and per-thread) are
+	 * saved here, since any thread in the core might wake up first.
+	 */
+	mfspr r3,SPRN_SDR1
+	std r3,_SDR1(r1)
+	mfspr r3,SPRN_RPR
+	std r3,_RPR(r1)
+	mfspr r3,SPRN_SPURR
+	std r3,_SPURR(r1)
+	mfspr r3,SPRN_PURR
+	std r3,_PURR(r1)
+	mfspr r3,SPRN_TSCR
+	std r3,_TSCR(r1)
+	mfspr r3,SPRN_DSCR
+	std r3,_DSCR(r1)
+	mfspr r3,SPRN_AMOR
+	std r3,_AMOR(r1)
+	mfspr r3,SPRN_WORT
+	std r3,_WORT(r1)
+	mfspr r3,SPRN_WORC
+	std r3,_WORC(r1)
+	IDLE_STATE_ENTER_SEQ(PPC_WINKLE)

 _GLOBAL(power7_idle)
	/* Now check if user or arch enabled NAP mode */
@@ -197,6 +237,12 @@ _GLOBAL(power7_sleep)
	b power7_powersave_common
	/* No return */

+_GLOBAL(power7_winkle)
+	li r3,3
+	li r4,1
+	b power7_powersave_common
+	/* No return */
+
 #define CHECK_HMI_INTERRUPT \
	mfspr r0,SPRN_SRR1; \
 BEGIN_FTR_SECTION_NESTED(66); \
@@ -250,11 +296,23 @@ lwarx_loop2:
	bne core_idle_lock_held

	cmpwi cr2,r15,0
+	lbz r4,PACA_SUBCORE_SIBLING_MASK(r13)
+	and r4,r4,r15
+	cmpwi cr1,r4,0 /* Check if first in subcore */
+
+	/*
+	 * At this stage
+	 * cr1 - 0b0100 if first thread to wake up in subcore
+	 * cr2 - 0b0100 if first thread to wake up in core
+	 * cr3 - 0b0010 if waking up from sleep or winkle
+	 * cr4 - 0b0100 if waking up from winkle
+	 */
+
	or r15,r15,r7 /* Set thread bit */

-	beq cr2,first_thread
+	beq cr1,first_thread_in_subcore

-	/* Not first thread in core to wake up */
+	/* Not first thread in subcore to wake up */
	stwcx. r15,0,r14
	bne- lwarx_loop2
	isync
@@ -269,14 +327,37 @@ core_idle_lock_loop:
	HMT_MEDIUM
	b lwarx_loop2

-first_thread:
-	/* First thread in core to wakeup */
+first_thread_in_subcore:
+	/* First thread in subcore to wake up */
	ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
	stwcx. r15,0,r14
	bne- lwarx_loop2
	isync

	/*
+	 * If waking up from sleep, subcore state is not lost. Hence
+	 * skip subcore state restore.
+	 */
+	bne cr4,subcore_state_restored
+
+	/* Restore per-subcore state */
+	ld r4,_SDR1(r1)
+	mtspr SPRN_SDR1,r4
+	ld r4,_RPR(r1)
+	mtspr SPRN_RPR,r4
+	ld r4,_AMOR(r1)
+	mtspr SPRN_AMOR,r4
+
+subcore_state_restored:
+	/*
+	 * Check if the thread is also the first thread in the core. If not,
+	 * skip to clear_lock.
+	 */
+	bne cr2,clear_lock
+
+first_thread_in_core:
+
+	/*
	 * First thread in the core waking up from fastsleep. It needs to
	 * call the fastsleep workaround code if the platform requires it.
	 * Call it unconditionally here. The below branch instruction will
@@ -296,12 +377,62 @@ timebase_resync:
	bl opal_call_realmode;
	/* TODO: Check r3 for failure */

+	/*
+	 * If waking up from sleep, per core state is not lost, skip to
+	 * clear_lock.
+	 */
+	bne cr4,clear_lock
+
+	/* Restore per core state */
+	ld r4,_TSCR(r1)
+	mtspr SPRN_TSCR,r4
+	ld r4,_WORC(r1)
+	mtspr SPRN_WORC,r4
+
 clear_lock:
	andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS
	lwsync
	stw r15,0(r14)

 common_exit:
+	/*
+	 * Common to all threads.
+	 *
+	 * If waking up from sleep, hypervisor state is not lost. Hence
+	 * skip hypervisor state restore.
+	 */
+	bne cr4,hypervisor_state_restored
+
+	/* Waking up from winkle */
+
+	/* Restore per thread state */
+	bl __restore_cpu_power8
+
+	/* Restore SLB from PACA */
+	ld r8,PACA_SLBSHADOWPTR(r13)
+
+	.rept SLB_NUM_BOLTED
+	li r3, SLBSHADOW_SAVEAREA
+	LDX_BE r5, r8, r3
+	addi r3, r3, 8
+	LDX_BE r6, r8, r3
+	andis. r7,r5,SLB_ESID_V@h
+	beq 1f
+	slbmte r6,r5
+1:	addi r8,r8,16
+	.endr
+
+	ld r4,_SPURR(r1)
+	mtspr SPRN_SPURR,r4
+	ld r4,_PURR(r1)
+	mtspr SPRN_PURR,r4
+	ld r4,_DSCR(r1)
+	mtspr SPRN_DSCR,r4
+	ld r4,_WORT(r1)
+	mtspr SPRN_WORT,r4
+
+hypervisor_state_restored:
+
	li r5,PNV_THREAD_RUNNING
	stb r5,PACA_THREAD_IDLE_STATE(r13)

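Because any thread may be the first to wake, the exit path above restores state in three tiers, each guarded by the condition registers set earlier: per-subcore SPRs only by the first thread to wake in the subcore, per-core SPRs only by the first thread in the core, and per-thread state by every thread, with all of it skipped when cr4 says the wakeup was from sleep rather than winkle. Below is a rough C rendering of that structure; the helper names and boolean flags are illustrative, not kernel functions.

#include <stdio.h>

/* Stubs standing in for the SPR restore sequences in idle_power7.S. */
static void restore_subcore_sprs(void) { puts("restore SDR1, RPR, AMOR"); }
static void resync_timebase(void)      { puts("OPAL timebase resync"); }
static void restore_core_sprs(void)    { puts("restore TSCR, WORC"); }
static void restore_thread_state(void) { puts("__restore_cpu_power8, SLB, SPURR, PURR, DSCR, WORT"); }
static void clear_core_idle_lock(void) { puts("clear PNV_CORE_IDLE_LOCK_BIT"); }

static void idle_wakeup(int first_in_subcore, int first_in_core, int from_winkle)
{
	if (first_in_subcore) {			/* beq cr1,first_thread_in_subcore */
		if (from_winkle)		/* skipped on sleep (bne cr4,...) */
			restore_subcore_sprs();
		if (first_in_core) {		/* otherwise: bne cr2,clear_lock */
			resync_timebase();	/* timebase lost in fastsleep and winkle */
			if (from_winkle)
				restore_core_sprs();
		}
		clear_core_idle_lock();
	}
	/* common_exit: every waking thread */
	if (from_winkle)			/* bne cr4,hypervisor_state_restored */
		restore_thread_state();
}

int main(void)
{
	/* e.g. the very first thread of the core waking from winkle */
	idle_wakeup(1, 1, 1);
	return 0;
}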
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 78289ed7058c..54eca8b3b288 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -284,6 +284,7 @@ OPAL_CALL(opal_sensor_read, OPAL_SENSOR_READ);
 OPAL_CALL(opal_get_param, OPAL_GET_PARAM);
 OPAL_CALL(opal_set_param, OPAL_SET_PARAM);
 OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI);
+OPAL_CALL(opal_slw_set_reg, OPAL_SLW_SET_REG);
 OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION);
 OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION);
 OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CXL_MODE);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 2e9b53bb73e2..b700a329c31d 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -41,6 +41,7 @@
 #include <asm/code-patching.h>

 #include "powernv.h"
+#include "subcore.h"

 static void __init pnv_setup_arch(void)
 {
@@ -293,6 +294,72 @@ static void __init pnv_setup_machdep_rtas(void)

 static u32 supported_cpuidle_states;

+int pnv_save_sprs_for_winkle(void)
+{
+	int cpu;
+	int rc;
+
+	/*
+	 * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across
+	 * all cpus at boot. Get these register values from the current cpu
+	 * and use the same across all cpus.
+	 */
+	uint64_t lpcr_val = mfspr(SPRN_LPCR);
+	uint64_t hid0_val = mfspr(SPRN_HID0);
+	uint64_t hid1_val = mfspr(SPRN_HID1);
+	uint64_t hid4_val = mfspr(SPRN_HID4);
+	uint64_t hid5_val = mfspr(SPRN_HID5);
+	uint64_t hmeer_val = mfspr(SPRN_HMEER);
+
+	for_each_possible_cpu(cpu) {
+		uint64_t pir = get_hard_smp_processor_id(cpu);
+		uint64_t hsprg0_val = (uint64_t)&paca[cpu];
+
+		/*
+		 * HSPRG0 is used to store the cpu's pointer to paca. Hence the
+		 * last 3 bits are guaranteed to be 0. Program the slw to
+		 * restore HSPRG0 with the 63rd bit set, so that when a thread
+		 * wakes up at 0x100 we can use this bit to distinguish between
+		 * fastsleep and deep winkle.
+		 */
+		hsprg0_val |= 1;
+
+		rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
+		if (rc != 0)
+			return rc;
+
+		rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
+		if (rc != 0)
+			return rc;
+
+		/* HIDs are per core registers */
+		if (cpu_thread_in_core(cpu) == 0) {
+
+			rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
+			if (rc != 0)
+				return rc;
+
+			rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
+			if (rc != 0)
+				return rc;
+
+			rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
+			if (rc != 0)
+				return rc;
+
+			rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
+			if (rc != 0)
+				return rc;
+
+			rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
+			if (rc != 0)
+				return rc;
+		}
+	}
+
+	return 0;
+}
+
 static void pnv_alloc_idle_core_states(void)
 {
	int i, j;
@@ -325,6 +392,11 @@ static void pnv_alloc_idle_core_states(void)
			paca[cpu].thread_mask = 1 << j;
		}
	}
+
+	update_subcore_sibling_mask();
+
+	if (supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED)
+		pnv_save_sprs_for_winkle();
 }

 u32 pnv_get_supported_cpuidle_states(void)
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index c0691d0fb385..6c551a28e899 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -167,12 +167,17 @@ static void pnv_smp_cpu_kill_self(void)
	 */
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
	while (!generic_check_cpu_restart(cpu)) {
+
		ppc64_runlatch_off();
-		if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
+
+		if (idle_states & OPAL_PM_WINKLE_ENABLED)
+			srr1 = power7_winkle();
+		else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
				(idle_states & OPAL_PM_SLEEP_ENABLED_ER1))
			srr1 = power7_sleep();
		else
			srr1 = power7_nap(1);
+
		ppc64_runlatch_on();

		/*
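The offline-CPU loop now prefers the deepest idle state the firmware advertises: winkle, then fastsleep, then nap. A small self-contained sketch of that selection order, reusing the OPAL_PM_* flag values from the opal.h hunk above; pick_offline_idle_state() is an illustrative helper, the kernel calls power7_winkle()/power7_sleep()/power7_nap() directly.

#include <stdint.h>
#include <stdio.h>

/* Flag values mirrored from the opal.h hunk above. */
#define OPAL_PM_NAP_ENABLED       0x00010000
#define OPAL_PM_SLEEP_ENABLED     0x00020000
#define OPAL_PM_WINKLE_ENABLED    0x00040000
#define OPAL_PM_SLEEP_ENABLED_ER1 0x00080000

/* Deepest-first selection used for offlined threads. */
static const char *pick_offline_idle_state(uint32_t idle_states)
{
	if (idle_states & OPAL_PM_WINKLE_ENABLED)
		return "winkle";
	if (idle_states & (OPAL_PM_SLEEP_ENABLED | OPAL_PM_SLEEP_ENABLED_ER1))
		return "fastsleep";
	return "nap";
}

int main(void)
{
	uint32_t states = OPAL_PM_NAP_ENABLED | OPAL_PM_SLEEP_ENABLED |
			  OPAL_PM_WINKLE_ENABLED;

	printf("%s\n", pick_offline_idle_state(states));	/* prints "winkle" */
	return 0;
}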
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index c87f96b79d1a..f60f80ada903 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -160,6 +160,18 @@ static void wait_for_sync_step(int step)
	mb();
 }

+static void update_hid_in_slw(u64 hid0)
+{
+	u64 idle_states = pnv_get_supported_cpuidle_states();
+
+	if (idle_states & OPAL_PM_WINKLE_ENABLED) {
+		/* OPAL call to patch the slw with the new HID0 value */
+		u64 cpu_pir = hard_smp_processor_id();
+
+		opal_slw_set_reg(cpu_pir, SPRN_HID0, hid0);
+	}
+}
+
 static void unsplit_core(void)
 {
	u64 hid0, mask;
@@ -179,6 +191,7 @@ static void unsplit_core(void)
	hid0 = mfspr(SPRN_HID0);
	hid0 &= ~HID0_POWER8_DYNLPARDIS;
	mtspr(SPRN_HID0, hid0);
+	update_hid_in_slw(hid0);

	while (mfspr(SPRN_HID0) & mask)
		cpu_relax();
@@ -215,6 +228,7 @@ static void split_core(int new_mode)
	hid0 = mfspr(SPRN_HID0);
	hid0 |= HID0_POWER8_DYNLPARDIS | split_parms[i].value;
	mtspr(SPRN_HID0, hid0);
+	update_hid_in_slw(hid0);

	/* Wait for it to happen */
	while (!(mfspr(SPRN_HID0) & split_parms[i].mask))
@@ -251,6 +265,25 @@ bool cpu_core_split_required(void)
	return true;
 }

+void update_subcore_sibling_mask(void)
+{
+	int cpu;
+	/*
+	 * Sibling mask for the first cpu. Left shift this by the required
+	 * bits to get the sibling mask for the rest of the cpus.
+	 */
+	int sibling_mask_first_cpu = (1 << threads_per_subcore) - 1;
+
+	for_each_possible_cpu(cpu) {
+		int tid = cpu_thread_in_core(cpu);
+		int offset = (tid / threads_per_subcore) * threads_per_subcore;
+		int mask = sibling_mask_first_cpu << offset;
+
+		paca[cpu].subcore_sibling_mask = mask;
+
+	}
+}
+
 static int cpu_update_split_mode(void *data)
 {
	int cpu, new_mode = *(int *)data;
@@ -284,6 +317,7 @@ static int cpu_update_split_mode(void *data)
	/* Make the new mode public */
	subcores_per_core = new_mode;
	threads_per_subcore = threads_per_core / subcores_per_core;
+	update_subcore_sibling_mask();

	/* Make sure the new mode is written before we exit */
	mb();
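update_subcore_sibling_mask() gives every thread a bitmask of the hardware threads that share its subcore, and is recomputed whenever the core is split or unsplit. A worked example with an 8-thread core split into two 4-thread subcores; threads_per_core/threads_per_subcore are kernel globals, fixed here for illustration.

#include <stdio.h>

/* Stand-alone rework of the mask computation in update_subcore_sibling_mask(). */
int main(void)
{
	int threads_per_core = 8;
	int threads_per_subcore = 4;	/* i.e. the core is split into 2 subcores */
	int first_cpu_mask = (1 << threads_per_subcore) - 1;	/* 0x0f */
	int tid;

	for (tid = 0; tid < threads_per_core; tid++) {
		int offset = (tid / threads_per_subcore) * threads_per_subcore;
		int mask = first_cpu_mask << offset;

		/* threads 0-3 get 0x0f, threads 4-7 get 0xf0 */
		printf("thread %d: subcore_sibling_mask = 0x%02x\n", tid, mask);
	}
	return 0;
}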
diff --git a/arch/powerpc/platforms/powernv/subcore.h b/arch/powerpc/platforms/powernv/subcore.h
index 148abc91debf..84e02ae52895 100644
--- a/arch/powerpc/platforms/powernv/subcore.h
+++ b/arch/powerpc/platforms/powernv/subcore.h
@@ -14,5 +14,12 @@
 #define SYNC_STEP_FINISHED 3 /* Set by secondary when split/unsplit is done */

 #ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
 void split_core_secondary_loop(u8 *state);
-#endif
+extern void update_subcore_sibling_mask(void);
+#else
+static inline void update_subcore_sibling_mask(void) { };
+#endif /* CONFIG_SMP */
+
+#endif /* __ASSEMBLY__ */