author		Paul Mackerras <paulus@samba.org>	2006-04-18 07:49:11 -0400
committer	Paul Mackerras <paulus@samba.org>	2006-04-18 07:49:11 -0400
commit		f39224a8c1828bdd327539da72a53d8a13595838
tree		cc9e9139e2e7e2e36774e5c52a5311439160633c	/arch/powerpc/kernel/idle_6xx.S
parent		183b73ae7c9e4e19fa95d88e1778481899a65210
powerpc: Use correct sequence for putting CPU into nap mode
We weren't using the recommended sequence for putting the CPU into
nap mode. When I changed the idle loop, for some reason 7447A CPUs
started hanging when we put them into nap mode. Changing to the
recommended sequence fixes that.
The complexity here is that the recommended sequence is a loop that
keeps putting the CPU back into nap mode. Clearly we need some way
to break out of the loop when an interrupt (external interrupt,
decrementer, performance monitor) occurs. Here we use a bit in the
thread_info struct to indicate that we are napping; the exception
entry code notices this and arranges for the exception to return
to the value in the link register, thus breaking out of the loop.
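
To make that mechanism concrete, here is a minimal userspace C model
of the control flow just described. It is only a sketch: nap(),
exception_entry() and the ti variable are illustrative stand-ins
rather than kernel code, the simulated interrupt on the third nap
entry is arbitrary, and the napping bit lives in the `local_flags'
field introduced in the next paragraph.

#include <stdio.h>

struct thread_info {
	unsigned long local_flags;	/* only touched by this CPU */
};

#define TLF_NAPPING	0x1UL

static struct thread_info ti;	/* stand-in for current_thread_info() */

/* Stand-in for executing the nap sequence (mtmsr with MSR_POW set);
 * pretend an interrupt finally arrives on the third entry. */
static int nap(void)
{
	static int calls;
	return ++calls >= 3;	/* non-zero: an "exception" occurred */
}

/* What the exception entry path does when it sees TLF_NAPPING set:
 * clear the bit and redirect the interrupted PC to the saved link
 * register, modelled here by telling the loop to stop. */
static int exception_entry(void)
{
	if (ti.local_flags & TLF_NAPPING) {
		ti.local_flags &= ~TLF_NAPPING;	/* plain store, no atomics */
		return 1;	/* "return to LR": leave the nap loop */
	}
	return 0;
}

int main(void)
{
	ti.local_flags |= TLF_NAPPING;	/* announce that we are napping */
	for (;;) {			/* recommended sequence: keep re-entering nap */
		if (nap() && exception_entry())
			break;		/* the exception redirected us out */
	}
	printf("woke up, local_flags = %#lx\n", ti.local_flags);
	return 0;
}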
We use a new `local_flags' field in the thread_info struct, which we
can alter without needing an atomic update sequence.
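
As a rough illustration of why that matters, the standalone C
fragment below contrasts the two kinds of field; the struct layout
and helper names are hypothetical, not the kernel's. A field like
`flags' can be set from other CPUs (for example a remote wakeup
marking a need-resched bit), so updating it needs an atomic
read-modify-write, which on 32-bit PowerPC means a lwarx/stwcx.
reservation loop; `local_flags' is only ever touched by its owning
CPU, so a plain load/or/store cannot lose an update.

#include <stdatomic.h>

struct thread_info_sketch {
	atomic_ulong flags;		/* may be set from other CPUs: atomic RMW */
	unsigned long local_flags;	/* this CPU only: plain RMW is safe */
};

/* Plain or-and-store: compiles to lwz/ori/stw on 32-bit powerpc,
 * no lwarx/stwcx. reservation loop needed. */
static void set_local_flag(struct thread_info_sketch *ti, unsigned long bit)
{
	ti->local_flags |= bit;
}

/* Cross-CPU flags must use an atomic read-modify-write so that
 * concurrent updates from other CPUs are not lost. */
static void set_shared_flag(struct thread_info_sketch *ti, unsigned long bit)
{
	atomic_fetch_or(&ti->flags, bit);
}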
The PPC970 has the same recommended sequence, so we do the same thing
there too.
This also fixes a bug in the kernel stack overflow handling code on
32-bit, which was trashing the value of a register that we still
needed.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/kernel/idle_6xx.S')
-rw-r--r--	arch/powerpc/kernel/idle_6xx.S	63
1 file changed, 21 insertions(+), 42 deletions(-)
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
index 12a4efbaa08f..b45fa0e37212 100644
--- a/arch/powerpc/kernel/idle_6xx.S
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -22,8 +22,6 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 
-#undef DEBUG
-
 	.text
 
 /*
@@ -109,12 +107,6 @@ BEGIN_FTR_SECTION
 	dcbf	0,r4
 	dcbf	0,r4
 END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
-#ifdef DEBUG
-	lis	r6,nap_enter_count@ha
-	lwz	r4,nap_enter_count@l(r6)
-	addi	r4,r4,1
-	stw	r4,nap_enter_count@l(r6)
-#endif
 2:
 BEGIN_FTR_SECTION
 	/* Go to low speed mode on some 750FX */
@@ -144,48 +136,42 @@ BEGIN_FTR_SECTION
 	DSSALL
 	sync
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+	rlwinm	r9,r1,0,0,31-THREAD_SHIFT	/* current thread_info */
+	lwz	r8,TI_LOCAL_FLAGS(r9)	/* set napping bit */
+	ori	r8,r8,_TLF_NAPPING	/* so when we take an exception */
+	stw	r8,TI_LOCAL_FLAGS(r9)	/* it will return to our caller */
 	mfmsr	r7
 	ori	r7,r7,MSR_EE
 	oris	r7,r7,MSR_POW@h
-	sync
-	isync
+1:	sync
 	mtmsr	r7
 	isync
-	sync
-	blr
-
+	b	1b
+
 /*
  * Return from NAP/DOZE mode, restore some CPU specific registers,
  * we are called with DR/IR still off and r2 containing physical
- * address of current.
+ * address of current. R11 points to the exception frame (physical
+ * address). We have to preserve r10.
  */
 _GLOBAL(power_save_6xx_restore)
-	mfspr	r11,SPRN_HID0
-	rlwinm.	r11,r11,0,10,8	/* Clear NAP & copy NAP bit !state to cr1 EQ */
-	cror	4*cr1+eq,4*cr0+eq,4*cr0+eq
-BEGIN_FTR_SECTION
-	rlwinm	r11,r11,0,9,7	/* Clear DOZE */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-	mtspr	SPRN_HID0, r11
+	lwz	r9,_LINK(r11)	/* interrupted in ppc6xx_idle: */
+	stw	r9,_NIP(r11)	/* make it do a blr */
 
-#ifdef DEBUG
-	beq	cr1,1f
-	lis	r11,(nap_return_count-KERNELBASE)@ha
-	lwz	r9,nap_return_count@l(r11)
-	addi	r9,r9,1
-	stw	r9,nap_return_count@l(r11)
-1:
-#endif
-
-	rlwinm	r9,r1,0,0,18
-	tophys(r9,r9)
-	lwz	r11,TI_CPU(r9)
+#ifdef CONFIG_SMP
+	mfspr	r12,SPRN_SPRG3
+	lwz	r11,TI_CPU(r12)		/* get cpu number * 4 */
 	slwi	r11,r11,2
+#else
+	li	r11,0
+#endif
 	/* Todo make sure all these are in the same page
-	 * and load r22 (@ha part + CPU offset) only once
+	 * and load r11 (@ha part + CPU offset) only once
	 */
 BEGIN_FTR_SECTION
-	beq	cr1,1f
+	mfspr	r9,SPRN_HID0
+	andis.	r9,r9,HID0_NAP@h
+	beq	1f
 	addis	r9,r11,(nap_save_msscr0-KERNELBASE)@ha
 	lwz	r9,nap_save_msscr0@l(r9)
 	mtspr	SPRN_MSSCR0, r9
@@ -210,10 +196,3 @@ _GLOBAL(nap_save_hid1)
 
 _GLOBAL(powersave_lowspeed)
 	.long	0
-
-#ifdef DEBUG
-_GLOBAL(nap_enter_count)
-	.space	4
-_GLOBAL(nap_return_count)
-	.space	4
-#endif