 arch/powerpc/Kconfig              |  6
 arch/powerpc/kernel/Makefile      |  2
 arch/powerpc/kernel/asm-offsets.c |  1
 arch/powerpc/kernel/entry_32.S    | 35
 arch/powerpc/kernel/head_64.S     | 49
 arch/powerpc/kernel/idle_6xx.S    | 63
 arch/powerpc/kernel/idle_power4.S | 10
 include/asm-powerpc/thread_info.h |  8
 8 files changed, 109 insertions(+), 65 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2cdc35ce8045..65f67f986156 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -366,6 +366,7 @@ config PPC_PMAC64
 	select U3_DART
 	select MPIC_BROKEN_U3
 	select GENERIC_TBSYNC
+	select PPC_970_NAP
 	default y
 
 config PPC_PREP
@@ -383,6 +384,7 @@ config PPC_MAPLE
 	select MPIC_BROKEN_U3
 	select GENERIC_TBSYNC
 	select PPC_UDBG_16550
+	select PPC_970_NAP
 	default n
 	help
 	  This option enables support for the Maple 970FX Evaluation Board.
@@ -457,6 +459,10 @@ config PPC_MPC106
 	bool
 	default n
 
+config PPC_970_NAP
+	bool
+	default n
+
 source "drivers/cpufreq/Kconfig"
 
 config CPU_FREQ_PMAC
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 0cc0995b81b0..803858e86160 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
 			   firmware.o sysfs.o
 obj-$(CONFIG_PPC64)		+= vdso64/
 obj-$(CONFIG_ALTIVEC)		+= vecemu.o vector.o
-obj-$(CONFIG_POWER4)		+= idle_power4.o
+obj-$(CONFIG_PPC_970_NAP)	+= idle_power4.o
 obj-$(CONFIG_PPC_OF)		+= of_device.o prom_parse.o
 procfs-$(CONFIG_PPC64)		:= proc_ppc64.o
 obj-$(CONFIG_PROC_FS)		+= $(procfs-y)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 54b48f330051..8f85c5e8a55a 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -91,6 +91,7 @@ int main(void)
 #endif /* CONFIG_PPC64 */
 
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+	DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
 	DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
 #ifdef CONFIG_PPC32
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index b3a979467225..8866fd26c6b9 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -128,37 +128,36 @@ transfer_to_handler:
 	stw	r12,4(r11)
 #endif
 	b	3f
+
 2:	/* if from kernel, check interrupted DOZE/NAP mode and
 	 * check for stack overflow
 	 */
+	lwz	r9,THREAD_INFO-THREAD(r12)
+	cmplw	r1,r9			/* if r1 <= current->thread_info */
+	ble-	stack_ovf		/* then the kernel stack overflowed */
+5:
 #ifdef CONFIG_6xx
-	mfspr	r11,SPRN_HID0
-	mtcr	r11
-BEGIN_FTR_SECTION
-	bt-	8,4f			/* Check DOZE */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-BEGIN_FTR_SECTION
-	bt-	9,4f			/* Check NAP */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+	tophys(r9,r9)			/* check local flags */
+	lwz	r12,TI_LOCAL_FLAGS(r9)
+	mtcrf	0x01,r12
+	bt-	31-TLF_NAPPING,4f
 #endif /* CONFIG_6xx */
 	.globl transfer_to_handler_cont
 transfer_to_handler_cont:
-	lwz	r11,THREAD_INFO-THREAD(r12)
-	cmplw	r1,r11			/* if r1 <= current->thread_info */
-	ble-	stack_ovf		/* then the kernel stack overflowed */
 3:
 	mflr	r9
 	lwz	r11,0(r9)		/* virtual address of handler */
 	lwz	r9,4(r9)		/* where to go when done */
-	FIX_SRR1(r10,r12)
 	mtspr	SPRN_SRR0,r11
 	mtspr	SPRN_SRR1,r10
 	mtlr	r9
 	SYNC
 	RFI				/* jump to handler, enable MMU */
 
 #ifdef CONFIG_6xx
-4:	b	power_save_6xx_restore
+4:	rlwinm	r12,r12,0,~_TLF_NAPPING
+	stw	r12,TI_LOCAL_FLAGS(r9)
+	b	power_save_6xx_restore
 #endif
 
 /*
@@ -167,10 +166,10 @@ transfer_to_handler_cont:
  */
 stack_ovf:
 	/* sometimes we use a statically-allocated stack, which is OK. */
-	lis	r11,_end@h
-	ori	r11,r11,_end@l
-	cmplw	r1,r11
-	ble	3b			/* r1 <= &_end is OK */
+	lis	r12,_end@h
+	ori	r12,r12,_end@l
+	cmplw	r1,r12
+	ble	5b			/* r1 <= &_end is OK */
 	SAVE_NVGPRS(r11)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	lis	r1,init_thread_union@ha
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index a5ae04a57c78..b7d140430a41 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -376,11 +376,28 @@ label##_common: \
 	bl	hdlr;						\
 	b	.ret_from_except
 
+/*
+ * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
+ * in the idle task and therefore need the special idle handling.
+ */
+#define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr)		\
+	.align	7;						\
+	.globl label##_common;					\
+label##_common:							\
+	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);		\
+	FINISH_NAP;						\
+	DISABLE_INTS;						\
+	bl	.save_nvgprs;					\
+	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
+	bl	hdlr;						\
+	b	.ret_from_except
+
 #define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr)		\
 	.align	7;						\
 	.globl label##_common;					\
 label##_common:							\
 	EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);		\
+	FINISH_NAP;						\
 	DISABLE_INTS;						\
 	bl	.ppc64_runlatch_on;				\
 	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
@@ -388,6 +405,25 @@ label##_common: \
 	b	.ret_from_except_lite
 
 /*
+ * When the idle code in power4_idle puts the CPU into NAP mode,
+ * it has to do so in a loop, and relies on the external interrupt
+ * and decrementer interrupt entry code to get it out of the loop.
+ * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
+ * to signal that it is in the loop and needs help to get out.
+ */
+#ifdef CONFIG_PPC_970_NAP
+#define FINISH_NAP				\
+BEGIN_FTR_SECTION				\
+	clrrdi	r11,r1,THREAD_SHIFT;		\
+	ld	r9,TI_LOCAL_FLAGS(r11);		\
+	andi.	r10,r9,_TLF_NAPPING;		\
+	bnel	power4_fixup_nap;		\
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+#else
+#define FINISH_NAP
+#endif
+
+/*
  * Start of pSeries system interrupt routines
  */
 	. = 0x100
@@ -772,6 +808,7 @@ hardware_interrupt_iSeries_masked:
 	.globl machine_check_common
 machine_check_common:
 	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
+	FINISH_NAP
 	DISABLE_INTS
 	bl	.save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
@@ -783,7 +820,7 @@ machine_check_common:
 	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
 	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
 	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
-	STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
+	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
 	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
 #ifdef CONFIG_ALTIVEC
 	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
@@ -1034,6 +1071,7 @@ unrecov_slb:
 	.globl hardware_interrupt_entry
 hardware_interrupt_common:
 	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
+	FINISH_NAP
 hardware_interrupt_entry:
 	DISABLE_INTS
 	bl	.ppc64_runlatch_on
@@ -1041,6 +1079,15 @@ hardware_interrupt_entry:
 	bl	.do_IRQ
 	b	.ret_from_except_lite
 
+#ifdef CONFIG_PPC_970_NAP
+power4_fixup_nap:
+	andc	r9,r9,r10
+	std	r9,TI_LOCAL_FLAGS(r11)
+	ld	r10,_LINK(r1)		/* make idle task do the */
+	std	r10,_NIP(r1)		/* equivalent of a blr */
+	blr
+#endif
+
 	.align	7
 	.globl alignment_common
 alignment_common:
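
The 64-bit changes above form a small handshake between the idle loop and the interrupt entry path: FINISH_NAP tests the idle thread's _TLF_NAPPING bit and, if it is set, branches to power4_fixup_nap, which clears the bit and overwrites the saved NIP with the saved LR, so the eventual return from the exception behaves like a blr out of the nap loop. Roughly, in C — an illustrative sketch only; the struct and function names below are invented for the example and are not part of the patch:

	/*
	 * Illustrative model (not kernel code) of the FINISH_NAP /
	 * power4_fixup_nap handshake: the interrupt entry path checks the
	 * idle thread's _TLF_NAPPING flag and, if set, clears it and
	 * rewrites the saved NIP with the saved LR so that returning from
	 * the exception acts like a blr out of power4_idle.
	 */
	#include <stdio.h>

	#define TLF_NAPPING	0
	#define _TLF_NAPPING	(1UL << TLF_NAPPING)

	struct thread_info_model {
		unsigned long local_flags;	/* mirrors thread_info.local_flags */
	};

	struct exception_frame_model {
		unsigned long nip;		/* saved program counter */
		unsigned long link;		/* saved link register */
	};

	/* What FINISH_NAP + power4_fixup_nap do, expressed in C. */
	static void finish_nap(struct thread_info_model *ti,
			       struct exception_frame_model *frame)
	{
		if (ti->local_flags & _TLF_NAPPING) {
			ti->local_flags &= ~_TLF_NAPPING;
			frame->nip = frame->link;	/* "equivalent of a blr" */
		}
	}

	int main(void)
	{
		struct thread_info_model ti = { .local_flags = _TLF_NAPPING };
		struct exception_frame_model frame = { .nip = 0x100, .link = 0x2000 };

		finish_nap(&ti, &frame);
		printf("local_flags=%lx nip=%lx\n", ti.local_flags, frame.nip);
		return 0;
	}
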
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
index 12a4efbaa08f..b45fa0e37212 100644
--- a/arch/powerpc/kernel/idle_6xx.S
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -22,8 +22,6 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 
-#undef DEBUG
-
 	.text
 
 /*
@@ -109,12 +107,6 @@ BEGIN_FTR_SECTION
 	dcbf	0,r4
 	dcbf	0,r4
 END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
-#ifdef DEBUG
-	lis	r6,nap_enter_count@ha
-	lwz	r4,nap_enter_count@l(r6)
-	addi	r4,r4,1
-	stw	r4,nap_enter_count@l(r6)
-#endif
 2:
 BEGIN_FTR_SECTION
 	/* Go to low speed mode on some 750FX */
@@ -144,48 +136,42 @@ BEGIN_FTR_SECTION
 	DSSALL
 	sync
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+	rlwinm	r9,r1,0,0,31-THREAD_SHIFT	/* current thread_info */
+	lwz	r8,TI_LOCAL_FLAGS(r9)	/* set napping bit */
+	ori	r8,r8,_TLF_NAPPING	/* so when we take an exception */
+	stw	r8,TI_LOCAL_FLAGS(r9)	/* it will return to our caller */
 	mfmsr	r7
 	ori	r7,r7,MSR_EE
 	oris	r7,r7,MSR_POW@h
-	sync
-	isync
+1:	sync
 	mtmsr	r7
 	isync
-	sync
-	blr
-
+	b	1b
+
 /*
  * Return from NAP/DOZE mode, restore some CPU specific registers,
  * we are called with DR/IR still off and r2 containing physical
- * address of current.
+ * address of current.  R11 points to the exception frame (physical
+ * address).  We have to preserve r10.
  */
 _GLOBAL(power_save_6xx_restore)
-	mfspr	r11,SPRN_HID0
-	rlwinm.	r11,r11,0,10,8	/* Clear NAP & copy NAP bit !state to cr1 EQ */
-	cror	4*cr1+eq,4*cr0+eq,4*cr0+eq
-BEGIN_FTR_SECTION
-	rlwinm	r11,r11,0,9,7	/* Clear DOZE */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-	mtspr	SPRN_HID0, r11
+	lwz	r9,_LINK(r11)	/* interrupted in ppc6xx_idle: */
+	stw	r9,_NIP(r11)	/* make it do a blr */
 
-#ifdef DEBUG
-	beq	cr1,1f
-	lis	r11,(nap_return_count-KERNELBASE)@ha
-	lwz	r9,nap_return_count@l(r11)
-	addi	r9,r9,1
-	stw	r9,nap_return_count@l(r11)
-1:
-#endif
-
-	rlwinm	r9,r1,0,0,18
-	tophys(r9,r9)
-	lwz	r11,TI_CPU(r9)
+#ifdef CONFIG_SMP
+	mfspr	r12,SPRN_SPRG3
+	lwz	r11,TI_CPU(r12)		/* get cpu number * 4 */
 	slwi	r11,r11,2
+#else
+	li	r11,0
+#endif
 	/* Todo make sure all these are in the same page
-	 * and load r22 (@ha part + CPU offset) only once
+	 * and load r11 (@ha part + CPU offset) only once
	 */
 BEGIN_FTR_SECTION
-	beq	cr1,1f
+	mfspr	r9,SPRN_HID0
+	andis.	r9,r9,HID0_NAP@h
+	beq	1f
 	addis	r9,r11,(nap_save_msscr0-KERNELBASE)@ha
 	lwz	r9,nap_save_msscr0@l(r9)
 	mtspr	SPRN_MSSCR0, r9
@@ -210,10 +196,3 @@ _GLOBAL(nap_save_hid1)
 
 _GLOBAL(powersave_lowspeed)
 	.long	0
-
-#ifdef DEBUG
-_GLOBAL(nap_enter_count)
-	.space	4
-_GLOBAL(nap_return_count)
-	.space	4
-#endif
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 6dad1c02496e..d85c7c938eed 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -35,12 +35,16 @@ BEGIN_FTR_SECTION
 	DSSALL
 	sync
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+	clrrdi	r9,r1,THREAD_SHIFT	/* current thread_info */
+	ld	r8,TI_LOCAL_FLAGS(r9)	/* set napping bit */
+	ori	r8,r8,_TLF_NAPPING	/* so when we take an exception */
+	std	r8,TI_LOCAL_FLAGS(r9)	/* it will return to our caller */
 	mfmsr	r7
 	ori	r7,r7,MSR_EE
 	oris	r7,r7,MSR_POW@h
-	sync
+1:	sync
 	isync
 	mtmsrd	r7
 	isync
-	sync
-	blr
+	b	1b
+
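
On the entry side (idle_6xx.S and idle_power4.S above), the idle code now sets _TLF_NAPPING before turning on MSR_POW, and then sits in a one-instruction loop instead of expecting to fall through to a blr; it is the interrupt-time fixup shown earlier that actually gets it out. A rough C rendering of that sequence follows, as a sketch only: the helper names are invented, and the polling loop merely approximates the real exit path, which is the saved-NIP rewrite done at interrupt entry.

	#include <stdio.h>

	#define _TLF_NAPPING	(1UL << 0)

	static unsigned long local_flags;	/* stands in for current_thread_info()->local_flags */

	/* Models "mtmsr(d) r7" with MSR_POW|MSR_EE set: sleep until an interrupt.
	 * Here the third "interrupt" performs the FINISH_NAP fixup, i.e. it
	 * clears _TLF_NAPPING; on real hardware it would also rewrite the
	 * saved NIP so the loop is simply never resumed. */
	static void sleep_until_interrupt(void)
	{
		static int interrupts;

		if (++interrupts == 3)
			local_flags &= ~_TLF_NAPPING;
	}

	static void enter_nap(void)
	{
		local_flags |= _TLF_NAPPING;	/* advertise: we are in the nap loop */

		/* 1: sync ; mtmsr(d) r7 ; isync ; b 1b */
		while (local_flags & _TLF_NAPPING)
			sleep_until_interrupt();
	}

	int main(void)
	{
		enter_nap();
		printf("woke up, local_flags=%lx\n", local_flags);
		return 0;
	}
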
diff --git a/include/asm-powerpc/thread_info.h b/include/asm-powerpc/thread_info.h
index ffc7462d77ba..88b553c6b26c 100644
--- a/include/asm-powerpc/thread_info.h
+++ b/include/asm-powerpc/thread_info.h
@@ -37,6 +37,8 @@ struct thread_info {
 	int		preempt_count;		/* 0 => preemptable,
 						   <0 => BUG */
 	struct restart_block restart_block;
+	unsigned long	local_flags;		/* private flags for thread */
+
 	/* low level flags - has atomic operations done on it */
 	unsigned long	flags ____cacheline_aligned_in_smp;
 };
@@ -143,6 +145,12 @@ static inline struct thread_info *current_thread_info(void)
 				 _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK)
 #define _TIF_PERSYSCALL_MASK	(_TIF_RESTOREALL|_TIF_NOERROR)
 
+/* Bits in local_flags */
+/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
+#define TLF_NAPPING		0	/* idle thread enabled NAP mode */
+
+#define _TLF_NAPPING		(1 << TLF_NAPPING)
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_POWERPC_THREAD_INFO_H */
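
The new local_flags field sits apart from flags on purpose: flags is the word that gets atomic bit operations and is cacheline-aligned for SMP, while local_flags is documented as private to the thread, so plain loads and stores appear to be enough (it is only touched by the owning thread, or by interrupt entry on the same CPU while that thread is current). A minimal sketch of that usage pattern, with hypothetical helper names chosen for the example:

	#include <stdio.h>

	/* Mirrors the new definitions in thread_info.h. */
	#define TLF_NAPPING	0	/* idle thread enabled NAP mode */
	#define _TLF_NAPPING	(1 << TLF_NAPPING)

	struct thread_info_model {
		unsigned long flags;		/* atomic ops in the real kernel */
		unsigned long local_flags;	/* private to the thread: plain ops suffice */
	};

	/* Hypothetical helpers: non-atomic because only the owning thread
	 * (or an interrupt taken on top of it) looks at local_flags. */
	static void set_local_flag(struct thread_info_model *ti, int bit)
	{
		ti->local_flags |= 1UL << bit;
	}

	static int test_and_clear_local_flag(struct thread_info_model *ti, int bit)
	{
		int was_set = (ti->local_flags >> bit) & 1;

		ti->local_flags &= ~(1UL << bit);
		return was_set;
	}

	int main(void)
	{
		struct thread_info_model ti = { 0, 0 };

		set_local_flag(&ti, TLF_NAPPING);
		printf("napping? %d\n", test_and_clear_local_flag(&ti, TLF_NAPPING));
		printf("napping? %d\n", test_and_clear_local_flag(&ti, TLF_NAPPING));
		return 0;
	}
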