-rw-r--r--  arch/powerpc/include/asm/exception-64s.h        |  34
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h               |  49
-rw-r--r--  arch/powerpc/include/asm/irqflags.h             |  37
-rw-r--r--  arch/powerpc/include/asm/paca.h                 |   2
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c               |   2
-rw-r--r--  arch/powerpc/kernel/dbell.c                     |   2
-rw-r--r--  arch/powerpc/kernel/entry_64.S                  | 153
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S            | 221
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S            | 150
-rw-r--r--  arch/powerpc/kernel/head_64.S                   |  24
-rw-r--r--  arch/powerpc/kernel/idle.c                      |   6
-rw-r--r--  arch/powerpc/kernel/idle_book3e.S               |  25
-rw-r--r--  arch/powerpc/kernel/idle_power4.S               |  24
-rw-r--r--  arch/powerpc/kernel/idle_power7.S               |  23
-rw-r--r--  arch/powerpc/kernel/irq.c                       | 204
-rw-r--r--  arch/powerpc/kernel/process.c                   |   3
-rw-r--r--  arch/powerpc/kernel/time.c                      |   8
-rw-r--r--  arch/powerpc/platforms/pseries/processor_idle.c |  18
-rw-r--r--  arch/powerpc/xmon/xmon.c                        |   4
19 files changed, 647 insertions(+), 342 deletions(-)
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 70354af0740e..548da3aa0a30 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -232,23 +232,30 @@ label##_hv:						\
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common,	\
 				 EXC_HV, KVMTEST, vec)
 
-#define __SOFTEN_TEST(h)					\
+/* This associates vector numbers with bits in paca->irq_happened */
+#define SOFTEN_VALUE_0x500	PACA_IRQ_EE
+#define SOFTEN_VALUE_0x502	PACA_IRQ_EE
+#define SOFTEN_VALUE_0x900	PACA_IRQ_DEC
+#define SOFTEN_VALUE_0x982	PACA_IRQ_DEC
+
+#define __SOFTEN_TEST(h, vec)					\
 	lbz	r10,PACASOFTIRQEN(r13);				\
 	cmpwi	r10,0;						\
+	li	r10,SOFTEN_VALUE_##vec;				\
 	beq	masked_##h##interrupt
-#define _SOFTEN_TEST(h)		__SOFTEN_TEST(h)
+#define _SOFTEN_TEST(h, vec)	__SOFTEN_TEST(h, vec)
 
 #define SOFTEN_TEST_PR(vec)					\
 	KVMTEST_PR(vec);					\
-	_SOFTEN_TEST(EXC_STD)
+	_SOFTEN_TEST(EXC_STD, vec)
 
 #define SOFTEN_TEST_HV(vec)					\
 	KVMTEST(vec);						\
-	_SOFTEN_TEST(EXC_HV)
+	_SOFTEN_TEST(EXC_HV, vec)
 
 #define SOFTEN_TEST_HV_201(vec)					\
 	KVMTEST(vec);						\
-	_SOFTEN_TEST(EXC_STD)
+	_SOFTEN_TEST(EXC_STD, vec)
 
 #define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)	\
 	HMT_MEDIUM;						\
@@ -279,22 +286,7 @@ label##_hv:						\
  */
 
 /* Exception addition: Hard disable interrupts */
-#ifdef CONFIG_TRACE_IRQFLAGS
-#define DISABLE_INTS				\
-	lbz	r10,PACASOFTIRQEN(r13);		\
-	li	r11,0;				\
-	cmpwi	cr0,r10,0;			\
-	stb	r11,PACAHARDIRQEN(r13);		\
-	beq	44f;				\
-	stb	r11,PACASOFTIRQEN(r13);		\
-	TRACE_DISABLE_INTS;			\
-44:
-#else
-#define DISABLE_INTS				\
-	li	r11,0;				\
-	stb	r11,PACASOFTIRQEN(r13);		\
-	stb	r11,PACAHARDIRQEN(r13)
-#endif /* CONFIG_TRACE_IRQFLAGS */
+#define DISABLE_INTS	SOFT_DISABLE_INTS(r10,r11)
 
 /* Exception addition: Keep interrupt state */
 #define ENABLE_INTS				\
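The effect of the new vec parameter above is easiest to see in C: "li r10,SOFTEN_VALUE_##vec" token-pastes the vector number into one of the SOFTEN_VALUE_* constants, so the out-of-line masked_*interrupt stub receives in r10 the paca->irq_happened bit to latch. A minimal sketch of the combined test-and-latch, assuming the PACA_IRQ_* values introduced in hw_irq.h below (illustrative only, not part of the patch):

    #include <stdint.h>

    #define PACA_IRQ_HARD_DIS	0x01
    #define PACA_IRQ_EE		0x04
    #define PACA_IRQ_DEC	0x08

    struct paca_sketch {
    	uint8_t soft_enabled;	/* PACASOFTIRQEN */
    	uint8_t irq_happened;	/* PACAIRQHAPPENED */
    };

    /* Return 1 when the interrupt must be masked (latched for replay) */
    static int soften_test(struct paca_sketch *p, uint8_t soften_value)
    {
    	if (p->soft_enabled)
    		return 0;			/* deliver normally */
    	p->irq_happened |= soften_value;	/* done in masked_*interrupt */
    	return 1;
    }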
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 6c6fa955baa7..51010bfc792e 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -11,6 +11,27 @@
 #include <asm/ptrace.h>
 #include <asm/processor.h>
 
+#ifdef CONFIG_PPC64
+
+/*
+ * PACA flags in paca->irq_happened.
+ *
+ * These bits are set when interrupts occur while soft-disabled
+ * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
+ * is set whenever we manually hard disable.
+ */
+#define PACA_IRQ_HARD_DIS	0x01
+#define PACA_IRQ_DBELL		0x02
+#define PACA_IRQ_EE		0x04
+#define PACA_IRQ_DEC		0x08 /* Or FIT */
+#define PACA_IRQ_EE_EDGE	0x10 /* BookE only */
+
+#endif /* CONFIG_PPC64 */
+
+#ifndef __ASSEMBLY__
+
+extern void __replay_interrupt(unsigned int vector);
+
 extern void timer_interrupt(struct pt_regs *);
 
 #ifdef CONFIG_PPC64
@@ -42,7 +63,6 @@ static inline unsigned long arch_local_irq_disable(void)
 }
 
 extern void arch_local_irq_restore(unsigned long);
-extern void iseries_handle_interrupts(void);
 
 static inline void arch_local_irq_enable(void)
 {
@@ -72,12 +92,24 @@ static inline bool arch_irqs_disabled(void)
 #define __hard_irq_disable()	__mtmsrd(local_paca->kernel_msr, 1)
 #endif
 
-#define hard_irq_disable()			\
-	do {					\
-		__hard_irq_disable();		\
-		get_paca()->soft_enabled = 0;	\
-		get_paca()->hard_enabled = 0;	\
-	} while(0)
+static inline void hard_irq_disable(void)
+{
+	__hard_irq_disable();
+	get_paca()->soft_enabled = 0;
+	get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;
+}
+
+/*
+ * This is called by asynchronous interrupts to conditionally
+ * re-enable hard interrupts when soft-disabled after having
+ * cleared the source of the interrupt
+ */
+static inline void may_hard_irq_enable(void)
+{
+	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
+	if (!(get_paca()->irq_happened & PACA_IRQ_EE))
+		__hard_irq_enable();
+}
 
 static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 {
@@ -149,6 +181,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 	return !(regs->msr & MSR_EE);
 }
 
+static inline void may_hard_irq_enable(void) { }
+
 #endif /* CONFIG_PPC64 */
 
 #define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST
@@ -159,5 +193,6 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
  */
 struct irq_chip;
 
+#endif  /* __ASSEMBLY__ */
 #endif	/* __KERNEL__ */
 #endif	/* _ASM_POWERPC_HW_IRQ_H */
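The main consumer of paca->irq_happened is arch_local_irq_restore() in arch/powerpc/kernel/irq.c, whose 204-line rework belongs to this patch but is not quoted on this page. A hedged C sketch of the flow it implements (set_soft_enabled() and __check_irq_replay() are irq.c internals; the exact body may differ):

    /* Sketch only: the real implementation lives in kernel/irq.c */
    void arch_local_irq_restore(unsigned long en)
    {
    	unsigned int replay;

    	set_soft_enabled(en);		/* writes paca->soft_enabled */
    	if (!en)
    		return;

    	/* Fast path: nothing was latched while we were soft-disabled */
    	if (!get_paca()->irq_happened)
    		return;

    	/* Hard-disable so the latched bits can be examined safely */
    	if (!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS))
    		__hard_irq_disable();

    	set_soft_enabled(0);
    	replay = __check_irq_replay();	/* picks a vector, clears bits */
    	set_soft_enabled(1);
    	if (replay)
    		__replay_interrupt(replay);	/* fake an interrupt entry */
    	else
    		__hard_irq_enable();
    }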
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
index b0b06d85788d..6f9b6e23dc5a 100644
--- a/arch/powerpc/include/asm/irqflags.h
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -39,24 +39,31 @@
 #define TRACE_ENABLE_INTS	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on)
 #define TRACE_DISABLE_INTS	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off)
 
-#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip)		\
-	cmpdi	en,0;					\
-	bne	95f;					\
-	stb	en,PACASOFTIRQEN(r13);			\
-	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off)	\
-	b	skip;					\
-95:	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on)	\
-	li	en,1;
-#define TRACE_AND_RESTORE_IRQ(en)			\
-	TRACE_AND_RESTORE_IRQ_PARTIAL(en,96f);		\
-	stb	en,PACASOFTIRQEN(r13);			\
-96:
+/*
+ * This is used by assembly code to soft-disable interrupts
+ */
+#define SOFT_DISABLE_INTS(__rA, __rB)		\
+	lbz	__rA,PACASOFTIRQEN(r13);	\
+	lbz	__rB,PACAIRQHAPPENED(r13);	\
+	cmpwi	cr0,__rA,0;			\
+	li	__rA,0;				\
+	ori	__rB,__rB,PACA_IRQ_HARD_DIS;	\
+	stb	__rB,PACAIRQHAPPENED(r13);	\
+	beq	44f;				\
+	stb	__rA,PACASOFTIRQEN(r13);	\
+	TRACE_DISABLE_INTS;			\
+44:
+
 #else
 #define TRACE_ENABLE_INTS
 #define TRACE_DISABLE_INTS
-#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip)
-#define TRACE_AND_RESTORE_IRQ(en)		\
-	stb	en,PACASOFTIRQEN(r13)
+
+#define SOFT_DISABLE_INTS(__rA, __rB)		\
+	lbz	__rA,PACAIRQHAPPENED(r13);	\
+	li	__rB,0;				\
+	ori	__rA,__rA,PACA_IRQ_HARD_DIS;	\
+	stb	__rB,PACASOFTIRQEN(r13);	\
+	stb	__rA,PACAIRQHAPPENED(r13)
 #endif
 #endif
 
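Restated in C, SOFT_DISABLE_INTS does the following on exception entry (a sketch reusing struct paca_sketch from the note above; the tracing call exists only under CONFIG_TRACE_IRQFLAGS):

    /* C restatement of SOFT_DISABLE_INTS, illustrative only */
    static void soft_disable_ints(struct paca_sketch *p)
    {
    	int was_enabled = p->soft_enabled;

    	/* We enter hard-disabled; record that so a later call to
    	 * arch_local_irq_restore() knows to re-enable MSR:EE. */
    	p->irq_happened |= PACA_IRQ_HARD_DIS;
    	p->soft_enabled = 0;
    	if (was_enabled)
    		trace_hardirqs_off();
    }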
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 269c05a36d91..daf813fea91f 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -132,7 +132,7 @@ struct paca_struct {
 	u64 saved_msr;			/* MSR saved here by enter_rtas */
 	u16 trap_save;			/* Used when bad stack is encountered */
 	u8 soft_enabled;		/* irq soft-enable flag */
-	u8 hard_enabled;		/* set if irqs are enabled in MSR */
+	u8 irq_happened;		/* irq happened while soft-disabled */
 	u8 io_sync;			/* writel() needs spin_unlock sync */
 	u8 irq_work_pending;		/* IRQ_WORK interrupt while soft-disable */
 	u8 nap_state_lost;		/* NV GPR values lost in power7_idle */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 04caee7d9bc1..cdd0d264415f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -147,7 +147,7 @@ int main(void)
 	DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase));
 	DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
 	DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
-	DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
+	DEFINE(PACAIRQHAPPENED, offsetof(struct paca_struct, irq_happened));
 	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
 #ifdef CONFIG_PPC_MM_SLICES
 	DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 2cc451aaaca7..5b25c8060fd6 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -37,6 +37,8 @@ void doorbell_exception(struct pt_regs *regs)
 
 	irq_enter();
 
+	may_hard_irq_enable();
+
 	smp_ipi_demux();
 
 	irq_exit();
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index c513beb78b3b..f8a7a1a1a9f4 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -32,6 +32,7 @@
 #include <asm/ptrace.h>
 #include <asm/irqflags.h>
 #include <asm/ftrace.h>
+#include <asm/hw_irq.h>
 
 /*
  * System calls.
@@ -583,18 +584,72 @@ _GLOBAL(ret_from_except_lite)
 	bne	do_work
 #endif /* !CONFIG_PREEMPT */
 
+	.globl	fast_exc_return_irq
+fast_exc_return_irq:
 restore:
+	/*
+	 * This is the main kernel exit path, we first check if we
+	 * have to change our interrupt state.
+	 */
 	ld	r5,SOFTE(r1)
-	TRACE_AND_RESTORE_IRQ(r5);
+	lbz	r6,PACASOFTIRQEN(r13)
+	cmpwi	cr1,r5,0
+	cmpw	cr0,r5,r6
+	beq	cr0,4f
+
+	/* We do, handle disable first, which is easy */
+	bne	cr1,3f;
+	li	r0,0
+	stb	r0,PACASOFTIRQEN(r13);
+	TRACE_DISABLE_INTS
+	b	4f
 
-	/* extract EE bit and use it to restore paca->hard_enabled */
-	ld	r3,_MSR(r1)
-	rldicl	r4,r3,49,63		/* r0 = (r3 >> 15) & 1 */
-	stb	r4,PACAHARDIRQEN(r13)
+3:	/*
+	 * We are about to soft-enable interrupts (we are hard disabled
+	 * at this point). We check if there's anything that needs to
+	 * be replayed first.
+	 */
+	lbz	r0,PACAIRQHAPPENED(r13)
+	cmpwi	cr0,r0,0
+	bne-	restore_check_irq_replay
+
+	/*
+	 * Get here when nothing happened while soft-disabled, just
+	 * soft-enable and move on. We will hard-enable as a side
+	 * effect of rfi
+	 */
+restore_no_replay:
+	TRACE_ENABLE_INTS
+	li	r0,1
+	stb	r0,PACASOFTIRQEN(r13);
 
+	/*
+	 * Final return path. BookE is handled in a different file
+	 */
+4:
 #ifdef CONFIG_PPC_BOOK3E
 	b	.exception_return_book3e
 #else
+	/*
+	 * Clear the reservation. If we know the CPU tracks the address of
+	 * the reservation then we can potentially save some cycles and use
+	 * a larx. On POWER6 and POWER7 this is significantly faster.
+	 */
+BEGIN_FTR_SECTION
+	stdcx.	r0,0,r1		/* to clear the reservation */
+FTR_SECTION_ELSE
+	ldarx	r4,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+	/*
+	 * Some code paths such as load_up_fpu or altivec return directly
+	 * here. They run entirely hard disabled and do not alter the
+	 * interrupt state. They also don't use lwarx/stwcx. and thus
+	 * are known not to leave dangling reservations.
+	 */
+	.globl	fast_exception_return
+fast_exception_return:
+	ld	r3,_MSR(r1)
 	ld	r4,_CTR(r1)
 	ld	r0,_LINK(r1)
 	mtctr	r4
@@ -608,17 +663,6 @@ restore:
 	beq-	unrecov_restore
 
 	/*
-	 * Clear the reservation. If we know the CPU tracks the address of
-	 * the reservation then we can potentially save some cycles and use
-	 * a larx. On POWER6 and POWER7 this is significantly faster.
-	 */
-BEGIN_FTR_SECTION
-	stdcx.	r0,0,r1		/* to clear the reservation */
-FTR_SECTION_ELSE
-	ldarx	r4,0,r1
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-
-	/*
 	 * Clear RI before restoring r13.  If we are returning to
 	 * userspace and we take an exception after restoring r13,
 	 * we end up corrupting the userspace r13 value.
@@ -629,7 +673,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 
 	/*
 	 * r13 is our per cpu area, only restore it if we are returning to
-	 * userspace
+	 * userspace; the value stored in the stack frame may belong to
+	 * another CPU.
 	 */
 	andi.	r0,r3,MSR_PR
 	beq	1f
@@ -654,6 +699,55 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 
 #endif /* CONFIG_PPC_BOOK3E */
 
+	/*
+	 * Something did happen, check if a re-emit is needed
+	 * (this also clears paca->irq_happened)
+	 */
+restore_check_irq_replay:
+	/* XXX: We could implement a fast path here where we check
+	 * for irq_happened being just 0x01, in which case we can
+	 * clear it and return. That means that we would potentially
+	 * miss a decrementer having wrapped all the way around.
+	 *
+	 * Still, this might be useful for things like hash_page
+	 */
+	bl	.__check_irq_replay
+	cmpwi	cr0,r3,0
+	beq	restore_no_replay
+
+	/*
+	 * We need to re-emit an interrupt. We do so by re-using our
+	 * existing exception frame. We first change the trap value,
+	 * but we need to ensure we preserve the low nibble of it
+	 */
+	ld	r4,_TRAP(r1)
+	clrldi	r4,r4,60
+	or	r4,r4,r3
+	std	r4,_TRAP(r1)
+
+	/*
+	 * Then find the right handler and call it. Interrupts are
+	 * still soft-disabled and we keep them that way.
+	 */
+	cmpwi	cr0,r3,0x500
+	bne	1f
+	addi	r3,r1,STACK_FRAME_OVERHEAD;
+	bl	.do_IRQ
+	b	.ret_from_except
+1:	cmpwi	cr0,r3,0x900
+	bne	1f
+	addi	r3,r1,STACK_FRAME_OVERHEAD;
+	bl	.timer_interrupt
+	b	.ret_from_except
+#ifdef CONFIG_PPC_BOOK3E
+1:	cmpwi	cr0,r3,0x280
+	bne	1f
+	addi	r3,r1,STACK_FRAME_OVERHEAD;
+	bl	.doorbell_exception
+	b	.ret_from_except
+#endif /* CONFIG_PPC_BOOK3E */
+1:	b	.ret_from_except /* What else to do here ? */
+
 do_work:
 #ifdef CONFIG_PREEMPT
 	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
@@ -666,18 +760,11 @@ do_work:
 	crandc	eq,cr1*4+eq,eq
 	bne	restore
 
-	/* Here we are preempting the current task.
-	 *
-	 * Ensure interrupts are soft-disabled. We also properly mark
-	 * the PACA to reflect the fact that they are hard-disabled
-	 * and trace the change
-	 */
-	li	r0,0
-	stb	r0,PACASOFTIRQEN(r13)
-	stb	r0,PACAHARDIRQEN(r13)
-	TRACE_DISABLE_INTS
-
-	/* Call the scheduler with soft IRQs off */
+	/*
+	 * Here we are preempting the current task. We want to make
+	 * sure we are soft-disabled first
+	 */
+	SOFT_DISABLE_INTS(r3,r4)
 1:	bl	.preempt_schedule_irq
 
 	/* Hard-disable interrupts again (and update PACA) */
@@ -687,8 +774,8 @@ do_work:
 	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
 	mtmsrd	r10,1
 #endif /* CONFIG_PPC_BOOK3E */
-	li	r0,0
-	stb	r0,PACAHARDIRQEN(r13)
+	li	r0,PACA_IRQ_HARD_DIS
+	stb	r0,PACAIRQHAPPENED(r13)
 
 	/* Re-test flags and eventually loop */
 	clrrdi	r9,r1,THREAD_SHIFT
@@ -710,14 +797,12 @@ user_work:
 
 	andi.	r0,r4,_TIF_NEED_RESCHED
 	beq	1f
-	li	r5,1
-	TRACE_AND_RESTORE_IRQ(r5);
+	bl	.restore_interrupts
 	bl	.schedule
 	b	.ret_from_except_lite
 
 1:	bl	.save_nvgprs
-	li	r5,1
-	TRACE_AND_RESTORE_IRQ(r5);
+	bl	.restore_interrupts
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.do_notify_resume
 	b	.ret_from_except
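Summarizing the rewritten exit path in C-like pseudocode (a sketch; the branches map onto restore, restore_no_replay and restore_check_irq_replay above, and the helper names marked hypothetical are stand-ins, not kernel symbols):

    /* Pseudocode sketch of the new kernel-exit interrupt handling */
    static void exit_path(struct pt_regs *regs, struct paca_sketch *p)
    {
    	if (regs->softe != p->soft_enabled) {
    		if (!regs->softe) {
    			/* Disabling is the easy case: clear the flag */
    			p->soft_enabled = 0;
    			trace_hardirqs_off();
    		} else if (p->irq_happened) {
    			/* Latched events: rewrite _TRAP and re-enter
    			 * do_IRQ/timer_interrupt on the same frame,
    			 * still soft-disabled */
    			replay_one_interrupt(regs);	/* hypothetical */
    			return;
    		} else {
    			/* Nothing pending: soft-enable; rfid will
    			 * hard-enable as a side effect */
    			trace_hardirqs_on();
    			p->soft_enabled = 1;
    		}
    	}
    	restore_registers_and_rfid(regs);	/* hypothetical */
    }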
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index c4c34665c221..7215cc2495df 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -24,6 +24,7 @@
 #include <asm/ptrace.h>
 #include <asm/ppc-opcode.h>
 #include <asm/mmu.h>
+#include <asm/hw_irq.h>
 
 /* XXX This will ultimately add space for a special exception save
  *     structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
@@ -77,59 +78,55 @@
 #define SPRN_MC_SRR1	SPRN_MCSRR1
 
 #define NORMAL_EXCEPTION_PROLOG(n, addition)				    \
-	EXCEPTION_PROLOG(n, GEN, addition##_GEN)
+	EXCEPTION_PROLOG(n, GEN, addition##_GEN(n))
 
 #define CRIT_EXCEPTION_PROLOG(n, addition)				    \
-	EXCEPTION_PROLOG(n, CRIT, addition##_CRIT)
+	EXCEPTION_PROLOG(n, CRIT, addition##_CRIT(n))
 
 #define DBG_EXCEPTION_PROLOG(n, addition)				    \
-	EXCEPTION_PROLOG(n, DBG, addition##_DBG)
+	EXCEPTION_PROLOG(n, DBG, addition##_DBG(n))
 
 #define MC_EXCEPTION_PROLOG(n, addition)				    \
-	EXCEPTION_PROLOG(n, MC, addition##_MC)
+	EXCEPTION_PROLOG(n, MC, addition##_MC(n))
 
 
 /* Variants of the "addition" argument for the prolog
  */
-#define PROLOG_ADDITION_NONE_GEN
-#define PROLOG_ADDITION_NONE_CRIT
-#define PROLOG_ADDITION_NONE_DBG
-#define PROLOG_ADDITION_NONE_MC
+#define PROLOG_ADDITION_NONE_GEN(n)
+#define PROLOG_ADDITION_NONE_CRIT(n)
+#define PROLOG_ADDITION_NONE_DBG(n)
+#define PROLOG_ADDITION_NONE_MC(n)
 
-#define PROLOG_ADDITION_MASKABLE_GEN					    \
+#define PROLOG_ADDITION_MASKABLE_GEN(n)					    \
 	lbz	r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */	    \
 	cmpwi	cr0,r11,0;		/* yes -> go out of line */	    \
-	beq	masked_interrupt_book3e;
+	beq	masked_interrupt_book3e_##n
 
-#define PROLOG_ADDITION_2REGS_GEN					    \
+#define PROLOG_ADDITION_2REGS_GEN(n)					    \
 	std	r14,PACA_EXGEN+EX_R14(r13);				    \
 	std	r15,PACA_EXGEN+EX_R15(r13)
 
-#define PROLOG_ADDITION_1REG_GEN					    \
+#define PROLOG_ADDITION_1REG_GEN(n)					    \
 	std	r14,PACA_EXGEN+EX_R14(r13);
 
-#define PROLOG_ADDITION_2REGS_CRIT					    \
+#define PROLOG_ADDITION_2REGS_CRIT(n)					    \
 	std	r14,PACA_EXCRIT+EX_R14(r13);				    \
 	std	r15,PACA_EXCRIT+EX_R15(r13)
 
-#define PROLOG_ADDITION_2REGS_DBG					    \
+#define PROLOG_ADDITION_2REGS_DBG(n)					    \
 	std	r14,PACA_EXDBG+EX_R14(r13);				    \
 	std	r15,PACA_EXDBG+EX_R15(r13)
 
-#define PROLOG_ADDITION_2REGS_MC					    \
+#define PROLOG_ADDITION_2REGS_MC(n)					    \
 	std	r14,PACA_EXMC+EX_R14(r13);				    \
 	std	r15,PACA_EXMC+EX_R15(r13)
 
-#define PROLOG_ADDITION_DOORBELL_GEN					    \
-	lbz	r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */	    \
-	cmpwi	cr0,r11,0;		/* yes -> go out of line */	    \
-	beq	masked_doorbell_book3e
-
 
 /* Core exception code for all exceptions except TLB misses.
  * XXX: Needs to make SPRN_SPRG_GEN depend on exception type
  */
 #define	EXCEPTION_COMMON(n, excf, ints)					    \
+exc_##n##_common:							    \
 	std	r0,GPR0(r1);		/* save r0 in stackframe */	    \
 	std	r2,GPR2(r1);		/* save r2 in stackframe */	    \
 	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */    \
@@ -167,20 +164,21 @@
 	std	r0,RESULT(r1);		/* clear regs->result */	    \
 	ints;
 
-/* Variants for the "ints" argument */
+/* Variants for the "ints" argument. This one does nothing when we want
+ * to keep interrupts in their original state
+ */
 #define INTS_KEEP
-#define INTS_DISABLE_SOFT						    \
-	stb	r0,PACASOFTIRQEN(r13);	/* mark interrupts soft-disabled */ \
-	TRACE_DISABLE_INTS;
-#define INTS_DISABLE_HARD						    \
-	stb	r0,PACAHARDIRQEN(r13); /* and hard disabled */
-#define INTS_DISABLE_ALL						    \
-	INTS_DISABLE_SOFT						    \
-	INTS_DISABLE_HARD
-
-/* This is called by exceptions that used INTS_KEEP (that is did not clear
- * neither soft nor hard IRQ indicators in the PACA. This will restore MSR:EE
- * to it's previous value
+
+/* This second version is meant for exceptions that don't immediately
+ * hard-enable. We set a bit in paca->irq_happened to ensure that
+ * a subsequent call to arch_local_irq_restore() will properly
+ * hard-enable and avoid the fast-path
+ */
+#define INTS_DISABLE	SOFT_DISABLE_INTS(r3,r4)
+
+/* This is called by exceptions that used INTS_KEEP (that did not touch
+ * irq indicators in the PACA). This will restore MSR:EE to its previous
+ * value
  *
  * XXX In the long run, we may want to open-code it in order to separate the
  * load from the wrtee, thus limiting the latency caused by the dependency
@@ -238,7 +236,7 @@ exc_##n##_bad_stack:						    \
 #define MASKABLE_EXCEPTION(trapnum, label, hdlr, ack)			\
 	START_EXCEPTION(label);						\
 	NORMAL_EXCEPTION_PROLOG(trapnum, PROLOG_ADDITION_MASKABLE)	\
-	EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE_ALL)		\
+	EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE)		\
	ack(r8);							\
 	CHECK_NAPPING();						\
 	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
@@ -289,7 +287,7 @@ interrupt_end_book3e:
 /* Critical Input Interrupt */
 	START_EXCEPTION(critical_input);
 	CRIT_EXCEPTION_PROLOG(0x100, PROLOG_ADDITION_NONE)
-//	EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE_ALL)
+//	EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE)
 //	bl	special_reg_save_crit
 //	CHECK_NAPPING();
 //	addi	r3,r1,STACK_FRAME_OVERHEAD
@@ -300,7 +298,7 @@ interrupt_end_book3e:
 /* Machine Check Interrupt */
 	START_EXCEPTION(machine_check);
 	CRIT_EXCEPTION_PROLOG(0x200, PROLOG_ADDITION_NONE)
-//	EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE_ALL)
+//	EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE)
 //	bl	special_reg_save_mc
 //	addi	r3,r1,STACK_FRAME_OVERHEAD
 //	CHECK_NAPPING();
@@ -313,7 +311,7 @@ interrupt_end_book3e:
 	NORMAL_EXCEPTION_PROLOG(0x300, PROLOG_ADDITION_2REGS)
 	mfspr	r14,SPRN_DEAR
 	mfspr	r15,SPRN_ESR
-	EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_DISABLE_ALL)
+	EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_DISABLE)
 	b	storage_fault_common
 
 /* Instruction Storage Interrupt */
@@ -321,7 +319,7 @@ interrupt_end_book3e:
 	NORMAL_EXCEPTION_PROLOG(0x400, PROLOG_ADDITION_2REGS)
 	li	r15,0
 	mr	r14,r10
-	EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_DISABLE_ALL)
+	EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_DISABLE)
 	b	storage_fault_common
 
 /* External Input Interrupt */
@@ -339,12 +337,11 @@ interrupt_end_book3e:
 	START_EXCEPTION(program);
 	NORMAL_EXCEPTION_PROLOG(0x700, PROLOG_ADDITION_1REG)
 	mfspr	r14,SPRN_ESR
-	EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE_SOFT)
+	EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE)
 	std	r14,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	ld	r14,PACA_EXGEN+EX_R14(r13)
 	bl	.save_nvgprs
-	INTS_RESTORE_HARD
 	bl	.program_check_exception
 	b	.ret_from_except
 
@@ -358,7 +355,7 @@ interrupt_end_book3e:
 	beq-	1f
 	bl	.load_up_fpu
 	b	fast_exception_return
-1:	INTS_DISABLE_ALL
+1:	INTS_DISABLE
 	bl	.save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.kernel_fp_unavailable_exception
@@ -373,7 +370,7 @@ interrupt_end_book3e:
 /* Watchdog Timer Interrupt */
 	START_EXCEPTION(watchdog);
 	CRIT_EXCEPTION_PROLOG(0x9f0, PROLOG_ADDITION_NONE)
-//	EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE_ALL)
+//	EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE)
 //	bl	special_reg_save_crit
 //	CHECK_NAPPING();
 //	addi	r3,r1,STACK_FRAME_OVERHEAD
@@ -392,7 +389,7 @@ interrupt_end_book3e:
 /* Auxiliary Processor Unavailable Interrupt */
 	START_EXCEPTION(ap_unavailable);
 	NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE)
-	EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_DISABLE_ALL)
+	EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_DISABLE)
 	bl	.save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.unknown_exception
@@ -450,7 +447,7 @@ interrupt_end_book3e:
 	mfspr	r15,SPRN_SPRG_CRIT_SCRATCH
 	mtspr	SPRN_SPRG_GEN_SCRATCH,r15
 	mfspr	r14,SPRN_DBSR
-	EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE_ALL)
+	EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE)
 	std	r14,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	mr	r4,r14
@@ -465,7 +462,7 @@ kernel_dbg_exc:
 
 /* Debug exception as a debug interrupt*/
 	START_EXCEPTION(debug_debug);
-	DBG_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS)
+	DBG_EXCEPTION_PROLOG(0xd08, PROLOG_ADDITION_2REGS)
 
 	/*
	 * If there is a single step or branch-taken exception in an
@@ -515,7 +512,7 @@ kernel_dbg_exc:
 	mfspr	r15,SPRN_SPRG_DBG_SCRATCH
 	mtspr	SPRN_SPRG_GEN_SCRATCH,r15
 	mfspr	r14,SPRN_DBSR
-	EXCEPTION_COMMON(0xd00, PACA_EXDBG, INTS_DISABLE_ALL)
+	EXCEPTION_COMMON(0xd08, PACA_EXDBG, INTS_DISABLE)
 	std	r14,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	mr	r4,r14
@@ -525,21 +522,20 @@ kernel_dbg_exc:
 	bl	.DebugException
 	b	.ret_from_except
 
-	MASKABLE_EXCEPTION(0x260, perfmon, .performance_monitor_exception, ACK_NONE)
-
-/* Doorbell interrupt */
-	START_EXCEPTION(doorbell)
-	NORMAL_EXCEPTION_PROLOG(0x2070, PROLOG_ADDITION_DOORBELL)
-	EXCEPTION_COMMON(0x2070, PACA_EXGEN, INTS_DISABLE_ALL)
-	CHECK_NAPPING()
+	START_EXCEPTION(perfmon);
+	NORMAL_EXCEPTION_PROLOG(0x260, PROLOG_ADDITION_NONE)
+	EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.doorbell_exception
+	bl	.performance_monitor_exception
 	b	.ret_from_except_lite
 
+/* Doorbell interrupt */
+	MASKABLE_EXCEPTION(0x280, doorbell, .doorbell_exception, ACK_NONE)
+
 /* Doorbell critical Interrupt */
 	START_EXCEPTION(doorbell_crit);
-	CRIT_EXCEPTION_PROLOG(0x2080, PROLOG_ADDITION_NONE)
-//	EXCEPTION_COMMON(0x2080, PACA_EXCRIT, INTS_DISABLE_ALL)
+	CRIT_EXCEPTION_PROLOG(0x2a0, PROLOG_ADDITION_NONE)
+//	EXCEPTION_COMMON(0x2a0, PACA_EXCRIT, INTS_DISABLE)
 //	bl	special_reg_save_crit
 //	CHECK_NAPPING();
 //	addi	r3,r1,STACK_FRAME_OVERHEAD
@@ -547,36 +543,114 @@ kernel_dbg_exc:
 //	b	ret_from_crit_except
 	b	.
 
+/* Guest Doorbell */
 	MASKABLE_EXCEPTION(0x2c0, guest_doorbell, .unknown_exception, ACK_NONE)
-	MASKABLE_EXCEPTION(0x2e0, guest_doorbell_crit, .unknown_exception, ACK_NONE)
-	MASKABLE_EXCEPTION(0x310, hypercall, .unknown_exception, ACK_NONE)
-	MASKABLE_EXCEPTION(0x320, ehpriv, .unknown_exception, ACK_NONE)
 
+/* Guest Doorbell critical Interrupt */
+	START_EXCEPTION(guest_doorbell_crit);
+	CRIT_EXCEPTION_PROLOG(0x2e0, PROLOG_ADDITION_NONE)
+//	EXCEPTION_COMMON(0x2e0, PACA_EXCRIT, INTS_DISABLE)
+//	bl	special_reg_save_crit
+//	CHECK_NAPPING();
+//	addi	r3,r1,STACK_FRAME_OVERHEAD
+//	bl	.guest_doorbell_critical_exception
+//	b	ret_from_crit_except
+	b	.
+
+/* Hypervisor call */
+	START_EXCEPTION(hypercall);
+	NORMAL_EXCEPTION_PROLOG(0x310, PROLOG_ADDITION_NONE)
+	EXCEPTION_COMMON(0x310, PACA_EXGEN, INTS_KEEP)
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.save_nvgprs
+	INTS_RESTORE_HARD
+	bl	.unknown_exception
+	b	.ret_from_except
+
+/* Embedded Hypervisor privileged */
+	START_EXCEPTION(ehpriv);
+	NORMAL_EXCEPTION_PROLOG(0x320, PROLOG_ADDITION_NONE)
+	EXCEPTION_COMMON(0x320, PACA_EXGEN, INTS_KEEP)
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.save_nvgprs
+	INTS_RESTORE_HARD
+	bl	.unknown_exception
+	b	.ret_from_except
 
 /*
- * An interrupt came in while soft-disabled; clear EE in SRR1,
- * clear paca->hard_enabled and return.
+ * An interrupt came in while soft-disabled; we mark paca->irq_happened
+ * accordingly and, if the interrupt is level-sensitive, we hard disable
  */
-masked_doorbell_book3e:
-	mtcr	r10
-	/* Resend the doorbell to fire again when ints enabled */
-	mfspr	r10,SPRN_PIR
-	PPC_MSGSND(r10)
-	b	masked_interrupt_book3e_common
 
-masked_interrupt_book3e:
+masked_interrupt_book3e_0x500:
+	/* XXX When adding support for EPR, use PACA_IRQ_EE_EDGE */
+	li	r11,PACA_IRQ_EE
+	b	masked_interrupt_book3e_full_mask
+
+masked_interrupt_book3e_0x900:
+	ACK_DEC(r11);
+	li	r11,PACA_IRQ_DEC
+	b	masked_interrupt_book3e_no_mask
+masked_interrupt_book3e_0x980:
+	ACK_FIT(r11);
+	li	r11,PACA_IRQ_DEC
+	b	masked_interrupt_book3e_no_mask
+masked_interrupt_book3e_0x280:
+masked_interrupt_book3e_0x2c0:
+	li	r11,PACA_IRQ_DBELL
+	b	masked_interrupt_book3e_no_mask
+
+masked_interrupt_book3e_no_mask:
 	mtcr	r10
-masked_interrupt_book3e_common:
-	stb	r11,PACAHARDIRQEN(r13)
+	lbz	r10,PACAIRQHAPPENED(r13)
+	or	r10,r10,r11
+	stb	r10,PACAIRQHAPPENED(r13)
+	b	1f
+masked_interrupt_book3e_full_mask:
+	mtcr	r10
+	lbz	r10,PACAIRQHAPPENED(r13)
+	or	r10,r10,r11
+	stb	r10,PACAIRQHAPPENED(r13)
 	mfspr	r10,SPRN_SRR1
 	rldicl	r11,r10,48,1		/* clear MSR_EE */
 	rotldi	r10,r11,16
 	mtspr	SPRN_SRR1,r10
-	ld	r10,PACA_EXGEN+EX_R10(r13);	/* restore registers */
+1:	ld	r10,PACA_EXGEN+EX_R10(r13);
 	ld	r11,PACA_EXGEN+EX_R11(r13);
 	mfspr	r13,SPRN_SPRG_GEN_SCRATCH;
 	rfi
 	b	.
+
+/*
+ * Called from arch_local_irq_enable when an interrupt needs
+ * to be resent. r3 contains either 0x500, 0x900, 0x260 or 0x280
+ * to indicate the kind of interrupt. MSR:EE is already off.
+ * We generate a stackframe as if a real interrupt had happened.
+ *
+ * Note: While MSR:EE is off, we need to make sure that _MSR
+ * in the generated frame has EE set to 1 or the exception
+ * handler will not properly re-enable them.
+ */
+_GLOBAL(__replay_interrupt)
+	/* We are going to jump to the exception common code which
+	 * will retrieve various register values from the PACA which
+	 * we don't give a damn about.
+	 */
+	mflr	r10
+	mfmsr	r11
+	mfcr	r4
+	mtspr	SPRN_SPRG_GEN_SCRATCH,r13;
+	std	r1,PACA_EXGEN+EX_R1(r13);
+	stw	r4,PACA_EXGEN+EX_CR(r13);
+	ori	r11,r11,MSR_EE
+	subi	r1,r1,INT_FRAME_SIZE;
+	cmpwi	cr0,r3,0x500
+	beq	exc_0x500_common
+	cmpwi	cr0,r3,0x900
+	beq	exc_0x900_common
+	cmpwi	cr0,r3,0x280
+	beq	exc_0x280_common
+	blr
 
 /*
  * This is called from 0x300 and 0x400 handlers after the prologs with
@@ -679,6 +753,8 @@ BAD_STACK_TRAMPOLINE(0x000)
 BAD_STACK_TRAMPOLINE(0x100)
 BAD_STACK_TRAMPOLINE(0x200)
 BAD_STACK_TRAMPOLINE(0x260)
+BAD_STACK_TRAMPOLINE(0x280)
+BAD_STACK_TRAMPOLINE(0x2a0)
 BAD_STACK_TRAMPOLINE(0x2c0)
 BAD_STACK_TRAMPOLINE(0x2e0)
 BAD_STACK_TRAMPOLINE(0x300)
@@ -696,11 +772,10 @@ BAD_STACK_TRAMPOLINE(0xa00)
 BAD_STACK_TRAMPOLINE(0xb00)
 BAD_STACK_TRAMPOLINE(0xc00)
 BAD_STACK_TRAMPOLINE(0xd00)
+BAD_STACK_TRAMPOLINE(0xd08)
 BAD_STACK_TRAMPOLINE(0xe00)
 BAD_STACK_TRAMPOLINE(0xf00)
 BAD_STACK_TRAMPOLINE(0xf20)
-BAD_STACK_TRAMPOLINE(0x2070)
-BAD_STACK_TRAMPOLINE(0x2080)
 
 .globl bad_stack_book3e
 bad_stack_book3e:
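The split between the _no_mask and _full_mask exits above is the level-vs-edge distinction: the decrementer and FIT are acked right in the stub (ACK_DEC/ACK_FIT) and doorbells are message-based, so none of them will refire and MSR:EE can stay untouched; the external input at 0x500 is level-sensitive and would refire immediately, so its stub also clears MSR:EE in SRR1 until the replay. A small C sketch of that bookkeeping (illustrative; constants mirror hw_irq.h above):

    #define PACA_IRQ_DBELL	0x02
    #define PACA_IRQ_EE		0x04
    #define PACA_IRQ_DEC	0x08
    #define MSR_EE		0x8000UL

    /* Sketch: what each masked_interrupt_book3e_* stub records */
    static void masked_book3e(unsigned int vec, unsigned char *happened,
    			  unsigned long *srr1)
    {
    	switch (vec) {
    	case 0x500:			/* external: level-sensitive */
    		*happened |= PACA_IRQ_EE;
    		*srr1 &= ~MSR_EE;	/* stay hard-disabled on return */
    		break;
    	case 0x900:			/* decrementer (acked in stub) */
    	case 0x980:			/* fixed-interval timer (acked) */
    		*happened |= PACA_IRQ_DEC;
    		break;
    	case 0x280:			/* doorbell */
    	case 0x2c0:			/* guest doorbell */
    		*happened |= PACA_IRQ_DBELL;
    		break;
    	}
    }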
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 02448ea58ad3..2d0868a4e2f0 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -12,6 +12,7 @@
  *
  */
 
+#include <asm/hw_irq.h>
 #include <asm/exception-64s.h>
 #include <asm/ptrace.h>
 
@@ -356,34 +357,60 @@ do_stab_bolted_pSeries:
 	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
 
 /*
- * An interrupt came in while soft-disabled; clear EE in SRR1,
- * clear paca->hard_enabled and return.
+ * An interrupt came in while soft-disabled. We set paca->irq_happened,
+ * then, if it was a decrementer interrupt, we bump the dec to max
+ * and return, else we hard disable and return. This is called with
+ * r10 containing the value to OR to the paca field.
  */
-masked_interrupt:
-	stb	r10,PACAHARDIRQEN(r13)
-	mtcrf	0x80,r9
-	ld	r9,PACA_EXGEN+EX_R9(r13)
-	mfspr	r10,SPRN_SRR1
-	rldicl	r10,r10,48,1		/* clear MSR_EE */
-	rotldi	r10,r10,16
-	mtspr	SPRN_SRR1,r10
-	ld	r10,PACA_EXGEN+EX_R10(r13)
-	GET_SCRATCH0(r13)
-	rfid
+#define MASKED_INTERRUPT(_H)				\
+masked_##_H##interrupt:					\
+	std	r11,PACA_EXGEN+EX_R11(r13);		\
+	lbz	r11,PACAIRQHAPPENED(r13);		\
+	or	r11,r11,r10;				\
+	stb	r11,PACAIRQHAPPENED(r13);		\
+	andi.	r10,r10,PACA_IRQ_DEC;			\
+	beq	1f;					\
+	lis	r10,0x7fff;				\
+	ori	r10,r10,0xffff;				\
+	mtspr	SPRN_DEC,r10;				\
+	b	2f;					\
+1:	mfspr	r10,SPRN_##_H##SRR1;			\
+	rldicl	r10,r10,48,1; /* clear MSR_EE */	\
+	rotldi	r10,r10,16;				\
+	mtspr	SPRN_##_H##SRR1,r10;			\
+2:	mtcrf	0x80,r9;				\
+	ld	r9,PACA_EXGEN+EX_R9(r13);		\
+	ld	r10,PACA_EXGEN+EX_R10(r13);		\
+	ld	r11,PACA_EXGEN+EX_R11(r13);		\
+	GET_SCRATCH0(r13);				\
+	##_H##rfid;					\
 	b	.
+
+	MASKED_INTERRUPT()
+	MASKED_INTERRUPT(H)
 
-masked_Hinterrupt:
-	stb	r10,PACAHARDIRQEN(r13)
-	mtcrf	0x80,r9
-	ld	r9,PACA_EXGEN+EX_R9(r13)
-	mfspr	r10,SPRN_HSRR1
-	rldicl	r10,r10,48,1		/* clear MSR_EE */
-	rotldi	r10,r10,16
-	mtspr	SPRN_HSRR1,r10
-	ld	r10,PACA_EXGEN+EX_R10(r13)
-	GET_SCRATCH0(r13)
-	hrfid
-	b	.
+/*
+ * Called from arch_local_irq_enable when an interrupt needs
+ * to be resent. r3 contains 0x500 or 0x900 to indicate which
+ * kind of interrupt. MSR:EE is already off. We generate a
+ * stackframe as if a real interrupt had happened.
+ *
+ * Note: While MSR:EE is off, we need to make sure that _MSR
+ * in the generated frame has EE set to 1 or the exception
+ * handler will not properly re-enable them.
+ */
+_GLOBAL(__replay_interrupt)
+	/* We are going to jump to the exception common code which
+	 * will retrieve various register values from the PACA which
+	 * we don't give a damn about, so we don't bother storing them.
+	 */
+	mfmsr	r12
+	mflr	r11
+	mfcr	r9
+	ori	r12,r12,MSR_EE
+	andi.	r3,r3,0x0800
+	bne	decrementer_common
+	b	hardware_interrupt_common
 
 #ifdef CONFIG_PPC_PSERIES
 /*
@@ -793,7 +820,8 @@ vsx_unavailable_common:
 	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
 #ifdef CONFIG_VSX
 BEGIN_FTR_SECTION
-	bne	.load_up_vsx
+	beq	1f
+	b	.load_up_vsx
 1:
 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 #endif
@@ -808,65 +836,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 __end_handlers:
 
 /*
- * Return from an exception with minimal checks.
- * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
- * If interrupts have been enabled, or anything has been
- * done that might have changed the scheduling status of
- * any task or sent any task a signal, you should use
- * ret_from_except or ret_from_except_lite instead of this.
- */
-fast_exc_return_irq:			/* restores irq state too */
-	ld	r3,SOFTE(r1)
-	TRACE_AND_RESTORE_IRQ(r3);
-	ld	r12,_MSR(r1)
-	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
-	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
-	b	1f
-
-	.globl	fast_exception_return
-fast_exception_return:
-	ld	r12,_MSR(r1)
-1:	ld	r11,_NIP(r1)
-	andi.	r3,r12,MSR_RI		/* check if RI is set */
-	beq-	unrecov_fer
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-	andi.	r3,r12,MSR_PR
-	beq	2f
-	ACCOUNT_CPU_USER_EXIT(r3, r4)
-2:
-#endif
-
-	ld	r3,_CCR(r1)
-	ld	r4,_LINK(r1)
-	ld	r5,_CTR(r1)
-	ld	r6,_XER(r1)
-	mtcr	r3
-	mtlr	r4
-	mtctr	r5
-	mtxer	r6
-	REST_GPR(0, r1)
-	REST_8GPRS(2, r1)
-
-	ld	r10,PACAKMSR(r13)
-	clrrdi	r10,r10,2		/* clear RI */
-	mtmsrd	r10,1
-
-	mtspr	SPRN_SRR1,r12
-	mtspr	SPRN_SRR0,r11
-	REST_4GPRS(10, r1)
-	ld	r1,GPR1(r1)
-	rfid
-	b	.	/* prevent speculative execution */
-
-unrecov_fer:
-	bl	.save_nvgprs
-1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unrecoverable_exception
-	b	1b
-
-
-/*
  * Hash table stuff
  */
 	.align	7
@@ -905,19 +874,16 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
 	 * r4 contains the required access permissions
 	 * r5 contains the trap number
 	 *
-	 * at return r3 = 0 for success
+	 * at return r3 = 0 for success, 1 for page fault, negative for error
 	 */
 	bl	.hash_page		/* build HPTE if possible */
 	cmpdi	r3,0			/* see if hash_page succeeded */
 
-	/*
-	 * Here we have interrupts hard-disabled, so it is sufficient
-	 * to restore paca->{soft,hard}_enable and get out.
-	 */
+	/* Success */
 	beq	fast_exc_return_irq	/* Return from exception on success */
 
-	/* For a hash failure, we don't bother re-enabling interrupts */
-	ble-	13f
+	/* Error */
+	blt-	13f
 
 	/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
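One subtlety in MASKED_INTERRUPT above: a masked decrementer is not dealt with by clearing MSR:EE. The stub instead reloads SPRN_DEC with 0x7fffffff so the auto-acked decrementer simply cannot fire again before the replay, while a masked external interrupt, being level-sensitive, does get MSR:EE cleared in (H)SRR1. A sketch of that policy (illustrative; mtspr_dec() is a hypothetical stand-in for mtspr SPRN_DEC):

    #define PACA_IRQ_DEC	0x08
    #define MSR_EE		0x8000UL

    /* Sketch of the 64s masked-interrupt policy */
    static void masked_64s(unsigned char irq_bit, unsigned long *srr1)
    {
    	if (irq_bit & PACA_IRQ_DEC)
    		mtspr_dec(0x7fffffff);	/* hypothetical: push next tick out */
    	else
    		*srr1 &= ~MSR_EE;	/* level irq: stay hard-disabled */
    }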
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 40759fbfb171..58bddee8e1e8 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -38,6 +38,7 @@
 #include <asm/irqflags.h>
 #include <asm/kvm_book3s_asm.h>
 #include <asm/ptrace.h>
+#include <asm/hw_irq.h>
 
 /* The physical memory is laid out such that the secondary processor
  * spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -550,7 +551,8 @@ _GLOBAL(pmac_secondary_start)
 	 */
 	li	r0,0
 	stb	r0,PACASOFTIRQEN(r13)
-	stb	r0,PACAHARDIRQEN(r13)
+	li	r0,PACA_IRQ_HARD_DIS
+	stb	r0,PACAIRQHAPPENED(r13)
 
 	/* Create a temp kernel stack for use before relocation is on.	*/
 	ld	r1,PACAEMERGSP(r13)
@@ -601,9 +603,12 @@ __secondary_start:
 	li	r7,0
 	mtlr	r7
 
-	/* Mark interrupts both hard and soft disabled */
-	stb	r7,PACAHARDIRQEN(r13)
+	/* Mark interrupts soft and hard disabled (they might be enabled
+	 * in the PACA when doing hotplug)
+	 */
 	stb	r7,PACASOFTIRQEN(r13)
+	li	r0,PACA_IRQ_HARD_DIS
+	stb	r0,PACAIRQHAPPENED(r13)
 
 	/* enable MMU and jump to start_secondary */
 	LOAD_REG_ADDR(r3, .start_secondary_prolog)
@@ -750,13 +755,18 @@ _INIT_GLOBAL(start_here_common)
 	/* Load the TOC (virtual address) */
 	ld	r2,PACATOC(r13)
 
+	/* Do more system initializations in virtual mode */
 	bl	.setup_system
 
-	/* Load up the kernel context */
-5:	li	r5,0
-	stb	r5,PACASOFTIRQEN(r13)	/* Soft Disabled */
-	stb	r5,PACAHARDIRQEN(r13)	/* Hard Disabled on others */
+	/* Mark interrupts soft and hard disabled (they might be enabled
+	 * in the PACA when doing hotplug)
+	 */
+	li	r0,0
+	stb	r0,PACASOFTIRQEN(r13)
+	li	r0,PACA_IRQ_HARD_DIS
+	stb	r0,PACAIRQHAPPENED(r13)
 
+	/* Generic kernel entry */
 	bl	.start_kernel
 
 	/* Not reached */
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 0a48bf5db6c8..8f7a2b62863d 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -84,7 +84,11 @@ void cpu_idle(void)
 
 		start_critical_timings();
 
-		local_irq_enable();
+		/* Some power_save functions return with
+		 * interrupts enabled, some don't.
+		 */
+		if (irqs_disabled())
+			local_irq_enable();
 		set_thread_flag(TIF_POLLING_NRFLAG);
 
 	} else {
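The conditional exists because, after this patch, the reworked idle paths (power4 nap and book3e idle below) return from an interrupt wake-up with interrupts already enabled, while others still return hard-disabled. A hedged sketch of the resulting power_save contract (the nap helper is hypothetical):

    /* Illustrative power_save shape after this patch, not kernel code */
    static void example_power_save(void)
    {
    	hard_irq_disable();		/* latches PACA_IRQ_HARD_DIS */

    	/* An event was latched while soft-disabled: bail out with
    	 * interrupts still off; cpu_idle() will enable and replay. */
    	if (get_paca()->irq_happened)
    		return;

    	soft_enable_and_nap();		/* hypothetical: hw-specific nap;
    					 * wakes with interrupts enabled */
    }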
diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S index 16c002d6bdf1..ff007b59448d 100644 --- a/arch/powerpc/kernel/idle_book3e.S +++ b/arch/powerpc/kernel/idle_book3e.S | |||
@@ -29,43 +29,30 @@ _GLOBAL(book3e_idle) | |||
29 | wrteei 0 | 29 | wrteei 0 |
30 | 30 | ||
31 | /* Now check if an interrupt came in while we were soft disabled | 31 | /* Now check if an interrupt came in while we were soft disabled |
32 | * since we may otherwise lose it (doorbells etc...). We know | 32 | * since we may otherwise lose it (doorbells etc...). |
33 | * that since PACAHARDIRQEN will have been cleared in that case. | ||
34 | */ | 33 | */ |
35 | lbz r3,PACAHARDIRQEN(r13) | 34 | lbz r3,PACAIRQHAPPENED(r13) |
36 | cmpwi cr0,r3,0 | 35 | cmpwi cr0,r3,0 |
37 | beqlr | 36 | bnelr |
38 | 37 | ||
39 | /* Now we are going to mark ourselves as soft and hard enables in | 38 | /* Now we are going to mark ourselves as soft and hard enabled in |
40 | * order to be able to take interrupts while asleep. We inform lockdep | 39 | * order to be able to take interrupts while asleep. We inform lockdep |
41 | * of that. We don't actually turn interrupts on just yet tho. | 40 | * of that. We don't actually turn interrupts on just yet tho. |
42 | */ | 41 | */ |
43 | #ifdef CONFIG_TRACE_IRQFLAGS | 42 | #ifdef CONFIG_TRACE_IRQFLAGS |
44 | stdu r1,-128(r1) | 43 | stdu r1,-128(r1) |
45 | bl .trace_hardirqs_on | 44 | bl .trace_hardirqs_on |
45 | addi r1,r1,128 | ||
46 | #endif | 46 | #endif |
47 | li r0,1 | 47 | li r0,1 |
48 | stb r0,PACASOFTIRQEN(r13) | 48 | stb r0,PACASOFTIRQEN(r13) |
49 | stb r0,PACAHARDIRQEN(r13) | ||
50 | 49 | ||
51 | /* Interrupts will make us return to LR, so get something we want | 50 | /* Interrupts will make us return to LR, so get something we want |
52 | * in there | 51 | * in there |
53 | */ | 52 | */ |
54 | bl 1f | 53 | bl 1f |
55 | 54 | ||
56 | /* Hard disable interrupts again */ | 55 | /* And return (interrupts are on) */ |
57 | wrteei 0 | ||
58 | |||
59 | /* Mark them off again in the PACA as well */ | ||
60 | li r0,0 | ||
61 | stb r0,PACASOFTIRQEN(r13) | ||
62 | stb r0,PACAHARDIRQEN(r13) | ||
63 | |||
64 | /* Tell lockdep about it */ | ||
65 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
66 | bl .trace_hardirqs_off | ||
67 | addi r1,r1,128 | ||
68 | #endif | ||
69 | ld r0,16(r1) | 56 | ld r0,16(r1) |
70 | mtlr r0 | 57 | mtlr r0 |
71 | blr | 58 | blr |
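The sequence above is the idle-entry pattern this series uses everywhere (the power4 and power7 paths below repeat it): hard-disable with a raw instruction that does not touch the latch, then consult the latch. A hedged C rendering, with invented helper names:

	/* Sketch of the entry check in book3e_idle above */
	raw_msr_ee_off();			/* wrteei 0: EE off, latch untouched */
	if (local_paca->irq_happened)		/* an event fired while soft-disabled */
		return;				/* bail; the next irq restore replays it */
	local_paca->soft_enabled = 1;		/* safe to take the wakeup interrupt */

Because the latch is only tested after EE is off, there is no window in which a doorbell or decrementer event can slip in between the test and the sleep instruction and be lost.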
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S index ba3195478600..d8cdba4c28b2 100644 --- a/arch/powerpc/kernel/idle_power4.S +++ b/arch/powerpc/kernel/idle_power4.S | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <asm/thread_info.h> | 14 | #include <asm/thread_info.h> |
15 | #include <asm/ppc_asm.h> | 15 | #include <asm/ppc_asm.h> |
16 | #include <asm/asm-offsets.h> | 16 | #include <asm/asm-offsets.h> |
17 | #include <asm/irqflags.h> | ||
17 | 18 | ||
18 | #undef DEBUG | 19 | #undef DEBUG |
19 | 20 | ||
@@ -29,14 +30,31 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP) | |||
29 | cmpwi 0,r4,0 | 30 | cmpwi 0,r4,0 |
30 | beqlr | 31 | beqlr |
31 | 32 | ||
32 | /* Go to NAP now */ | 33 | /* Hard disable interrupts */ |
33 | mfmsr r7 | 34 | mfmsr r7 |
34 | rldicl r0,r7,48,1 | 35 | rldicl r0,r7,48,1 |
35 | rotldi r0,r0,16 | 36 | rotldi r0,r0,16 |
36 | mtmsrd r0,1 /* hard-disable interrupts */ | 37 | mtmsrd r0,1 |
38 | |||
39 | /* Check if something happened while soft-disabled */ | ||
40 | lbz r0,PACAIRQHAPPENED(r13) | ||
41 | cmpwi cr0,r0,0 | ||
42 | bnelr | ||
43 | |||
44 | /* Soft-enable interrupts */ | ||
45 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
46 | mflr r0 | ||
47 | std r0,16(r1) | ||
48 | stdu r1,-128(r1) | ||
49 | bl .trace_hardirqs_on | ||
50 | addi r1,r1,128 | ||
51 | ld r0,16(r1) | ||
52 | mtlr r0 | ||
53 | mfmsr r7 | ||
54 | #endif /* CONFIG_TRACE_IRQFLAGS */ | ||
55 | | ||
37 | li r0,1 | 56 | li r0,1 |
38 | stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */ | 57 | stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */ |
39 | stb r0,PACAHARDIRQEN(r13) | ||
40 | BEGIN_FTR_SECTION | 58 | BEGIN_FTR_SECTION |
41 | DSSALL | 59 | DSSALL |
42 | sync | 60 | sync |
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index fcdff198da4b..0cdc9a392839 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * This file contains the power_save function for 970-family CPUs. | 2 | * This file contains the power_save function for Power7 CPUs. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or | 4 | * This program is free software; you can redistribute it and/or |
5 | * modify it under the terms of the GNU General Public License | 5 | * modify it under the terms of the GNU General Public License |
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/ppc_asm.h> | 15 | #include <asm/ppc_asm.h> |
16 | #include <asm/asm-offsets.h> | 16 | #include <asm/asm-offsets.h> |
17 | #include <asm/ppc-opcode.h> | 17 | #include <asm/ppc-opcode.h> |
18 | #include <asm/hw_irq.h> | ||
18 | 19 | ||
19 | #undef DEBUG | 20 | #undef DEBUG |
20 | 21 | ||
@@ -51,9 +52,25 @@ _GLOBAL(power7_idle) | |||
51 | rldicl r9,r9,48,1 | 52 | rldicl r9,r9,48,1 |
52 | rotldi r9,r9,16 | 53 | rotldi r9,r9,16 |
53 | mtmsrd r9,1 /* hard-disable interrupts */ | 54 | mtmsrd r9,1 /* hard-disable interrupts */ |
55 | |||
56 | /* Check if something happened while soft-disabled */ | ||
57 | lbz r0,PACAIRQHAPPENED(r13) | ||
58 | cmpwi cr0,r0,0 | ||
59 | beq 1f | ||
60 | addi r1,r1,INT_FRAME_SIZE | ||
61 | ld r0,16(r1) | ||
62 | mtlr r0 | ||
63 | blr | ||
64 | |||
65 | 1: /* We mark irqs hard disabled as this is the state we'll | ||
66 | * be in when returning and we need to tell arch_local_irq_restore() | ||
67 | * about it | ||
68 | */ | ||
69 | li r0,PACA_IRQ_HARD_DIS | ||
70 | stb r0,PACAIRQHAPPENED(r13) | ||
71 | |||
72 | /* We haven't lost state ... yet */ | ||
54 | li r0,0 | 73 | li r0,0 |
55 | stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */ | ||
56 | stb r0,PACAHARDIRQEN(r13) | ||
57 | stb r0,PACA_NAPSTATELOST(r13) | 74 | stb r0,PACA_NAPSTATELOST(r13) |
58 | 75 | ||
59 | /* Continue saving state */ | 76 | /* Continue saving state */ |
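Unlike the lighter naps, power7_idle records PACA_IRQ_HARD_DIS before sleeping: the CPU will wake up hard-disabled, and arch_local_irq_restore() needs the latch to say so, as the comment above spells out. The contract, in illustrative C (field names assumed from the PACAIRQHAPPENED/PACA_NAPSTATELOST offsets):

	/* What the wakeup path will find, per the stores above */
	local_paca->irq_happened = PACA_IRQ_HARD_DIS;	/* hard-disabled at wakeup */
	local_paca->nap_state_lost = 0;			/* nothing lost ... yet */

The early-exit branch instead pops the INT_FRAME_SIZE stack frame and returns straight to the caller, leaving the already-latched event for the next interrupt restore to replay.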
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 9b6e80668cfb..eb804e15b29b 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -95,14 +95,14 @@ extern int tau_interrupts(int); | |||
95 | 95 | ||
96 | int distribute_irqs = 1; | 96 | int distribute_irqs = 1; |
97 | 97 | ||
98 | static inline notrace unsigned long get_hard_enabled(void) | 98 | static inline notrace unsigned long get_irq_happened(void) |
99 | { | 99 | { |
100 | unsigned long enabled; | 100 | unsigned long happened; |
101 | 101 | ||
102 | __asm__ __volatile__("lbz %0,%1(13)" | 102 | __asm__ __volatile__("lbz %0,%1(13)" |
103 | : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled))); | 103 | : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened))); |
104 | 104 | ||
105 | return enabled; | 105 | return happened; |
106 | } | 106 | } |
107 | 107 | ||
108 | static inline notrace void set_soft_enabled(unsigned long enable) | 108 | static inline notrace void set_soft_enabled(unsigned long enable) |
@@ -111,88 +111,167 @@ static inline notrace void set_soft_enabled(unsigned long enable) | |||
111 | : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); | 111 | : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); |
112 | } | 112 | } |
113 | 113 | ||
114 | static inline notrace void decrementer_check_overflow(void) | 114 | static inline notrace int decrementer_check_overflow(void) |
115 | { | 115 | { |
116 | u64 now = get_tb_or_rtc(); | 116 | u64 now = get_tb_or_rtc(); |
117 | u64 *next_tb; | 117 | u64 *next_tb = &__get_cpu_var(decrementers_next_tb); |
118 | 118 | ||
119 | preempt_disable(); | ||
120 | next_tb = &__get_cpu_var(decrementers_next_tb); | ||
121 | |||
122 | if (now >= *next_tb) | 119 | if (now >= *next_tb) |
123 | set_dec(1); | 120 | set_dec(1); |
124 | preempt_enable(); | 121 | return now >= *next_tb; |
125 | } | 122 | } |
126 | 123 | ||
127 | notrace void arch_local_irq_restore(unsigned long en) | 124 | /* This is called whenever we are re-enabling interrupts |
125 | * and returns either 0 (nothing to do) or the vector number | ||
126 | * of the interrupt to re-emit: 0x500, 0x900 or, on Book3E, 0x280. | ||
127 | * | ||
128 | * This is called in two contexts: From arch_local_irq_restore() | ||
129 | * before soft-enabling interrupts, and from the exception exit | ||
130 | * path when returning from an interrupt from a soft-disabled to | ||
131 | * a soft-enabled context. In both cases we have interrupts hard | ||
132 | * disabled. | ||
133 | * | ||
134 | * We take care of only clearing the bits we handled in the | ||
135 | * PACA irq_happened field since we can only re-emit one at a | ||
136 | * time and we don't want to "lose" one. | ||
137 | */ | ||
138 | notrace unsigned int __check_irq_replay(void) | ||
128 | { | 139 | { |
129 | /* | 140 | /* |
130 | * get_paca()->soft_enabled = en; | 141 | * We use local_paca rather than get_paca() to avoid all |
131 | * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1? | 142 | * the debug_smp_processor_id() business in this low level |
132 | * That was allowed before, and in such a case we do need to take care | 143 | * function |
133 | * that gcc will set soft_enabled directly via r13, not choose to use | ||
134 | * an intermediate register, lest we're preempted to a different cpu. | ||
135 | */ | 144 | */ |
136 | set_soft_enabled(en); | 145 | unsigned char happened = local_paca->irq_happened; |
137 | if (!en) | ||
138 | return; | ||
139 | 146 | ||
140 | #ifdef CONFIG_PPC_STD_MMU_64 | 147 | /* Clear bit 0 which we wouldn't clear otherwise */ |
141 | if (firmware_has_feature(FW_FEATURE_ISERIES)) { | 148 | local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; |
142 | /* | 149 | |
143 | * Do we need to disable preemption here? Not really: in the | 150 | /* |
144 | * unlikely event that we're preempted to a different cpu in | 151 | * Force the delivery of pending soft-disabled interrupts on PS3. |
145 | * between getting r13, loading its lppaca_ptr, and loading | 152 | * Any HV call will have this side effect. |
146 | * its any_int, we might call iseries_handle_interrupts without | 153 | */ |
147 | * an interrupt pending on the new cpu, but that's no disaster, | 154 | if (firmware_has_feature(FW_FEATURE_PS3_LV1)) { |
148 | * is it? And the business of preempting us off the old cpu | 155 | u64 tmp, tmp2; |
149 | * would itself involve a local_irq_restore which handles the | 156 | lv1_get_version_info(&tmp, &tmp2); |
150 | * interrupt to that cpu. | ||
151 | * | ||
152 | * But use "local_paca->lppaca_ptr" instead of "get_lppaca()" | ||
153 | * to avoid any preemption checking added into get_paca(). | ||
154 | */ | ||
155 | if (local_paca->lppaca_ptr->int_dword.any_int) | ||
156 | iseries_handle_interrupts(); | ||
157 | } | 157 | } |
158 | #endif /* CONFIG_PPC_STD_MMU_64 */ | ||
159 | 158 | ||
160 | /* | 159 | /* |
161 | * if (get_paca()->hard_enabled) return; | 160 | * We may have missed a decrementer interrupt. We check the |
162 | * But again we need to take care that gcc gets hard_enabled directly | 161 | * decrementer itself rather than the paca irq_happened field |
163 | * via r13, not choose to use an intermediate register, lest we're | 162 | * in case we also had a rollover while hard disabled |
164 | * preempted to a different cpu in between the two instructions. | 163 | */ |
164 | local_paca->irq_happened &= ~PACA_IRQ_DEC; | ||
165 | if (decrementer_check_overflow()) | ||
166 | return 0x900; | ||
167 | |||
168 | /* Finally check if an external interrupt happened */ | ||
169 | local_paca->irq_happened &= ~PACA_IRQ_EE; | ||
170 | if (happened & PACA_IRQ_EE) | ||
171 | return 0x500; | ||
172 | |||
173 | #ifdef CONFIG_PPC_BOOK3E | ||
174 | /* Check next if an EPR external interrupt happened: | ||
175 | * this bit is typically set if we need to handle another | ||
176 | * "edge" interrupt from within the MPIC "EPR" handler | ||
177 | */ | ||
178 | local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE; | ||
179 | if (happened & PACA_IRQ_EE_EDGE) | ||
180 | return 0x500; | ||
181 | |||
182 | local_paca->irq_happened &= ~PACA_IRQ_DBELL; | ||
183 | if (happened & PACA_IRQ_DBELL) | ||
184 | return 0x280; | ||
185 | #endif /* CONFIG_PPC_BOOK3E */ | ||
186 | |||
187 | /* There should be nothing left! */ | ||
188 | BUG_ON(local_paca->irq_happened != 0); | ||
189 | |||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | notrace void arch_local_irq_restore(unsigned long en) | ||
194 | { | ||
195 | unsigned char irq_happened; | ||
196 | unsigned int replay; | ||
197 | |||
198 | /* Write the new soft-enabled value */ | ||
199 | set_soft_enabled(en); | ||
200 | if (!en) | ||
201 | return; | ||
202 | /* | ||
203 | * From this point onward, we can take interrupts, preempt, | ||
204 | * etc... unless we got hard-disabled. We check if an event | ||
205 | * happened. If none happened, we know we can just return. | ||
206 | * | ||
207 | * We may have preempted before the check below, in which case | ||
208 | * we are checking the "new" CPU instead of the old one. This | ||
209 | * is only a problem if an event happened on the "old" CPU. | ||
210 | * | ||
211 | * External interrupt events on non-iseries will have caused | ||
212 | * interrupts to be hard-disabled, so there is no problem, we | ||
213 | * cannot have preempted. | ||
214 | * | ||
215 | * That leaves us with EEs on iSeries or decrementer interrupts, | ||
216 | * which I decided to safely ignore. The preemption would have | ||
217 | * itself been the result of an interrupt, on whose return we | ||
218 | * will have checked for pending events on the old CPU. | ||
165 | */ | 219 | */ |
166 | if (get_hard_enabled()) | 220 | irq_happened = get_irq_happened(); |
221 | if (!irq_happened) | ||
167 | return; | 222 | return; |
168 | 223 | ||
169 | /* | 224 | /* |
170 | * Need to hard-enable interrupts here. Since currently disabled, | 225 | * We need to hard disable to get a trusted value from |
171 | * no need to take further asm precautions against preemption; but | 226 | * __check_irq_replay(). We also need to soft-disable |
172 | * use local_paca instead of get_paca() to avoid preemption checking. | 227 | * again to avoid warnings in there due to the use of |
228 | * per-cpu variables. | ||
229 | * | ||
230 | * We know that if the value in irq_happened is exactly 0x01 | ||
231 | * then we are already hard disabled (there are other less | ||
232 | * common cases that we'll ignore for now), so we skip the | ||
233 | * (expensive) mtmsrd. | ||
173 | */ | 234 | */ |
174 | local_paca->hard_enabled = en; | 235 | if (unlikely(irq_happened != PACA_IRQ_HARD_DIS)) |
236 | __hard_irq_disable(); | ||
237 | set_soft_enabled(0); | ||
175 | 238 | ||
176 | /* | 239 | /* |
177 | * Trigger the decrementer if we have a pending event. Some processors | 240 | * Check if anything needs to be re-emitted. We haven't |
178 | * only trigger on edge transitions of the sign bit. We might also | 241 | * soft-enabled yet to avoid warnings in decrementer_check_overflow |
179 | * have disabled interrupts long enough that the decrementer wrapped | 242 | * accessing per-cpu variables |
180 | * to positive. | ||
181 | */ | 243 | */ |
182 | decrementer_check_overflow(); | 244 | replay = __check_irq_replay(); |
245 | |||
246 | /* We can soft-enable now */ | ||
247 | set_soft_enabled(1); | ||
183 | 248 | ||
184 | /* | 249 | /* |
185 | * Force the delivery of pending soft-disabled interrupts on PS3. | 250 | * And replay if we have to. This will return with interrupts |
186 | * Any HV call will have this side effect. | 251 | * hard-enabled. |
187 | */ | 252 | */ |
188 | if (firmware_has_feature(FW_FEATURE_PS3_LV1)) { | 253 | if (replay) { |
189 | u64 tmp, tmp2; | 254 | __replay_interrupt(replay); |
190 | lv1_get_version_info(&tmp, &tmp2); | 255 | return; |
191 | } | 256 | } |
192 | 257 | ||
258 | /* Finally, let's ensure we are hard enabled */ | ||
193 | __hard_irq_enable(); | 259 | __hard_irq_enable(); |
194 | } | 260 | } |
195 | EXPORT_SYMBOL(arch_local_irq_restore); | 261 | EXPORT_SYMBOL(arch_local_irq_restore); |
262 | |||
263 | /* | ||
264 | * This is specifically called by assembly code to re-enable interrupts | ||
265 | * if they are currently disabled. This is typically called before | ||
266 | * schedule() or do_signal() when returning to userspace. We do it | ||
267 | * in C to avoid the burden of dealing with lockdep etc... | ||
268 | */ | ||
269 | void restore_interrupts(void) | ||
270 | { | ||
271 | if (irqs_disabled()) | ||
272 | local_irq_enable(); | ||
273 | } | ||
274 | |||
196 | #endif /* CONFIG_PPC64 */ | 275 | #endif /* CONFIG_PPC64 */ |
197 | 276 | ||
198 | int arch_show_interrupts(struct seq_file *p, int prec) | 277 | int arch_show_interrupts(struct seq_file *p, int prec) |
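Taken together, this is a small state machine: local_irq_disable() becomes a plain byte store, events arriving while soft-disabled are latched into irq_happened (leaving the CPU hard-disabled), and arch_local_irq_restore() replays them on the way back up. A self-contained user-space model of that flow (purely illustrative: the names mirror the kernel's, the bit values are assumed, and none of this is kernel code):

#include <stdio.h>

#define PACA_IRQ_HARD_DIS	0x01	/* assumed values, see hw_irq.h */
#define PACA_IRQ_EE		0x04

static unsigned char soft_enabled = 1, irq_happened;

static void external_interrupt(void)	/* hardware raises an EE */
{
	if (!soft_enabled) {		/* masked: latch it, leave EE off */
		irq_happened |= PACA_IRQ_EE | PACA_IRQ_HARD_DIS;
		return;
	}
	printf("EE handled immediately\n");
}

static void irq_restore(unsigned char en)	/* arch_local_irq_restore() */
{
	soft_enabled = en;
	if (!en || !irq_happened)
		return;			/* the common, cheap case */
	/* the real code hard-disables here and calls __check_irq_replay() */
	if (irq_happened & PACA_IRQ_EE)
		printf("replaying vector 0x500\n");
	irq_happened = 0;		/* all handled; hard-enable again */
}

int main(void)
{
	irq_restore(0);			/* local_irq_disable(): one byte store */
	external_interrupt();		/* fires while soft-disabled */
	irq_restore(1);			/* local_irq_enable(): notices and replays */
	return 0;
}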
@@ -360,8 +439,17 @@ void do_IRQ(struct pt_regs *regs) | |||
360 | 439 | ||
361 | check_stack_overflow(); | 440 | check_stack_overflow(); |
362 | 441 | ||
442 | /* | ||
443 | * Query the platform PIC for the interrupt & ack it. | ||
444 | * | ||
445 | * This will typically lower the interrupt line to the CPU | ||
446 | */ | ||
363 | irq = ppc_md.get_irq(); | 447 | irq = ppc_md.get_irq(); |
364 | 448 | ||
449 | /* We can hard enable interrupts now */ | ||
450 | may_hard_irq_enable(); | ||
451 | |||
452 | /* And finally process it */ | ||
365 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) | 453 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) |
366 | handle_one_irq(irq); | 454 | handle_one_irq(irq); |
367 | else if (irq != NO_IRQ_IGNORE) | 455 | else if (irq != NO_IRQ_IGNORE) |
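may_hard_irq_enable(), used above, is defined by this series in hw_irq.h, which is not part of this excerpt; a hedged sketch of its likely shape: once ppc_md.get_irq() has queried and acked the PIC, MSR[EE] can come back on, unless a latched external interrupt still awaits replay.

	/* Sketch only; the authoritative body lives in hw_irq.h */
	static inline void may_hard_irq_enable(void)
	{
		get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
		if (!(get_paca()->irq_happened & PACA_IRQ_EE))
			__hard_irq_enable();
	}

Re-enabling here, rather than at exception exit, keeps the window with MSR[EE] off down to little more than the PIC query itself.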
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index bf80a1d5f8fe..e40707032ac3 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -647,6 +647,9 @@ void show_regs(struct pt_regs * regs) | |||
647 | printk("MSR: "REG" ", regs->msr); | 647 | printk("MSR: "REG" ", regs->msr); |
648 | printbits(regs->msr, msr_bits); | 648 | printbits(regs->msr, msr_bits); |
649 | printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); | 649 | printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); |
650 | #ifdef CONFIG_PPC64 | ||
651 | printk("SOFTE: %ld\n", regs->softe); | ||
652 | #endif | ||
650 | trap = TRAP(regs); | 653 | trap = TRAP(regs); |
651 | if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) | 654 | if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) |
652 | printk("CFAR: "REG"\n", regs->orig_gpr3); | 655 | printk("CFAR: "REG"\n", regs->orig_gpr3); |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 567dd7c3ac2a..f81c81b92f0e 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -259,7 +259,6 @@ void accumulate_stolen_time(void) | |||
259 | u64 sst, ust; | 259 | u64 sst, ust; |
260 | 260 | ||
261 | u8 save_soft_enabled = local_paca->soft_enabled; | 261 | u8 save_soft_enabled = local_paca->soft_enabled; |
262 | u8 save_hard_enabled = local_paca->hard_enabled; | ||
263 | 262 | ||
264 | /* We are called early in the exception entry, before | 263 | /* We are called early in the exception entry, before |
265 | * soft/hard_enabled are sync'ed to the expected state | 264 | * soft/hard_enabled are sync'ed to the expected state |
@@ -268,7 +267,6 @@ void accumulate_stolen_time(void) | |||
268 | * complain | 267 | * complain |
269 | */ | 268 | */ |
270 | local_paca->soft_enabled = 0; | 269 | local_paca->soft_enabled = 0; |
271 | local_paca->hard_enabled = 0; | ||
272 | 270 | ||
273 | sst = scan_dispatch_log(local_paca->starttime_user); | 271 | sst = scan_dispatch_log(local_paca->starttime_user); |
274 | ust = scan_dispatch_log(local_paca->starttime); | 272 | ust = scan_dispatch_log(local_paca->starttime); |
@@ -277,7 +275,6 @@ void accumulate_stolen_time(void) | |||
277 | local_paca->stolen_time += ust + sst; | 275 | local_paca->stolen_time += ust + sst; |
278 | 276 | ||
279 | local_paca->soft_enabled = save_soft_enabled; | 277 | local_paca->soft_enabled = save_soft_enabled; |
280 | local_paca->hard_enabled = save_hard_enabled; | ||
281 | } | 278 | } |
282 | 279 | ||
283 | static inline u64 calculate_stolen_time(u64 stop_tb) | 280 | static inline u64 calculate_stolen_time(u64 stop_tb) |
@@ -580,6 +577,11 @@ void timer_interrupt(struct pt_regs * regs) | |||
580 | if (!cpu_online(smp_processor_id())) | 577 | if (!cpu_online(smp_processor_id())) |
581 | return; | 578 | return; |
582 | 579 | ||
580 | /* Conditionally hard-enable interrupts now that the DEC has been | ||
581 | * bumped to its maximum value | ||
582 | */ | ||
583 | may_hard_irq_enable(); | ||
584 | |||
583 | trace_timer_interrupt_entry(regs); | 585 | trace_timer_interrupt_entry(regs); |
584 | 586 | ||
585 | __get_cpu_var(irq_stat).timer_irqs++; | 587 | __get_cpu_var(irq_stat).timer_irqs++; |
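Ordering matters in timer_interrupt(): per the comment above, the DEC has already been re-armed to its maximum value on entry, and only then is it safe to conditionally hard-enable; otherwise the still-expired decrementer would retrigger 0x900 immediately. Schematically (DECREMENTER_MAX stands in for whatever constant the entry path actually uses):

	set_dec(DECREMENTER_MAX);	/* done earlier in the entry path */
	may_hard_irq_enable();		/* only now can MSR[EE] come back on */

The accumulate_stolen_time() hunks are the matching cleanup: the hard_enabled byte is gone, and only soft_enabled still needs to be forced off around the early dispatch-log scan.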
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c index 085fd3f45ad2..a12e95af6933 100644 --- a/arch/powerpc/platforms/pseries/processor_idle.c +++ b/arch/powerpc/platforms/pseries/processor_idle.c | |||
@@ -96,6 +96,20 @@ out: | |||
96 | return index; | 96 | return index; |
97 | } | 97 | } |
98 | 98 | ||
99 | static void check_and_cede_processor(void) | ||
100 | { | ||
101 | /* | ||
102 | * Interrupts are soft-disabled at this point, | ||
103 | * but not hard disabled. So an interrupt might have | ||
104 | * occurred before entering NAP, and would be potentially | ||
105 | * lost (edge events, decrementer events, etc...) unless | ||
106 | * we first hard disable then check. | ||
107 | */ | ||
108 | hard_irq_disable(); | ||
109 | if (get_paca()->irq_happened == 0) | ||
110 | cede_processor(); | ||
111 | } | ||
112 | |||
99 | static int dedicated_cede_loop(struct cpuidle_device *dev, | 113 | static int dedicated_cede_loop(struct cpuidle_device *dev, |
100 | struct cpuidle_driver *drv, | 114 | struct cpuidle_driver *drv, |
101 | int index) | 115 | int index) |
@@ -108,7 +122,7 @@ static int dedicated_cede_loop(struct cpuidle_device *dev, | |||
108 | 122 | ||
109 | ppc64_runlatch_off(); | 123 | ppc64_runlatch_off(); |
110 | HMT_medium(); | 124 | HMT_medium(); |
111 | cede_processor(); | 125 | check_and_cede_processor(); |
112 | 126 | ||
113 | get_lppaca()->donate_dedicated_cpu = 0; | 127 | get_lppaca()->donate_dedicated_cpu = 0; |
114 | dev->last_residency = | 128 | dev->last_residency = |
@@ -132,7 +146,7 @@ static int shared_cede_loop(struct cpuidle_device *dev, | |||
132 | * processor. When returning here, external interrupts | 146 | * processor. When returning here, external interrupts |
133 | * are enabled. | 147 | * are enabled. |
134 | */ | 148 | */ |
135 | cede_processor(); | 149 | check_and_cede_processor(); |
136 | 150 | ||
137 | dev->last_residency = | 151 | dev->last_residency = |
138 | (int)idle_loop_epilog(in_purr, kt_before); | 152 | (int)idle_loop_epilog(in_purr, kt_before); |
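One caveat on check_and_cede_processor() as written: hard_irq_disable(), as extended by this series, also latches PACA_IRQ_HARD_DIS into irq_happened when it turns EE off, so testing the whole byte against zero can fail even when no real event is pending. A more robust test masks that bit out, along these lines (pending_irq_event() is a hypothetical helper, not part of this patch):

	/* Hypothetical: latched events other than the hard-disable bit itself */
	static inline unsigned char pending_irq_event(void)
	{
		return get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS;
	}

	hard_irq_disable();
	if (!pending_irq_event())
		cede_processor();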
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 63846ebd3276..974a47b3c9b8 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c | |||
@@ -1437,8 +1437,8 @@ static void excprint(struct pt_regs *fp) | |||
1437 | 1437 | ||
1438 | printf(" current = 0x%lx\n", current); | 1438 | printf(" current = 0x%lx\n", current); |
1439 | #ifdef CONFIG_PPC64 | 1439 | #ifdef CONFIG_PPC64 |
1440 | printf(" paca = 0x%lx\t softe: %d\t harde: %d\n", | 1440 | printf(" paca = 0x%lx\t softe: %d\t irq_happened: 0x%02x\n", |
1441 | local_paca, local_paca->soft_enabled, local_paca->hard_enabled); | 1441 | local_paca, local_paca->soft_enabled, local_paca->irq_happened); |
1442 | #endif | 1442 | #endif |
1443 | if (current) { | 1443 | if (current) { |
1444 | printf(" pid = %ld, comm = %s\n", | 1444 | printf(" pid = %ld, comm = %s\n", |