author	Paul Mackerras <paulus@samba.org>	2006-10-04 02:47:49 -0400
committer	Paul Mackerras <paulus@samba.org>	2006-10-16 02:31:36 -0400
commit	d04c56f73c30a5e593202ecfcf25ed43d42363a2 (patch)
tree	bec1208293c904b73f12dd9179ebf8e88dbdb085 /include/asm-powerpc
parent	284a940675a64df253e3dffc60b09bb4bbb149e4 (diff)
[POWERPC] Lazy interrupt disabling for 64-bit machines
This implements a lazy strategy for disabling interrupts. This means that local_irq_disable() et al. just clear the 'interrupts are enabled' flag in the paca. If an interrupt comes along, the interrupt entry code notices that interrupts are supposed to be disabled, and clears the EE bit in SRR1, clears the 'interrupts are hard-enabled' flag in the paca, and returns. This means that interrupts only actually get disabled in the processor when an interrupt comes along.

When interrupts are enabled by local_irq_enable() et al., the code sets the interrupts-enabled flag in the paca, and then checks whether interrupts got hard-disabled. If so, it also sets the EE bit in the MSR to hard-enable the interrupts.

This has the potential to improve performance, and also makes it easier to make a kernel that can boot on iSeries and on other 64-bit machines, since this lazy-disable strategy is very similar to the soft-disable strategy that iSeries already uses.

This version renames paca->proc_enabled to paca->soft_enabled, and changes a couple of soft-disables in the kexec code to hard-disables, which should fix the crash that Michael Ellerman saw. This doesn't yet use a reserved CR field for the soft_enabled and hard_enabled flags. This applies on top of Stephen Rothwell's patches to make it possible to build a combined iSeries/other kernel.

Signed-off-by: Paul Mackerras <paulus@samba.org>
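To make the lazy-enable half of this concrete, here is a minimal sketch of how local_irq_restore() can behave as described above. It is not code from this patch (which only touches the headers); only soft_enabled, hard_enabled and hard_irq_enable() come from the diff below, everything else is assumed for illustration.

/*
 * Illustrative sketch only, based on the commit description; the real
 * lazy-enable code lives elsewhere in the kernel and also handles iSeries.
 */
void local_irq_restore(unsigned long en)
{
	get_paca()->soft_enabled = en;	/* record the new soft state */
	if (!en)
		return;			/* still soft-disabled */

	/*
	 * If no interrupt arrived while we were soft-disabled, EE was
	 * never cleared in the MSR, so there is nothing to re-enable.
	 */
	if (get_paca()->hard_enabled)
		return;

	/* An interrupt came in and hard-disabled us; re-enable EE. */
	get_paca()->hard_enabled = en;
	hard_irq_enable();
}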
Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--	include/asm-powerpc/hw_irq.h	31
-rw-r--r--	include/asm-powerpc/paca.h	3
2 files changed, 23 insertions, 11 deletions
diff --git a/include/asm-powerpc/hw_irq.h b/include/asm-powerpc/hw_irq.h
index d40359204aba..c4a1ab608f6f 100644
--- a/include/asm-powerpc/hw_irq.h
+++ b/include/asm-powerpc/hw_irq.h
@@ -7,16 +7,30 @@
 #ifdef __KERNEL__
 
 #include <linux/errno.h>
+#include <linux/compiler.h>
 #include <asm/ptrace.h>
 #include <asm/processor.h>
 
 extern void timer_interrupt(struct pt_regs *);
 
-#ifdef CONFIG_PPC_ISERIES
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+
+static inline unsigned long local_get_flags(void)
+{
+	return get_paca()->soft_enabled;
+}
+
+static inline unsigned long local_irq_disable(void)
+{
+	unsigned long flag = get_paca()->soft_enabled;
+	get_paca()->soft_enabled = 0;
+	barrier();
+	return flag;
+}
 
-extern unsigned long local_get_flags(void);
-extern unsigned long local_irq_disable(void);
 extern void local_irq_restore(unsigned long);
+extern void iseries_handle_interrupts(void);
 
 #define local_irq_enable()	local_irq_restore(1)
 #define local_save_flags(flags)	((flags) = local_get_flags())
@@ -24,17 +38,14 @@ extern void local_irq_restore(unsigned long);
 
 #define irqs_disabled()	(local_get_flags() == 0)
 
+#define hard_irq_enable()	__mtmsrd(mfmsr() | MSR_EE, 1)
+#define hard_irq_disable()	__mtmsrd(mfmsr() & ~MSR_EE, 1)
+
 #else
 
 #if defined(CONFIG_BOOKE)
 #define SET_MSR_EE(x)	mtmsr(x)
 #define local_irq_restore(flags)	__asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
-#elif defined(__powerpc64__)
-#define SET_MSR_EE(x)	__mtmsrd(x, 1)
-#define local_irq_restore(flags) do { \
-	__asm__ __volatile__("": : :"memory"); \
-	__mtmsrd((flags), 1); \
-} while(0)
 #else
 #define SET_MSR_EE(x)	mtmsr(x)
 #define local_irq_restore(flags)	mtmsr(flags)
@@ -81,7 +92,7 @@ static inline void local_irq_save_ptr(unsigned long *flags)
 #define local_irq_save(flags)	local_irq_save_ptr(&flags)
 #define irqs_disabled()	((mfmsr() & MSR_EE) == 0)
 
-#endif /* CONFIG_PPC_ISERIES */
+#endif /* CONFIG_PPC64 */
 
 #define mask_irq(irq)						\
 	({							\
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index 0a4e5c93e8e6..0d3adc09c847 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -93,7 +93,8 @@ struct paca_struct {
 	u64 stab_rr;			/* stab/slb round-robin counter */
 	u64 saved_r1;			/* r1 save for RTAS calls */
 	u64 saved_msr;			/* MSR saved here by enter_rtas */
-	u8 proc_enabled;		/* irq soft-enable flag */
+	u8 soft_enabled;		/* irq soft-enable flag */
+	u8 hard_enabled;		/* set if irqs are enabled in MSR */
 	u8 io_sync;			/* writel() needs spin_unlock sync */
 
 	/* Stuff for accurate time accounting */
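As a usage note, a sketch under assumptions rather than code from this patch: the inline local_irq_disable() added above only clears paca->soft_enabled, while the new hard_irq_disable() macro clears MSR[EE] in the processor immediately, which is the distinction the commit message relies on when it converts the kexec soft-disables to hard-disables. The caller below is hypothetical.

/* Hypothetical caller, illustrating the two levels of disabling. */
static void example_shutdown_path(void)
{
	unsigned long flags;

	/* Lazy path: only the paca flag changes; the processor keeps
	 * MSR[EE] set until an interrupt actually arrives. */
	flags = local_irq_disable();
	/* ... short critical section ... */
	local_irq_restore(flags);

	/* Hard path (e.g. before kexec): clear MSR[EE] now so the
	 * processor really stops taking external interrupts. */
	hard_irq_disable();
}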