author	Paul Mackerras <paulus@samba.org>	2009-06-11 22:00:50 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-06-14 23:27:37 -0400
commit	4c75f84f2c781beb230031234ed961d28771a764 (patch)
tree	bae42cc387d0fdbc206729f2163b3eebefb9a85c /arch/powerpc/include
parent	e8d1673b9720ec72d85916c6b7d5d476abb2c861 (diff)
powerpc: Add compiler memory barrier to mtmsr macro
On 32-bit non-Book E, local_irq_restore() turns into just mtmsr(),
which doesn't currently have a compiler memory barrier.  This means
that accesses to memory inside a local_irq_save/restore section, or a
spin_lock_irqsave/spin_unlock_irqrestore section on UP, can be
reordered by the compiler to occur outside that section.

To fix this, this adds a compiler memory barrier to mtmsr for both
32-bit and 64-bit.  Having a compiler memory barrier in mtmsr makes
sense because it will almost always be changing something about the
context in which memory accesses are done, so in general we don't
want memory accesses getting moved from one side of an mtmsr to the
other.

With the barrier in mtmsr(), some of the explicit barriers in
hw_irq.h are now redundant, so this removes them.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
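As context for the diff below, here is a minimal sketch (an assumed
caller, not part of the patch; shared_count and bump_count are
hypothetical names) of the hazard on 32-bit UP, where
local_irq_restore() reduces to a bare mtmsr():

/*
 * Hypothetical UP critical section.  Before this patch the 32-bit
 * mtmsr() carried no "memory" clobber, so the compiler was free to
 * sink the store to shared_count past the MSR write, i.e. out of
 * the interrupts-off region.
 */
static unsigned long shared_count;

static void bump_count(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* clears MSR_EE */
	shared_count++;			/* must stay inside the section */
	local_irq_restore(flags);	/* mtmsr(flags): now a compiler barrier */
}

With the "memory" clobber added in reg.h, GCC must assume each
mtmsr()/mtmsrd() reads and writes arbitrary memory, so it can neither
cache shared_count in a register across the call nor move the store
past it.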
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--	arch/powerpc/include/asm/hw_irq.h	5
-rw-r--r--	arch/powerpc/include/asm/reg.h	4
2 files changed, 4 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 53512374e1c9..b7f8f4a87cc0 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -80,7 +80,7 @@ static inline void local_irq_disable(void)
 	__asm__ __volatile__("wrteei 0": : :"memory");
 #else
 	unsigned long msr;
-	__asm__ __volatile__("": : :"memory");
+
 	msr = mfmsr();
 	SET_MSR_EE(msr & ~MSR_EE);
 #endif
@@ -92,7 +92,7 @@ static inline void local_irq_enable(void)
 	__asm__ __volatile__("wrteei 1": : :"memory");
 #else
 	unsigned long msr;
-	__asm__ __volatile__("": : :"memory");
+
 	msr = mfmsr();
 	SET_MSR_EE(msr | MSR_EE);
 #endif
@@ -108,7 +108,6 @@ static inline void local_irq_save_ptr(unsigned long *flags)
 #else
 	SET_MSR_EE(msr & ~MSR_EE);
 #endif
-	__asm__ __volatile__("": : :"memory");
 }
 
 #define local_save_flags(flags)	((flags) = mfmsr())
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index fb359b0a6937..a3c28e46947c 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -745,11 +745,11 @@
 				asm volatile("mfmsr %0" : "=r" (rval)); rval;})
 #ifdef CONFIG_PPC64
 #define __mtmsrd(v, l)	asm volatile("mtmsrd %0," __stringify(l) \
-				     : : "r" (v))
+				     : : "r" (v) : "memory")
 #define mtmsrd(v)	__mtmsrd((v), 0)
 #define mtmsr(v)	mtmsrd(v)
 #else
-#define mtmsr(v)	asm volatile("mtmsr %0" : : "r" (v))
+#define mtmsr(v)	asm volatile("mtmsr %0" : : "r" (v) : "memory")
 #endif
 
 #define mfspr(rn)	({unsigned long rval; \
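A side note on the clobber semantics, as a freestanding sketch (an
assumed example, not from the tree): GCC may move memory accesses
across an asm statement, even a volatile one, unless the statement
clobbers "memory".

static int flag;

void without_clobber(void)
{
	flag = 1;
	asm volatile("");		/* volatile alone does not order memory */
	flag = 2;			/* compiler may merge away the first store */
}

void with_clobber(void)
{
	flag = 1;
	asm volatile("" ::: "memory");	/* full compiler barrier */
	flag = 2;			/* both stores emitted, in program order */
}

This is also why the explicit __asm__ __volatile__("": : :"memory")
statements deleted from hw_irq.h above were needed before mtmsr()
itself carried the clobber.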