author	Paul Mackerras <paulus@samba.org>	2006-09-13 08:08:26 -0400
committer	Paul Mackerras <paulus@samba.org>	2006-09-13 08:08:26 -0400
commit	f007cacffc8870702a1473d83ba5e4922d54e17c (patch)
tree	7faa1dbd7ccd2c4536f29852e0fedf7499d90508	/include/asm-powerpc/eeh.h
parent	2e8e8dacc566cc91cd8707cb092e76c7bbfab178 (diff)
[POWERPC] Fix MMIO ops to provide expected barrier behaviour
This changes the writeX family of functions to have a sync instruction before the MMIO store rather than after, because the generally expected behaviour is that the device receiving the MMIO store can be guaranteed to see the effects of any preceding writes to normal memory.

To preserve ordering between writeX and readX, and to preserve ordering between preceding stores and the readX, the readX family of functions have had a sync added before the load.

Although writeX followed by spin_unlock is not officially guaranteed to keep the writeX inside the spin-locked region unless an mmiowb() is used, there are currently drivers that depend on the previous behaviour on powerpc, which was that the mmiowb wasn't actually required. Therefore we have a per-cpu flag that is set by writeX, cleared by __raw_spin_lock and mmiowb, and tested by __raw_spin_unlock. If it is set, __raw_spin_unlock does a sync and clears it.

This changes both 32-bit and 64-bit readX/writeX. 32-bit already has a sync in __raw_spin_unlock (since lwsync doesn't exist on 32-bit), and thus doesn't need the per-cpu flag.

Tested on G5 (PPC970) and POWER5.

Signed-off-by: Paul Mackerras <paulus@samba.org>
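For illustration, here is a minimal user-space sketch of the ordering scheme the commit message describes. All names in it (io_sync_pending, sketch_writel, sketch_readl, sketch_mmiowb, sketch_spin_lock, sketch_spin_unlock) are invented for this sketch and are not the kernel's actual symbols; a thread-local variable stands in for the real per-cpu flag, and a simple test-and-set lock stands in for the real spinlock.

/*
 * Sketch only -- not the kernel implementation.  Shows: sync before the
 * MMIO store and before the MMIO load, a flag set by the write accessor,
 * cleared by lock and mmiowb, and tested at unlock.
 */
#include <stdint.h>

static __thread int io_sync_pending;	/* stand-in for the per-cpu flag */

static inline void sketch_writel(uint32_t val, volatile uint32_t *addr)
{
	/* sync *before* the MMIO store, so the device is guaranteed to
	 * see the effects of all preceding writes to normal memory */
	__asm__ __volatile__ ("sync" : : : "memory");
	*addr = val;
	io_sync_pending = 1;		/* an MMIO store is now outstanding */
}

static inline uint32_t sketch_readl(const volatile uint32_t *addr)
{
	/* sync before the load orders it after preceding stores and
	 * after any earlier writeX */
	__asm__ __volatile__ ("sync" : : : "memory");
	return *addr;
}

static inline void sketch_mmiowb(void)
{
	__asm__ __volatile__ ("sync" : : : "memory");
	io_sync_pending = 0;		/* MMIO stores ordered; clear the flag */
}

static inline void sketch_spin_lock(volatile int *lock)
{
	while (__sync_lock_test_and_set(lock, 1))	/* simplified spinlock */
		;
	io_sync_pending = 0;		/* nothing outstanding at lock entry */
}

static inline void sketch_spin_unlock(volatile int *lock)
{
	if (io_sync_pending) {		/* a writeX happened inside the region? */
		__asm__ __volatile__ ("sync" : : : "memory");
		io_sync_pending = 0;
	}
	__asm__ __volatile__ ("lwsync" : : : "memory");	/* normal release barrier */
	*lock = 0;
}

With this arrangement a driver that does a writeX and then releases a spinlock still gets the MMIO store pushed out before the lock is released, without needing an explicit mmiowb().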
Diffstat (limited to 'include/asm-powerpc/eeh.h')
-rw-r--r--	include/asm-powerpc/eeh.h	3 +++
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/include/asm-powerpc/eeh.h b/include/asm-powerpc/eeh.h
index 4df3e80118f4..6a784396660b 100644
--- a/include/asm-powerpc/eeh.h
+++ b/include/asm-powerpc/eeh.h
@@ -205,6 +205,7 @@ static inline void eeh_memset_io(volatile void __iomem *addr, int c,
 	lc |= lc << 8;
 	lc |= lc << 16;
 
+	__asm__ __volatile__ ("sync" : : : "memory");
 	while(n && !EEH_CHECK_ALIGN(p, 4)) {
 		*((volatile u8 *)p) = c;
 		p++;
@@ -229,6 +230,7 @@ static inline void eeh_memcpy_fromio(void *dest, const volatile void __iomem *sr
 	void *destsave = dest;
 	unsigned long nsave = n;
 
+	__asm__ __volatile__ ("sync" : : : "memory");
 	while(n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) {
 		*((u8 *)dest) = *((volatile u8 *)vsrc);
 		__asm__ __volatile__ ("eieio" : : : "memory");
@@ -266,6 +268,7 @@ static inline void eeh_memcpy_toio(volatile void __iomem *dest, const void *src,
 {
 	void *vdest = (void __force *) dest;
 
+	__asm__ __volatile__ ("sync" : : : "memory");
 	while(n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) {
 		*((volatile u8 *)vdest) = *((u8 *)src);
 		src++;