author		Hugh Dickins <hugh@veritas.com>		2006-10-31 13:41:51 -0500
committer	Paul Mackerras <paulus@samba.org>	2006-10-31 22:52:49 -0500
commit		292f86f005e3867277b2126c2399eea3e773a4fc (patch)
tree		12a7040e81b80f87f4c0899b94dd8bd29c1df391 /include/asm-powerpc
parent		96268889ee369b36203b7a06e8aabb197270216e (diff)
[POWERPC] Make mmiowb's io_sync preempt safe
If mmiowb() is always used prior to releasing the spinlock, as Doc suggests,
then it's safe against preemption; but I'm not convinced that's always the
case.  If preemption occurs between sync and get_paca()->io_sync = 0, I
believe there's no problem.  But in the unlikely event that gcc does the
store relative to another register than r13 (as it did with current), then
there's a small danger of setting another cpu's io_sync to 0, after it had
just set it to 1.  Rewrite ppc64 mmiowb to prevent that.

The remaining io_sync assignments in io.h all set get_paca()->io_sync = 1,
which is harmless even if preempted to the wrong cpu (the context switch
itself syncs); and those in spinlock.h are done while preemption is disabled.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
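As an illustrative sketch only (not part of the patch), the race described
above can be pictured against the old mmiowb(); the compiler's register
choice in the comments is hypothetical:

	/* Old mmiowb(), annotated to show the hazard described above. */
	static inline void mmiowb(void)
	{
		__asm__ __volatile__ ("sync" : : : "memory");
		/*
		 * gcc may have computed the paca address into some register
		 * other than r13 before this point; if the task is preempted
		 * here and migrates, that stale register still points at the
		 * previous cpu's paca ...
		 */
		get_paca()->io_sync = 0;	/* ... so this can clear the other
						 * cpu's io_sync just after a task
						 * running there set it to 1 */
	}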
Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--	include/asm-powerpc/io.h	7

1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index 3baff8b0fd5a..c2c5f14b5f5f 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -163,8 +163,11 @@ extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count);
 
 static inline void mmiowb(void)
 {
-	__asm__ __volatile__ ("sync" : : : "memory");
-	get_paca()->io_sync = 0;
+	unsigned long tmp;
+
+	__asm__ __volatile__("sync; li %0,0; stb %0,%1(13)"
+	: "=&r" (tmp) : "i" (offsetof(struct paca_struct, io_sync))
+	: "memory");
 }
 
 /*
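For context, the usage the commit message refers to ("mmiowb() prior to
releasing the spinlock") is roughly the pattern below; hw_lock, val and regs
are made-up names for illustration:

	spin_lock(&hw_lock);		/* hw_lock, val, regs: hypothetical names */
	writel(val, regs);		/* MMIO store; io.h sets io_sync = 1 */
	mmiowb();			/* sync and clear io_sync, ordering the
					 * store before the lock is released */
	spin_unlock(&hw_lock);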