author	Alan Cox <alan@lxorguk.ukuu.org.uk>	2006-01-18 20:44:07 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-18 22:20:30 -0500
commit	715b49ef2de6fcead0776d9349071670282faf65 (patch)
tree	d09b77c804aba3b191dc0ceb294387cf730ede4b /include
parent	3213e913b0d6baeb28aa1affbdd4bfa7efedc35f (diff)
[PATCH] EDAC: atomic scrub operations
EDAC requires a way to scrub memory if an ECC error is found and the chipset does not do the work automatically. That means rewriting memory locations atomically with respect to all CPUs _and_ bus masters. That means we can't use atomic_add(foo, 0), as it gets optimised for non-SMP.

This adds a function to include/asm-foo/atomic.h for the platforms currently supported which implements a scrub of a mapped block. It also adjusts the include order in a few other files where atomic.h is included before types.h, as this now causes an error because atomic_scrub() uses u32.

Signed-off-by: Alan Cox <alan@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
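[Editor's note] The reason atomic_add() cannot simply be reused is that the generic atomic primitives only have to be atomic against other CPUs, so their lock prefix is compiled out on uniprocessor kernels. A simplified, paraphrased sketch of the asm-i386 definitions of this era (not a verbatim copy of this tree) shows the difference:

/* Paraphrased sketch of asm-i386/atomic.h -- with CONFIG_SMP unset,
 * LOCK expands to nothing, so atomic_add() issues a plain addl that is
 * not locked on the bus and gives no guarantee against a concurrent
 * DMA write to the same location.
 */
#ifdef CONFIG_SMP
#define LOCK "lock ; "
#else
#define LOCK ""
#endif

static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(LOCK "addl %1,%0"
			     : "=m" (v->counter)
			     : "ir" (i), "m" (v->counter));
}

The atomic_scrub() added below hard-codes the lock prefix instead, so each word is read and written back in a single locked bus cycle even on a uniprocessor kernel where bus masters can still modify memory.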
Diffstat (limited to 'include')
-rw-r--r--	include/asm-i386/atomic.h	12
-rw-r--r--	include/asm-x86_64/atomic.h	12
2 files changed, 24 insertions, 0 deletions
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index de649d3aa2d4..e2c00c95a5e1 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -255,5 +255,17 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()
 
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(unsigned long *virt_addr, u32 size)
+{
+	u32 i;
+	for (i = 0; i < size / 4; i++, virt_addr++)
+		/* Very carefully read and write to memory atomically
+		 * so we are interrupt, DMA and SMP safe.
+		 */
+		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
 #include <asm-generic/atomic.h>
 #endif
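[Editor's note] For context, the intended caller is the EDAC core: on a correctable error it maps the affected page and rewrites the block in place. The sketch below is illustrative only and not part of this patch; the function name and the exact mapping calls are assumptions about how atomic_scrub() would be used.

/* Hypothetical caller -- illustration only, not part of this patch. */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/atomic.h>

static void scrub_block(struct page *pg, unsigned long offset, u32 size)
{
	void *virt_addr;

	/* Obtain a kernel-virtual mapping of the page holding the error. */
	virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);

	/* Rewrite each word with a locked add of zero: the stored data is
	 * unchanged, but the memory controller recomputes the ECC bits.
	 */
	atomic_scrub(virt_addr + offset, size);

	kunmap_atomic(virt_addr, KM_BOUNCE_READ);
}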
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 4b5cd553e772..4048508c4f40 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -426,5 +426,17 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()
 
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(u32 *virt_addr, u32 size)
+{
+	u32 i;
+	for (i = 0; i < size / 4; i++, virt_addr++)
+		/* Very carefully read and write to memory atomically
+		 * so we are interrupt, DMA and SMP safe.
+		 */
+		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
 #include <asm-generic/atomic.h>
 #endif
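[Editor's note] The include-order adjustments mentioned in the log are outside include/ and therefore not visible in this diffstat. The pattern, shown with a made-up example below, is simply to make sure the u32 typedef is visible before atomic.h is parsed, since atomic_scrub() now uses it:

/* Illustrative only -- not one of the files touched by this patch.
 * atomic.h now references u32, so types.h has to come first; the fix
 * in the affected files is just to reorder the includes.
 */
#include <asm/types.h>		/* provides u32 */
#include <asm/atomic.h>		/* atomic_scrub() prototype now compiles */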