diff options
author | Alan Cox <alan@lxorguk.ukuu.org.uk> | 2006-01-18 20:44:13 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-01-18 22:20:31 -0500 |
commit | da9bb1d27b21cb24cbb6a2efb5d3c464d357a01e (patch) | |
tree | 016b66985a651d071d3873e74b115108ddf0b3f5 /include | |
parent | 2f768af73fea4c70f9046388a7ff648ad11f028e (diff) |
[PATCH] EDAC: core EDAC support code
This is a subset of the bluesmoke project core code, stripped of the NMI work
which isn't ready to merge and some of the "interesting" proc functionality
that needs reworking or just has no place in the kernel. It requires no core
kernel changes except the added scrub functions already posted.
The goal is to merge further functionality only after the core code is
accepted and proven in the base kernel, and only at the point the upstream
extras are really ready to merge.
From: doug thompson <norsk5@xmission.com>
This converts EDAC to sysfs and is the final chunk necessary before EDAC
has a stable user space API and can be considered for submission into the
base kernel.
Signed-off-by: Alan Cox <alan@redhat.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Jesper Juhl <jesper.juhl@gmail.com>
Signed-off-by: doug thompson <norsk5@xmission.com>
Signed-off-by: Pavel Machek <pavel@suse.cz>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include')
-rw-r--r-- | include/asm-i386/atomic.h | 12 | ||||
-rw-r--r-- | include/asm-i386/edac.h | 18 | ||||
-rw-r--r-- | include/asm-x86_64/atomic.h | 12 | ||||
-rw-r--r-- | include/asm-x86_64/edac.h | 18 |
4 files changed, 36 insertions, 24 deletions
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h index e2c00c95a5e..de649d3aa2d 100644 --- a/include/asm-i386/atomic.h +++ b/include/asm-i386/atomic.h | |||
@@ -255,17 +255,5 @@ __asm__ __volatile__(LOCK "orl %0,%1" \ | |||
255 | #define smp_mb__before_atomic_inc() barrier() | 255 | #define smp_mb__before_atomic_inc() barrier() |
256 | #define smp_mb__after_atomic_inc() barrier() | 256 | #define smp_mb__after_atomic_inc() barrier() |
257 | 257 | ||
258 | /* ECC atomic, DMA, SMP and interrupt safe scrub function */ | ||
259 | |||
260 | static __inline__ void atomic_scrub(unsigned long *virt_addr, u32 size) | ||
261 | { | ||
262 | u32 i; | ||
263 | for (i = 0; i < size / 4; i++, virt_addr++) | ||
264 | /* Very carefully read and write to memory atomically | ||
265 | * so we are interrupt, DMA and SMP safe. | ||
266 | */ | ||
267 | __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr)); | ||
268 | } | ||
269 | |||
270 | #include <asm-generic/atomic.h> | 258 | #include <asm-generic/atomic.h> |
271 | #endif | 259 | #endif |
diff --git a/include/asm-i386/edac.h b/include/asm-i386/edac.h new file mode 100644 index 00000000000..3e7dd0ab68c --- /dev/null +++ b/include/asm-i386/edac.h | |||
@@ -0,0 +1,18 @@ | |||
1 | #ifndef ASM_EDAC_H | ||
2 | #define ASM_EDAC_H | ||
3 | |||
4 | /* ECC atomic, DMA, SMP and interrupt safe scrub function */ | ||
5 | |||
6 | static __inline__ void atomic_scrub(void *va, u32 size) | ||
7 | { | ||
8 | unsigned long *virt_addr = va; | ||
9 | u32 i; | ||
10 | |||
11 | for (i = 0; i < size / 4; i++, virt_addr++) | ||
12 | /* Very carefully read and write to memory atomically | ||
13 | * so we are interrupt, DMA and SMP safe. | ||
14 | */ | ||
15 | __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr)); | ||
16 | } | ||
17 | |||
18 | #endif | ||
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h index 4048508c4f4..4b5cd553e77 100644 --- a/include/asm-x86_64/atomic.h +++ b/include/asm-x86_64/atomic.h | |||
@@ -426,17 +426,5 @@ __asm__ __volatile__(LOCK "orl %0,%1" \ | |||
426 | #define smp_mb__before_atomic_inc() barrier() | 426 | #define smp_mb__before_atomic_inc() barrier() |
427 | #define smp_mb__after_atomic_inc() barrier() | 427 | #define smp_mb__after_atomic_inc() barrier() |
428 | 428 | ||
429 | /* ECC atomic, DMA, SMP and interrupt safe scrub function */ | ||
430 | |||
431 | static __inline__ void atomic_scrub(u32 *virt_addr, u32 size) | ||
432 | { | ||
433 | u32 i; | ||
434 | for (i = 0; i < size / 4; i++, virt_addr++) | ||
435 | /* Very carefully read and write to memory atomically | ||
436 | * so we are interrupt, DMA and SMP safe. | ||
437 | */ | ||
438 | __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr)); | ||
439 | } | ||
440 | |||
441 | #include <asm-generic/atomic.h> | 429 | #include <asm-generic/atomic.h> |
442 | #endif | 430 | #endif |
diff --git a/include/asm-x86_64/edac.h b/include/asm-x86_64/edac.h new file mode 100644 index 00000000000..cad1cd42b4e --- /dev/null +++ b/include/asm-x86_64/edac.h | |||
@@ -0,0 +1,18 @@ | |||
1 | #ifndef ASM_EDAC_H | ||
2 | #define ASM_EDAC_H | ||
3 | |||
4 | /* ECC atomic, DMA, SMP and interrupt safe scrub function */ | ||
5 | |||
6 | static __inline__ void atomic_scrub(void *va, u32 size) | ||
7 | { | ||
8 | unsigned int *virt_addr = va; | ||
9 | u32 i; | ||
10 | |||
11 | for (i = 0; i < size / 4; i++, virt_addr++) | ||
12 | /* Very carefully read and write to memory atomically | ||
13 | * so we are interrupt, DMA and SMP safe. | ||
14 | */ | ||
15 | __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr)); | ||
16 | } | ||
17 | |||
18 | #endif | ||