| author | Graf Yang <graf.yang@analog.com> | 2010-01-07 01:57:30 -0500 |
| --- | --- | --- |
| committer | Mike Frysinger <vapier@gentoo.org> | 2011-04-13 19:34:04 -0400 |
| commit | 943aee0c685d0563228d5a2ad9c8394ad0300fb5 (patch) | |
| tree | 380d2fab81323a878ab750f573d4476d109e589b /arch/blackfin/include/asm | |
| parent | 85f2e689a5c8fb6ed8fdbee00109e7f6e5fefcb6 (diff) | |
Blackfin: SMP: make all barriers handle cache issues
When suspending/resuming, the common task-freezing code runs in
parallel and freezes processes on each core.  That code uses the
non-SMP versions of the memory barriers (as it should).
The Blackfin SMP barrier logic at the moment contains the cache sync
logic, but the non-SMP barriers do not.  This is incorrect, as Rafael
summarized:
> ...
> The existing memory barriers are SMP barriers too, but they are more
> than _just_ SMP barriers. At least that's how it is _supposed_ to be
> (eg. rmb() is supposed to be stronger than smp_rmb()).
> ...
> However, looking at the blackfin's definitions of SMP barriers I see
> that it uses extra stuff that should _also_ be used in the definitions
> of the mandatory barriers.
> ...
URL: http://lkml.org/lkml/2011/4/13/11
LKML-Reference: <BANLkTi=F-C-vwX4PGGfbkdTBw3OWL-twfg@mail.gmail.com>
Signed-off-by: Graf Yang <graf.yang@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
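
For illustration only, here is a minimal sketch (hypothetical names, not code from the kernel or from this patch) of the cross-core pattern the message describes: generic UP-safe code uses only the mandatory barriers, so on Blackfin SMP those barriers must also perform the L1 data cache synchronization, or the second core can observe stale data.

```c
/*
 * Hypothetical sketch, not taken from the kernel: core 0 publishes a
 * value behind a flag, core 1 waits for the flag.  Both sides use only
 * the mandatory barriers, as generic UP-safe code (e.g. the freezer)
 * does.
 */
static int shared_data;
static volatile int shared_flag;

void publisher(void)			/* runs on core 0 */
{
	shared_data = 42;
	wmb();		/* order the data write before the flag write;
			 * on Blackfin SMP this must also mark the write
			 * for cross-core cache sync */
	shared_flag = 1;
}

int consumer(void)			/* runs on core 1 */
{
	while (!shared_flag)
		;	/* spin until the flag becomes visible */
	rmb();		/* order the flag read before the data read;
			 * on Blackfin SMP this must also check/invalidate
			 * stale lines, otherwise shared_data may be read
			 * stale from the non-coherent L1 data cache */
	return shared_data;
}
```

With the patch applied, wmb() and rmb() expand to smp_mark_barrier() and smp_check_barrier() on SMP builds with __ARCH_SYNC_CORE_DCACHE, so this pattern works even though it never uses the smp_*() barriers.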
Diffstat (limited to 'arch/blackfin/include/asm')
-rw-r--r-- | arch/blackfin/include/asm/system.h | 36 |
1 file changed, 18 insertions, 18 deletions
```diff
diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h
index 19e2c7c3e63a..44bd0cced725 100644
--- a/arch/blackfin/include/asm/system.h
+++ b/arch/blackfin/include/asm/system.h
@@ -19,11 +19,11 @@
  * Force strict CPU ordering.
  */
 #define nop() __asm__ __volatile__ ("nop;\n\t" : : )
-#define mb() __asm__ __volatile__ ("" : : : "memory")
-#define rmb() __asm__ __volatile__ ("" : : : "memory")
-#define wmb() __asm__ __volatile__ ("" : : : "memory")
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#define read_barrier_depends() do { } while(0)
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_read_barrier_depends() read_barrier_depends()
 
 #ifdef CONFIG_SMP
 asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value);
@@ -37,16 +37,16 @@ asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr,
 					unsigned long new, unsigned long old);
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
-# define smp_mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
-# define smp_rmb() do { barrier(); smp_check_barrier(); } while (0)
-# define smp_wmb() do { barrier(); smp_mark_barrier(); } while (0)
-#define smp_read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
-
+/* Force Core data cache coherence */
+# define mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
+# define rmb() do { barrier(); smp_check_barrier(); } while (0)
+# define wmb() do { barrier(); smp_mark_barrier(); } while (0)
+# define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
 #else
-# define smp_mb() barrier()
-# define smp_rmb() barrier()
-# define smp_wmb() barrier()
-#define smp_read_barrier_depends() barrier()
+# define mb() barrier()
+# define rmb() barrier()
+# define wmb() barrier()
+# define read_barrier_depends() do { } while (0)
 #endif
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
@@ -99,10 +99,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 
 #else /* !CONFIG_SMP */
 
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#define read_barrier_depends() do { } while (0)
 
 struct __xchg_dummy {
 	unsigned long a[100];
```
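
For reference, a condensed view (paraphrased from the patched header, not a verbatim copy) of how the barrier macros relate after this change, in the CONFIG_SMP plus __ARCH_SYNC_CORE_DCACHE configuration:

```c
/*
 * Condensed, paraphrased view of the patched arch/blackfin/include/asm/system.h
 * for CONFIG_SMP + __ARCH_SYNC_CORE_DCACHE.
 */

/* The mandatory barriers now carry the cache-coherence work: */
#define mb()	do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
#define rmb()	do { barrier(); smp_check_barrier(); } while (0)
#define wmb()	do { barrier(); smp_mark_barrier(); } while (0)
#define read_barrier_depends()	do { barrier(); smp_check_barrier(); } while (0)

/* The SMP variants simply alias them, so e.g. rmb() can never be
 * weaker than smp_rmb(): */
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()

/* set_mb() picks up the same cache handling through mb(): */
#define set_mb(var, value)	do { var = value; mb(); } while (0)
```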