| author | David Daney <ddaney@caviumnetworks.com> | 2009-07-13 14:15:19 -0400 |
|---|---|---|
| committer | Ralf Baechle <ralf@linux-mips.org> | 2009-09-17 14:07:50 -0400 |
| commit | b791d1193af9772040e592d5aa161790f800b762 (patch) | |
| tree | 6adad3d9cdf278a3a1a3418ae75a2864d0cc7f39 /arch/mips/include/asm/system.h | |
| parent | f7ade3c168e4f437c11f57be012992bbb0e3075c (diff) | |
MIPS: Allow kernel use of LL/SC to be separate from the presence of LL/SC.
On some CPUs, it is more efficient to disable and enable interrupts in the
kernel rather than use ll/sc for atomic operations. But if we were to set
cpu_has_llsc to false, we would break the userspace futex interface (in
asm/futex.h).
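To illustrate the alternative that motivates this change, here is a minimal sketch of an interrupt-masking exchange in the style of the unchanged non-LL/SC fallback in __xchg_u32; the function name __xchg_u32_irq is hypothetical and not part of this patch:

```c
#include <linux/types.h>
#include <linux/irqflags.h>	/* raw_local_irq_save/restore */

/*
 * Hypothetical sketch (not part of this patch): on a CPU where LL/SC
 * is slow or unavailable, an exchange can be made atomic by masking
 * interrupts around the read-modify-write, as the existing fallback
 * branch of __xchg_u32 already does.
 */
static inline unsigned long __xchg_u32_irq(volatile int *m, unsigned int val)
{
	unsigned long flags;
	__u32 retval;

	raw_local_irq_save(flags);	/* no interrupts: RMW is atomic on UP */
	retval = *m;			/* fetch the old value */
	*m = val;			/* store the new value */
	raw_local_irq_restore(flags);	/* implies a memory barrier */

	return retval;
}
```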
We separate the two concepts with a new predicate, kernel_uses_llsc, which
lets us disable the kernel's use of ll/sc while still allowing the futex
code to use it.
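The cpu-features.h side of the change is outside this diffstat-limited view, but the predicate's default plausibly amounts to a sketch like the following, where a platform's cpu-feature-overrides.h can predefine kernel_uses_llsc to 0:

```c
/*
 * Sketch of the predicate's likely default (the cpu-features.h half of
 * this commit is not shown in this system.h-only diffstat).  A platform
 * that prefers interrupt masking in the kernel can predefine
 * kernel_uses_llsc as 0 in its cpu-feature-overrides.h, while
 * cpu_has_llsc stays true so asm/futex.h keeps using LL/SC for
 * userspace futexes.
 */
#ifndef kernel_uses_llsc
#define kernel_uses_llsc	cpu_has_llsc	/* default: track the CPU */
#endif
```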
Also, there were a couple of cases in bitops.h where we were using ll/sc
unconditionally even when cpu_has_llsc was false.
Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/include/asm/system.h')
-rw-r--r--	arch/mips/include/asm/system.h | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
```diff
diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
index cc7262ff0765..fcf5f98d90cc 100644
--- a/arch/mips/include/asm/system.h
+++ b/arch/mips/include/asm/system.h
@@ -94,7 +94,7 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
 	__u32 retval;
 
-	if (cpu_has_llsc && R10000_LLSC_WAR) {
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		unsigned long dummy;
 
 		__asm__ __volatile__(
@@ -109,7 +109,7 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
 		: "memory");
-	} else if (cpu_has_llsc) {
+	} else if (kernel_uses_llsc) {
 		unsigned long dummy;
 
 		__asm__ __volatile__(
@@ -146,7 +146,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 {
 	__u64 retval;
 
-	if (cpu_has_llsc && R10000_LLSC_WAR) {
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		unsigned long dummy;
 
 		__asm__ __volatile__(
@@ -159,7 +159,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
 		: "memory");
-	} else if (cpu_has_llsc) {
+	} else if (kernel_uses_llsc) {
 		unsigned long dummy;
 
 		__asm__ __volatile__(
```
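For readers unfamiliar with the pattern these branches guard, here is a minimal sketch of the load-linked/store-conditional exchange loop that the kernel_uses_llsc branches select, simplified from the __xchg_u32 asm; the function name __xchg_u32_llsc is hypothetical, and the R10000_LLSC_WAR branch-likely variant is omitted:

```c
#include <linux/types.h>

/*
 * Hypothetical sketch of an LL/SC exchange: ll loads and "links" the
 * word; sc stores only if nothing else wrote the location in between,
 * leaving 1 in its register on success and 0 on failure, in which
 * case we retry.  Constraints mirror the ones in the diff above.
 */
static inline unsigned long __xchg_u32_llsc(volatile int *m, unsigned int val)
{
	__u32 retval;
	unsigned long dummy;

	__asm__ __volatile__(
	"	.set	mips3					\n"
	"1:	ll	%0, %3		# load-linked old value	\n"
	"	move	%2, %z4		# value to store	\n"
	"	sc	%2, %1		# store-conditional	\n"
	"	beqz	%2, 1b		# lost the link: retry	\n"
	"	.set	mips0					\n"
	: "=&r" (retval), "=m" (*m), "=&r" (dummy)
	: "R" (*m), "Jr" (val)
	: "memory");

	return retval;
}
```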