author		Rich Felker <dalias@libc.org>	2016-07-28 15:21:10 -0400
committer	Rich Felker <dalias@libc.org>	2016-08-04 23:29:34 -0400
commit		2b47d54ed41c33baf5825185168b493317c5572f
tree		a42a2098fa73f41d6d5b214e1eeca5712d419551
parent		834da197058bebcb343320dafb8b62d416d4038c
sh: add J2 atomics using the cas.l instruction
Signed-off-by: Rich Felker <dalias@libc.org>
 -rw-r--r--  arch/sh/include/asm/atomic.h        |   8
 -rw-r--r--  arch/sh/include/asm/barrier.h       |   5
 -rw-r--r--  arch/sh/include/asm/bitops-cas.h    |  93
 -rw-r--r--  arch/sh/include/asm/bitops.h        |   2
 -rw-r--r--  arch/sh/include/asm/cmpxchg-cas.h   |  24
 -rw-r--r--  arch/sh/include/asm/cmpxchg.h       |   2
 -rw-r--r--  arch/sh/include/asm/spinlock-cas.h  | 117
 -rw-r--r--  arch/sh/include/asm/spinlock-llsc.h | 224
 -rw-r--r--  arch/sh/include/asm/spinlock.h      | 222
 9 files changed, 481 insertions(+), 216 deletions(-)
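
Everything in this patch is built on one primitive: the J2 cas.l instruction, which atomically compares the 32-bit word addressed by r0 with one register and, if they match, stores the other register, leaving the word's previous value in the destination register either way. The sketch below is illustrative only and is not part of the patch; cas_u32() and atomic_add_u32() are hypothetical names used to show the retry-loop idiom that the new bitops, cmpxchg, and spinlock headers all follow.

/*
 * Illustrative sketch only -- not part of the patch.  cas_u32() mirrors
 * the __bo_cas()/__sl_cas()/__cmpxchg_u32() helpers added below: it
 * returns the value that was in *p before the operation, so the CAS
 * succeeded exactly when the return value equals 'old'.
 */
static inline unsigned cas_u32(volatile unsigned *p, unsigned old, unsigned new)
{
        __asm__ __volatile__("cas.l %1,%0,@r0"
                : "+r"(new)             /* in: new value, out: previous *p */
                : "r"(old), "z"(p)      /* "z" pins the address into r0 */
                : "t", "memory");
        return new;
}

/* A hypothetical read-modify-write built the same way as the bitops
 * below: reread and retry until the CAS observes an unchanged value. */
static inline unsigned atomic_add_u32(volatile unsigned *p, unsigned n)
{
        unsigned old;

        do
                old = *p;
        while (cas_u32(p, old, old + n) != old);

        return old;
}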
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index c399e1c55685..8a7bd80c8b33 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -1,6 +1,12 @@
 #ifndef __ASM_SH_ATOMIC_H
 #define __ASM_SH_ATOMIC_H
 
+#if defined(CONFIG_CPU_J2)
+
+#include <asm-generic/atomic.h>
+
+#else
+
 /*
  * Atomic operations that C can't guarantee us. Useful for
  * resource counting etc..
@@ -63,4 +69,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
+#endif /* CONFIG_CPU_J2 */
+
 #endif /* __ASM_SH_ATOMIC_H */
diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h
index 8a84e05adb2e..3c30b6e166b6 100644
--- a/arch/sh/include/asm/barrier.h
+++ b/arch/sh/include/asm/barrier.h
@@ -29,6 +29,11 @@
 #define wmb()		mb()
 #define ctrl_barrier()	__icbi(PAGE_OFFSET)
 #else
+#if defined(CONFIG_CPU_J2) && defined(CONFIG_SMP)
+#define __smp_mb()	do { int tmp = 0; __asm__ __volatile__ ("cas.l %0,%0,@%1" : "+r"(tmp) : "z"(&tmp) : "memory", "t"); } while(0)
+#define __smp_rmb()	__smp_mb()
+#define __smp_wmb()	__smp_mb()
+#endif
 #define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
 #endif
 
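
J2 has no dedicated memory-barrier instruction, so on SMP the patch defines __smp_mb() as a cas.l on a dummy stack word, presumably relying on the ordering behaviour of the atomic access itself; the rmb/wmb variants simply alias it. Usage is the same as on any other architecture. A generic illustration (not from the patch, assuming the usual kernel helpers) of the ordering these barriers provide:

/* Generic illustration only: the classic publish/consume pattern that
 * smp_wmb()/smp_rmb() exist to order. */
static int payload;
static int ready;

static void producer(void)
{
        payload = 42;
        smp_wmb();              /* order the payload store before the flag store */
        WRITE_ONCE(ready, 1);
}

static int consumer(void)
{
        while (!READ_ONCE(ready))
                cpu_relax();
        smp_rmb();              /* order the flag load before the payload load */
        return payload;
}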
diff --git a/arch/sh/include/asm/bitops-cas.h b/arch/sh/include/asm/bitops-cas.h
new file mode 100644
index 000000000000..88f793c04d3c
--- /dev/null
+++ b/arch/sh/include/asm/bitops-cas.h
@@ -0,0 +1,93 @@
+#ifndef __ASM_SH_BITOPS_CAS_H
+#define __ASM_SH_BITOPS_CAS_H
+
+static inline unsigned __bo_cas(volatile unsigned *p, unsigned old, unsigned new)
+{
+        __asm__ __volatile__("cas.l %1,%0,@r0"
+                : "+r"(new)
+                : "r"(old), "z"(p)
+                : "t", "memory" );
+        return new;
+}
+
+static inline void set_bit(int nr, volatile void *addr)
+{
+        unsigned mask, old;
+        volatile unsigned *a = addr;
+
+        a += nr >> 5;
+        mask = 1U << (nr & 0x1f);
+
+        do old = *a;
+        while (__bo_cas(a, old, old|mask) != old);
+}
+
+static inline void clear_bit(int nr, volatile void *addr)
+{
+        unsigned mask, old;
+        volatile unsigned *a = addr;
+
+        a += nr >> 5;
+        mask = 1U << (nr & 0x1f);
+
+        do old = *a;
+        while (__bo_cas(a, old, old&~mask) != old);
+}
+
+static inline void change_bit(int nr, volatile void *addr)
+{
+        unsigned mask, old;
+        volatile unsigned *a = addr;
+
+        a += nr >> 5;
+        mask = 1U << (nr & 0x1f);
+
+        do old = *a;
+        while (__bo_cas(a, old, old^mask) != old);
+}
+
+static inline int test_and_set_bit(int nr, volatile void *addr)
+{
+        unsigned mask, old;
+        volatile unsigned *a = addr;
+
+        a += nr >> 5;
+        mask = 1U << (nr & 0x1f);
+
+        do old = *a;
+        while (__bo_cas(a, old, old|mask) != old);
+
+        return !!(old & mask);
+}
+
+static inline int test_and_clear_bit(int nr, volatile void *addr)
+{
+        unsigned mask, old;
+        volatile unsigned *a = addr;
+
+        a += nr >> 5;
+        mask = 1U << (nr & 0x1f);
+
+        do old = *a;
+        while (__bo_cas(a, old, old&~mask) != old);
+
+        return !!(old & mask);
+}
+
+static inline int test_and_change_bit(int nr, volatile void *addr)
+{
+        unsigned mask, old;
+        volatile unsigned *a = addr;
+
+        a += nr >> 5;
+        mask = 1U << (nr & 0x1f);
+
+        do old = *a;
+        while (__bo_cas(a, old, old^mask) != old);
+
+        return !!(old & mask);
+}
+
+#include <asm-generic/bitops/non-atomic.h>
+
+#endif /* __ASM_SH_BITOPS_CAS_H */
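
All of the bit operations above share one shape: index to the containing 32-bit word, build a mask, then retry the CAS until it observes an unchanged old value; the test_and_* variants additionally report the old state of the bit. A small usage sketch, illustrative only and not from the patch (do_one_time_setup() is a hypothetical helper):

/* Illustrative caller, not part of the patch: a one-shot init guard
 * built on the CAS-based test_and_set_bit() above. */
static unsigned long init_done;

static void maybe_init(void)
{
        /* Only the first caller to atomically flip bit 0 runs the setup. */
        if (!test_and_set_bit(0, &init_done))
                do_one_time_setup();    /* hypothetical helper */
}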
diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h
index fc8e652cf173..a8699d60a8c4 100644
--- a/arch/sh/include/asm/bitops.h
+++ b/arch/sh/include/asm/bitops.h
@@ -18,6 +18,8 @@
 #include <asm/bitops-op32.h>
 #elif defined(CONFIG_CPU_SH4A)
 #include <asm/bitops-llsc.h>
+#elif defined(CONFIG_CPU_J2) && defined(CONFIG_SMP)
+#include <asm/bitops-cas.h>
 #else
 #include <asm-generic/bitops/atomic.h>
 #include <asm-generic/bitops/non-atomic.h>
diff --git a/arch/sh/include/asm/cmpxchg-cas.h b/arch/sh/include/asm/cmpxchg-cas.h
new file mode 100644
index 000000000000..d0d86649e8c1
--- /dev/null
+++ b/arch/sh/include/asm/cmpxchg-cas.h
@@ -0,0 +1,24 @@
+#ifndef __ASM_SH_CMPXCHG_CAS_H
+#define __ASM_SH_CMPXCHG_CAS_H
+
+static inline unsigned long
+__cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new)
+{
+        __asm__ __volatile__("cas.l %1,%0,@r0"
+                : "+r"(new)
+                : "r"(old), "z"(m)
+                : "t", "memory" );
+        return new;
+}
+
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
+{
+        unsigned long old;
+        do old = *m;
+        while (__cmpxchg_u32(m, old, val) != old);
+        return old;
+}
+
+#include <asm/cmpxchg-xchg.h>
+
+#endif /* __ASM_SH_CMPXCHG_CAS_H */
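
Note that xchg_u32() is not a native swap here; it is a __cmpxchg_u32() retry loop, the same idiom as the bitops. Callers never use these helpers directly but go through the generic xchg()/cmpxchg() macros. An illustrative sketch, not from the patch, of what such a caller ends up doing on J2 SMP:

/* Illustrative only -- typical callers of the helpers defined above. */
static u32 state;

static void cmpxchg_example(void)
{
        u32 old, prev;

        /* Unconditional swap: returns whatever was there before. */
        old = xchg(&state, 0);

        /* Conditional update: install 2 only if the state is still 1;
         * the return value tells us whether we won the race. */
        prev = cmpxchg(&state, 1, 2);
        if (prev == 1)
                pr_debug("took ownership, previous xchg saw %u\n", old);
}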
diff --git a/arch/sh/include/asm/cmpxchg.h b/arch/sh/include/asm/cmpxchg.h
index 5225916c1057..3dfe0467a773 100644
--- a/arch/sh/include/asm/cmpxchg.h
+++ b/arch/sh/include/asm/cmpxchg.h
@@ -13,6 +13,8 @@
 #include <asm/cmpxchg-grb.h>
 #elif defined(CONFIG_CPU_SH4A)
 #include <asm/cmpxchg-llsc.h>
+#elif defined(CONFIG_CPU_J2) && defined(CONFIG_SMP)
+#include <asm/cmpxchg-cas.h>
 #else
 #include <asm/cmpxchg-irq.h>
 #endif
diff --git a/arch/sh/include/asm/spinlock-cas.h b/arch/sh/include/asm/spinlock-cas.h
new file mode 100644
index 000000000000..c46e8cc7b515
--- /dev/null
+++ b/arch/sh/include/asm/spinlock-cas.h
@@ -0,0 +1,117 @@
+/*
+ * include/asm-sh/spinlock-cas.h
+ *
+ * Copyright (C) 2015 SEI
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_SH_SPINLOCK_CAS_H
+#define __ASM_SH_SPINLOCK_CAS_H
+
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new)
+{
+        __asm__ __volatile__("cas.l %1,%0,@r0"
+                : "+r"(new)
+                : "r"(old), "z"(p)
+                : "t", "memory" );
+        return new;
+}
+
+/*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+ */
+
+#define arch_spin_is_locked(x)		((x)->lock <= 0)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+        smp_cond_load_acquire(&lock->lock, VAL > 0);
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+        while (!__sl_cas(&lock->lock, 1, 0));
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+        __sl_cas(&lock->lock, 0, 1);
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+        return __sl_cas(&lock->lock, 1, 0);
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts but no interrupt
+ * writers. For those circumstances we can "mix" irq-safe locks - any writer
+ * needs to get a irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_read_can_lock(x)	((x)->lock > 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+        unsigned old;
+        do old = rw->lock;
+        while (!old || __sl_cas(&rw->lock, old, old-1) != old);
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+        unsigned old;
+        do old = rw->lock;
+        while (__sl_cas(&rw->lock, old, old+1) != old);
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+        while (__sl_cas(&rw->lock, RW_LOCK_BIAS, 0) != RW_LOCK_BIAS);
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+        __sl_cas(&rw->lock, 0, RW_LOCK_BIAS);
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+        unsigned old;
+        do old = rw->lock;
+        while (old && __sl_cas(&rw->lock, old, old-1) != old);
+        return !!old;
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+        return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS;
+}
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
+
+#endif /* __ASM_SH_SPINLOCK_CAS_H */
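
The rwlock side reuses the classic RW_LOCK_BIAS counting scheme: rw->lock holds the remaining capacity, so RW_LOCK_BIAS means unlocked, RW_LOCK_BIAS - n means n readers, and 0 means a writer owns the lock; the plain spinlock uses 1 for free and 0 for held. The commented sketch below is illustrative only and not part of the patch:

/* Illustrative summary of the lock encodings used above; not part of
 * the patch.
 *
 *   spinlock:  lock == 1 -> free, lock == 0 -> held
 *   rwlock:    lock == RW_LOCK_BIAS     -> free
 *              lock == RW_LOCK_BIAS - n -> n readers
 *              lock == 0                -> one writer
 */
static void rwlock_example(arch_rwlock_t *rw)
{
        arch_read_lock(rw);     /* CAS old -> old-1; retries while old == 0
                                 * (writer present) or the CAS loses a race */
        /* ... read-side critical section, other readers allowed ... */
        arch_read_unlock(rw);   /* CAS old -> old+1 */

        arch_write_lock(rw);    /* spins until it swaps RW_LOCK_BIAS -> 0 */
        /* ... exclusive critical section ... */
        arch_write_unlock(rw);  /* single CAS: 0 -> RW_LOCK_BIAS */
}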
diff --git a/arch/sh/include/asm/spinlock-llsc.h b/arch/sh/include/asm/spinlock-llsc.h
new file mode 100644
index 000000000000..cec78143fa83
--- /dev/null
+++ b/arch/sh/include/asm/spinlock-llsc.h
@@ -0,0 +1,224 @@
+/*
+ * include/asm-sh/spinlock-llsc.h
+ *
+ * Copyright (C) 2002, 2003 Paul Mundt
+ * Copyright (C) 2006, 2007 Akio Idehara
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_SH_SPINLOCK_LLSC_H
+#define __ASM_SH_SPINLOCK_LLSC_H
+
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+/*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+ */
+
+#define arch_spin_is_locked(x)		((x)->lock <= 0)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+        smp_cond_load_acquire(&lock->lock, VAL > 0);
+}
+
+/*
+ * Simple spin lock operations. There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+        unsigned long tmp;
+        unsigned long oldval;
+
+        __asm__ __volatile__ (
+                "1: \n\t"
+                "movli.l @%2, %0 ! arch_spin_lock \n\t"
+                "mov %0, %1 \n\t"
+                "mov #0, %0 \n\t"
+                "movco.l %0, @%2 \n\t"
+                "bf 1b \n\t"
+                "cmp/pl %1 \n\t"
+                "bf 1b \n\t"
+                : "=&z" (tmp), "=&r" (oldval)
+                : "r" (&lock->lock)
+                : "t", "memory"
+        );
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__ (
+                "mov #1, %0 ! arch_spin_unlock \n\t"
+                "mov.l %0, @%1 \n\t"
+                : "=&z" (tmp)
+                : "r" (&lock->lock)
+                : "t", "memory"
+        );
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+        unsigned long tmp, oldval;
+
+        __asm__ __volatile__ (
+                "1: \n\t"
+                "movli.l @%2, %0 ! arch_spin_trylock \n\t"
+                "mov %0, %1 \n\t"
+                "mov #0, %0 \n\t"
+                "movco.l %0, @%2 \n\t"
+                "bf 1b \n\t"
+                "synco \n\t"
+                : "=&z" (tmp), "=&r" (oldval)
+                : "r" (&lock->lock)
+                : "t", "memory"
+        );
+
+        return oldval;
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts but no interrupt
+ * writers. For those circumstances we can "mix" irq-safe locks - any writer
+ * needs to get a irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_read_can_lock(x)	((x)->lock > 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__ (
+                "1: \n\t"
+                "movli.l @%1, %0 ! arch_read_lock \n\t"
+                "cmp/pl %0 \n\t"
+                "bf 1b \n\t"
+                "add #-1, %0 \n\t"
+                "movco.l %0, @%1 \n\t"
+                "bf 1b \n\t"
+                : "=&z" (tmp)
+                : "r" (&rw->lock)
+                : "t", "memory"
+        );
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__ (
+                "1: \n\t"
+                "movli.l @%1, %0 ! arch_read_unlock \n\t"
+                "add #1, %0 \n\t"
+                "movco.l %0, @%1 \n\t"
+                "bf 1b \n\t"
+                : "=&z" (tmp)
+                : "r" (&rw->lock)
+                : "t", "memory"
+        );
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__ (
+                "1: \n\t"
+                "movli.l @%1, %0 ! arch_write_lock \n\t"
+                "cmp/hs %2, %0 \n\t"
+                "bf 1b \n\t"
+                "sub %2, %0 \n\t"
+                "movco.l %0, @%1 \n\t"
+                "bf 1b \n\t"
+                : "=&z" (tmp)
+                : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
+                : "t", "memory"
+        );
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+        __asm__ __volatile__ (
+                "mov.l %1, @%0 ! arch_write_unlock \n\t"
+                :
+                : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
+                : "t", "memory"
+        );
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+        unsigned long tmp, oldval;
+
+        __asm__ __volatile__ (
+                "1: \n\t"
+                "movli.l @%2, %0 ! arch_read_trylock \n\t"
+                "mov %0, %1 \n\t"
+                "cmp/pl %0 \n\t"
+                "bf 2f \n\t"
+                "add #-1, %0 \n\t"
+                "movco.l %0, @%2 \n\t"
+                "bf 1b \n\t"
+                "2: \n\t"
+                "synco \n\t"
+                : "=&z" (tmp), "=&r" (oldval)
+                : "r" (&rw->lock)
+                : "t", "memory"
+        );
+
+        return (oldval > 0);
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+        unsigned long tmp, oldval;
+
+        __asm__ __volatile__ (
+                "1: \n\t"
+                "movli.l @%2, %0 ! arch_write_trylock \n\t"
+                "mov %0, %1 \n\t"
+                "cmp/hs %3, %0 \n\t"
+                "bf 2f \n\t"
+                "sub %3, %0 \n\t"
+                "2: \n\t"
+                "movco.l %0, @%2 \n\t"
+                "bf 1b \n\t"
+                "synco \n\t"
+                : "=&z" (tmp), "=&r" (oldval)
+                : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
+                : "t", "memory"
+        );
+
+        return (oldval > (RW_LOCK_BIAS - 1));
+}
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
+
+#endif /* __ASM_SH_SPINLOCK_LLSC_H */
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index 416834b60ad0..c2c61ea6a8e2 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -11,222 +11,12 @@
 #ifndef __ASM_SH_SPINLOCK_H
 #define __ASM_SH_SPINLOCK_H
 
-/*
- * The only locking implemented here uses SH-4A opcodes. For others,
- * split this out as per atomic-*.h.
- */
-#ifndef CONFIG_CPU_SH4A
-#error "Need movli.l/movco.l for spinlocks"
-#endif
+#if defined(CONFIG_CPU_SH4A)
+#include <asm/spinlock-llsc.h>
+#elif defined(CONFIG_CPU_J2)
+#include <asm/spinlock-cas.h>
+#else
+#error "The configured cpu type does not support spinlocks"
+#endif
 
-#include <asm/barrier.h>
-#include <asm/processor.h>
-
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-#define arch_spin_is_locked(x)		((x)->lock <= 0)
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        smp_cond_load_acquire(&lock->lock, VAL > 0);
-}
-
-/*
- * Simple spin lock operations. There are two variants, one clears IRQ's
- * on the local processor, one does not.
- *
- * We make no fairness assumptions. They have a cost.
- */
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-        unsigned long tmp;
-        unsigned long oldval;
-
-        __asm__ __volatile__ (
-                "1: \n\t"
-                "movli.l @%2, %0 ! arch_spin_lock \n\t"
-                "mov %0, %1 \n\t"
-                "mov #0, %0 \n\t"
-                "movco.l %0, @%2 \n\t"
-                "bf 1b \n\t"
-                "cmp/pl %1 \n\t"
-                "bf 1b \n\t"
-                : "=&z" (tmp), "=&r" (oldval)
-                : "r" (&lock->lock)
-                : "t", "memory"
-        );
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-                "mov #1, %0 ! arch_spin_unlock \n\t"
-                "mov.l %0, @%1 \n\t"
-                : "=&z" (tmp)
-                : "r" (&lock->lock)
-                : "t", "memory"
-        );
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-        unsigned long tmp, oldval;
-
-        __asm__ __volatile__ (
-                "1: \n\t"
-                "movli.l @%2, %0 ! arch_spin_trylock \n\t"
-                "mov %0, %1 \n\t"
-                "mov #0, %0 \n\t"
-                "movco.l %0, @%2 \n\t"
-                "bf 1b \n\t"
-                "synco \n\t"
-                : "=&z" (tmp), "=&r" (oldval)
-                : "r" (&lock->lock)
-                : "t", "memory"
-        );
-
-        return oldval;
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- *
- * NOTE! it is quite common to have readers in interrupts but no interrupt
- * writers. For those circumstances we can "mix" irq-safe locks - any writer
- * needs to get a irq-safe write-lock, but readers can get non-irqsafe
- * read-locks.
- */
-
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x)	((x)->lock > 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-                "1: \n\t"
-                "movli.l @%1, %0 ! arch_read_lock \n\t"
-                "cmp/pl %0 \n\t"
-                "bf 1b \n\t"
-                "add #-1, %0 \n\t"
-                "movco.l %0, @%1 \n\t"
-                "bf 1b \n\t"
-                : "=&z" (tmp)
-                : "r" (&rw->lock)
-                : "t", "memory"
-        );
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-                "1: \n\t"
-                "movli.l @%1, %0 ! arch_read_unlock \n\t"
-                "add #1, %0 \n\t"
-                "movco.l %0, @%1 \n\t"
-                "bf 1b \n\t"
-                : "=&z" (tmp)
-                : "r" (&rw->lock)
-                : "t", "memory"
-        );
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-                "1: \n\t"
-                "movli.l @%1, %0 ! arch_write_lock \n\t"
-                "cmp/hs %2, %0 \n\t"
-                "bf 1b \n\t"
-                "sub %2, %0 \n\t"
-                "movco.l %0, @%1 \n\t"
-                "bf 1b \n\t"
-                : "=&z" (tmp)
-                : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
-                : "t", "memory"
-        );
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-        __asm__ __volatile__ (
-                "mov.l %1, @%0 ! arch_write_unlock \n\t"
-                :
-                : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
-                : "t", "memory"
-        );
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-        unsigned long tmp, oldval;
-
-        __asm__ __volatile__ (
-                "1: \n\t"
-                "movli.l @%2, %0 ! arch_read_trylock \n\t"
-                "mov %0, %1 \n\t"
-                "cmp/pl %0 \n\t"
-                "bf 2f \n\t"
-                "add #-1, %0 \n\t"
-                "movco.l %0, @%2 \n\t"
-                "bf 1b \n\t"
-                "2: \n\t"
-                "synco \n\t"
-                : "=&z" (tmp), "=&r" (oldval)
-                : "r" (&rw->lock)
-                : "t", "memory"
-        );
-
-        return (oldval > 0);
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-        unsigned long tmp, oldval;
-
-        __asm__ __volatile__ (
-                "1: \n\t"
-                "movli.l @%2, %0 ! arch_write_trylock \n\t"
-                "mov %0, %1 \n\t"
-                "cmp/hs %3, %0 \n\t"
-                "bf 2f \n\t"
-                "sub %3, %0 \n\t"
-                "2: \n\t"
-                "movco.l %0, @%2 \n\t"
-                "bf 1b \n\t"
-                "synco \n\t"
-                : "=&z" (tmp), "=&r" (oldval)
-                : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
-                : "t", "memory"
-        );
-
-        return (oldval > (RW_LOCK_BIAS - 1));
-}
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock)	cpu_relax()
-#define arch_read_relax(lock)	cpu_relax()
-#define arch_write_relax(lock)	cpu_relax()
-
 #endif /* __ASM_SH_SPINLOCK_H */