author     Akira Takeuchi <takeuchi.akr@jp.panasonic.com>   2010-10-27 12:28:55 -0400
committer  David Howells <dhowells@redhat.com>              2010-10-27 12:28:55 -0400
commit     368dd5acd154b09c043cc4392a74da01599b37d5 (patch)
tree       dd94ae3d044f6e774dec2437613515bd6b46dacb /arch/mn10300/include/asm/spinlock.h
parent     04157a6e7df99fd5ed64955233d6e00ab6613614 (diff)
MN10300: Add Panasonic AM34 subarch and implement SMP
Implement the Panasonic AM34 CPU subarch for MN10300 and add SMP support. Also implement support for the MN2WS0060 processor and the ASB2364 evaluation board, which are AM34 based.

Signed-off-by: Akira Takeuchi <takeuchi.akr@jp.panasonic.com>
Signed-off-by: Kiyoshi Owada <owada.kiyoshi@jp.panasonic.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Diffstat (limited to 'arch/mn10300/include/asm/spinlock.h')
-rw-r--r--   arch/mn10300/include/asm/spinlock.h   179
1 file changed, 178 insertions(+), 1 deletion(-)
diff --git a/arch/mn10300/include/asm/spinlock.h b/arch/mn10300/include/asm/spinlock.h
index 4bf9c8b169e..93429154e89 100644
--- a/arch/mn10300/include/asm/spinlock.h
+++ b/arch/mn10300/include/asm/spinlock.h
@@ -11,6 +11,183 @@
 #ifndef _ASM_SPINLOCK_H
 #define _ASM_SPINLOCK_H
 
-#error SMP spinlocks not implemented for MN10300
+#include <asm/atomic.h>
+#include <asm/rwlock.h>
+#include <asm/page.h>
 
+/*
+ * Simple spin lock operations. There are two variants, one clears IRQs
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+#define arch_spin_is_locked(x)   (*(volatile signed char *)(&(x)->slock) != 0)
+#define arch_spin_unlock_wait(x) do { barrier(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+        asm volatile(
+                "       bclr    1,(0,%0)        \n"
+                :
+                : "a"(&lock->slock)
+                : "memory", "cc");
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+        int ret;
+
+        asm volatile(
+                "       mov     1,%0            \n"
+                "       bset    %0,(%1)         \n"
+                "       bne     1f              \n"
+                "       clr     %0              \n"
+                "1:     xor     1,%0            \n"
+                : "=d"(ret)
+                : "a"(&lock->slock)
+                : "memory", "cc");
+
+        return ret;
+}
+
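Here `bset` doubles as an atomic test-and-set: the condition flags reflect whether the lock bit was already set, so `ret` comes out 1 on success and 0 on failure. A rough portable sketch of the same semantics, using a GCC builtin purely for illustration (not part of the patch; `spin_trylock_sketch` is a made-up name):

/* Illustrative only: approximates the bset-based trylock above. */
static inline int spin_trylock_sketch(volatile unsigned char *slock)
{
        /* __atomic_test_and_set() returns the previous bit state:
         * zero means the lock was free and is now ours. */
        return !__atomic_test_and_set((void *)slock, __ATOMIC_ACQUIRE);
}
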
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+        asm volatile(
+                "1:     bset    1,(0,%0)        \n"
+                "       bne     1b              \n"
+                :
+                : "a"(&lock->slock)
+                : "memory", "cc");
+}
+
+static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
+                                        unsigned long flags)
+{
+        int temp;
+
+        asm volatile(
+                "1:     bset    1,(0,%2)        \n"
+                "       beq     3f              \n"
+                "       mov     %1,epsw         \n"
+                "2:     mov     (0,%2),%0       \n"
+                "       or      %0,%0           \n"
+                "       bne     2b              \n"
+                "       mov     %3,%0           \n"
+                "       mov     %0,epsw         \n"
+                "       nop                     \n"
+                "       nop                     \n"
+                "       bra     1b\n"
+                "3:                             \n"
+                : "=&d" (temp)
+                : "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
+                : "memory", "cc");
+}
+
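The `_flags` variant avoids holding interrupts off for the whole wait: while the lock is busy it writes the caller's saved EPSW (the processor status word) back, spins reading the lock byte, then masks IRQs again before retrying the test-and-set (the two `nop`s cover the delay before an EPSW write takes effect). The same shape in C, as a sketch assuming the usual arch_local_irq_*() helpers (illustrative, not part of the patch):

static inline void spin_lock_flags_sketch(arch_spinlock_t *lock,
                                          unsigned long flags)
{
        while (!arch_spin_trylock(lock)) {
                /* restore the caller's IRQ state so interrupts are
                 * serviced while we wait */
                arch_local_irq_restore(flags);
                while (arch_spin_is_locked(lock))
                        cpu_relax();
                /* mask IRQs again before the next acquisition attempt */
                arch_local_irq_disable();
        }
}
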
+#ifdef __KERNEL__
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! It is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get an
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
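The pattern that comment describes, expressed with the generic rwlock API (illustrative, not part of the patch; the lock and function names are hypothetical):

static DEFINE_RWLOCK(sample_lock);

/* called from interrupt context: a plain read_lock() is enough */
static void reader_in_irq(void)
{
        read_lock(&sample_lock);
        /* ... read the shared data ... */
        read_unlock(&sample_lock);
}

/* writers must be irq-safe so they cannot deadlock against an
 * interrupt reader on the same CPU */
static void writer(void)
{
        unsigned long flags;

        write_lock_irqsave(&sample_lock, flags);
        /* ... modify the shared data ... */
        write_unlock_irqrestore(&sample_lock, flags);
}
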
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_read_can_lock(x)   ((int)(x)->lock > 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_write_can_lock(x)  ((x)->lock == RW_LOCK_BIAS)
+
+/*
+ * On mn10300, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ */
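Concretely, the counter starts at RW_LOCK_BIAS (0x01000000 in the classic x86-derived scheme this mirrors; the exact value is an assumption here). Each reader subtracts 1 and each writer subtracts the whole bias, giving these states (illustrative annotation, not part of the patch):

/*
 * count == RW_LOCK_BIAS        unlocked
 * count == RW_LOCK_BIAS - N    N readers, no writer (still positive)
 * count == 0                   one writer, no readers
 * count <  0                   contended: the sign bit is set while an
 *                              acquisition conflicts with the current
 *                              holders; such states are transient since
 *                              failed attempts immediately back out
 */
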
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+        __build_read_lock(rw, "__read_lock_failed");
+#else
+        {
+                atomic_t *count = (atomic_t *)rw;
+                while (atomic_dec_return(count) < 0)
+                        atomic_inc(count);
+        }
+#endif
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+        __build_write_lock(rw, "__write_lock_failed");
+#else
+        {
+                atomic_t *count = (atomic_t *)rw;
+                while (!atomic_sub_and_test(RW_LOCK_BIAS, count))
+                        atomic_add(RW_LOCK_BIAS, count);
+        }
+#endif
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+        __build_read_unlock(rw);
+#else
+        {
+                atomic_t *count = (atomic_t *)rw;
+                atomic_inc(count);
+        }
+#endif
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+        __build_write_unlock(rw);
+#else
+        {
+                atomic_t *count = (atomic_t *)rw;
+                atomic_add(RW_LOCK_BIAS, count);
+        }
+#endif
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *lock)
+{
+        atomic_t *count = (atomic_t *)lock;
+        atomic_dec(count);
+        if (atomic_read(count) >= 0)
+                return 1;
+        atomic_inc(count);
+        return 0;
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *lock)
+{
+        atomic_t *count = (atomic_t *)lock;
+        if (atomic_sub_and_test(RW_LOCK_BIAS, count))
+                return 1;
+        atomic_add(RW_LOCK_BIAS, count);
+        return 0;
+}
+
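Note that both trylocks back out by re-adding what they subtracted, so a failed attempt leaves the counter unchanged. A minimal caller sketch (hypothetical helper, not part of the patch):

/* Try to update shared state; returns 0 without blocking if
 * readers or a writer currently hold the lock. */
static int try_update(arch_rwlock_t *rw, int *shared, int val)
{
        if (!arch_write_trylock(rw))
                return 0;
        *shared = val;
        arch_write_unlock(rw);
        return 1;
}
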
+#define arch_read_lock_flags(lock, flags)  arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define _raw_spin_relax(lock)   cpu_relax()
+#define _raw_read_relax(lock)   cpu_relax()
+#define _raw_write_relax(lock)  cpu_relax()
+
+#endif /* __KERNEL__ */
 #endif /* _ASM_SPINLOCK_H */
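
These arch_*() primitives are not called directly; they sit underneath the generic <linux/spinlock.h> API. A typical use from driver code, for illustration (the lock name is hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_critical_section(void)
{
        unsigned long flags;

        /* spin_lock_irqsave() ultimately reaches
         * arch_spin_lock_flags() on this architecture */
        spin_lock_irqsave(&demo_lock, flags);
        /* ... touch data shared with interrupt context ... */
        spin_unlock_irqrestore(&demo_lock, flags);
}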