Diffstat (limited to 'arch/parisc/include/asm/spinlock.h')
-rw-r--r--  arch/parisc/include/asm/spinlock.h | 64 ++++++++++++++++++++++++++++++++--------------------------------
1 file changed, 32 insertions(+), 32 deletions(-)
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index fae03e136fa8..74036f436a3b 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -5,17 +5,17 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>

-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
         volatile unsigned int *a = __ldcw_align(x);
         return *a == 0;
 }

-#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
-#define __raw_spin_unlock_wait(x) \
-                do { cpu_relax(); } while (__raw_spin_is_locked(x))
+#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
+#define arch_spin_unlock_wait(x) \
+                do { cpu_relax(); } while (arch_spin_is_locked(x))

-static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+static inline void arch_spin_lock_flags(arch_spinlock_t *x,
                                          unsigned long flags)
 {
         volatile unsigned int *a;
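
For readers new to PA-RISC: the architecture's only atomic memory primitive is ldcw ("load and clear word"), which returns the old value of a 16-byte-aligned word while atomically zeroing it. A lock word is therefore 0 when held and non-zero when free, which is why arch_spin_is_locked() tests *a == 0. Below is a minimal acquisition sketch in terms of the __ldcw()/__ldcw_align() helpers this file already uses; spin_acquire_sketch is a made-up name, and the real arch_spin_lock_flags() additionally re-enables interrupts while it waits when the saved flags permit:

    /* Sketch only: we own the lock once __ldcw() returns non-zero,
     * i.e. we are the one who zeroed a previously free word. */
    static inline void spin_acquire_sketch(arch_spinlock_t *x)
    {
            volatile unsigned int *a = __ldcw_align(x); /* ldcw needs 16-byte alignment */

            while (__ldcw(a) == 0)          /* 0: somebody else holds it */
                    while (*a == 0)         /* spin with plain loads, not ldcw */
                            cpu_relax();
    }
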
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
         mb();
 }

-static inline void __raw_spin_unlock(raw_spinlock_t *x)
+static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
         volatile unsigned int *a;
         mb();
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *x)
         mb();
 }

-static inline int __raw_spin_trylock(raw_spinlock_t *x)
+static inline int arch_spin_trylock(arch_spinlock_t *x)
 {
         volatile unsigned int *a;
         int ret;
@@ -69,38 +69,38 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)

 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
+static __inline__ void arch_read_lock(arch_rwlock_t *rw)
 {
         unsigned long flags;
         local_irq_save(flags);
-        __raw_spin_lock_flags(&rw->lock, flags);
+        arch_spin_lock_flags(&rw->lock, flags);
         rw->counter++;
-        __raw_spin_unlock(&rw->lock);
+        arch_spin_unlock(&rw->lock);
         local_irq_restore(flags);
 }

 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
+static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
 {
         unsigned long flags;
         local_irq_save(flags);
-        __raw_spin_lock_flags(&rw->lock, flags);
+        arch_spin_lock_flags(&rw->lock, flags);
         rw->counter--;
-        __raw_spin_unlock(&rw->lock);
+        arch_spin_unlock(&rw->lock);
         local_irq_restore(flags);
 }

 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
+static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
 {
         unsigned long flags;
 retry:
         local_irq_save(flags);
-        if (__raw_spin_trylock(&rw->lock)) {
+        if (arch_spin_trylock(&rw->lock)) {
                 rw->counter++;
-                __raw_spin_unlock(&rw->lock);
+                arch_spin_unlock(&rw->lock);
                 local_irq_restore(flags);
                 return 1;
         }
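
The irq-disable that the comments above insist on is about deadlock, not atomicity: a reader holds rw->lock only for the few instructions around the counter update, but an interrupt taken inside that window whose handler grabs the same rwlock would spin forever on a lock its own CPU already holds. A sketch of the hazard; some_irq_handler and the static rwlock are hypothetical, for illustration only:

    static arch_rwlock_t rw;        /* hypothetical, assumed initialized unlocked */

    void some_irq_handler(void)     /* hypothetical handler */
    {
            /* Without local_irq_save() in arch_read_lock(), this call could
             * interrupt arch_read_lock() on the same CPU between its lock
             * and unlock of rw->lock, and spin forever. */
            arch_read_lock(&rw);
            /* ... read-side critical section ... */
            arch_read_unlock(&rw);
    }
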
@@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
         return 0;

         /* Wait until we have a realistic chance at the lock */
-        while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
+        while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
                 cpu_relax();

         goto retry;
@@ -119,15 +119,15 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)

 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
+static __inline__ void arch_write_lock(arch_rwlock_t *rw)
 {
         unsigned long flags;
 retry:
         local_irq_save(flags);
-        __raw_spin_lock_flags(&rw->lock, flags);
+        arch_spin_lock_flags(&rw->lock, flags);

         if (rw->counter != 0) {
-                __raw_spin_unlock(&rw->lock);
+                arch_spin_unlock(&rw->lock);
                 local_irq_restore(flags);

                 while (rw->counter != 0)
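
The hunk above ends mid-function. Pieced together from this hunk and the next, the post-rename arch_write_lock() plausibly reads as below; the lines between the hunks are not shown in this diff, so the tail (in particular the rw->counter = -1 store) is inferred from arch_write_trylock() further down rather than quoted:

    static __inline__ void arch_write_lock(arch_rwlock_t *rw)
    {
            unsigned long flags;
    retry:
            local_irq_save(flags);
            arch_spin_lock_flags(&rw->lock, flags);

            if (rw->counter != 0) {
                    /* Readers active: back off fully, wait, retry. */
                    arch_spin_unlock(&rw->lock);
                    local_irq_restore(flags);

                    while (rw->counter != 0)
                            cpu_relax();

                    goto retry;
            }

            rw->counter = -1;       /* -1 marks "write locked" */
            mb();
            local_irq_restore(flags);
    }
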
@@ -141,27 +141,27 @@ retry:
         local_irq_restore(flags);
 }

-static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
+static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
 {
         rw->counter = 0;
-        __raw_spin_unlock(&rw->lock);
+        arch_spin_unlock(&rw->lock);
 }

 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
+static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
 {
         unsigned long flags;
         int result = 0;

         local_irq_save(flags);
-        if (__raw_spin_trylock(&rw->lock)) {
+        if (arch_spin_trylock(&rw->lock)) {
                 if (rw->counter == 0) {
                         rw->counter = -1;
                         result = 1;
                 } else {
                         /* Read-locked.  Oh well. */
-                        __raw_spin_unlock(&rw->lock);
+                        arch_spin_unlock(&rw->lock);
                 }
         }
         local_irq_restore(flags);
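
write_trylock() completes the picture of how rw->counter encodes the lock state; summarizing the code above as a comment:

    /*
     * rw->counter encoding (from the functions above):
     *
     *   counter >  0   held by that many readers
     *   counter == 0   unlocked
     *   counter == -1  held by one writer
     *
     * All transitions happen under rw->lock, which is why the
     * can_lock predicates below make do with plain loads.
     */
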
@@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
+static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
 {
         return rw->counter >= 0;
 }
@@ -182,16 +182,16 @@ static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
+static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
 {
         return !rw->counter;
 }

-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

-#define _raw_spin_relax(lock)   cpu_relax()
-#define _raw_read_relax(lock)   cpu_relax()
-#define _raw_write_relax(lock)  cpu_relax()
+#define arch_spin_relax(lock)   cpu_relax()
+#define arch_read_relax(lock)   cpu_relax()
+#define arch_write_relax(lock)  cpu_relax()

 #endif /* __ASM_SPINLOCK_H */
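
None of these arch_* entry points are called directly by kernel code; the generic spinlock layer introduced alongside this rename wraps them. Roughly, as a sketch of the call chain as of this series (the wrappers live in the generic headers, not in this patch):

    /*
     * spin_lock(lock)
     *   -> raw_spin_lock(&lock->rlock)     include/linux/spinlock.h
     *     -> _raw_spin_lock(...)           generic locking core
     *       -> arch_spin_lock(...)         this file
     */
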