Diffstat (limited to 'include/asm-parisc/spinlock.h')
-rw-r--r--	include/asm-parisc/spinlock.h	24	++++++++++--------------
1 files changed, 10 insertions(+), 14 deletions(-)
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index 43eaa6e742e0..16c2ac075fc5 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -5,29 +5,31 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
-/* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
- * since it only has load-and-zero. Moreover, at least on some PA processors,
- * the semaphore address has to be 16-byte aligned.
- */
-
 static inline int __raw_spin_is_locked(raw_spinlock_t *x)
 {
 	volatile unsigned int *a = __ldcw_align(x);
 	return *a == 0;
 }
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
 #define __raw_spin_unlock_wait(x) \
 		do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
-static inline void __raw_spin_lock(raw_spinlock_t *x)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+					 unsigned long flags)
 {
 	volatile unsigned int *a;
 
 	mb();
 	a = __ldcw_align(x);
 	while (__ldcw(a) == 0)
-		while (*a == 0);
+		while (*a == 0)
+			if (flags & PSW_SM_I) {
+				local_irq_enable();
+				cpu_relax();
+				local_irq_disable();
+			} else
+				cpu_relax();
 	mb();
 }
 
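The first hunk inverts the relationship between the two lock entry points: __raw_spin_lock() is now a wrapper passing flags = 0, and the real spin loop lives in __raw_spin_lock_flags(). While waiting for a contended lock, the loop checks the caller's saved PSW: if the I bit (PSW_SM_I) was set, interrupts were enabled before the lock attempt, so they are briefly re-enabled around cpu_relax() so pending interrupts can be serviced while the CPU spins. Below is a minimal user-space model of the pattern; the C11 atomics and the model_* names are illustrative stand-ins for the real __ldcw()/PSW machinery, not kernel API. A lock word must be initialized to 1 (unlocked) before use.

#include <stdatomic.h>

/* 1 = unlocked, 0 = locked: PA-RISC ldcw can only load-and-zero, so zero
 * has to mean "taken".  atomic_exchange(..., 0) models that read-and-clear. */
typedef struct { atomic_uint word; } model_lock_t;

static void model_lock_flags(model_lock_t *x, int irqs_were_enabled)
{
	while (atomic_exchange(&x->word, 0) == 0) {	/* ldcw: saw 0, lock is held */
		while (atomic_load(&x->word) == 0) {	/* read-only inner spin */
			if (irqs_were_enabled) {
				/* kernel code does: local_irq_enable();
				 * cpu_relax(); local_irq_disable(); */
			}
			/* else: just cpu_relax() between polls */
		}
	}
}

static void model_unlock(model_lock_t *x)
{
	atomic_store(&x->word, 1);	/* storing 1 releases the lock */
}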
@@ -65,26 +67,20 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
 
 static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
 {
-	unsigned long flags;
-	local_irq_save(flags);
 	__raw_spin_lock(&rw->lock);
 
 	rw->counter++;
 
 	__raw_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
 }
 
 static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
 {
-	unsigned long flags;
-	local_irq_save(flags);
 	__raw_spin_lock(&rw->lock);
 
 	rw->counter--;
 
 	__raw_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
 }
 
 /* write_lock is less trivial. We optimistically grab the lock and check
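The second hunk drops the local_irq_save()/local_irq_restore() pair from the reader paths, leaving only the reader-count update under the internal spinlock. Continuing the user-space sketch above (same hypothetical model_* names, not the kernel API):

typedef struct {
	model_lock_t lock;	/* internal ldcw-style spinlock, as above */
	volatile int counter;	/* number of readers currently inside */
} model_rwlock_t;

static void model_read_lock(model_rwlock_t *rw)
{
	model_lock_flags(&rw->lock, 0);	/* flags == 0: plain spin, no IRQ toggling */
	rw->counter++;			/* the whole critical section is one bump */
	model_unlock(&rw->lock);
}

static void model_read_unlock(model_rwlock_t *rw)
{
	model_lock_flags(&rw->lock, 0);
	rw->counter--;
	model_unlock(&rw->lock);
}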