path: root/include/asm-parisc
author     Matthew Wilcox <matthew@wil.cx>           2006-09-02 09:54:58 -0400
committer  Matthew Wilcox <willy@parisc-linux.org>   2006-10-04 08:47:25 -0400
commit     6e071852a10ec02570c472052f07b5facb5ad857 (patch)
tree       9b90611da8782225df48b12a2c83e0992fef6d4d /include/asm-parisc
parent     9c2c54574e724589858ad656a507be29f9034943 (diff)
[PARISC] Improve rwlock implementation
Rewrite rwlock implementation to avoid various deadlocks in the current scheme.

Signed-off-by: Matthew Wilcox <matthew@wil.cx>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
Diffstat (limited to 'include/asm-parisc')
-rw-r--r--  include/asm-parisc/spinlock.h   98
1 file changed, 61 insertions(+), 37 deletions(-)
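For reference, here is a minimal, self-contained userspace model of the scheme the patch moves to: the rwlock is just a reader counter guarded by a spinlock, with -1 meaning write-locked. This is only a sketch, not the kernel code; the model_* names are invented and pthread primitives stand in for the kernel ones. The real code in the diff below additionally brackets the spinlock sections with local_irq_save()/local_irq_restore(), because the lock may also be taken from interrupt context.

/*
 * Userspace model of the counter-under-spinlock rwlock scheme (sketch only).
 * Build: cc -pthread model_rwlock.c
 */
#include <pthread.h>
#include <stdio.h>

struct model_rwlock {
	pthread_spinlock_t lock;
	volatile int counter;	/* > 0: readers, 0: free, -1: write-locked */
};

static void model_read_lock(struct model_rwlock *rw)
{
	pthread_spin_lock(&rw->lock);	/* serialise access to the counter */
	rw->counter++;
	pthread_spin_unlock(&rw->lock);
}

static void model_read_unlock(struct model_rwlock *rw)
{
	pthread_spin_lock(&rw->lock);
	rw->counter--;
	pthread_spin_unlock(&rw->lock);
}

static void model_write_lock(struct model_rwlock *rw)
{
retry:
	pthread_spin_lock(&rw->lock);
	if (rw->counter != 0) {
		/* readers still active: drop the lock, wait, try again */
		pthread_spin_unlock(&rw->lock);
		while (rw->counter != 0)
			;		/* cpu_relax() in the kernel */
		goto retry;
	}
	rw->counter = -1;		/* write-locked; spinlock stays held */
}

static void model_write_unlock(struct model_rwlock *rw)
{
	rw->counter = 0;
	pthread_spin_unlock(&rw->lock);
}

int main(void)
{
	struct model_rwlock rw;

	pthread_spin_init(&rw.lock, PTHREAD_PROCESS_PRIVATE);

	model_read_lock(&rw);		/* multiple readers may hold the lock */
	model_read_lock(&rw);
	printf("readers: %d\n", rw.counter);
	model_read_unlock(&rw);
	model_read_unlock(&rw);

	model_write_lock(&rw);		/* writer is exclusive */
	printf("write-locked: %d\n", rw.counter);
	model_write_unlock(&rw);

	pthread_spin_destroy(&rw.lock);
	return 0;
}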
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index e1825530365d..8980a876cc4e 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -56,50 +56,70 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
 }
 
 /*
- * Read-write spinlocks, allowing multiple readers
- * but only one writer.
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * The spinlock is held by the writer, preventing any readers or other
+ * writers from grabbing the rwlock. Readers use the lock to serialise their
+ * access to the counter (which records how many readers currently hold the
+ * lock). Linux rwlocks are unfair to writers; they can be starved for
+ * an indefinite time by readers. They can also be taken in interrupt context,
+ * so we have to disable interrupts when acquiring the spin lock to be sure
+ * that an interrupting reader doesn't get an inconsistent view of the lock.
  */
 
-#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
-
-/* read_lock, read_unlock are pretty straightforward. Of course it somehow
- * sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */
-
 static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
 {
+	unsigned long flags;
+	local_irq_save(flags);
 	__raw_spin_lock(&rw->lock);
-
 	rw->counter++;
-
 	__raw_spin_unlock(&rw->lock);
+	local_irq_restore(flags);
 }
 
 static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
 {
+	unsigned long flags;
+	local_irq_save(flags);
 	__raw_spin_lock(&rw->lock);
-
 	rw->counter--;
-
 	__raw_spin_unlock(&rw->lock);
+	local_irq_restore(flags);
 }
 
-/* write_lock is less trivial. We optimistically grab the lock and check
- * if we surprised any readers. If so we release the lock and wait till
- * they're all gone before trying again
- *
- * Also note that we don't use the _irqsave / _irqrestore suffixes here.
- * If we're called with interrupts enabled and we've got readers (or other
- * writers) in interrupt handlers someone fucked up and we'd dead-lock
- * sooner or later anyway. prumpf */
+static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
+{
+	unsigned long flags;
+ retry:
+	local_irq_save(flags);
+	if (__raw_spin_trylock(&rw->lock)) {
+		rw->counter++;
+		__raw_spin_unlock(&rw->lock);
+		local_irq_restore(flags);
+		return 1;
+	}
+
+	local_irq_restore(flags);
+	/* If write-locked, we fail to acquire the lock */
+	if (rw->counter < 0)
+		return 0;
+
+	/* Wait until we have a realistic chance at the lock */
+	while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
+		cpu_relax();
+
+	goto retry;
+}
 
 static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
 {
+	unsigned long flags;
 retry:
+	local_irq_save(flags);
 	__raw_spin_lock(&rw->lock);
 
-	if(rw->counter != 0) {
-		/* this basically never happens */
+	if (rw->counter != 0) {
 		__raw_spin_unlock(&rw->lock);
+		local_irq_restore(flags);
 
 		while (rw->counter != 0)
 			cpu_relax();
@@ -107,31 +127,35 @@ retry:
 		goto retry;
 	}
 
-	/* got it. now leave without unlocking */
-	rw->counter = -1; /* remember we are locked */
+	rw->counter = -1; /* mark as write-locked */
+	mb();
+	local_irq_restore(flags);
 }
 
-/* write_unlock is absolutely trivial - we don't have to wait for anything */
-
-static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
+static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
 {
 	rw->counter = 0;
 	__raw_spin_unlock(&rw->lock);
 }
 
 static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->lock);
-	if (rw->counter != 0) {
-		/* this basically never happens */
-		__raw_spin_unlock(&rw->lock);
-
-		return 0;
+	unsigned long flags;
+	int result = 0;
+
+	local_irq_save(flags);
+	if (__raw_spin_trylock(&rw->lock)) {
+		if (rw->counter == 0) {
+			rw->counter = -1;
+			result = 1;
+		} else {
+			/* Read-locked. Oh well. */
+			__raw_spin_unlock(&rw->lock);
+		}
 	}
+	local_irq_restore(flags);
 
-	/* got it. now leave without unlocking */
-	rw->counter = -1; /* remember we are locked */
-	return 1;
+	return result;
 }
 
 /*
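The __raw_* routines in this header are not called directly by drivers; they back the generic rwlock API (read_lock(), write_lock_irqsave(), and friends). The sketch below is a hypothetical caller, with my_list, my_item and my_list_lock invented for illustration; it shows why the reader path has to work from interrupt context, and why a writer whose readers run in interrupts must keep local interrupts disabled for the whole write-held section, as the comment added by the patch explains.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical shared list and its lock; only the locking calls matter. */
static LIST_HEAD(my_list);
static DEFINE_RWLOCK(my_list_lock);

struct my_item {
	struct list_head node;
	int val;
};

/* Reader: may be called from interrupt context (e.g. from an irq handler),
 * which is why __raw_read_lock() masks interrupts around the counter's
 * spinlock. */
static int my_list_sum(void)
{
	struct my_item *it;
	int sum = 0;

	read_lock(&my_list_lock);
	list_for_each_entry(it, &my_list, node)
		sum += it->val;
	read_unlock(&my_list_lock);
	return sum;
}

/* Writer: runs in process context. Because readers may run in interrupt
 * context, the writer keeps local interrupts off across the write-held
 * section; otherwise an interrupting reader on this CPU would spin forever
 * on the lock the interrupted writer still holds. */
static void my_list_add(struct my_item *item)
{
	unsigned long flags;

	write_lock_irqsave(&my_list_lock, flags);
	list_add(&item->node, &my_list);
	write_unlock_irqrestore(&my_list_lock, flags);
}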