author     Matthew Wilcox <matthew@wil.cx>           2006-09-08 07:43:44 -0400
committer  Matthew Wilcox <willy@parisc-linux.org>   2006-10-04 08:48:17 -0400
commit     65ee8f0a7fc2f2267b983f1f0349acb8f19db6e6
tree       a74a9c7683957545c67777aee644ef14801975a2
parent     56f335c89e28c488b1bfea3e5e697fce805c784d
[PARISC] Enable interrupts while spinning
Use the __raw_spin_lock_flags routine so we can take an interrupt while
spinning. This re-fixes a bug jejb found on 2005-10-20:

CPU0 does a flush_tlb_all, holding the vmlist_lock for write.
CPU1 cats /proc/meminfo, which tries to acquire the vmlist_lock for read.
CPU1 is now spinning with interrupts disabled.
CPU0 tries to execute a smp_call_function to flush the local TLB caches.

This is now a deadlock: CPU1 is spinning with interrupts disabled and
can never receive the IPI (illustrated in the sketch below).
Signed-off-by: Matthew Wilcox <matthew@wil.cx>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
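For context, a minimal sketch of the idea behind a *_lock_flags variant:
spin with the caller's saved interrupt state restored, and only keep
interrupts off once the lock is actually held. This is an illustration
under assumed semantics, not the real ldcw-based PA-RISC implementation;
the helper name spin_lock_flags_sketch is made up, while
__raw_spin_trylock, __raw_spin_is_locked, cpu_relax() and the
local_irq_* helpers are kernel primitives of this era.

/*
 * Illustrative only: while the lock is contended, restore the
 * caller's saved interrupt state so a pending IPI (like CPU0's
 * smp_call_function above) can be delivered; disable interrupts
 * again before retrying, so the lock is always acquired with
 * interrupts off, exactly as the caller asked.
 */
static inline void spin_lock_flags_sketch(raw_spinlock_t *lock,
					  unsigned long flags)
{
	while (!__raw_spin_trylock(lock)) {
		local_irq_restore(flags);	/* open an interrupt window */
		while (__raw_spin_is_locked(lock))
			cpu_relax();		/* wait without banging the lock */
		local_irq_disable();		/* close it before retrying */
	}
}

With a plain __raw_spin_lock(), the entire wait happens inside the
local_irq_save() region, which is exactly the window the deadlock
above needs.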
Diffstat (limited to 'include/asm-parisc/spinlock.h')
-rw-r--r--  include/asm-parisc/spinlock.h | 31
1 files changed, 21 insertions(+), 10 deletions(-)
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index 8980a876cc4e..f3d2090a18dc 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -57,35 +57,42 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
 
 /*
  * Read-write spinlocks, allowing multiple readers but only one writer.
- * The spinlock is held by the writer, preventing any readers or other
- * writers from grabbing the rwlock. Readers use the lock to serialise their
- * access to the counter (which records how many readers currently hold the
- * lock). Linux rwlocks are unfair to writers; they can be starved for
- * an indefinite time by readers. They can also be taken in interrupt context,
- * so we have to disable interrupts when acquiring the spin lock to be sure
- * that an interrupting reader doesn't get an inconsistent view of the lock.
+ * Linux rwlocks are unfair to writers; they can be starved for an indefinite
+ * time by readers. With care, they can also be taken in interrupt context.
+ *
+ * In the PA-RISC implementation, we have a spinlock and a counter.
+ * Readers use the lock to serialise their access to the counter (which
+ * records how many readers currently hold the lock).
+ * Writers hold the spinlock, preventing any readers or other writers from
+ * grabbing the rwlock.
  */
 
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to grab the same read lock */
 static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
 {
 	unsigned long flags;
 	local_irq_save(flags);
-	__raw_spin_lock(&rw->lock);
+	__raw_spin_lock_flags(&rw->lock, flags);
 	rw->counter++;
 	__raw_spin_unlock(&rw->lock);
 	local_irq_restore(flags);
 }
 
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to grab the same read lock */
 static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	unsigned long flags;
 	local_irq_save(flags);
-	__raw_spin_lock(&rw->lock);
+	__raw_spin_lock_flags(&rw->lock, flags);
 	rw->counter--;
 	__raw_spin_unlock(&rw->lock);
 	local_irq_restore(flags);
 }
 
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to grab the same read lock */
 static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
 {
 	unsigned long flags;
@@ -110,12 +117,14 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
 	goto retry;
 }
 
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to read_trylock() this lock */
 static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
 {
 	unsigned long flags;
 retry:
 	local_irq_save(flags);
-	__raw_spin_lock(&rw->lock);
+	__raw_spin_lock_flags(&rw->lock, flags);
 
 	if (rw->counter != 0) {
 		__raw_spin_unlock(&rw->lock);
@@ -138,6 +147,8 @@ static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
 	__raw_spin_unlock(&rw->lock);
 }
 
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to read_trylock() this lock */
 static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
 {
 	unsigned long flags;
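The new comment block describes the scheme in prose, and the diff above
shows only fragments of the writer path. As a rough model (hypothetical
names, not the kernel's actual declarations), the data structure and the
writer's retry loop fit together like this:

/* Rough model of the PA-RISC rwlock scheme: a spinlock guards a
 * reader count.  Readers take the spinlock only long enough to
 * bump the count; a writer keeps the spinlock and retries until
 * no readers remain.  Illustrative, not the kernel's code. */
typedef struct {
	raw_spinlock_t lock;	/* held by writers; serialises counter updates */
	volatile int counter;	/* number of readers currently inside */
} rwlock_model_t;

static inline void write_lock_model(rwlock_model_t *rw)
{
	unsigned long flags;
retry:
	local_irq_save(flags);
	__raw_spin_lock_flags(&rw->lock, flags);
	if (rw->counter != 0) {
		/* readers still active: back off so they can drain */
		__raw_spin_unlock(&rw->lock);
		local_irq_restore(flags);
		goto retry;
	}
	/* spinlock held and counter == 0: the write side is owned
	 * (interrupt-state handling after this point is elided);
	 * __raw_write_unlock() then releases the spinlock */
}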