author    Linus Torvalds <torvalds@linux-foundation.org>  2015-02-21 13:45:03 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-02-21 13:45:03 -0500
commit    10436cf8812edadcc1813dbda39a69e9965caee6 (patch)
tree      5659ab914ddda91fac03556cc8d38f13fe87e185 /arch/x86/xen
parent    5fbe4c224ce3e2e62bd487158dfd1e89f9ae3e11 (diff)
parent    d6abfdb2022368d8c6c4be3f11a06656601a6cc2 (diff)
Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking fixes from Ingo Molnar:
 "Two fixes: the paravirt spin_unlock() corruption/crash fix, and an
  rtmutex NULL dereference crash fix"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/spinlocks/paravirt: Fix memory corruption on unlock
  locking/rtmutex: Avoid a NULL pointer dereference on deadlock
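The unlock-corruption fix below also switches the re-check of the ticket head from a raw equality test to __tickets_equal(), which ignores the slowpath flag encoded in the head. A minimal userspace sketch of that masked comparison follows; the TICKET_SLOWPATH_FLAG value and the helper body are assumed from the fix series, not shown in this diff:

	#include <stdio.h>
	#include <stdint.h>

	typedef uint8_t __ticket_t;
	#define TICKET_SLOWPATH_FLAG	((__ticket_t)1)	/* assumed: low bit of head */

	/* Ticket heads match if they differ only in the slowpath flag bit. */
	static inline int __tickets_equal(__ticket_t one, __ticket_t two)
	{
		return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
	}

	int main(void)
	{
		/* same ticket, slowpath flag set on one side: still "equal" */
		printf("%d\n", __tickets_equal(0x08, 0x08 | TICKET_SLOWPATH_FLAG));
		/* genuinely different tickets: not equal */
		printf("%d\n", __tickets_equal(0x08, 0x0a));
		return 0;
	}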
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--  arch/x86/xen/spinlock.c | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 23b45eb9a89c..956374c1edbc 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -41,7 +41,7 @@ static u8 zero_stats;
 static inline void check_zero(void)
 {
 	u8 ret;
-	u8 old = ACCESS_ONCE(zero_stats);
+	u8 old = READ_ONCE(zero_stats);
 	if (unlikely(old)) {
 		ret = cmpxchg(&zero_stats, old, 0);
 		/* This ensures only one fellow resets the stat */
@@ -112,6 +112,7 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 	struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
 	int cpu = smp_processor_id();
 	u64 start;
+	__ticket_t head;
 	unsigned long flags;
 
 	/* If kicker interrupts not initialized yet, just spin */
@@ -159,11 +160,15 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 	 */
 	__ticket_enter_slowpath(lock);
 
+	/* make sure enter_slowpath, which is atomic does not cross the read */
+	smp_mb__after_atomic();
+
 	/*
 	 * check again make sure it didn't become free while
 	 * we weren't looking
 	 */
-	if (ACCESS_ONCE(lock->tickets.head) == want) {
+	head = READ_ONCE(lock->tickets.head);
+	if (__tickets_equal(head, want)) {
 		add_stats(TAKEN_SLOW_PICKUP, 1);
 		goto out;
 	}
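Why the new smp_mb__after_atomic() in this hunk matters: the waiter's slowpath-flag store and its re-read of tickets.head must not be reordered, or the waiter can miss an unlock that happened in between and block with nobody left to kick it. A rough userspace analogue of that store-then-load ordering, using C11 atomics (names and values here are illustrative, not the kernel's):

	#include <stdatomic.h>
	#include <stdio.h>

	static _Atomic unsigned char head;      /* stands in for lock->tickets.head */
	static _Atomic unsigned char slowpath;  /* stands in for the slowpath flag */

	static void waiter(unsigned char want)
	{
		/* publish "I am blocking" (the __ticket_enter_slowpath() RMW) */
		atomic_fetch_or_explicit(&slowpath, 1, memory_order_relaxed);

		/* full fence in the role of smp_mb__after_atomic(): the store
		 * above must be visible before the load below is performed */
		atomic_thread_fence(memory_order_seq_cst);

		/* re-check: did the lock become free while we were queueing? */
		if (atomic_load_explicit(&head, memory_order_relaxed) == want)
			printf("lock freed meanwhile; take it, don't block\n");
	}

	int main(void)
	{
		atomic_store(&head, 2);
		waiter(2);
		return 0;
	}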
@@ -204,8 +209,8 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
 		const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);
 
 		/* Make sure we read lock before want */
-		if (ACCESS_ONCE(w->lock) == lock &&
-		    ACCESS_ONCE(w->want) == next) {
+		if (READ_ONCE(w->lock) == lock &&
+		    READ_ONCE(w->want) == next) {
 			add_stats(RELEASED_SLOW_KICKED, 1);
 			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
 			break;
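The kicker-side hunk above relies on READ_ONCE() for the "read lock before want" ordering: volatile accesses are not merged, repeated, or reordered against one another by the compiler. A small sketch of that property; the macro below mirrors the scalar case only (the kernel's READ_ONCE() also handles aggregate types), and the struct name is hypothetical:

	#include <stdint.h>

	/* one untorn load per use; volatile reads keep program order */
	#define READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))

	struct xen_lock_waiting_like { void *lock; uint8_t want; };

	static int should_kick(struct xen_lock_waiting_like *w,
			       void *lock, uint8_t next)
	{
		/* w->lock is read first; w->want only if the lock matched */
		return READ_ONCE(w->lock) == lock && READ_ONCE(w->want) == next;
	}

	int main(void)
	{
		struct xen_lock_waiting_like w = { 0, 3 };
		return should_kick(&w, 0, 3);
	}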