path: root/ipc/sem.c
author	Manfred Spraul <manfred@colorfullife.com>	2015-08-14 18:35:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-08-14 18:56:32 -0400
commit	3ed1f8a99d70ea1cd1508910eb107d0edcae5009 (patch)
tree	e969912641af3d1095025d90d55dc33d9af2a10b /ipc/sem.c
parent	7f6bf39bbdd1dcccd103ba7dce8496a8e72e7df4 (diff)
ipc/sem.c: update/correct memory barriers
sem_lock() did not properly pair memory barriers: !spin_is_locked() and spin_unlock_wait() are both only control barriers. The code needs an acquire barrier, otherwise the CPU might perform read operations before the lock test.

As no such primitive exists inside <include/spinlock.h>, and since it seems no one wants another primitive, the code creates a local primitive within ipc/sem.c.

With regards to -stable: the change to sem_wait_array() is a bugfix; the change to sem_lock() is a nop (just a preprocessor redefinition to improve readability). The bugfix is necessary for all kernels that use sem_wait_array() (i.e. starting from 3.10).

Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Reported-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Kirill Tkhai <ktkhai@parallels.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: <stable@vger.kernel.org>	[3.10+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
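To make the race concrete, here is a condensed sketch of the two paths involved. It is heavily abridged from ipc/sem.c (error handling, the earlier complex_count check, and the slow-path fallback are omitted), so treat it as an illustration of the ordering problem rather than the exact kernel code:

	/* CPU 1: sem_lock() fast path for a single-semaphore operation */
	spin_lock(&sem->lock);
	if (!spin_is_locked(&sma->sem_perm.lock)) {
		/*
		 * !spin_is_locked() is only a control barrier: without an
		 * acquire barrier, the load of complex_count below may be
		 * satisfied before the lock test and observe a stale 0.
		 */
		ipc_smp_acquire__after_spin_is_unlocked();
		if (sma->complex_count == 0)
			return sops->sem_num;	/* per-semaphore locking is safe */
	}

	/* CPU 2: a task starting a complex operation */
	spin_lock(&sma->sem_perm.lock);
	sma->complex_count++;
	spin_unlock(&sma->sem_perm.lock);	/* pairs with the acquire above */

Without ipc_smp_acquire__after_spin_is_unlocked() (an smp_rmb() after the lock test), CPU 1 could see sem_perm.lock as unlocked yet still use a complex_count value that was read before CPU 2's increment became visible. The barrier added to sem_wait_array() provides the same pairing against the simple ops' spin_unlock(&sem->lock).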
Diffstat (limited to 'ipc/sem.c')
-rw-r--r--	ipc/sem.c	18
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/ipc/sem.c b/ipc/sem.c
index 178f303deea5..b471e5a3863d 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head)
 }
 
 /*
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
+ * are only control barriers.
+ * The code must pair with spin_unlock(&sem->lock) or
+ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+#define ipc_smp_acquire__after_spin_is_unlocked()	smp_rmb()
+
+/*
  * Wait until all currently ongoing simple ops have completed.
  * Caller must own sem_perm.lock.
  * New simple ops cannot start, because simple ops first check
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
 		sem = sma->sem_base + i;
 		spin_unlock_wait(&sem->lock);
 	}
+	ipc_smp_acquire__after_spin_is_unlocked();
 }
 
 /*
@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 		/* Then check that the global lock is free */
 		if (!spin_is_locked(&sma->sem_perm.lock)) {
 			/*
-			 * The ipc object lock check must be visible on all
-			 * cores before rechecking the complex count. Otherwise
-			 * we can race with another thread that does:
+			 * We need a memory barrier with acquire semantics,
+			 * otherwise we can race with another thread that does:
 			 *	complex_count++;
 			 *	spin_unlock(sem_perm.lock);
 			 */
-			smp_rmb();
+			ipc_smp_acquire__after_spin_is_unlocked();
 
 			/*
 			 * Now repeat the test of complex_count: