diff options
| -rw-r--r-- | ipc/sem.c | 18 |
1 file changed, 14 insertions, 4 deletions
| @@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head) | |||
| 253 | } | 253 | } |
| 254 | 254 | ||
| 255 | /* | 255 | /* |
| 256 | * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they | ||
| 257 | * are only control barriers. | ||
| 258 | * The code must pair with spin_unlock(&sem->lock) or | ||
| 259 | * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient. | ||
| 260 | * | ||
| 261 | * smp_rmb() is sufficient, as writes cannot pass the control barrier. | ||
| 262 | */ | ||
| 263 | #define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb() | ||
| 264 | |||
| 265 | /* | ||
| 256 | * Wait until all currently ongoing simple ops have completed. | 266 | * Wait until all currently ongoing simple ops have completed. |
| 257 | * Caller must own sem_perm.lock. | 267 | * Caller must own sem_perm.lock. |
| 258 | * New simple ops cannot start, because simple ops first check | 268 | * New simple ops cannot start, because simple ops first check |
| @@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma) | |||
| 275 | sem = sma->sem_base + i; | 285 | sem = sma->sem_base + i; |
| 276 | spin_unlock_wait(&sem->lock); | 286 | spin_unlock_wait(&sem->lock); |
| 277 | } | 287 | } |
| 288 | ipc_smp_acquire__after_spin_is_unlocked(); | ||
| 278 | } | 289 | } |
| 279 | 290 | ||
| 280 | /* | 291 | /* |
| @@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, | |||
| 327 | /* Then check that the global lock is free */ | 338 | /* Then check that the global lock is free */ |
| 328 | if (!spin_is_locked(&sma->sem_perm.lock)) { | 339 | if (!spin_is_locked(&sma->sem_perm.lock)) { |
| 329 | /* | 340 | /* |
| 330 | * The ipc object lock check must be visible on all | 341 | * We need a memory barrier with acquire semantics, |
| 331 | * cores before rechecking the complex count. Otherwise | 342 | * otherwise we can race with another thread that does: |
| 332 | * we can race with another thread that does: | ||
| 333 | * complex_count++; | 343 | * complex_count++; |
| 334 | * spin_unlock(sem_perm.lock); | 344 | * spin_unlock(sem_perm.lock); |
| 335 | */ | 345 | */ |
| 336 | smp_rmb(); | 346 | ipc_smp_acquire__after_spin_is_unlocked(); |
| 337 | 347 | ||
| 338 | /* | 348 | /* |
| 339 | * Now repeat the test of complex_count: | 349 | * Now repeat the test of complex_count: |
