author | Manfred Spraul <manfred@colorfullife.com> | 2013-09-30 16:45:06 -0400
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-30 17:31:01 -0400
commit | 6d07b68ce16ae9535955ba2059dedba5309c3ca1 (patch) |
tree | f8eccb1e9ff34ffb41225d247329d7e73148b6fe /ipc |
parent | 5e9d527591421ccdb16acb8c23662231135d8686 (diff) |
ipc/sem.c: optimize sem_lock()
Operations that need access to the whole array must guarantee that there
are no simple operations ongoing. Right now this is achieved by
spin_unlock_wait(sem->lock) on all semaphores.
If complex_count is nonzero, then this spin_unlock_wait() is not
necessary: it was already performed in the past by the thread that
increased complex_count, and even though sem_perm.lock was dropped in
between, no simple operation could have started, because simple
operations cannot start while complex_count is nonzero.
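
The correctness argument above leans on how the single-semaphore fast path of sem_lock() and the whole-array path interlock. As a reading aid, here is a small, deliberately simplified userspace analogue (a sketch, not the kernel implementation): pthread mutexes stand in for the spinlocks, a plain lock/unlock pair models spin_unlock_wait(), and names such as wait_array(), queue_complex() and lock_one() are invented for illustration. In particular, the real sem_lock() fast path uses spin_is_locked() checks and memory barriers rather than taking sem_perm.lock.

```c
/* Userspace sketch only -- not kernel code. Build with: gcc -pthread */
#include <pthread.h>
#include <stdio.h>

#define NSEMS 4

static pthread_mutex_t array_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ sem_perm.lock */
static pthread_mutex_t slot_lock[NSEMS] = {                    /* ~ sem->lock */
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};
static int complex_count;              /* protected by array_lock */

/* ~ sem_wait_array(): wait until all ongoing single-slot ops have finished. */
static void wait_array(void)
{
        int i;

        if (complex_count) {
                /*
                 * Fast path added by this patch: the thread that raised
                 * complex_count already drained every slot_lock, and new
                 * single-slot ops back off while complex_count != 0, so
                 * there is nothing to wait for.
                 */
                return;
        }
        for (i = 0; i < NSEMS; i++) {
                /* lock+unlock models spin_unlock_wait(&sem->lock) */
                pthread_mutex_lock(&slot_lock[i]);
                pthread_mutex_unlock(&slot_lock[i]);
        }
}

/* A complex op is queued: it must drain the per-slot locks (slow path). */
static void queue_complex(void)
{
        pthread_mutex_lock(&array_lock);
        wait_array();                  /* slow: loops over every slot_lock */
        complex_count++;               /* single-slot ops now stay away */
        pthread_mutex_unlock(&array_lock);
}

/* A second complex op arrives while the first is still pending. */
static void another_complex(void)
{
        pthread_mutex_lock(&array_lock);
        wait_array();                  /* fast: complex_count != 0, returns at once */
        pthread_mutex_unlock(&array_lock);
}

static void finish_complex(void)
{
        pthread_mutex_lock(&array_lock);
        complex_count--;
        pthread_mutex_unlock(&array_lock);
}

/* ~ simple path of sem_lock(): only proceed if a) array_lock is free
 * and b) complex_count is 0 -- otherwise the caller must fall back. */
static int lock_one(int idx)
{
        if (pthread_mutex_trylock(&array_lock) != 0)
                return -1;             /* complex op holds the array */
        if (complex_count) {
                pthread_mutex_unlock(&array_lock);
                return -1;             /* complex ops pending: slow path */
        }
        pthread_mutex_lock(&slot_lock[idx]);
        pthread_mutex_unlock(&array_lock);
        return 0;                      /* caller unlocks slot_lock[idx] */
}

int main(void)
{
        queue_complex();               /* pays the full per-slot wait once */
        another_complex();             /* skips it, as in the patch */
        finish_complex();

        if (lock_one(2) == 0) {        /* single-slot op, no complex op pending */
                pthread_mutex_unlock(&slot_lock[2]);
                printf("single-slot op on slot 2 completed\n");
        }
        return 0;
}
```

The point of the sketch is only the ordering: once complex_count is nonzero, a later whole-array caller may skip the per-semaphore draining, which is exactly the check added in the hunk below.
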
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: Mike Galbraith <bitbucket@online.de>
Cc: Rik van Riel <riel@redhat.com>
Reviewed-by: Davidlohr Bueso <davidlohr@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'ipc')
-rw-r--r-- | ipc/sem.c | 8
1 file changed, 8 insertions, 0 deletions
```diff
@@ -257,12 +257,20 @@ static void sem_rcu_free(struct rcu_head *head)
  * Caller must own sem_perm.lock.
  * New simple ops cannot start, because simple ops first check
  *      that sem_perm.lock is free.
+ *      that a) sem_perm.lock is free and b) complex_count is 0.
  */
 static void sem_wait_array(struct sem_array *sma)
 {
         int i;
         struct sem *sem;
 
+        if (sma->complex_count) {
+                /* The thread that increased sma->complex_count waited on
+                 * all sem->lock locks. Thus we don't need to wait again.
+                 */
+                return;
+        }
+
         for (i = 0; i < sma->sem_nsems; i++) {
                 sem = sma->sem_base + i;
                 spin_unlock_wait(&sem->lock);
```