author | Ingo Molnar <mingo@elte.hu> | 2009-05-06 02:46:27 -0400
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2009-05-06 02:47:26 -0400
commit | 3611dfb8eda847c1c8e1a052f57206f7fddc6a7c (patch) |
tree | 74b5d689be8bfca2ec0fe18961ccaf6953c21dba |
parent | 16c8a10932aef971292c9570eb5f60b5d4e83ed2 (diff) |
parent | a511e3f968c462a55ef58697257f5347c73d306e (diff) |
Merge branch 'core/locking' into perfcounters/core
Merge reason: we moved a mutex.h commit that originated from the perfcounters tree into core/locking - but now merge back that branch to solve a merge artifact and to pick up cleanups of this commit that happened in core/locking.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | include/linux/mutex.h | 24
-rw-r--r-- | kernel/mutex.c | 25
-rw-r--r-- | kernel/rtmutex.c | 8
3 files changed, 29 insertions, 28 deletions
```diff
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 93054fc3635c..878cab4f5fcc 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -150,28 +150,6 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
  */
 extern int mutex_trylock(struct mutex *lock);
 extern void mutex_unlock(struct mutex *lock);
-
-/**
- * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
- * @cnt: the atomic which we are to dec
- * @lock: the mutex to return holding if we dec to 0
- *
- * return true and hold lock if we dec to 0, return false otherwise
- */
-static inline int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
-{
-	/* dec if we can't possibly hit 0 */
-	if (atomic_add_unless(cnt, -1, 1))
-		return 0;
-	/* we might hit 0, so take the lock */
-	mutex_lock(lock);
-	if (!atomic_dec_and_test(cnt)) {
-		/* when we actually did the dec, we didn't hit 0 */
-		mutex_unlock(lock);
-		return 0;
-	}
-	/* we hit 0, and we hold the lock */
-	return 1;
-}
+extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
 #endif
```
```diff
diff --git a/kernel/mutex.c b/kernel/mutex.c
index f415e80a9119..f788a5ace24b 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -471,5 +471,28 @@ int __sched mutex_trylock(struct mutex *lock)
 
 	return ret;
 }
-
 EXPORT_SYMBOL(mutex_trylock);
+
+/**
+ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
+ * @cnt: the atomic which we are to dec
+ * @lock: the mutex to return holding if we dec to 0
+ *
+ * return true and hold lock if we dec to 0, return false otherwise
+ */
+int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
+{
+	/* dec if we can't possibly hit 0 */
+	if (atomic_add_unless(cnt, -1, 1))
+		return 0;
+	/* we might hit 0, so take the lock */
+	mutex_lock(lock);
+	if (!atomic_dec_and_test(cnt)) {
+		/* when we actually did the dec, we didn't hit 0 */
+		mutex_unlock(lock);
+		return 0;
+	}
+	/* we hit 0, and we hold the lock */
+	return 1;
+}
+EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
```
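The change above only moves atomic_dec_and_mutex_lock() out of line; its semantics stay the same: the atomic_add_unless(cnt, -1, 1) fast path decrements without touching the mutex when the count cannot possibly hit zero, and only the final decrement returns 1 with the lock held. Below is a minimal usage sketch of that calling convention, not code from this commit; the my_obj structure, my_obj_teardown() and my_obj_put() are hypothetical names used purely for illustration.

```c
#include <linux/mutex.h>	/* atomic_dec_and_mutex_lock(), struct mutex */

/* Hypothetical refcounted object whose teardown must run under a mutex. */
struct my_obj {
	atomic_t refcount;	/* number of active users */
	struct mutex lock;	/* serializes teardown against other users */
};

static void my_obj_teardown(struct my_obj *obj)
{
	/* hypothetical cleanup of state protected by obj->lock */
}

static void my_obj_put(struct my_obj *obj)
{
	/*
	 * Returns 0 (without the lock) unless this decrement brought the
	 * count to zero; in that case it returns 1 with obj->lock held,
	 * so the teardown below runs serialized against other lock holders.
	 */
	if (!atomic_dec_and_mutex_lock(&obj->refcount, &obj->lock))
		return;

	my_obj_teardown(obj);
	mutex_unlock(&obj->lock);
}
```

Because the helper now lives in kernel/mutex.c and is exported via EXPORT_SYMBOL(atomic_dec_and_mutex_lock), modules can keep using this pattern while the header only carries the extern declaration.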
```diff
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 69d9cb921ffa..013882e83497 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -864,9 +864,9 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
 /**
- * rt_mutex_lock_interruptible_ktime - lock a rt_mutex interruptible
+ * rt_mutex_timed_lock - lock a rt_mutex interruptible
  * the timeout structure is provided
  * by the caller
  *
  * @lock: the rt_mutex to be locked
  * @timeout: timeout structure or NULL (no timeout)
@@ -913,7 +913,7 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 
-/***
+/**
  * rt_mutex_destroy - mark a mutex unusable
  * @lock: the mutex to be destroyed
  *
```