 -rw-r--r--   Documentation/DocBook/kernel-locking.tmpl |  6
 -rw-r--r--   Documentation/mutex-design.txt            |  3
 -rw-r--r--   include/linux/mutex.h                     |  8
 -rw-r--r--   kernel/mutex.c                            | 23
4 files changed, 23 insertions(+), 17 deletions(-)
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 0b1a3f97f285..a0d479d1e1dd 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -1961,6 +1961,12 @@ machines due to caching.
 </sect1>
 </chapter>
 
+<chapter id="apiref">
+ <title>Mutex API reference</title>
+!Iinclude/linux/mutex.h
+!Ekernel/mutex.c
+</chapter>
+
 <chapter id="references">
  <title>Further reading</title>
 
diff --git a/Documentation/mutex-design.txt b/Documentation/mutex-design.txt
index c91ccc0720fa..38c10fd7f411 100644
--- a/Documentation/mutex-design.txt
+++ b/Documentation/mutex-design.txt
@@ -9,7 +9,7 @@ firstly, there's nothing wrong with semaphores. But if the simpler
 mutex semantics are sufficient for your code, then there are a couple
 of advantages of mutexes:
 
- - 'struct mutex' is smaller on most architectures: .e.g on x86,
+ - 'struct mutex' is smaller on most architectures: E.g. on x86,
   'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
   A smaller structure size means less RAM footprint, and better
   CPU-cache utilization.
@@ -136,3 +136,4 @@ the APIs of 'struct mutex' have been streamlined:
  void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
  int mutex_lock_interruptible_nested(struct mutex *lock,
                                      unsigned int subclass);
+ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
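
A quick usage sketch for the API line added above; only atomic_dec_and_mutex_lock() itself is the real interface, the object, its fields and the teardown helper are invented for illustration:

    /* Sketch only: my_obj, obj->refcnt, obj->lock and obj_teardown()
     * are hypothetical names. */
    static void obj_put(struct my_obj *obj)
    {
        /* Returns 1 with obj->lock held iff the count dropped to zero. */
        if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj->lock)) {
            obj_teardown(obj);
            mutex_unlock(&obj->lock);
            kfree(obj);
        }
    }
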
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 878cab4f5fcc..f363bc8fdc74 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -78,6 +78,14 @@ struct mutex_waiter {
 # include <linux/mutex-debug.h>
 #else
 # define __DEBUG_MUTEX_INITIALIZER(lockname)
+/**
+ * mutex_init - initialize the mutex
+ * @mutex: the mutex to be initialized
+ *
+ * Initialize the mutex to unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
 # define mutex_init(mutex) \
 do { \
 	static struct lock_class_key __key; \
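
A minimal illustration of the initializer documented above; the structure and setup function are made-up names, only mutex_init() comes from the API:

    struct my_dev {
        struct mutex io_lock;   /* protects the hypothetical device state */
    };

    static void my_dev_setup(struct my_dev *dev)
    {
        /* Must run before the mutex is ever locked; initializing an
         * already locked mutex is not allowed. */
        mutex_init(&dev->io_lock);
    }
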
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 4c0b7b3e6d2e..200407c1502f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -36,15 +36,6 @@
 # include <asm/mutex.h>
 #endif
 
-/***
- * mutex_init - initialize the mutex
- * @lock: the mutex to be initialized
- * @key: the lock_class_key for the class; used by mutex lock debugging
- *
- * Initialize the mutex to unlocked state.
- *
- * It is not allowed to initialize an already locked mutex.
- */
 void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
@@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
 static __used noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count);
 
-/***
+/**
  * mutex_lock - acquire the mutex
  * @lock: the mutex to be acquired
  *
@@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock);
 
 static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
-/***
+/**
  * mutex_unlock - release the mutex
  * @lock: the mutex to be released
  *
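
The two comment blocks above describe the usual pairing; a minimal sketch, where my_lock and my_count are illustrative and DEFINE_MUTEX() is the real static initializer:

    static DEFINE_MUTEX(my_lock);
    static int my_count;

    static void my_increment(void)
    {
        mutex_lock(&my_lock);    /* may sleep; not for interrupt context */
        my_count++;
        mutex_unlock(&my_lock);  /* only the lock owner may unlock */
    }
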
@@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count);
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
-/***
- * mutex_lock_interruptible - acquire the mutex, interruptable
+/**
+ * mutex_lock_interruptible - acquire the mutex, interruptible
  * @lock: the mutex to be acquired
  *
  * Lock the mutex like mutex_lock(), and return 0 if the mutex has
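
A hedged sketch of the interruptible variant documented above; my_dev, its io_lock and do_io() are illustrative names:

    static int my_dev_op(struct my_dev *dev)
    {
        /* 0 means the lock is now held; -EINTR means a signal arrived
         * while sleeping and the lock is NOT held. */
        if (mutex_lock_interruptible(&dev->io_lock))
            return -EINTR;

        do_io(dev);
        mutex_unlock(&dev->io_lock);
        return 0;
    }
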
@@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 	return prev == 1;
 }
 
-/***
- * mutex_trylock - try acquire the mutex, without waiting
+/**
+ * mutex_trylock - try to acquire the mutex, without waiting
  * @lock: the mutex to be acquired
  *
  * Try to acquire the mutex atomically. Returns 1 if the mutex
  * has been acquired successfully, and 0 on contention.
  *
  * NOTE: this function follows the spin_trylock() convention, so
- * it is negated to the down_trylock() return values! Be careful
+ * it is negated from the down_trylock() return values! Be careful
  * about this when converting semaphore users to mutexes.
  *
  * This function must not be used in interrupt context. The
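
A short sketch of the trylock convention spelled out above; the device structure and flush_hw() are illustrative:

    static int my_try_flush(struct my_dev *dev)
    {
        /* spin_trylock() convention: non-zero means the lock was taken,
         * 0 means contention -- the opposite of down_trylock(). */
        if (!mutex_trylock(&dev->io_lock))
            return -EBUSY;

        flush_hw(dev);
        mutex_unlock(&dev->io_lock);
        return 0;
    }
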
