Diffstat (limited to 'include/linux/mutex.h')
-rw-r--r--  include/linux/mutex.h  46
1 file changed, 30 insertions(+), 16 deletions(-)
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 2cb7531e7d7a..4d3bccabbea5 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -18,6 +18,7 @@
 #include <linux/atomic.h>
 #include <asm/processor.h>
 #include <linux/osq_lock.h>
+#include <linux/debug_locks.h>
 
 /*
  * Simple, straightforward mutexes with strict semantics:
@@ -48,16 +49,12 @@
  * locks and tasks (and only those tasks)
  */
 struct mutex {
-	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
-	atomic_t		count;
+	atomic_long_t		owner;
 	spinlock_t		wait_lock;
-	struct list_head	wait_list;
-#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
-	struct task_struct	*owner;
-#endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 	struct optimistic_spin_queue osq; /* Spinner MCS lock */
 #endif
+	struct list_head	wait_list;
 #ifdef CONFIG_DEBUG_MUTEXES
 	void			*magic;
 #endif
@@ -66,6 +63,11 @@ struct mutex {
 #endif
 };
 
+static inline struct task_struct *__mutex_owner(struct mutex *lock)
+{
+	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x03);
+}
+
 /*
  * This is the control structure for tasks blocked on mutex,
  * which resides on the blocked task's kernel stack:
@@ -79,9 +81,20 @@ struct mutex_waiter {
 };
 
 #ifdef CONFIG_DEBUG_MUTEXES
-# include <linux/mutex-debug.h>
+
+#define __DEBUG_MUTEX_INITIALIZER(lockname)	\
+	, .magic = &lockname
+
+extern void mutex_destroy(struct mutex *lock);
+
 #else
+
 # define __DEBUG_MUTEX_INITIALIZER(lockname)
+
+static inline void mutex_destroy(struct mutex *lock) {}
+
+#endif
+
 /**
  * mutex_init - initialize the mutex
  * @mutex: the mutex to be initialized
@@ -90,14 +103,12 @@ struct mutex_waiter {
  *
  * It is not allowed to initialize an already locked mutex.
  */
-# define mutex_init(mutex) \
+#define mutex_init(mutex) \
 do { \
 	static struct lock_class_key __key; \
 	\
 	__mutex_init((mutex), #mutex, &__key); \
 } while (0)
-static inline void mutex_destroy(struct mutex *lock) {}
-#endif
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
@@ -107,7 +118,7 @@ static inline void mutex_destroy(struct mutex *lock) {}
 #endif
 
 #define __MUTEX_INITIALIZER(lockname) \
-		{ .count = ATOMIC_INIT(1) \
+		{ .owner = ATOMIC_LONG_INIT(0) \
 		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
 		, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
 		  __DEBUG_MUTEX_INITIALIZER(lockname) \
@@ -127,7 +138,10 @@ extern void __mutex_init(struct mutex *lock, const char *name,
  */
 static inline int mutex_is_locked(struct mutex *lock)
 {
-	return atomic_read(&lock->count) != 1;
+	/*
+	 * XXX think about spin_is_locked
+	 */
+	return __mutex_owner(lock) != NULL;
 }
 
 /*
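
Note on the new owner encoding (illustration only, not part of the patch): task_struct is word aligned, so the patch can keep the owning task's pointer and a couple of low flag bits in the single atomic_long_t owner word; __mutex_owner() recovers the pointer by masking with ~0x03, and mutex_is_locked() reduces to checking that the masked pointer is non-NULL. A minimal userspace sketch of the same pointer-plus-flags packing follows; the flag names and helpers below are made up for the example (the real flag definitions live outside this header).

#include <stdio.h>

#define FAKE_FLAG_WAITERS	0x01UL	/* hypothetical low-bit flag */
#define FAKE_FLAG_OTHER		0x02UL	/* hypothetical low-bit flag */

struct fake_task { long dummy; };	/* stand-in for task_struct; word aligned */

/* Same masking idea as __mutex_owner(): drop the two low flag bits. */
static struct fake_task *fake_owner(unsigned long word)
{
	return (struct fake_task *)(word & ~0x03UL);
}

int main(void)
{
	static struct fake_task t;
	/* Pack the pointer together with a flag bit, as the owner word does. */
	unsigned long word = (unsigned long)&t | FAKE_FLAG_WAITERS;

	printf("owner=%p flags=%#lx\n",
	       (void *)fake_owner(word), word & 0x03UL);
	return 0;
}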