diff options
| author | Jason Low <jason.low2@hp.com> | 2014-07-14 13:27:48 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2014-07-16 07:28:03 -0400 |
| commit | 046a619d8e9746fa4c0e29e8c6b78e16efc008a8 (patch) | |
| tree | 1eb92c69dcc0eb80c7287b89a2ca89b05245f9cb | |
| parent | 37e9562453b813d2ea527bd9531fef2c3c592847 (diff) | |
locking/spinlocks/mcs: Rename optimistic_spin_queue() to optimistic_spin_node()
Currently, the per-cpu nodes structure for the cancellable MCS spinlock is
named "optimistic_spin_queue". However, in a follow-up patch in the series
we will be introducing a new structure that serves as the new "handle" for
the lock. It would make more sense if that structure is named
"optimistic_spin_queue". Additionally, since the current uses of the
"optimistic_spin_queue" structure are "nodes", it might be better if we
rename them to "node" anyway.
This preparatory patch renames all current "optimistic_spin_queue"
to "optimistic_spin_node".
Signed-off-by: Jason Low <jason.low2@hp.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Scott Norton <scott.norton@hp.com>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Waiman Long <waiman.long@hp.com>
Cc: Davidlohr Bueso <davidlohr@hp.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Chris Mason <clm@fb.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Josef Bacik <jbacik@fusionio.com>
Link: http://lkml.kernel.org/r/1405358872-3732-2-git-send-email-jason.low2@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
| -rw-r--r-- | include/linux/mutex.h | 4 | ||||
| -rw-r--r-- | include/linux/rwsem.h | 4 | ||||
| -rw-r--r-- | kernel/locking/mcs_spinlock.c | 24 | ||||
| -rw-r--r-- | kernel/locking/mcs_spinlock.h | 8 |
4 files changed, 20 insertions, 20 deletions
diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 11692dea18aa..885f3f56a77f 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h | |||
| @@ -46,7 +46,7 @@ | |||
| 46 | * - detects multi-task circular deadlocks and prints out all affected | 46 | * - detects multi-task circular deadlocks and prints out all affected |
| 47 | * locks and tasks (and only those tasks) | 47 | * locks and tasks (and only those tasks) |
| 48 | */ | 48 | */ |
| 49 | struct optimistic_spin_queue; | 49 | struct optimistic_spin_node; |
| 50 | struct mutex { | 50 | struct mutex { |
| 51 | /* 1: unlocked, 0: locked, negative: locked, possible waiters */ | 51 | /* 1: unlocked, 0: locked, negative: locked, possible waiters */ |
| 52 | atomic_t count; | 52 | atomic_t count; |
| @@ -56,7 +56,7 @@ struct mutex { | |||
| 56 | struct task_struct *owner; | 56 | struct task_struct *owner; |
| 57 | #endif | 57 | #endif |
| 58 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER | 58 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
| 59 | struct optimistic_spin_queue *osq; /* Spinner MCS lock */ | 59 | struct optimistic_spin_node *osq; /* Spinner MCS lock */ |
| 60 | #endif | 60 | #endif |
| 61 | #ifdef CONFIG_DEBUG_MUTEXES | 61 | #ifdef CONFIG_DEBUG_MUTEXES |
| 62 | const char *name; | 62 | const char *name; |
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 8d79708146aa..ba3f108ddea1 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h | |||
| @@ -16,7 +16,7 @@ | |||
| 16 | 16 | ||
| 17 | #include <linux/atomic.h> | 17 | #include <linux/atomic.h> |
| 18 | 18 | ||
| 19 | struct optimistic_spin_queue; | 19 | struct optimistic_spin_node; |
| 20 | struct rw_semaphore; | 20 | struct rw_semaphore; |
| 21 | 21 | ||
| 22 | #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK | 22 | #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK |
| @@ -33,7 +33,7 @@ struct rw_semaphore { | |||
| 33 | * if the owner is running on the cpu. | 33 | * if the owner is running on the cpu. |
| 34 | */ | 34 | */ |
| 35 | struct task_struct *owner; | 35 | struct task_struct *owner; |
| 36 | struct optimistic_spin_queue *osq; /* spinner MCS lock */ | 36 | struct optimistic_spin_node *osq; /* spinner MCS lock */ |
| 37 | #endif | 37 | #endif |
| 38 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 38 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 39 | struct lockdep_map dep_map; | 39 | struct lockdep_map dep_map; |
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c index 838dc9e00669..e9866f70e828 100644 --- a/kernel/locking/mcs_spinlock.c +++ b/kernel/locking/mcs_spinlock.c | |||
| @@ -14,18 +14,18 @@ | |||
| 14 | * called from interrupt context and we have preemption disabled while | 14 | * called from interrupt context and we have preemption disabled while |
| 15 | * spinning. | 15 | * spinning. |
| 16 | */ | 16 | */ |
| 17 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node); | 17 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node); |
| 18 | 18 | ||
| 19 | /* | 19 | /* |
| 20 | * Get a stable @node->next pointer, either for unlock() or unqueue() purposes. | 20 | * Get a stable @node->next pointer, either for unlock() or unqueue() purposes. |
| 21 | * Can return NULL in case we were the last queued and we updated @lock instead. | 21 | * Can return NULL in case we were the last queued and we updated @lock instead. |
| 22 | */ | 22 | */ |
| 23 | static inline struct optimistic_spin_queue * | 23 | static inline struct optimistic_spin_node * |
| 24 | osq_wait_next(struct optimistic_spin_queue **lock, | 24 | osq_wait_next(struct optimistic_spin_node **lock, |
| 25 | struct optimistic_spin_queue *node, | 25 | struct optimistic_spin_node *node, |
| 26 | struct optimistic_spin_queue *prev) | 26 | struct optimistic_spin_node *prev) |
| 27 | { | 27 | { |
| 28 | struct optimistic_spin_queue *next = NULL; | 28 | struct optimistic_spin_node *next = NULL; |
| 29 | 29 | ||
| 30 | for (;;) { | 30 | for (;;) { |
| 31 | if (*lock == node && cmpxchg(lock, node, prev) == node) { | 31 | if (*lock == node && cmpxchg(lock, node, prev) == node) { |
| @@ -59,10 +59,10 @@ osq_wait_next(struct optimistic_spin_queue **lock, | |||
| 59 | return next; | 59 | return next; |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | bool osq_lock(struct optimistic_spin_queue **lock) | 62 | bool osq_lock(struct optimistic_spin_node **lock) |
| 63 | { | 63 | { |
| 64 | struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node); | 64 | struct optimistic_spin_node *node = this_cpu_ptr(&osq_node); |
| 65 | struct optimistic_spin_queue *prev, *next; | 65 | struct optimistic_spin_node *prev, *next; |
| 66 | 66 | ||
| 67 | node->locked = 0; | 67 | node->locked = 0; |
| 68 | node->next = NULL; | 68 | node->next = NULL; |
| @@ -149,10 +149,10 @@ unqueue: | |||
| 149 | return false; | 149 | return false; |
| 150 | } | 150 | } |
| 151 | 151 | ||
| 152 | void osq_unlock(struct optimistic_spin_queue **lock) | 152 | void osq_unlock(struct optimistic_spin_node **lock) |
| 153 | { | 153 | { |
| 154 | struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node); | 154 | struct optimistic_spin_node *node = this_cpu_ptr(&osq_node); |
| 155 | struct optimistic_spin_queue *next; | 155 | struct optimistic_spin_node *next; |
| 156 | 156 | ||
| 157 | /* | 157 | /* |
| 158 | * Fast path for the uncontended case. | 158 | * Fast path for the uncontended case. |
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h index a2dbac4aca6b..c99dc0052f49 100644 --- a/kernel/locking/mcs_spinlock.h +++ b/kernel/locking/mcs_spinlock.h | |||
| @@ -118,12 +118,12 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node) | |||
| 118 | * mutex_lock()/rwsem_down_{read,write}() etc. | 118 | * mutex_lock()/rwsem_down_{read,write}() etc. |
| 119 | */ | 119 | */ |
| 120 | 120 | ||
| 121 | struct optimistic_spin_queue { | 121 | struct optimistic_spin_node { |
| 122 | struct optimistic_spin_queue *next, *prev; | 122 | struct optimistic_spin_node *next, *prev; |
| 123 | int locked; /* 1 if lock acquired */ | 123 | int locked; /* 1 if lock acquired */ |
| 124 | }; | 124 | }; |
| 125 | 125 | ||
| 126 | extern bool osq_lock(struct optimistic_spin_queue **lock); | 126 | extern bool osq_lock(struct optimistic_spin_node **lock); |
| 127 | extern void osq_unlock(struct optimistic_spin_queue **lock); | 127 | extern void osq_unlock(struct optimistic_spin_node **lock); |
| 128 | 128 | ||
| 129 | #endif /* __LINUX_MCS_SPINLOCK_H */ | 129 | #endif /* __LINUX_MCS_SPINLOCK_H */ |
