author | Davidlohr Bueso <dave@stgolabs.net> | 2015-09-14 03:37:22 -0400
committer | Ingo Molnar <mingo@kernel.org> | 2015-09-18 03:27:29 -0400
commit | 6e1e5196975fb7ecc501b3fe1075b77aea2b7839 (patch)
tree | 42ff6a299f0b1d30da4d9c992ba438c883aac9ff /kernel/locking/qrwlock.c
parent | e58cdf585a38412f9f56c512d20e8e12637d9892 (diff)
locking/qrwlock: Rename ->lock to ->wait_lock
... trivial, but reads a little nicer when we name our
actual primitive 'lock'.
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <Waiman.Long@hpe.com>
Link: http://lkml.kernel.org/r/1442216244-4409-1-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking/qrwlock.c')
-rw-r--r-- | kernel/locking/qrwlock.c | 8
1 file changed, 4 insertions, 4 deletions
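Note: the diffstat above is limited to kernel/locking/qrwlock.c; the field being renamed is declared in include/asm-generic/qrwlock_types.h, which the same commit updates outside this diffstat. As a rough, illustrative sketch (assuming the generic qrwlock layout of the time, not part of the diff shown below), the type reads roughly as follows after the rename:

/* Sketch of include/asm-generic/qrwlock_types.h after this commit
 * (illustrative only; that file is outside the diffstat shown here). */
typedef struct qrwlock {
	atomic_t		cnts;		/* reader count and writer-pending state */
	arch_spinlock_t		wait_lock;	/* was ->lock: serializes queued waiters */
} arch_rwlock_t;

#define __ARCH_RW_LOCK_UNLOCKED {			\
	.cnts = ATOMIC_INIT(0),				\
	.wait_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
}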
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index f17a3e3b3550..fec082338668 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -86,7 +86,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
 	/*
 	 * Put the reader into the wait queue
 	 */
-	arch_spin_lock(&lock->lock);
+	arch_spin_lock(&lock->wait_lock);
 
 	/*
 	 * The ACQUIRE semantics of the following spinning code ensure
@@ -99,7 +99,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
 	/*
 	 * Signal the next one in queue to become queue head
 	 */
-	arch_spin_unlock(&lock->lock);
+	arch_spin_unlock(&lock->wait_lock);
 }
 EXPORT_SYMBOL(queued_read_lock_slowpath);
 
@@ -112,7 +112,7 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 	u32 cnts;
 
 	/* Put the writer into the wait queue */
-	arch_spin_lock(&lock->lock);
+	arch_spin_lock(&lock->wait_lock);
 
 	/* Try to acquire the lock directly if no reader is present */
 	if (!atomic_read(&lock->cnts) &&
@@ -144,6 +144,6 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 		cpu_relax_lowlatency();
 	}
 unlock:
-	arch_spin_unlock(&lock->lock);
+	arch_spin_unlock(&lock->wait_lock);
 }
 EXPORT_SYMBOL(queued_write_lock_slowpath);