author    Linus Torvalds <torvalds@linux-foundation.org>  2019-05-06 16:50:15 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-05-06 16:50:15 -0400
commit    007dc78fea62610bf06829e38f1d8c69b6ea5af6 (patch)
tree      683af90696ed7a237dedd48030bfd649e5822955 /kernel/locking/rwsem.c
parent    2f1835dffa949f560dfa3ed63c0bfc10944b461c (diff)
parent    d671002be6bdd7f77a771e23bf3e95d1f16775e6 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "Here are the locking changes in this cycle:

   - rwsem unification and simpler micro-optimizations to prepare for
     more intrusive (and more lucrative) scalability improvements in
     v5.3 (Waiman Long)

   - Lockdep irq state tracking flag usage cleanups (Frederic
     Weisbecker)

   - static key improvements (Jakub Kicinski, Peter Zijlstra)

   - misc updates, cleanups and smaller fixes"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (26 commits)
  locking/lockdep: Remove unnecessary unlikely()
  locking/static_key: Don't take sleeping locks in __static_key_slow_dec_deferred()
  locking/static_key: Factor out the fast path of static_key_slow_dec()
  locking/static_key: Add support for deferred static branches
  locking/lockdep: Test all incompatible scenarios at once in check_irq_usage()
  locking/lockdep: Avoid bogus Clang warning
  locking/lockdep: Generate LOCKF_ bit composites
  locking/lockdep: Use expanded masks on find_usage_*() functions
  locking/lockdep: Map remaining magic numbers to lock usage mask names
  locking/lockdep: Move valid_state() inside CONFIG_TRACE_IRQFLAGS && CONFIG_PROVE_LOCKING
  locking/rwsem: Prevent unneeded warning during locking selftest
  locking/rwsem: Optimize rwsem structure for uncontended lock acquisition
  locking/rwsem: Enable lock event counting
  locking/lock_events: Don't show pvqspinlock events on bare metal
  locking/lock_events: Make lock_events available for all archs & other locks
  locking/qspinlock_stat: Introduce generic lockevent_*() counting APIs
  locking/rwsem: Enhance DEBUG_RWSEMS_WARN_ON() macro
  locking/rwsem: Add debug check for __down_read*()
  locking/rwsem: Micro-optimize rwsem_try_read_lock_unqueued()
  locking/rwsem: Move rwsem internal function declarations to rwsem-xadd.h
  ...
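The rwsem hunks below all delete the same thing: the rwsem_set_owner()/rwsem_set_reader_owned() (and matching clear) calls in the top-level API wrappers. In this series that bookkeeping moves into the low-level __down_read()/__down_write() family in kernel/locking/rwsem.h, so every acquisition path records ownership exactly once. A minimal sketch of the resulting reader fast path, assuming the helper names and count semantics of this era (the authoritative version lives in kernel/locking/rwsem.h):

/*
 * Sketch only: simplified from the post-series kernel/locking/rwsem.h.
 * Owner tracking now happens inside the low-level acquire helper, which
 * is why down_read() in the diff below no longer calls
 * rwsem_set_reader_owned() itself.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
		rwsem_down_read_failed(sem);	/* slow path on contention */
	rwsem_set_reader_owned(sem);		/* moved here from down_read() */
}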
Diffstat (limited to 'kernel/locking/rwsem.c')
-rw-r--r--  kernel/locking/rwsem.c  25
1 file changed, 4 insertions(+), 21 deletions(-)
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index e586f0d03ad3..ccbf18f560ff 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -24,7 +24,6 @@ void __sched down_read(struct rw_semaphore *sem)
 	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
 
 	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
-	rwsem_set_reader_owned(sem);
 }
 
 EXPORT_SYMBOL(down_read);
@@ -39,7 +38,6 @@ int __sched down_read_killable(struct rw_semaphore *sem)
 		return -EINTR;
 	}
 
-	rwsem_set_reader_owned(sem);
 	return 0;
 }
 
@@ -52,10 +50,8 @@ int down_read_trylock(struct rw_semaphore *sem)
 {
 	int ret = __down_read_trylock(sem);
 
-	if (ret == 1) {
+	if (ret == 1)
 		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
-		rwsem_set_reader_owned(sem);
-	}
 	return ret;
 }
 
@@ -70,7 +66,6 @@ void __sched down_write(struct rw_semaphore *sem)
 	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
 
 	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
-	rwsem_set_owner(sem);
 }
 
 EXPORT_SYMBOL(down_write);
@@ -88,7 +83,6 @@ int __sched down_write_killable(struct rw_semaphore *sem)
 		return -EINTR;
 	}
 
-	rwsem_set_owner(sem);
 	return 0;
 }
 
@@ -101,10 +95,8 @@ int down_write_trylock(struct rw_semaphore *sem)
 {
 	int ret = __down_write_trylock(sem);
 
-	if (ret == 1) {
+	if (ret == 1)
 		rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
-		rwsem_set_owner(sem);
-	}
 
 	return ret;
 }
@@ -117,9 +109,7 @@ EXPORT_SYMBOL(down_write_trylock);
 void up_read(struct rw_semaphore *sem)
 {
 	rwsem_release(&sem->dep_map, 1, _RET_IP_);
-	DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED));
 
-	rwsem_clear_reader_owned(sem);
 	__up_read(sem);
 }
 
@@ -131,9 +121,7 @@ EXPORT_SYMBOL(up_read);
 void up_write(struct rw_semaphore *sem)
 {
 	rwsem_release(&sem->dep_map, 1, _RET_IP_);
-	DEBUG_RWSEMS_WARN_ON(sem->owner != current);
 
-	rwsem_clear_owner(sem);
 	__up_write(sem);
 }
 
@@ -145,9 +133,7 @@ EXPORT_SYMBOL(up_write);
 void downgrade_write(struct rw_semaphore *sem)
 {
 	lock_downgrade(&sem->dep_map, _RET_IP_);
-	DEBUG_RWSEMS_WARN_ON(sem->owner != current);
 
-	rwsem_set_reader_owned(sem);
 	__downgrade_write(sem);
 }
 
@@ -161,7 +147,6 @@ void down_read_nested(struct rw_semaphore *sem, int subclass)
 	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
 
 	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
-	rwsem_set_reader_owned(sem);
 }
 
 EXPORT_SYMBOL(down_read_nested);
@@ -172,7 +157,6 @@ void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
 	rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
 
 	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
-	rwsem_set_owner(sem);
 }
 
 EXPORT_SYMBOL(_down_write_nest_lock);
@@ -193,7 +177,6 @@ void down_write_nested(struct rw_semaphore *sem, int subclass)
 	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
 
 	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
-	rwsem_set_owner(sem);
 }
 
 EXPORT_SYMBOL(down_write_nested);
@@ -208,7 +191,6 @@ int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
 		return -EINTR;
 	}
 
-	rwsem_set_owner(sem);
 	return 0;
 }
 
@@ -216,7 +198,8 @@ EXPORT_SYMBOL(down_write_killable_nested);
 
 void up_read_non_owner(struct rw_semaphore *sem)
 {
-	DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED));
+	DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED),
+			     sem);
 	__up_read(sem);
 }
 
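The final hunk is the one visible piece of the "locking/rwsem: Enhance DEBUG_RWSEMS_WARN_ON() macro" commit: the macro grows a second argument so a triggered warning can dump the state of the offending semaphore rather than just the failed condition. A hedged sketch of the two-argument form, cut down from the definition in kernel/locking/rwsem.h of this series:

/*
 * Sketch only: a simplified version of the enhanced macro. The in-tree
 * definition additionally honours debug_locks_silent, prints sem->count
 * and whether sem->wait_list is empty, and calls debug_locks_off().
 */
#define DEBUG_RWSEMS_WARN_ON(c, sem)					\
	WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): owner = 0x%lx, curr = 0x%lx\n", \
		  #c, (unsigned long)(sem)->owner, (unsigned long)current)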