path: root/kernel/locking/qspinlock.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2019-05-06 16:50:15 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-05-06 16:50:15 -0400
commit	007dc78fea62610bf06829e38f1d8c69b6ea5af6 (patch)
tree	683af90696ed7a237dedd48030bfd649e5822955 /kernel/locking/qspinlock.c
parent	2f1835dffa949f560dfa3ed63c0bfc10944b461c (diff)
parent	d671002be6bdd7f77a771e23bf3e95d1f16775e6 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "Here are the locking changes in this cycle:

   - rwsem unification and simpler micro-optimizations to prepare for
     more intrusive (and more lucrative) scalability improvements in v5.3
     (Waiman Long)

   - Lockdep irq state tracking flag usage cleanups (Frederic Weisbecker)

   - static key improvements (Jakub Kicinski, Peter Zijlstra)

   - misc updates, cleanups and smaller fixes"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (26 commits)
  locking/lockdep: Remove unnecessary unlikely()
  locking/static_key: Don't take sleeping locks in __static_key_slow_dec_deferred()
  locking/static_key: Factor out the fast path of static_key_slow_dec()
  locking/static_key: Add support for deferred static branches
  locking/lockdep: Test all incompatible scenarios at once in check_irq_usage()
  locking/lockdep: Avoid bogus Clang warning
  locking/lockdep: Generate LOCKF_ bit composites
  locking/lockdep: Use expanded masks on find_usage_*() functions
  locking/lockdep: Map remaining magic numbers to lock usage mask names
  locking/lockdep: Move valid_state() inside CONFIG_TRACE_IRQFLAGS && CONFIG_PROVE_LOCKING
  locking/rwsem: Prevent unneeded warning during locking selftest
  locking/rwsem: Optimize rwsem structure for uncontended lock acquisition
  locking/rwsem: Enable lock event counting
  locking/lock_events: Don't show pvqspinlock events on bare metal
  locking/lock_events: Make lock_events available for all archs & other locks
  locking/qspinlock_stat: Introduce generic lockevent_*() counting APIs
  locking/rwsem: Enhance DEBUG_RWSEMS_WARN_ON() macro
  locking/rwsem: Add debug check for __down_read*()
  locking/rwsem: Micro-optimize rwsem_try_read_lock_unqueued()
  locking/rwsem: Move rwsem internal function declarations to rwsem-xadd.h
  ...
Diffstat (limited to 'kernel/locking/qspinlock.c')
-rw-r--r--	kernel/locking/qspinlock.c	8
1 file changed, 4 insertions, 4 deletions
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 5e9247dc2515..e14b32c69639 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -395,7 +395,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 * 0,1,0 -> 0,0,1
 	 */
 	clear_pending_set_locked(lock);
-	qstat_inc(qstat_lock_pending, true);
+	lockevent_inc(lock_pending);
 	return;
 
 	/*
@@ -403,7 +403,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 * queuing.
 	 */
 queue:
-	qstat_inc(qstat_lock_slowpath, true);
+	lockevent_inc(lock_slowpath);
 pv_queue:
 	node = this_cpu_ptr(&qnodes[0].mcs);
 	idx = node->count++;
@@ -419,7 +419,7 @@ pv_queue:
 	 * simple enough.
 	 */
 	if (unlikely(idx >= MAX_NODES)) {
-		qstat_inc(qstat_lock_no_node, true);
+		lockevent_inc(lock_no_node);
 		while (!queued_spin_trylock(lock))
 			cpu_relax();
 		goto release;
@@ -430,7 +430,7 @@ pv_queue:
 	/*
 	 * Keep counts of non-zero index values:
 	 */
-	qstat_inc(qstat_lock_use_node2 + idx - 1, idx);
+	lockevent_cond_inc(lock_use_node2 + idx - 1, idx);
 
 	/*
 	 * Ensure that we increment the head node->count before initialising
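
Note on the conversion above: the qspinlock-specific qstat_inc() counters are replaced by the generic lock event macros introduced in the locking/lock_events commits listed in this merge. As a rough illustration of the pattern (the names and layout below are a simplified sketch, not the kernel's actual lock_events.h), the idea is a flat array of event counters, kept per CPU in the kernel so the hot-path increment needs no atomics, plus a conditional variant whose second argument gates the increment:

/* Sketch only: illustrative, simplified version of the lock event idea. */
enum lock_events {
	lock_pending,		/* pending-bit path taken in the slowpath */
	lock_slowpath,		/* full MCS queuing path taken */
	lock_no_node,		/* out of per-CPU MCS nodes (idx >= MAX_NODES) */
	lock_use_node2,		/* MCS node index 1 used */
	lock_use_node3,		/* MCS node index 2 used */
	lock_use_node4,		/* MCS node index 3 used */
	lockevent_num,		/* number of events */
};

/* The kernel keeps one counter per event per CPU so that counting
 * needs no atomic operations; a plain array stands in for that here. */
static unsigned long lockevents[lockevent_num];

/* Unconditionally count one occurrence of event @ev. */
#define lockevent_inc(ev)		do { lockevents[ev]++; } while (0)

/* Count event @ev only when @cond is non-zero. */
#define lockevent_cond_inc(ev, cond)	do { if (cond) lockevents[ev]++; } while (0)

Because the node-reuse events are consecutive, lockevent_cond_inc(lock_use_node2 + idx - 1, idx) in the last hunk counts nothing for the common idx == 0 case and otherwise records which MCS nesting level was actually used; with lock event counting configured out, these macros compile away so the lock slowpath pays no extra cost.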