author     Waiman Long <longman@redhat.com>    2019-04-04 13:43:18 -0400
committer  Ingo Molnar <mingo@kernel.org>      2019-04-10 04:56:05 -0400
commit     bf20616f46e536fe8affed6f138db4b3040b55a6 (patch)
tree       9be354c880ec9a4fbe38e9b184d74bf6ed39852b
parent     fb346fd9fc081c3d978c3f3d26d39334527a2662 (diff)
locking/lock_events: Don't show pvqspinlock events on bare metal
On bare metal, the pvqspinlock event counts will always be 0, so there
is no point in creating their corresponding debugfs files. Skip them in
that case.

Signed-off-by: Waiman Long <longman@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Will Deacon <will.deacon@arm.com>
Link: http://lkml.kernel.org/r/20190404174320.22416-10-longman@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
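[Editor's illustration] To make the filtering concrete outside the kernel, here is a minimal user-space sketch of the same memcmp()-based "pv_" prefix test the patch adds. The sample event names are loosely modeled on the lock event counters and the hard-coded pv_on flag stands in for the pv_is_native_spin_unlock() check; both are illustrative assumptions, not the kernel's own data.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* Hypothetical sample of event names; only the "pv_" prefix matters here. */
static const char *names[] = {
	"lock_pending", "lock_slowpath", "pv_hash_hops", "pv_kick_wake",
};

/* Same test the patch adds: drop "pv_" events when PV locks are not in use. */
static bool skip_event(const char *name, bool pv_on)
{
	return !pv_on && !memcmp(name, "pv_", 3);
}

int main(void)
{
	bool pv_on = false;	/* pretend we are on bare metal */

	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		if (skip_event(names[i], pv_on))
			continue;
		printf("would create debugfs file: %s\n", names[i]);
	}
	return 0;
}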
-rw-r--r--  kernel/locking/lock_events.c | 28
1 file changed, 27 insertions(+), 1 deletion(-)
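[Editor's illustration] For context on how the debugfs files that do get created are typically consumed, below is a minimal user-space sketch that reads one counter. The /sys/kernel/debug mount point, the lock_event_counts directory name, and the lock_pending event name are assumptions drawn from this file and its series; adjust them to the running kernel. Reads require root, since the files are created with mode 0400.

#include <stdio.h>

int main(void)
{
	/* Assumed path: debugfs mounted at /sys/kernel/debug and the
	 * directory created by lock_events.c named "lock_event_counts". */
	const char *path = "/sys/kernel/debug/lock_event_counts/lock_pending";
	unsigned long long count;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%llu", &count) == 1)
		printf("%s = %llu\n", path, count);
	fclose(f);
	return 0;
}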
diff --git a/kernel/locking/lock_events.c b/kernel/locking/lock_events.c
index 71c36d1fb834..fa2c2f951c6b 100644
--- a/kernel/locking/lock_events.c
+++ b/kernel/locking/lock_events.c
@@ -115,6 +115,29 @@ static const struct file_operations fops_lockevent = {
 	.llseek = default_llseek,
 };
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#include <asm/paravirt.h>
+
+static bool __init skip_lockevent(const char *name)
+{
+	static int pv_on __initdata = -1;
+
+	if (pv_on < 0)
+		pv_on = !pv_is_native_spin_unlock();
+	/*
+	 * Skip PV qspinlock events on bare metal.
+	 */
+	if (!pv_on && !memcmp(name, "pv_", 3))
+		return true;
+	return false;
+}
+#else
+static inline bool skip_lockevent(const char *name)
+{
+	return false;
+}
+#endif
+
 /*
  * Initialize debugfs for the locking event counts.
  */
@@ -133,10 +156,13 @@ static int __init init_lockevent_counts(void)
 	 * root is allowed to do the read/write to limit impact to system
 	 * performance.
 	 */
-	for (i = 0; i < lockevent_num; i++)
+	for (i = 0; i < lockevent_num; i++) {
+		if (skip_lockevent(lockevent_names[i]))
+			continue;
 		if (!debugfs_create_file(lockevent_names[i], 0400, d_counts,
 					 (void *)(long)i, &fops_lockevent))
 			goto fail_undo;
+	}
 
 	if (!debugfs_create_file(lockevent_names[LOCKEVENT_reset_cnts], 0200,
 				 d_counts, (void *)(long)LOCKEVENT_reset_cnts,