Diffstat (limited to 'kernel/locking/qspinlock_stat.h')
-rw-r--r--	kernel/locking/qspinlock_stat.h	300
1 file changed, 300 insertions, 0 deletions
diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h
new file mode 100644
index 000000000000..640dcecdd1df
--- /dev/null
+++ b/kernel/locking/qspinlock_stat.h
@@ -0,0 +1,300 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */

/*
 * When queued spinlock statistical counters are enabled, the following
 * debugfs files will be created for reporting the counter values:
 *
 * <debugfs>/qlockstat/
 *   pv_hash_hops       - average # of hops per hashing operation
 *   pv_kick_unlock     - # of vCPU kicks issued at unlock time
 *   pv_kick_wake       - # of vCPU kicks used for computing pv_latency_wake
 *   pv_latency_kick    - average latency (ns) of vCPU kick operation
 *   pv_latency_wake    - average latency (ns) from vCPU kick to wakeup
 *   pv_lock_stealing   - # of lock stealing operations
 *   pv_spurious_wakeup - # of spurious wakeups
 *   pv_wait_again      - # of vCPU waits that happened after a vCPU kick
 *   pv_wait_early      - # of early vCPU waits
 *   pv_wait_head       - # of vCPU waits at the queue head
 *   pv_wait_node       - # of vCPU waits at a non-head queue node
 *
 * Writing to the "reset_counters" file will reset all the above counter
 * values.
 *
 * These statistical counters are implemented as per-cpu variables which are
 * summed and computed whenever the corresponding debugfs files are read. This
 * minimizes added overhead, making the counters usable even in a production
 * environment.
 *
 * There may be a slight difference between pv_kick_wake and pv_kick_unlock.
 */
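
/*
 * For illustration only (the value shown is made up, and debugfs is
 * assumed to be mounted at /sys/kernel/debug), a typical session might
 * look like:
 *
 *	# cat /sys/kernel/debug/qlockstat/pv_kick_unlock
 *	34218
 *	# echo 1 > /sys/kernel/debug/qlockstat/reset_counters
 */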
enum qlock_stats {
	qstat_pv_hash_hops,
	qstat_pv_kick_unlock,
	qstat_pv_kick_wake,
	qstat_pv_latency_kick,
	qstat_pv_latency_wake,
	qstat_pv_lock_stealing,
	qstat_pv_spurious_wakeup,
	qstat_pv_wait_again,
	qstat_pv_wait_early,
	qstat_pv_wait_head,
	qstat_pv_wait_node,
	qstat_num,	/* Total number of statistical counters */
	qstat_reset_cnts = qstat_num,
};
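
/*
 * Note that qstat_reset_cnts aliases qstat_num: "reset_counters" gets a
 * slot in the qstat_names[] array below without consuming a per-cpu
 * counter of its own.
 */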
57 | |||
58 | #ifdef CONFIG_QUEUED_LOCK_STAT | ||
59 | /* | ||
60 | * Collect pvqspinlock statistics | ||
61 | */ | ||
62 | #include <linux/debugfs.h> | ||
63 | #include <linux/sched.h> | ||
64 | #include <linux/fs.h> | ||
65 | |||
static const char * const qstat_names[qstat_num + 1] = {
	[qstat_pv_hash_hops]       = "pv_hash_hops",
	[qstat_pv_kick_unlock]     = "pv_kick_unlock",
	[qstat_pv_kick_wake]       = "pv_kick_wake",
	[qstat_pv_spurious_wakeup] = "pv_spurious_wakeup",
	[qstat_pv_latency_kick]    = "pv_latency_kick",
	[qstat_pv_latency_wake]    = "pv_latency_wake",
	[qstat_pv_lock_stealing]   = "pv_lock_stealing",
	[qstat_pv_wait_again]      = "pv_wait_again",
	[qstat_pv_wait_early]      = "pv_wait_early",
	[qstat_pv_wait_head]       = "pv_wait_head",
	[qstat_pv_wait_node]       = "pv_wait_node",
	[qstat_reset_cnts]         = "reset_counters",
};
80 | |||
81 | /* | ||
82 | * Per-cpu counters | ||
83 | */ | ||
84 | static DEFINE_PER_CPU(unsigned long, qstats[qstat_num]); | ||
85 | static DEFINE_PER_CPU(u64, pv_kick_time); | ||
86 | |||
87 | /* | ||
88 | * Function to read and return the qlock statistical counter values | ||
89 | * | ||
90 | * The following counters are handled specially: | ||
91 | * 1. qstat_pv_latency_kick | ||
92 | * Average kick latency (ns) = pv_latency_kick/pv_kick_unlock | ||
93 | * 2. qstat_pv_latency_wake | ||
94 | * Average wake latency (ns) = pv_latency_wake/pv_kick_wake | ||
95 | * 3. qstat_pv_hash_hops | ||
96 | * Average hops/hash = pv_hash_hops/pv_kick_unlock | ||
97 | */ | ||
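/*
 * As a made-up worked example: if pv_latency_kick sums to 1000000 ns and
 * pv_kick_unlock sums to 200 kicks across all CPUs, reading the
 * pv_latency_kick file reports 1000000 / 200 = 5000 (ns per kick).
 */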
static ssize_t qstat_read(struct file *file, char __user *user_buf,
			  size_t count, loff_t *ppos)
{
	char buf[64];
	int cpu, counter, len;
	u64 stat = 0, kicks = 0;

	/*
	 * Get the counter ID stored in file->f_inode->i_private
	 */
	if (!file->f_inode) {
		WARN_ON_ONCE(1);
		return -EBADF;
	}
	counter = (long)(file->f_inode->i_private);

	if (counter >= qstat_num)
		return -EBADF;

	for_each_possible_cpu(cpu) {
		stat += per_cpu(qstats[counter], cpu);
		/*
		 * Some counters also need the matching kick count
		 * summed for computing an average.
		 */
		switch (counter) {

		case qstat_pv_latency_kick:
		case qstat_pv_hash_hops:
			kicks += per_cpu(qstats[qstat_pv_kick_unlock], cpu);
			break;

		case qstat_pv_latency_wake:
			kicks += per_cpu(qstats[qstat_pv_kick_wake], cpu);
			break;
		}
	}
134 | |||
135 | if (counter == qstat_pv_hash_hops) { | ||
136 | u64 frac; | ||
137 | |||
138 | frac = 100ULL * do_div(stat, kicks); | ||
139 | frac = DIV_ROUND_CLOSEST_ULL(frac, kicks); | ||
140 | |||
141 | /* | ||
142 | * Return a X.XX decimal number | ||
143 | */ | ||
144 | len = snprintf(buf, sizeof(buf) - 1, "%llu.%02llu\n", stat, frac); | ||
145 | } else { | ||
146 | /* | ||
147 | * Round to the nearest ns | ||
148 | */ | ||
149 | if ((counter == qstat_pv_latency_kick) || | ||
150 | (counter == qstat_pv_latency_wake)) { | ||
151 | stat = 0; | ||
152 | if (kicks) | ||
153 | stat = DIV_ROUND_CLOSEST_ULL(stat, kicks); | ||
154 | } | ||
155 | len = snprintf(buf, sizeof(buf) - 1, "%llu\n", stat); | ||
156 | } | ||
157 | |||
158 | return simple_read_from_buffer(user_buf, count, ppos, buf, len); | ||
159 | } | ||
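
/*
 * Worked example (made-up numbers) for the pv_hash_hops path above: with
 * a summed hop count of 510 over 500 kicks, do_div() leaves stat = 1 with
 * remainder 10, and frac = DIV_ROUND_CLOSEST_ULL(100 * 10, 500) = 2, so
 * the file reports "1.02".
 */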
160 | |||
161 | /* | ||
162 | * Function to handle write request | ||
163 | * | ||
164 | * When counter = reset_cnts, reset all the counter values. | ||
165 | * Since the counter updates aren't atomic, the resetting is done twice | ||
166 | * to make sure that the counters are very likely to be all cleared. | ||
167 | */ | ||
static ssize_t qstat_write(struct file *file, const char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	int cpu;

	/*
	 * Get the counter ID stored in file->f_inode->i_private
	 */
	if (!file->f_inode) {
		WARN_ON_ONCE(1);
		return -EBADF;
	}
	if ((long)(file->f_inode->i_private) != qstat_reset_cnts)
		return count;

	for_each_possible_cpu(cpu) {
		int i;
		unsigned long *ptr = per_cpu_ptr(qstats, cpu);

		for (i = 0; i < qstat_num; i++)
			WRITE_ONCE(ptr[i], 0);
		for (i = 0; i < qstat_num; i++)
			WRITE_ONCE(ptr[i], 0);
	}
	return count;
}
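
/*
 * Note: the per-cpu increments elsewhere in this file use
 * this_cpu_inc()/this_cpu_add(), which are not atomic with respect to the
 * cross-CPU WRITE_ONCE() above; an increment racing with the first
 * clearing pass can write back a stale value, which is why the loop runs
 * twice.
 */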
194 | |||
195 | /* | ||
196 | * Debugfs data structures | ||
197 | */ | ||
198 | static const struct file_operations fops_qstat = { | ||
199 | .read = qstat_read, | ||
200 | .write = qstat_write, | ||
201 | .llseek = default_llseek, | ||
202 | }; | ||
203 | |||
204 | /* | ||
205 | * Initialize debugfs for the qspinlock statistical counters | ||
206 | */ | ||
207 | static int __init init_qspinlock_stat(void) | ||
208 | { | ||
209 | struct dentry *d_qstat = debugfs_create_dir("qlockstat", NULL); | ||
210 | int i; | ||
211 | |||
212 | if (!d_qstat) { | ||
213 | pr_warn("Could not create 'qlockstat' debugfs directory\n"); | ||
214 | return 0; | ||
215 | } | ||
216 | |||
	/*
	 * Create the debugfs files
	 *
	 * As reading from and writing to the stat files can be slow, only
	 * root is allowed to read or write them, to limit the impact on
	 * system performance.
	 */
	for (i = 0; i < qstat_num; i++)
		debugfs_create_file(qstat_names[i], 0400, d_qstat,
				    (void *)(long)i, &fops_qstat);

	debugfs_create_file(qstat_names[qstat_reset_cnts], 0200, d_qstat,
			    (void *)(long)qstat_reset_cnts, &fops_qstat);
	return 0;
}
fs_initcall(init_qspinlock_stat);

/*
 * Increment the PV qspinlock statistical counters
 */
static inline void qstat_inc(enum qlock_stats stat, bool cond)
{
	if (cond)
		this_cpu_inc(qstats[stat]);
}
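
/*
 * qstat_inc() is cheap enough to be called unconditionally; see
 * qstat_spin_steal_lock() below for an in-file example where the trylock
 * return value doubles as the increment condition.
 */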
242 | |||
243 | /* | ||
244 | * PV hash hop count | ||
245 | */ | ||
246 | static inline void qstat_hop(int hopcnt) | ||
247 | { | ||
248 | this_cpu_add(qstats[qstat_pv_hash_hops], hopcnt); | ||
249 | } | ||
250 | |||
251 | /* | ||
252 | * Replacement function for pv_kick() | ||
253 | */ | ||
254 | static inline void __pv_kick(int cpu) | ||
255 | { | ||
256 | u64 start = sched_clock(); | ||
257 | |||
258 | per_cpu(pv_kick_time, cpu) = start; | ||
259 | pv_kick(cpu); | ||
260 | this_cpu_add(qstats[qstat_pv_latency_kick], sched_clock() - start); | ||
261 | } | ||
262 | |||
/*
 * Replacement function for pv_wait()
 *
 * The vCPU's own pv_kick_time is zeroed before the wait. If __pv_kick()
 * wakes this vCPU, it will have stored the kick timestamp there, which is
 * then used to compute the kick-to-wakeup latency.
 */
static inline void __pv_wait(u8 *ptr, u8 val)
{
	u64 *pkick_time = this_cpu_ptr(&pv_kick_time);

	*pkick_time = 0;
	pv_wait(ptr, val);
	if (*pkick_time) {
		this_cpu_add(qstats[qstat_pv_latency_wake],
			     sched_clock() - *pkick_time);
		qstat_inc(qstat_pv_kick_wake, true);
	}
}
278 | |||
279 | #define pv_kick(c) __pv_kick(c) | ||
280 | #define pv_wait(p, v) __pv_wait(p, v) | ||
281 | |||
/*
 * PV unfair trylock count tracking function
 */
static inline int qstat_spin_steal_lock(struct qspinlock *lock)
{
	int ret = pv_queued_spin_steal_lock(lock);

	qstat_inc(qstat_pv_lock_stealing, ret);
	return ret;
}
#undef queued_spin_trylock
#define queued_spin_trylock(l)	qstat_spin_steal_lock(l)
#else /* CONFIG_QUEUED_LOCK_STAT */

static inline void qstat_inc(enum qlock_stats stat, bool cond)	{ }
static inline void qstat_hop(int hopcnt)			{ }

#endif /* CONFIG_QUEUED_LOCK_STAT */