Diffstat (limited to 'kernel/trace/latency_hist.c')
 kernel/trace/latency_hist.c | 1041 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 1041 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c
new file mode 100644
index 000000000000..7c2661559f25
--- /dev/null
+++ b/kernel/trace/latency_hist.c
@@ -0,0 +1,1041 @@
/*
 * kernel/trace/latency_hist.c
 *
 * Add support for histograms of preemption-off latency, interrupt-off
 * latency and wakeup latency; depends on Real-Time Preemption support.
 *
 * Copyright (C) 2005 MontaVista Software, Inc.
 * Yi Yang <yyang@ch.mvista.com>
 *
 * Converted to work with the new latency tracer.
 * Copyright (C) 2008 Red Hat, Inc.
 * Steven Rostedt <srostedt@redhat.com>
 *
 */
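
/*
 * The histograms are exposed in debugfs below the tracing directory
 * (typically /sys/kernel/debug/tracing):
 *
 *   latency_hist/<type>/CPU<n>    per-CPU histogram, one bucket per line
 *   latency_hist/<type>/reset     write anything here to clear the data
 *   latency_hist/enable/<type>    attach or detach the tracepoint probes
 *
 * The wakeup and missed_timer_offsets types additionally provide a
 * "pid" filter file and per-CPU "max_latency-CPU<n>" files reporting
 * the task that caused the current maximum.
 */
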
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <asm/div64.h>

#include "trace.h"
#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/hist.h>

enum {
	IRQSOFF_LATENCY = 0,
	PREEMPTOFF_LATENCY,
	PREEMPTIRQSOFF_LATENCY,
	WAKEUP_LATENCY,
	WAKEUP_LATENCY_SHAREDPRIO,
	MISSED_TIMER_OFFSETS,
	MAX_LATENCY_TYPE,
};

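/*
 * One histogram bucket per microsecond of latency (samples are
 * converted to microseconds before they are logged), so the default
 * scale spans 0..10239 us; out-of-range samples are only counted, not
 * binned.
 */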
#define MAX_ENTRY_NUM 10240

struct hist_data {
	atomic_t hist_mode; /* 0: don't log, 1: log */
	long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
	unsigned long min_lat;
	unsigned long max_lat;
	unsigned long long below_hist_bound_samples;
	unsigned long long above_hist_bound_samples;
	unsigned long long accumulate_lat;
	unsigned long long total_samples;
	unsigned long long hist_array[MAX_ENTRY_NUM];
};

struct enable_data {
	int latency_type;
	int enabled;
};

static char *latency_hist_dir_root = "latency_hist";

#ifdef CONFIG_INTERRUPT_OFF_HIST
static DEFINE_PER_CPU(struct hist_data, irqsoff_hist);
static char *irqsoff_hist_dir = "irqsoff";
static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start);
static DEFINE_PER_CPU(int, hist_irqsoff_counting);
#endif

#ifdef CONFIG_PREEMPT_OFF_HIST
static DEFINE_PER_CPU(struct hist_data, preemptoff_hist);
static char *preemptoff_hist_dir = "preemptoff";
static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start);
static DEFINE_PER_CPU(int, hist_preemptoff_counting);
#endif

#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist);
static char *preemptirqsoff_hist_dir = "preemptirqsoff";
static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start);
static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting);
#endif

#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST)
static notrace void probe_preemptirqsoff_hist(int reason, int starthist);
static struct enable_data preemptirqsoff_enabled_data = {
	.latency_type = PREEMPTIRQSOFF_LATENCY,
	.enabled = 0,
};
#endif

#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
struct maxlatproc_data {
	char comm[FIELD_SIZEOF(struct task_struct, comm)];
	int pid;
	int prio;
	long latency;
};
#endif

#ifdef CONFIG_WAKEUP_LATENCY_HIST
static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist);
static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
static char *wakeup_latency_hist_dir = "wakeup";
static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
static notrace void probe_wakeup_latency_hist_start(struct rq *rq,
	struct task_struct *p, int success);
static notrace void probe_wakeup_latency_hist_stop(struct rq *rq,
	struct task_struct *prev, struct task_struct *next);
static notrace void probe_sched_migrate_task(struct task_struct *task,
	int cpu);
static struct enable_data wakeup_latency_enabled_data = {
	.latency_type = WAKEUP_LATENCY,
	.enabled = 0,
};
static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc);
static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio);
static DEFINE_PER_CPU(struct task_struct *, wakeup_task);
static DEFINE_PER_CPU(int, wakeup_sharedprio);
static unsigned long wakeup_pid;
#endif

#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets);
static char *missed_timer_offsets_dir = "missed_timer_offsets";
static notrace void probe_hrtimer_interrupt(int cpu,
	long long offset, struct task_struct *curr, struct task_struct *task);
static struct enable_data missed_timer_offsets_enabled_data = {
	.latency_type = MISSED_TIMER_OFFSETS,
	.enabled = 0,
};
static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc);
static unsigned long missed_timer_offsets_pid;
#endif

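/*
 * Record one latency sample: select the per-CPU histogram that matches
 * the latency type, bump the bucket (or one of the out-of-range
 * counters) and update the min/max/accumulated statistics. For the
 * wakeup and timer-offset types, also remember which task produced a
 * new maximum.
 */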
void notrace latency_hist(int latency_type, int cpu, long latency,
			  struct task_struct *p)
{
	struct hist_data *my_hist;
#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
	struct maxlatproc_data *mp = NULL;
#endif

	if (cpu < 0 || cpu >= NR_CPUS || latency_type < 0 ||
	    latency_type >= MAX_LATENCY_TYPE)
		return;

	switch (latency_type) {
#ifdef CONFIG_INTERRUPT_OFF_HIST
	case IRQSOFF_LATENCY:
		my_hist = &per_cpu(irqsoff_hist, cpu);
		break;
#endif
#ifdef CONFIG_PREEMPT_OFF_HIST
	case PREEMPTOFF_LATENCY:
		my_hist = &per_cpu(preemptoff_hist, cpu);
		break;
#endif
#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
	case PREEMPTIRQSOFF_LATENCY:
		my_hist = &per_cpu(preemptirqsoff_hist, cpu);
		break;
#endif
#ifdef CONFIG_WAKEUP_LATENCY_HIST
	case WAKEUP_LATENCY:
		my_hist = &per_cpu(wakeup_latency_hist, cpu);
		mp = &per_cpu(wakeup_maxlatproc, cpu);
		break;
	case WAKEUP_LATENCY_SHAREDPRIO:
		my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
		mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
		break;
#endif
#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
	case MISSED_TIMER_OFFSETS:
		my_hist = &per_cpu(missed_timer_offsets, cpu);
		mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
		break;
#endif
	default:
		return;
	}

	if (atomic_read(&my_hist->hist_mode) == 0)
		return;

	latency += my_hist->offset;

	if (latency < 0 || latency >= MAX_ENTRY_NUM) {
		if (latency < 0)
			my_hist->below_hist_bound_samples++;
		else
			my_hist->above_hist_bound_samples++;
	} else
		my_hist->hist_array[latency]++;

	if (latency < my_hist->min_lat)
		my_hist->min_lat = latency;
	if (latency > my_hist->max_lat) {
#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
		if (latency_type == WAKEUP_LATENCY ||
		    latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
		    latency_type == MISSED_TIMER_OFFSETS) {
			strncpy(mp->comm, p->comm, sizeof(mp->comm));
			mp->pid = task_pid_nr(p);
			mp->prio = p->prio;
			mp->latency = latency;
		}
#endif
		my_hist->max_lat = latency;
	}
	my_hist->total_samples++;
	my_hist->accumulate_lat += latency;
}

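/*
 * seq_file iterator for the per-CPU histogram files: l_start() pauses
 * logging (hist_mode drops to 0) and prints the summary header on the
 * first call, l_show() emits one bucket per line, and l_next()
 * re-enables logging once the last bucket has been visited.
 */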
static void *l_start(struct seq_file *m, loff_t *pos)
{
	loff_t *index_ptr = NULL;
	loff_t index = *pos;
	struct hist_data *my_hist = m->private;

	if (index == 0) {
		char minstr[32], avgstr[32], maxstr[32];

		atomic_dec(&my_hist->hist_mode);

		if (likely(my_hist->total_samples)) {
			unsigned long avg = (unsigned long)
			    div64_u64(my_hist->accumulate_lat,
				      my_hist->total_samples);
			snprintf(minstr, sizeof(minstr), "%ld",
				 (long) my_hist->min_lat - my_hist->offset);
			snprintf(avgstr, sizeof(avgstr), "%ld",
				 (long) avg - my_hist->offset);
			snprintf(maxstr, sizeof(maxstr), "%ld",
				 (long) my_hist->max_lat - my_hist->offset);
		} else {
			strcpy(minstr, "<undef>");
			strcpy(avgstr, minstr);
			strcpy(maxstr, minstr);
		}

		seq_printf(m, "#Minimum latency: %s microseconds\n"
			   "#Average latency: %s microseconds\n"
			   "#Maximum latency: %s microseconds\n"
			   "#Total samples: %llu\n"
			   "#There are %llu samples lower than %ld"
			   " microseconds.\n"
			   "#There are %llu samples greater than or equal"
			   " to %ld microseconds.\n"
			   "#usecs\t%16s\n",
			   minstr, avgstr, maxstr,
			   my_hist->total_samples,
			   my_hist->below_hist_bound_samples,
			   -my_hist->offset,
			   my_hist->above_hist_bound_samples,
			   MAX_ENTRY_NUM - my_hist->offset,
			   "samples");
	}
	if (index < MAX_ENTRY_NUM) {
		index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL);
		if (index_ptr)
			*index_ptr = index;
	}

	return index_ptr;
}

static void *l_next(struct seq_file *m, void *p, loff_t *pos)
{
	loff_t *index_ptr = p;
	struct hist_data *my_hist = m->private;

	if (++*pos >= MAX_ENTRY_NUM) {
		atomic_inc(&my_hist->hist_mode);
		return NULL;
	}
	*index_ptr = *pos;
	return index_ptr;
}

static void l_stop(struct seq_file *m, void *p)
{
	kfree(p);
}

static int l_show(struct seq_file *m, void *p)
{
	int index = *(loff_t *) p;
	struct hist_data *my_hist = m->private;

	seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset,
		   my_hist->hist_array[index]);
	return 0;
}

static struct seq_operations latency_hist_seq_op = {
	.start = l_start,
	.next = l_next,
	.stop = l_stop,
	.show = l_show
};

static int latency_hist_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &latency_hist_seq_op);
	if (!ret) {
		struct seq_file *seq = file->private_data;
		seq->private = inode->i_private;
	}
	return ret;
}

static struct file_operations latency_hist_fops = {
	.open = latency_hist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

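/* Wipe one histogram; logging is paused while the data is cleared. */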
static void hist_reset(struct hist_data *hist)
{
	atomic_dec(&hist->hist_mode);

	memset(hist->hist_array, 0, sizeof(hist->hist_array));
	hist->below_hist_bound_samples = 0ULL;
	hist->above_hist_bound_samples = 0ULL;
	hist->min_lat = 0xFFFFFFFFUL;
	hist->max_lat = 0UL;
	hist->total_samples = 0ULL;
	hist->accumulate_lat = 0ULL;

	atomic_inc(&hist->hist_mode);
}

static ssize_t
latency_hist_reset(struct file *file, const char __user *a,
		   size_t size, loff_t *off)
{
	int cpu;
	struct hist_data *hist = NULL;
#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
	struct maxlatproc_data *mp = NULL;
#endif
	off_t latency_type = (off_t) file->private_data;

	for_each_online_cpu(cpu) {

		switch (latency_type) {
#ifdef CONFIG_PREEMPT_OFF_HIST
		case PREEMPTOFF_LATENCY:
			hist = &per_cpu(preemptoff_hist, cpu);
			break;
#endif
#ifdef CONFIG_INTERRUPT_OFF_HIST
		case IRQSOFF_LATENCY:
			hist = &per_cpu(irqsoff_hist, cpu);
			break;
#endif
#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
		case PREEMPTIRQSOFF_LATENCY:
			hist = &per_cpu(preemptirqsoff_hist, cpu);
			break;
#endif
#ifdef CONFIG_WAKEUP_LATENCY_HIST
		case WAKEUP_LATENCY:
			hist = &per_cpu(wakeup_latency_hist, cpu);
			mp = &per_cpu(wakeup_maxlatproc, cpu);
			break;
		case WAKEUP_LATENCY_SHAREDPRIO:
			hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
			mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
			break;
#endif
#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
		case MISSED_TIMER_OFFSETS:
			hist = &per_cpu(missed_timer_offsets, cpu);
			mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
			break;
#endif
		}

		hist_reset(hist);
#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
		if (latency_type == WAKEUP_LATENCY ||
		    latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
		    latency_type == MISSED_TIMER_OFFSETS) {
			mp->comm[0] = '\0';
			mp->prio = mp->pid = mp->latency = -1;
		}
#endif
	}

	return size;
}

#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
static ssize_t
show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;
	unsigned long *this_pid = file->private_data;

	r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t do_pid(struct file *file, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	char buf[64];
	unsigned long pid;
	unsigned long *this_pid = file->private_data;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = '\0';

	if (strict_strtoul(buf, 10, &pid))
		return -EINVAL;

	*this_pid = pid;

	return cnt;
}
#endif

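/*
 * "max_latency-CPU<n>" output format: pid, user-visible RT priority
 * (MAX_RT_PRIO-1 - prio), latency in microseconds, and the command name
 * of the task that caused the current maximum.
 */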
#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
static ssize_t
show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int r;
	struct maxlatproc_data *mp = file->private_data;
	int strmaxlen = TASK_COMM_LEN + 32;
	char *buf = kmalloc(strmaxlen, GFP_KERNEL);

	if (buf == NULL)
		return -ENOMEM;

	r = snprintf(buf, strmaxlen, "%d %d %ld %s\n",
		     mp->pid, MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->comm);
	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	kfree(buf);
	return r;
}
#endif

static ssize_t
show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	char buf[64];
	struct enable_data *ed = file->private_data;
	int r;

	r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos)
{
	char buf[64];
	long enable;
	struct enable_data *ed = file->private_data;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (strict_strtol(buf, 10, &enable))
		return -EINVAL;

	if ((enable && ed->enabled) || (!enable && !ed->enabled))
		return cnt;

	if (enable) {
		int ret;

		switch (ed->latency_type) {
#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
		case PREEMPTIRQSOFF_LATENCY:
			ret = register_trace_preemptirqsoff_hist(
			    probe_preemptirqsoff_hist);
			if (ret) {
				pr_info("preemptirqsoff trace: Couldn't assign "
				    "probe_preemptirqsoff_hist "
				    "to trace_preemptirqsoff_hist\n");
				return ret;
			}
			break;
#endif
#ifdef CONFIG_WAKEUP_LATENCY_HIST
		case WAKEUP_LATENCY:
			ret = register_trace_sched_wakeup(
			    probe_wakeup_latency_hist_start);
			if (ret) {
				pr_info("wakeup trace: Couldn't assign "
				    "probe_wakeup_latency_hist_start "
				    "to trace_sched_wakeup\n");
				return ret;
			}
			ret = register_trace_sched_wakeup_new(
			    probe_wakeup_latency_hist_start);
			if (ret) {
				pr_info("wakeup trace: Couldn't assign "
				    "probe_wakeup_latency_hist_start "
				    "to trace_sched_wakeup_new\n");
				unregister_trace_sched_wakeup(
				    probe_wakeup_latency_hist_start);
				return ret;
			}
			ret = register_trace_sched_switch(
			    probe_wakeup_latency_hist_stop);
			if (ret) {
				pr_info("wakeup trace: Couldn't assign "
				    "probe_wakeup_latency_hist_stop "
				    "to trace_sched_switch\n");
				unregister_trace_sched_wakeup(
				    probe_wakeup_latency_hist_start);
				unregister_trace_sched_wakeup_new(
				    probe_wakeup_latency_hist_start);
				return ret;
			}
			ret = register_trace_sched_migrate_task(
			    probe_sched_migrate_task);
			if (ret) {
				pr_info("wakeup trace: Couldn't assign "
				    "probe_sched_migrate_task "
				    "to trace_sched_migrate_task\n");
				unregister_trace_sched_wakeup(
				    probe_wakeup_latency_hist_start);
				unregister_trace_sched_wakeup_new(
				    probe_wakeup_latency_hist_start);
				unregister_trace_sched_switch(
				    probe_wakeup_latency_hist_stop);
				return ret;
			}
			break;
#endif
#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
		case MISSED_TIMER_OFFSETS:
			ret = register_trace_hrtimer_interrupt(
			    probe_hrtimer_interrupt);
			if (ret) {
				pr_info("missed_timer_offsets: Couldn't assign "
				    "probe_hrtimer_interrupt "
				    "to trace_hrtimer_interrupt\n");
				return ret;
			}
			break;
#endif
		default:
			break;
		}
	} else {
		switch (ed->latency_type) {
#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
		case PREEMPTIRQSOFF_LATENCY:
		{
			int cpu;

			unregister_trace_preemptirqsoff_hist(
			    probe_preemptirqsoff_hist);
			for_each_online_cpu(cpu) {
#ifdef CONFIG_INTERRUPT_OFF_HIST
				per_cpu(hist_irqsoff_counting,
				    cpu) = 0;
#endif
#ifdef CONFIG_PREEMPT_OFF_HIST
				per_cpu(hist_preemptoff_counting,
				    cpu) = 0;
#endif
#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
				per_cpu(hist_preemptirqsoff_counting,
				    cpu) = 0;
#endif
			}
		}
			break;
#endif
#ifdef CONFIG_WAKEUP_LATENCY_HIST
		case WAKEUP_LATENCY:
		{
			int cpu;

			unregister_trace_sched_wakeup(
			    probe_wakeup_latency_hist_start);
			unregister_trace_sched_wakeup_new(
			    probe_wakeup_latency_hist_start);
			unregister_trace_sched_switch(
			    probe_wakeup_latency_hist_stop);
			unregister_trace_sched_migrate_task(
			    probe_sched_migrate_task);

			for_each_online_cpu(cpu) {
				per_cpu(wakeup_task, cpu) = NULL;
				per_cpu(wakeup_sharedprio, cpu) = 0;
			}
		}
			break;
#endif
#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
		case MISSED_TIMER_OFFSETS:
			unregister_trace_hrtimer_interrupt(
			    probe_hrtimer_interrupt);
			break;
#endif
		default:
			break;
		}
	}
	ed->enabled = enable;
	return cnt;
}

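/*
 * Typical usage from user space (a sketch; paths assume debugfs is
 * mounted at /sys/kernel/debug):
 *
 *   echo 1 > /sys/kernel/debug/tracing/latency_hist/enable/wakeup
 *   cat /sys/kernel/debug/tracing/latency_hist/wakeup/CPU0
 *   echo 1 > /sys/kernel/debug/tracing/latency_hist/wakeup/reset
 */
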
static const struct file_operations latency_hist_reset_fops = {
	.open = tracing_open_generic,
	.write = latency_hist_reset,
};

static const struct file_operations enable_fops = {
	.open = tracing_open_generic,
	.read = show_enable,
	.write = do_enable,
};

#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
static const struct file_operations pid_fops = {
	.open = tracing_open_generic,
	.read = show_pid,
	.write = do_pid,
};

static const struct file_operations maxlatproc_fops = {
	.open = tracing_open_generic,
	.read = show_maxlatproc,
};
#endif

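/*
 * Probe for the irqs-off/preempt-off tracepoints. On a start event,
 * latch a per-CPU timestamp for every section type that is not already
 * being timed; on a stop event, compute the elapsed time and feed it to
 * the matching histogram. The combined preemptirqsoff section begins
 * when both are disabled and ends as soon as either is re-enabled.
 */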
#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
static notrace void probe_preemptirqsoff_hist(int reason, int starthist)
{
	int cpu = raw_smp_processor_id();
	int time_set = 0;

	if (starthist) {
		cycle_t uninitialized_var(start);

		if (!preempt_count() && !irqs_disabled())
			return;

#ifdef CONFIG_INTERRUPT_OFF_HIST
		if ((reason == IRQS_OFF || reason == TRACE_START) &&
		    !per_cpu(hist_irqsoff_counting, cpu)) {
			per_cpu(hist_irqsoff_counting, cpu) = 1;
			start = ftrace_now(cpu);
			time_set++;
			per_cpu(hist_irqsoff_start, cpu) = start;
		}
#endif

#ifdef CONFIG_PREEMPT_OFF_HIST
		if ((reason == PREEMPT_OFF || reason == TRACE_START) &&
		    !per_cpu(hist_preemptoff_counting, cpu)) {
			per_cpu(hist_preemptoff_counting, cpu) = 1;
			if (!(time_set++))
				start = ftrace_now(cpu);
			per_cpu(hist_preemptoff_start, cpu) = start;
		}
#endif

#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
		if (per_cpu(hist_irqsoff_counting, cpu) &&
		    per_cpu(hist_preemptoff_counting, cpu) &&
		    !per_cpu(hist_preemptirqsoff_counting, cpu)) {
			per_cpu(hist_preemptirqsoff_counting, cpu) = 1;
			if (!time_set)
				start = ftrace_now(cpu);
			per_cpu(hist_preemptirqsoff_start, cpu) = start;
		}
#endif
	} else {
		cycle_t uninitialized_var(stop);

#ifdef CONFIG_INTERRUPT_OFF_HIST
		if ((reason == IRQS_ON || reason == TRACE_STOP) &&
		    per_cpu(hist_irqsoff_counting, cpu)) {
			cycle_t start = per_cpu(hist_irqsoff_start, cpu);

			stop = ftrace_now(cpu);
			time_set++;
			if (start && stop >= start) {
				unsigned long latency =
				    nsecs_to_usecs(stop - start);

				latency_hist(IRQSOFF_LATENCY, cpu, latency,
				    NULL);
			}
			per_cpu(hist_irqsoff_counting, cpu) = 0;
		}
#endif

#ifdef CONFIG_PREEMPT_OFF_HIST
		if ((reason == PREEMPT_ON || reason == TRACE_STOP) &&
		    per_cpu(hist_preemptoff_counting, cpu)) {
			cycle_t start = per_cpu(hist_preemptoff_start, cpu);

			if (!(time_set++))
				stop = ftrace_now(cpu);
			if (start && stop >= start) {
				unsigned long latency =
				    nsecs_to_usecs(stop - start);

				latency_hist(PREEMPTOFF_LATENCY, cpu, latency,
				    NULL);
			}
			per_cpu(hist_preemptoff_counting, cpu) = 0;
		}
#endif

#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
		if ((!per_cpu(hist_irqsoff_counting, cpu) ||
		     !per_cpu(hist_preemptoff_counting, cpu)) &&
		    per_cpu(hist_preemptirqsoff_counting, cpu)) {
			cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu);

			if (!time_set)
				stop = ftrace_now(cpu);
			if (start && stop >= start) {
				unsigned long latency =
				    nsecs_to_usecs(stop - start);
				latency_hist(PREEMPTIRQSOFF_LATENCY, cpu,
				    latency, NULL);
			}
			per_cpu(hist_preemptirqsoff_counting, cpu) = 0;
		}
#endif
	}
}
#endif

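/*
 * Wakeup latency probes: on wakeup, remember the highest-priority woken
 * RT task per CPU (optionally filtered by "pid") and timestamp it;
 * follow the task if it migrates; when sched_switch finally runs it,
 * log the elapsed time. Wakeups at a priority already in use by the
 * current or previously woken task are logged separately under
 * "sharedprio".
 */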
#ifdef CONFIG_WAKEUP_LATENCY_HIST
static DEFINE_RAW_SPINLOCK(wakeup_lock);
static notrace void probe_sched_migrate_task(struct task_struct *task, int cpu)
{
	int old_cpu = task_cpu(task);

	if (cpu != old_cpu) {
		unsigned long flags;
		struct task_struct *cpu_wakeup_task;

		raw_spin_lock_irqsave(&wakeup_lock, flags);

		cpu_wakeup_task = per_cpu(wakeup_task, old_cpu);
		if (task == cpu_wakeup_task) {
			put_task_struct(cpu_wakeup_task);
			per_cpu(wakeup_task, old_cpu) = NULL;
			cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task;
			get_task_struct(cpu_wakeup_task);
		}

		raw_spin_unlock_irqrestore(&wakeup_lock, flags);
	}
}

static notrace void probe_wakeup_latency_hist_start(struct rq *rq,
	struct task_struct *p, int success)
{
	unsigned long flags;
	struct task_struct *curr = rq_curr(rq);
	int cpu = task_cpu(p);
	struct task_struct *cpu_wakeup_task;

	raw_spin_lock_irqsave(&wakeup_lock, flags);

	cpu_wakeup_task = per_cpu(wakeup_task, cpu);

	if (wakeup_pid) {
		if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
		    p->prio == curr->prio)
			per_cpu(wakeup_sharedprio, cpu) = 1;
		if (likely(wakeup_pid != task_pid_nr(p)))
			goto out;
	} else {
		if (likely(!rt_task(p)) ||
		    (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) ||
		    p->prio > curr->prio)
			goto out;
		if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
		    p->prio == curr->prio)
			per_cpu(wakeup_sharedprio, cpu) = 1;
	}

	if (cpu_wakeup_task)
		put_task_struct(cpu_wakeup_task);
	cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p;
	get_task_struct(cpu_wakeup_task);
	cpu_wakeup_task->preempt_timestamp_hist =
	    ftrace_now(raw_smp_processor_id());
out:
	raw_spin_unlock_irqrestore(&wakeup_lock, flags);
}

static notrace void probe_wakeup_latency_hist_stop(struct rq *rq,
	struct task_struct *prev, struct task_struct *next)
{
	unsigned long flags;
	int cpu = task_cpu(next);
	unsigned long latency;
	cycle_t stop;
	struct task_struct *cpu_wakeup_task;

	raw_spin_lock_irqsave(&wakeup_lock, flags);

	cpu_wakeup_task = per_cpu(wakeup_task, cpu);

	if (cpu_wakeup_task == NULL)
		goto out;

	/* Already running? */
	if (unlikely(current == cpu_wakeup_task))
		goto out_reset;

	if (next != cpu_wakeup_task) {
		if (next->prio < cpu_wakeup_task->prio)
			goto out_reset;

		if (next->prio == cpu_wakeup_task->prio)
			per_cpu(wakeup_sharedprio, cpu) = 1;

		goto out;
	}

	/*
	 * The task we are waiting for is about to be switched to.
	 * Calculate latency and store it in histogram.
	 */
	stop = ftrace_now(raw_smp_processor_id());

	latency = nsecs_to_usecs(stop - next->preempt_timestamp_hist);

	if (per_cpu(wakeup_sharedprio, cpu)) {
		latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, next);
		per_cpu(wakeup_sharedprio, cpu) = 0;
	} else
		latency_hist(WAKEUP_LATENCY, cpu, latency, next);

out_reset:
	put_task_struct(cpu_wakeup_task);
	per_cpu(wakeup_task, cpu) = NULL;
out:
	raw_spin_unlock_irqrestore(&wakeup_lock, flags);
}
#endif

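/*
 * hrtimer probe: a non-positive offset means the timer expired late.
 * Log the overshoot in microseconds, but only if the woken task is an
 * RT task that would preempt the currently running one (and matches the
 * optional "pid" filter).
 */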
#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
static notrace void probe_hrtimer_interrupt(int cpu, long long latency_ns,
	struct task_struct *curr, struct task_struct *task)
{
	if (latency_ns <= 0 && task != NULL && rt_task(task) &&
	    task->prio < curr->prio) {
		unsigned long latency;

		if (missed_timer_offsets_pid) {
			if (likely(missed_timer_offsets_pid !=
			    task_pid_nr(task)))
				return;
		}

		latency = (unsigned long) div_s64(-latency_ns, 1000);
		latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, task);
	}
}
#endif

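/*
 * Build the debugfs tree: one directory per latency type holding the
 * per-CPU histogram files and a "reset" file, plus the common "enable"
 * directory. Histograms start out ready to log (hist_mode = 1), but no
 * probes are registered until the corresponding enable file is written.
 */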
static __init int latency_hist_init(void)
{
	struct dentry *latency_hist_root = NULL;
	struct dentry *dentry;
#ifdef CONFIG_WAKEUP_LATENCY_HIST
	struct dentry *dentry_sharedprio;
#endif
	struct dentry *entry;
	struct dentry *enable_root;
	int i = 0;
	struct hist_data *my_hist;
	char name[64];
	char *cpufmt = "CPU%d";
#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
	char *cpufmt_maxlatproc = "max_latency-CPU%d";
	struct maxlatproc_data *mp = NULL;
#endif

	dentry = tracing_init_dentry();
	latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry);
	enable_root = debugfs_create_dir("enable", latency_hist_root);

#ifdef CONFIG_INTERRUPT_OFF_HIST
	dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root);
	for_each_possible_cpu(i) {
		sprintf(name, cpufmt, i);
		entry = debugfs_create_file(name, 0444, dentry,
		    &per_cpu(irqsoff_hist, i), &latency_hist_fops);
		my_hist = &per_cpu(irqsoff_hist, i);
		atomic_set(&my_hist->hist_mode, 1);
		my_hist->min_lat = 0xFFFFFFFFUL;
	}
	entry = debugfs_create_file("reset", 0644, dentry,
	    (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
#endif

#ifdef CONFIG_PREEMPT_OFF_HIST
	dentry = debugfs_create_dir(preemptoff_hist_dir,
	    latency_hist_root);
	for_each_possible_cpu(i) {
		sprintf(name, cpufmt, i);
		entry = debugfs_create_file(name, 0444, dentry,
		    &per_cpu(preemptoff_hist, i), &latency_hist_fops);
		my_hist = &per_cpu(preemptoff_hist, i);
		atomic_set(&my_hist->hist_mode, 1);
		my_hist->min_lat = 0xFFFFFFFFUL;
	}
	entry = debugfs_create_file("reset", 0644, dentry,
	    (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
#endif

#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
	dentry = debugfs_create_dir(preemptirqsoff_hist_dir,
	    latency_hist_root);
	for_each_possible_cpu(i) {
		sprintf(name, cpufmt, i);
		entry = debugfs_create_file(name, 0444, dentry,
		    &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
		my_hist = &per_cpu(preemptirqsoff_hist, i);
		atomic_set(&my_hist->hist_mode, 1);
		my_hist->min_lat = 0xFFFFFFFFUL;
	}
	entry = debugfs_create_file("reset", 0644, dentry,
	    (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
#endif

#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
	entry = debugfs_create_file("preemptirqsoff", 0644,
	    enable_root, (void *)&preemptirqsoff_enabled_data,
	    &enable_fops);
#endif

#ifdef CONFIG_WAKEUP_LATENCY_HIST
	dentry = debugfs_create_dir(wakeup_latency_hist_dir,
	    latency_hist_root);
	dentry_sharedprio = debugfs_create_dir(
	    wakeup_latency_hist_dir_sharedprio, dentry);
	for_each_possible_cpu(i) {
		sprintf(name, cpufmt, i);

		entry = debugfs_create_file(name, 0444, dentry,
		    &per_cpu(wakeup_latency_hist, i),
		    &latency_hist_fops);
		my_hist = &per_cpu(wakeup_latency_hist, i);
		atomic_set(&my_hist->hist_mode, 1);
		my_hist->min_lat = 0xFFFFFFFFUL;

		entry = debugfs_create_file(name, 0444, dentry_sharedprio,
		    &per_cpu(wakeup_latency_hist_sharedprio, i),
		    &latency_hist_fops);
		my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
		atomic_set(&my_hist->hist_mode, 1);
		my_hist->min_lat = 0xFFFFFFFFUL;

		sprintf(name, cpufmt_maxlatproc, i);

		mp = &per_cpu(wakeup_maxlatproc, i);
		entry = debugfs_create_file(name, 0444, dentry, mp,
		    &maxlatproc_fops);
		mp->prio = mp->pid = mp->latency = -1;

		mp = &per_cpu(wakeup_maxlatproc_sharedprio, i);
		entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp,
		    &maxlatproc_fops);
		mp->prio = mp->pid = mp->latency = -1;
	}
	entry = debugfs_create_file("pid", 0644, dentry,
	    (void *)&wakeup_pid, &pid_fops);
	entry = debugfs_create_file("reset", 0644, dentry,
	    (void *)WAKEUP_LATENCY, &latency_hist_reset_fops);
	entry = debugfs_create_file("reset", 0644, dentry_sharedprio,
	    (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops);
	entry = debugfs_create_file("wakeup", 0644,
	    enable_root, (void *)&wakeup_latency_enabled_data,
	    &enable_fops);
#endif

#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
	dentry = debugfs_create_dir(missed_timer_offsets_dir,
	    latency_hist_root);
	for_each_possible_cpu(i) {
		sprintf(name, cpufmt, i);
		entry = debugfs_create_file(name, 0444, dentry,
		    &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
		my_hist = &per_cpu(missed_timer_offsets, i);
		atomic_set(&my_hist->hist_mode, 1);
		my_hist->min_lat = 0xFFFFFFFFUL;

		sprintf(name, cpufmt_maxlatproc, i);
		mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
		entry = debugfs_create_file(name, 0444, dentry, mp,
		    &maxlatproc_fops);
		mp->prio = mp->pid = mp->latency = -1;
	}
	entry = debugfs_create_file("pid", 0644, dentry,
	    (void *)&missed_timer_offsets_pid, &pid_fops);
	entry = debugfs_create_file("reset", 0644, dentry,
	    (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops);
	entry = debugfs_create_file("missed_timer_offsets", 0644,
	    enable_root, (void *)&missed_timer_offsets_enabled_data,
	    &enable_fops);
#endif
	return 0;
}

__initcall(latency_hist_init);