Diffstat (limited to 'kernel/trace')
-rw-r--r-- | kernel/trace/Kconfig                 |   93 |
-rw-r--r-- | kernel/trace/Makefile                |    4 |
-rw-r--r-- | kernel/trace/ftrace.c                |    3 |
-rw-r--r-- | kernel/trace/latency_hist.c          | 1041 |
-rw-r--r-- | kernel/trace/ring_buffer.c           |   65 |
-rw-r--r-- | kernel/trace/trace.c                 |   15 |
-rw-r--r-- | kernel/trace/trace_functions_graph.c |    2 |
-rw-r--r-- | kernel/trace/trace_irqsoff.c         |   17 |
8 files changed, 1193 insertions, 47 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 60e2ce0181ee..7d57890e906d 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -175,6 +175,24 @@ config IRQSOFF_TRACER | |||
175 | enabled. This option and the preempt-off timing option can be | 175 | enabled. This option and the preempt-off timing option can be |
176 | used together or separately.) | 176 | used together or separately.) |
177 | 177 | ||
178 | config INTERRUPT_OFF_HIST | ||
179 | bool "Interrupts-off Latency Histogram" | ||
180 | depends on IRQSOFF_TRACER | ||
181 | help | ||
182 | This option generates continuously updated histograms (one per cpu) | ||
183 | of the duration of time periods with interrupts disabled. The | ||
184 | histograms are disabled by default. To enable them, write a non-zero | ||
185 | number to | ||
186 | |||
187 | /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff | ||
188 | |||
189 | If PREEMPT_OFF_HIST is also selected, additional histograms (one | ||
190 | per cpu) are generated that accumulate the duration of time periods | ||
191 | when both interrupts and preemption are disabled. The histogram data | ||
192 | will be located in the debug file system at | ||
193 | |||
194 | /sys/kernel/debug/tracing/latency_hist/irqsoff | ||
195 | |||
178 | config PREEMPT_TRACER | 196 | config PREEMPT_TRACER |
179 | bool "Preemption-off Latency Tracer" | 197 | bool "Preemption-off Latency Tracer" |
180 | default n | 198 | default n |
@@ -197,14 +215,23 @@ config PREEMPT_TRACER | |||
197 | enabled. This option and the irqs-off timing option can be | 215 | enabled. This option and the irqs-off timing option can be |
198 | used together or separately.) | 216 | used together or separately.) |
199 | 217 | ||
200 | config SYSPROF_TRACER | 218 | config PREEMPT_OFF_HIST |
201 | bool "Sysprof Tracer" | 219 | bool "Preemption-off Latency Histogram" |
202 | depends on X86 | 220 | depends on PREEMPT_TRACER |
203 | select GENERIC_TRACER | ||
204 | select CONTEXT_SWITCH_TRACER | ||
205 | help | 221 | help |
206 | This tracer provides the trace needed by the 'Sysprof' userspace | 222 | This option generates continuously updated histograms (one per cpu) |
207 | tool. | 223 | of the duration of time periods with preemption disabled. The |
224 | histograms are disabled by default. To enable them, write a non-zero | ||
225 | number to | ||
226 | |||
227 | /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff | ||
228 | |||
229 | If INTERRUPT_OFF_HIST is also selected, additional histograms (one | ||
230 | per cpu) are generated that accumulate the duration of time periods | ||
231 | when both interrupts and preemption are disabled. The histogram data | ||
232 | will be located in the debug file system at | ||
233 | |||
234 | /sys/kernel/debug/tracing/latency_hist/preemptoff | ||
208 | 235 | ||
209 | config SCHED_TRACER | 236 | config SCHED_TRACER |
210 | bool "Scheduling Latency Tracer" | 237 | bool "Scheduling Latency Tracer" |
@@ -215,6 +242,55 @@ config SCHED_TRACER | |||
215 | This tracer tracks the latency of the highest priority task | 242 | This tracer tracks the latency of the highest priority task |
216 | to be scheduled in, starting from the point it has woken up. | 243 | to be scheduled in, starting from the point it has woken up. |
217 | 244 | ||
245 | config WAKEUP_LATENCY_HIST | ||
246 | bool "Scheduling Latency Histogram" | ||
247 | depends on SCHED_TRACER | ||
248 | help | ||
249 | This option generates continuously updated histograms (one per cpu) | ||
250 | of the scheduling latency of the highest priority task. | ||
251 | The histograms are disabled by default. To enable them, write a | ||
252 | non-zero number to | ||
253 | |||
254 | /sys/kernel/debug/tracing/latency_hist/enable/wakeup | ||
255 | |||
256 | Two different algorithms are used, one to determine the latency of | ||
257 | processes that exclusively use the highest priority of the system and | ||
258 | another one to determine the latency of processes that share the | ||
259 | highest system priority with other processes. The former is used to | ||
260 | improve hardware and system software, the latter to optimize the | ||
261 | priority design of a given system. The histogram data will be | ||
262 | located in the debug file system at | ||
263 | |||
264 | /sys/kernel/debug/tracing/latency_hist/wakeup | ||
265 | |||
266 | and | ||
267 | |||
268 | /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio | ||
269 | |||
270 | config MISSED_TIMER_OFFSETS_HIST | ||
271 | depends on GENERIC_TIME | ||
272 | select GENERIC_TRACER | ||
273 | bool "Missed timer offsets histogram" | ||
274 | help | ||
275 | Generate a histogram of missed timer offsets in microseconds. The | ||
276 | histograms are disabled by default. To enable them, write a non-zero | ||
277 | number to | ||
278 | |||
279 | /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets | ||
280 | |||
281 | The histogram data will be located in the debug file system at | ||
282 | |||
283 | /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets | ||
284 | |||
285 | config SYSPROF_TRACER | ||
286 | bool "Sysprof Tracer" | ||
287 | depends on X86 | ||
288 | select GENERIC_TRACER | ||
289 | select CONTEXT_SWITCH_TRACER | ||
290 | help | ||
291 | This tracer provides the trace needed by the 'Sysprof' userspace | ||
292 | tool. | ||
293 | |||
218 | config ENABLE_DEFAULT_TRACERS | 294 | config ENABLE_DEFAULT_TRACERS |
219 | bool "Trace process context switches and events" | 295 | bool "Trace process context switches and events" |
220 | depends on !GENERIC_TRACER | 296 | depends on !GENERIC_TRACER |
@@ -385,6 +461,7 @@ config STACK_TRACER | |||
385 | 461 | ||
386 | config HW_BRANCH_TRACER | 462 | config HW_BRANCH_TRACER |
387 | depends on HAVE_HW_BRANCH_TRACER | 463 | depends on HAVE_HW_BRANCH_TRACER |
464 | depends on !PREEMPT_RT | ||
388 | bool "Trace hw branches" | 465 | bool "Trace hw branches" |
389 | select GENERIC_TRACER | 466 | select GENERIC_TRACER |
390 | help | 467 | help |
@@ -412,7 +489,7 @@ config KMEMTRACE | |||
412 | If unsure, say N. | 489 | If unsure, say N. |
413 | 490 | ||
414 | config WORKQUEUE_TRACER | 491 | config WORKQUEUE_TRACER |
415 | bool "Trace workqueues" | 492 | bool "Trace workqueues" if !PREEMPT_RT |
416 | select GENERIC_TRACER | 493 | select GENERIC_TRACER |
417 | help | 494 | help |
418 | The workqueue tracer provides some statistical information | 495 | The workqueue tracer provides some statistical information |
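All four new Kconfig help texts above describe the same debugfs workflow: write a non-zero value to a file under latency_hist/enable/ to start collecting, then read the per-CPU histogram files. The following userspace sketch of that workflow is not part of the patch; it assumes debugfs is mounted at /sys/kernel/debug and that the per-CPU files are named CPU0, CPU1, ... as created by latency_hist_init() further down.

```c
/*
 * Userspace sketch (not part of this patch): enable the wakeup latency
 * histogram and dump CPU0's data, using the debugfs paths named in the
 * Kconfig help texts. Assumes WAKEUP_LATENCY_HIST is built in.
 */
#include <stdio.h>
#include <stdlib.h>

#define HIST_DIR "/sys/kernel/debug/tracing/latency_hist"

int main(void)
{
	FILE *f;
	char line[256];

	/* Any non-zero value switches the histogram on. */
	f = fopen(HIST_DIR "/enable/wakeup", "w");
	if (!f) {
		perror("enable/wakeup");
		return EXIT_FAILURE;
	}
	fputs("1\n", f);
	fclose(f);

	/* Each per-CPU file starts with "#"-prefixed summary lines,
	 * followed by one "<usecs>\t<samples>" row per bucket. */
	f = fopen(HIST_DIR "/wakeup/CPU0", "r");
	if (!f) {
		perror("wakeup/CPU0");
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	return EXIT_SUCCESS;
}
```

Writing 0 to the same enable file unregisters the probes again, and each histogram directory also gains a "reset" file that clears the accumulated per-CPU data.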
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index cd9ecd89ec77..5dddc4da1399 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -35,6 +35,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o | |||
35 | obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o | 35 | obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o |
36 | obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o | 36 | obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o |
37 | obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o | 37 | obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o |
38 | obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o | ||
39 | obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o | ||
40 | obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o | ||
41 | obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o | ||
38 | obj-$(CONFIG_NOP_TRACER) += trace_nop.o | 42 | obj-$(CONFIG_NOP_TRACER) += trace_nop.o |
39 | obj-$(CONFIG_STACK_TRACER) += trace_stack.o | 43 | obj-$(CONFIG_STACK_TRACER) += trace_stack.o |
40 | obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o | 44 | obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 404c9ba1602b..9ed787fc3dff 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -392,7 +392,8 @@ static int function_stat_show(struct seq_file *m, void *v) | |||
392 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 392 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
393 | seq_printf(m, " "); | 393 | seq_printf(m, " "); |
394 | avg = rec->time; | 394 | avg = rec->time; |
395 | do_div(avg, rec->counter); | 395 | if (rec->counter) |
396 | do_div(avg, rec->counter); | ||
396 | 397 | ||
397 | mutex_lock(&mutex); | 398 | mutex_lock(&mutex); |
398 | trace_seq_init(&s); | 399 | trace_seq_init(&s); |
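The ftrace.c hunk above only adds a guard: rec->counter is zero for a function that has never been hit, and the previously unconditional do_div() would then divide by zero. A reduced, illustrative form of the same guard (plain C, not the kernel helper):

```c
/* Illustrative only: average a 64-bit total over a sample count,
 * skipping the division when nothing has been recorded yet. */
static unsigned long long safe_avg(unsigned long long total_ns,
                                   unsigned long samples)
{
	if (!samples)		/* mirrors the new "if (rec->counter)" check */
		return 0;
	return total_ns / samples;
}
```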
diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c
new file mode 100644
index 000000000000..7c2661559f25
--- /dev/null
+++ b/kernel/trace/latency_hist.c
@@ -0,0 +1,1041 @@ | |||
1 | /* | ||
2 | * kernel/trace/latency_hist.c | ||
3 | * | ||
4 | * Add support for histograms of preemption-off latency and | ||
5 | * interrupt-off latency and wakeup latency, it depends on | ||
6 | * Real-Time Preemption Support. | ||
7 | * | ||
8 | * Copyright (C) 2005 MontaVista Software, Inc. | ||
9 | * Yi Yang <yyang@ch.mvista.com> | ||
10 | * | ||
11 | * Converted to work with the new latency tracer. | ||
12 | * Copyright (C) 2008 Red Hat, Inc. | ||
13 | * Steven Rostedt <srostedt@redhat.com> | ||
14 | * | ||
15 | */ | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/debugfs.h> | ||
18 | #include <linux/seq_file.h> | ||
19 | #include <linux/percpu.h> | ||
20 | #include <linux/kallsyms.h> | ||
21 | #include <linux/uaccess.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <asm/atomic.h> | ||
24 | #include <asm/div64.h> | ||
25 | |||
26 | #include "trace.h" | ||
27 | #include <trace/events/sched.h> | ||
28 | |||
29 | #define CREATE_TRACE_POINTS | ||
30 | #include <trace/events/hist.h> | ||
31 | |||
32 | enum { | ||
33 | IRQSOFF_LATENCY = 0, | ||
34 | PREEMPTOFF_LATENCY, | ||
35 | PREEMPTIRQSOFF_LATENCY, | ||
36 | WAKEUP_LATENCY, | ||
37 | WAKEUP_LATENCY_SHAREDPRIO, | ||
38 | MISSED_TIMER_OFFSETS, | ||
39 | MAX_LATENCY_TYPE, | ||
40 | }; | ||
41 | |||
42 | #define MAX_ENTRY_NUM 10240 | ||
43 | |||
44 | struct hist_data { | ||
45 | atomic_t hist_mode; /* 0 log, 1 don't log */ | ||
46 | long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */ | ||
47 | unsigned long min_lat; | ||
48 | unsigned long max_lat; | ||
49 | unsigned long long below_hist_bound_samples; | ||
50 | unsigned long long above_hist_bound_samples; | ||
51 | unsigned long long accumulate_lat; | ||
52 | unsigned long long total_samples; | ||
53 | unsigned long long hist_array[MAX_ENTRY_NUM]; | ||
54 | }; | ||
55 | |||
56 | struct enable_data { | ||
57 | int latency_type; | ||
58 | int enabled; | ||
59 | }; | ||
60 | |||
61 | static char *latency_hist_dir_root = "latency_hist"; | ||
62 | |||
63 | #ifdef CONFIG_INTERRUPT_OFF_HIST | ||
64 | static DEFINE_PER_CPU(struct hist_data, irqsoff_hist); | ||
65 | static char *irqsoff_hist_dir = "irqsoff"; | ||
66 | static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start); | ||
67 | static DEFINE_PER_CPU(int, hist_irqsoff_counting); | ||
68 | #endif | ||
69 | |||
70 | #ifdef CONFIG_PREEMPT_OFF_HIST | ||
71 | static DEFINE_PER_CPU(struct hist_data, preemptoff_hist); | ||
72 | static char *preemptoff_hist_dir = "preemptoff"; | ||
73 | static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start); | ||
74 | static DEFINE_PER_CPU(int, hist_preemptoff_counting); | ||
75 | #endif | ||
76 | |||
77 | #if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) | ||
78 | static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist); | ||
79 | static char *preemptirqsoff_hist_dir = "preemptirqsoff"; | ||
80 | static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start); | ||
81 | static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting); | ||
82 | #endif | ||
83 | |||
84 | #if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST) | ||
85 | static notrace void probe_preemptirqsoff_hist(int reason, int start); | ||
86 | static struct enable_data preemptirqsoff_enabled_data = { | ||
87 | .latency_type = PREEMPTIRQSOFF_LATENCY, | ||
88 | .enabled = 0, | ||
89 | }; | ||
90 | #endif | ||
91 | |||
92 | #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ | ||
93 | defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) | ||
94 | struct maxlatproc_data { | ||
95 | char comm[FIELD_SIZEOF(struct task_struct, comm)]; | ||
96 | int pid; | ||
97 | int prio; | ||
98 | long latency; | ||
99 | }; | ||
100 | #endif | ||
101 | |||
102 | #ifdef CONFIG_WAKEUP_LATENCY_HIST | ||
103 | static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist); | ||
104 | static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio); | ||
105 | static char *wakeup_latency_hist_dir = "wakeup"; | ||
106 | static char *wakeup_latency_hist_dir_sharedprio = "sharedprio"; | ||
107 | static notrace void probe_wakeup_latency_hist_start(struct rq *rq, | ||
108 | struct task_struct *p, int success); | ||
109 | static notrace void probe_wakeup_latency_hist_stop(struct rq *rq, | ||
110 | struct task_struct *prev, struct task_struct *next); | ||
111 | static notrace void probe_sched_migrate_task(struct task_struct *task, | ||
112 | int cpu); | ||
113 | static struct enable_data wakeup_latency_enabled_data = { | ||
114 | .latency_type = WAKEUP_LATENCY, | ||
115 | .enabled = 0, | ||
116 | }; | ||
117 | static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc); | ||
118 | static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio); | ||
119 | static DEFINE_PER_CPU(struct task_struct *, wakeup_task); | ||
120 | static DEFINE_PER_CPU(int, wakeup_sharedprio); | ||
121 | static unsigned long wakeup_pid; | ||
122 | #endif | ||
123 | |||
124 | #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST | ||
125 | static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets); | ||
126 | static char *missed_timer_offsets_dir = "missed_timer_offsets"; | ||
127 | static notrace void probe_hrtimer_interrupt(int cpu, | ||
128 | long long offset, struct task_struct *curr, struct task_struct *task); | ||
129 | static struct enable_data missed_timer_offsets_enabled_data = { | ||
130 | .latency_type = MISSED_TIMER_OFFSETS, | ||
131 | .enabled = 0, | ||
132 | }; | ||
133 | static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc); | ||
134 | static unsigned long missed_timer_offsets_pid; | ||
135 | #endif | ||
136 | |||
137 | void notrace latency_hist(int latency_type, int cpu, unsigned long latency, | ||
138 | struct task_struct *p) | ||
139 | { | ||
140 | struct hist_data *my_hist; | ||
141 | #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ | ||
142 | defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) | ||
143 | struct maxlatproc_data *mp = NULL; | ||
144 | #endif | ||
145 | |||
146 | if (cpu < 0 || cpu >= NR_CPUS || latency_type < 0 || | ||
147 | latency_type >= MAX_LATENCY_TYPE) | ||
148 | return; | ||
149 | |||
150 | switch (latency_type) { | ||
151 | #ifdef CONFIG_INTERRUPT_OFF_HIST | ||
152 | case IRQSOFF_LATENCY: | ||
153 | my_hist = &per_cpu(irqsoff_hist, cpu); | ||
154 | break; | ||
155 | #endif | ||
156 | #ifdef CONFIG_PREEMPT_OFF_HIST | ||
157 | case PREEMPTOFF_LATENCY: | ||
158 | my_hist = &per_cpu(preemptoff_hist, cpu); | ||
159 | break; | ||
160 | #endif | ||
161 | #if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) | ||
162 | case PREEMPTIRQSOFF_LATENCY: | ||
163 | my_hist = &per_cpu(preemptirqsoff_hist, cpu); | ||
164 | break; | ||
165 | #endif | ||
166 | #ifdef CONFIG_WAKEUP_LATENCY_HIST | ||
167 | case WAKEUP_LATENCY: | ||
168 | my_hist = &per_cpu(wakeup_latency_hist, cpu); | ||
169 | mp = &per_cpu(wakeup_maxlatproc, cpu); | ||
170 | break; | ||
171 | case WAKEUP_LATENCY_SHAREDPRIO: | ||
172 | my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); | ||
173 | mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); | ||
174 | break; | ||
175 | #endif | ||
176 | #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST | ||
177 | case MISSED_TIMER_OFFSETS: | ||
178 | my_hist = &per_cpu(missed_timer_offsets, cpu); | ||
179 | mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); | ||
180 | break; | ||
181 | #endif | ||
182 | default: | ||
183 | return; | ||
184 | } | ||
185 | |||
186 | if (atomic_read(&my_hist->hist_mode) == 0) | ||
187 | return; | ||
188 | |||
189 | latency += my_hist->offset; | ||
190 | |||
191 | if (latency < 0 || latency >= MAX_ENTRY_NUM) { | ||
192 | if (latency < 0) | ||
193 | my_hist->below_hist_bound_samples++; | ||
194 | else | ||
195 | my_hist->above_hist_bound_samples++; | ||
196 | } else | ||
197 | my_hist->hist_array[latency]++; | ||
198 | |||
199 | if (latency < my_hist->min_lat) | ||
200 | my_hist->min_lat = latency; | ||
201 | if (latency > my_hist->max_lat) { | ||
202 | #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ | ||
203 | defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) | ||
204 | if (latency_type == WAKEUP_LATENCY || | ||
205 | latency_type == WAKEUP_LATENCY_SHAREDPRIO || | ||
206 | latency_type == MISSED_TIMER_OFFSETS) { | ||
207 | strncpy(mp->comm, p->comm, sizeof(mp->comm)); | ||
208 | mp->pid = task_pid_nr(p); | ||
209 | mp->prio = p->prio; | ||
210 | mp->latency = latency; | ||
211 | } | ||
212 | #endif | ||
213 | my_hist->max_lat = latency; | ||
214 | } | ||
215 | my_hist->total_samples++; | ||
216 | my_hist->accumulate_lat += latency; | ||
217 | } | ||
218 | |||
219 | static void *l_start(struct seq_file *m, loff_t *pos) | ||
220 | { | ||
221 | loff_t *index_ptr = NULL; | ||
222 | loff_t index = *pos; | ||
223 | struct hist_data *my_hist = m->private; | ||
224 | |||
225 | if (index == 0) { | ||
226 | char minstr[32], avgstr[32], maxstr[32]; | ||
227 | |||
228 | atomic_dec(&my_hist->hist_mode); | ||
229 | |||
230 | if (likely(my_hist->total_samples)) { | ||
231 | unsigned long avg = (unsigned long) | ||
232 | div64_u64(my_hist->accumulate_lat, | ||
233 | my_hist->total_samples); | ||
234 | snprintf(minstr, sizeof(minstr), "%ld", | ||
235 | (long) my_hist->min_lat - my_hist->offset); | ||
236 | snprintf(avgstr, sizeof(avgstr), "%ld", | ||
237 | (long) avg - my_hist->offset); | ||
238 | snprintf(maxstr, sizeof(maxstr), "%ld", | ||
239 | (long) my_hist->max_lat - my_hist->offset); | ||
240 | } else { | ||
241 | strcpy(minstr, "<undef>"); | ||
242 | strcpy(avgstr, minstr); | ||
243 | strcpy(maxstr, minstr); | ||
244 | } | ||
245 | |||
246 | seq_printf(m, "#Minimum latency: %s microseconds\n" | ||
247 | "#Average latency: %s microseconds\n" | ||
248 | "#Maximum latency: %s microseconds\n" | ||
249 | "#Total samples: %llu\n" | ||
250 | "#There are %llu samples lower than %ld" | ||
251 | " microseconds.\n" | ||
252 | "#There are %llu samples greater or equal" | ||
253 | " than %ld microseconds.\n" | ||
254 | "#usecs\t%16s\n", | ||
255 | minstr, avgstr, maxstr, | ||
256 | my_hist->total_samples, | ||
257 | my_hist->below_hist_bound_samples, | ||
258 | -my_hist->offset, | ||
259 | my_hist->above_hist_bound_samples, | ||
260 | MAX_ENTRY_NUM - my_hist->offset, | ||
261 | "samples"); | ||
262 | } | ||
263 | if (index < MAX_ENTRY_NUM) { | ||
264 | index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL); | ||
265 | if (index_ptr) | ||
266 | *index_ptr = index; | ||
267 | } | ||
268 | |||
269 | return index_ptr; | ||
270 | } | ||
271 | |||
272 | static void *l_next(struct seq_file *m, void *p, loff_t *pos) | ||
273 | { | ||
274 | loff_t *index_ptr = p; | ||
275 | struct hist_data *my_hist = m->private; | ||
276 | |||
277 | if (++*pos >= MAX_ENTRY_NUM) { | ||
278 | atomic_inc(&my_hist->hist_mode); | ||
279 | return NULL; | ||
280 | } | ||
281 | *index_ptr = *pos; | ||
282 | return index_ptr; | ||
283 | } | ||
284 | |||
285 | static void l_stop(struct seq_file *m, void *p) | ||
286 | { | ||
287 | kfree(p); | ||
288 | } | ||
289 | |||
290 | static int l_show(struct seq_file *m, void *p) | ||
291 | { | ||
292 | int index = *(loff_t *) p; | ||
293 | struct hist_data *my_hist = m->private; | ||
294 | |||
295 | seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset, | ||
296 | my_hist->hist_array[index]); | ||
297 | return 0; | ||
298 | } | ||
299 | |||
300 | static struct seq_operations latency_hist_seq_op = { | ||
301 | .start = l_start, | ||
302 | .next = l_next, | ||
303 | .stop = l_stop, | ||
304 | .show = l_show | ||
305 | }; | ||
306 | |||
307 | static int latency_hist_open(struct inode *inode, struct file *file) | ||
308 | { | ||
309 | int ret; | ||
310 | |||
311 | ret = seq_open(file, &latency_hist_seq_op); | ||
312 | if (!ret) { | ||
313 | struct seq_file *seq = file->private_data; | ||
314 | seq->private = inode->i_private; | ||
315 | } | ||
316 | return ret; | ||
317 | } | ||
318 | |||
319 | static struct file_operations latency_hist_fops = { | ||
320 | .open = latency_hist_open, | ||
321 | .read = seq_read, | ||
322 | .llseek = seq_lseek, | ||
323 | .release = seq_release, | ||
324 | }; | ||
325 | |||
326 | static void hist_reset(struct hist_data *hist) | ||
327 | { | ||
328 | atomic_dec(&hist->hist_mode); | ||
329 | |||
330 | memset(hist->hist_array, 0, sizeof(hist->hist_array)); | ||
331 | hist->below_hist_bound_samples = 0ULL; | ||
332 | hist->above_hist_bound_samples = 0ULL; | ||
333 | hist->min_lat = 0xFFFFFFFFUL; | ||
334 | hist->max_lat = 0UL; | ||
335 | hist->total_samples = 0ULL; | ||
336 | hist->accumulate_lat = 0ULL; | ||
337 | |||
338 | atomic_inc(&hist->hist_mode); | ||
339 | } | ||
340 | |||
341 | static ssize_t | ||
342 | latency_hist_reset(struct file *file, const char __user *a, | ||
343 | size_t size, loff_t *off) | ||
344 | { | ||
345 | int cpu; | ||
346 | struct hist_data *hist = NULL; | ||
347 | #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ | ||
348 | defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) | ||
349 | struct maxlatproc_data *mp = NULL; | ||
350 | #endif | ||
351 | off_t latency_type = (off_t) file->private_data; | ||
352 | |||
353 | for_each_online_cpu(cpu) { | ||
354 | |||
355 | switch (latency_type) { | ||
356 | #ifdef CONFIG_PREEMPT_OFF_HIST | ||
357 | case PREEMPTOFF_LATENCY: | ||
358 | hist = &per_cpu(preemptoff_hist, cpu); | ||
359 | break; | ||
360 | #endif | ||
361 | #ifdef CONFIG_INTERRUPT_OFF_HIST | ||
362 | case IRQSOFF_LATENCY: | ||
363 | hist = &per_cpu(irqsoff_hist, cpu); | ||
364 | break; | ||
365 | #endif | ||
366 | #if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) | ||
367 | case PREEMPTIRQSOFF_LATENCY: | ||
368 | hist = &per_cpu(preemptirqsoff_hist, cpu); | ||
369 | break; | ||
370 | #endif | ||
371 | #ifdef CONFIG_WAKEUP_LATENCY_HIST | ||
372 | case WAKEUP_LATENCY: | ||
373 | hist = &per_cpu(wakeup_latency_hist, cpu); | ||
374 | mp = &per_cpu(wakeup_maxlatproc, cpu); | ||
375 | break; | ||
376 | case WAKEUP_LATENCY_SHAREDPRIO: | ||
377 | hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); | ||
378 | mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); | ||
379 | break; | ||
380 | #endif | ||
381 | #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST | ||
382 | case MISSED_TIMER_OFFSETS: | ||
383 | hist = &per_cpu(missed_timer_offsets, cpu); | ||
384 | mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); | ||
385 | break; | ||
386 | #endif | ||
387 | } | ||
388 | |||
389 | hist_reset(hist); | ||
390 | #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ | ||
391 | defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) | ||
392 | if (latency_type == WAKEUP_LATENCY || | ||
393 | latency_type == WAKEUP_LATENCY_SHAREDPRIO || | ||
394 | latency_type == MISSED_TIMER_OFFSETS) { | ||
395 | mp->comm[0] = '\0'; | ||
396 | mp->prio = mp->pid = mp->latency = -1; | ||
397 | } | ||
398 | #endif | ||
399 | } | ||
400 | |||
401 | return size; | ||
402 | } | ||
403 | |||
404 | #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ | ||
405 | defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) | ||
406 | static ssize_t | ||
407 | show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) | ||
408 | { | ||
409 | char buf[64]; | ||
410 | int r; | ||
411 | unsigned long *this_pid = file->private_data; | ||
412 | |||
413 | r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid); | ||
414 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
415 | } | ||
416 | |||
417 | static ssize_t do_pid(struct file *file, const char __user *ubuf, | ||
418 | size_t cnt, loff_t *ppos) | ||
419 | { | ||
420 | char buf[64]; | ||
421 | unsigned long pid; | ||
422 | unsigned long *this_pid = file->private_data; | ||
423 | |||
424 | if (cnt >= sizeof(buf)) | ||
425 | return -EINVAL; | ||
426 | |||
427 | if (copy_from_user(&buf, ubuf, cnt)) | ||
428 | return -EFAULT; | ||
429 | |||
430 | buf[cnt] = '\0'; | ||
431 | |||
432 | if (strict_strtoul(buf, 10, &pid)) | ||
433 | return(-EINVAL); | ||
434 | |||
435 | *this_pid = pid; | ||
436 | |||
437 | return cnt; | ||
438 | } | ||
439 | #endif | ||
440 | |||
441 | #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ | ||
442 | defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) | ||
443 | static ssize_t | ||
444 | show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) | ||
445 | { | ||
446 | int r; | ||
447 | struct maxlatproc_data *mp = file->private_data; | ||
448 | int strmaxlen = TASK_COMM_LEN + 32; | ||
449 | char *buf = kmalloc(strmaxlen, GFP_KERNEL); | ||
450 | |||
451 | if (buf == NULL) | ||
452 | return -ENOMEM; | ||
453 | |||
454 | r = snprintf(buf, strmaxlen, "%d %d %ld %s\n", | ||
455 | mp->pid, MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->comm); | ||
456 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
457 | kfree(buf); | ||
458 | return r; | ||
459 | } | ||
460 | #endif | ||
461 | |||
462 | static ssize_t | ||
463 | show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) | ||
464 | { | ||
465 | char buf[64]; | ||
466 | struct enable_data *ed = file->private_data; | ||
467 | int r; | ||
468 | |||
469 | r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled); | ||
470 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
471 | } | ||
472 | |||
473 | static ssize_t | ||
474 | do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) | ||
475 | { | ||
476 | char buf[64]; | ||
477 | long enable; | ||
478 | struct enable_data *ed = file->private_data; | ||
479 | |||
480 | if (cnt >= sizeof(buf)) | ||
481 | return -EINVAL; | ||
482 | |||
483 | if (copy_from_user(&buf, ubuf, cnt)) | ||
484 | return -EFAULT; | ||
485 | |||
486 | buf[cnt] = 0; | ||
487 | |||
488 | if (strict_strtol(buf, 10, &enable)) | ||
489 | return(-EINVAL); | ||
490 | |||
491 | if ((enable && ed->enabled) || (!enable && !ed->enabled)) | ||
492 | return cnt; | ||
493 | |||
494 | if (enable) { | ||
495 | int ret; | ||
496 | |||
497 | switch (ed->latency_type) { | ||
498 | #if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) | ||
499 | case PREEMPTIRQSOFF_LATENCY: | ||
500 | ret = register_trace_preemptirqsoff_hist( | ||
501 | probe_preemptirqsoff_hist); | ||
502 | if (ret) { | ||
503 | pr_info("wakeup trace: Couldn't assign " | ||
504 | "probe_preemptirqsoff_hist " | ||
505 | "to trace_preemptirqsoff_hist\n"); | ||
506 | return ret; | ||
507 | } | ||
508 | break; | ||
509 | #endif | ||
510 | #ifdef CONFIG_WAKEUP_LATENCY_HIST | ||
511 | case WAKEUP_LATENCY: | ||
512 | ret = register_trace_sched_wakeup( | ||
513 | probe_wakeup_latency_hist_start); | ||
514 | if (ret) { | ||
515 | pr_info("wakeup trace: Couldn't assign " | ||
516 | "probe_wakeup_latency_hist_start " | ||
517 | "to trace_sched_wakeup\n"); | ||
518 | return ret; | ||
519 | } | ||
520 | ret = register_trace_sched_wakeup_new( | ||
521 | probe_wakeup_latency_hist_start); | ||
522 | if (ret) { | ||
523 | pr_info("wakeup trace: Couldn't assign " | ||
524 | "probe_wakeup_latency_hist_start " | ||
525 | "to trace_sched_wakeup_new\n"); | ||
526 | unregister_trace_sched_wakeup( | ||
527 | probe_wakeup_latency_hist_start); | ||
528 | return ret; | ||
529 | } | ||
530 | ret = register_trace_sched_switch( | ||
531 | probe_wakeup_latency_hist_stop); | ||
532 | if (ret) { | ||
533 | pr_info("wakeup trace: Couldn't assign " | ||
534 | "probe_wakeup_latency_hist_stop " | ||
535 | "to trace_sched_switch\n"); | ||
536 | unregister_trace_sched_wakeup( | ||
537 | probe_wakeup_latency_hist_start); | ||
538 | unregister_trace_sched_wakeup_new( | ||
539 | probe_wakeup_latency_hist_start); | ||
540 | return ret; | ||
541 | } | ||
542 | ret = register_trace_sched_migrate_task( | ||
543 | probe_sched_migrate_task); | ||
544 | if (ret) { | ||
545 | pr_info("wakeup trace: Couldn't assign " | ||
546 | "probe_sched_migrate_task " | ||
547 | "to trace_sched_migrate_task\n"); | ||
548 | unregister_trace_sched_wakeup( | ||
549 | probe_wakeup_latency_hist_start); | ||
550 | unregister_trace_sched_wakeup_new( | ||
551 | probe_wakeup_latency_hist_start); | ||
552 | unregister_trace_sched_switch( | ||
553 | probe_wakeup_latency_hist_stop); | ||
554 | return ret; | ||
555 | } | ||
556 | break; | ||
557 | #endif | ||
558 | #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST | ||
559 | case MISSED_TIMER_OFFSETS: | ||
560 | ret = register_trace_hrtimer_interrupt( | ||
561 | probe_hrtimer_interrupt); | ||
562 | if (ret) { | ||
563 | pr_info("wakeup trace: Couldn't assign " | ||
564 | "probe_hrtimer_interrupt " | ||
565 | "to trace_hrtimer_interrupt\n"); | ||
566 | return ret; | ||
567 | } | ||
568 | break; | ||
569 | #endif | ||
570 | default: | ||
571 | break; | ||
572 | } | ||
573 | } else { | ||
574 | switch (ed->latency_type) { | ||
575 | #if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) | ||
576 | case PREEMPTIRQSOFF_LATENCY: | ||
577 | { | ||
578 | int cpu; | ||
579 | |||
580 | unregister_trace_preemptirqsoff_hist( | ||
581 | probe_preemptirqsoff_hist); | ||
582 | for_each_online_cpu(cpu) { | ||
583 | #ifdef CONFIG_INTERRUPT_OFF_HIST | ||
584 | per_cpu(hist_irqsoff_counting, | ||
585 | cpu) = 0; | ||
586 | #endif | ||
587 | #ifdef CONFIG_PREEMPT_OFF_HIST | ||
588 | per_cpu(hist_preemptoff_counting, | ||
589 | cpu) = 0; | ||
590 | #endif | ||
591 | #if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) | ||
592 | per_cpu(hist_preemptirqsoff_counting, | ||
593 | cpu) = 0; | ||
594 | #endif | ||
595 | } | ||
596 | } | ||
597 | break; | ||
598 | #endif | ||
599 | #ifdef CONFIG_WAKEUP_LATENCY_HIST | ||
600 | case WAKEUP_LATENCY: | ||
601 | { | ||
602 | int cpu; | ||
603 | |||
604 | unregister_trace_sched_wakeup( | ||
605 | probe_wakeup_latency_hist_start); | ||
606 | unregister_trace_sched_wakeup_new( | ||
607 | probe_wakeup_latency_hist_start); | ||
608 | unregister_trace_sched_switch( | ||
609 | probe_wakeup_latency_hist_stop); | ||
610 | unregister_trace_sched_migrate_task( | ||
611 | probe_sched_migrate_task); | ||
612 | |||
613 | for_each_online_cpu(cpu) { | ||
614 | per_cpu(wakeup_task, cpu) = NULL; | ||
615 | per_cpu(wakeup_sharedprio, cpu) = 0; | ||
616 | } | ||
617 | } | ||
618 | break; | ||
619 | #endif | ||
620 | #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST | ||
621 | case MISSED_TIMER_OFFSETS: | ||
622 | unregister_trace_hrtimer_interrupt( | ||
623 | probe_hrtimer_interrupt); | ||
624 | break; | ||
625 | #endif | ||
626 | default: | ||
627 | break; | ||
628 | } | ||
629 | } | ||
630 | ed->enabled = enable; | ||
631 | return cnt; | ||
632 | } | ||
633 | |||
634 | static const struct file_operations latency_hist_reset_fops = { | ||
635 | .open = tracing_open_generic, | ||
636 | .write = latency_hist_reset, | ||
637 | }; | ||
638 | |||
639 | static const struct file_operations enable_fops = { | ||
640 | .open = tracing_open_generic, | ||
641 | .read = show_enable, | ||
642 | .write = do_enable, | ||
643 | }; | ||
644 | |||
645 | #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ | ||
646 | defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) | ||
647 | static const struct file_operations pid_fops = { | ||
648 | .open = tracing_open_generic, | ||
649 | .read = show_pid, | ||
650 | .write = do_pid, | ||
651 | }; | ||
652 | |||
653 | static const struct file_operations maxlatproc_fops = { | ||
654 | .open = tracing_open_generic, | ||
655 | .read = show_maxlatproc, | ||
656 | }; | ||
657 | #endif | ||
658 | |||
659 | #if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) | ||
660 | static notrace void probe_preemptirqsoff_hist(int reason, int starthist) | ||
661 | { | ||
662 | int cpu = raw_smp_processor_id(); | ||
663 | int time_set = 0; | ||
664 | |||
665 | if (starthist) { | ||
666 | cycle_t uninitialized_var(start); | ||
667 | |||
668 | if (!preempt_count() && !irqs_disabled()) | ||
669 | return; | ||
670 | |||
671 | #ifdef CONFIG_INTERRUPT_OFF_HIST | ||
672 | if ((reason == IRQS_OFF || reason == TRACE_START) && | ||
673 | !per_cpu(hist_irqsoff_counting, cpu)) { | ||
674 | per_cpu(hist_irqsoff_counting, cpu) = 1; | ||
675 | start = ftrace_now(cpu); | ||
676 | time_set++; | ||
677 | per_cpu(hist_irqsoff_start, cpu) = start; | ||
678 | } | ||
679 | #endif | ||
680 | |||
681 | #ifdef CONFIG_PREEMPT_OFF_HIST | ||
682 | if ((reason == PREEMPT_OFF || reason == TRACE_START) && | ||
683 | !per_cpu(hist_preemptoff_counting, cpu)) { | ||
684 | per_cpu(hist_preemptoff_counting, cpu) = 1; | ||
685 | if (!(time_set++)) | ||
686 | start = ftrace_now(cpu); | ||
687 | per_cpu(hist_preemptoff_start, cpu) = start; | ||
688 | } | ||
689 | #endif | ||
690 | |||
691 | #if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) | ||
692 | if (per_cpu(hist_irqsoff_counting, cpu) && | ||
693 | per_cpu(hist_preemptoff_counting, cpu) && | ||
694 | !per_cpu(hist_preemptirqsoff_counting, cpu)) { | ||
695 | per_cpu(hist_preemptirqsoff_counting, cpu) = 1; | ||
696 | if (!time_set) | ||
697 | start = ftrace_now(cpu); | ||
698 | per_cpu(hist_preemptirqsoff_start, cpu) = start; | ||
699 | } | ||
700 | #endif | ||
701 | } else { | ||
702 | cycle_t uninitialized_var(stop); | ||
703 | |||
704 | #ifdef CONFIG_INTERRUPT_OFF_HIST | ||
705 | if ((reason == IRQS_ON || reason == TRACE_STOP) && | ||
706 | per_cpu(hist_irqsoff_counting, cpu)) { | ||
707 | cycle_t start = per_cpu(hist_irqsoff_start, cpu); | ||
708 | |||
709 | stop = ftrace_now(cpu); | ||
710 | time_set++; | ||
711 | if (start && stop >= start) { | ||
712 | unsigned long latency = | ||
713 | nsecs_to_usecs(stop - start); | ||
714 | |||
715 | latency_hist(IRQSOFF_LATENCY, cpu, latency, | ||
716 | NULL); | ||
717 | } | ||
718 | per_cpu(hist_irqsoff_counting, cpu) = 0; | ||
719 | } | ||
720 | #endif | ||
721 | |||
722 | #ifdef CONFIG_PREEMPT_OFF_HIST | ||
723 | if ((reason == PREEMPT_ON || reason == TRACE_STOP) && | ||
724 | per_cpu(hist_preemptoff_counting, cpu)) { | ||
725 | cycle_t start = per_cpu(hist_preemptoff_start, cpu); | ||
726 | |||
727 | if (!(time_set++)) | ||
728 | stop = ftrace_now(cpu); | ||
729 | if (start && stop >= start) { | ||
730 | unsigned long latency = | ||
731 | nsecs_to_usecs(stop - start); | ||
732 | |||
733 | latency_hist(PREEMPTOFF_LATENCY, cpu, latency, | ||
734 | NULL); | ||
735 | } | ||
736 | per_cpu(hist_preemptoff_counting, cpu) = 0; | ||
737 | } | ||
738 | #endif | ||
739 | |||
740 | #if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) | ||
741 | if ((!per_cpu(hist_irqsoff_counting, cpu) || | ||
742 | !per_cpu(hist_preemptoff_counting, cpu)) && | ||
743 | per_cpu(hist_preemptirqsoff_counting, cpu)) { | ||
744 | cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu); | ||
745 | |||
746 | if (!time_set) | ||
747 | stop = ftrace_now(cpu); | ||
748 | if (start && stop >= start) { | ||
749 | unsigned long latency = | ||
750 | nsecs_to_usecs(stop - start); | ||
751 | latency_hist(PREEMPTIRQSOFF_LATENCY, cpu, | ||
752 | latency, NULL); | ||
753 | } | ||
754 | per_cpu(hist_preemptirqsoff_counting, cpu) = 0; | ||
755 | } | ||
756 | #endif | ||
757 | } | ||
758 | } | ||
759 | #endif | ||
760 | |||
761 | #ifdef CONFIG_WAKEUP_LATENCY_HIST | ||
762 | static DEFINE_RAW_SPINLOCK(wakeup_lock); | ||
763 | static notrace void probe_sched_migrate_task(struct task_struct *task, int cpu) | ||
764 | { | ||
765 | int old_cpu = task_cpu(task); | ||
766 | |||
767 | if (cpu != old_cpu) { | ||
768 | unsigned long flags; | ||
769 | struct task_struct *cpu_wakeup_task; | ||
770 | |||
771 | raw_spin_lock_irqsave(&wakeup_lock, flags); | ||
772 | |||
773 | cpu_wakeup_task = per_cpu(wakeup_task, old_cpu); | ||
774 | if (task == cpu_wakeup_task) { | ||
775 | put_task_struct(cpu_wakeup_task); | ||
776 | per_cpu(wakeup_task, old_cpu) = NULL; | ||
777 | cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task; | ||
778 | get_task_struct(cpu_wakeup_task); | ||
779 | } | ||
780 | |||
781 | raw_spin_unlock_irqrestore(&wakeup_lock, flags); | ||
782 | } | ||
783 | } | ||
784 | |||
785 | static notrace void probe_wakeup_latency_hist_start(struct rq *rq, | ||
786 | struct task_struct *p, int success) | ||
787 | { | ||
788 | unsigned long flags; | ||
789 | struct task_struct *curr = rq_curr(rq); | ||
790 | int cpu = task_cpu(p); | ||
791 | struct task_struct *cpu_wakeup_task; | ||
792 | |||
793 | raw_spin_lock_irqsave(&wakeup_lock, flags); | ||
794 | |||
795 | cpu_wakeup_task = per_cpu(wakeup_task, cpu); | ||
796 | |||
797 | if (wakeup_pid) { | ||
798 | if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || | ||
799 | p->prio == curr->prio) | ||
800 | per_cpu(wakeup_sharedprio, cpu) = 1; | ||
801 | if (likely(wakeup_pid != task_pid_nr(p))) | ||
802 | goto out; | ||
803 | } else { | ||
804 | if (likely(!rt_task(p)) || | ||
805 | (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) || | ||
806 | p->prio > curr->prio) | ||
807 | goto out; | ||
808 | if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || | ||
809 | p->prio == curr->prio) | ||
810 | per_cpu(wakeup_sharedprio, cpu) = 1; | ||
811 | } | ||
812 | |||
813 | if (cpu_wakeup_task) | ||
814 | put_task_struct(cpu_wakeup_task); | ||
815 | cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p; | ||
816 | get_task_struct(cpu_wakeup_task); | ||
817 | cpu_wakeup_task->preempt_timestamp_hist = | ||
818 | ftrace_now(raw_smp_processor_id()); | ||
819 | out: | ||
820 | raw_spin_unlock_irqrestore(&wakeup_lock, flags); | ||
821 | } | ||
822 | |||
823 | static notrace void probe_wakeup_latency_hist_stop(struct rq *rq, | ||
824 | struct task_struct *prev, struct task_struct *next) | ||
825 | { | ||
826 | unsigned long flags; | ||
827 | int cpu = task_cpu(next); | ||
828 | unsigned long latency; | ||
829 | cycle_t stop; | ||
830 | struct task_struct *cpu_wakeup_task; | ||
831 | |||
832 | raw_spin_lock_irqsave(&wakeup_lock, flags); | ||
833 | |||
834 | cpu_wakeup_task = per_cpu(wakeup_task, cpu); | ||
835 | |||
836 | if (cpu_wakeup_task == NULL) | ||
837 | goto out; | ||
838 | |||
839 | /* Already running? */ | ||
840 | if (unlikely(current == cpu_wakeup_task)) | ||
841 | goto out_reset; | ||
842 | |||
843 | if (next != cpu_wakeup_task) { | ||
844 | if (next->prio < cpu_wakeup_task->prio) | ||
845 | goto out_reset; | ||
846 | |||
847 | if (next->prio == cpu_wakeup_task->prio) | ||
848 | per_cpu(wakeup_sharedprio, cpu) = 1; | ||
849 | |||
850 | goto out; | ||
851 | } | ||
852 | |||
853 | /* | ||
854 | * The task we are waiting for is about to be switched to. | ||
855 | * Calculate latency and store it in histogram. | ||
856 | */ | ||
857 | stop = ftrace_now(raw_smp_processor_id()); | ||
858 | |||
859 | latency = nsecs_to_usecs(stop - next->preempt_timestamp_hist); | ||
860 | |||
861 | if (per_cpu(wakeup_sharedprio, cpu)) { | ||
862 | latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, next); | ||
863 | per_cpu(wakeup_sharedprio, cpu) = 0; | ||
864 | } else | ||
865 | latency_hist(WAKEUP_LATENCY, cpu, latency, next); | ||
866 | |||
867 | out_reset: | ||
868 | put_task_struct(cpu_wakeup_task); | ||
869 | per_cpu(wakeup_task, cpu) = NULL; | ||
870 | out: | ||
871 | raw_spin_unlock_irqrestore(&wakeup_lock, flags); | ||
872 | } | ||
873 | #endif | ||
874 | |||
875 | #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST | ||
876 | static notrace void probe_hrtimer_interrupt(int cpu, long long latency_ns, | ||
877 | struct task_struct *curr, struct task_struct *task) | ||
878 | { | ||
879 | if (latency_ns <= 0 && task != NULL && rt_task(task) && | ||
880 | task->prio < curr->prio) { | ||
881 | unsigned long latency; | ||
882 | |||
883 | if (missed_timer_offsets_pid) { | ||
884 | if (likely(missed_timer_offsets_pid != | ||
885 | task_pid_nr(task))) | ||
886 | return; | ||
887 | } | ||
888 | |||
889 | latency = (unsigned long) div_s64(-latency_ns, 1000); | ||
890 | latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, task); | ||
891 | } | ||
892 | } | ||
893 | #endif | ||
894 | |||
895 | static __init int latency_hist_init(void) | ||
896 | { | ||
897 | struct dentry *latency_hist_root = NULL; | ||
898 | struct dentry *dentry; | ||
899 | #ifdef CONFIG_WAKEUP_LATENCY_HIST | ||
900 | struct dentry *dentry_sharedprio; | ||
901 | #endif | ||
902 | struct dentry *entry; | ||
903 | struct dentry *enable_root; | ||
904 | int i = 0; | ||
905 | struct hist_data *my_hist; | ||
906 | char name[64]; | ||
907 | char *cpufmt = "CPU%d"; | ||
908 | #if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ | ||
909 | defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) | ||
910 | char *cpufmt_maxlatproc = "max_latency-CPU%d"; | ||
911 | struct maxlatproc_data *mp = NULL; | ||
912 | #endif | ||
913 | |||
914 | dentry = tracing_init_dentry(); | ||
915 | latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry); | ||
916 | enable_root = debugfs_create_dir("enable", latency_hist_root); | ||
917 | |||
918 | #ifdef CONFIG_INTERRUPT_OFF_HIST | ||
919 | dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root); | ||
920 | for_each_possible_cpu(i) { | ||
921 | sprintf(name, cpufmt, i); | ||
922 | entry = debugfs_create_file(name, 0444, dentry, | ||
923 | &per_cpu(irqsoff_hist, i), &latency_hist_fops); | ||
924 | my_hist = &per_cpu(irqsoff_hist, i); | ||
925 | atomic_set(&my_hist->hist_mode, 1); | ||
926 | my_hist->min_lat = 0xFFFFFFFFUL; | ||
927 | } | ||
928 | entry = debugfs_create_file("reset", 0644, dentry, | ||
929 | (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops); | ||
930 | #endif | ||
931 | |||
932 | #ifdef CONFIG_PREEMPT_OFF_HIST | ||
933 | dentry = debugfs_create_dir(preemptoff_hist_dir, | ||
934 | latency_hist_root); | ||
935 | for_each_possible_cpu(i) { | ||
936 | sprintf(name, cpufmt, i); | ||
937 | entry = debugfs_create_file(name, 0444, dentry, | ||
938 | &per_cpu(preemptoff_hist, i), &latency_hist_fops); | ||
939 | my_hist = &per_cpu(preemptoff_hist, i); | ||
940 | atomic_set(&my_hist->hist_mode, 1); | ||
941 | my_hist->min_lat = 0xFFFFFFFFUL; | ||
942 | } | ||
943 | entry = debugfs_create_file("reset", 0644, dentry, | ||
944 | (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops); | ||
945 | #endif | ||
946 | |||
947 | #if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) | ||
948 | dentry = debugfs_create_dir(preemptirqsoff_hist_dir, | ||
949 | latency_hist_root); | ||
950 | for_each_possible_cpu(i) { | ||
951 | sprintf(name, cpufmt, i); | ||
952 | entry = debugfs_create_file(name, 0444, dentry, | ||
953 | &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops); | ||
954 | my_hist = &per_cpu(preemptirqsoff_hist, i); | ||
955 | atomic_set(&my_hist->hist_mode, 1); | ||
956 | my_hist->min_lat = 0xFFFFFFFFUL; | ||
957 | } | ||
958 | entry = debugfs_create_file("reset", 0644, dentry, | ||
959 | (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops); | ||
960 | #endif | ||
961 | |||
962 | #if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) | ||
963 | entry = debugfs_create_file("preemptirqsoff", 0644, | ||
964 | enable_root, (void *)&preemptirqsoff_enabled_data, | ||
965 | &enable_fops); | ||
966 | #endif | ||
967 | |||
968 | #ifdef CONFIG_WAKEUP_LATENCY_HIST | ||
969 | dentry = debugfs_create_dir(wakeup_latency_hist_dir, | ||
970 | latency_hist_root); | ||
971 | dentry_sharedprio = debugfs_create_dir( | ||
972 | wakeup_latency_hist_dir_sharedprio, dentry); | ||
973 | for_each_possible_cpu(i) { | ||
974 | sprintf(name, cpufmt, i); | ||
975 | |||
976 | entry = debugfs_create_file(name, 0444, dentry, | ||
977 | &per_cpu(wakeup_latency_hist, i), | ||
978 | &latency_hist_fops); | ||
979 | my_hist = &per_cpu(wakeup_latency_hist, i); | ||
980 | atomic_set(&my_hist->hist_mode, 1); | ||
981 | my_hist->min_lat = 0xFFFFFFFFUL; | ||
982 | |||
983 | entry = debugfs_create_file(name, 0444, dentry_sharedprio, | ||
984 | &per_cpu(wakeup_latency_hist_sharedprio, i), | ||
985 | &latency_hist_fops); | ||
986 | my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i); | ||
987 | atomic_set(&my_hist->hist_mode, 1); | ||
988 | my_hist->min_lat = 0xFFFFFFFFUL; | ||
989 | |||
990 | sprintf(name, cpufmt_maxlatproc, i); | ||
991 | |||
992 | mp = &per_cpu(wakeup_maxlatproc, i); | ||
993 | entry = debugfs_create_file(name, 0444, dentry, mp, | ||
994 | &maxlatproc_fops); | ||
995 | mp->prio = mp->pid = mp->latency = -1; | ||
996 | |||
997 | mp = &per_cpu(wakeup_maxlatproc_sharedprio, i); | ||
998 | entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp, | ||
999 | &maxlatproc_fops); | ||
1000 | mp->prio = mp->pid = mp->latency = -1; | ||
1001 | } | ||
1002 | entry = debugfs_create_file("pid", 0644, dentry, | ||
1003 | (void *)&wakeup_pid, &pid_fops); | ||
1004 | entry = debugfs_create_file("reset", 0644, dentry, | ||
1005 | (void *)WAKEUP_LATENCY, &latency_hist_reset_fops); | ||
1006 | entry = debugfs_create_file("reset", 0644, dentry_sharedprio, | ||
1007 | (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops); | ||
1008 | entry = debugfs_create_file("wakeup", 0644, | ||
1009 | enable_root, (void *)&wakeup_latency_enabled_data, | ||
1010 | &enable_fops); | ||
1011 | #endif | ||
1012 | |||
1013 | #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST | ||
1014 | dentry = debugfs_create_dir(missed_timer_offsets_dir, | ||
1015 | latency_hist_root); | ||
1016 | for_each_possible_cpu(i) { | ||
1017 | sprintf(name, cpufmt, i); | ||
1018 | entry = debugfs_create_file(name, 0444, dentry, | ||
1019 | &per_cpu(missed_timer_offsets, i), &latency_hist_fops); | ||
1020 | my_hist = &per_cpu(missed_timer_offsets, i); | ||
1021 | atomic_set(&my_hist->hist_mode, 1); | ||
1022 | my_hist->min_lat = 0xFFFFFFFFUL; | ||
1023 | |||
1024 | sprintf(name, cpufmt_maxlatproc, i); | ||
1025 | mp = &per_cpu(missed_timer_offsets_maxlatproc, i); | ||
1026 | entry = debugfs_create_file(name, 0444, dentry, mp, | ||
1027 | &maxlatproc_fops); | ||
1028 | mp->prio = mp->pid = mp->latency = -1; | ||
1029 | } | ||
1030 | entry = debugfs_create_file("pid", 0644, dentry, | ||
1031 | (void *)&missed_timer_offsets_pid, &pid_fops); | ||
1032 | entry = debugfs_create_file("reset", 0644, dentry, | ||
1033 | (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops); | ||
1034 | entry = debugfs_create_file("missed_timer_offsets", 0644, | ||
1035 | enable_root, (void *)&missed_timer_offsets_enabled_data, | ||
1036 | &enable_fops); | ||
1037 | #endif | ||
1038 | return 0; | ||
1039 | } | ||
1040 | |||
1041 | __initcall(latency_hist_init); | ||
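latency_hist.c exposes each per-CPU histogram through the seq_file operations above: l_start() prints a block of "#"-prefixed summary lines, then l_show() emits one "<latency-usecs>\t<sample-count>" row per bucket. The userspace sketch below (not part of the patch) recomputes the sample count and average from those rows; samples outside the histogram bounds appear only in the "#" header, so the recomputed figures can be slightly lower than the header's totals.

```c
/* Userspace sketch: parse one latency_hist CPU file and recompute
 * totals from the bucket rows, skipping the "#" summary header. */
#include <stdio.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] :
		"/sys/kernel/debug/tracing/latency_hist/wakeup/CPU0";
	FILE *f = fopen(path, "r");
	char line[256];
	long usecs;
	unsigned long long samples, total = 0;
	long double weighted = 0;

	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (line[0] == '#')
			continue;	/* summary header lines */
		if (sscanf(line, "%ld %llu", &usecs, &samples) != 2)
			continue;
		total += samples;
		weighted += (long double)usecs * samples;
	}
	fclose(f);

	if (total)
		printf("%llu samples, avg %.1Lf usecs\n",
		       total, weighted / total);
	else
		printf("no samples\n");
	return 0;
}
```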
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 54191d6ed195..850918a4a8ee 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -422,7 +422,7 @@ int ring_buffer_print_page_header(struct trace_seq *s) | |||
422 | struct ring_buffer_per_cpu { | 422 | struct ring_buffer_per_cpu { |
423 | int cpu; | 423 | int cpu; |
424 | struct ring_buffer *buffer; | 424 | struct ring_buffer *buffer; |
425 | spinlock_t reader_lock; /* serialize readers */ | 425 | raw_spinlock_t reader_lock; /* serialize readers */ |
426 | arch_spinlock_t lock; | 426 | arch_spinlock_t lock; |
427 | struct lock_class_key lock_key; | 427 | struct lock_class_key lock_key; |
428 | struct list_head *pages; | 428 | struct list_head *pages; |
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) | |||
998 | 998 | ||
999 | cpu_buffer->cpu = cpu; | 999 | cpu_buffer->cpu = cpu; |
1000 | cpu_buffer->buffer = buffer; | 1000 | cpu_buffer->buffer = buffer; |
1001 | spin_lock_init(&cpu_buffer->reader_lock); | 1001 | raw_spin_lock_init(&cpu_buffer->reader_lock); |
1002 | lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); | 1002 | lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); |
1003 | cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; | 1003 | cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
1004 | 1004 | ||
@@ -1191,11 +1191,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); | |||
1191 | static void | 1191 | static void |
1192 | rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | 1192 | rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) |
1193 | { | 1193 | { |
1194 | struct buffer_page *bpage; | 1194 | struct buffer_page *bpage, *tmp; |
1195 | struct list_head *p; | 1195 | struct list_head *p; |
1196 | LIST_HEAD(tofree); | ||
1196 | unsigned i; | 1197 | unsigned i; |
1197 | 1198 | ||
1198 | spin_lock_irq(&cpu_buffer->reader_lock); | 1199 | raw_spin_lock_irq(&cpu_buffer->reader_lock); |
1199 | rb_head_page_deactivate(cpu_buffer); | 1200 | rb_head_page_deactivate(cpu_buffer); |
1200 | 1201 | ||
1201 | for (i = 0; i < nr_pages; i++) { | 1202 | for (i = 0; i < nr_pages; i++) { |
@@ -1203,8 +1204,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | |||
1203 | return; | 1204 | return; |
1204 | p = cpu_buffer->pages->next; | 1205 | p = cpu_buffer->pages->next; |
1205 | bpage = list_entry(p, struct buffer_page, list); | 1206 | bpage = list_entry(p, struct buffer_page, list); |
1206 | list_del_init(&bpage->list); | 1207 | list_del(&bpage->list); |
1207 | free_buffer_page(bpage); | 1208 | list_add(&bpage->list, &tofree); |
1208 | } | 1209 | } |
1209 | if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages))) | 1210 | if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages))) |
1210 | return; | 1211 | return; |
@@ -1212,7 +1213,13 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | |||
1212 | rb_reset_cpu(cpu_buffer); | 1213 | rb_reset_cpu(cpu_buffer); |
1213 | rb_check_pages(cpu_buffer); | 1214 | rb_check_pages(cpu_buffer); |
1214 | 1215 | ||
1215 | spin_unlock_irq(&cpu_buffer->reader_lock); | 1216 | raw_spin_unlock_irq(&cpu_buffer->reader_lock); |
1217 | |||
1218 | list_for_each_entry_safe(bpage, tmp, &tofree, list) { | ||
1219 | list_del_init(&bpage->list); | ||
1220 | free_buffer_page(bpage); | ||
1221 | } | ||
1222 | |||
1216 | } | 1223 | } |
1217 | 1224 | ||
1218 | static void | 1225 | static void |
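The rb_remove_pages() hunk above also changes the teardown order: pages are unlinked onto a local "tofree" list while reader_lock (now a raw spinlock) is held, and free_buffer_page() runs only after the lock is dropped, since freeing can enter allocator paths that may sleep under PREEMPT_RT. A generic sketch of that defer-free pattern (illustrative, not the kernel code) follows.

```c
#include <stdlib.h>
#include <pthread.h>

struct node {
	struct node *next;
};

/* Unlink every node while holding the lock, but call free() only after
 * the lock is released -- the same split rb_remove_pages() now does
 * with its local "tofree" list. */
static void remove_all(struct node **head, pthread_mutex_t *lock)
{
	struct node *tofree = NULL, *n;

	pthread_mutex_lock(lock);
	while ((n = *head)) {		/* unlink while protected */
		*head = n->next;
		n->next = tofree;
		tofree = n;
	}
	pthread_mutex_unlock(lock);

	while ((n = tofree)) {		/* free with the lock dropped */
		tofree = n->next;
		free(n);
	}
}
```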
@@ -1223,7 +1230,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
1223 | struct list_head *p; | 1230 | struct list_head *p; |
1224 | unsigned i; | 1231 | unsigned i; |
1225 | 1232 | ||
1226 | spin_lock_irq(&cpu_buffer->reader_lock); | 1233 | raw_spin_lock_irq(&cpu_buffer->reader_lock); |
1227 | rb_head_page_deactivate(cpu_buffer); | 1234 | rb_head_page_deactivate(cpu_buffer); |
1228 | 1235 | ||
1229 | for (i = 0; i < nr_pages; i++) { | 1236 | for (i = 0; i < nr_pages; i++) { |
@@ -1237,7 +1244,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
1237 | rb_reset_cpu(cpu_buffer); | 1244 | rb_reset_cpu(cpu_buffer); |
1238 | rb_check_pages(cpu_buffer); | 1245 | rb_check_pages(cpu_buffer); |
1239 | 1246 | ||
1240 | spin_unlock_irq(&cpu_buffer->reader_lock); | 1247 | raw_spin_unlock_irq(&cpu_buffer->reader_lock); |
1241 | } | 1248 | } |
1242 | 1249 | ||
1243 | /** | 1250 | /** |
@@ -2739,9 +2746,9 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter) | |||
2739 | 2746 | ||
2740 | cpu_buffer = iter->cpu_buffer; | 2747 | cpu_buffer = iter->cpu_buffer; |
2741 | 2748 | ||
2742 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 2749 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
2743 | rb_iter_reset(iter); | 2750 | rb_iter_reset(iter); |
2744 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 2751 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
2745 | } | 2752 | } |
2746 | EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); | 2753 | EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); |
2747 | 2754 | ||
@@ -3175,12 +3182,12 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
3175 | again: | 3182 | again: |
3176 | local_irq_save(flags); | 3183 | local_irq_save(flags); |
3177 | if (dolock) | 3184 | if (dolock) |
3178 | spin_lock(&cpu_buffer->reader_lock); | 3185 | raw_spin_lock(&cpu_buffer->reader_lock); |
3179 | event = rb_buffer_peek(cpu_buffer, ts); | 3186 | event = rb_buffer_peek(cpu_buffer, ts); |
3180 | if (event && event->type_len == RINGBUF_TYPE_PADDING) | 3187 | if (event && event->type_len == RINGBUF_TYPE_PADDING) |
3181 | rb_advance_reader(cpu_buffer); | 3188 | rb_advance_reader(cpu_buffer); |
3182 | if (dolock) | 3189 | if (dolock) |
3183 | spin_unlock(&cpu_buffer->reader_lock); | 3190 | raw_spin_unlock(&cpu_buffer->reader_lock); |
3184 | local_irq_restore(flags); | 3191 | local_irq_restore(flags); |
3185 | 3192 | ||
3186 | if (event && event->type_len == RINGBUF_TYPE_PADDING) | 3193 | if (event && event->type_len == RINGBUF_TYPE_PADDING) |
@@ -3205,9 +3212,9 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
3205 | unsigned long flags; | 3212 | unsigned long flags; |
3206 | 3213 | ||
3207 | again: | 3214 | again: |
3208 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 3215 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
3209 | event = rb_iter_peek(iter, ts); | 3216 | event = rb_iter_peek(iter, ts); |
3210 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 3217 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
3211 | 3218 | ||
3212 | if (event && event->type_len == RINGBUF_TYPE_PADDING) | 3219 | if (event && event->type_len == RINGBUF_TYPE_PADDING) |
3213 | goto again; | 3220 | goto again; |
@@ -3243,14 +3250,14 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
3243 | cpu_buffer = buffer->buffers[cpu]; | 3250 | cpu_buffer = buffer->buffers[cpu]; |
3244 | local_irq_save(flags); | 3251 | local_irq_save(flags); |
3245 | if (dolock) | 3252 | if (dolock) |
3246 | spin_lock(&cpu_buffer->reader_lock); | 3253 | raw_spin_lock(&cpu_buffer->reader_lock); |
3247 | 3254 | ||
3248 | event = rb_buffer_peek(cpu_buffer, ts); | 3255 | event = rb_buffer_peek(cpu_buffer, ts); |
3249 | if (event) | 3256 | if (event) |
3250 | rb_advance_reader(cpu_buffer); | 3257 | rb_advance_reader(cpu_buffer); |
3251 | 3258 | ||
3252 | if (dolock) | 3259 | if (dolock) |
3253 | spin_unlock(&cpu_buffer->reader_lock); | 3260 | raw_spin_unlock(&cpu_buffer->reader_lock); |
3254 | local_irq_restore(flags); | 3261 | local_irq_restore(flags); |
3255 | 3262 | ||
3256 | out: | 3263 | out: |
@@ -3296,11 +3303,11 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu) | |||
3296 | atomic_inc(&cpu_buffer->record_disabled); | 3303 | atomic_inc(&cpu_buffer->record_disabled); |
3297 | synchronize_sched(); | 3304 | synchronize_sched(); |
3298 | 3305 | ||
3299 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 3306 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
3300 | arch_spin_lock(&cpu_buffer->lock); | 3307 | arch_spin_lock(&cpu_buffer->lock); |
3301 | rb_iter_reset(iter); | 3308 | rb_iter_reset(iter); |
3302 | arch_spin_unlock(&cpu_buffer->lock); | 3309 | arch_spin_unlock(&cpu_buffer->lock); |
3303 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 3310 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
3304 | 3311 | ||
3305 | return iter; | 3312 | return iter; |
3306 | } | 3313 | } |
@@ -3337,7 +3344,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) | |||
3337 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 3344 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
3338 | unsigned long flags; | 3345 | unsigned long flags; |
3339 | 3346 | ||
3340 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 3347 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
3341 | again: | 3348 | again: |
3342 | event = rb_iter_peek(iter, ts); | 3349 | event = rb_iter_peek(iter, ts); |
3343 | if (!event) | 3350 | if (!event) |
@@ -3348,7 +3355,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) | |||
3348 | 3355 | ||
3349 | rb_advance_iter(iter); | 3356 | rb_advance_iter(iter); |
3350 | out: | 3357 | out: |
3351 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 3358 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
3352 | 3359 | ||
3353 | return event; | 3360 | return event; |
3354 | } | 3361 | } |
@@ -3414,7 +3421,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) | |||
3414 | 3421 | ||
3415 | atomic_inc(&cpu_buffer->record_disabled); | 3422 | atomic_inc(&cpu_buffer->record_disabled); |
3416 | 3423 | ||
3417 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 3424 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
3418 | 3425 | ||
3419 | if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) | 3426 | if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) |
3420 | goto out; | 3427 | goto out; |
@@ -3426,7 +3433,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) | |||
3426 | arch_spin_unlock(&cpu_buffer->lock); | 3433 | arch_spin_unlock(&cpu_buffer->lock); |
3427 | 3434 | ||
3428 | out: | 3435 | out: |
3429 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 3436 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
3430 | 3437 | ||
3431 | atomic_dec(&cpu_buffer->record_disabled); | 3438 | atomic_dec(&cpu_buffer->record_disabled); |
3432 | } | 3439 | } |
@@ -3464,10 +3471,10 @@ int ring_buffer_empty(struct ring_buffer *buffer) | |||
3464 | cpu_buffer = buffer->buffers[cpu]; | 3471 | cpu_buffer = buffer->buffers[cpu]; |
3465 | local_irq_save(flags); | 3472 | local_irq_save(flags); |
3466 | if (dolock) | 3473 | if (dolock) |
3467 | spin_lock(&cpu_buffer->reader_lock); | 3474 | raw_spin_lock(&cpu_buffer->reader_lock); |
3468 | ret = rb_per_cpu_empty(cpu_buffer); | 3475 | ret = rb_per_cpu_empty(cpu_buffer); |
3469 | if (dolock) | 3476 | if (dolock) |
3470 | spin_unlock(&cpu_buffer->reader_lock); | 3477 | raw_spin_unlock(&cpu_buffer->reader_lock); |
3471 | local_irq_restore(flags); | 3478 | local_irq_restore(flags); |
3472 | 3479 | ||
3473 | if (!ret) | 3480 | if (!ret) |
@@ -3498,10 +3505,10 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) | |||
3498 | cpu_buffer = buffer->buffers[cpu]; | 3505 | cpu_buffer = buffer->buffers[cpu]; |
3499 | local_irq_save(flags); | 3506 | local_irq_save(flags); |
3500 | if (dolock) | 3507 | if (dolock) |
3501 | spin_lock(&cpu_buffer->reader_lock); | 3508 | raw_spin_lock(&cpu_buffer->reader_lock); |
3502 | ret = rb_per_cpu_empty(cpu_buffer); | 3509 | ret = rb_per_cpu_empty(cpu_buffer); |
3503 | if (dolock) | 3510 | if (dolock) |
3504 | spin_unlock(&cpu_buffer->reader_lock); | 3511 | raw_spin_unlock(&cpu_buffer->reader_lock); |
3505 | local_irq_restore(flags); | 3512 | local_irq_restore(flags); |
3506 | 3513 | ||
3507 | return ret; | 3514 | return ret; |
@@ -3696,7 +3703,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer, | |||
3696 | if (!bpage) | 3703 | if (!bpage) |
3697 | goto out; | 3704 | goto out; |
3698 | 3705 | ||
3699 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 3706 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
3700 | 3707 | ||
3701 | reader = rb_get_reader_page(cpu_buffer); | 3708 | reader = rb_get_reader_page(cpu_buffer); |
3702 | if (!reader) | 3709 | if (!reader) |
@@ -3771,7 +3778,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer, | |||
3771 | ret = read; | 3778 | ret = read; |
3772 | 3779 | ||
3773 | out_unlock: | 3780 | out_unlock: |
3774 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 3781 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
3775 | 3782 | ||
3776 | out: | 3783 | out: |
3777 | return ret; | 3784 | return ret; |
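
The hunks above complete the conversion of the per-CPU reader_lock from spinlock_t to raw_spinlock_t. On PREEMPT_RT an ordinary spinlock_t becomes a sleeping rt_mutex, which must not be taken on the hard-IRQ and preempt-disabled paths the ring buffer runs on; a raw spinlock keeps busy-waiting there. Below is only a minimal sketch of that pattern, using a hypothetical my_reader_lock rather than the real ring_buffer_per_cpu field:

	#include <linux/spinlock.h>

	/* A raw spinlock stays a busy-waiting lock even on PREEMPT_RT, so it
	 * is safe for short critical sections entered with IRQs disabled. */
	static DEFINE_RAW_SPINLOCK(my_reader_lock);

	static void my_read_side(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&my_reader_lock, flags);
		/* ... short, bounded peek/advance of the reader page ... */
		raw_spin_unlock_irqrestore(&my_reader_lock, flags);
	}

The sections kept under the raw lock are the short peek/advance/reset paths shown in the hunks, so the cost of spinning instead of sleeping stays bounded.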
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 45cfb6d713de..9b66ee14b39f 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -258,7 +258,7 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | | |||
258 | TRACE_ITER_GRAPH_TIME; | 258 | TRACE_ITER_GRAPH_TIME; |
259 | 259 | ||
260 | static int trace_stop_count; | 260 | static int trace_stop_count; |
261 | static DEFINE_SPINLOCK(tracing_start_lock); | 261 | static DEFINE_RAW_SPINLOCK(tracing_start_lock); |
262 | 262 | ||
263 | /** | 263 | /** |
264 | * trace_wake_up - wake up tasks waiting for trace input | 264 | * trace_wake_up - wake up tasks waiting for trace input |
@@ -272,6 +272,11 @@ void trace_wake_up(void) | |||
272 | 272 | ||
273 | if (trace_flags & TRACE_ITER_BLOCK) | 273 | if (trace_flags & TRACE_ITER_BLOCK) |
274 | return; | 274 | return; |
275 | |||
276 | #ifdef CONFIG_PREEMPT_RT | ||
277 | if (in_atomic() || irqs_disabled()) | ||
278 | return; | ||
279 | #endif | ||
275 | /* | 280 | /* |
276 | * The runqueue_is_locked() can fail, but this is the best we | 281 | * The runqueue_is_locked() can fail, but this is the best we |
277 | * have for now: | 282 | * have for now: |
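
The added CONFIG_PREEMPT_RT guard keeps trace_wake_up() from entering the wakeup path while atomic or with interrupts disabled: on PREEMPT_RT the wait-queue lock taken by wake_up() is a sleeping lock, so a wakeup from such a context would be illegal. A hedged sketch of the same guard around a hypothetical wait queue (my_trace_wait and my_trace_wake_up are illustrative names, not the tracer's own):

	#include <linux/wait.h>
	#include <linux/hardirq.h>	/* in_atomic() */
	#include <linux/irqflags.h>	/* irqs_disabled() */

	static DECLARE_WAIT_QUEUE_HEAD(my_trace_wait);

	static void my_trace_wake_up(void)
	{
	#ifdef CONFIG_PREEMPT_RT
		/* wake_up() takes a sleeping lock on RT; skip the wakeup when
		 * called from atomic or IRQ-disabled context. */
		if (in_atomic() || irqs_disabled())
			return;
	#endif
		wake_up(&my_trace_wait);
	}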
@@ -847,7 +852,7 @@ void tracing_start(void) | |||
847 | if (tracing_disabled) | 852 | if (tracing_disabled) |
848 | return; | 853 | return; |
849 | 854 | ||
850 | spin_lock_irqsave(&tracing_start_lock, flags); | 855 | raw_spin_lock_irqsave(&tracing_start_lock, flags); |
851 | if (--trace_stop_count) { | 856 | if (--trace_stop_count) { |
852 | if (trace_stop_count < 0) { | 857 | if (trace_stop_count < 0) { |
853 | /* Someone screwed up their debugging */ | 858 | /* Someone screwed up their debugging */ |
@@ -872,7 +877,7 @@ void tracing_start(void) | |||
872 | 877 | ||
873 | ftrace_start(); | 878 | ftrace_start(); |
874 | out: | 879 | out: |
875 | spin_unlock_irqrestore(&tracing_start_lock, flags); | 880 | raw_spin_unlock_irqrestore(&tracing_start_lock, flags); |
876 | } | 881 | } |
877 | 882 | ||
878 | /** | 883 | /** |
@@ -887,7 +892,7 @@ void tracing_stop(void) | |||
887 | unsigned long flags; | 892 | unsigned long flags; |
888 | 893 | ||
889 | ftrace_stop(); | 894 | ftrace_stop(); |
890 | spin_lock_irqsave(&tracing_start_lock, flags); | 895 | raw_spin_lock_irqsave(&tracing_start_lock, flags); |
891 | if (trace_stop_count++) | 896 | if (trace_stop_count++) |
892 | goto out; | 897 | goto out; |
893 | 898 | ||
@@ -905,7 +910,7 @@ void tracing_stop(void) | |||
905 | arch_spin_unlock(&ftrace_max_lock); | 910 | arch_spin_unlock(&ftrace_max_lock); |
906 | 911 | ||
907 | out: | 912 | out: |
908 | spin_unlock_irqrestore(&tracing_start_lock, flags); | 913 | raw_spin_unlock_irqrestore(&tracing_start_lock, flags); |
909 | } | 914 | } |
910 | 915 | ||
911 | void trace_stop_cmdline_recording(void); | 916 | void trace_stop_cmdline_recording(void); |
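
tracing_start() and tracing_stop() can be reached with interrupts already disabled, so the nesting counter they share is now protected by a raw spinlock that remains non-sleeping on PREEMPT_RT. A simplified sketch of that counter pattern, with hypothetical names (my_start_lock, my_stop_count) standing in for the real ones:

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(my_start_lock);
	static int my_stop_count;

	static void my_tracing_stop(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&my_start_lock, flags);
		if (my_stop_count++ == 0) {
			/* ... first stop: actually disable recording ... */
		}
		raw_spin_unlock_irqrestore(&my_start_lock, flags);
	}

	static void my_tracing_start(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&my_start_lock, flags);
		if (--my_stop_count == 0) {
			/* ... last start: re-enable recording ... */
		}
		raw_spin_unlock_irqrestore(&my_start_lock, flags);
	}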
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index b1342c5d37cf..c2eef6709ad6 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -608,7 +608,7 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) | |||
608 | int ret, len; | 608 | int ret, len; |
609 | int i; | 609 | int i; |
610 | 610 | ||
611 | sprintf(msecs_str, "%lu", (unsigned long) duration); | 611 | snprintf(msecs_str, sizeof(msecs_str), "%lu", (unsigned long) duration); |
612 | 612 | ||
613 | /* Print msecs */ | 613 | /* Print msecs */ |
614 | ret = trace_seq_printf(s, "%s", msecs_str); | 614 | ret = trace_seq_printf(s, "%s", msecs_str); |
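
The sprintf() to snprintf() change bounds the formatted duration to the size of the local msecs_str buffer instead of trusting the value to fit. The same defensive pattern in isolation (the buffer size here is chosen for illustration):

	#include <linux/kernel.h>	/* snprintf() */

	static void my_format_duration(unsigned long long duration)
	{
		char msecs_str[21];	/* 20 decimal digits of a 64-bit value + NUL */

		/* snprintf() truncates rather than overrunning msecs_str. */
		snprintf(msecs_str, sizeof(msecs_str), "%lu",
			 (unsigned long)duration);
		/* msecs_str can now be handed to trace_seq_printf(s, "%s", ...) */
	}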
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 2974bc7538c7..4896b88abbfd 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -17,13 +17,14 @@ | |||
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | 18 | ||
19 | #include "trace.h" | 19 | #include "trace.h" |
20 | #include <trace/events/hist.h> | ||
20 | 21 | ||
21 | static struct trace_array *irqsoff_trace __read_mostly; | 22 | static struct trace_array *irqsoff_trace __read_mostly; |
22 | static int tracer_enabled __read_mostly; | 23 | static int tracer_enabled __read_mostly; |
23 | 24 | ||
24 | static DEFINE_PER_CPU(int, tracing_cpu); | 25 | static DEFINE_PER_CPU(int, tracing_cpu); |
25 | 26 | ||
26 | static DEFINE_SPINLOCK(max_trace_lock); | 27 | static DEFINE_RAW_SPINLOCK(max_trace_lock); |
27 | 28 | ||
28 | enum { | 29 | enum { |
29 | TRACER_IRQS_OFF = (1 << 1), | 30 | TRACER_IRQS_OFF = (1 << 1), |
@@ -144,7 +145,7 @@ check_critical_timing(struct trace_array *tr, | |||
144 | if (!report_latency(delta)) | 145 | if (!report_latency(delta)) |
145 | goto out; | 146 | goto out; |
146 | 147 | ||
147 | spin_lock_irqsave(&max_trace_lock, flags); | 148 | raw_spin_lock_irqsave(&max_trace_lock, flags); |
148 | 149 | ||
149 | /* check if we are still the max latency */ | 150 | /* check if we are still the max latency */ |
150 | if (!report_latency(delta)) | 151 | if (!report_latency(delta)) |
@@ -167,7 +168,7 @@ check_critical_timing(struct trace_array *tr, | |||
167 | max_sequence++; | 168 | max_sequence++; |
168 | 169 | ||
169 | out_unlock: | 170 | out_unlock: |
170 | spin_unlock_irqrestore(&max_trace_lock, flags); | 171 | raw_spin_unlock_irqrestore(&max_trace_lock, flags); |
171 | 172 | ||
172 | out: | 173 | out: |
173 | data->critical_sequence = max_sequence; | 174 | data->critical_sequence = max_sequence; |
@@ -249,11 +250,13 @@ void start_critical_timings(void) | |||
249 | { | 250 | { |
250 | if (preempt_trace() || irq_trace()) | 251 | if (preempt_trace() || irq_trace()) |
251 | start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); | 252 | start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); |
253 | trace_preemptirqsoff_hist(TRACE_START, 1); | ||
252 | } | 254 | } |
253 | EXPORT_SYMBOL_GPL(start_critical_timings); | 255 | EXPORT_SYMBOL_GPL(start_critical_timings); |
254 | 256 | ||
255 | void stop_critical_timings(void) | 257 | void stop_critical_timings(void) |
256 | { | 258 | { |
259 | trace_preemptirqsoff_hist(TRACE_STOP, 0); | ||
257 | if (preempt_trace() || irq_trace()) | 260 | if (preempt_trace() || irq_trace()) |
258 | stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); | 261 | stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); |
259 | } | 262 | } |
@@ -263,6 +266,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings); | |||
263 | #ifdef CONFIG_PROVE_LOCKING | 266 | #ifdef CONFIG_PROVE_LOCKING |
264 | void time_hardirqs_on(unsigned long a0, unsigned long a1) | 267 | void time_hardirqs_on(unsigned long a0, unsigned long a1) |
265 | { | 268 | { |
269 | trace_preemptirqsoff_hist(IRQS_ON, 0); | ||
266 | if (!preempt_trace() && irq_trace()) | 270 | if (!preempt_trace() && irq_trace()) |
267 | stop_critical_timing(a0, a1); | 271 | stop_critical_timing(a0, a1); |
268 | } | 272 | } |
@@ -271,6 +275,7 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1) | |||
271 | { | 275 | { |
272 | if (!preempt_trace() && irq_trace()) | 276 | if (!preempt_trace() && irq_trace()) |
273 | start_critical_timing(a0, a1); | 277 | start_critical_timing(a0, a1); |
278 | trace_preemptirqsoff_hist(IRQS_OFF, 1); | ||
274 | } | 279 | } |
275 | 280 | ||
276 | #else /* !CONFIG_PROVE_LOCKING */ | 281 | #else /* !CONFIG_PROVE_LOCKING */ |
@@ -304,6 +309,7 @@ inline void print_irqtrace_events(struct task_struct *curr) | |||
304 | */ | 309 | */ |
305 | void trace_hardirqs_on(void) | 310 | void trace_hardirqs_on(void) |
306 | { | 311 | { |
312 | trace_preemptirqsoff_hist(IRQS_ON, 0); | ||
307 | if (!preempt_trace() && irq_trace()) | 313 | if (!preempt_trace() && irq_trace()) |
308 | stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); | 314 | stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); |
309 | } | 315 | } |
@@ -313,11 +319,13 @@ void trace_hardirqs_off(void) | |||
313 | { | 319 | { |
314 | if (!preempt_trace() && irq_trace()) | 320 | if (!preempt_trace() && irq_trace()) |
315 | start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); | 321 | start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); |
322 | trace_preemptirqsoff_hist(IRQS_OFF, 1); | ||
316 | } | 323 | } |
317 | EXPORT_SYMBOL(trace_hardirqs_off); | 324 | EXPORT_SYMBOL(trace_hardirqs_off); |
318 | 325 | ||
319 | void trace_hardirqs_on_caller(unsigned long caller_addr) | 326 | void trace_hardirqs_on_caller(unsigned long caller_addr) |
320 | { | 327 | { |
328 | trace_preemptirqsoff_hist(IRQS_ON, 0); | ||
321 | if (!preempt_trace() && irq_trace()) | 329 | if (!preempt_trace() && irq_trace()) |
322 | stop_critical_timing(CALLER_ADDR0, caller_addr); | 330 | stop_critical_timing(CALLER_ADDR0, caller_addr); |
323 | } | 331 | } |
@@ -327,6 +335,7 @@ void trace_hardirqs_off_caller(unsigned long caller_addr) | |||
327 | { | 335 | { |
328 | if (!preempt_trace() && irq_trace()) | 336 | if (!preempt_trace() && irq_trace()) |
329 | start_critical_timing(CALLER_ADDR0, caller_addr); | 337 | start_critical_timing(CALLER_ADDR0, caller_addr); |
338 | trace_preemptirqsoff_hist(IRQS_OFF, 1); | ||
330 | } | 339 | } |
331 | EXPORT_SYMBOL(trace_hardirqs_off_caller); | 340 | EXPORT_SYMBOL(trace_hardirqs_off_caller); |
332 | 341 | ||
@@ -336,12 +345,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller); | |||
336 | #ifdef CONFIG_PREEMPT_TRACER | 345 | #ifdef CONFIG_PREEMPT_TRACER |
337 | void trace_preempt_on(unsigned long a0, unsigned long a1) | 346 | void trace_preempt_on(unsigned long a0, unsigned long a1) |
338 | { | 347 | { |
348 | trace_preemptirqsoff_hist(PREEMPT_ON, 0); | ||
339 | if (preempt_trace()) | 349 | if (preempt_trace()) |
340 | stop_critical_timing(a0, a1); | 350 | stop_critical_timing(a0, a1); |
341 | } | 351 | } |
342 | 352 | ||
343 | void trace_preempt_off(unsigned long a0, unsigned long a1) | 353 | void trace_preempt_off(unsigned long a0, unsigned long a1) |
344 | { | 354 | { |
355 | trace_preemptirqsoff_hist(PREEMPT_OFF, 1); | ||
345 | if (preempt_trace()) | 356 | if (preempt_trace()) |
346 | start_critical_timing(a0, a1); | 357 | start_critical_timing(a0, a1); |
347 | } | 358 | } |
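
Each irqs-on/off and preempt-on/off transition above now also fires the preemptirqsoff_hist tracepoint (declared in the new <trace/events/hist.h>), passing a reason code and a flag that marks whether an off period starts (1) or ends (0). The histogram code in kernel/trace/latency_hist.c from the diffstat attaches to it; that file is not shown here, so the probe below is only a hedged sketch of the idea, with hypothetical names (my_preemptirqsoff_probe, my_hist_cpu, MY_HIST_BUCKETS):

	#include <linux/types.h>
	#include <linux/percpu.h>
	#include <linux/sched.h>	/* sched_clock() */
	#include <linux/smp.h>

	#define MY_HIST_BUCKETS	128

	struct my_hist {
		u64		off_ts;				/* last "off" timestamp */
		unsigned long	bucket[MY_HIST_BUCKETS];	/* ~1 us per bucket */
	};
	static DEFINE_PER_CPU(struct my_hist, my_hist_cpu);

	/* Two arguments, matching the call sites above: a reason code
	 * (IRQS_OFF, PREEMPT_ON, ...) and a start-of-off-period flag. */
	static void my_preemptirqsoff_probe(int reason, int starthist)
	{
		struct my_hist *h = &per_cpu(my_hist_cpu, raw_smp_processor_id());

		if (starthist) {
			h->off_ts = sched_clock();
		} else if (h->off_ts) {
			u64 delta = (sched_clock() - h->off_ts) >> 10; /* ns -> ~us */

			if (delta >= MY_HIST_BUCKETS)
				delta = MY_HIST_BUCKETS - 1;
			h->bucket[delta]++;
			h->off_ts = 0;
		}
	}

In the real latency_hist code the probe would be registered through the register_trace_preemptirqsoff_hist() helper generated for the tracepoint, with the per-CPU buckets exposed through debugfs.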