author     Ingo Molnar <mingo@elte.hu>    2009-02-25 10:40:10 -0500
committer  Ingo Molnar <mingo@elte.hu>    2009-02-25 10:40:10 -0500
commit     2e31add2a7e2a15d07f592c21ba35870fa9a1d1f (patch)
tree       814e2c70fb5528f108114c0da6e5a1e219e6a0e1 /kernel/trace
parent     d639bab8da86d330493487e8c0fea8ca31f53427 (diff)
parent     17581ad812a9abb0182260374ef2e52d4a808a64 (diff)

Merge branch 'x86/urgent' into x86/pat
Diffstat (limited to 'kernel/trace')

-rw-r--r--  kernel/trace/Kconfig               25
-rw-r--r--  kernel/trace/ftrace.c              38
-rw-r--r--  kernel/trace/ring_buffer.c         15
-rw-r--r--  kernel/trace/trace.c                5
-rw-r--r--  kernel/trace/trace_irqsoff.c        1
-rw-r--r--  kernel/trace/trace_mmiotrace.c     14
-rw-r--r--  kernel/trace/trace_sched_wakeup.c   1
-rw-r--r--  kernel/trace/trace_selftest.c      19

8 files changed, 105 insertions, 13 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e2a4ff6fc3a6..34e707e5ab87 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -52,6 +52,7 @@ config FUNCTION_TRACER
         depends on HAVE_FUNCTION_TRACER
         depends on DEBUG_KERNEL
         select FRAME_POINTER
+        select KALLSYMS
         select TRACING
         select CONTEXT_SWITCH_TRACER
         help
@@ -238,6 +239,7 @@ config STACK_TRACER
         depends on DEBUG_KERNEL
         select FUNCTION_TRACER
         select STACKTRACE
+        select KALLSYMS
         help
           This special tracer records the maximum stack footprint of the
           kernel and displays it in debugfs/tracing/stack_trace.
@@ -302,4 +304,27 @@ config FTRACE_STARTUP_TEST
           functioning properly. It will do tests on all the configured
           tracers of ftrace.
 
+config MMIOTRACE
+        bool "Memory mapped IO tracing"
+        depends on HAVE_MMIOTRACE_SUPPORT && DEBUG_KERNEL && PCI
+        select TRACING
+        help
+          Mmiotrace traces Memory Mapped I/O access and is meant for
+          debugging and reverse engineering. It is called from the ioremap
+          implementation and works via page faults. Tracing is disabled by
+          default and can be enabled at run-time.
+
+          See Documentation/tracers/mmiotrace.txt.
+          If you are not helping to develop drivers, say N.
+
+config MMIOTRACE_TEST
+        tristate "Test module for mmiotrace"
+        depends on MMIOTRACE && m
+        help
+          This is a dumb module for testing mmiotrace. It is very dangerous
+          as it will write garbage to IO memory starting at a given address.
+          However, it should be safe to use on e.g. unused portion of VRAM.
+
+          Say N, unless you absolutely know what you are doing.
+
 endmenu
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2f32969c09df..fdf913dfc7e8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -17,6 +17,7 @@
 #include <linux/clocksource.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
+#include <linux/suspend.h>
 #include <linux/debugfs.h>
 #include <linux/hardirq.h>
 #include <linux/kthread.h>
@@ -1736,9 +1737,12 @@ static void clear_ftrace_pid(struct pid *pid)
 {
         struct task_struct *p;
 
+        rcu_read_lock();
         do_each_pid_task(pid, PIDTYPE_PID, p) {
                 clear_tsk_trace_trace(p);
         } while_each_pid_task(pid, PIDTYPE_PID, p);
+        rcu_read_unlock();
+
         put_pid(pid);
 }
 
@@ -1746,9 +1750,11 @@ static void set_ftrace_pid(struct pid *pid)
 {
         struct task_struct *p;
 
+        rcu_read_lock();
         do_each_pid_task(pid, PIDTYPE_PID, p) {
                 set_tsk_trace_trace(p);
         } while_each_pid_task(pid, PIDTYPE_PID, p);
+        rcu_read_unlock();
 }
 
 static void clear_ftrace_pid_task(struct pid **pid)
@@ -1965,6 +1971,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 static atomic_t ftrace_graph_active;
+static struct notifier_block ftrace_suspend_notifier;
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 {
@@ -2026,7 +2033,7 @@ free:
 static int start_graph_tracing(void)
 {
         struct ftrace_ret_stack **ret_stack_list;
-        int ret;
+        int ret, cpu;
 
         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
                                  sizeof(struct ftrace_ret_stack *),
@@ -2035,6 +2042,10 @@ static int start_graph_tracing(void)
         if (!ret_stack_list)
                 return -ENOMEM;
 
+        /* The cpu_boot init_task->ret_stack will never be freed */
+        for_each_online_cpu(cpu)
+                ftrace_graph_init_task(idle_task(cpu));
+
         do {
                 ret = alloc_retstack_tasklist(ret_stack_list);
         } while (ret == -EAGAIN);
@@ -2043,6 +2054,27 @@ static int start_graph_tracing(void)
         return ret;
 }
 
+/*
+ * Hibernation protection.
+ * The state of the current task is too much unstable during
+ * suspend/restore to disk. We want to protect against that.
+ */
+static int
+ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
+                             void *unused)
+{
+        switch (state) {
+        case PM_HIBERNATION_PREPARE:
+                pause_graph_tracing();
+                break;
+
+        case PM_POST_HIBERNATION:
+                unpause_graph_tracing();
+                break;
+        }
+        return NOTIFY_DONE;
+}
+
 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                           trace_func_graph_ent_t entryfunc)
 {
@@ -2050,6 +2082,9 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 
         mutex_lock(&ftrace_sysctl_lock);
 
+        ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
+        register_pm_notifier(&ftrace_suspend_notifier);
+
         atomic_inc(&ftrace_graph_active);
         ret = start_graph_tracing();
         if (ret) {
@@ -2075,6 +2110,7 @@ void unregister_ftrace_graph(void)
         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
         ftrace_graph_entry = ftrace_graph_entry_stub;
         ftrace_shutdown(FTRACE_STOP_FUNC_RET);
+        unregister_pm_notifier(&ftrace_suspend_notifier);
 
         mutex_unlock(&ftrace_sysctl_lock);
 }
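Note on the hibernation hook above: it uses the kernel's standard PM-notifier mechanism (register_pm_notifier/unregister_pm_notifier from <linux/suspend.h>). A minimal sketch of that pattern in a hypothetical standalone module (the module name, messages, and callback body are illustrative only, not part of this patch):

#include <linux/module.h>
#include <linux/suspend.h>

/* Hypothetical module showing the PM-notifier register/unregister lifecycle. */
static int demo_pm_callback(struct notifier_block *nb, unsigned long state,
                            void *unused)
{
        switch (state) {
        case PM_HIBERNATION_PREPARE:
                pr_info("demo: hibernation image about to be created\n");
                break;
        case PM_POST_HIBERNATION:
                pr_info("demo: resumed from hibernation\n");
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block demo_pm_nb = {
        .notifier_call = demo_pm_callback,
};

static int __init demo_init(void)
{
        return register_pm_notifier(&demo_pm_nb);
}

static void __exit demo_exit(void)
{
        unregister_pm_notifier(&demo_pm_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");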
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 8b0daf0662ef..bd38c5cfd8ad 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -246,7 +246,7 @@ static inline int test_time_stamp(u64 delta)
         return 0;
 }
 
-#define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page))
+#define BUF_PAGE_SIZE (PAGE_SIZE - offsetof(struct buffer_data_page, data))
 
 /*
  * head_page == tail_page && head == tail then buffer is empty.
@@ -1025,12 +1025,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
         }
 
         if (next_page == head_page) {
-                if (!(buffer->flags & RB_FL_OVERWRITE)) {
-                        /* reset write */
-                        if (tail <= BUF_PAGE_SIZE)
-                                local_set(&tail_page->write, tail);
+                if (!(buffer->flags & RB_FL_OVERWRITE))
                         goto out_unlock;
-                }
 
                 /* tail_page has not moved yet? */
                 if (tail_page == cpu_buffer->tail_page) {
@@ -1105,6 +1101,10 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
         return event;
 
  out_unlock:
+        /* reset write */
+        if (tail <= BUF_PAGE_SIZE)
+                local_set(&tail_page->write, tail);
+
         __raw_spin_unlock(&cpu_buffer->lock);
         local_irq_restore(flags);
         return NULL;
@@ -2174,6 +2174,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 
         cpu_buffer->overrun = 0;
         cpu_buffer->entries = 0;
+
+        cpu_buffer->write_stamp = 0;
+        cpu_buffer->read_stamp = 0;
 }
 
 /**
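Note on the BUF_PAGE_SIZE change above: sizeof(struct buffer_data_page) can include tail padding after the header fields, while offsetof(..., data) is exactly where the payload begins, so the latter gives the true number of usable bytes per page. A small user-space sketch of the difference, with a made-up struct standing in for buffer_data_page (field names and sizes are illustrative):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096

/* Stand-in for a page header followed by a flexible payload array. */
struct data_page {
        uint64_t time_stamp;
        uint32_t commit;
        unsigned char data[];   /* payload starts here */
};

int main(void)
{
        /* On a typical LP64 target: sizeof is 16 (8-byte alignment pads
         * past 'commit'), while offsetof(data) is 12. */
        printf("sizeof   = %zu\n", sizeof(struct data_page));
        printf("offsetof = %zu\n", offsetof(struct data_page, data));
        printf("usable bytes per page = %zu\n",
               PAGE_SIZE - offsetof(struct data_page, data));
        return 0;
}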
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c580233add95..17bb88d86ac2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -40,7 +40,7 @@
 
 #define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE)
 
-unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
+unsigned long __read_mostly tracing_max_latency;
 unsigned long __read_mostly tracing_thresh;
 
 /*
@@ -3736,7 +3736,7 @@ static struct notifier_block trace_die_notifier = {
  * it if we decide to change what log level the ftrace dump
  * should be at.
  */
-#define KERN_TRACE KERN_INFO
+#define KERN_TRACE KERN_EMERG
 
 static void
 trace_printk_seq(struct trace_seq *s)
@@ -3770,6 +3770,7 @@ void ftrace_dump(void)
         dump_ran = 1;
 
         /* No turning back! */
+        tracing_off();
         ftrace_kill();
 
         for_each_tracing_cpu(cpu) {
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 7c2e326bbc8b..62a78d943534 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -380,6 +380,7 @@ static void stop_irqsoff_tracer(struct trace_array *tr)
 
 static void __irqsoff_tracer_init(struct trace_array *tr)
 {
+        tracing_max_latency = 0;
         irqsoff_trace = tr;
         /* make sure that the tracer is visible */
         smp_wmb();
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index fffcb069f1dc..80e503ef6136 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/mmiotrace.h>
 #include <linux/pci.h>
+#include <asm/atomic.h>
 
 #include "trace.h"
 
@@ -19,6 +20,7 @@ struct header_iter {
 static struct trace_array *mmio_trace_array;
 static bool overrun_detected;
 static unsigned long prev_overruns;
+static atomic_t dropped_count;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
@@ -121,11 +123,11 @@ static void mmio_close(struct trace_iterator *iter)
 
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
-        unsigned long cnt = 0;
+        unsigned long cnt = atomic_xchg(&dropped_count, 0);
         unsigned long over = ring_buffer_overruns(iter->tr->buffer);
 
         if (over > prev_overruns)
-                cnt = over - prev_overruns;
+                cnt += over - prev_overruns;
         prev_overruns = over;
         return cnt;
 }
@@ -310,8 +312,10 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 
         event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                          &irq_flags);
-        if (!event)
+        if (!event) {
+                atomic_inc(&dropped_count);
                 return;
+        }
         entry = ring_buffer_event_data(event);
         tracing_generic_entry_update(&entry->ent, 0, preempt_count());
         entry->ent.type = TRACE_MMIO_RW;
@@ -338,8 +342,10 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 
         event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                          &irq_flags);
-        if (!event)
+        if (!event) {
+                atomic_inc(&dropped_count);
                 return;
+        }
         entry = ring_buffer_event_data(event);
         tracing_generic_entry_update(&entry->ent, 0, preempt_count());
         entry->ent.type = TRACE_MMIO_MAP;
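Note on the dropped_count logic above: it is a count-on-failure, drain-on-read pattern. Writers bump an atomic counter whenever event reservation fails, and the reader folds that counter into the overrun total with an atomic exchange, so drops are neither lost nor reported twice. A user-space sketch of the same idea, using C11 atomics instead of the kernel's atomic_t (all names here are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong dropped_count;
static unsigned long prev_overruns;

/* A producer calls this when it fails to reserve space for an event. */
static void record_drop(void)
{
        atomic_fetch_add(&dropped_count, 1);
}

/* The reader combines explicit drops with the buffer's own overrun count. */
static unsigned long count_lost(unsigned long ring_overruns)
{
        /* Take and reset the drop counter in a single atomic step. */
        unsigned long cnt = atomic_exchange(&dropped_count, 0);

        if (ring_overruns > prev_overruns)
                cnt += ring_overruns - prev_overruns;
        prev_overruns = ring_overruns;
        return cnt;
}

int main(void)
{
        record_drop();
        record_drop();
        /* 2 explicit drops plus 5 new overruns: prints 7. */
        printf("lost events: %lu\n", count_lost(5));
        return 0;
}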
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 43586b689e31..42ae1e77b6b3 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -333,6 +333,7 @@ static void stop_wakeup_tracer(struct trace_array *tr)
 
 static int wakeup_tracer_init(struct trace_array *tr)
 {
+        tracing_max_latency = 0;
         wakeup_trace = tr;
         start_wakeup_tracer(tr);
         return 0;
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 88c8eb70f54a..bc8e80a86bca 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -23,10 +23,20 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
 {
         struct ring_buffer_event *event;
         struct trace_entry *entry;
+        unsigned int loops = 0;
 
         while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
                 entry = ring_buffer_event_data(event);
 
+                /*
+                 * The ring buffer is a size of trace_buf_size, if
+                 * we loop more than the size, there's something wrong
+                 * with the ring buffer.
+                 */
+                if (loops++ > trace_buf_size) {
+                        printk(KERN_CONT ".. bad ring buffer ");
+                        goto failed;
+                }
                 if (!trace_valid_entry(entry)) {
                         printk(KERN_CONT ".. invalid entry %d ",
                                 entry->type);
@@ -57,11 +67,20 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 
         cnt = ring_buffer_entries(tr->buffer);
 
+        /*
+         * The trace_test_buffer_cpu runs a while loop to consume all data.
+         * If the calling tracer is broken, and is constantly filling
+         * the buffer, this will run forever, and hard lock the box.
+         * We disable the ring buffer while we do this test to prevent
+         * a hard lock up.
+         */
+        tracing_off();
         for_each_possible_cpu(cpu) {
                 ret = trace_test_buffer_cpu(tr, cpu);
                 if (ret)
                         break;
         }
+        tracing_on();
         __raw_spin_unlock(&ftrace_max_lock);
         local_irq_restore(flags);
 
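Note on the loops guard added to trace_test_buffer_cpu() above: it is a general defensive pattern. When draining a buffer that another context could keep refilling, bound the loop by the buffer's capacity so a misbehaving producer becomes a reported failure instead of a hang. A minimal user-space sketch of that guard (the queue, its capacity, and the helper names are hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define QUEUE_CAPACITY 1024

/* Hypothetical consumer step: returns true while items keep arriving. */
static bool consume_one(void)
{
        return true;    /* simulate a producer that never stops refilling */
}

/* Drain the queue, but never loop more times than it can possibly hold. */
static int drain_queue(void)
{
        unsigned int loops = 0;

        while (consume_one()) {
                if (loops++ > QUEUE_CAPACITY) {
                        fprintf(stderr, "bad queue: not empty after %u items\n",
                                loops);
                        return -1;      /* fail instead of hanging */
                }
        }
        return 0;
}

int main(void)
{
        return drain_queue() ? 1 : 0;
}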