author     David S. Miller <davem@davemloft.net>	2009-02-24 06:50:29 -0500
committer  David S. Miller <davem@davemloft.net>	2009-02-24 06:50:29 -0500
commit     e70049b9e74267dd47e1ffa62302073487afcb48 (patch)
tree       2cd000c0751ef31c9044b020d63f278cdf4f332d /kernel/trace
parent     d18921a0e319ab512f8186b1b1142c7b8634c779 (diff)
parent     f7e603ad8f78cd3b59e33fa72707da0cbabdf699 (diff)
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig            25
-rw-r--r--  kernel/trace/ftrace.c            6
-rw-r--r--  kernel/trace/trace_mmiotrace.c  14
-rw-r--r--  kernel/trace/trace_selftest.c   19
4 files changed, 59 insertions, 5 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e2a4ff6fc3a6..34e707e5ab87 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -52,6 +52,7 @@ config FUNCTION_TRACER
 	depends on HAVE_FUNCTION_TRACER
 	depends on DEBUG_KERNEL
 	select FRAME_POINTER
+	select KALLSYMS
 	select TRACING
 	select CONTEXT_SWITCH_TRACER
 	help
@@ -238,6 +239,7 @@ config STACK_TRACER
 	depends on DEBUG_KERNEL
 	select FUNCTION_TRACER
 	select STACKTRACE
+	select KALLSYMS
 	help
 	  This special tracer records the maximum stack footprint of the
 	  kernel and displays it in debugfs/tracing/stack_trace.
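
Both KALLSYMS selects above exist for the same reason: the function and stack tracers print raw kernel text addresses, and without CONFIG_KALLSYMS there is no way to turn those into symbol names, which makes the output nearly unreadable. A minimal sketch of the lookup this enables (illustrative only; report_traced_ip is a made-up helper, not part of the patch):

#include <linux/kallsyms.h>
#include <linux/kernel.h>

/* Resolve a traced instruction pointer to "name+off/size" form.
 * sprint_symbol() degrades to printing the bare hex address when
 * CONFIG_KALLSYMS is disabled, which is what these selects prevent. */
static void report_traced_ip(unsigned long ip)
{
	char sym[KSYM_SYMBOL_LEN];

	sprint_symbol(sym, ip);	/* e.g. "schedule+0x10/0x5a0" */
	printk(KERN_INFO "traced %s\n", sym);
}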
@@ -302,4 +304,27 @@ config FTRACE_STARTUP_TEST
 	  functioning properly. It will do tests on all the configured
 	  tracers of ftrace.
 
+config MMIOTRACE
+	bool "Memory mapped IO tracing"
+	depends on HAVE_MMIOTRACE_SUPPORT && DEBUG_KERNEL && PCI
+	select TRACING
+	help
+	  Mmiotrace traces Memory Mapped I/O access and is meant for
+	  debugging and reverse engineering. It is called from the ioremap
+	  implementation and works via page faults. Tracing is disabled by
+	  default and can be enabled at run-time.
+
+	  See Documentation/tracers/mmiotrace.txt.
+	  If you are not helping to develop drivers, say N.
+
+config MMIOTRACE_TEST
+	tristate "Test module for mmiotrace"
+	depends on MMIOTRACE && m
+	help
+	  This is a dumb module for testing mmiotrace. It is very dangerous
+	  as it will write garbage to IO memory starting at a given address.
+	  However, it should be safe to use on e.g. unused portion of VRAM.
+
+	  Say N, unless you absolutely know what you are doing.
+
 endmenu
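
As the new help text says, mmiotrace is disabled by default and enabled at run-time through the tracing debugfs interface. A small userspace sketch of that step (assumption: debugfs mounted at /sys/kernel/debug; documentation of this era uses /debug, so see Documentation/tracers/mmiotrace.txt for the authoritative procedure):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/tracing/current_tracer";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);	/* debugfs not mounted, or no mmiotrace */
		return 1;
	}
	fputs("mmiotrace", f);	/* arms the ioremap page-fault hooks */
	return fclose(f) ? 1 : 0;
}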
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9a236ffe2aa4..fdf913dfc7e8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2033,7 +2033,7 @@ free:
 static int start_graph_tracing(void)
 {
 	struct ftrace_ret_stack **ret_stack_list;
-	int ret;
+	int ret, cpu;
 
 	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
 				sizeof(struct ftrace_ret_stack *),
@@ -2042,6 +2042,10 @@ static int start_graph_tracing(void)
 	if (!ret_stack_list)
 		return -ENOMEM;
 
+	/* The cpu_boot init_task->ret_stack will never be freed */
+	for_each_online_cpu(cpu)
+		ftrace_graph_init_task(idle_task(cpu));
+
 	do {
 		ret = alloc_retstack_tasklist(ret_stack_list);
 	} while (ret == -EAGAIN);
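
The ftrace.c hunk fixes graph tracing of idle tasks: alloc_retstack_tasklist() hands out return stacks via a do_each_thread walk, which never visits the per-cpu idle ("swapper") tasks, so those could run under the graph tracer with a NULL ret_stack. The new loop initializes them before tracing starts, and since idle tasks never exit, the allocation is deliberately never freed (hence the comment). A simplified sketch of the invariant being established (modeled on ftrace_graph_init_task() of this kernel; field initialization abridged):

#include <linux/ftrace.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* Every task that can be scheduled while graph tracing is live must
 * own a shadow return stack; the idle tasks are exactly the ones the
 * tasklist walk misses. */
static void graph_init_task_sketch(struct task_struct *t)
{
	if (t->ret_stack)
		return;			/* already set up */

	t->curr_ret_stack = -1;		/* empty shadow stack */
	t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH *
			       sizeof(struct ftrace_ret_stack),
			       GFP_KERNEL);
}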
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index fffcb069f1dc..80e503ef6136 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/mmiotrace.h>
 #include <linux/pci.h>
+#include <asm/atomic.h>
 
 #include "trace.h"
 
@@ -19,6 +20,7 @@ struct header_iter {
 static struct trace_array *mmio_trace_array;
 static bool overrun_detected;
 static unsigned long prev_overruns;
+static atomic_t dropped_count;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
@@ -121,11 +123,11 @@ static void mmio_close(struct trace_iterator *iter)
 
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
-	unsigned long cnt = 0;
+	unsigned long cnt = atomic_xchg(&dropped_count, 0);
 	unsigned long over = ring_buffer_overruns(iter->tr->buffer);
 
 	if (over > prev_overruns)
-		cnt = over - prev_overruns;
+		cnt += over - prev_overruns;
 	prev_overruns = over;
 	return cnt;
 }
@@ -310,8 +312,10 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 
 	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 					 &irq_flags);
-	if (!event)
+	if (!event) {
+		atomic_inc(&dropped_count);
 		return;
+	}
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_RW;
@@ -338,8 +342,10 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 
 	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 					 &irq_flags);
-	if (!event)
+	if (!event) {
+		atomic_inc(&dropped_count);
 		return;
+	}
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_MAP;
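
The trace_mmiotrace.c changes close an accounting hole: when ring_buffer_lock_reserve() fails, the event is lost, and before this patch the loss was invisible to userspace. Producers now count each failed reserve in dropped_count, and count_overruns() drains that counter with atomic_xchg() so every drop is reported exactly once alongside genuine ring-buffer overruns. The pattern in isolation (a sketch, not the patch code itself):

#include <asm/atomic.h>

static atomic_t dropped_count;

/* Producer side: reservation failed, the event is gone; remember it. */
static void note_dropped_event(void)
{
	atomic_inc(&dropped_count);
}

/* Reader side: read-and-reset in one atomic step, so concurrent
 * producers can keep incrementing without a lock and no drop is
 * reported twice. */
static unsigned long collect_dropped_events(void)
{
	return atomic_xchg(&dropped_count, 0);
}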
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 88c8eb70f54a..bc8e80a86bca 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -23,10 +23,20 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer_event *event;
 	struct trace_entry *entry;
+	unsigned int loops = 0;
 
 	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
 		entry = ring_buffer_event_data(event);
 
+		/*
+		 * The ring buffer is a size of trace_buf_size, if
+		 * we loop more than the size, there's something wrong
+		 * with the ring buffer.
+		 */
+		if (loops++ > trace_buf_size) {
+			printk(KERN_CONT ".. bad ring buffer ");
+			goto failed;
+		}
 		if (!trace_valid_entry(entry)) {
 			printk(KERN_CONT ".. invalid entry %d ",
 				entry->type);
@@ -57,11 +67,20 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 
 	cnt = ring_buffer_entries(tr->buffer);
 
+	/*
+	 * The trace_test_buffer_cpu runs a while loop to consume all data.
+	 * If the calling tracer is broken, and is constantly filling
+	 * the buffer, this will run forever, and hard lock the box.
+	 * We disable the ring buffer while we do this test to prevent
+	 * a hard lock up.
+	 */
+	tracing_off();
 	for_each_possible_cpu(cpu) {
 		ret = trace_test_buffer_cpu(tr, cpu);
 		if (ret)
 			break;
 	}
+	tracing_on();
 	__raw_spin_unlock(&ftrace_max_lock);
 	local_irq_restore(flags);
 
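
The selftest now has two guards against a misbehaving tracer: trace_test_buffer_cpu() aborts if it consumes more entries than trace_buf_size (the buffer cannot legitimately hold more), and trace_test_buffer() brackets the drain with tracing_off()/tracing_on() so a tracer that keeps writing cannot feed the consume loop forever and hard-lock the box. Both guards combined in one sketch (drain_checked() is a hypothetical helper, not the selftest code verbatim):

#include <linux/ftrace.h>
#include <linux/ring_buffer.h>

/* Quiesce producers, then drain with a hard iteration cap so a
 * still-live writer becomes a test failure instead of a lockup. */
static int drain_checked(struct ring_buffer *buffer, int cpu,
			 unsigned long cap)
{
	unsigned long loops = 0;
	int ret = 0;

	tracing_off();			/* stop new writes */
	while (ring_buffer_consume(buffer, cpu, NULL)) {
		if (loops++ > cap) {	/* more entries than can fit? */
			ret = -1;	/* bad ring buffer */
			break;
		}
	}
	tracing_on();
	return ret;
}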