 kernel/trace/Kconfig          |  2 ++
 kernel/trace/ftrace.c         |  6 +++++-
 kernel/trace/trace_selftest.c | 19 +++++++++++++++++++
 3 files changed, 26 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 58a93fbd68aa..34e707e5ab87 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -52,6 +52,7 @@ config FUNCTION_TRACER
 	depends on HAVE_FUNCTION_TRACER
 	depends on DEBUG_KERNEL
 	select FRAME_POINTER
+	select KALLSYMS
 	select TRACING
 	select CONTEXT_SWITCH_TRACER
 	help
@@ -238,6 +239,7 @@ config STACK_TRACER
 	depends on DEBUG_KERNEL
 	select FUNCTION_TRACER
 	select STACKTRACE
+	select KALLSYMS
 	help
 	  This special tracer records the maximum stack footprint of the
 	  kernel and displays it in debugfs/tracing/stack_trace.
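
Note on the Kconfig hunks: selecting KALLSYMS here presumably reflects that both tracers present resolved symbol names rather than raw instruction pointers. As a minimal sketch of that kind of lookup, assuming only the stock kallsyms helpers (the function below is illustrative and not part of this patch):

#include <linux/kernel.h>
#include <linux/kallsyms.h>

/*
 * Illustrative only -- not code from this patch.  Resolve an
 * instruction pointer to "symbol+offset/size" the way tracer
 * output does; this lookup is what needs KALLSYMS built in.
 */
static void print_traced_ip(unsigned long ip)
{
	char buf[KSYM_SYMBOL_LEN];

	sprint_symbol(buf, ip);		/* e.g. "schedule+0x5a/0x4b0" */
	printk(KERN_INFO "traced: %s\n", buf);
}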
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9a236ffe2aa4..fdf913dfc7e8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2033,7 +2033,7 @@ free:
 static int start_graph_tracing(void)
 {
 	struct ftrace_ret_stack **ret_stack_list;
-	int ret;
+	int ret, cpu;
 
 	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
 				sizeof(struct ftrace_ret_stack *),
@@ -2042,6 +2042,10 @@ static int start_graph_tracing(void)
 	if (!ret_stack_list)
 		return -ENOMEM;
 
+	/* The cpu_boot init_task->ret_stack will never be freed */
+	for_each_online_cpu(cpu)
+		ftrace_graph_init_task(idle_task(cpu));
+
 	do {
 		ret = alloc_retstack_tasklist(ret_stack_list);
 	} while (ret == -EAGAIN);
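
Note on the ftrace.c hunks: alloc_retstack_tasklist() walks the regular task list, which does not cover the per-cpu idle ("swapper") tasks, so those get their ret_stack set up explicitly before graph tracing starts. A simplified sketch of what ftrace_graph_init_task() provides for each such task (condensed from kernel code of this era; details may differ):

#include <linux/sched.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

/*
 * Simplified sketch -- the real ftrace_graph_init_task() lives in
 * kernel/trace/ftrace.c.  Every traced task needs a private stack
 * of ftrace_ret_stack entries so the return hook can push the
 * original return address and restore it on function exit.
 */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}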
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 88c8eb70f54a..bc8e80a86bca 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -23,10 +23,20 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer_event *event;
 	struct trace_entry *entry;
+	unsigned int loops = 0;
 
 	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
 		entry = ring_buffer_event_data(event);
 
+		/*
+		 * The ring buffer is a size of trace_buf_size, if
+		 * we loop more than the size, there's something wrong
+		 * with the ring buffer.
+		 */
+		if (loops++ > trace_buf_size) {
+			printk(KERN_CONT ".. bad ring buffer ");
+			goto failed;
+		}
 		if (!trace_valid_entry(entry)) {
 			printk(KERN_CONT ".. invalid entry %d ",
 				entry->type);
@@ -57,11 +67,20 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 
 	cnt = ring_buffer_entries(tr->buffer);
 
+	/*
+	 * The trace_test_buffer_cpu runs a while loop to consume all data.
+	 * If the calling tracer is broken, and is constantly filling
+	 * the buffer, this will run forever, and hard lock the box.
+	 * We disable the ring buffer while we do this test to prevent
+	 * a hard lock up.
+	 */
+	tracing_off();
 	for_each_possible_cpu(cpu) {
 		ret = trace_test_buffer_cpu(tr, cpu);
 		if (ret)
 			break;
 	}
+	tracing_on();
 	__raw_spin_unlock(&ftrace_max_lock);
 	local_irq_restore(flags);
 
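
Note on the trace_selftest.c hunks: both follow one defensive pattern -- bound the consume loop by trace_buf_size (a healthy buffer can never hand back more events than it can hold) and keep the ring buffer switched off while draining, so a broken tracer cannot keep refilling it and wedge the loop. A condensed sketch of the pattern with the selftest details stripped away (illustrative, not code from this patch; in the patch itself tracing_off()/tracing_on() sit in the caller):

/*
 * Condensed form of the hardening above (illustrative).
 * tracing_off() stops new writes; the loop counter catches a
 * ring buffer that yields more events than it can hold.
 */
static int drain_buffer_safely(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	unsigned int loops = 0;
	int ret = 0;

	tracing_off();
	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		if (loops++ > trace_buf_size) {	/* buffer misbehaving */
			ret = -1;
			break;
		}
		/* ... validate ring_buffer_event_data(event) here ... */
	}
	tracing_on();
	return ret;
}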
