Diffstat (limited to 'kernel/trace')
 -rw-r--r--  kernel/trace/ftrace.c                  30
 -rw-r--r--  kernel/trace/ring_buffer.c             16
 -rw-r--r--  kernel/trace/trace.c                   49
 -rw-r--r--  kernel/trace/trace.h                    5
 -rw-r--r--  kernel/trace/trace_clock.c              1
 -rw-r--r--  kernel/trace/trace_event_profile.c      4
 -rw-r--r--  kernel/trace/trace_functions_graph.c   27
7 files changed, 95 insertions, 37 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 83783579378f..d9062f5cc0c0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -27,6 +27,7 @@
 #include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/hash.h>
+#include <linux/rcupdate.h>
 
 #include <trace/events/sched.h>
 
@@ -84,22 +85,22 @@ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
-#endif
-
+/*
+ * Traverse the ftrace_list, invoking all entries. The reason that we
+ * can use rcu_dereference_raw() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism. The rcu_dereference_raw() calls are needed to handle
+ * concurrent insertions into the ftrace_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
-	struct ftrace_ops *op = ftrace_list;
-
-	/* in case someone actually ports this to alpha! */
-	read_barrier_depends();
+	struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/
 
 	while (op != &ftrace_list_end) {
-		/* silly alpha */
-		read_barrier_depends();
 		op->func(ip, parent_ip);
-		op = op->next;
+		op = rcu_dereference_raw(op->next); /*see above*/
 	};
 }
 
@@ -154,8 +155,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	 * the ops->next pointer is valid before another CPU sees
 	 * the ops pointer included into the ftrace_list.
 	 */
-	smp_wmb();
-	ftrace_list = ops;
+	rcu_assign_pointer(ftrace_list, ops);
 
 	if (ftrace_enabled) {
 		ftrace_func_t func;
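
The two hunks above replace hand-rolled barriers (smp_wmb() on the publish side, read_barrier_depends() on the traverse side) with the RCU primitives that encode the same ordering. A minimal sketch of the pattern, assuming a singly linked callback list whose removed entries are leaked rather than freed (names are illustrative, not the kernel's):

	#include <linux/rcupdate.h>

	struct cb {
		void (*func)(unsigned long ip);
		struct cb *next;
	};

	static struct cb cb_end;		/* sentinel, never invoked */
	static struct cb *cb_list = &cb_end;

	/* Publish: the new node must be fully initialized before it
	 * becomes reachable; rcu_assign_pointer() supplies the write
	 * barrier that smp_wmb() previously provided by hand. */
	static void cb_register(struct cb *new)
	{
		new->next = cb_list;
		rcu_assign_pointer(cb_list, new);
	}

	/* Traverse: rcu_dereference_raw() orders the dependent loads
	 * (the Alpha case) without claiming an RCU read-side critical
	 * section, which is safe here only because nodes are never
	 * freed, so no grace period has to be respected. */
	static void cb_call_all(unsigned long ip)
	{
		struct cb *op = rcu_dereference_raw(cb_list);

		while (op != &cb_end) {
			op->func(ip);
			op = rcu_dereference_raw(op->next);
		}
	}
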
@@ -2276,6 +2276,8 @@ __setup("ftrace_filter=", set_ftrace_filter);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
+static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
+
 static int __init set_graph_function(char *str)
 {
 	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
@@ -3351,6 +3353,7 @@ void ftrace_graph_init_task(struct task_struct *t)
 {
 	/* Make sure we do not use the parent ret_stack */
 	t->ret_stack = NULL;
+	t->curr_ret_stack = -1;
 
 	if (ftrace_graph_active) {
 		struct ftrace_ret_stack *ret_stack;
@@ -3360,7 +3363,6 @@ void ftrace_graph_init_task(struct task_struct *t)
 				GFP_KERNEL);
 		if (!ret_stack)
 			return;
-		t->curr_ret_stack = -1;
 		atomic_set(&t->tracing_graph_pause, 0);
 		atomic_set(&t->trace_overrun, 0);
 		t->ftrace_timestamp = 0;
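
Moving the curr_ret_stack reset ahead of the allocation closes an initialization window. A hedged reading of the new ordering, assuming the fork path (the framing and comments are mine, not the commit's):

	/*
	 * A child task starts as a byte copy of its parent, so before
	 * ftrace_graph_init_task() runs it still carries the parent's
	 * ret_stack pointer and curr_ret_stack index.  With the old
	 * order, any early return (tracer inactive, or the kmalloc()
	 * failing) left curr_ret_stack at the parent's stale index;
	 * resetting it to -1 unconditionally means the task always
	 * presents an empty return stack before one is published.
	 */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;
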
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 0287f9f52f5a..05a9f83b8819 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2233,12 +2233,12 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return NULL;
 
-	if (atomic_read(&buffer->record_disabled))
-		return NULL;
-
 	/* If we are tracing schedule, we don't want to recurse */
 	resched = ftrace_preempt_disable();
 
+	if (atomic_read(&buffer->record_disabled))
+		goto out_nocheck;
+
 	if (trace_recursive_lock())
 		goto out_nocheck;
 
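
Checking record_disabled only after ftrace_preempt_disable() matters because the reset path relies on sched-RCU to flush writers out: tracing_reset() (later in this merge) disables recording and then calls synchronize_sched() before touching the buffer. A sketch of the handshake the new ordering restores, hedged to the functions visible in this diff:

	/* Resetter side (tracing_reset()): */
	ring_buffer_record_disable(buffer);	/* bumps record_disabled */
	synchronize_sched();	/* waits out all preempt-disabled sections */
	ring_buffer_reset_cpu(buffer, cpu);
	ring_buffer_record_enable(buffer);

	/* Writer side (reserve path): test the flag with preemption off;
	 * otherwise a writer could pass the check, be preempted across
	 * the entire synchronize_sched() window, and then commit into a
	 * buffer that is concurrently being reset. */
	resched = ftrace_preempt_disable();
	if (atomic_read(&buffer->record_disabled))
		goto out_nocheck;
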
@@ -2470,11 +2470,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return -EBUSY;
 
-	if (atomic_read(&buffer->record_disabled))
-		return -EBUSY;
-
 	resched = ftrace_preempt_disable();
 
+	if (atomic_read(&buffer->record_disabled))
+		goto out;
+
 	cpu = raw_smp_processor_id();
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
@@ -2542,7 +2542,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
  * @buffer: The ring buffer to enable writes
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
@@ -2578,7 +2578,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
  * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
 */
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ed01fdba4a55..3ec2ee6f6560 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -374,6 +374,21 @@ static int __init set_buf_size(char *str)
 }
 __setup("trace_buf_size=", set_buf_size);
 
+static int __init set_tracing_thresh(char *str)
+{
+	unsigned long threshhold;
+	int ret;
+
+	if (!str)
+		return 0;
+	ret = strict_strtoul(str, 0, &threshhold);
+	if (ret < 0)
+		return 0;
+	tracing_thresh = threshhold * 1000;
+	return 1;
+}
+__setup("tracing_thresh=", set_tracing_thresh);
+
 unsigned long nsecs_to_usecs(unsigned long nsecs)
 {
 	return nsecs / 1000;
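
With this __setup() hook in place, booting with tracing_thresh=100 on the kernel command line requests a 100 µs threshold: the parsed value is multiplied by 1000 and stored in nanoseconds (tracing_thresh = 100000), matching the ns-based latency values the tracers compare against. The parameter only takes effect in tracers that consult tracing_thresh when they start, as graph_trace_init() does below.
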
@@ -579,9 +594,10 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 static arch_spinlock_t ftrace_max_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
+unsigned long __read_mostly	tracing_thresh;
+
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly	tracing_max_latency;
-unsigned long __read_mostly	tracing_thresh;
 
 /*
  * Copy the new maximum trace into the separate maximum-trace
@@ -592,7 +608,7 @@ static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	struct trace_array_cpu *data = tr->data[cpu];
-	struct trace_array_cpu *max_data = tr->data[cpu];
+	struct trace_array_cpu *max_data;
 
 	max_tr.cpu = cpu;
 	max_tr.time_start = data->preempt_timestamp;
@@ -602,7 +618,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	max_data->critical_start = data->critical_start;
 	max_data->critical_end = data->critical_end;
 
-	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
+	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
 	max_data->pid = tsk->pid;
 	max_data->uid = task_uid(tsk);
 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
@@ -824,10 +840,10 @@ out:
 	mutex_unlock(&trace_types_lock);
 }
 
-static void __tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct ring_buffer *buffer, int cpu)
 {
 	ftrace_disable_cpu();
-	ring_buffer_reset_cpu(tr->buffer, cpu);
+	ring_buffer_reset_cpu(buffer, cpu);
 	ftrace_enable_cpu();
 }
 
@@ -839,7 +855,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
 
 	/* Make sure all commits have finished */
 	synchronize_sched();
-	__tracing_reset(tr, cpu);
+	__tracing_reset(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -857,7 +873,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		__tracing_reset(tr, cpu);
+		__tracing_reset(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -934,6 +950,8 @@ void tracing_start(void)
 		goto out;
 	}
 
+	/* Prevent the buffers from switching */
+	arch_spin_lock(&ftrace_max_lock);
 
 	buffer = global_trace.buffer;
 	if (buffer)
@@ -943,6 +961,8 @@ void tracing_start(void)
 	if (buffer)
 		ring_buffer_record_enable(buffer);
 
+	arch_spin_unlock(&ftrace_max_lock);
+
 	ftrace_start();
  out:
 	spin_unlock_irqrestore(&tracing_start_lock, flags);
@@ -964,6 +984,9 @@ void tracing_stop(void)
 	if (trace_stop_count++)
 		goto out;
 
+	/* Prevent the buffers from switching */
+	arch_spin_lock(&ftrace_max_lock);
+
 	buffer = global_trace.buffer;
 	if (buffer)
 		ring_buffer_record_disable(buffer);
@@ -972,6 +995,8 @@ void tracing_stop(void)
 	if (buffer)
 		ring_buffer_record_disable(buffer);
 
+	arch_spin_unlock(&ftrace_max_lock);
+
  out:
 	spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
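
The lock taken in these two functions is the same ftrace_max_lock the latency tracers hold while swapping the live buffer with the max snapshot. A condensed sketch of the critical section being excluded, based on update_max_tr() in this file (trimmed, not the full function):

	arch_spin_lock(&ftrace_max_lock);

	buf = tr->buffer;		/* swap live and snapshot buffers */
	tr->buffer = max_tr.buffer;
	max_tr.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);

Without the exclusion, tracing_stop() could disable global_trace.buffer, lose a race with the swap, and then disable the already-disabled buffer a second time through max_tr.buffer, leaving the now-live buffer still recording.
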
@@ -1259,6 +1284,13 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;
 
+	/*
+	 * NMIs can not handle page faults, even with fix ups.
+	 * The save user stack can (and often does) fault.
+	 */
+	if (unlikely(in_nmi()))
+		return;
+
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
@@ -1703,6 +1735,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
 		ftrace_enable_cpu();
 
+		iter->leftover = 0;
 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
 			;
 
@@ -4248,10 +4281,10 @@ static __init int tracer_init_debugfs(void)
 #ifdef CONFIG_TRACER_MAX_TRACE
 	trace_create_file("tracing_max_latency", 0644, d_tracer,
 			&tracing_max_latency, &tracing_max_lat_fops);
+#endif
 
 	trace_create_file("tracing_thresh", 0644, d_tracer,
 			&tracing_thresh, &tracing_max_lat_fops);
-#endif
 
 	trace_create_file("README", 0444, d_tracer,
 			NULL, &tracing_readme_fops);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index fd05bcaf91b0..2825ef2c0b15 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -396,9 +396,10 @@ extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr);
 
 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
 
+extern unsigned long tracing_thresh;
+
 #ifdef CONFIG_TRACER_MAX_TRACE
 extern unsigned long tracing_max_latency;
-extern unsigned long tracing_thresh;
 
 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
 void update_max_tr_single(struct trace_array *tr,
@@ -550,7 +551,7 @@ static inline int ftrace_trace_task(struct task_struct *task)
  * struct trace_parser - servers for reading the user input separated by spaces
  * @cont: set if the input is not complete - no final space char was found
  * @buffer: holds the parsed user input
- * @idx: user input lenght
+ * @idx: user input length
  * @size: buffer size
  */
 struct trace_parser {
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 84a3a7ba072a..6fbfb8f417b9 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -13,6 +13,7 @@
  * Tracer plugins will chose a default from these clocks.
  */
 #include <linux/spinlock.h>
+#include <linux/irqflags.h>
 #include <linux/hardirq.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index f0d693005075..c1cc3ab633de 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -138,9 +138,9 @@ __kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
 	cpu = smp_processor_id();
 
 	if (in_nmi())
-		trace_buf = rcu_dereference(perf_trace_buf_nmi);
+		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
 	else
-		trace_buf = rcu_dereference(perf_trace_buf);
+		trace_buf = rcu_dereference_sched(perf_trace_buf);
 
 	if (!trace_buf)
 		goto err;
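
rcu_dereference_sched() is the flavor-aware replacement for plain rcu_dereference() in code whose read side is protected by disabled preemption rather than rcu_read_lock(); under CONFIG_PROVE_RCU that lets lockdep check the access against the matching grace-period primitive, synchronize_sched(). A minimal sketch of the pattern, with illustrative names rather than this file's buffers:

	#include <linux/rcupdate.h>

	/* updated elsewhere; old values retired via synchronize_sched() */
	static char *shared_buf;

	static void touch_buf(void)
	{
		char *buf;

		preempt_disable();	/* sched-RCU read-side section */
		buf = rcu_dereference_sched(shared_buf);
		if (buf)
			buf[0] = 0;	/* valid until preempt_enable() */
		preempt_enable();
	}
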
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 3fc2a575664f..e6989d9b44da 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -237,6 +237,14 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	return ret;
 }
 
+int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
+{
+	if (tracing_thresh)
+		return 1;
+	else
+		return trace_graph_entry(trace);
+}
+
 static void __trace_graph_return(struct trace_array *tr,
 				struct ftrace_graph_ret *trace,
 				unsigned long flags,
@@ -290,13 +298,26 @@ void set_graph_array(struct trace_array *tr)
 	smp_mb();
 }
 
+void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
+{
+	if (tracing_thresh &&
+	    (trace->rettime - trace->calltime < tracing_thresh))
+		return;
+	else
+		trace_graph_return(trace);
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
 	int ret;
 
 	set_graph_array(tr);
-	ret = register_ftrace_graph(&trace_graph_return,
-				    &trace_graph_entry);
+	if (tracing_thresh)
+		ret = register_ftrace_graph(&trace_graph_thresh_return,
+					    &trace_graph_thresh_entry);
+	else
+		ret = register_ftrace_graph(&trace_graph_return,
+					    &trace_graph_entry);
 	if (ret)
 		return ret;
 	tracing_start_cmdline_record();
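
Together the two thresh callbacks turn the graph tracer into a duration filter: the entry callback still arms the return hook (it returns 1) but records nothing, and the return callback emits an event only when rettime - calltime reaches tracing_thresh. So, for example, echo 100 > tracing_thresh followed by echo function_graph > current_tracer in /sys/kernel/debug/tracing should trace only functions that ran for at least 100 µs. Since the callback pair is chosen in graph_trace_init(), tracing_thresh must be set before the tracer is enabled (or the tracer re-selected afterwards).
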
@@ -920,7 +941,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	} else {
-		ret = trace_seq_printf(s, "} (%ps)\n", (void *)trace->func);
+		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
