author     Ingo Molnar <mingo@elte.hu>    2010-06-08 13:34:40 -0400
committer  Ingo Molnar <mingo@elte.hu>    2010-06-08 13:34:40 -0400
commit     6113e45f831616de98c54a005260223b21bcb6b9 (patch)
tree       0d56dbfae392f89b33c2ae6a36a5cc1ea1c1c988 /kernel/trace
parent     84bb671dc46d77d665d2b5e74539e81b2129bb3e (diff)
parent     5168ae50a66e3ff7184c2b16d661bd6d70367e50 (diff)
Merge branch 'tip/perf/core-3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c             |  5
-rw-r--r--  kernel/trace/ring_buffer.c        | 38
-rw-r--r--  kernel/trace/trace.c              |  5
-rw-r--r--  kernel/trace/trace.h              | 48
-rw-r--r--  kernel/trace/trace_clock.c        |  5
-rw-r--r--  kernel/trace/trace_events.c       |  5
-rw-r--r--  kernel/trace/trace_functions.c    |  6
-rw-r--r--  kernel/trace/trace_sched_wakeup.c |  5
-rw-r--r--  kernel/trace/trace_stack.c        |  6

9 files changed, 24 insertions(+), 99 deletions(-)
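The branch being merged removes the ftrace_preempt_disable()/ftrace_preempt_enable() helper pair from kernel/trace and converts every call site to the plain notrace preemption primitives. A minimal before/after sketch of the conversion (the call site here is illustrative, not taken from any one file below):

    /* Before: sample need_resched first so the matching enable could
     * skip the preemption check and avoid recursing into schedule(). */
    int resched = ftrace_preempt_disable();
    /* ... record trace data ... */
    ftrace_preempt_enable(resched);

    /* After: the notrace primitives are called directly. */
    preempt_disable_notrace();
    /* ... record trace data ... */
    preempt_enable_notrace();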
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6d2cb14f9449..0d88ce9b9fb8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1883,7 +1883,6 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
 	struct hlist_head *hhd;
 	struct hlist_node *n;
 	unsigned long key;
-	int resched;
 
 	key = hash_long(ip, FTRACE_HASH_BITS);
 
@@ -1897,12 +1896,12 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
 	 * period. This syncs the hash iteration and freeing of items
 	 * on the hash. rcu_read_lock is too dangerous here.
 	 */
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 	hlist_for_each_entry_rcu(entry, n, hhd, node) {
 		if (entry->ip == ip)
 			entry->ops->func(ip, parent_ip, &entry->data);
 	}
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_probe_ops __read_mostly =
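The comment kept in this hunk hints at the synchronization scheme: walking the hash with preemption disabled makes the callback an RCU-sched read-side critical section, so entries can be freed after a grace period. A sketch of the pairing (the writer side is an assumption for illustration; it is not part of this diff):

    /* Reader (the probe callback): preempt-off acts as the
     * RCU-sched read-side critical section. */
    preempt_disable_notrace();
    hlist_for_each_entry_rcu(entry, n, hhd, node)
            entry->ops->func(ip, parent_ip, &entry->data);
    preempt_enable_notrace();

    /* Writer (elsewhere, assumed): unlink, wait out all
     * preempt-disabled sections, then free. */
    hlist_del_rcu(&entry->node);
    synchronize_sched();
    kfree(entry);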
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 1da7b6ea8b85..28d0615a513f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2242,8 +2242,6 @@ static void trace_recursive_unlock(void)
 
 #endif
 
-static DEFINE_PER_CPU(int, rb_need_resched);
-
 /**
  * ring_buffer_lock_reserve - reserve a part of the buffer
  * @buffer: the ring buffer to reserve from
@@ -2264,13 +2262,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
-	int cpu, resched;
+	int cpu;
 
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return NULL;
 
 	/* If we are tracing schedule, we don't want to recurse */
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 
 	if (atomic_read(&buffer->record_disabled))
 		goto out_nocheck;
@@ -2295,21 +2293,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	if (!event)
 		goto out;
 
-	/*
-	 * Need to store resched state on this cpu.
-	 * Only the first needs to.
-	 */
-
-	if (preempt_count() == 1)
-		per_cpu(rb_need_resched, cpu) = resched;
-
 	return event;
 
  out:
 	trace_recursive_unlock();
 
  out_nocheck:
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
@@ -2355,13 +2345,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
 	trace_recursive_unlock();
 
-	/*
-	 * Only the last preempt count needs to restore preemption.
-	 */
-	if (preempt_count() == 1)
-		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
-	else
-		preempt_enable_no_resched_notrace();
+	preempt_enable_notrace();
 
 	return 0;
 }
@@ -2469,13 +2453,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 
 	trace_recursive_unlock();
 
-	/*
-	 * Only the last preempt count needs to restore preemption.
-	 */
-	if (preempt_count() == 1)
-		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
-	else
-		preempt_enable_no_resched_notrace();
+	preempt_enable_notrace();
 
 }
 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
@@ -2501,12 +2479,12 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	void *body;
 	int ret = -EBUSY;
-	int cpu, resched;
+	int cpu;
 
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return -EBUSY;
 
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 
 	if (atomic_read(&buffer->record_disabled))
 		goto out;
@@ -2536,7 +2514,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
 	ret = 0;
  out:
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 
 	return ret;
 }
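The ring buffer was the one caller that had to carry the saved need_resched flag across function boundaries: reserve and commit are separate entry points, so the flag was parked in the per-CPU rb_need_resched variable by the outermost reserve and consumed by the matching commit. Condensed from the removed lines above (not a complete function):

    /* Old handoff: only the outermost reserve/commit pair
     * (preempt_count() == 1) owned the saved flag. */
    resched = ftrace_preempt_disable();          /* in lock_reserve */
    if (preempt_count() == 1)
            per_cpu(rb_need_resched, cpu) = resched;

    /* ... later, in unlock_commit or discard_commit ... */
    if (preempt_count() == 1)
            ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
    else
            preempt_enable_no_resched_notrace();

With plain preempt_enable_notrace() on both commit paths, the per-CPU variable and the nesting-level checks disappear entirely.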
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 086d36316805..644ab744e759 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1404,7 +1404,6 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	struct bprint_entry *entry;
 	unsigned long flags;
 	int disable;
-	int resched;
 	int cpu, len = 0, size, pc;
 
 	if (unlikely(tracing_selftest_running || tracing_disabled))
@@ -1414,7 +1413,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	pause_graph_tracing();
 
 	pc = preempt_count();
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 
@@ -1452,7 +1451,7 @@ out_unlock:
 
 out:
 	atomic_dec_return(&data->disabled);
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 	unpause_graph_tracing();
 
 	return len;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 2cd96399463f..6c45e55097ce 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -628,54 +628,6 @@ enum trace_iterator_flags {
 
 extern struct tracer nop_trace;
 
-/**
- * ftrace_preempt_disable - disable preemption scheduler safe
- *
- * When tracing can happen inside the scheduler, there exists
- * cases that the tracing might happen before the need_resched
- * flag is checked. If this happens and the tracer calls
- * preempt_enable (after a disable), a schedule might take place
- * causing an infinite recursion.
- *
- * To prevent this, we read the need_resched flag before
- * disabling preemption. When we want to enable preemption we
- * check the flag, if it is set, then we call preempt_enable_no_resched.
- * Otherwise, we call preempt_enable.
- *
- * The rational for doing the above is that if need_resched is set
- * and we have yet to reschedule, we are either in an atomic location
- * (where we do not need to check for scheduling) or we are inside
- * the scheduler and do not want to resched.
- */
-static inline int ftrace_preempt_disable(void)
-{
-	int resched;
-
-	resched = need_resched();
-	preempt_disable_notrace();
-
-	return resched;
-}
-
-/**
- * ftrace_preempt_enable - enable preemption scheduler safe
- * @resched: the return value from ftrace_preempt_disable
- *
- * This is a scheduler safe way to enable preemption and not miss
- * any preemption checks. The disabled saved the state of preemption.
- * If resched is set, then we are either inside an atomic or
- * are inside the scheduler (we would have already scheduled
- * otherwise). In this case, we do not want to call normal
- * preempt_enable, but preempt_enable_no_resched instead.
- */
-static inline void ftrace_preempt_enable(int resched)
-{
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
-}
-
 #ifdef CONFIG_BRANCH_TRACER
 extern int enable_branch_tracing(struct trace_array *tr);
 extern void disable_branch_tracing(void);
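The kernel-doc removed here spells out the hazard the helper pair was invented for: a tracer firing inside the scheduler after need_resched is already set would, on a bare preempt_enable(), call back into schedule() and recurse. In outline (condensed from the removed comment; not code from the tree):

    /*
     * schedule()
     *   -> traced function -> tracer callback
     *     -> preempt_disable(); ... preempt_enable();
     *          preempt_enable() sees need_resched set
     *            -> schedule() again -> infinite recursion
     *
     * ftrace_preempt_disable() sampled need_resched up front so the
     * matching enable could take the no-resched path instead.
     */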
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 9d589d8dcd1a..52fda6c04ac3 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -32,16 +32,15 @@
 u64 notrace trace_clock_local(void)
 {
 	u64 clock;
-	int resched;
 
 	/*
 	 * sched_clock() is an architecture implemented, fast, scalable,
 	 * lockless clock. It is not guaranteed to be coherent across
 	 * CPUs, nor across CPU idle events.
 	 */
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 	clock = sched_clock();
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 
 	return clock;
 }
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 53cffc0b0801..a594f9a7ee3d 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1524,12 +1524,11 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 	struct ftrace_entry *entry;
 	unsigned long flags;
 	long disabled;
-	int resched;
 	int cpu;
 	int pc;
 
 	pc = preempt_count();
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
 
@@ -1551,7 +1550,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 
  out:
 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_ops __initdata =
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index b3f3776b0cd6..16aee4d44e8f 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -54,14 +54,14 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
-	int cpu, resched;
+	int cpu;
 	int pc;
 
 	if (unlikely(!ftrace_function_enabled))
 		return;
 
 	pc = preempt_count();
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 	local_save_flags(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
@@ -71,7 +71,7 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 	trace_function(tr, ip, parent_ip, flags, pc);
 
 	atomic_dec(&data->disabled);
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 }
 
 static void
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 0e73bc2ef8c5..c9fd5bd02036 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -46,7 +46,6 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
-	int resched;
 	int cpu;
 	int pc;
 
@@ -54,7 +53,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
 		return;
 
 	pc = preempt_count();
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 
 	cpu = raw_smp_processor_id();
 	if (cpu != wakeup_current_cpu)
@@ -74,7 +73,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
  out:
 	atomic_dec(&data->disabled);
  out_enable:
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index f4bc9b27de5f..056468eae7cf 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -110,12 +110,12 @@ static inline void check_stack(void)
 static void
 stack_trace_call(unsigned long ip, unsigned long parent_ip)
 {
-	int cpu, resched;
+	int cpu;
 
 	if (unlikely(!ftrace_enabled || stack_trace_disabled))
 		return;
 
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 
 	cpu = raw_smp_processor_id();
 	/* no atomic needed, we only modify this variable by this cpu */
@@ -127,7 +127,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
  out:
 	per_cpu(trace_active, cpu)--;
 	/* prevent recursion in schedule */
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_ops __read_mostly =