author		Ingo Molnar <mingo@elte.hu>	2009-02-09 04:35:12 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-02-09 04:35:12 -0500
commit		44b0635481437140b0e29d6023f05e805d5e7620 (patch)
tree		ff31986115075410d0479df307a6b9841976026c /kernel
parent		4ad476e11f94fd3724c6e272d8220e99cd222b27 (diff)
parent		57794a9d48b63e34acbe63282628c9f029603308 (diff)
Merge branch 'tip/tracing/core/devel' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/ftrace
Conflicts:
kernel/trace/trace_hw_branches.c
Diffstat (limited to 'kernel')

 kernel/trace/Kconfig             |  8
 kernel/trace/ftrace.c            |  6
 kernel/trace/ring_buffer.c       | 31
 kernel/trace/trace.c             |  2
 kernel/trace/trace.h             |  7
 kernel/trace/trace_hw_branches.c |  5
 kernel/trace/trace_output.c      |  6

 7 files changed, 50 insertions, 15 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 28f2644484d9..25131a5d5e4f 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -9,6 +9,9 @@ config USER_STACKTRACE_SUPPORT
 config NOP_TRACER
 	bool
 
+config HAVE_FTRACE_NMI_ENTER
+	bool
+
 config HAVE_FUNCTION_TRACER
 	bool
 
@@ -37,6 +40,11 @@ config TRACER_MAX_TRACE
 config RING_BUFFER
 	bool
 
+config FTRACE_NMI_ENTER
+	bool
+	depends on HAVE_FTRACE_NMI_ENTER
+	default y
+
 config TRACING
 	bool
 	select DEBUG_FS
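The two symbols follow the usual arch-capability idiom: an architecture selects HAVE_FTRACE_NMI_ENTER once it implements the tracer's NMI entry/exit hooks, and the consumer symbol FTRACE_NMI_ENTER then defaults on. A sketch of roughly what the new <linux/ftrace_irq.h> (pulled into ring_buffer.c below) provides; the extern/stub split is an assumption here, only the hook names and the config symbol come from this merge:

/*
 * Hedged sketch of <linux/ftrace_irq.h>: real hooks when the arch
 * opted in, empty inlines that compile away otherwise. Illustration,
 * not the verbatim header.
 */
#ifndef _LINUX_FTRACE_IRQ_H
#define _LINUX_FTRACE_IRQ_H

#ifdef CONFIG_FTRACE_NMI_ENTER
extern void ftrace_nmi_enter(void);
extern void ftrace_nmi_exit(void);
#else
static inline void ftrace_nmi_enter(void) { }
static inline void ftrace_nmi_exit(void) { }
#endif

#endif /* _LINUX_FTRACE_IRQ_H */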
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 68610031780b..1796e018fbff 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -465,7 +465,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 	 * it is not enabled then do nothing.
 	 *
 	 * If this record is not to be traced and
-	 * it is enabled then disabled it.
+	 * it is enabled then disable it.
 	 *
 	 */
 	if (rec->flags & FTRACE_FL_NOTRACE) {
@@ -485,7 +485,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
 			return 0;
 
-		/* Record is not filtered and is not enabled do nothing */
+		/* Record is not filtered or enabled, do nothing */
 		if (!fl)
 			return 0;
 
@@ -507,7 +507,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 
 	} else {
 
-		/* if record is not enabled do nothing */
+		/* if record is not enabled, do nothing */
 		if (!(rec->flags & FTRACE_FL_ENABLED))
 			return 0;
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index aee76b3eeed2..53ba3a6d16d0 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4,9 +4,11 @@
  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  */
 #include <linux/ring_buffer.h>
+#include <linux/ftrace_irq.h>
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
+#include <linux/hardirq.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
@@ -982,6 +984,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct ring_buffer_event *event;
 	unsigned long flags;
+	bool lock_taken = false;
 
 	commit_page = cpu_buffer->commit_page;
 	/* we just need to protect against interrupts */
@@ -995,7 +998,30 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		struct buffer_page *next_page = tail_page;
 
 		local_irq_save(flags);
-		__raw_spin_lock(&cpu_buffer->lock);
+		/*
+		 * Since the write to the buffer is still not
+		 * fully lockless, we must be careful with NMIs.
+		 * The locks in the writers are taken when a write
+		 * crosses to a new page. The locks protect against
+		 * races with the readers (this will soon be fixed
+		 * with a lockless solution).
+		 *
+		 * Because we can not protect against NMIs, and we
+		 * want to keep traces reentrant, we need to manage
+		 * what happens when we are in an NMI.
+		 *
+		 * NMIs can happen after we take the lock.
+		 * If we are in an NMI, only take the lock
+		 * if it is not already taken. Otherwise
+		 * simply fail.
+		 */
+		if (unlikely(in_nmi())) {
+			if (!__raw_spin_trylock(&cpu_buffer->lock))
+				goto out_unlock;
+		} else
+			__raw_spin_lock(&cpu_buffer->lock);
+
+		lock_taken = true;
 
 		rb_inc_page(cpu_buffer, &next_page);
 
@@ -1097,7 +1123,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	if (tail <= BUF_PAGE_SIZE)
 		local_set(&tail_page->write, tail);
 
-	__raw_spin_unlock(&cpu_buffer->lock);
+	if (likely(lock_taken))
+		__raw_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);
 	return NULL;
 }
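The rule the new code enforces: an NMI must never spin on a lock that the context it interrupted may already hold, so in NMI context the writer only trylocks and drops the event on failure, and the unlock path has to remember whether the lock was actually taken. A minimal sketch of the pattern using the 2.6.29-era raw spinlock API, with a hypothetical demo_lock and demo_cross_page() standing in for cpu_buffer->lock and __rb_reserve_next():

#include <linux/spinlock.h>
#include <linux/hardirq.h>	/* in_nmi() */
#include <linux/errno.h>

/* hypothetical stand-in for cpu_buffer->lock */
static raw_spinlock_t demo_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static int demo_cross_page(void)
{
	unsigned long flags;
	bool lock_taken = false;
	int ret = -EBUSY;

	local_irq_save(flags);

	if (unlikely(in_nmi())) {
		/*
		 * The interrupted context may hold demo_lock;
		 * spinning here would deadlock, so fail the write.
		 */
		if (!__raw_spin_trylock(&demo_lock))
			goto out;
	} else
		__raw_spin_lock(&demo_lock);
	lock_taken = true;

	/* ... move the tail to the next buffer page under the lock ... */
	ret = 0;

 out:
	/* unlock only what was locked: the NMI path can land here without it */
	if (likely(lock_taken))
		__raw_spin_unlock(&demo_lock);
	local_irq_restore(flags);
	return ret;
}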
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ef4dbac95568..03fbd4c20bc2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1519,7 +1519,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 
 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
 		SEQ_PUT_FIELD_RET(s, entry->pid);
-		SEQ_PUT_FIELD_RET(s, entry->cpu);
+		SEQ_PUT_FIELD_RET(s, iter->cpu);
 		SEQ_PUT_FIELD_RET(s, iter->ts);
 	}
 
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f2742fb1575a..b9838f4a6929 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -45,7 +45,6 @@ enum trace_type {
  */
 struct trace_entry {
 	unsigned char		type;
-	unsigned char		cpu;
 	unsigned char		flags;
 	unsigned char		preempt_count;
 	int			pid;
@@ -625,12 +624,12 @@ extern struct tracer nop_trace;
  * preempt_enable (after a disable), a schedule might take place
  * causing an infinite recursion.
  *
- * To prevent this, we read the need_recshed flag before
+ * To prevent this, we read the need_resched flag before
  * disabling preemption. When we want to enable preemption we
  * check the flag, if it is set, then we call preempt_enable_no_resched.
  * Otherwise, we call preempt_enable.
 *
- * The rational for doing the above is that if need resched is set
+ * The rational for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
@@ -651,7 +650,7 @@ static inline int ftrace_preempt_disable(void)
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disabled saved the state of preemption.
- * If resched is set, then we were either inside an atomic or
+ * If resched is set, then we are either inside an atomic or
 * are inside the scheduler (we would have already scheduled
 * otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
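For context, the helper pair these comments document samples need_resched() before disabling preemption and then picks the matching enable variant afterwards. Roughly, assuming the 2.6.29-era trace.h:

static inline int ftrace_preempt_disable(void)
{
	int resched;

	/* sample the flag before preempt_disable: see the comment above */
	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		/* atomic context or inside the scheduler: skip the check */
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

A tracer brackets its fast path with resched = ftrace_preempt_disable(); ... ftrace_preempt_enable(resched); so that re-enabling preemption can never recurse into the scheduler from within the tracer itself.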
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index ca4bbcfb9e2c..e3e7db61c067 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -158,7 +158,7 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
 	trace_assign_type(it, entry);
 
 	if (entry->type == TRACE_HW_BRANCHES) {
-		if (trace_seq_printf(seq, "%4d ", entry->cpu) &&
+		if (trace_seq_printf(seq, "%4d ", iter->cpu) &&
 		    seq_print_ip_sym(seq, it->to, symflags) &&
 		    trace_seq_printf(seq, "\t <- ") &&
 		    seq_print_ip_sym(seq, it->from, symflags) &&
@@ -193,7 +193,8 @@ void trace_hw_branch(u64 from, u64 to)
 	if (!event)
 		goto out;
 	entry	= ring_buffer_event_data(event);
-	entry->ent.cpu = cpu;
+	tracing_generic_entry_update(&entry->ent, 0, from);
+	entry->ent.type = TRACE_HW_BRANCHES;
 	entry->from = from;
 	entry->to   = to;
 	trace_buffer_unlock_commit(tr, event, 0, 0);
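Rather than open-coding the header fields (entry->ent.cpu no longer exists), the tracer now fills them through the shared helper. A trimmed sketch of what tracing_generic_entry_update() does, per its 2.6.29-era definition in kernel/trace/trace.c; the full version also encodes irqs-off/hardirq/softirq state in entry->flags:

void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags, int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count	= pc & 0xff;
	entry->pid		= tsk ? tsk->pid : 0;
	/* trimmed: entry->flags gets TRACE_FLAG_* bits derived from
	 * the flags/pc arguments and need_resched() */
}

Note the pattern across this merge: the per-event cpu byte is gone from struct trace_entry, and every reader takes the CPU from the iterator (iter->cpu) instead; the ring buffer is per-CPU, so the reader already knows which buffer it is draining.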
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index b6e99af79214..9fc815031b09 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -333,7 +333,7 @@ int trace_print_context(struct trace_iterator *iter)
 	unsigned long secs = (unsigned long)t;
 
 	return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
-				comm, entry->pid, entry->cpu, secs, usec_rem);
+				comm, entry->pid, iter->cpu, secs, usec_rem);
 }
 
 int trace_print_lat_context(struct trace_iterator *iter)
@@ -356,7 +356,7 @@ int trace_print_lat_context(struct trace_iterator *iter)
 	char *comm = trace_find_cmdline(entry->pid);
 	ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08lx]"
 			       " %ld.%03ldms (+%ld.%03ldms): ", comm,
-			       entry->pid, entry->cpu, entry->flags,
+			       entry->pid, iter->cpu, entry->flags,
 			       entry->preempt_count, iter->idx,
 			       ns2usecs(iter->ts),
 			       abs_usecs / USEC_PER_MSEC,
@@ -364,7 +364,7 @@ int trace_print_lat_context(struct trace_iterator *iter)
 			       rel_usecs / USEC_PER_MSEC,
 			       rel_usecs % USEC_PER_MSEC);
 	} else {
-		ret = lat_print_generic(s, entry, entry->cpu);
+		ret = lat_print_generic(s, entry, iter->cpu);
 		if (ret)
 			ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
 	}