diff options
author | Ingo Molnar <mingo@elte.hu> | 2010-08-12 15:38:56 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2010-08-12 15:39:04 -0400 |
commit | f46a6804135795f77d096ab0128f27531c7d051c (patch) | |
tree | 7cd33f69e3661327739ae4c96e5a8389e7fc912e /kernel/trace/trace.c | |
parent | b3e84ffa21f916e3354a12a7f19169c9febe96d0 (diff) | |
parent | ad41a1e0cab07c5125456e8d38e5b1ab148d04aa (diff) |
Merge branch 'linus' into perf/urgent
Merge reason: Fix upstream breakage introduced by:
de5d9bf: Move list types from <linux/list.h> to <linux/types.h>.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r-- | kernel/trace/trace.c | 51 |
1 file changed, 22 insertions(+), 29 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 4b1122d0df37..ba14a22be4cc 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -101,10 +101,7 @@ static inline void ftrace_enable_cpu(void) | |||
101 | preempt_enable(); | 101 | preempt_enable(); |
102 | } | 102 | } |
103 | 103 | ||
104 | static cpumask_var_t __read_mostly tracing_buffer_mask; | 104 | cpumask_var_t __read_mostly tracing_buffer_mask; |
105 | |||
106 | #define for_each_tracing_cpu(cpu) \ | ||
107 | for_each_cpu(cpu, tracing_buffer_mask) | ||
108 | 105 | ||
109 | /* | 106 | /* |
110 | * ftrace_dump_on_oops - variable to dump ftrace buffer on oops | 107 | * ftrace_dump_on_oops - variable to dump ftrace buffer on oops |
@@ -744,13 +741,6 @@ __acquires(kernel_lock) | |||
744 | return -1; | 741 | return -1; |
745 | } | 742 | } |
746 | 743 | ||
747 | /* | ||
748 | * When this gets called we hold the BKL which means that | ||
749 | * preemption is disabled. Various trace selftests however | ||
750 | * need to disable and enable preemption for successful tests. | ||
751 | * So we drop the BKL here and grab it after the tests again. | ||
752 | */ | ||
753 | unlock_kernel(); | ||
754 | mutex_lock(&trace_types_lock); | 744 | mutex_lock(&trace_types_lock); |
755 | 745 | ||
756 | tracing_selftest_running = true; | 746 | tracing_selftest_running = true; |
@@ -832,7 +822,6 @@ __acquires(kernel_lock) | |||
832 | #endif | 822 | #endif |
833 | 823 | ||
834 | out_unlock: | 824 | out_unlock: |
835 | lock_kernel(); | ||
836 | return ret; | 825 | return ret; |
837 | } | 826 | } |
838 | 827 | ||
@@ -1493,11 +1482,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
1493 | } | 1482 | } |
1494 | EXPORT_SYMBOL_GPL(trace_vprintk); | 1483 | EXPORT_SYMBOL_GPL(trace_vprintk); |
1495 | 1484 | ||
1496 | enum trace_file_type { | ||
1497 | TRACE_FILE_LAT_FMT = 1, | ||
1498 | TRACE_FILE_ANNOTATE = 2, | ||
1499 | }; | ||
1500 | |||
1501 | static void trace_iterator_increment(struct trace_iterator *iter) | 1485 | static void trace_iterator_increment(struct trace_iterator *iter) |
1502 | { | 1486 | { |
1503 | /* Don't allow ftrace to trace into the ring buffers */ | 1487 | /* Don't allow ftrace to trace into the ring buffers */ |
@@ -1595,7 +1579,7 @@ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, | |||
1595 | } | 1579 | } |
1596 | 1580 | ||
1597 | /* Find the next real entry, and increment the iterator to the next entry */ | 1581 | /* Find the next real entry, and increment the iterator to the next entry */ |
1598 | static void *find_next_entry_inc(struct trace_iterator *iter) | 1582 | void *trace_find_next_entry_inc(struct trace_iterator *iter) |
1599 | { | 1583 | { |
1600 | iter->ent = __find_next_entry(iter, &iter->cpu, | 1584 | iter->ent = __find_next_entry(iter, &iter->cpu, |
1601 | &iter->lost_events, &iter->ts); | 1585 | &iter->lost_events, &iter->ts); |
@@ -1630,19 +1614,19 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos) | |||
1630 | return NULL; | 1614 | return NULL; |
1631 | 1615 | ||
1632 | if (iter->idx < 0) | 1616 | if (iter->idx < 0) |
1633 | ent = find_next_entry_inc(iter); | 1617 | ent = trace_find_next_entry_inc(iter); |
1634 | else | 1618 | else |
1635 | ent = iter; | 1619 | ent = iter; |
1636 | 1620 | ||
1637 | while (ent && iter->idx < i) | 1621 | while (ent && iter->idx < i) |
1638 | ent = find_next_entry_inc(iter); | 1622 | ent = trace_find_next_entry_inc(iter); |
1639 | 1623 | ||
1640 | iter->pos = *pos; | 1624 | iter->pos = *pos; |
1641 | 1625 | ||
1642 | return ent; | 1626 | return ent; |
1643 | } | 1627 | } |
1644 | 1628 | ||
1645 | static void tracing_iter_reset(struct trace_iterator *iter, int cpu) | 1629 | void tracing_iter_reset(struct trace_iterator *iter, int cpu) |
1646 | { | 1630 | { |
1647 | struct trace_array *tr = iter->tr; | 1631 | struct trace_array *tr = iter->tr; |
1648 | struct ring_buffer_event *event; | 1632 | struct ring_buffer_event *event; |
@@ -2003,7 +1987,7 @@ int trace_empty(struct trace_iterator *iter) | |||
2003 | } | 1987 | } |
2004 | 1988 | ||
2005 | /* Called with trace_event_read_lock() held. */ | 1989 | /* Called with trace_event_read_lock() held. */ |
2006 | static enum print_line_t print_trace_line(struct trace_iterator *iter) | 1990 | enum print_line_t print_trace_line(struct trace_iterator *iter) |
2007 | { | 1991 | { |
2008 | enum print_line_t ret; | 1992 | enum print_line_t ret; |
2009 | 1993 | ||
@@ -3193,7 +3177,7 @@ waitagain: | |||
3193 | 3177 | ||
3194 | trace_event_read_lock(); | 3178 | trace_event_read_lock(); |
3195 | trace_access_lock(iter->cpu_file); | 3179 | trace_access_lock(iter->cpu_file); |
3196 | while (find_next_entry_inc(iter) != NULL) { | 3180 | while (trace_find_next_entry_inc(iter) != NULL) { |
3197 | enum print_line_t ret; | 3181 | enum print_line_t ret; |
3198 | int len = iter->seq.len; | 3182 | int len = iter->seq.len; |
3199 | 3183 | ||
@@ -3276,7 +3260,7 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) | |||
3276 | if (ret != TRACE_TYPE_NO_CONSUME) | 3260 | if (ret != TRACE_TYPE_NO_CONSUME) |
3277 | trace_consume(iter); | 3261 | trace_consume(iter); |
3278 | rem -= count; | 3262 | rem -= count; |
3279 | if (!find_next_entry_inc(iter)) { | 3263 | if (!trace_find_next_entry_inc(iter)) { |
3280 | rem = 0; | 3264 | rem = 0; |
3281 | iter->ent = NULL; | 3265 | iter->ent = NULL; |
3282 | break; | 3266 | break; |
@@ -3332,7 +3316,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, | |||
3332 | if (ret <= 0) | 3316 | if (ret <= 0) |
3333 | goto out_err; | 3317 | goto out_err; |
3334 | 3318 | ||
3335 | if (!iter->ent && !find_next_entry_inc(iter)) { | 3319 | if (!iter->ent && !trace_find_next_entry_inc(iter)) { |
3336 | ret = -EFAULT; | 3320 | ret = -EFAULT; |
3337 | goto out_err; | 3321 | goto out_err; |
3338 | } | 3322 | } |
@@ -4402,7 +4386,7 @@ static struct notifier_block trace_die_notifier = { | |||
4402 | */ | 4386 | */ |
4403 | #define KERN_TRACE KERN_EMERG | 4387 | #define KERN_TRACE KERN_EMERG |
4404 | 4388 | ||
4405 | static void | 4389 | void |
4406 | trace_printk_seq(struct trace_seq *s) | 4390 | trace_printk_seq(struct trace_seq *s) |
4407 | { | 4391 | { |
4408 | /* Probably should print a warning here. */ | 4392 | /* Probably should print a warning here. */ |
@@ -4417,6 +4401,13 @@ trace_printk_seq(struct trace_seq *s) | |||
4417 | trace_seq_init(s); | 4401 | trace_seq_init(s); |
4418 | } | 4402 | } |
4419 | 4403 | ||
4404 | void trace_init_global_iter(struct trace_iterator *iter) | ||
4405 | { | ||
4406 | iter->tr = &global_trace; | ||
4407 | iter->trace = current_trace; | ||
4408 | iter->cpu_file = TRACE_PIPE_ALL_CPU; | ||
4409 | } | ||
4410 | |||
4420 | static void | 4411 | static void |
4421 | __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) | 4412 | __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) |
4422 | { | 4413 | { |
@@ -4442,8 +4433,10 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) | |||
4442 | if (disable_tracing) | 4433 | if (disable_tracing) |
4443 | ftrace_kill(); | 4434 | ftrace_kill(); |
4444 | 4435 | ||
4436 | trace_init_global_iter(&iter); | ||
4437 | |||
4445 | for_each_tracing_cpu(cpu) { | 4438 | for_each_tracing_cpu(cpu) { |
4446 | atomic_inc(&global_trace.data[cpu]->disabled); | 4439 | atomic_inc(&iter.tr->data[cpu]->disabled); |
4447 | } | 4440 | } |
4448 | 4441 | ||
4449 | old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ; | 4442 | old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ; |
@@ -4492,7 +4485,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) | |||
4492 | iter.iter_flags |= TRACE_FILE_LAT_FMT; | 4485 | iter.iter_flags |= TRACE_FILE_LAT_FMT; |
4493 | iter.pos = -1; | 4486 | iter.pos = -1; |
4494 | 4487 | ||
4495 | if (find_next_entry_inc(&iter) != NULL) { | 4488 | if (trace_find_next_entry_inc(&iter) != NULL) { |
4496 | int ret; | 4489 | int ret; |
4497 | 4490 | ||
4498 | ret = print_trace_line(&iter); | 4491 | ret = print_trace_line(&iter); |
@@ -4514,7 +4507,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) | |||
4514 | trace_flags |= old_userobj; | 4507 | trace_flags |= old_userobj; |
4515 | 4508 | ||
4516 | for_each_tracing_cpu(cpu) { | 4509 | for_each_tracing_cpu(cpu) { |
4517 | atomic_dec(&global_trace.data[cpu]->disabled); | 4510 | atomic_dec(&iter.tr->data[cpu]->disabled); |
4518 | } | 4511 | } |
4519 | tracing_on(); | 4512 | tracing_on(); |
4520 | } | 4513 | } |