author    Ingo Molnar <mingo@elte.hu>  2009-02-09 04:35:12 -0500
committer Ingo Molnar <mingo@elte.hu>  2009-02-09 04:35:12 -0500
commit    44b0635481437140b0e29d6023f05e805d5e7620 (patch)
tree      ff31986115075410d0479df307a6b9841976026c
parent    4ad476e11f94fd3724c6e272d8220e99cd222b27 (diff)
parent    57794a9d48b63e34acbe63282628c9f029603308 (diff)

Merge branch 'tip/tracing/core/devel' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/ftrace

Conflicts:
	kernel/trace/trace_hw_branches.c
 arch/x86/Kconfig                 |  1
 arch/x86/kernel/ftrace.c         | 35
 include/linux/ftrace.h           |  2
 include/linux/ftrace_irq.h       |  2
 include/linux/hardirq.h          | 15
 include/linux/ring_buffer.h      |  9
 kernel/trace/Kconfig             |  8
 kernel/trace/ftrace.c            |  6
 kernel/trace/ring_buffer.c       | 31
 kernel/trace/trace.c             |  2
 kernel/trace/trace.h             |  7
 kernel/trace/trace_hw_branches.c |  5
 kernel/trace/trace_output.c      |  6
 13 files changed, 85 insertions(+), 44 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 73f7fe8fd4d1..2cf7bbcaed4e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -34,6 +34,7 @@ config X86
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
 	select HAVE_ARCH_TRACEHOOK
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 4d33224c055f..d74d75e0952d 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -82,7 +82,7 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
  * are the same as what exists.
  */
 
-static atomic_t in_nmi = ATOMIC_INIT(0);
+static atomic_t nmi_running = ATOMIC_INIT(0);
 static int mod_code_status;	/* holds return value of text write */
 static int mod_code_write;	/* set when NMI should do the write */
 static void *mod_code_ip;	/* holds the IP to write to */
@@ -115,8 +115,8 @@ static void ftrace_mod_code(void)
 
 void ftrace_nmi_enter(void)
 {
-	atomic_inc(&in_nmi);
-	/* Must have in_nmi seen before reading write flag */
+	atomic_inc(&nmi_running);
+	/* Must have nmi_running seen before reading write flag */
 	smp_mb();
 	if (mod_code_write) {
 		ftrace_mod_code();
@@ -126,19 +126,19 @@ void ftrace_nmi_enter(void)
 
 void ftrace_nmi_exit(void)
 {
-	/* Finish all executions before clearing in_nmi */
+	/* Finish all executions before clearing nmi_running */
 	smp_wmb();
-	atomic_dec(&in_nmi);
+	atomic_dec(&nmi_running);
 }
 
 static void wait_for_nmi(void)
 {
-	if (!atomic_read(&in_nmi))
+	if (!atomic_read(&nmi_running))
 		return;
 
 	do {
 		cpu_relax();
-	} while(atomic_read(&in_nmi));
+	} while (atomic_read(&nmi_running));
 
 	nmi_wait_count++;
 }
@@ -367,25 +367,6 @@ int ftrace_disable_ftrace_graph_caller(void)
 	return ftrace_mod_jmp(ip, old_offset, new_offset);
 }
 
-#else /* CONFIG_DYNAMIC_FTRACE */
-
-/*
- * These functions are picked from those used on
- * this page for dynamic ftrace. They have been
- * simplified to ignore all traces in NMI context.
- */
-static atomic_t in_nmi;
-
-void ftrace_nmi_enter(void)
-{
-	atomic_inc(&in_nmi);
-}
-
-void ftrace_nmi_exit(void)
-{
-	atomic_dec(&in_nmi);
-}
-
 #endif /* !CONFIG_DYNAMIC_FTRACE */
 
 /* Add a function return address to the trace stack on thread info.*/
@@ -475,7 +456,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 			&return_to_handler;
 
 	/* Nmi's are currently unsupported */
-	if (unlikely(atomic_read(&in_nmi)))
+	if (unlikely(in_nmi()))
 		return;
 
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
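The net effect of the ftrace.c changes is that the one-shot in_nmi flag becomes a counter, nmi_running, so the code patcher can wait until every in-flight NMI handler has left before touching kernel text. Below is a minimal user-space sketch of that counter handshake, using C11 atomics in place of the kernel's atomic_t and barriers; the names mirror the patch, but this is an illustration under those stand-in assumptions, not the kernel code itself.

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int  nmi_running;		/* how many "NMI handlers" are in flight */
	static atomic_bool mod_code_write;	/* set while a text write is pending */

	static void sketch_nmi_enter(void)
	{
		atomic_fetch_add(&nmi_running, 1);
		/* the increment must be visible before we read the write flag */
		atomic_thread_fence(memory_order_seq_cst);
		if (atomic_load(&mod_code_write)) {
			/* an NMI arriving mid-patch performs the pending write itself */
		}
	}

	static void sketch_nmi_exit(void)
	{
		/* finish all work in the handler before dropping the count */
		atomic_thread_fence(memory_order_release);
		atomic_fetch_sub(&nmi_running, 1);
	}

	static void sketch_wait_for_nmi(void)
	{
		/* the patcher spins until no handler is left running */
		while (atomic_load(&nmi_running))
			;
	}

	int main(void)
	{
		sketch_nmi_enter();
		sketch_nmi_exit();
		sketch_wait_for_nmi();	/* returns immediately: count is back to zero */
		return 0;
	}

A counter, unlike the old flag, stays non-zero as long as any CPU is still inside an NMI handler, which is exactly what wait_for_nmi() needs to test.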
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 7840e718c6c7..5e302d636fc2 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -140,7 +140,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
 #endif
 
 /**
- * ftrace_make_nop - convert code into top
+ * ftrace_make_nop - convert code into nop
  * @mod: module structure if called by module load initialization
  * @rec: the mcount call site record
  * @addr: the address that the call site should be calling
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index 366a054d0b05..dca7bf8cffe2 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -2,7 +2,7 @@
 #define _LINUX_FTRACE_IRQ_H
 
 
-#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
+#ifdef CONFIG_FTRACE_NMI_ENTER
 extern void ftrace_nmi_enter(void);
 extern void ftrace_nmi_exit(void);
 #else
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f83288347dda..f3cf86e1465b 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -61,6 +61,12 @@
 #error PREEMPT_ACTIVE is too low!
 #endif
 
+#define NMI_OFFSET	(PREEMPT_ACTIVE << 1)
+
+#if NMI_OFFSET >= 0x80000000
+#error PREEMPT_ACTIVE too high!
+#endif
+
 #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
 #define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
 #define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
@@ -73,6 +79,11 @@
 #define in_softirq()		(softirq_count())
 #define in_interrupt()		(irq_count())
 
+/*
+ * Are we in NMI context?
+ */
+#define in_nmi()	(preempt_count() & NMI_OFFSET)
+
 #if defined(CONFIG_PREEMPT)
 # define PREEMPT_INATOMIC_BASE kernel_locked()
 # define PREEMPT_CHECK_OFFSET 1
@@ -167,6 +178,8 @@ extern void irq_exit(void);
 #define nmi_enter()						\
 	do {							\
 		ftrace_nmi_enter();				\
+		BUG_ON(in_nmi());				\
+		add_preempt_count(NMI_OFFSET);			\
 		lockdep_off();					\
 		rcu_nmi_enter();				\
 		__irq_enter();					\
@@ -177,6 +190,8 @@ extern void irq_exit(void);
 		__irq_exit();					\
 		rcu_nmi_exit();					\
 		lockdep_on();					\
+		BUG_ON(!in_nmi());				\
+		sub_preempt_count(NMI_OFFSET);			\
 		ftrace_nmi_exit();				\
 	} while (0)
 
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 3110d92e7d81..3c103d636da3 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -121,9 +121,18 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
 u64 ring_buffer_time_stamp(int cpu);
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
 
+/*
+ * The below functions are fine to use outside the tracing facility.
+ */
+#ifdef CONFIG_RING_BUFFER
 void tracing_on(void);
 void tracing_off(void);
 void tracing_off_permanent(void);
+#else
+static inline void tracing_on(void) { }
+static inline void tracing_off(void) { }
+static inline void tracing_off_permanent(void) { }
+#endif
 
 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer);
 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
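The header now pairs the real declarations with empty static-inline stubs, so callers outside the tracing code can invoke tracing_off() and friends unconditionally. A miniature of the same pattern, with made-up names (FEATURE_X, feature_x_*, handle_fatal_error) standing in purely for illustration; build it without defining FEATURE_X:

	#ifdef FEATURE_X
	void feature_x_on(void);
	void feature_x_off(void);
	#else
	static inline void feature_x_on(void)  { }	/* compiles to nothing */
	static inline void feature_x_off(void) { }
	#endif

	/* A caller needs no #ifdef of its own: */
	static void handle_fatal_error(void)
	{
		feature_x_off();	/* no-op when FEATURE_X is not built in */
		/* ... report the error ... */
	}

	int main(void)
	{
		handle_fatal_error();
		return 0;
	}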
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 28f2644484d9..25131a5d5e4f 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -9,6 +9,9 @@ config USER_STACKTRACE_SUPPORT
 config NOP_TRACER
 	bool
 
+config HAVE_FTRACE_NMI_ENTER
+	bool
+
 config HAVE_FUNCTION_TRACER
 	bool
 
@@ -37,6 +40,11 @@ config TRACER_MAX_TRACE
 config RING_BUFFER
 	bool
 
+config FTRACE_NMI_ENTER
+	bool
+	depends on HAVE_FTRACE_NMI_ENTER
+	default y
+
 config TRACING
 	bool
 	select DEBUG_FS
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 68610031780b..1796e018fbff 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -465,7 +465,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 	 * it is not enabled then do nothing.
 	 *
 	 * If this record is not to be traced and
-	 * it is enabled then disabled it.
+	 * it is enabled then disable it.
 	 *
 	 */
 	if (rec->flags & FTRACE_FL_NOTRACE) {
@@ -485,7 +485,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
 			return 0;
 
-		/* Record is not filtered and is not enabled do nothing */
+		/* Record is not filtered or enabled, do nothing */
 		if (!fl)
 			return 0;
 
@@ -507,7 +507,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 
 	} else {
 
-		/* if record is not enabled do nothing */
+		/* if record is not enabled, do nothing */
 		if (!(rec->flags & FTRACE_FL_ENABLED))
 			return 0;
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index aee76b3eeed2..53ba3a6d16d0 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4,9 +4,11 @@
  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  */
 #include <linux/ring_buffer.h>
+#include <linux/ftrace_irq.h>
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
+#include <linux/hardirq.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
@@ -982,6 +984,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct ring_buffer_event *event;
 	unsigned long flags;
+	bool lock_taken = false;
 
 	commit_page = cpu_buffer->commit_page;
 	/* we just need to protect against interrupts */
@@ -995,7 +998,30 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		struct buffer_page *next_page = tail_page;
 
 		local_irq_save(flags);
-		__raw_spin_lock(&cpu_buffer->lock);
+		/*
+		 * Since the write to the buffer is still not
+		 * fully lockless, we must be careful with NMIs.
+		 * The locks in the writers are taken when a write
+		 * crosses to a new page. The locks protect against
+		 * races with the readers (this will soon be fixed
+		 * with a lockless solution).
+		 *
+		 * Because we can not protect against NMIs, and we
+		 * want to keep traces reentrant, we need to manage
+		 * what happens when we are in an NMI.
+		 *
+		 * NMIs can happen after we take the lock.
+		 * If we are in an NMI, only take the lock
+		 * if it is not already taken. Otherwise
+		 * simply fail.
+		 */
+		if (unlikely(in_nmi())) {
+			if (!__raw_spin_trylock(&cpu_buffer->lock))
+				goto out_unlock;
+		} else
+			__raw_spin_lock(&cpu_buffer->lock);
+
+		lock_taken = true;
 
 		rb_inc_page(cpu_buffer, &next_page);
 
@@ -1097,7 +1123,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	if (tail <= BUF_PAGE_SIZE)
 		local_set(&tail_page->write, tail);
 
-	__raw_spin_unlock(&cpu_buffer->lock);
+	if (likely(lock_taken))
+		__raw_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);
 	return NULL;
 }
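The ring-buffer reserve path now distinguishes NMI context: in an NMI it only trylocks and otherwise bails out (dropping the event) instead of spinning on a lock the interrupted writer may already hold, and lock_taken guards the unlock on the failure path. A stand-alone sketch of that rule, with a POSIX spinlock standing in for cpu_buffer->lock and a thread-local flag standing in for in_nmi() (all names here are stand-ins for illustration); build with -pthread:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_spinlock_t buffer_lock;	/* stand-in for cpu_buffer->lock */
	static _Thread_local bool in_fake_nmi;	/* stand-in for in_nmi() */

	/* Returns false ("event dropped") if the lock is busy while in NMI context. */
	static bool reserve_event(void)
	{
		bool lock_taken = false;

		if (in_fake_nmi) {
			/* never spin in NMI: the interrupted writer may hold the lock */
			if (pthread_spin_trylock(&buffer_lock) != 0)
				goto out;
		} else {
			pthread_spin_lock(&buffer_lock);
		}
		lock_taken = true;

		/* ... cross to the next page and reserve space here ... */

	out:
		if (lock_taken)			/* only unlock what we actually took */
			pthread_spin_unlock(&buffer_lock);
		return lock_taken;
	}

	int main(void)
	{
		pthread_spin_init(&buffer_lock, PTHREAD_PROCESS_PRIVATE);
		printf("reserved: %d\n", reserve_event());
		pthread_spin_destroy(&buffer_lock);
		return 0;
	}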
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ef4dbac95568..03fbd4c20bc2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1519,7 +1519,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 
 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
 		SEQ_PUT_FIELD_RET(s, entry->pid);
-		SEQ_PUT_FIELD_RET(s, entry->cpu);
+		SEQ_PUT_FIELD_RET(s, iter->cpu);
 		SEQ_PUT_FIELD_RET(s, iter->ts);
 	}
 
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f2742fb1575a..b9838f4a6929 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -45,7 +45,6 @@ enum trace_type {
  */
 struct trace_entry {
 	unsigned char		type;
-	unsigned char		cpu;
 	unsigned char		flags;
 	unsigned char		preempt_count;
 	int			pid;
@@ -625,12 +624,12 @@ extern struct tracer nop_trace;
  * preempt_enable (after a disable), a schedule might take place
  * causing an infinite recursion.
  *
- * To prevent this, we read the need_recshed flag before
+ * To prevent this, we read the need_resched flag before
  * disabling preemption. When we want to enable preemption we
  * check the flag, if it is set, then we call preempt_enable_no_resched.
  * Otherwise, we call preempt_enable.
  *
- * The rational for doing the above is that if need resched is set
+ * The rational for doing the above is that if need_resched is set
  * and we have yet to reschedule, we are either in an atomic location
  * (where we do not need to check for scheduling) or we are inside
  * the scheduler and do not want to resched.
@@ -651,7 +650,7 @@ static inline int ftrace_preempt_disable(void)
  *
  * This is a scheduler safe way to enable preemption and not miss
  * any preemption checks. The disabled saved the state of preemption.
- * If resched is set, then we were either inside an atomic or
+ * If resched is set, then we are either inside an atomic or
  * are inside the scheduler (we would have already scheduled
  * otherwise). In this case, we do not want to call normal
  * preempt_enable, but preempt_enable_no_resched instead.
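The comment block above describes the ftrace_preempt_disable()/ftrace_preempt_enable() pairing: snapshot need_resched before disabling preemption, and on re-enable pick preempt_enable_no_resched when that snapshot was set, so the tracer never recurses into the scheduler. The sketch below follows only what the comment says; the three macros are do-nothing stand-ins so it compiles outside the kernel, and the real helpers (which use the kernel's notrace preempt primitives) live in trace.h.

	#include <stdbool.h>

	static bool fake_need_resched;			/* stand-in for need_resched() */
	#define need_resched()			(fake_need_resched)
	#define preempt_disable()		do { } while (0)
	#define preempt_enable()		do { } while (0)
	#define preempt_enable_no_resched()	do { } while (0)

	static inline int ftrace_preempt_disable(void)
	{
		int resched = need_resched();		/* snapshot before disabling */

		preempt_disable();
		return resched;
	}

	static inline void ftrace_preempt_enable(int resched)
	{
		if (resched)				/* resched already pending: */
			preempt_enable_no_resched();	/* don't re-enter the scheduler */
		else
			preempt_enable();
	}

	int main(void)
	{
		int resched = ftrace_preempt_disable();

		/* ... emit a trace entry here ... */

		ftrace_preempt_enable(resched);
		return 0;
	}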
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index ca4bbcfb9e2c..e3e7db61c067 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -158,7 +158,7 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
 	trace_assign_type(it, entry);
 
 	if (entry->type == TRACE_HW_BRANCHES) {
-		if (trace_seq_printf(seq, "%4d ", entry->cpu) &&
+		if (trace_seq_printf(seq, "%4d ", iter->cpu) &&
 		    seq_print_ip_sym(seq, it->to, symflags) &&
 		    trace_seq_printf(seq, "\t <- ") &&
 		    seq_print_ip_sym(seq, it->from, symflags) &&
@@ -193,7 +193,8 @@ void trace_hw_branch(u64 from, u64 to)
 	if (!event)
 		goto out;
 	entry	= ring_buffer_event_data(event);
-	entry->ent.cpu = cpu;
+	tracing_generic_entry_update(&entry->ent, 0, from);
+	entry->ent.type = TRACE_HW_BRANCHES;
 	entry->from = from;
 	entry->to   = to;
 	trace_buffer_unlock_commit(tr, event, 0, 0);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index b6e99af79214..9fc815031b09 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -333,7 +333,7 @@ int trace_print_context(struct trace_iterator *iter)
 	unsigned long secs = (unsigned long)t;
 
 	return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
-				comm, entry->pid, entry->cpu, secs, usec_rem);
+				comm, entry->pid, iter->cpu, secs, usec_rem);
 }
 
 int trace_print_lat_context(struct trace_iterator *iter)
@@ -356,7 +356,7 @@ int trace_print_lat_context(struct trace_iterator *iter)
 		char *comm = trace_find_cmdline(entry->pid);
 		ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08lx]"
 				       " %ld.%03ldms (+%ld.%03ldms): ", comm,
-				       entry->pid, entry->cpu, entry->flags,
+				       entry->pid, iter->cpu, entry->flags,
 				       entry->preempt_count, iter->idx,
 				       ns2usecs(iter->ts),
 				       abs_usecs / USEC_PER_MSEC,
@@ -364,7 +364,7 @@ int trace_print_lat_context(struct trace_iterator *iter)
 				       rel_usecs / USEC_PER_MSEC,
 				       rel_usecs % USEC_PER_MSEC);
 	} else {
-		ret = lat_print_generic(s, entry, entry->cpu);
+		ret = lat_print_generic(s, entry, iter->cpu);
 		if (ret)
 			ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
370 } 370 }