author	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-07 17:10:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-07 17:10:10 -0400
commit	c93f216b5b985a12a18323e5ca2eb01db3d2f000
tree	45fa35b290005f8b241dd76b6342875b81432fc3
parent	c61b79b6ef266890954213a701d8f6021d8c1289
parent	ab3c9c686e22ab264269337ce7b75d9760211198
Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  branch tracer, intel-iommu: fix build with CONFIG_BRANCH_TRACER=y
  branch tracer: Fix for enabling branch profiling makes sparse unusable
  ftrace: Correct a text align for event format output
  Update /debug/tracing/README
  tracing/ftrace: alloc the started cpumask for the trace file
  tracing, x86: remove duplicated #include
  ftrace: Add check of sched_stopped for probe_sched_wakeup
  function-graph: add proper initialization for init task
  tracing/ftrace: fix missing include string.h
  tracing: fix incorrect return type of ns2usecs()
  tracing: remove CALLER_ADDR2 from wakeup tracer
  blktrace: fix pdu_len when tracing packet command requests
  blktrace: small cleanup in blk_msg_write()
  blktrace: NUL-terminate user space messages
  tracing: move scripts/trace/power.pl to scripts/tracing/power.pl
-rw-r--r--	arch/x86/kernel/ftrace.c	1
-rw-r--r--	block/blk-core.c	1
-rw-r--r--	include/linux/compiler.h	3
-rw-r--r--	include/linux/ftrace.h	8
-rw-r--r--	include/linux/init_task.h	2
-rw-r--r--	kernel/trace/blktrace.c	7
-rw-r--r--	kernel/trace/trace.c	21
-rw-r--r--	kernel/trace/trace.h	2
-rw-r--r--	kernel/trace/trace_export.c	2
-rw-r--r--	kernel/trace/trace_output.c	2
-rw-r--r--	kernel/trace/trace_sched_switch.c	3
-rw-r--r--	kernel/trace/trace_sched_wakeup.c	8
-rw-r--r--	scripts/tracing/power.pl (renamed from scripts/trace/power.pl)	0
13 files changed, 44 insertions, 16 deletions
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 61df77532120..70a10ca100f6 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -20,7 +20,6 @@
 
 #include <asm/cacheflush.h>
 #include <asm/ftrace.h>
-#include <linux/ftrace.h>
 #include <asm/nops.h>
 #include <asm/nmi.h>
 
diff --git a/block/blk-core.c b/block/blk-core.c
index 43fdedc524ee..07ab75403e1a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -131,6 +131,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
 	rq->cmd = rq->__cmd;
+	rq->cmd_len = BLK_MAX_CDB;
 	rq->tag = -1;
 	rq->ref_count = 1;
 }
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index cebfdcd3dbdd..37bcb50a4d7c 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -76,7 +76,8 @@ struct ftrace_branch_data {
  * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
  * to disable branch tracing on a per file basis.
  */
-#if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING)
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
+    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
 void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 
 #define likely_notrace(x)	__builtin_expect(!!(x), 1)
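
The context comment in this hunk notes that special low-level code can opt out of branch profiling per file by defining DISABLE_BRANCH_PROFILING. A minimal sketch of that idiom follows; the file and helper function are illustrative, not part of this merge:

/* Illustrative per-file opt-out from branch profiling (not from this patch).
 * DISABLE_BRANCH_PROFILING must be defined before linux/compiler.h is pulled
 * in (here via linux/kernel.h), so likely()/unlikely() stay plain
 * __builtin_expect() wrappers with no ftrace_likely_update() hook.
 */
#define DISABLE_BRANCH_PROFILING
#include <linux/kernel.h>

static int example_early_probe(unsigned long flags)	/* hypothetical helper */
{
	if (unlikely(flags == 0))	/* compiles to a bare branch hint */
		return -1;
	return 0;
}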
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 015a3d22cf74..da5405dce347 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -356,6 +356,9 @@ struct ftrace_graph_ret {
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
+/* for init task */
+#define INIT_FTRACE_GRAPH		.ret_stack = NULL
+
 /*
  * Stack of return addresses for functions
  * of a thread.
@@ -430,10 +433,11 @@ static inline void unpause_graph_tracing(void)
 {
 	atomic_dec(&current->tracing_graph_pause);
 }
-#else
+#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
 
 #define __notrace_funcgraph
 #define __irq_entry
+#define INIT_FTRACE_GRAPH
 
 static inline void ftrace_graph_init_task(struct task_struct *t) { }
 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
@@ -445,7 +449,7 @@ static inline int task_curr_ret_stack(struct task_struct *tsk)
 
 static inline void pause_graph_tracing(void) { }
 static inline void unpause_graph_tracing(void) { }
-#endif
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 #ifdef CONFIG_TRACING
 #include <linux/sched.h>
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index af1de95e711e..dcfb93337e9a 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -5,6 +5,7 @@
 #include <linux/irqflags.h>
 #include <linux/utsname.h>
 #include <linux/lockdep.h>
+#include <linux/ftrace.h>
 #include <linux/ipc.h>
 #include <linux/pid_namespace.h>
 #include <linux/user_namespace.h>
@@ -185,6 +186,7 @@ extern struct cred init_cred;
 	INIT_IDS							\
 	INIT_TRACE_IRQFLAGS						\
 	INIT_LOCKDEP							\
+	INIT_FTRACE_GRAPH						\
 }
 
 
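
With the two hunks above plus the INIT_FTRACE_GRAPH definitions from include/linux/ftrace.h, the static init task picks up an explicit graph-tracer field. A rough sketch of the resulting initializer, assuming CONFIG_FUNCTION_GRAPH_TRACER=y (illustrative expansion, not code from the patch):

#include <linux/sched.h>	/* struct task_struct */

/* Stand-in name; the real init task is built with INIT_TASK(init_task). */
struct task_struct init_task_sketch = {
	/* ... the many fields normally filled in by INIT_TASK() ... */
	.ret_stack = NULL,	/* contributed by INIT_FTRACE_GRAPH */
};
/* With the graph tracer disabled, INIT_FTRACE_GRAPH expands to nothing and
 * the initializer is unchanged, so non-tracing configs are unaffected. */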
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 947c5b3f90c4..b32ff446c3fb 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -327,10 +327,10 @@ static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
 	char *msg;
 	struct blk_trace *bt;
 
-	if (count > BLK_TN_MAX_MSG)
+	if (count >= BLK_TN_MAX_MSG)
 		return -EINVAL;
 
-	msg = kmalloc(count, GFP_KERNEL);
+	msg = kmalloc(count + 1, GFP_KERNEL);
 	if (msg == NULL)
 		return -ENOMEM;
 
@@ -339,6 +339,7 @@ static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
 		return -EFAULT;
 	}
 
+	msg[count] = '\0';
 	bt = filp->private_data;
 	__trace_note_message(bt, "%s", msg);
 	kfree(msg);
@@ -642,7 +643,7 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 	if (blk_pc_request(rq)) {
 		what |= BLK_TC_ACT(BLK_TC_PC);
 		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
-				sizeof(rq->cmd), rq->cmd);
+				rq->cmd_len, rq->cmd);
 	} else {
 		what |= BLK_TC_ACT(BLK_TC_FS);
 		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
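
The blk_msg_write() changes above follow the standard pattern for NUL-terminating data copied from user space: reject lengths that leave no room for the terminator, allocate one extra byte, and terminate explicitly, since the copy itself does not. A standalone user-space sketch of the same pattern (names and limit are illustrative):

#include <stdlib.h>
#include <string.h>

#define MAX_MSG 128	/* stand-in for BLK_TN_MAX_MSG */

/* "buf" is untrusted input of length "count"; returns a NUL-terminated copy. */
static char *copy_msg(const char *buf, size_t count)
{
	char *msg;

	if (count >= MAX_MSG)		/* ">=" keeps room for the terminator */
		return NULL;
	msg = malloc(count + 1);	/* +1 byte for '\0' */
	if (!msg)
		return NULL;
	memcpy(msg, buf, count);	/* the copy does not terminate for us */
	msg[count] = '\0';		/* so terminate explicitly */
	return msg;
}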
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a0174a40c563..9d28476a9851 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -30,6 +30,7 @@
 #include <linux/percpu.h>
 #include <linux/splice.h>
 #include <linux/kdebug.h>
+#include <linux/string.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
@@ -147,8 +148,7 @@ static int __init set_ftrace_dump_on_oops(char *str)
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
-long
-ns2usecs(cycle_t nsec)
+unsigned long long ns2usecs(cycle_t nsec)
 {
 	nsec += 500;
 	do_div(nsec, 1000);
@@ -1632,7 +1632,11 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
 		return;
 
 	cpumask_set_cpu(iter->cpu, iter->started);
-	trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
+
+	/* Don't print started cpu buffer for the first entry of the trace */
+	if (iter->idx > 1)
+		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
+				 iter->cpu);
 }
 
 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
@@ -1867,6 +1871,11 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (current_trace)
 		*iter->trace = *current_trace;
 
+	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
+		goto fail;
+
+	cpumask_clear(iter->started);
+
 	if (current_trace && current_trace->print_max)
 		iter->tr = &max_tr;
 	else
@@ -1917,6 +1926,7 @@ __tracing_open(struct inode *inode, struct file *file)
 		if (iter->buffer_iter[cpu])
 			ring_buffer_read_finish(iter->buffer_iter[cpu]);
 	}
+	free_cpumask_var(iter->started);
  fail:
 	mutex_unlock(&trace_types_lock);
 	kfree(iter->trace);
@@ -1960,6 +1970,7 @@ static int tracing_release(struct inode *inode, struct file *file)
 
 	seq_release(inode, file);
 	mutex_destroy(&iter->mutex);
+	free_cpumask_var(iter->started);
 	kfree(iter->trace);
 	kfree(iter);
 	return 0;
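
The __tracing_open()/tracing_release() hunks above allocate iter->started when the trace file is opened and free it on both the open error path and in release. A condensed sketch of that lifecycle with a stand-in structure (illustrative, not a drop-in replacement for the trace.c code):

#include <linux/cpumask.h>
#include <linux/gfp.h>

struct example_iter {			/* stand-in for struct trace_iterator */
	cpumask_var_t started;		/* CPUs whose "buffer started" line was printed */
};

static int example_open(struct example_iter *iter)
{
	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
		return -ENOMEM;		/* the real code jumps to its fail label */
	cpumask_clear(iter->started);	/* no CPU has announced its buffer yet */
	return 0;
}

static void example_release(struct example_iter *iter)
{
	free_cpumask_var(iter->started);	/* paired on error and release paths */
}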
@@ -2358,9 +2369,9 @@ static const char readme_msg[] =
2358 "# mkdir /debug\n" 2369 "# mkdir /debug\n"
2359 "# mount -t debugfs nodev /debug\n\n" 2370 "# mount -t debugfs nodev /debug\n\n"
2360 "# cat /debug/tracing/available_tracers\n" 2371 "# cat /debug/tracing/available_tracers\n"
2361 "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n" 2372 "wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
2362 "# cat /debug/tracing/current_tracer\n" 2373 "# cat /debug/tracing/current_tracer\n"
2363 "none\n" 2374 "nop\n"
2364 "# echo sched_switch > /debug/tracing/current_tracer\n" 2375 "# echo sched_switch > /debug/tracing/current_tracer\n"
2365 "# cat /debug/tracing/current_tracer\n" 2376 "# cat /debug/tracing/current_tracer\n"
2366 "sched_switch\n" 2377 "sched_switch\n"
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index cbc168f1e43d..e685ac2b2ba1 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -602,7 +602,7 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
 
 extern void *head_page(struct trace_array_cpu *data);
-extern long ns2usecs(cycle_t nsec);
+extern unsigned long long ns2usecs(cycle_t nsec);
 extern int
 trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
 extern int
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 4d9952d3df50..07a22c33ebf3 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -40,7 +40,7 @@
 
 #undef TRACE_FIELD_ZERO_CHAR
 #define TRACE_FIELD_ZERO_CHAR(item)					\
-	ret = trace_seq_printf(s, "\tfield: char " #item ";\t"		\
+	ret = trace_seq_printf(s, "\tfield:char " #item ";\t"		\
			       "offset:%u;\tsize:0;\n",			\
			       (unsigned int)offsetof(typeof(field), item)); \
 	if (!ret)							\
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index d72b9a63b247..64b54a59c55b 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -423,7 +423,7 @@ int trace_print_lat_context(struct trace_iterator *iter)
 
 	trace_find_cmdline(entry->pid, comm);
 
-	ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08lx]"
+	ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
			       " %ld.%03ldms (+%ld.%03ldms): ", comm,
			       entry->pid, iter->cpu, entry->flags,
			       entry->preempt_count, iter->idx,
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index de35f200abd3..9117cea6f1ae 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -62,6 +62,9 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
 	pc = preempt_count();
 	tracing_record_cmdline(current);
 
+	if (sched_stopped)
+		return;
+
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = ctx_trace->data[cpu];
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 3c5ad6b2ec84..5bc00e8f153e 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -154,7 +154,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
 	if (unlikely(!tracer_enabled || next != wakeup_task))
 		goto out_unlock;
 
-	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
+	trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
 	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
 
 	/*
@@ -257,6 +257,12 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 	data = wakeup_trace->data[wakeup_cpu];
 	data->preempt_timestamp = ftrace_now(cpu);
 	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
+
+	/*
+	 * We must be careful in using CALLER_ADDR2. But since wake_up
+	 * is not called by an assembly function (where as schedule is)
+	 * it should be safe to use it here.
+	 */
 	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
 
 out_locked:
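
The new comment spells out why CALLER_ADDR2 is acceptable in probe_wakeup() while it was dropped from probe_wakeup_sched_switch() above: each CALLER_ADDRn level walks one more return address, which only works if every intermediate caller has a normal C stack frame. As a rough reminder of what these helpers look like in this era (paraphrased from linux/ftrace.h, assuming CONFIG_FRAME_POINTER; treat the exact definitions as an approximation):

/* Approximate shape of the CALLER_ADDRn helpers; higher levels walk more
 * stack frames and are only meaningful when frame pointers are available. */
#ifdef CONFIG_FRAME_POINTER
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
#else
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 0UL
# define CALLER_ADDR2 0UL
#endif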
diff --git a/scripts/trace/power.pl b/scripts/tracing/power.pl
index 4f729b3501e0..4f729b3501e0 100644
--- a/scripts/trace/power.pl
+++ b/scripts/tracing/power.pl