author     Steven Rostedt <srostedt@redhat.com>  2009-03-12 21:12:46 -0400
committer  Steven Rostedt <srostedt@redhat.com>  2009-03-12 21:12:46 -0400
commit     51b643b404827d8fde60d7953773a42d46ca87e0 (patch)
tree       ae71fcbd8b3640b68fc11e1dca620a8ee96bac5f /kernel/trace
parent     554f786e284a6ce859d51f62240d615603944c8e (diff)
parent     480c93df5b99699390f93a7024c9f60d09da0e96 (diff)
Merge branch 'tracing/ftrace' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip into trace/tip/tracing/ftrace-merge
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/blktrace.c              |  2
-rw-r--r--  kernel/trace/trace.c                 | 20
-rw-r--r--  kernel/trace/trace_clock.c           |  9
-rw-r--r--  kernel/trace/trace_functions_graph.c |  2
-rw-r--r--  kernel/trace/trace_workqueue.c       | 19
5 files changed, 29 insertions(+), 23 deletions(-)
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index bec69d3678c1..1f32e4edf490 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -423,7 +423,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	if (!bt->sequence)
 		goto err;
 
-	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG);
+	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
 	if (!bt->msg_data)
 		goto err;
 
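Note: this merge pulls in the dynamic percpu allocator rework, which gave __alloc_percpu() an explicit alignment argument. A minimal sketch of the new calling convention, assuming a hypothetical wrapper (the function name and NULL-return convention are illustrative, not from the patch):

#include <linux/percpu.h>
#include <linux/blktrace_api.h>

/* Sketch: two-argument __alloc_percpu(size, align).  The caller treats
 * a NULL return as -ENOMEM and releases the buffer with free_percpu(). */
static char *alloc_msg_data(void)
{
	char *msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));

	if (!msg_data)
		return NULL;
	return msg_data;	/* access per CPU via per_cpu_ptr(msg_data, cpu) */
}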
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 14c98f6a47bc..c3946a6df34e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1478,11 +1478,11 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 	total = entries +
 		ring_buffer_overruns(iter->tr->buffer);
 
-	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
+	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
 		   name, UTS_RELEASE);
-	seq_puts(m, "-----------------------------------"
+	seq_puts(m, "# -----------------------------------"
 		   "---------------------------------\n");
-	seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
+	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
 		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
 		   nsecs_to_usecs(data->saved_latency),
 		   entries,
@@ -1504,24 +1504,24 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 #else
 	seq_puts(m, ")\n");
 #endif
-	seq_puts(m, " -----------------\n");
-	seq_printf(m, " | task: %.16s-%d "
+	seq_puts(m, "# -----------------\n");
+	seq_printf(m, "# | task: %.16s-%d "
 		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
 		   data->comm, data->pid, data->uid, data->nice,
 		   data->policy, data->rt_priority);
-	seq_puts(m, " -----------------\n");
+	seq_puts(m, "# -----------------\n");
 
 	if (data->critical_start) {
-		seq_puts(m, " => started at: ");
+		seq_puts(m, "# => started at: ");
 		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
 		trace_print_seq(m, &iter->seq);
-		seq_puts(m, "\n => ended at: ");
+		seq_puts(m, "\n# => ended at: ");
 		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
 		trace_print_seq(m, &iter->seq);
-		seq_puts(m, "\n");
+		seq_puts(m, "#\n");
 	}
 
-	seq_puts(m, "\n");
+	seq_puts(m, "#\n");
 }
 
 static void test_cpu_buff_start(struct trace_iterator *iter)
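Note: prefixing every latency-trace header line with '#' lets tools that parse the trace body skip the header as comments. Based on the format strings above, the header would now look roughly like this (the tracer name, latency, and task values are illustrative only):

# irqsoff latency trace v1.1.5 on 2.6.29-rc7
# --------------------------------------------------------------------
# latency: 57 us, #6/6, CPU#1 | (M:preempt VP:0, KP:0, SP:0 HP:0)
# -----------------
# | task: sshd-4261 (uid:0 nice:-5 policy:0 rt_prio:0)
# -----------------
# => started at: _spin_lock_irqsave
# => ended at: _spin_unlock_irqrestore
#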
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 2d4953f93560..05b176abfd30 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -27,12 +27,19 @@
  */
 u64 notrace trace_clock_local(void)
 {
+	unsigned long flags;
+	u64 clock;
+
 	/*
 	 * sched_clock() is an architecture implemented, fast, scalable,
 	 * lockless clock. It is not guaranteed to be coherent across
 	 * CPUs, nor across CPU idle events.
 	 */
-	return sched_clock();
+	raw_local_irq_save(flags);
+	clock = sched_clock();
+	raw_local_irq_restore(flags);
+
+	return clock;
 }
 
 /*
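Note: trace_clock_local() now disables interrupts around sched_clock(), so a single read cannot be interrupted or migrated mid-way on architectures whose sched_clock() touches per-CPU state. A hedged usage sketch of a caller, where do_something() is a placeholder:

#include <linux/kernel.h>
#include <linux/trace_clock.h>

/* Usage sketch: timing a short same-CPU section.  Deltas are only
 * meaningful on one CPU, since trace_clock_local() is not coherent
 * across CPUs. */
static void time_section(void)
{
	u64 t0 = trace_clock_local();

	do_something();		/* hypothetical work being measured */
	pr_info("section took %llu ns\n",
		(unsigned long long)(trace_clock_local() - t0));
}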
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d1493b853e41..8566c14b3e9a 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -837,7 +837,7 @@ static void graph_trace_open(struct trace_iterator *iter)
 
 static void graph_trace_close(struct trace_iterator *iter)
 {
-	percpu_free(iter->private);
+	free_percpu(iter->private);
 }
 
 static struct tracer graph_trace __read_mostly = {
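Note: percpu_free() was removed by the percpu rework; free_percpu() is the surviving API and pairs with alloc_percpu()/__alloc_percpu(). A minimal sketch of the pairing, where the int payload and the demo function are arbitrary illustration:

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Sketch: dynamic per-CPU storage; free_percpu() is the canonical
 * partner of alloc_percpu(). */
static void percpu_pair_demo(void)
{
	int cpu;
	int *counters = alloc_percpu(int);

	if (!counters)
		return;
	for_each_online_cpu(cpu)
		pr_info("cpu%d: %d\n", cpu, *per_cpu_ptr(counters, cpu));
	free_percpu(counters);
}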
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index e542483df623..fb5ccac8bbc0 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -91,7 +91,7 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
 	struct cpu_workqueue_stats *cws;
 	unsigned long flags;
 
-	WARN_ON(cpu < 0 || cpu >= num_possible_cpus());
+	WARN_ON(cpu < 0);
 
 	/* Workqueues are sometimes created in atomic context */
 	cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
@@ -99,8 +99,6 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
 		pr_warning("trace_workqueue: not enough memory\n");
 		return;
 	}
-	tracing_record_cmdline(wq_thread);
-
 	INIT_LIST_HEAD(&cws->list);
 	cws->cpu = cpu;
 
@@ -177,12 +175,12 @@ static void *workqueue_stat_next(void *prev, int idx)
 	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
 	if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
 		spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
-		for (++cpu ; cpu < num_possible_cpus(); cpu++) {
-			ret = workqueue_stat_start_cpu(cpu);
-			if (ret)
-				return ret;
-		}
-		return NULL;
+		do {
+			cpu = cpumask_next(cpu, cpu_possible_mask);
+			if (cpu >= nr_cpu_ids)
+				return NULL;
+		} while (!(ret = workqueue_stat_start_cpu(cpu)));
+		return ret;
 	}
 	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
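Note: the rewritten loop matters because possible CPU IDs need not be contiguous; iterating 0..num_possible_cpus()-1 can both skip real CPUs and touch nonexistent ones when the mask is sparse, which is presumably also why the earlier WARN_ON() hunk drops its num_possible_cpus() upper bound. cpumask_next() walks only the bits actually set in cpu_possible_mask, stopping at nr_cpu_ids. A standalone sketch of the same iteration pattern:

#include <linux/kernel.h>
#include <linux/cpumask.h>

/* Sketch: visiting every possible CPU on a potentially sparse mask;
 * equivalent to the for_each_possible_cpu() helper. */
static void walk_possible_cpus(void)
{
	int cpu = -1;

	while ((cpu = cpumask_next(cpu, cpu_possible_mask)) < nr_cpu_ids)
		pr_info("possible cpu: %d\n", cpu);
}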
@@ -195,11 +193,12 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
 	struct cpu_workqueue_stats *cws = p;
 	unsigned long flags;
 	int cpu = cws->cpu;
+	struct task_struct *tsk = find_task_by_vpid(cws->pid);
 
 	seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
 		   atomic_read(&cws->inserted),
 		   cws->executed,
-		   trace_find_cmdline(cws->pid));
+		   tsk ? tsk->comm : "<...>");
 
 	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
 	if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
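Note: instead of the tracer's cmdline cache, the stat file now resolves the stored pid at read time; find_task_by_vpid() returns NULL once the task has exited, which the "<...>" fallback covers. A hedged sketch of the lookup (the wrapper function is illustrative; strictly, pid-to-task lookups should be RCU-protected, which the diff above omits):

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>

/* Sketch: pid -> comm at report time.  A NULL return means the task
 * is gone, so print the "<...>" placeholder instead. */
static void print_worker_comm(pid_t pid)
{
	struct task_struct *tsk;

	rcu_read_lock();	/* protect the pid -> task lookup */
	tsk = find_task_by_vpid(pid);
	pr_info("worker: %s\n", tsk ? tsk->comm : "<...>");
	rcu_read_unlock();
}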