path: root/kernel/trace
author	Tejun Heo <tj@kernel.org>	2009-07-03 18:13:18 -0400
committer	Tejun Heo <tj@kernel.org>	2009-07-03 18:13:18 -0400
commit	c43768cbb7655ea5ff782ae250f6e2ef4297cf98 (patch)
tree	3982e41dde3eecaa3739a5d1a8ed18d04bd74f01 /kernel/trace
parent	1a8dd307cc0a2119be4e578c517795464e6dabba (diff)
parent	746a99a5af60ee676afa2ba469ccd1373493c7e7 (diff)
Merge branch 'master' into for-next

Pull linus#master to merge PER_CPU_DEF_ATTRIBUTES and alpha build fix
changes.  As alpha in percpu tree uses 'weak' attribute instead of
inline assembly, there's no need for __used attribute.

Conflicts:
	arch/alpha/include/asm/percpu.h
	arch/mn10300/kernel/vmlinux.lds.S
	include/linux/percpu-defs.h
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ftrace.c	56
-rw-r--r--	kernel/trace/ring_buffer.c	11
-rw-r--r--	kernel/trace/trace.c	23
-rw-r--r--	kernel/trace/trace.h	7
-rw-r--r--	kernel/trace/trace_events.c	28
-rw-r--r--	kernel/trace/trace_functions.c	3
-rw-r--r--	kernel/trace/trace_printk.c	26
-rw-r--r--	kernel/trace/trace_stat.c	6
8 files changed, 86 insertions, 74 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3718d55fb4c3..f3716bf04df6 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -291,7 +291,9 @@ function_stat_next(void *v, int idx)
 	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
 
  again:
-	rec++;
+	if (idx != 0)
+		rec++;
+
 	if ((void *)rec >= (void *)&pg->records[pg->index]) {
 		pg = pg->next;
 		if (!pg)
@@ -1417,10 +1419,20 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
 	void *p = NULL;
+	loff_t l;
+
+	if (!(iter->flags & FTRACE_ITER_HASH))
+		*pos = 0;
 
 	iter->flags |= FTRACE_ITER_HASH;
 
-	return t_hash_next(m, p, pos);
+	iter->hidx = 0;
+	for (l = 0; l <= *pos; ) {
+		p = t_hash_next(m, p, &l);
+		if (!p)
+			break;
+	}
+	return p;
 }
 
 static int t_hash_show(struct seq_file *m, void *v)
@@ -1467,8 +1479,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 			iter->pg = iter->pg->next;
 			iter->idx = 0;
 			goto retry;
-		} else {
-			iter->idx = -1;
 		}
 	} else {
 		rec = &iter->pg->records[iter->idx++];
@@ -1497,6 +1507,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
 	void *p = NULL;
+	loff_t l;
 
 	mutex_lock(&ftrace_lock);
 	/*
@@ -1508,23 +1519,21 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 		if (*pos > 0)
 			return t_hash_start(m, pos);
 		iter->flags |= FTRACE_ITER_PRINTALL;
-		(*pos)++;
 		return iter;
 	}
 
 	if (iter->flags & FTRACE_ITER_HASH)
 		return t_hash_start(m, pos);
 
-	if (*pos > 0) {
-		if (iter->idx < 0)
-			return p;
-		(*pos)--;
-		iter->idx--;
+	iter->pg = ftrace_pages_start;
+	iter->idx = 0;
+	for (l = 0; l <= *pos; ) {
+		p = t_next(m, p, &l);
+		if (!p)
+			break;
 	}
 
-	p = t_next(m, p, pos);
-
-	if (!p)
+	if (!p && iter->flags & FTRACE_ITER_FILTER)
 		return t_hash_start(m, pos);
 
 	return p;
@@ -2500,32 +2509,31 @@ int ftrace_graph_count;
 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
 
 static void *
-g_next(struct seq_file *m, void *v, loff_t *pos)
+__g_next(struct seq_file *m, loff_t *pos)
 {
 	unsigned long *array = m->private;
-	int index = *pos;
 
-	(*pos)++;
-
-	if (index >= ftrace_graph_count)
+	if (*pos >= ftrace_graph_count)
 		return NULL;
+	return &array[*pos];
+}
 
-	return &array[index];
+static void *
+g_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return __g_next(m, pos);
 }
 
 static void *g_start(struct seq_file *m, loff_t *pos)
 {
-	void *p = NULL;
-
 	mutex_lock(&graph_lock);
 
 	/* Nothing, tell g_show to print all functions are enabled */
 	if (!ftrace_graph_count && !*pos)
 		return (void *)1;
 
-	p = g_next(m, p, pos);
-
-	return p;
+	return __g_next(m, pos);
 }
 
 static void g_stop(struct seq_file *m, void *p)
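
The ftrace.c hunks above all chase the same class of bug: the seq_file ->start() callbacks used to carry cursor state across reads (bumping *pos in start, caching an index in the iterator), which falls apart when the seq_file core restarts a read at an arbitrary offset. The fix makes ->start() stateless: rewind to the beginning and replay ->next() until *pos entries have been consumed. Below is a minimal user-space sketch of that restart loop, not kernel code; the demo_* names and the three-element list are invented for illustration.

/* Stand-alone sketch of the seq_file-style restart pattern:
 * start() derives its cursor purely from *pos by replaying next()
 * from the head of the list, instead of trusting cached state. */
#include <stdio.h>

struct item {
	const char *name;
	struct item *next;
};

static struct item c = { "gamma", NULL };
static struct item b = { "beta",  &c };
static struct item a = { "alpha", &b };
static struct item *item_list = &a;

/* ->next(): advance one element and account for it in *pos. */
static struct item *demo_next(struct item *v, long long *pos)
{
	(*pos)++;
	return v ? v->next : item_list;
}

/* ->start(): no cached cursor; walk forward until *pos is consumed. */
static struct item *demo_start(long long *ppos)
{
	struct item *p = NULL;
	long long l;

	for (l = 0; l <= *ppos; ) {
		p = demo_next(p, &l);
		if (!p)
			break;
	}
	return p;
}

int main(void)
{
	long long pos;

	/* Restarting at any position yields the right element, even
	 * after a seek, because start() never relies on prior state. */
	for (pos = 0; ; pos++) {
		struct item *p = demo_start(&pos);
		if (!p)
			break;
		printf("%lld: %s\n", pos, p->name);
	}
	return 0;
}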
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 04dac2638258..bf27bb7a63e2 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1563,6 +1563,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	return NULL;
 }
 
+#ifdef CONFIG_TRACING
+
 #define TRACE_RECURSIVE_DEPTH 16
 
 static int trace_recursive_lock(void)
@@ -1593,6 +1595,13 @@ static void trace_recursive_unlock(void)
 	current->trace_recursion--;
 }
 
+#else
+
+#define trace_recursive_lock()		(0)
+#define trace_recursive_unlock()	do { } while (0)
+
+#endif
+
 static DEFINE_PER_CPU(int, rb_need_resched);
 
 /**
@@ -3104,6 +3113,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
 
+#ifdef CONFIG_TRACING
 static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
 	       size_t cnt, loff_t *ppos)
@@ -3171,6 +3181,7 @@ static __init int rb_init_debugfs(void)
 }
 
 fs_initcall(rb_init_debugfs);
+#endif
 
 #ifdef CONFIG_HOTPLUG_CPU
 static int rb_cpu_notify(struct notifier_block *self,
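
The ring_buffer.c hunks wrap the recursion guard and the debugfs glue in CONFIG_TRACING and supply no-op macro stand-ins when the option is off, so call sites such as rb_reserve_next_event() stay free of #ifdefs. The sketch below shows the same compile-out pattern in a stand-alone program; DEMO_FEATURE and the demo_* helpers are illustrative, not kernel symbols.

/* Compile-out pattern: real helpers under the config option,
 * zero-cost stand-ins otherwise, identical call shape either way. */
#include <stdio.h>

#ifdef DEMO_FEATURE

static int demo_lock_depth;

static int demo_recursive_lock(void)
{
	if (demo_lock_depth++ > 3)	/* arbitrary demo limit */
		return -1;
	return 0;
}

static void demo_recursive_unlock(void)
{
	demo_lock_depth--;
}

#else  /* !DEMO_FEATURE */

/* Stand-ins: same interface, no code generated when the feature is off. */
#define demo_recursive_lock()	(0)
#define demo_recursive_unlock()	do { } while (0)

#endif

int main(void)
{
	/* The caller compiles unchanged whether DEMO_FEATURE is set or not. */
	if (demo_recursive_lock() == 0) {
		printf("did work\n");
		demo_recursive_unlock();
	}
	return 0;
}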
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 076fa6f0ee48..3aa0a0dfdfa8 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -284,13 +284,12 @@ void trace_wake_up(void)
 static int __init set_buf_size(char *str)
 {
 	unsigned long buf_size;
-	int ret;
 
 	if (!str)
 		return 0;
-	ret = strict_strtoul(str, 0, &buf_size);
+	buf_size = memparse(str, &str);
 	/* nr_entries can not be zero */
-	if (ret < 0 || buf_size == 0)
+	if (buf_size == 0)
 		return 0;
 	trace_buf_size = buf_size;
 	return 1;
@@ -2053,25 +2052,23 @@ static int tracing_open(struct inode *inode, struct file *file)
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct tracer *t = m->private;
+	struct tracer *t = v;
 
 	(*pos)++;
 
 	if (t)
 		t = t->next;
 
-	m->private = t;
-
 	return t;
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-	struct tracer *t = m->private;
+	struct tracer *t;
 	loff_t l = 0;
 
 	mutex_lock(&trace_types_lock);
-	for (; t && l < *pos; t = t_next(m, t, &l))
+	for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
 		;
 
 	return t;
@@ -2107,18 +2104,10 @@ static struct seq_operations show_traces_seq_ops = {
 
 static int show_traces_open(struct inode *inode, struct file *file)
 {
-	int ret;
-
 	if (tracing_disabled)
 		return -ENODEV;
 
-	ret = seq_open(file, &show_traces_seq_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-		m->private = trace_types;
-	}
-
-	return ret;
+	return seq_open(file, &show_traces_seq_ops);
 }
 
 static ssize_t
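
set_buf_size() now goes through memparse(), the kernel helper that accepts a size with an optional K/M/G-style suffix, so a boot parameter such as trace_buf_size=2M works as expected. The snippet below is a rough user-space approximation of that parsing, built only on strtoull(); parse_size() is a local sketch, not the kernel's memparse().

/* Approximate memparse()-style parsing of "64k", "2M", "1G", etc. */
#include <stdio.h>
#include <stdlib.h>

static unsigned long long parse_size(const char *s, char **end)
{
	unsigned long long v = strtoull(s, end, 0);

	switch (**end) {
	case 'G': case 'g': v <<= 10;	/* fall through */
	case 'M': case 'm': v <<= 10;	/* fall through */
	case 'K': case 'k': v <<= 10; (*end)++; break;
	default: break;
	}
	return v;
}

int main(void)
{
	const char *samples[] = { "4096", "64k", "2M", "1G" };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		char *end;

		printf("%-5s -> %llu bytes\n", samples[i],
		       parse_size(samples[i], &end));
	}
	return 0;
}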
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6e735d4771f8..3548ae5cc780 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -597,6 +597,7 @@ print_graph_function(struct trace_iterator *iter)
 
 extern struct pid *ftrace_pid_trace;
 
+#ifdef CONFIG_FUNCTION_TRACER
 static inline int ftrace_trace_task(struct task_struct *task)
 {
 	if (!ftrace_pid_trace)
@@ -604,6 +605,12 @@ static inline int ftrace_trace_task(struct task_struct *task)
 
 	return test_tsk_trace_trace(task);
 }
+#else
+static inline int ftrace_trace_task(struct task_struct *task)
+{
+	return 1;
+}
+#endif
 
 /*
  * trace_iterator_flags is an enumeration that defines bit
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 54b1de5074b6..8c193c2eb68a 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -300,10 +300,18 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
+	struct ftrace_event_call *call = NULL;
+	loff_t l;
+
 	mutex_lock(&event_mutex);
-	if (*pos == 0)
-		m->private = ftrace_events.next;
-	return t_next(m, NULL, pos);
+
+	m->private = ftrace_events.next;
+	for (l = 0; l <= *pos; ) {
+		call = t_next(m, NULL, &l);
+		if (!call)
+			break;
+	}
+	return call;
 }
 
 static void *
@@ -332,10 +340,18 @@ s_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
+	struct ftrace_event_call *call = NULL;
+	loff_t l;
+
 	mutex_lock(&event_mutex);
-	if (*pos == 0)
-		m->private = ftrace_events.next;
-	return s_next(m, NULL, pos);
+
+	m->private = ftrace_events.next;
+	for (l = 0; l <= *pos; ) {
+		call = s_next(m, NULL, &l);
+		if (!call)
+			break;
+	}
+	return call;
 }
 
 static int t_show(struct seq_file *m, void *v)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 90f134764837..7402144bff21 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -302,8 +302,7 @@ ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
 	if (count == -1)
 		seq_printf(m, ":unlimited\n");
 	else
-		seq_printf(m, ":count=%ld", count);
-	seq_putc(m, '\n');
+		seq_printf(m, ":count=%ld\n", count);
 
 	return 0;
 }
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 9bece9687b62..7b6278110827 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -155,25 +155,19 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
 EXPORT_SYMBOL_GPL(__ftrace_vprintk);
 
 static void *
-t_next(struct seq_file *m, void *v, loff_t *pos)
+t_start(struct seq_file *m, loff_t *pos)
 {
-	const char **fmt = m->private;
-	const char **next = fmt;
-
-	(*pos)++;
+	const char **fmt = __start___trace_bprintk_fmt + *pos;
 
 	if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt)
 		return NULL;
-
-	next = fmt;
-	m->private = ++next;
-
 	return fmt;
 }
 
-static void *t_start(struct seq_file *m, loff_t *pos)
+static void *t_next(struct seq_file *m, void * v, loff_t *pos)
 {
-	return t_next(m, NULL, pos);
+	(*pos)++;
+	return t_start(m, pos);
 }
 
 static int t_show(struct seq_file *m, void *v)
@@ -224,15 +218,7 @@ static const struct seq_operations show_format_seq_ops = {
 static int
 ftrace_formats_open(struct inode *inode, struct file *file)
 {
-	int ret;
-
-	ret = seq_open(file, &show_format_seq_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-
-		m->private = __start___trace_bprintk_fmt;
-	}
-	return ret;
+	return seq_open(file, &show_format_seq_ops);
 }
 
 static const struct file_operations ftrace_formats_fops = {
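
trace_printk.c drops the m->private bookkeeping entirely: the format strings sit in a contiguous section between __start___trace_bprintk_fmt and __stop___trace_bprintk_fmt, so ->start() can derive the current entry straight from *pos and ftrace_formats_open() no longer needs to seed the iterator. A small stand-alone sketch of that array-indexed style follows; the demo_* names and the two sample formats are invented for illustration.

/* Seq-style iteration over a flat array: position is just an index. */
#include <stdio.h>

static const char *demo_fmts[] = {
	"irq %d latency %lu",
	"cpu %d dropped %lu events",
};
#define DEMO_NFMTS (sizeof(demo_fmts) / sizeof(demo_fmts[0]))

/* ->start(): no cached cursor, the element is recomputed from *pos. */
static const char **demo_start(long long *pos)
{
	if ((size_t)*pos >= DEMO_NFMTS)
		return NULL;
	return &demo_fmts[*pos];
}

/* ->next(): bump the index and re-derive the element. */
static const char **demo_next(long long *pos)
{
	(*pos)++;
	return demo_start(pos);
}

int main(void)
{
	long long pos = 0;

	for (const char **fmt = demo_start(&pos); fmt; fmt = demo_next(&pos))
		printf("%lld: %s\n", pos, *fmt);
	return 0;
}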
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index c00643733f4c..e66f5e493342 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -199,17 +199,13 @@ static void *stat_seq_start(struct seq_file *s, loff_t *pos)
 	mutex_lock(&session->stat_mutex);
 
 	/* If we are in the beginning of the file, print the headers */
-	if (!*pos && session->ts->stat_headers) {
-		(*pos)++;
+	if (!*pos && session->ts->stat_headers)
 		return SEQ_START_TOKEN;
-	}
 
 	node = rb_first(&session->stat_root);
 	for (i = 0; node && i < *pos; i++)
 		node = rb_next(node);
 
-	(*pos)++;
-
 	return node;
 }
 