author	Ingo Molnar <mingo@elte.hu>	2008-05-12 15:20:49 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-23 14:54:47 -0400
commit	2e0f57618529a2739a5e1570e6c445c9c966b595 (patch)
tree	3d1b40eff4aa1b00eb4d630a536f2e89ed3411dc
parent	0fd9e0dac9026df09986a4b201518ae015814aef (diff)
ftrace: build fix
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--	kernel/trace/trace.c	111
-rw-r--r--	kernel/trace/trace_functions.c	2
2 files changed, 67 insertions(+), 46 deletions(-)
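
Context for the "build fix": judging from the hunks alone, the breakage appears to be the old layout of trace.c, where trace_ops is defined under #ifdef CONFIG_FTRACE but tracing_start_function_trace() and tracing_stop_function_trace() sat below the #endif and still took &trace_ops, so a CONFIG_FTRACE=n build would fail on the missing symbol. The patch moves the whole block, start/stop helpers included, inside one #ifdef (new lines 658-697) and additionally splits ftrace() into __ftrace() plus a wrapper that checks data->disabled. The sketch below is only a minimal stand-alone illustration of that #ifdef layout, not the kernel code; the stub struct and the register/unregister stubs are simplified stand-ins.

/* sketch.c -- illustrative only; build with and without -DCONFIG_FTRACE */
struct ftrace_ops {
	void (*func)(unsigned long ip, unsigned long parent_ip);
};

/* stand-ins for the real kernel registration API */
static int register_ftrace_function(struct ftrace_ops *ops)   { (void)ops; return 0; }
static int unregister_ftrace_function(struct ftrace_ops *ops) { (void)ops; return 0; }

#ifdef CONFIG_FTRACE
static void function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	(void)ip; (void)parent_ip;	/* the real version records the call */
}

static struct ftrace_ops trace_ops = {
	.func = function_trace_call,
};

/*
 * After the patch these helpers live inside the #ifdef.  Before it they
 * sat after the #endif while still referencing &trace_ops, which does not
 * exist when CONFIG_FTRACE is disabled.
 */
void tracing_start_function_trace(void)
{
	register_ftrace_function(&trace_ops);
}

void tracing_stop_function_trace(void)
{
	unregister_ftrace_function(&trace_ops);
}
#endif	/* CONFIG_FTRACE */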
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d74c039305ad..71b25b79b3de 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -432,47 +432,6 @@ notrace void tracing_reset(struct trace_array_cpu *data)
 	data->trace_tail_idx = 0;
 }
 
-#ifdef CONFIG_FTRACE
-static notrace void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
-{
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
-	int cpu;
-
-	if (unlikely(!tracer_enabled))
-		return;
-
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-
-	if (likely(disabled == 1))
-		ftrace(tr, data, ip, parent_ip, flags);
-
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-}
-
-static struct ftrace_ops trace_ops __read_mostly =
-{
-	.func = function_trace_call,
-};
-#endif
-
-notrace void tracing_start_function_trace(void)
-{
-	register_ftrace_function(&trace_ops);
-}
-
-notrace void tracing_stop_function_trace(void)
-{
-	unregister_ftrace_function(&trace_ops);
-}
-
 #define SAVED_CMDLINES 128
 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
@@ -635,8 +594,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
 }
 
 notrace void
-ftrace(struct trace_array *tr, struct trace_array_cpu *data,
+__ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 	unsigned long ip, unsigned long parent_ip, unsigned long flags)
 {
 	struct trace_entry *entry;
 	unsigned long irq_flags;
@@ -651,6 +610,14 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 }
 
 notrace void
+ftrace(struct trace_array *tr, struct trace_array_cpu *data,
+       unsigned long ip, unsigned long parent_ip, unsigned long flags)
+{
+	if (likely(!atomic_read(&data->disabled)))
+		__ftrace(tr, data, ip, parent_ip, flags);
+}
+
+notrace void
 trace_special(struct trace_array *tr, struct trace_array_cpu *data,
 	      unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
@@ -688,6 +655,47 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	spin_unlock_irqrestore(&data->lock, irq_flags);
 }
 
+#ifdef CONFIG_FTRACE
+static notrace void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+
+	if (unlikely(!tracer_enabled))
+		return;
+
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1))
+		__ftrace(tr, data, ip, parent_ip, flags);
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+}
+
+static struct ftrace_ops trace_ops __read_mostly =
+{
+	.func = function_trace_call,
+};
+
+notrace void tracing_start_function_trace(void)
+{
+	register_ftrace_function(&trace_ops);
+}
+
+notrace void tracing_stop_function_trace(void)
+{
+	unregister_ftrace_function(&trace_ops);
+}
+#endif
+
 enum trace_file_type {
 	TRACE_FILE_LAT_FMT = 1,
 };
@@ -722,7 +730,7 @@ trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
 	return &array[iter->next_page_idx[cpu]];
 }
 
-static struct notrace trace_entry *
+static struct trace_entry * notrace
 find_next_entry(struct trace_iterator *iter, int *ent_cpu)
 {
 	struct trace_array *tr = iter->tr;
@@ -1866,6 +1874,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 	static cpumask_t mask;
 	static int start;
 	unsigned long flags;
+	int ftrace_save;
 	int read = 0;
 	int cpu;
 	int len;
@@ -1944,6 +1953,9 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
 	cpus_clear(mask);
 	local_irq_save(flags);
+	ftrace_save = ftrace_enabled;
+	ftrace_enabled = 0;
+	smp_wmb();
 	for_each_possible_cpu(cpu) {
 		data = iter->tr->data[cpu];
 
@@ -1951,10 +1963,14 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 			continue;
 
 		atomic_inc(&data->disabled);
-		spin_lock(&data->lock);
 		cpu_set(cpu, mask);
 	}
 
+	for_each_cpu_mask(cpu, mask) {
+		data = iter->tr->data[cpu];
+		spin_lock(&data->lock);
+	}
+
 	while (find_next_entry_inc(iter) != NULL) {
 		int len = iter->seq.len;
 
@@ -1974,8 +1990,13 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 	for_each_cpu_mask(cpu, mask) {
 		data = iter->tr->data[cpu];
 		spin_unlock(&data->lock);
+	}
+
+	for_each_cpu_mask(cpu, mask) {
+		data = iter->tr->data[cpu];
 		atomic_dec(&data->disabled);
 	}
+	ftrace_enabled = ftrace_save;
 	local_irq_restore(flags);
 
 	/* Now copy what we have to the user */
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index e5d34b78fc99..69a0eb00a0a5 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -9,10 +9,10 @@
  * Copyright (C) 2004-2006 Ingo Molnar
  * Copyright (C) 2004 William Lee Irwin III
  */
-#include <linux/fs.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/fs.h>
 
 #include "trace.h"
 