author	Ingo Molnar <mingo@elte.hu>	2009-04-07 07:34:26 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-07 07:34:42 -0400
commit	2e8844e13ab73f1107aea4317a53ff5879f2e1d7 (patch)
tree	36165371cf6fd26d674610f1c6bb5fac50e6e13f /arch/x86/kernel/ftrace.c
parent	c78a3956b982418186e40978a51636a2b43221bc (diff)
parent	d508afb437daee7cf07da085b635c44a4ebf9b38 (diff)
Merge branch 'linus' into tracing/hw-branch-tracing
Merge reason: update to latest tracing and ptrace APIs

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/ftrace.c')
-rw-r--r--	arch/x86/kernel/ftrace.c	132
1 file changed, 104 insertions(+), 28 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index a85da1764b1c..61df77532120 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -79,11 +79,11 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
  *
  * 1) Put the instruction pointer into the IP buffer
  *    and the new code into the "code" buffer.
- * 2) Set a flag that says we are modifying code
- * 3) Wait for any running NMIs to finish.
- * 4) Write the code
- * 5) clear the flag.
- * 6) Wait for any running NMIs to finish.
+ * 2) Wait for any running NMIs to finish and set a flag that says
+ *    we are modifying code, it is done in an atomic operation.
+ * 3) Write the code
+ * 4) clear the flag.
+ * 5) Wait for any running NMIs to finish.
  *
  * If an NMI is executed, the first thing it does is to call
  * "ftrace_nmi_enter". This will check if the flag is set to write
@@ -95,9 +95,9 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
  * are the same as what exists.
  */
 
+#define MOD_CODE_WRITE_FLAG (1 << 31)	/* set when NMI should do the write */
 static atomic_t nmi_running = ATOMIC_INIT(0);
 static int mod_code_status;		/* holds return value of text write */
-static int mod_code_write;		/* set when NMI should do the write */
 static void *mod_code_ip;		/* holds the IP to write to */
 static void *mod_code_newcode;	/* holds the text to write to the IP */
 
@@ -114,6 +114,20 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
 	return r;
 }
 
+static void clear_mod_flag(void)
+{
+	int old = atomic_read(&nmi_running);
+
+	for (;;) {
+		int new = old & ~MOD_CODE_WRITE_FLAG;
+
+		if (old == new)
+			break;
+
+		old = atomic_cmpxchg(&nmi_running, old, new);
+	}
+}
+
 static void ftrace_mod_code(void)
 {
 	/*
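A note on the loop just added: clear_mod_flag() proposes the value it last saw with the top bit cleared; if an NMI bumps nmi_running between the read and the atomic_cmpxchg(), the cmpxchg fails, hands back the fresh value, and the next pass retries against that. Only the flag bit is ever changed, so concurrent increments and decrements of the NMI count are never lost, and the loop exits as soon as it observes a value with the bit already clear.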
@@ -127,27 +141,39 @@ static void ftrace_mod_code(void)
 
 	/* if we fail, then kill any new writers */
 	if (mod_code_status)
-		mod_code_write = 0;
+		clear_mod_flag();
 }
 
 void ftrace_nmi_enter(void)
 {
-	atomic_inc(&nmi_running);
-	/* Must have nmi_running seen before reading write flag */
-	smp_mb();
-	if (mod_code_write) {
+	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
+		smp_rmb();
 		ftrace_mod_code();
 		atomic_inc(&nmi_update_count);
 	}
+	/* Must have previous changes seen before executions */
+	smp_mb();
 }
 
 void ftrace_nmi_exit(void)
 {
 	/* Finish all executions before clearing nmi_running */
-	smp_wmb();
+	smp_mb();
 	atomic_dec(&nmi_running);
 }
 
+static void wait_for_nmi_and_set_mod_flag(void)
+{
+	if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
+		return;
+
+	do {
+		cpu_relax();
+	} while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
+
+	nmi_wait_count++;
+}
+
 static void wait_for_nmi(void)
 {
 	if (!atomic_read(&nmi_running))
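wait_for_nmi_and_set_mod_flag() is the writer-side half of the same trick: atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG) can only succeed while the NMI count is zero, so "wait until no NMIs are running" and "announce the write" become one indivisible step, closing the window the old mod_code_write variable left open. On the NMI side, the smp_rmb() orders the flag check against the reads of the mod_code_* buffers, and the smp_mb() at the end of ftrace_nmi_enter() ensures any freshly written instructions are visible before the NMI goes on executing possibly patched code.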
@@ -167,14 +193,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
 	mod_code_newcode = new_code;
 
 	/* The buffers need to be visible before we let NMIs write them */
-	smp_wmb();
-
-	mod_code_write = 1;
-
-	/* Make sure write bit is visible before we wait on NMIs */
 	smp_mb();
 
-	wait_for_nmi();
+	wait_for_nmi_and_set_mod_flag();
 
 	/* Make sure all running NMIs have finished before we write the code */
 	smp_mb();
@@ -182,13 +203,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
 	ftrace_mod_code();
 
 	/* Make sure the write happens before clearing the bit */
-	smp_wmb();
-
-	mod_code_write = 0;
-
-	/* make sure NMIs see the cleared bit */
 	smp_mb();
 
+	clear_mod_flag();
 	wait_for_nmi();
 
 	return mod_code_status;
@@ -393,7 +410,6 @@ int ftrace_disable_ftrace_graph_caller(void)
 void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 {
 	unsigned long old;
-	unsigned long long calltime;
 	int faulted;
 	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)
@@ -436,10 +452,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 		return;
 	}
 
-	calltime = trace_clock_local();
-
-	if (ftrace_push_return_trace(old, calltime,
-				self_addr, &trace.depth) == -EBUSY) {
+	if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
 		*parent = old;
 		return;
 	}
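The calltime handling disappears from the arch code here because, in the tracing tree being merged in, the timestamp is taken inside ftrace_push_return_trace() itself; hence both the trace_clock_local() call and the extra argument are gone.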
@@ -453,3 +466,66 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	}
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+extern unsigned long __start_syscalls_metadata[];
+extern unsigned long __stop_syscalls_metadata[];
+extern unsigned long *sys_call_table;
+
+static struct syscall_metadata **syscalls_metadata;
+
+static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
+{
+	struct syscall_metadata *start;
+	struct syscall_metadata *stop;
+	char str[KSYM_SYMBOL_LEN];
+
+
+	start = (struct syscall_metadata *)__start_syscalls_metadata;
+	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
+	kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);
+
+	for ( ; start < stop; start++) {
+		if (start->name && !strcmp(start->name, str))
+			return start;
+	}
+	return NULL;
+}
+
+struct syscall_metadata *syscall_nr_to_meta(int nr)
+{
+	if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0)
+		return NULL;
+
+	return syscalls_metadata[nr];
+}
+
+void arch_init_ftrace_syscalls(void)
+{
+	int i;
+	struct syscall_metadata *meta;
+	unsigned long **psys_syscall_table = &sys_call_table;
+	static atomic_t refs;
+
+	if (atomic_inc_return(&refs) != 1)
+		goto end;
+
+	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
+					FTRACE_SYSCALL_MAX, GFP_KERNEL);
+	if (!syscalls_metadata) {
+		WARN_ON(1);
+		return;
+	}
+
+	for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
+		meta = find_syscall_meta(psys_syscall_table[i]);
+		syscalls_metadata[i] = meta;
+	}
+	return;
+
+	/* Paranoid: avoid overflow */
+end:
+	atomic_dec(&refs);
+}
+#endif
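The syscall half of the merge builds a reverse map from syscall-table slots to compile-time metadata by round-tripping through kallsyms: each sys_call_table entry is resolved back to its symbol name, and that name is string-matched against the linker-collected __syscalls_metadata section. A rough userspace analogue of the matching loop follows, a sketch only: the toy array stands in for the linker section, lookup_symbol() stands in for kallsyms_lookup(), and the struct is trimmed to the fields this sketch needs.

#include <stdio.h>
#include <string.h>

struct syscall_metadata {
	const char *name;	/* e.g. "sys_open" */
	int nb_args;
};

/* Stand-in for the __start/__stop_syscalls_metadata linker section. */
static struct syscall_metadata section[] = {
	{ "sys_close", 1 },
	{ "sys_open",  3 },
};

/* Stand-in for kallsyms_lookup(): resolve an address to a symbol name. */
static void lookup_symbol(const void *addr, char *buf, size_t len)
{
	(void)addr;			/* a real lookup would use addr */
	snprintf(buf, len, "sys_open");	/* pretend resolution */
}

static struct syscall_metadata *find_meta(const void *syscall)
{
	struct syscall_metadata *start = section;
	struct syscall_metadata *stop = section + sizeof(section) / sizeof(section[0]);
	char str[128];

	lookup_symbol(syscall, str, sizeof(str));
	for (; start < stop; start++)	/* linear scan, as in the patch */
		if (start->name && !strcmp(start->name, str))
			return start;
	return NULL;
}

int main(void)
{
	const struct syscall_metadata *m = find_meta((const void *)0);

	printf("%s takes %d args\n", m ? m->name : "?", m ? m->nb_args : 0);
	return 0;
}

Matching by name keeps the arch code free of any hand-maintained table; the cost is one kallsyms lookup plus a linear scan per syscall at init, which arch_init_ftrace_syscalls() pays exactly once thanks to its static refcount.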