Diffstat (limited to 'kernel/trace/ftrace.c'):
-rw-r--r--  kernel/trace/ftrace.c | 1541 +++++++++++++++++++++++++++++------------
 1 file changed, 964 insertions(+), 577 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f6e3af31b403..2f32969c09df 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -25,17 +25,35 @@
 #include <linux/ftrace.h>
 #include <linux/sysctl.h>
 #include <linux/ctype.h>
-#include <linux/hash.h>
 #include <linux/list.h>
 
 #include <asm/ftrace.h>
 
 #include "trace.h"
 
+#define FTRACE_WARN_ON(cond)			\
+	do {					\
+		if (WARN_ON(cond))		\
+			ftrace_kill();		\
+	} while (0)
+
+#define FTRACE_WARN_ON_ONCE(cond)		\
+	do {					\
+		if (WARN_ON_ONCE(cond))		\
+			ftrace_kill();		\
+	} while (0)
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
+/* set when tracing only a pid */
+struct pid *ftrace_pid_trace;
+static struct pid * const ftrace_swapper_pid = &init_struct_pid;
+
+/* Quick disabling of function tracer. */
+int function_trace_stop;
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -44,6 +62,7 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_SPINLOCK(ftrace_lock);
 static DEFINE_MUTEX(ftrace_sysctl_lock);
+static DEFINE_MUTEX(ftrace_start_lock);
 
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
@@ -52,6 +71,8 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
 
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
+ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
@@ -68,6 +89,21 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 	};
 }
 
+static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
+{
+	if (!test_tsk_trace_trace(current))
+		return;
+
+	ftrace_pid_function(ip, parent_ip);
+}
+
+static void set_ftrace_pid_function(ftrace_func_t func)
+{
+	/* do not set ftrace_pid_function to itself! */
+	if (func != ftrace_pid_func)
+		ftrace_pid_function = func;
+}
+
 /**
  * clear_ftrace_function - reset the ftrace function
  *
@@ -77,11 +113,27 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
+	__ftrace_trace_function = ftrace_stub;
+	ftrace_pid_function = ftrace_stub;
+}
+
+#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+/*
+ * For those archs that do not test ftrace_trace_stop in their
+ * mcount call site, we need to do it from C.
+ */
+static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
+{
+	if (function_trace_stop)
+		return;
+
+	__ftrace_trace_function(ip, parent_ip);
 }
+#endif
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
-	/* Should never be called by interrupts */
+	/* should not be called from interrupt context */
 	spin_lock(&ftrace_lock);
 
 	ops->next = ftrace_list;
@@ -95,14 +147,28 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	ftrace_list = ops;
 
 	if (ftrace_enabled) {
+		ftrace_func_t func;
+
+		if (ops->next == &ftrace_list_end)
+			func = ops->func;
+		else
+			func = ftrace_list_func;
+
+		if (ftrace_pid_trace) {
+			set_ftrace_pid_function(func);
+			func = ftrace_pid_func;
+		}
+
 		/*
 		 * For one func, simply call it directly.
 		 * For more than one func, call the chain.
 		 */
-		if (ops->next == &ftrace_list_end)
-			ftrace_trace_function = ops->func;
-		else
-			ftrace_trace_function = ftrace_list_func;
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+		ftrace_trace_function = func;
+#else
+		__ftrace_trace_function = func;
+		ftrace_trace_function = ftrace_test_stop_func;
+#endif
 	}
 
 	spin_unlock(&ftrace_lock);
@@ -115,6 +181,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	struct ftrace_ops **p;
 	int ret = 0;
 
+	/* should not be called from interrupt context */
 	spin_lock(&ftrace_lock);
 
 	/*
@@ -140,9 +207,19 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 
 	if (ftrace_enabled) {
 		/* If we only have one func left, then call that directly */
-		if (ftrace_list == &ftrace_list_end ||
-		    ftrace_list->next == &ftrace_list_end)
-			ftrace_trace_function = ftrace_list->func;
+		if (ftrace_list->next == &ftrace_list_end) {
+			ftrace_func_t func = ftrace_list->func;
+
+			if (ftrace_pid_trace) {
+				set_ftrace_pid_function(func);
+				func = ftrace_pid_func;
+			}
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+			ftrace_trace_function = func;
+#else
+			__ftrace_trace_function = func;
+#endif
+		}
 	}
 
  out:
@@ -151,9 +228,48 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	return ret;
 }
 
+static void ftrace_update_pid_func(void)
+{
+	ftrace_func_t func;
+
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
+
+	if (ftrace_trace_function == ftrace_stub)
+		goto out;
+
+	func = ftrace_trace_function;
+
+	if (ftrace_pid_trace) {
+		set_ftrace_pid_function(func);
+		func = ftrace_pid_func;
+	} else {
+		if (func == ftrace_pid_func)
+			func = ftrace_pid_function;
+	}
+
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	ftrace_trace_function = func;
+#else
+	__ftrace_trace_function = func;
+#endif
+
+ out:
+	spin_unlock(&ftrace_lock);
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE
+#ifndef CONFIG_FTRACE_MCOUNT_RECORD
+# error Dynamic ftrace depends on MCOUNT_RECORD
+#endif
 
-static struct task_struct *ftraced_task;
+/*
+ * Since MCOUNT_ADDR may point to mcount itself, we do not want
+ * to get it confused by reading a reference in the code as we
+ * are parsing on objcopy output of text. Use a variable for
+ * it instead.
+ */
+static unsigned long mcount_addr = MCOUNT_ADDR;
 
 enum {
 	FTRACE_ENABLE_CALLS		= (1 << 0),
@@ -161,18 +277,14 @@ enum {
 	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
 	FTRACE_ENABLE_MCOUNT		= (1 << 3),
 	FTRACE_DISABLE_MCOUNT		= (1 << 4),
+	FTRACE_START_FUNC_RET		= (1 << 5),
+	FTRACE_STOP_FUNC_RET		= (1 << 6),
 };
 
 static int ftrace_filtered;
-static int tracing_on;
-static int frozen_record_count;
-
-static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
 
-static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
+static LIST_HEAD(ftrace_new_addrs);
 
-static DEFINE_SPINLOCK(ftrace_shutdown_lock);
-static DEFINE_MUTEX(ftraced_lock);
 static DEFINE_MUTEX(ftrace_regex_lock);
 
 struct ftrace_page {
@@ -190,16 +302,13 @@ struct ftrace_page {
 static struct ftrace_page	*ftrace_pages_start;
 static struct ftrace_page	*ftrace_pages;
 
-static int ftraced_trigger;
-static int ftraced_suspend;
-static int ftraced_stop;
-
-static int ftrace_record_suspend;
-
 static struct dyn_ftrace *ftrace_free_records;
 
 
 #ifdef CONFIG_KPROBES
+
+static int frozen_record_count;
+
 static inline void freeze_record(struct dyn_ftrace *rec)
 {
 	if (!(rec->flags & FTRACE_FL_FROZEN)) {
@@ -226,79 +335,36 @@ static inline int record_frozen(struct dyn_ftrace *rec)
 # define record_frozen(rec) ({ 0; })
 #endif /* CONFIG_KPROBES */
 
-int skip_trace(unsigned long ip)
+static void ftrace_free_rec(struct dyn_ftrace *rec)
 {
-	unsigned long fl;
-	struct dyn_ftrace *rec;
-	struct hlist_node *t;
-	struct hlist_head *head;
-
-	if (frozen_record_count == 0)
-		return 0;
-
-	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
-	hlist_for_each_entry_rcu(rec, t, head, node) {
-		if (rec->ip == ip) {
-			if (record_frozen(rec)) {
-				if (rec->flags & FTRACE_FL_FAILED)
-					return 1;
-
-				if (!(rec->flags & FTRACE_FL_CONVERTED))
-					return 1;
-
-				if (!tracing_on || !ftrace_enabled)
-					return 1;
-
-				if (ftrace_filtered) {
-					fl = rec->flags & (FTRACE_FL_FILTER |
-							   FTRACE_FL_NOTRACE);
-					if (!fl || (fl & FTRACE_FL_NOTRACE))
-						return 1;
-				}
-			}
-			break;
-		}
-	}
-
-	return 0;
+	rec->ip = (unsigned long)ftrace_free_records;
+	ftrace_free_records = rec;
+	rec->flags |= FTRACE_FL_FREE;
 }
 
-static inline int
-ftrace_ip_in_hash(unsigned long ip, unsigned long key)
+void ftrace_release(void *start, unsigned long size)
 {
-	struct dyn_ftrace *p;
-	struct hlist_node *t;
-	int found = 0;
-
-	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
-		if (p->ip == ip) {
-			found = 1;
-			break;
-		}
-	}
-
-	return found;
-}
+	struct dyn_ftrace *rec;
+	struct ftrace_page *pg;
+	unsigned long s = (unsigned long)start;
+	unsigned long e = s + size;
+	int i;
 
-static inline void
-ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
-{
-	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
-}
+	if (ftrace_disabled || !start)
+		return;
 
-/* called from kstop_machine */
-static inline void ftrace_del_hash(struct dyn_ftrace *node)
-{
-	hlist_del(&node->node);
-}
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
 
-static void ftrace_free_rec(struct dyn_ftrace *rec)
-{
-	/* no locking, only called from kstop_machine */
+	for (pg = ftrace_pages_start; pg; pg = pg->next) {
+		for (i = 0; i < pg->index; i++) {
+			rec = &pg->records[i];
 
-	rec->ip = (unsigned long)ftrace_free_records;
-	ftrace_free_records = rec;
-	rec->flags |= FTRACE_FL_FREE;
+			if ((rec->ip >= s) && (rec->ip < e))
+				ftrace_free_rec(rec);
+		}
+	}
+	spin_unlock(&ftrace_lock);
 }
 
 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
@@ -310,10 +376,8 @@ static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
 	rec = ftrace_free_records;
 
 	if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
-		WARN_ON_ONCE(1);
+		FTRACE_WARN_ON_ONCE(1);
 		ftrace_free_records = NULL;
-		ftrace_disabled = 1;
-		ftrace_enabled = 0;
 		return NULL;
 	}
 
@@ -323,182 +387,163 @@ static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
 	}
 
 	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
-		if (!ftrace_pages->next)
-			return NULL;
+		if (!ftrace_pages->next) {
+			/* allocate another page */
+			ftrace_pages->next =
+				(void *)get_zeroed_page(GFP_KERNEL);
+			if (!ftrace_pages->next)
+				return NULL;
+		}
 		ftrace_pages = ftrace_pages->next;
 	}
 
 	return &ftrace_pages->records[ftrace_pages->index++];
 }
 
-static void
+static struct dyn_ftrace *
 ftrace_record_ip(unsigned long ip)
 {
-	struct dyn_ftrace *node;
-	unsigned long flags;
-	unsigned long key;
-	int resched;
-	int atomic;
-	int cpu;
-
-	if (!ftrace_enabled || ftrace_disabled)
-		return;
-
-	resched = need_resched();
-	preempt_disable_notrace();
-
-	/*
-	 * We simply need to protect against recursion.
-	 * Use the the raw version of smp_processor_id and not
-	 * __get_cpu_var which can call debug hooks that can
-	 * cause a recursive crash here.
-	 */
-	cpu = raw_smp_processor_id();
-	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
-	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
-		goto out;
-
-	if (unlikely(ftrace_record_suspend))
-		goto out;
-
-	key = hash_long(ip, FTRACE_HASHBITS);
-
-	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);
-
-	if (ftrace_ip_in_hash(ip, key))
-		goto out;
-
-	atomic = irqs_disabled();
-
-	spin_lock_irqsave(&ftrace_shutdown_lock, flags);
+	struct dyn_ftrace *rec;
 
-	/* This ip may have hit the hash before the lock */
-	if (ftrace_ip_in_hash(ip, key))
-		goto out_unlock;
+	if (ftrace_disabled)
+		return NULL;
 
-	node = ftrace_alloc_dyn_node(ip);
-	if (!node)
-		goto out_unlock;
+	rec = ftrace_alloc_dyn_node(ip);
+	if (!rec)
+		return NULL;
 
-	node->ip = ip;
+	rec->ip = ip;
 
-	ftrace_add_hash(node, key);
+	list_add(&rec->list, &ftrace_new_addrs);
 
-	ftraced_trigger = 1;
+	return rec;
+}
 
- out_unlock:
-	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
- out:
-	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
+static void print_ip_ins(const char *fmt, unsigned char *p)
+{
+	int i;
 
-	/* prevent recursion with scheduler */
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	printk(KERN_CONT "%s", fmt);
+
+	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
+		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
+}
+
+static void ftrace_bug(int failed, unsigned long ip)
+{
+	switch (failed) {
+	case -EFAULT:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace faulted on modifying ");
+		print_ip_sym(ip);
+		break;
+	case -EINVAL:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace failed to modify ");
+		print_ip_sym(ip);
+		print_ip_ins(" actual: ", (unsigned char *)ip);
+		printk(KERN_CONT "\n");
+		break;
+	case -EPERM:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace faulted on writing ");
+		print_ip_sym(ip);
+		break;
+	default:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace faulted on unknown error ");
+		print_ip_sym(ip);
+	}
 }
 
-#define FTRACE_ADDR ((long)(ftrace_caller))
 
 static int
-__ftrace_replace_code(struct dyn_ftrace *rec,
-		      unsigned char *old, unsigned char *new, int enable)
+__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
 	unsigned long ip, fl;
+	unsigned long ftrace_addr;
+
+	ftrace_addr = (unsigned long)ftrace_caller;
 
 	ip = rec->ip;
 
-	if (ftrace_filtered && enable) {
+	/*
+	 * If this record is not to be traced and
+	 * it is not enabled then do nothing.
+	 *
+	 * If this record is not to be traced and
+	 * it is enabled then disabled it.
+	 *
+	 */
+	if (rec->flags & FTRACE_FL_NOTRACE) {
+		if (rec->flags & FTRACE_FL_ENABLED)
+			rec->flags &= ~FTRACE_FL_ENABLED;
+		else
+			return 0;
+
+	} else if (ftrace_filtered && enable) {
 		/*
-		 * If filtering is on:
-		 *
-		 * If this record is set to be filtered and
-		 * is enabled then do nothing.
-		 *
-		 * If this record is set to be filtered and
-		 * it is not enabled, enable it.
-		 *
-		 * If this record is not set to be filtered
-		 * and it is not enabled do nothing.
-		 *
-		 * If this record is set not to trace then
-		 * do nothing.
-		 *
-		 * If this record is set not to trace and
-		 * it is enabled then disable it.
-		 *
-		 * If this record is not set to be filtered and
-		 * it is enabled, disable it.
+		 * Filtering is on:
 		 */
 
-		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
-				   FTRACE_FL_ENABLED);
+		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
 
-		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
-		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
-		    !fl || (fl == FTRACE_FL_NOTRACE))
+		/* Record is filtered and enabled, do nothing */
+		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
 			return 0;
 
-		/*
-		 * If it is enabled disable it,
-		 * otherwise enable it!
-		 */
-		if (fl & FTRACE_FL_ENABLED) {
-			/* swap new and old */
-			new = old;
-			old = ftrace_call_replace(ip, FTRACE_ADDR);
+		/* Record is not filtered and is not enabled do nothing */
+		if (!fl)
+			return 0;
+
+		/* Record is not filtered but enabled, disable it */
+		if (fl == FTRACE_FL_ENABLED)
 			rec->flags &= ~FTRACE_FL_ENABLED;
-		} else {
-			new = ftrace_call_replace(ip, FTRACE_ADDR);
+		else
+			/* Otherwise record is filtered but not enabled, enable it */
 			rec->flags |= FTRACE_FL_ENABLED;
-		}
 	} else {
+		/* Disable or not filtered */
 
 		if (enable) {
-			/*
-			 * If this record is set not to trace and is
-			 * not enabled, do nothing.
-			 */
-			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
-			if (fl == FTRACE_FL_NOTRACE)
-				return 0;
-
-			new = ftrace_call_replace(ip, FTRACE_ADDR);
-		} else
-			old = ftrace_call_replace(ip, FTRACE_ADDR);
-
-		if (enable) {
+			/* if record is enabled, do nothing */
 			if (rec->flags & FTRACE_FL_ENABLED)
 				return 0;
+
 			rec->flags |= FTRACE_FL_ENABLED;
+
 		} else {
+
+			/* if record is not enabled do nothing */
 			if (!(rec->flags & FTRACE_FL_ENABLED))
 				return 0;
+
 			rec->flags &= ~FTRACE_FL_ENABLED;
 		}
 	}
 
-	return ftrace_modify_code(ip, old, new);
+	if (rec->flags & FTRACE_FL_ENABLED)
+		return ftrace_make_call(rec, ftrace_addr);
+	else
+		return ftrace_make_nop(NULL, rec, ftrace_addr);
 }
 
 static void ftrace_replace_code(int enable)
 {
 	int i, failed;
-	unsigned char *new = NULL, *old = NULL;
 	struct dyn_ftrace *rec;
 	struct ftrace_page *pg;
 
-	if (enable)
-		old = ftrace_nop_replace();
-	else
-		new = ftrace_nop_replace();
-
 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
 		for (i = 0; i < pg->index; i++) {
 			rec = &pg->records[i];
 
-			/* don't modify code that has already faulted */
-			if (rec->flags & FTRACE_FL_FAILED)
+			/*
+			 * Skip over free records and records that have
+			 * failed.
+			 */
+			if (rec->flags & FTRACE_FL_FREE ||
+			    rec->flags & FTRACE_FL_FAILED)
 				continue;
 
 			/* ignore updates to this record's mcount site */
@@ -509,78 +554,52 @@ static void ftrace_replace_code(int enable)
 				unfreeze_record(rec);
 			}
 
-			failed = __ftrace_replace_code(rec, old, new, enable);
+			failed = __ftrace_replace_code(rec, enable);
 			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
 				rec->flags |= FTRACE_FL_FAILED;
 				if ((system_state == SYSTEM_BOOTING) ||
 				    !core_kernel_text(rec->ip)) {
-					ftrace_del_hash(rec);
 					ftrace_free_rec(rec);
-				}
+				} else
+					ftrace_bug(failed, rec->ip);
 			}
 		}
 	}
 }
 
-static void ftrace_shutdown_replenish(void)
-{
-	if (ftrace_pages->next)
-		return;
-
-	/* allocate another page */
-	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
-}
-
 static int
-ftrace_code_disable(struct dyn_ftrace *rec)
+ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
 {
 	unsigned long ip;
-	unsigned char *nop, *call;
-	int failed;
+	int ret;
 
 	ip = rec->ip;
 
-	nop = ftrace_nop_replace();
-	call = ftrace_call_replace(ip, MCOUNT_ADDR);
-
-	failed = ftrace_modify_code(ip, call, nop);
-	if (failed) {
+	ret = ftrace_make_nop(mod, rec, mcount_addr);
+	if (ret) {
+		ftrace_bug(ret, ip);
 		rec->flags |= FTRACE_FL_FAILED;
 		return 0;
 	}
 	return 1;
 }
 
-static int __ftrace_update_code(void *ignore);
-
 static int __ftrace_modify_code(void *data)
 {
-	unsigned long addr;
 	int *command = data;
 
-	if (*command & FTRACE_ENABLE_CALLS) {
-		/*
-		 * Update any recorded ips now that we have the
-		 * machine stopped
-		 */
-		__ftrace_update_code(NULL);
+	if (*command & FTRACE_ENABLE_CALLS)
 		ftrace_replace_code(1);
-		tracing_on = 1;
-	} else if (*command & FTRACE_DISABLE_CALLS) {
+	else if (*command & FTRACE_DISABLE_CALLS)
 		ftrace_replace_code(0);
-		tracing_on = 0;
-	}
 
 	if (*command & FTRACE_UPDATE_TRACE_FUNC)
 		ftrace_update_ftrace_func(ftrace_trace_function);
 
-	if (*command & FTRACE_ENABLE_MCOUNT) {
-		addr = (unsigned long)ftrace_record_ip;
-		ftrace_mcount_set(&addr);
-	} else if (*command & FTRACE_DISABLE_MCOUNT) {
-		addr = (unsigned long)ftrace_stub;
-		ftrace_mcount_set(&addr);
-	}
+	if (*command & FTRACE_START_FUNC_RET)
+		ftrace_enable_ftrace_graph_caller();
+	else if (*command & FTRACE_STOP_FUNC_RET)
+		ftrace_disable_ftrace_graph_caller();
 
 	return 0;
 }
@@ -590,62 +609,44 @@ static void ftrace_run_update_code(int command)
 	stop_machine(__ftrace_modify_code, &command, NULL);
 }
 
-void ftrace_disable_daemon(void)
-{
-	/* Stop the daemon from calling kstop_machine */
-	mutex_lock(&ftraced_lock);
-	ftraced_stop = 1;
-	mutex_unlock(&ftraced_lock);
-
-	ftrace_force_update();
-}
-
-void ftrace_enable_daemon(void)
-{
-	mutex_lock(&ftraced_lock);
-	ftraced_stop = 0;
-	mutex_unlock(&ftraced_lock);
-
-	ftrace_force_update();
-}
-
 static ftrace_func_t saved_ftrace_func;
+static int ftrace_start_up;
 
-static void ftrace_startup(void)
+static void ftrace_startup_enable(int command)
 {
-	int command = 0;
-
-	if (unlikely(ftrace_disabled))
-		return;
-
-	mutex_lock(&ftraced_lock);
-	ftraced_suspend++;
-	if (ftraced_suspend == 1)
-		command |= FTRACE_ENABLE_CALLS;
-
 	if (saved_ftrace_func != ftrace_trace_function) {
 		saved_ftrace_func = ftrace_trace_function;
 		command |= FTRACE_UPDATE_TRACE_FUNC;
 	}
 
 	if (!command || !ftrace_enabled)
-		goto out;
+		return;
 
 	ftrace_run_update_code(command);
- out:
-	mutex_unlock(&ftraced_lock);
 }
 
-static void ftrace_shutdown(void)
+static void ftrace_startup(int command)
 {
-	int command = 0;
+	if (unlikely(ftrace_disabled))
+		return;
+
+	mutex_lock(&ftrace_start_lock);
+	ftrace_start_up++;
+	command |= FTRACE_ENABLE_CALLS;
 
+	ftrace_startup_enable(command);
+
+	mutex_unlock(&ftrace_start_lock);
+}
+
+static void ftrace_shutdown(int command)
+{
 	if (unlikely(ftrace_disabled))
 		return;
 
-	mutex_lock(&ftraced_lock);
-	ftraced_suspend--;
-	if (!ftraced_suspend)
+	mutex_lock(&ftrace_start_lock);
+	ftrace_start_up--;
+	if (!ftrace_start_up)
 		command |= FTRACE_DISABLE_CALLS;
 
 	if (saved_ftrace_func != ftrace_trace_function) {
@@ -658,7 +659,7 @@ static void ftrace_shutdown(void)
 
 	ftrace_run_update_code(command);
  out:
-	mutex_unlock(&ftraced_lock);
+	mutex_unlock(&ftrace_start_lock);
 }
 
 static void ftrace_startup_sysctl(void)
@@ -668,15 +669,15 @@ static void ftrace_startup_sysctl(void)
 	if (unlikely(ftrace_disabled))
 		return;
 
-	mutex_lock(&ftraced_lock);
+	mutex_lock(&ftrace_start_lock);
 	/* Force update next time */
 	saved_ftrace_func = NULL;
-	/* ftraced_suspend is true if we want ftrace running */
-	if (ftraced_suspend)
+	/* ftrace_start_up is true if we want ftrace running */
+	if (ftrace_start_up)
 		command |= FTRACE_ENABLE_CALLS;
 
 	ftrace_run_update_code(command);
-	mutex_unlock(&ftraced_lock);
+	mutex_unlock(&ftrace_start_lock);
 }
 
 static void ftrace_shutdown_sysctl(void)
@@ -686,153 +687,51 @@ static void ftrace_shutdown_sysctl(void)
 	if (unlikely(ftrace_disabled))
 		return;
 
-	mutex_lock(&ftraced_lock);
-	/* ftraced_suspend is true if ftrace is running */
-	if (ftraced_suspend)
+	mutex_lock(&ftrace_start_lock);
+	/* ftrace_start_up is true if ftrace is running */
+	if (ftrace_start_up)
 		command |= FTRACE_DISABLE_CALLS;
 
 	ftrace_run_update_code(command);
-	mutex_unlock(&ftraced_lock);
+	mutex_unlock(&ftrace_start_lock);
 }
 
 static cycle_t		ftrace_update_time;
 static unsigned long	ftrace_update_cnt;
 unsigned long		ftrace_update_tot_cnt;
 
-static int __ftrace_update_code(void *ignore)
+static int ftrace_update_code(struct module *mod)
 {
-	int i, save_ftrace_enabled;
+	struct dyn_ftrace *p, *t;
 	cycle_t start, stop;
-	struct dyn_ftrace *p;
-	struct hlist_node *t, *n;
-	struct hlist_head *head, temp_list;
-
-	/* Don't be recording funcs now */
-	ftrace_record_suspend++;
-	save_ftrace_enabled = ftrace_enabled;
-	ftrace_enabled = 0;
 
 	start = ftrace_now(raw_smp_processor_id());
 	ftrace_update_cnt = 0;
 
-	/* No locks needed, the machine is stopped! */
-	for (i = 0; i < FTRACE_HASHSIZE; i++) {
-		INIT_HLIST_HEAD(&temp_list);
-		head = &ftrace_hash[i];
+	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
 
-		/* all CPUS are stopped, we are safe to modify code */
-		hlist_for_each_entry_safe(p, t, n, head, node) {
-			/* Skip over failed records which have not been
-			 * freed. */
-			if (p->flags & FTRACE_FL_FAILED)
-				continue;
-
-			/* Unconverted records are always at the head of the
-			 * hash bucket. Once we encounter a converted record,
-			 * simply skip over to the next bucket. Saves ftraced
-			 * some processor cycles (ftrace does its bid for
-			 * global warming :-p ). */
-			if (p->flags & (FTRACE_FL_CONVERTED))
-				break;
-
-			/* Ignore updates to this record's mcount site.
-			 * Reintroduce this record at the head of this
-			 * bucket to attempt to "convert" it again if
-			 * the kprobe on it is unregistered before the
-			 * next run. */
-			if (get_kprobe((void *)p->ip)) {
-				ftrace_del_hash(p);
-				INIT_HLIST_NODE(&p->node);
-				hlist_add_head(&p->node, &temp_list);
-				freeze_record(p);
-				continue;
-			} else {
-				unfreeze_record(p);
-			}
+		/* If something went wrong, bail without enabling anything */
+		if (unlikely(ftrace_disabled))
+			return -1;
 
-			/* convert record (i.e, patch mcount-call with NOP) */
-			if (ftrace_code_disable(p)) {
-				p->flags |= FTRACE_FL_CONVERTED;
-				ftrace_update_cnt++;
-			} else {
-				if ((system_state == SYSTEM_BOOTING) ||
-				    !core_kernel_text(p->ip)) {
-					ftrace_del_hash(p);
-					ftrace_free_rec(p);
-				}
-			}
-		}
+		list_del_init(&p->list);
 
-		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
-			hlist_del(&p->node);
-			INIT_HLIST_NODE(&p->node);
-			hlist_add_head(&p->node, head);
-		}
+		/* convert record (i.e, patch mcount-call with NOP) */
+		if (ftrace_code_disable(mod, p)) {
+			p->flags |= FTRACE_FL_CONVERTED;
+			ftrace_update_cnt++;
+		} else
+			ftrace_free_rec(p);
 	}
 
 	stop = ftrace_now(raw_smp_processor_id());
 	ftrace_update_time = stop - start;
 	ftrace_update_tot_cnt += ftrace_update_cnt;
-	ftraced_trigger = 0;
-
-	ftrace_enabled = save_ftrace_enabled;
-	ftrace_record_suspend--;
 
 	return 0;
 }
 
-static int ftrace_update_code(void)
-{
-	if (unlikely(ftrace_disabled) ||
-	    !ftrace_enabled || !ftraced_trigger)
-		return 0;
-
-	stop_machine(__ftrace_update_code, NULL, NULL);
-
-	return 1;
-}
-
-static int ftraced(void *ignore)
-{
-	unsigned long usecs;
-
-	while (!kthread_should_stop()) {
-
-		set_current_state(TASK_INTERRUPTIBLE);
-
-		/* check once a second */
-		schedule_timeout(HZ);
-
-		if (unlikely(ftrace_disabled))
-			continue;
-
-		mutex_lock(&ftrace_sysctl_lock);
-		mutex_lock(&ftraced_lock);
-		if (!ftraced_suspend && !ftraced_stop &&
-		    ftrace_update_code()) {
-			usecs = nsecs_to_usecs(ftrace_update_time);
-			if (ftrace_update_tot_cnt > 100000) {
-				ftrace_update_tot_cnt = 0;
-				pr_info("hm, dftrace overflow: %lu change%s"
-					" (%lu total) in %lu usec%s\n",
-					ftrace_update_cnt,
-					ftrace_update_cnt != 1 ? "s" : "",
-					ftrace_update_tot_cnt,
-					usecs, usecs != 1 ? "s" : "");
-				ftrace_disabled = 1;
-				WARN_ON_ONCE(1);
-			}
-		}
-		mutex_unlock(&ftraced_lock);
-		mutex_unlock(&ftrace_sysctl_lock);
-
-		ftrace_shutdown_replenish();
-	}
-	__set_current_state(TASK_RUNNING);
-	return 0;
-}
-
-static int __init ftrace_dyn_table_alloc(void)
+static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
 {
 	struct ftrace_page *pg;
 	int cnt;
@@ -859,7 +758,9 @@ static int __init ftrace_dyn_table_alloc(void)
 
 	pg = ftrace_pages = ftrace_pages_start;
 
-	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
+	cnt = num_to_init / ENTRIES_PER_PAGE;
+	pr_info("ftrace: allocating %ld entries in %d pages\n",
+		num_to_init, cnt + 1);
 
 	for (i = 0; i < cnt; i++) {
 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
@@ -884,7 +785,6 @@ enum {
 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
 
 struct ftrace_iterator {
-	loff_t			pos;
 	struct ftrace_page	*pg;
 	unsigned		idx;
 	unsigned		flags;
@@ -901,21 +801,26 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 
 	(*pos)++;
 
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
  retry:
 	if (iter->idx >= iter->pg->index) {
 		if (iter->pg->next) {
 			iter->pg = iter->pg->next;
 			iter->idx = 0;
 			goto retry;
+		} else {
+			iter->idx = -1;
 		}
 	} else {
 		rec = &iter->pg->records[iter->idx++];
-		if ((!(iter->flags & FTRACE_ITER_FAILURES) &&
+		if ((rec->flags & FTRACE_FL_FREE) ||
+
+		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
 		     (rec->flags & FTRACE_FL_FAILED)) ||
 
 		    ((iter->flags & FTRACE_ITER_FAILURES) &&
-		     (!(rec->flags & FTRACE_FL_FAILED) ||
-		      (rec->flags & FTRACE_FL_FREE))) ||
+		     !(rec->flags & FTRACE_FL_FAILED)) ||
 
 		    ((iter->flags & FTRACE_ITER_FILTER) &&
 		     !(rec->flags & FTRACE_FL_FILTER)) ||
@@ -926,8 +831,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 			goto retry;
 		}
 	}
-
-	iter->pos = *pos;
+	spin_unlock(&ftrace_lock);
 
 	return rec;
 }
@@ -936,16 +840,16 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
 	void *p = NULL;
-	loff_t l = -1;
 
-	if (*pos != iter->pos) {
-		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
-			;
-	} else {
-		l = *pos;
-		p = t_next(m, p, &l);
+	if (*pos > 0) {
+		if (iter->idx < 0)
+			return p;
+		(*pos)--;
+		iter->idx--;
 	}
 
+	p = t_next(m, p, pos);
+
 	return p;
 }
 
@@ -989,7 +893,6 @@ ftrace_avail_open(struct inode *inode, struct file *file)
 		return -ENOMEM;
 
 	iter->pg = ftrace_pages_start;
-	iter->pos = -1;
 
 	ret = seq_open(file, &show_ftrace_seq_ops);
 	if (!ret) {
@@ -1039,8 +942,8 @@ static void ftrace_filter_reset(int enable)
 	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
 	unsigned i;
 
-	/* keep kstop machine from running */
-	preempt_disable();
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
 	if (enable)
 		ftrace_filtered = 0;
 	pg = ftrace_pages_start;
@@ -1053,7 +956,7 @@ static void ftrace_filter_reset(int enable)
 		}
 		pg = pg->next;
 	}
-	preempt_enable();
+	spin_unlock(&ftrace_lock);
 }
 
 static int
@@ -1076,7 +979,6 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 
 	if (file->f_mode & FMODE_READ) {
 		iter->pg = ftrace_pages_start;
-		iter->pos = -1;
 		iter->flags = enable ? FTRACE_ITER_FILTER :
 			FTRACE_ITER_NOTRACE;
 
@@ -1145,6 +1047,13 @@ ftrace_match(unsigned char *buff, int len, int enable)
 	int type = MATCH_FULL;
 	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
 	unsigned i, match = 0, search_len = 0;
+	int not = 0;
+
+	if (buff[0] == '!') {
+		not = 1;
+		buff++;
+		len--;
+	}
 
 	for (i = 0; i < len; i++) {
 		if (buff[i] == '*') {
@@ -1165,8 +1074,8 @@ ftrace_match(unsigned char *buff, int len, int enable)
 		}
 	}
 
-	/* keep kstop machine from running */
-	preempt_disable();
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
 	if (enable)
 		ftrace_filtered = 1;
 	pg = ftrace_pages_start;
@@ -1198,12 +1107,16 @@ ftrace_match(unsigned char *buff, int len, int enable)
 				matched = 1;
 				break;
 			}
-			if (matched)
-				rec->flags |= flag;
+			if (matched) {
+				if (not)
+					rec->flags &= ~flag;
+				else
+					rec->flags |= flag;
+			}
 		}
 		pg = pg->next;
 	}
-	preempt_enable();
+	spin_unlock(&ftrace_lock);
 }
 
 static ssize_t
@@ -1366,10 +1279,10 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 	}
 
 	mutex_lock(&ftrace_sysctl_lock);
-	mutex_lock(&ftraced_lock);
-	if (iter->filtered && ftraced_suspend && ftrace_enabled)
+	mutex_lock(&ftrace_start_lock);
+	if (ftrace_start_up && ftrace_enabled)
 		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
-	mutex_unlock(&ftraced_lock);
+	mutex_unlock(&ftrace_start_lock);
 	mutex_unlock(&ftrace_sysctl_lock);
 
 	kfree(iter);
@@ -1389,55 +1302,6 @@ ftrace_notrace_release(struct inode *inode, struct file *file)
 	return ftrace_regex_release(inode, file, 0);
 }
 
-static ssize_t
-ftraced_read(struct file *filp, char __user *ubuf,
-	     size_t cnt, loff_t *ppos)
-{
-	/* don't worry about races */
-	char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
-	int r = strlen(buf);
-
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-ftraced_write(struct file *filp, const char __user *ubuf,
-	      size_t cnt, loff_t *ppos)
-{
-	char buf[64];
-	long val;
-	int ret;
-
-	if (cnt >= sizeof(buf))
-		return -EINVAL;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	if (strncmp(buf, "enable", 6) == 0)
-		val = 1;
-	else if (strncmp(buf, "disable", 7) == 0)
-		val = 0;
-	else {
-		buf[cnt] = 0;
-
-		ret = strict_strtoul(buf, 10, &val);
-		if (ret < 0)
-			return ret;
-
-		val = !!val;
-	}
-
-	if (val)
-		ftrace_enable_daemon();
-	else
-		ftrace_disable_daemon();
-
-	filp->f_pos += cnt;
-
-	return cnt;
-}
-
 static struct file_operations ftrace_avail_fops = {
 	.open = ftrace_avail_open,
 	.read = seq_read,
@@ -1468,60 +1332,233 @@ static struct file_operations ftrace_notrace_fops = {
 	.release = ftrace_notrace_release,
 };
 
-static struct file_operations ftraced_fops = {
-	.open = tracing_open_generic,
-	.read = ftraced_read,
-	.write = ftraced_write,
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+static DEFINE_MUTEX(graph_lock);
+
+int ftrace_graph_count;
+unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+
+static void *
+g_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	unsigned long *array = m->private;
+	int index = *pos;
+
+	(*pos)++;
+
+	if (index >= ftrace_graph_count)
+		return NULL;
+
+	return &array[index];
+}
+
+static void *g_start(struct seq_file *m, loff_t *pos)
+{
+	void *p = NULL;
+
+	mutex_lock(&graph_lock);
+
+	p = g_next(m, p, pos);
+
+	return p;
+}
+
+static void g_stop(struct seq_file *m, void *p)
+{
+	mutex_unlock(&graph_lock);
+}
+
+static int g_show(struct seq_file *m, void *v)
+{
+	unsigned long *ptr = v;
+	char str[KSYM_SYMBOL_LEN];
+
+	if (!ptr)
+		return 0;
+
+	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
+
+	seq_printf(m, "%s\n", str);
+
+	return 0;
+}
+
+static struct seq_operations ftrace_graph_seq_ops = {
+	.start = g_start,
+	.next = g_next,
+	.stop = g_stop,
+	.show = g_show,
 };
 
-/**
- * ftrace_force_update - force an update to all recording ftrace functions
- */
-int ftrace_force_update(void)
+static int
+ftrace_graph_open(struct inode *inode, struct file *file)
 {
 	int ret = 0;
 
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
-	mutex_lock(&ftrace_sysctl_lock);
-	mutex_lock(&ftraced_lock);
-
-	/*
-	 * If ftraced_trigger is not set, then there is nothing
-	 * to update.
-	 */
-	if (ftraced_trigger && !ftrace_update_code())
-		ret = -EBUSY;
+	mutex_lock(&graph_lock);
+	if ((file->f_mode & FMODE_WRITE) &&
+	    !(file->f_flags & O_APPEND)) {
+		ftrace_graph_count = 0;
+		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
+	}
 
-	mutex_unlock(&ftraced_lock);
-	mutex_unlock(&ftrace_sysctl_lock);
+	if (file->f_mode & FMODE_READ) {
+		ret = seq_open(file, &ftrace_graph_seq_ops);
+		if (!ret) {
+			struct seq_file *m = file->private_data;
+			m->private = ftrace_graph_funcs;
+		}
+	} else
+		file->private_data = ftrace_graph_funcs;
+	mutex_unlock(&graph_lock);
 
 	return ret;
 }
 
-static void ftrace_force_shutdown(void)
+static ssize_t
+ftrace_graph_read(struct file *file, char __user *ubuf,
+		       size_t cnt, loff_t *ppos)
 {
-	struct task_struct *task;
-	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
+	if (file->f_mode & FMODE_READ)
+		return seq_read(file, ubuf, cnt, ppos);
+	else
+		return -EPERM;
+}
 
-	mutex_lock(&ftraced_lock);
-	task = ftraced_task;
-	ftraced_task = NULL;
-	ftraced_suspend = -1;
-	ftrace_run_update_code(command);
-	mutex_unlock(&ftraced_lock);
+static int
+ftrace_set_func(unsigned long *array, int idx, char *buffer)
+{
+	char str[KSYM_SYMBOL_LEN];
+	struct dyn_ftrace *rec;
+	struct ftrace_page *pg;
+	int found = 0;
+	int i, j;
+
+	if (ftrace_disabled)
+		return -ENODEV;
+
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
+
+	for (pg = ftrace_pages_start; pg; pg = pg->next) {
+		for (i = 0; i < pg->index; i++) {
+			rec = &pg->records[i];
+
+			if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
+				continue;
+
+			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+			if (strcmp(str, buffer) == 0) {
+				found = 1;
+				for (j = 0; j < idx; j++)
+					if (array[j] == rec->ip) {
+						found = 0;
+						break;
+					}
+				if (found)
+					array[idx] = rec->ip;
+				break;
+			}
+		}
+	}
+	spin_unlock(&ftrace_lock);
 
-	if (task)
-		kthread_stop(task);
+	return found ? 0 : -EINVAL;
 }
 
-static __init int ftrace_init_debugfs(void)
+static ssize_t
+ftrace_graph_write(struct file *file, const char __user *ubuf,
+		   size_t cnt, loff_t *ppos)
 {
-	struct dentry *d_tracer;
-	struct dentry *entry;
+	unsigned char buffer[FTRACE_BUFF_MAX+1];
+	unsigned long *array;
+	size_t read = 0;
+	ssize_t ret;
+	int index = 0;
+	char ch;
 
-	d_tracer = tracing_init_dentry();
+	if (!cnt || cnt < 0)
+		return 0;
+
+	mutex_lock(&graph_lock);
+
+	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (file->f_mode & FMODE_READ) {
+		struct seq_file *m = file->private_data;
+		array = m->private;
+	} else
+		array = file->private_data;
+
+	ret = get_user(ch, ubuf++);
+	if (ret)
+		goto out;
+	read++;
+	cnt--;
+
+	/* skip white space */
+	while (cnt && isspace(ch)) {
+		ret = get_user(ch, ubuf++);
+		if (ret)
+			goto out;
+		read++;
+		cnt--;
+	}
+
+	if (isspace(ch)) {
+		*ppos += read;
+		ret = read;
+		goto out;
+	}
+
+	while (cnt && !isspace(ch)) {
+		if (index < FTRACE_BUFF_MAX)
+			buffer[index++] = ch;
+		else {
+			ret = -EINVAL;
+			goto out;
+		}
+		ret = get_user(ch, ubuf++);
+		if (ret)
+			goto out;
+		read++;
+		cnt--;
+	}
+	buffer[index] = 0;
+
+	/* we allow only one at a time */
+	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
+	if (ret)
+		goto out;
+
+	ftrace_graph_count++;
+
+	file->f_pos += read;
+
+	ret = read;
+ out:
+	mutex_unlock(&graph_lock);
+
+	return ret;
+}
+
+static const struct file_operations ftrace_graph_fops = {
+	.open = ftrace_graph_open,
+	.read = ftrace_graph_read,
+	.write = ftrace_graph_write,
+};
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
+{
+	struct dentry *entry;
 
 	entry = debugfs_create_file("available_filter_functions", 0444,
 				    d_tracer, NULL, &ftrace_avail_fops);
@@ -1546,97 +1583,295 @@ static __init int ftrace_init_debugfs(void)
 		pr_warning("Could not create debugfs "
 			   "'set_ftrace_notrace' entry\n");
 
-	entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
-				    NULL, &ftraced_fops);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
+				    NULL,
+				    &ftrace_graph_fops);
 	if (!entry)
 		pr_warning("Could not create debugfs "
-			   "'ftraced_enabled' entry\n");
+			   "'set_graph_function' entry\n");
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 	return 0;
 }
 
-fs_initcall(ftrace_init_debugfs);
-
-static int __init ftrace_dynamic_init(void)
+static int ftrace_convert_nops(struct module *mod,
+			       unsigned long *start,
+			       unsigned long *end)
 {
-	struct task_struct *p;
+	unsigned long *p;
 	unsigned long addr;
+	unsigned long flags;
+
+	mutex_lock(&ftrace_start_lock);
+	p = start;
+	while (p < end) {
+		addr = ftrace_call_adjust(*p++);
+		/*
+		 * Some architecture linkers will pad between
+		 * the different mcount_loc sections of different
+		 * object files to satisfy alignments.
+		 * Skip any NULL pointers.
+		 */
+		if (!addr)
+			continue;
+		ftrace_record_ip(addr);
+	}
+
+	/* disable interrupts to prevent kstop machine */
+	local_irq_save(flags);
+	ftrace_update_code(mod);
+	local_irq_restore(flags);
+	mutex_unlock(&ftrace_start_lock);
+
+	return 0;
+}
+
+void ftrace_init_module(struct module *mod,
+			unsigned long *start, unsigned long *end)
+{
+	if (ftrace_disabled || start == end)
+		return;
+	ftrace_convert_nops(mod, start, end);
+}
+
+extern unsigned long __start_mcount_loc[];
+extern unsigned long __stop_mcount_loc[];
+
+void __init ftrace_init(void)
+{
+	unsigned long count, addr, flags;
 	int ret;
 
-	addr = (unsigned long)ftrace_record_ip;
+	/* Keep the ftrace pointer to the stub */
+	addr = (unsigned long)ftrace_stub;
 
-	stop_machine(ftrace_dyn_arch_init, &addr, NULL);
+	local_irq_save(flags);
+	ftrace_dyn_arch_init(&addr);
+	local_irq_restore(flags);
 
 	/* ftrace_dyn_arch_init places the return code in addr */
-	if (addr) {
-		ret = (int)addr;
+	if (addr)
 		goto failed;
-	}
 
-	ret = ftrace_dyn_table_alloc();
-	if (ret)
-		goto failed;
+	count = __stop_mcount_loc - __start_mcount_loc;
 
-	p = kthread_run(ftraced, NULL, "ftraced");
-	if (IS_ERR(p)) {
-		ret = -1;
+	ret = ftrace_dyn_table_alloc(count);
+	if (ret)
 		goto failed;
-	}
 
 	last_ftrace_enabled = ftrace_enabled = 1;
-	ftraced_task = p;
 
-	return 0;
+	ret = ftrace_convert_nops(NULL,
+				  __start_mcount_loc,
+				  __stop_mcount_loc);
 
+	return;
 failed:
 	ftrace_disabled = 1;
-	return ret;
 }
 
-core_initcall(ftrace_dynamic_init);
 #else
-# define ftrace_startup()		do { } while (0)
-# define ftrace_shutdown()		do { } while (0)
+
+static int __init ftrace_nodyn_init(void)
+{
+	ftrace_enabled = 1;
+	return 0;
+}
+device_initcall(ftrace_nodyn_init);
+
+static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
+static inline void ftrace_startup_enable(int command) { }
+/* Keep as macros so we do not need to define the commands */
+# define ftrace_startup(command)	do { } while (0)
+# define ftrace_shutdown(command)	do { } while (0)
 # define ftrace_startup_sysctl()	do { } while (0)
 # define ftrace_shutdown_sysctl()	do { } while (0)
-# define ftrace_force_shutdown()	do { } while (0)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
-/**
- * ftrace_kill_atomic - kill ftrace from critical sections
- *
- * This function should be used by panic code. It stops ftrace
- * but in a not so nice way. If you need to simply kill ftrace
- * from a non-atomic section, use ftrace_kill.
- */
-void ftrace_kill_atomic(void)
+static ssize_t
+ftrace_pid_read(struct file *file, char __user *ubuf,
+		       size_t cnt, loff_t *ppos)
 {
-	ftrace_disabled = 1;
-	ftrace_enabled = 0;
-#ifdef CONFIG_DYNAMIC_FTRACE
-	ftraced_suspend = -1;
-#endif
-	clear_ftrace_function();
+	char buf[64];
+	int r;
+
+	if (ftrace_pid_trace == ftrace_swapper_pid)
+		r = sprintf(buf, "swapper tasks\n");
+	else if (ftrace_pid_trace)
+		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
+	else
+		r = sprintf(buf, "no pid\n");
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static void clear_ftrace_swapper(void)
+{
+	struct task_struct *p;
+	int cpu;
+
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		p = idle_task(cpu);
+		clear_tsk_trace_trace(p);
+	}
+	put_online_cpus();
+}
+
+static void set_ftrace_swapper(void)
+{
+	struct task_struct *p;
+	int cpu;
+
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		p = idle_task(cpu);
+		set_tsk_trace_trace(p);
+	}
+	put_online_cpus();
+}
+
+static void clear_ftrace_pid(struct pid *pid)
+{
+	struct task_struct *p;
+
+	do_each_pid_task(pid, PIDTYPE_PID, p) {
+		clear_tsk_trace_trace(p);
+	} while_each_pid_task(pid, PIDTYPE_PID, p);
+	put_pid(pid);
+}
+
+static void set_ftrace_pid(struct pid *pid)
+{
+	struct task_struct *p;
+
+	do_each_pid_task(pid, PIDTYPE_PID, p) {
+		set_tsk_trace_trace(p);
+	} while_each_pid_task(pid, PIDTYPE_PID, p);
+}
+
+static void clear_ftrace_pid_task(struct pid **pid)
+{
+	if (*pid == ftrace_swapper_pid)
+		clear_ftrace_swapper();
+	else
+		clear_ftrace_pid(*pid);
+
+	*pid = NULL;
+}
+
+static void set_ftrace_pid_task(struct pid *pid)
+{
+	if (pid == ftrace_swapper_pid)
+		set_ftrace_swapper();
+	else
+		set_ftrace_pid(pid);
+}
+
+static ssize_t
+ftrace_pid_write(struct file *filp, const char __user *ubuf,
+		   size_t cnt, loff_t *ppos)
+{
+	struct pid *pid;
+	char buf[64];
+	long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&ftrace_start_lock);
+	if (val < 0) {
+		/* disable pid tracing */
+		if (!ftrace_pid_trace)
+			goto out;
+
+		clear_ftrace_pid_task(&ftrace_pid_trace);
+
+	} else {
+		/* swapper task is special */
+		if (!val) {
+			pid = ftrace_swapper_pid;
+			if (pid == ftrace_pid_trace)
+				goto out;
+		} else {
+			pid = find_get_pid(val);
+
+			if (pid == ftrace_pid_trace) {
+				put_pid(pid);
+				goto out;
+			}
+		}
+
+		if (ftrace_pid_trace)
+			clear_ftrace_pid_task(&ftrace_pid_trace);
+
+		if (!pid)
+			goto out;
+
+		ftrace_pid_trace = pid;
+
+		set_ftrace_pid_task(ftrace_pid_trace);
+	}
+
+	/* update the function call */
+	ftrace_update_pid_func();
+	ftrace_startup_enable(0);
+
+ out:
+	mutex_unlock(&ftrace_start_lock);
+
+	return cnt;
 }
 
+static struct file_operations ftrace_pid_fops = {
+	.read = ftrace_pid_read,
+	.write = ftrace_pid_write,
+};
+
+static __init int ftrace_init_debugfs(void)
+{
+	struct dentry *d_tracer;
+	struct dentry *entry;
+
+	d_tracer = tracing_init_dentry();
+	if (!d_tracer)
+		return 0;
+
+	ftrace_init_dyn_debugfs(d_tracer);
+
+	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
+				    NULL, &ftrace_pid_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'set_ftrace_pid' entry\n");
+	return 0;
+}
+
+fs_initcall(ftrace_init_debugfs);
+
 /**
- * ftrace_kill - totally shutdown ftrace
+ * ftrace_kill - kill ftrace
  *
- * This is a safety measure. If something was detected that seems
- * wrong, calling this function will keep ftrace from doing
- * any more modifications, and updates.
- * used when something went wrong.
+ * This function should be used by panic code. It stops ftrace
+ * but in a not so nice way. If you need to simply kill ftrace
+ * from a non-atomic section, use ftrace_kill.
  */
 void ftrace_kill(void)
 {
-	mutex_lock(&ftrace_sysctl_lock);
 	ftrace_disabled = 1;
 	ftrace_enabled = 0;
-
 	clear_ftrace_function();
-	mutex_unlock(&ftrace_sysctl_lock);
-
-	/* Try to totally disable ftrace */
-	ftrace_force_shutdown();
 }
 
 /**
@@ -1658,10 +1893,11 @@ int register_ftrace_function(struct ftrace_ops *ops)
 		return -1;
 
 	mutex_lock(&ftrace_sysctl_lock);
+
 	ret = __register_ftrace_function(ops);
-	ftrace_startup();
-	mutex_unlock(&ftrace_sysctl_lock);
+	ftrace_startup(0);
 
+	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
 
@@ -1677,7 +1913,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 
 	mutex_lock(&ftrace_sysctl_lock);
 	ret = __unregister_ftrace_function(ops);
-	ftrace_shutdown();
+	ftrace_shutdown(0);
 	mutex_unlock(&ftrace_sysctl_lock);
 
 	return ret;
@@ -1725,3 +1961,154 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+static atomic_t ftrace_graph_active;
+
+int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
+{
+	return 0;
+}
+
+/* The callbacks that hook a function */
+trace_func_graph_ret_t ftrace_graph_return =
+			(trace_func_graph_ret_t)ftrace_stub;
+trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
+
+/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
+static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
+{
+	int i;
+	int ret = 0;
+	unsigned long flags;
+	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
+	struct task_struct *g, *t;
+
+	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
+		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
+					    * sizeof(struct ftrace_ret_stack),
+					    GFP_KERNEL);
+		if (!ret_stack_list[i]) {
+			start = 0;
+			end = i;
+			ret = -ENOMEM;
+			goto free;
+		}
+	}
+
+	read_lock_irqsave(&tasklist_lock, flags);
+	do_each_thread(g, t) {
+		if (start == end) {
+			ret = -EAGAIN;
+			goto unlock;
+		}
+
+		if (t->ret_stack == NULL) {
+			t->curr_ret_stack = -1;
+			/* Make sure IRQs see the -1 first: */
+			barrier();
+			t->ret_stack = ret_stack_list[start++];
+			atomic_set(&t->tracing_graph_pause, 0);
+			atomic_set(&t->trace_overrun, 0);
+		}
+	} while_each_thread(g, t);
+
+unlock:
+	read_unlock_irqrestore(&tasklist_lock, flags);
+free:
+	for (i = start; i < end; i++)
+		kfree(ret_stack_list[i]);
+	return ret;
+}
+
+/* Allocate a return stack for each task */
+static int start_graph_tracing(void)
+{
+	struct ftrace_ret_stack **ret_stack_list;
+	int ret;
+
+	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
+				sizeof(struct ftrace_ret_stack *),
+				GFP_KERNEL);
+
+	if (!ret_stack_list)
+		return -ENOMEM;
+
+	do {
+		ret = alloc_retstack_tasklist(ret_stack_list);
+	} while (ret == -EAGAIN);
+
+	kfree(ret_stack_list);
+	return ret;
+}
+
+int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+			trace_func_graph_ent_t entryfunc)
+{
+	int ret = 0;
+
+	mutex_lock(&ftrace_sysctl_lock);
+
+	atomic_inc(&ftrace_graph_active);
+	ret = start_graph_tracing();
+	if (ret) {
+		atomic_dec(&ftrace_graph_active);
+		goto out;
+	}
+
+	ftrace_graph_return = retfunc;
+	ftrace_graph_entry = entryfunc;
+
+	ftrace_startup(FTRACE_START_FUNC_RET);
+
+out:
+	mutex_unlock(&ftrace_sysctl_lock);
+	return ret;
+}
+
+void unregister_ftrace_graph(void)
+{
+	mutex_lock(&ftrace_sysctl_lock);
+
+	atomic_dec(&ftrace_graph_active);
+	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+	ftrace_graph_entry = ftrace_graph_entry_stub;
+	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
+
+	mutex_unlock(&ftrace_sysctl_lock);
+}
+
+/* Allocate a return stack for newly created task */
+void ftrace_graph_init_task(struct task_struct *t)
+{
+	if (atomic_read(&ftrace_graph_active)) {
+		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+				* sizeof(struct ftrace_ret_stack),
+				GFP_KERNEL);
+		if (!t->ret_stack)
+			return;
+		t->curr_ret_stack = -1;
+		atomic_set(&t->tracing_graph_pause, 0);
+		atomic_set(&t->trace_overrun, 0);
+	} else
+		t->ret_stack = NULL;
+}
+
+void ftrace_graph_exit_task(struct task_struct *t)
+{
+	struct ftrace_ret_stack *ret_stack = t->ret_stack;
+
+	t->ret_stack = NULL;
+	/* NULL must become visible to IRQs before we free it: */
+	barrier();
+
+	kfree(ret_stack);
+}
+
+void ftrace_graph_stop(void)
+{
+	ftrace_stop();
+}
+#endif
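
For orientation, here is a minimal sketch (not part of the patch) of how a tracer hooks into the function-graph API this diff introduces. register_ftrace_graph()/unregister_ftrace_graph() and the callback typedefs are taken from the code above; the my_* names and callback bodies are hypothetical, struct ftrace_graph_ret is assumed to be declared alongside struct ftrace_graph_ent in linux/ftrace.h, and the convention that a nonzero return from the entry callback means "trace this function" (the stub returns 0) should be checked against the tracer sources.

/* Hypothetical module-side usage of the API added by this patch. */
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	/* called at function entry; assumed: return nonzero to trace it */
	return 1;
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* called at function return, paired with the entry hook above */
}

static int __init my_tracer_init(void)
{
	/* allocates per-task return stacks, then runs FTRACE_START_FUNC_RET */
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}

static void __exit my_tracer_exit(void)
{
	/* restores the stub callbacks and runs FTRACE_STOP_FUNC_RET */
	unregister_ftrace_graph();
}

The per-pid filtering added earlier in the patch is driven from userspace through the new debugfs file, e.g. writing a pid to set_ftrace_pid under the tracing debugfs directory (a negative value disables pid tracing, 0 selects the swapper/idle tasks), after which ftrace_update_pid_func() swaps ftrace_pid_func into the call path.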