 arch/x86/include/asm/ftrace.h         |   8
 arch/x86/kernel/ftrace.c              |  29
 include/linux/ftrace.h                |  53
 kernel/module.c                       |   2
 kernel/trace/ftrace.c                 | 224
 kernel/trace/trace.c                  |  23
 kernel/trace/trace.h                  |   3
 kernel/trace/trace_boot.c             |   3
 kernel/trace/trace_branch.c           |   3
 kernel/trace/trace_functions.c        |   3
 kernel/trace/trace_functions_return.c |   3
 kernel/trace/trace_irqsoff.c          |   9
 kernel/trace/trace_mmiotrace.c        |   3
 kernel/trace/trace_nop.c              |   3
 kernel/trace/trace_sched_switch.c     |   3
 kernel/trace/trace_sched_wakeup.c     |   3
 kernel/trace/trace_selftest.c         |  70
 kernel/trace/trace_sysprof.c          |   3
 18 files changed, 284 insertions(+), 164 deletions(-)
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 9b6a1fa19e70..2bb43b433e07 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -17,6 +17,14 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
          */
         return addr - 1;
 }
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+struct dyn_arch_ftrace {
+        /* No extra data needed for x86 */
+};
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index fe832738e1e2..762222ad1387 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -166,7 +166,7 @@ static int ftrace_calc_offset(long ip, long addr)
         return (int)(addr - ip);
 }
 
-unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
         static union ftrace_code_union calc;
 
@@ -311,12 +311,12 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
 
 static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
 
-unsigned char *ftrace_nop_replace(void)
+static unsigned char *ftrace_nop_replace(void)
 {
         return ftrace_nop;
 }
 
-int
+static int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
                    unsigned char *new_code)
 {
@@ -349,6 +349,29 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
         return 0;
 }
 
+int ftrace_make_nop(struct module *mod,
+                    struct dyn_ftrace *rec, unsigned long addr)
+{
+        unsigned char *new, *old;
+        unsigned long ip = rec->ip;
+
+        old = ftrace_call_replace(ip, addr);
+        new = ftrace_nop_replace();
+
+        return ftrace_modify_code(rec->ip, old, new);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+        unsigned char *new, *old;
+        unsigned long ip = rec->ip;
+
+        old = ftrace_nop_replace();
+        new = ftrace_call_replace(ip, addr);
+
+        return ftrace_modify_code(rec->ip, old, new);
+}
+
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
         unsigned long ip = (unsigned long)(&ftrace_call);
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 4fbc4a8b86a5..166a2070ef65 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -74,6 +74,9 @@ static inline void ftrace_start(void) { }
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_DYNAMIC_FTRACE
+/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */
+#include <asm/ftrace.h>
+
 enum {
         FTRACE_FL_FREE          = (1 << 0),
         FTRACE_FL_FAILED        = (1 << 1),
@@ -88,6 +91,7 @@ struct dyn_ftrace {
         struct list_head        list;
         unsigned long           ip; /* address of mcount call-site */
         unsigned long           flags;
+        struct dyn_arch_ftrace  arch;
 };
 
 int ftrace_force_update(void);
@@ -95,22 +99,40 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset);
 
 /* defined in arch */
 extern int ftrace_ip_converted(unsigned long ip);
-extern unsigned char *ftrace_nop_replace(void);
-extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr);
 extern int ftrace_dyn_arch_init(void *data);
 extern int ftrace_update_ftrace_func(ftrace_func_t func);
 extern void ftrace_caller(void);
 extern void ftrace_call(void);
 extern void mcount_call(void);
 
-/* May be defined in arch */
-extern int ftrace_arch_read_dyn_info(char *buf, int size);
+/**
+ * ftrace_make_nop - convert code into top
+ * @mod: module structure if called by module load initialization
+ * @rec: the mcount call site record
+ * @addr: the address that the call site should be calling
+ *
+ * This is a very sensitive operation and great care needs
+ * to be taken by the arch. The operation should carefully
+ * read the location, check to see if what is read is indeed
+ * what we expect it to be, and then on success of the compare,
+ * it should write to the location.
+ *
+ * The code segment at @rec->ip should be a caller to @addr
+ *
+ * Return must be:
+ *  0 on success
+ *  -EFAULT on error reading the location
+ *  -EINVAL on a failed compare of the contents
+ *  -EPERM on error writing to the location
+ * Any other value will be considered a failure.
+ */
+extern int ftrace_make_nop(struct module *mod,
+                           struct dyn_ftrace *rec, unsigned long addr);
 
 /**
- * ftrace_modify_code - modify code segment
- * @ip: the address of the code segment
- * @old_code: the contents of what is expected to be there
- * @new_code: the code to patch in
+ * ftrace_make_call - convert a nop call site into a call to addr
+ * @rec: the mcount call site record
+ * @addr: the address that the call site should call
  *
  * This is a very sensitive operation and great care needs
  * to be taken by the arch. The operation should carefully
@@ -118,6 +140,8 @@ extern int ftrace_arch_read_dyn_info(char *buf, int size);
  * what we expect it to be, and then on success of the compare,
  * it should write to the location.
  *
+ * The code segment at @rec->ip should be a nop
+ *
  * Return must be:
  *  0 on success
  *  -EFAULT on error reading the location
@@ -125,8 +149,11 @@ extern int ftrace_arch_read_dyn_info(char *buf, int size);
  *  -EPERM on error writing to the location
  * Any other value will be considered a failure.
  */
-extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
-                              unsigned char *new_code);
+extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
+
+
+/* May be defined in arch */
+extern int ftrace_arch_read_dyn_info(char *buf, int size);
 
 extern int skip_trace(unsigned long ip);
 
@@ -259,11 +286,13 @@ static inline void ftrace_dump(void) { }
 
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 extern void ftrace_init(void);
-extern void ftrace_init_module(unsigned long *start, unsigned long *end);
+extern void ftrace_init_module(struct module *mod,
+                               unsigned long *start, unsigned long *end);
 #else
 static inline void ftrace_init(void) { }
 static inline void
-ftrace_init_module(unsigned long *start, unsigned long *end) { }
+ftrace_init_module(struct module *mod,
+                   unsigned long *start, unsigned long *end) { }
 #endif
 
 
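The kernel-doc above, together with the x86 hunks earlier in this diff, spells out the whole arch contract: read the call site, verify that the bytes there are what is expected, patch them, and return 0, -EFAULT, -EINVAL or -EPERM. For reference, a minimal arch-side sketch built on a private ftrace_modify_code()-style helper, mirroring the x86 implementation shown above rather than defining any new interface, looks roughly like this:

/* Sketch only: follows the pattern of arch/x86/kernel/ftrace.c in this diff. */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
        /* expect a call to addr at rec->ip, replace it with the arch nop */
        unsigned char *old = ftrace_call_replace(rec->ip, addr);
        unsigned char *new = ftrace_nop_replace();

        /* helper compares and patches, returning the codes listed in the kernel-doc */
        return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        /* expect the arch nop at rec->ip, replace it with a call to addr */
        unsigned char *old = ftrace_nop_replace();
        unsigned char *new = ftrace_call_replace(rec->ip, addr);

        return ftrace_modify_code(rec->ip, old, new);
}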
diff --git a/kernel/module.c b/kernel/module.c
index 1f4cc00e0c20..69791274e899 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2201,7 +2201,7 @@ static noinline struct module *load_module(void __user *umod,
         /* sechdrs[0].sh_size is always zero */
         mseg = section_objs(hdr, sechdrs, secstrings, "__mcount_loc",
                             sizeof(*mseg), &num_mcount);
-        ftrace_init_module(mseg, mseg + num_mcount);
+        ftrace_init_module(mod, mseg, mseg + num_mcount);
 
         err = module_finalize(hdr, sechdrs, mod);
         if (err < 0)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 54cb9a7d15e5..b42ec1de546b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -334,7 +334,7 @@ ftrace_record_ip(unsigned long ip)
 {
         struct dyn_ftrace *rec;
 
-        if (!ftrace_enabled || ftrace_disabled)
+        if (ftrace_disabled)
                 return NULL;
 
         rec = ftrace_alloc_dyn_node(ip);
@@ -348,107 +348,129 @@ ftrace_record_ip(unsigned long ip)
         return rec;
 }
 
+static void print_ip_ins(const char *fmt, unsigned char *p)
+{
+        int i;
+
+        printk(KERN_CONT "%s", fmt);
+
+        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
+                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
+}
+
+static void ftrace_bug(int failed, unsigned long ip)
+{
+        switch (failed) {
+        case -EFAULT:
+                FTRACE_WARN_ON_ONCE(1);
+                pr_info("ftrace faulted on modifying ");
+                print_ip_sym(ip);
+                break;
+        case -EINVAL:
+                FTRACE_WARN_ON_ONCE(1);
+                pr_info("ftrace failed to modify ");
+                print_ip_sym(ip);
+                print_ip_ins(" actual: ", (unsigned char *)ip);
+                printk(KERN_CONT "\n");
+                break;
+        case -EPERM:
+                FTRACE_WARN_ON_ONCE(1);
+                pr_info("ftrace faulted on writing ");
+                print_ip_sym(ip);
+                break;
+        default:
+                FTRACE_WARN_ON_ONCE(1);
+                pr_info("ftrace faulted on unknown error ");
+                print_ip_sym(ip);
+        }
+}
+
 #define FTRACE_ADDR ((long)(ftrace_caller))
 
 static int
-__ftrace_replace_code(struct dyn_ftrace *rec,
-                      unsigned char *old, unsigned char *new, int enable)
+__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
         unsigned long ip, fl;
 
         ip = rec->ip;
 
-        if (ftrace_filtered && enable) {
+        /*
+         * If this record is not to be traced and
+         * it is not enabled then do nothing.
+         *
+         * If this record is not to be traced and
+         * it is enabled then disabled it.
+         *
+         */
+        if (rec->flags & FTRACE_FL_NOTRACE) {
+                if (rec->flags & FTRACE_FL_ENABLED)
+                        rec->flags &= ~FTRACE_FL_ENABLED;
+                else
+                        return 0;
+
+        } else if (ftrace_filtered && enable) {
                 /*
-                 * If filtering is on:
-                 *
-                 * If this record is set to be filtered and
-                 * is enabled then do nothing.
-                 *
-                 * If this record is set to be filtered and
-                 * it is not enabled, enable it.
-                 *
-                 * If this record is not set to be filtered
-                 * and it is not enabled do nothing.
-                 *
-                 * If this record is set not to trace then
-                 * do nothing.
-                 *
-                 * If this record is set not to trace and
-                 * it is enabled then disable it.
-                 *
-                 * If this record is not set to be filtered and
-                 * it is enabled, disable it.
+                 * Filtering is on:
                  */
 
-                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
-                                   FTRACE_FL_ENABLED);
+                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
 
-                if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
-                    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
-                    !fl || (fl == FTRACE_FL_NOTRACE))
+                /* Record is filtered and enabled, do nothing */
+                if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
                         return 0;
 
-                /*
-                 * If it is enabled disable it,
-                 * otherwise enable it!
-                 */
-                if (fl & FTRACE_FL_ENABLED) {
-                        /* swap new and old */
-                        new = old;
-                        old = ftrace_call_replace(ip, FTRACE_ADDR);
+                /* Record is not filtered and is not enabled do nothing */
+                if (!fl)
+                        return 0;
+
+                /* Record is not filtered but enabled, disable it */
+                if (fl == FTRACE_FL_ENABLED)
                         rec->flags &= ~FTRACE_FL_ENABLED;
-                } else {
-                        new = ftrace_call_replace(ip, FTRACE_ADDR);
+                else
+                        /* Otherwise record is filtered but not enabled, enable it */
                         rec->flags |= FTRACE_FL_ENABLED;
-                }
         } else {
+                /* Disable or not filtered */
 
                 if (enable) {
-                        /*
-                         * If this record is set not to trace and is
-                         * not enabled, do nothing.
-                         */
-                        fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
-                        if (fl == FTRACE_FL_NOTRACE)
-                                return 0;
-
-                        new = ftrace_call_replace(ip, FTRACE_ADDR);
-                } else
-                        old = ftrace_call_replace(ip, FTRACE_ADDR);
-
-                if (enable) {
+                        /* if record is enabled, do nothing */
                         if (rec->flags & FTRACE_FL_ENABLED)
                                 return 0;
+
                         rec->flags |= FTRACE_FL_ENABLED;
+
                 } else {
+
+                        /* if record is not enabled do nothing */
                         if (!(rec->flags & FTRACE_FL_ENABLED))
                                 return 0;
+
                         rec->flags &= ~FTRACE_FL_ENABLED;
                 }
         }
 
-        return ftrace_modify_code(ip, old, new);
+        if (rec->flags & FTRACE_FL_ENABLED)
+                return ftrace_make_call(rec, FTRACE_ADDR);
+        else
+                return ftrace_make_nop(NULL, rec, FTRACE_ADDR);
 }
 
 static void ftrace_replace_code(int enable)
 {
         int i, failed;
-        unsigned char *new = NULL, *old = NULL;
         struct dyn_ftrace *rec;
         struct ftrace_page *pg;
 
-        if (enable)
-                old = ftrace_nop_replace();
-        else
-                new = ftrace_nop_replace();
-
         for (pg = ftrace_pages_start; pg; pg = pg->next) {
                 for (i = 0; i < pg->index; i++) {
                         rec = &pg->records[i];
 
-                        /* don't modify code that has already faulted */
-                        if (rec->flags & FTRACE_FL_FAILED)
+                        /*
+                         * Skip over free records and records that have
+                         * failed.
+                         */
+                        if (rec->flags & FTRACE_FL_FREE ||
+                            rec->flags & FTRACE_FL_FAILED)
                                 continue;
 
                         /* ignore updates to this record's mcount site */
@@ -459,68 +481,30 @@ static void ftrace_replace_code(int enable)
                                 unfreeze_record(rec);
                         }
 
-                        failed = __ftrace_replace_code(rec, old, new, enable);
+                        failed = __ftrace_replace_code(rec, enable);
                         if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                                 rec->flags |= FTRACE_FL_FAILED;
                                 if ((system_state == SYSTEM_BOOTING) ||
                                     !core_kernel_text(rec->ip)) {
                                         ftrace_free_rec(rec);
-                                }
+                                } else
+                                        ftrace_bug(failed, rec->ip);
                         }
                 }
         }
 }
 
-static void print_ip_ins(const char *fmt, unsigned char *p)
-{
-        int i;
-
-        printk(KERN_CONT "%s", fmt);
-
-        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
-                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
-}
-
 static int
-ftrace_code_disable(struct dyn_ftrace *rec)
+ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
 {
         unsigned long ip;
-        unsigned char *nop, *call;
         int ret;
 
         ip = rec->ip;
 
-        nop = ftrace_nop_replace();
-        call = ftrace_call_replace(ip, mcount_addr);
-
-        ret = ftrace_modify_code(ip, call, nop);
+        ret = ftrace_make_nop(mod, rec, mcount_addr);
         if (ret) {
-                switch (ret) {
-                case -EFAULT:
-                        FTRACE_WARN_ON_ONCE(1);
-                        pr_info("ftrace faulted on modifying ");
-                        print_ip_sym(ip);
-                        break;
-                case -EINVAL:
-                        FTRACE_WARN_ON_ONCE(1);
-                        pr_info("ftrace failed to modify ");
-                        print_ip_sym(ip);
-                        print_ip_ins(" expected: ", call);
-                        print_ip_ins(" actual: ", (unsigned char *)ip);
-                        print_ip_ins(" replace: ", nop);
-                        printk(KERN_CONT "\n");
-                        break;
-                case -EPERM:
-                        FTRACE_WARN_ON_ONCE(1);
-                        pr_info("ftrace faulted on writing ");
-                        print_ip_sym(ip);
-                        break;
-                default:
-                        FTRACE_WARN_ON_ONCE(1);
-                        pr_info("ftrace faulted on unknown error ");
-                        print_ip_sym(ip);
-                }
-
+                ftrace_bug(ret, ip);
                 rec->flags |= FTRACE_FL_FAILED;
                 return 0;
         }
@@ -560,8 +544,7 @@ static void ftrace_startup(void)
 
         mutex_lock(&ftrace_start_lock);
         ftrace_start_up++;
-        if (ftrace_start_up == 1)
-                command |= FTRACE_ENABLE_CALLS;
+        command |= FTRACE_ENABLE_CALLS;
 
         if (saved_ftrace_func != ftrace_trace_function) {
                 saved_ftrace_func = ftrace_trace_function;
@@ -639,7 +622,7 @@ static cycle_t ftrace_update_time;
 static unsigned long ftrace_update_cnt;
 unsigned long ftrace_update_tot_cnt;
 
-static int ftrace_update_code(void)
+static int ftrace_update_code(struct module *mod)
 {
         struct dyn_ftrace *p, *t;
         cycle_t start, stop;
@@ -656,7 +639,7 @@ static int ftrace_update_code(void)
                 list_del_init(&p->list);
 
                 /* convert record (i.e, patch mcount-call with NOP) */
-                if (ftrace_code_disable(p)) {
+                if (ftrace_code_disable(mod, p)) {
                         p->flags |= FTRACE_FL_CONVERTED;
                         ftrace_update_cnt++;
                 } else
@@ -1211,7 +1194,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 
         mutex_lock(&ftrace_sysctl_lock);
         mutex_lock(&ftrace_start_lock);
-        if (iter->filtered && ftrace_start_up && ftrace_enabled)
+        if (ftrace_start_up && ftrace_enabled)
                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
         mutex_unlock(&ftrace_start_lock);
         mutex_unlock(&ftrace_sysctl_lock);
@@ -1298,7 +1281,8 @@ static __init int ftrace_init_debugfs(void)
 
 fs_initcall(ftrace_init_debugfs);
 
-static int ftrace_convert_nops(unsigned long *start,
+static int ftrace_convert_nops(struct module *mod,
+                               unsigned long *start,
                                unsigned long *end)
 {
         unsigned long *p;
@@ -1309,23 +1293,32 @@ static int ftrace_convert_nops(unsigned long *start,
         p = start;
         while (p < end) {
                 addr = ftrace_call_adjust(*p++);
+                /*
+                 * Some architecture linkers will pad between
+                 * the different mcount_loc sections of different
+                 * object files to satisfy alignments.
+                 * Skip any NULL pointers.
+                 */
+                if (!addr)
+                        continue;
                 ftrace_record_ip(addr);
         }
 
         /* disable interrupts to prevent kstop machine */
         local_irq_save(flags);
-        ftrace_update_code();
+        ftrace_update_code(mod);
         local_irq_restore(flags);
         mutex_unlock(&ftrace_start_lock);
 
         return 0;
 }
 
-void ftrace_init_module(unsigned long *start, unsigned long *end)
+void ftrace_init_module(struct module *mod,
+                        unsigned long *start, unsigned long *end)
 {
         if (ftrace_disabled || start == end)
                 return;
-        ftrace_convert_nops(start, end);
+        ftrace_convert_nops(mod, start, end);
 }
 
 extern unsigned long __start_mcount_loc[];
@@ -1355,7 +1348,8 @@ void __init ftrace_init(void)
 
         last_ftrace_enabled = ftrace_enabled = 1;
 
-        ret = ftrace_convert_nops(__start_mcount_loc,
+        ret = ftrace_convert_nops(NULL,
+                                  __start_mcount_loc,
                                   __stop_mcount_loc);
 
         return;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4a904623e05d..396fda034e3f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1051,7 +1051,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
          * Need to use raw, since this must be called before the
          * recursive protection is performed.
          */
-        raw_local_irq_save(flags);
+        local_irq_save(flags);
         cpu = raw_smp_processor_id();
         data = tr->data[cpu];
         disabled = atomic_inc_return(&data->disabled);
@@ -1062,7 +1062,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
         }
 
         atomic_dec(&data->disabled);
-        raw_local_irq_restore(flags);
+        local_irq_restore(flags);
 }
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
@@ -2638,8 +2638,11 @@ static int tracing_set_tracer(char *buf)
                 current_trace->reset(tr);
 
         current_trace = t;
-        if (t->init)
-                t->init(tr);
+        if (t->init) {
+                ret = t->init(tr);
+                if (ret)
+                        goto out;
+        }
 
         trace_branch_enable(tr);
  out:
@@ -2655,6 +2658,9 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
         char buf[max_tracer_type_len+1];
         int i;
         size_t ret;
+        int err;
+
+        ret = cnt;
 
         if (cnt > max_tracer_type_len)
                 cnt = max_tracer_type_len;
@@ -2668,12 +2674,11 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
                 buf[i] = 0;
 
-        ret = tracing_set_tracer(buf);
-        if (!ret)
-                ret = cnt;
+        err = tracing_set_tracer(buf);
+        if (err)
+                return err;
 
-        if (ret > 0)
-                filp->f_pos += ret;
+        filp->f_pos += ret;
 
         return ret;
 }
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 790ea8c0e1f3..cdbd5cc22be8 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -264,7 +264,8 @@ enum print_line_t {
  */
 struct tracer {
         const char              *name;
-        void                    (*init)(struct trace_array *tr);
+        /* Your tracer should raise a warning if init fails */
+        int                     (*init)(struct trace_array *tr);
         void                    (*reset)(struct trace_array *tr);
         void                    (*start)(struct trace_array *tr);
         void                    (*stop)(struct trace_array *tr);
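With init now returning int, a tracer can report a failed setup and tracing_set_tracer() bails out instead of running half-initialized; the per-tracer hunks below all simply return 0. A hypothetical tracer under the new prototype (example names only, not part of this commit) would look like:

static struct trace_array *example_trace;

static int example_tracer_init(struct trace_array *tr)
{
        int cpu;

        example_trace = tr;
        for_each_online_cpu(cpu)
                tracing_reset(tr, cpu);

        /* return a negative errno here if setup fails; callers now check it */
        return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
        /* undo whatever init set up */
}

static struct tracer example_tracer __read_mostly =
{
        .name   = "example",
        .init   = example_tracer_init,
        .reset  = example_tracer_reset,
};

Registration via register_tracer() is unchanged; only the init prototype differs.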
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index cb333b7fd113..a4fa2c57e34e 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -47,7 +47,7 @@ static void reset_boot_trace(struct trace_array *tr)
                 tracing_reset(tr, cpu);
 }
 
-static void boot_trace_init(struct trace_array *tr)
+static int boot_trace_init(struct trace_array *tr)
 {
         int cpu;
         boot_trace = tr;
@@ -56,6 +56,7 @@ static void boot_trace_init(struct trace_array *tr)
                 tracing_reset(tr, cpu);
 
         tracing_sched_switch_assign_trace(tr);
+        return 0;
 }
 
 static enum print_line_t
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 85265553918f..44bd39539d61 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -125,7 +125,7 @@ static void stop_branch_trace(struct trace_array *tr)
         disable_branch_tracing();
 }
 
-static void branch_trace_init(struct trace_array *tr)
+static int branch_trace_init(struct trace_array *tr)
 {
         int cpu;
 
@@ -133,6 +133,7 @@ static void branch_trace_init(struct trace_array *tr)
                 tracing_reset(tr, cpu);
 
         start_branch_trace(tr);
+        return 0;
 }
 
 static void branch_trace_reset(struct trace_array *tr)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 8693b7a0a5b2..e74f6d0a3216 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -42,9 +42,10 @@ static void stop_function_trace(struct trace_array *tr)
         tracing_stop_cmdline_record();
 }
 
-static void function_trace_init(struct trace_array *tr)
+static int function_trace_init(struct trace_array *tr)
 {
         start_function_trace(tr);
+        return 0;
 }
 
 static void function_trace_reset(struct trace_array *tr)
diff --git a/kernel/trace/trace_functions_return.c b/kernel/trace/trace_functions_return.c
index 7680b21537dd..61185f756a13 100644
--- a/kernel/trace/trace_functions_return.c
+++ b/kernel/trace/trace_functions_return.c
@@ -24,13 +24,14 @@ static void stop_return_trace(struct trace_array *tr)
         unregister_ftrace_return();
 }
 
-static void return_trace_init(struct trace_array *tr)
+static int return_trace_init(struct trace_array *tr)
 {
         int cpu;
         for_each_online_cpu(cpu)
                 tracing_reset(tr, cpu);
 
         start_return_trace(tr);
+        return 0;
 }
 
 static void return_trace_reset(struct trace_array *tr)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index d919d4eaa7cc..7c2e326bbc8b 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -416,11 +416,12 @@ static void irqsoff_tracer_close(struct trace_iterator *iter)
 }
 
 #ifdef CONFIG_IRQSOFF_TRACER
-static void irqsoff_tracer_init(struct trace_array *tr)
+static int irqsoff_tracer_init(struct trace_array *tr)
 {
         trace_type = TRACER_IRQS_OFF;
 
         __irqsoff_tracer_init(tr);
+        return 0;
 }
 static struct tracer irqsoff_tracer __read_mostly =
 {
@@ -442,11 +443,12 @@ static struct tracer irqsoff_tracer __read_mostly =
 #endif
 
 #ifdef CONFIG_PREEMPT_TRACER
-static void preemptoff_tracer_init(struct trace_array *tr)
+static int preemptoff_tracer_init(struct trace_array *tr)
 {
         trace_type = TRACER_PREEMPT_OFF;
 
         __irqsoff_tracer_init(tr);
+        return 0;
 }
 
 static struct tracer preemptoff_tracer __read_mostly =
@@ -471,11 +473,12 @@ static struct tracer preemptoff_tracer __read_mostly =
 #if defined(CONFIG_IRQSOFF_TRACER) && \
         defined(CONFIG_PREEMPT_TRACER)
 
-static void preemptirqsoff_tracer_init(struct trace_array *tr)
+static int preemptirqsoff_tracer_init(struct trace_array *tr)
 {
         trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
 
         __irqsoff_tracer_init(tr);
+        return 0;
 }
 
 static struct tracer preemptirqsoff_tracer __read_mostly =
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 51bcf370215e..433d650eda9f 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -30,13 +30,14 @@ static void mmio_reset_data(struct trace_array *tr)
                 tracing_reset(tr, cpu);
 }
 
-static void mmio_trace_init(struct trace_array *tr)
+static int mmio_trace_init(struct trace_array *tr)
 {
         pr_debug("in %s\n", __func__);
         mmio_trace_array = tr;
 
         mmio_reset_data(tr);
         enable_mmiotrace();
+        return 0;
 }
 
 static void mmio_trace_reset(struct trace_array *tr)
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c
index 2ef1d227e7d8..0e77415caed3 100644
--- a/kernel/trace/trace_nop.c
+++ b/kernel/trace/trace_nop.c
@@ -24,7 +24,7 @@ static void stop_nop_trace(struct trace_array *tr)
         /* Nothing to do! */
 }
 
-static void nop_trace_init(struct trace_array *tr)
+static int nop_trace_init(struct trace_array *tr)
 {
         int cpu;
         ctx_trace = tr;
@@ -33,6 +33,7 @@ static void nop_trace_init(struct trace_array *tr)
                 tracing_reset(tr, cpu);
 
         start_nop_trace(tr);
+        return 0;
 }
 
 static void nop_trace_reset(struct trace_array *tr)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index be35bdfe2e38..863390557b44 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -206,10 +206,11 @@ static void stop_sched_trace(struct trace_array *tr)
         tracing_stop_sched_switch_record();
 }
 
-static void sched_switch_trace_init(struct trace_array *tr)
+static int sched_switch_trace_init(struct trace_array *tr)
 {
         ctx_trace = tr;
         start_sched_trace(tr);
+        return 0;
 }
 
 static void sched_switch_trace_reset(struct trace_array *tr)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 983f2b1478c9..0067b49746c1 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -331,10 +331,11 @@ static void stop_wakeup_tracer(struct trace_array *tr)
         unregister_trace_sched_wakeup(probe_wakeup);
 }
 
-static void wakeup_tracer_init(struct trace_array *tr)
+static int wakeup_tracer_init(struct trace_array *tr)
 {
         wakeup_trace = tr;
         start_wakeup_tracer(tr);
+        return 0;
 }
 
 static void wakeup_tracer_reset(struct trace_array *tr)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 24e6e075e6d6..88c8eb70f54a 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -52,7 +52,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
         int cpu, ret = 0;
 
         /* Don't allow flipping of max traces now */
-        raw_local_irq_save(flags);
+        local_irq_save(flags);
         __raw_spin_lock(&ftrace_max_lock);
 
         cnt = ring_buffer_entries(tr->buffer);
@@ -63,7 +63,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
                         break;
         }
         __raw_spin_unlock(&ftrace_max_lock);
-        raw_local_irq_restore(flags);
+        local_irq_restore(flags);
 
         if (count)
                 *count = cnt;
@@ -71,6 +71,11 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
         return ret;
 }
 
+static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
+{
+        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
+               trace->name, init_ret);
+}
 #ifdef CONFIG_FUNCTION_TRACER
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -111,7 +116,11 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
         ftrace_set_filter(func_name, strlen(func_name), 1);
 
         /* enable tracing */
-        trace->init(tr);
+        ret = trace->init(tr);
+        if (ret) {
+                warn_failed_init_tracer(trace, ret);
+                goto out;
+        }
 
         /* Sleep for a 1/10 of a second */
         msleep(100);
@@ -181,7 +190,12 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
         ftrace_enabled = 1;
         tracer_enabled = 1;
 
-        trace->init(tr);
+        ret = trace->init(tr);
+        if (ret) {
+                warn_failed_init_tracer(trace, ret);
+                goto out;
+        }
+
         /* Sleep for a 1/10 of a second */
         msleep(100);
         /* stop the tracing. */
@@ -223,7 +237,12 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
         int ret;
 
         /* start the tracing */
-        trace->init(tr);
+        ret = trace->init(tr);
+        if (ret) {
+                warn_failed_init_tracer(trace, ret);
+                return ret;
+        }
+
         /* reset the max latency */
         tracing_max_latency = 0;
         /* disable interrupts for a bit */
@@ -272,7 +291,12 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
         }
 
         /* start the tracing */
-        trace->init(tr);
+        ret = trace->init(tr);
+        if (ret) {
+                warn_failed_init_tracer(trace, ret);
+                return ret;
+        }
+
         /* reset the max latency */
         tracing_max_latency = 0;
         /* disable preemption for a bit */
@@ -321,7 +345,11 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
         }
 
         /* start the tracing */
-        trace->init(tr);
+        ret = trace->init(tr);
+        if (ret) {
+                warn_failed_init_tracer(trace, ret);
+                goto out;
+        }
 
         /* reset the max latency */
         tracing_max_latency = 0;
@@ -449,7 +477,12 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
         wait_for_completion(&isrt);
 
         /* start the tracing */
-        trace->init(tr);
+        ret = trace->init(tr);
+        if (ret) {
+                warn_failed_init_tracer(trace, ret);
+                return ret;
+        }
+
         /* reset the max latency */
         tracing_max_latency = 0;
 
@@ -505,7 +538,12 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
         int ret;
 
         /* start the tracing */
-        trace->init(tr);
+        ret = trace->init(tr);
+        if (ret) {
+                warn_failed_init_tracer(trace, ret);
+                return ret;
+        }
+
         /* Sleep for a 1/10 of a second */
         msleep(100);
         /* stop the tracing. */
@@ -532,7 +570,12 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
         int ret;
 
         /* start the tracing */
-        trace->init(tr);
+        ret = trace->init(tr);
+        if (ret) {
+                warn_failed_init_tracer(trace, ret);
+                return 0;
+        }
+
         /* Sleep for a 1/10 of a second */
         msleep(100);
         /* stop the tracing. */
@@ -554,7 +597,12 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
         int ret;
 
         /* start the tracing */
-        trace->init(tr);
+        ret = trace->init(tr);
+        if (ret) {
+                warn_failed_init_tracer(trace, ret);
+                return ret;
+        }
+
         /* Sleep for a 1/10 of a second */
         msleep(100);
         /* stop the tracing. */
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 05f753422aea..54960edb96d0 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -261,11 +261,12 @@ static void stop_stack_trace(struct trace_array *tr)
         mutex_unlock(&sample_timer_lock);
 }
 
-static void stack_trace_init(struct trace_array *tr)
+static int stack_trace_init(struct trace_array *tr)
 {
         sysprof_trace = tr;
 
         start_stack_trace(tr);
+        return 0;
 }
 
 static void stack_trace_reset(struct trace_array *tr)