Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                        |    4
-rw-r--r--  kernel/marker.c                        |   80
-rw-r--r--  kernel/sysctl.c                        |   10
-rw-r--r--  kernel/trace/Kconfig                   |   59
-rw-r--r--  kernel/trace/Makefile                  |    7
-rw-r--r--  kernel/trace/ftrace.c                  |   69
-rw-r--r--  kernel/trace/ring_buffer.c             |  298
-rw-r--r--  kernel/trace/trace.c                   |  490
-rw-r--r--  kernel/trace/trace.h                   |  154
-rw-r--r--  kernel/trace/trace_boot.c              |  163
-rw-r--r--  kernel/trace/trace_branch.c            |  320
-rw-r--r--  kernel/trace/trace_functions.c         |   15
-rw-r--r--  kernel/trace/trace_functions_return.c  |   82
-rw-r--r--  kernel/trace/trace_irqsoff.c           |   52
-rw-r--r--  kernel/trace/trace_mmiotrace.c         |   22
-rw-r--r--  kernel/trace/trace_nop.c               |   16
-rw-r--r--  kernel/trace/trace_sched_switch.c      |  103
-rw-r--r--  kernel/trace/trace_sched_wakeup.c      |   67
-rw-r--r--  kernel/trace/trace_selftest.c          |  105
-rw-r--r--  kernel/trace/trace_stack.c             |    8
-rw-r--r--  kernel/trace/trace_sysprof.c           |   16
-rw-r--r--  kernel/tracepoint.c                    |  261

22 files changed, 1778 insertions, 623 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 9a3ec66a9d8..af3be57acbb 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -23,6 +23,10 @@ CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
 CFLAGS_REMOVE_sched.o = -mno-spe -pg
 endif
+ifdef CONFIG_FUNCTION_RET_TRACER
+CFLAGS_REMOVE_extable.o = -pg # For __kernel_text_address()
+CFLAGS_REMOVE_module.o = -pg # For __module_text_address()
+endif
 
 obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
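Why extable.o and module.o lose -pg here: the return tracer calls __kernel_text_address() and __module_text_address() from inside the tracing path, and if those helpers were themselves compiled with mcount calls, the tracer would re-enter itself. A minimal userspace sketch of the same hazard, using -finstrument-functions as a stand-in for -pg/mcount (build with `gcc -finstrument-functions sketch.c`; all names here are illustrative, nothing below is kernel code):

```c
#include <stdio.h>

static __thread int in_tracer;	/* per-thread recursion guard */

/* Called on every instrumented function entry. If this hook, or any
 * helper it calls, were itself instrumented, each entry would recurse
 * forever; the attribute plays the role that dropping -pg plays for
 * extable.o/module.o in the Makefile above. */
__attribute__((no_instrument_function))
void __cyg_profile_func_enter(void *fn, void *call_site)
{
	if (in_tracer)
		return;
	in_tracer = 1;
	fprintf(stderr, "enter %p from %p\n", fn, call_site);
	in_tracer = 0;
}

__attribute__((no_instrument_function))
void __cyg_profile_func_exit(void *fn, void *call_site)
{
}

static int add(int a, int b) { return a + b; }

int main(void)
{
	return add(2, 3) == 5 ? 0 : 1;
}
```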
diff --git a/kernel/marker.c b/kernel/marker.c
index e9c6b2bc940..2898b647d41 100644
--- a/kernel/marker.c
+++ b/kernel/marker.c
@@ -43,6 +43,7 @@ static DEFINE_MUTEX(markers_mutex);
  */
 #define MARKER_HASH_BITS 6
 #define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS)
+static struct hlist_head marker_table[MARKER_TABLE_SIZE];
 
 /*
  * Note about RCU :
@@ -64,11 +65,10 @@ struct marker_entry {
 	void *oldptr;
 	int rcu_pending;
 	unsigned char ptype:1;
+	unsigned char format_allocated:1;
 	char name[0];	/* Contains name'\0'format'\0' */
 };
 
-static struct hlist_head marker_table[MARKER_TABLE_SIZE];
-
 /**
  * __mark_empty_function - Empty probe callback
  * @probe_private: probe private data
@@ -157,7 +157,7 @@ EXPORT_SYMBOL_GPL(marker_probe_cb);
  *
  * Should be connected to markers "MARK_NOARGS".
  */
-void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
+static void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
 {
 	va_list args;	/* not initialized */
 	char ptype;
@@ -197,7 +197,6 @@ void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
 	}
 	rcu_read_unlock_sched();
 }
-EXPORT_SYMBOL_GPL(marker_probe_cb_noarg);
 
 static void free_old_closure(struct rcu_head *head)
 {
@@ -416,6 +415,7 @@ static struct marker_entry *add_marker(const char *name, const char *format)
 	e->single.probe_private = NULL;
 	e->multi = NULL;
 	e->ptype = 0;
+	e->format_allocated = 0;
 	e->refcount = 0;
 	e->rcu_pending = 0;
 	hlist_add_head(&e->hlist, head);
@@ -447,6 +447,8 @@ static int remove_marker(const char *name)
 	if (e->single.func != __mark_empty_function)
 		return -EBUSY;
 	hlist_del(&e->hlist);
+	if (e->format_allocated)
+		kfree(e->format);
 	/* Make sure the call_rcu has been executed */
 	if (e->rcu_pending)
 		rcu_barrier_sched();
@@ -457,57 +459,34 @@ static int remove_marker(const char *name)
 /*
  * Set the mark_entry format to the format found in the element.
  */
-static int marker_set_format(struct marker_entry **entry, const char *format)
+static int marker_set_format(struct marker_entry *entry, const char *format)
 {
-	struct marker_entry *e;
-	size_t name_len = strlen((*entry)->name) + 1;
-	size_t format_len = strlen(format) + 1;
-
-
-	e = kmalloc(sizeof(struct marker_entry) + name_len + format_len,
-			GFP_KERNEL);
-	if (!e)
+	entry->format = kstrdup(format, GFP_KERNEL);
+	if (!entry->format)
 		return -ENOMEM;
-	memcpy(&e->name[0], (*entry)->name, name_len);
-	e->format = &e->name[name_len];
-	memcpy(e->format, format, format_len);
-	if (strcmp(e->format, MARK_NOARGS) == 0)
-		e->call = marker_probe_cb_noarg;
-	else
-		e->call = marker_probe_cb;
-	e->single = (*entry)->single;
-	e->multi = (*entry)->multi;
-	e->ptype = (*entry)->ptype;
-	e->refcount = (*entry)->refcount;
-	e->rcu_pending = 0;
-	hlist_add_before(&e->hlist, &(*entry)->hlist);
-	hlist_del(&(*entry)->hlist);
-	/* Make sure the call_rcu has been executed */
-	if ((*entry)->rcu_pending)
-		rcu_barrier_sched();
-	kfree(*entry);
-	*entry = e;
+	entry->format_allocated = 1;
+
 	trace_mark(core_marker_format, "name %s format %s",
-			e->name, e->format);
+			entry->name, entry->format);
 	return 0;
 }
 
 /*
  * Sets the probe callback corresponding to one marker.
  */
-static int set_marker(struct marker_entry **entry, struct marker *elem,
+static int set_marker(struct marker_entry *entry, struct marker *elem,
 		int active)
 {
 	int ret;
-	WARN_ON(strcmp((*entry)->name, elem->name) != 0);
+	WARN_ON(strcmp(entry->name, elem->name) != 0);
 
-	if ((*entry)->format) {
-		if (strcmp((*entry)->format, elem->format) != 0) {
+	if (entry->format) {
+		if (strcmp(entry->format, elem->format) != 0) {
 			printk(KERN_NOTICE
 			       "Format mismatch for probe %s "
 			       "(%s), marker (%s)\n",
-			       (*entry)->name,
-			       (*entry)->format,
+			       entry->name,
+			       entry->format,
 			       elem->format);
 			return -EPERM;
 		}
@@ -523,34 +502,33 @@ static int set_marker(struct marker_entry **entry, struct marker *elem,
 	 * pass from a "safe" callback (with argument) to an "unsafe"
 	 * callback (does not set arguments).
 	 */
-	elem->call = (*entry)->call;
+	elem->call = entry->call;
 	/*
 	 * Sanity check :
 	 * We only update the single probe private data when the ptr is
 	 * set to a _non_ single probe! (0 -> 1 and N -> 1, N != 1)
	 */
 	WARN_ON(elem->single.func != __mark_empty_function
-		&& elem->single.probe_private
-		!= (*entry)->single.probe_private &&
-		!elem->ptype);
-	elem->single.probe_private = (*entry)->single.probe_private;
+		&& elem->single.probe_private != entry->single.probe_private
+		&& !elem->ptype);
+	elem->single.probe_private = entry->single.probe_private;
 	/*
 	 * Make sure the private data is valid when we update the
 	 * single probe ptr.
 	 */
 	smp_wmb();
-	elem->single.func = (*entry)->single.func;
+	elem->single.func = entry->single.func;
 	/*
 	 * We also make sure that the new probe callbacks array is consistent
 	 * before setting a pointer to it.
 	 */
-	rcu_assign_pointer(elem->multi, (*entry)->multi);
+	rcu_assign_pointer(elem->multi, entry->multi);
 	/*
 	 * Update the function or multi probe array pointer before setting the
 	 * ptype.
 	 */
 	smp_wmb();
-	elem->ptype = (*entry)->ptype;
+	elem->ptype = entry->ptype;
 	elem->state = active;
 
 	return 0;
@@ -594,8 +572,7 @@ void marker_update_probe_range(struct marker *begin,
 	for (iter = begin; iter < end; iter++) {
 		mark_entry = get_marker(iter->name);
 		if (mark_entry) {
-			set_marker(&mark_entry, iter,
-					!!mark_entry->refcount);
+			set_marker(mark_entry, iter, !!mark_entry->refcount);
 			/*
 			 * ignore error, continue
 			 */
@@ -657,7 +634,7 @@ int marker_probe_register(const char *name, const char *format,
 		ret = PTR_ERR(entry);
 	} else if (format) {
 		if (!entry->format)
-			ret = marker_set_format(&entry, format);
+			ret = marker_set_format(entry, format);
 		else if (strcmp(entry->format, format))
 			ret = -EPERM;
 	}
@@ -848,8 +825,6 @@ void *marker_get_private_data(const char *name, marker_probe_func *probe,
 		if (!e->ptype) {
 			if (num == 0 && e->single.func == probe)
 				return e->single.probe_private;
-			else
-				break;
 		} else {
 			struct marker_probe_closure *closure;
 			int match = 0;
@@ -861,6 +836,7 @@ void *marker_get_private_data(const char *name, marker_probe_func *probe,
 					return closure[i].probe_private;
 				}
 			}
+			break;
 		}
 	}
 	return ERR_PTR(-ENOENT);
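The marker_set_format() rewrite above replaces a reallocate-and-relink of the whole marker_entry with a kstrdup() of just the format string, plus a format_allocated ownership bit that remove_marker() checks before kfree(). A userspace sketch of that ownership pattern, with strdup()/free() standing in for kstrdup()/kfree() (illustrative names, not the kernel code):

```c
#include <stdlib.h>
#include <string.h>

struct entry {
	char *format;
	unsigned char format_allocated:1;
	char name[];		/* may still hold "name\0format\0" */
};

static int entry_set_format(struct entry *e, const char *format)
{
	e->format = strdup(format);	/* kstrdup() in the kernel */
	if (!e->format)
		return -1;		/* -ENOMEM */
	e->format_allocated = 1;	/* remember we own this string */
	return 0;
}

static void entry_free(struct entry *e)
{
	if (e->format_allocated)
		free(e->format);	/* free only what we allocated */
	free(e);
}

int main(void)
{
	struct entry *e = calloc(1, sizeof(*e) + sizeof("demo"));

	strcpy(e->name, "demo");
	entry_set_format(e, "count %d");
	entry_free(e);
	return 0;
}
```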
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 9d048fa2d90..65d4a9ba79e 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -484,6 +484,16 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= &ftrace_enable_sysctl,
 	},
 #endif
+#ifdef CONFIG_TRACING
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "ftrace_dump_on_oops",
+		.data		= &ftrace_dump_on_oops,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
 #ifdef CONFIG_MODULES
 	{
 		.ctl_name	= KERN_MODPROBE,
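The entry above is all it takes for the flag to appear as /proc/sys/kernel/ftrace_dump_on_oops: kern_table is already registered, and proc_dointvec() handles the integer read/write. For context, a hedged sketch of what a standalone registration of a similar flag would look like with the 2.6.28-era sysctl API (my_flag and the table names are illustrative, not from the patch):

```c
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/errno.h>

static int my_flag;

static struct ctl_table my_kern_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,	/* no binary sysctl number */
		.procname	= "my_flag",		/* /proc/sys/kernel/my_flag */
		.data		= &my_flag,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,	/* plain integer handler */
	},
	{ .ctl_name = 0 }				/* terminator */
};

static struct ctl_table my_root_table[] = {
	{
		.ctl_name	= CTL_KERN,
		.procname	= "kernel",
		.mode		= 0555,
		.child		= my_kern_table,
	},
	{ .ctl_name = 0 }
};

static struct ctl_table_header *my_sysctl_header;

static int __init my_sysctl_init(void)
{
	my_sysctl_header = register_sysctl_table(my_root_table);
	return my_sysctl_header ? 0 : -ENOMEM;
}
```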
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 33dbefd471e..9c89526b6b7 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -9,6 +9,16 @@ config NOP_TRACER
 config HAVE_FUNCTION_TRACER
 	bool
 
+config HAVE_FUNCTION_RET_TRACER
+	bool
+
+config HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	bool
+	help
+	  This gets selected when the arch tests the function_trace_stop
+	  variable at the mcount call site. Otherwise, this variable
+	  is tested by the called function.
+
 config HAVE_DYNAMIC_FTRACE
 	bool
 
@@ -47,6 +57,17 @@ config FUNCTION_TRACER
 	  (the bootup default), then the overhead of the instructions is very
 	  small and not measurable even in micro-benchmarks.
 
+config FUNCTION_RET_TRACER
+	bool "Kernel Function return Tracer"
+	depends on !DYNAMIC_FTRACE
+	depends on HAVE_FUNCTION_RET_TRACER
+	depends on FUNCTION_TRACER
+	help
+	  Enable the kernel to trace a function at its return.
+	  Its first purpose is to trace the duration of functions.
+	  This is done by setting the current return address on the thread
+	  info structure of the current task.
+
 config IRQSOFF_TRACER
 	bool "Interrupts-off Latency Tracer"
 	default n
@@ -138,6 +159,44 @@ config BOOT_TRACER
 	  selected, because the self-tests are an initcall as well and that
 	  would invalidate the boot trace. )
 
+config TRACE_BRANCH_PROFILING
+	bool "Trace likely/unlikely profiler"
+	depends on DEBUG_KERNEL
+	select TRACING
+	help
+	  This tracer profiles all the likely and unlikely macros
+	  in the kernel. It will display the results in:
+
+	  /debugfs/tracing/profile_likely
+	  /debugfs/tracing/profile_unlikely
+
+	  Note: this will add a significant overhead; only turn this
+	  on if you need to profile the system's use of these macros.
+
+	  Say N if unsure.
+
+config TRACING_BRANCHES
+	bool
+	help
+	  Selected by tracers that will trace the likely and unlikely
+	  conditions. This prevents the tracers themselves from being
+	  profiled. Profiling the tracing infrastructure can only happen
+	  when the likelys and unlikelys are not being traced.
+
+config BRANCH_TRACER
+	bool "Trace likely/unlikely instances"
+	depends on TRACE_BRANCH_PROFILING
+	select TRACING_BRANCHES
+	help
+	  This traces the events of likely and unlikely condition
+	  calls in the kernel. The difference between this and the
+	  "Trace likely/unlikely profiler" is that this is not a
+	  histogram of the callers, but actually places the calling
+	  events into a running trace buffer to see when and where the
+	  events happened, as well as their results.
+
+	  Say N if unsure.
+
 config STACK_TRACER
 	bool "Trace max stack"
 	depends on HAVE_FUNCTION_TRACER
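The profiler described by TRACE_BRANCH_PROFILING works by redefining the likely()/unlikely() annotations so that every call site also updates a per-site hit/miss record before feeding the condition to __builtin_expect(); the kernel gathers those records and exposes them through debugfs. A self-contained userspace sketch of the mechanism (a flat demo, not the kernel's implementation):

```c
#include <stdio.h>

/* one record per annotation site, like the kernel's per-site entries */
struct branch_stat {
	const char *file;
	int line;
	unsigned long correct;
	unsigned long incorrect;
};

static struct branch_stat *last_stat;	/* demo hook to read the counters */

#define profiled_expect(cond, expect)	({				\
	static struct branch_stat __stat = { __FILE__, __LINE__, 0, 0 };\
	int __r = !!(cond);						\
	if (__r == (expect))						\
		__stat.correct++;					\
	else								\
		__stat.incorrect++;					\
	last_stat = &__stat;						\
	__builtin_expect(__r, expect);					\
})

#define likely(x)	profiled_expect(x, 1)
#define unlikely(x)	profiled_expect(x, 0)

int main(void)
{
	for (int i = 0; i < 100; i++)
		if (unlikely(i == 99))
			puts("took the unlikely branch");

	printf("%s:%d correct=%lu incorrect=%lu\n", last_stat->file,
	       last_stat->line, last_stat->correct, last_stat->incorrect);
	return 0;
}
```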
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index c8228b1a49e..1a8c9259dc6 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -10,6 +10,11 @@ CFLAGS_trace_selftest_dynamic.o = -pg
 obj-y += trace_selftest_dynamic.o
 endif
 
+# If unlikely tracing is enabled, do not trace these files
+ifdef CONFIG_TRACING_BRANCHES
+KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+endif
+
 obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
 obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
 
@@ -24,5 +29,7 @@ obj-$(CONFIG_NOP_TRACER) += trace_nop.o
 obj-$(CONFIG_STACK_TRACER) += trace_stack.o
 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
 obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
+obj-$(CONFIG_FUNCTION_RET_TRACER) += trace_functions_return.o
+obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
 
 libftrace-y := ftrace.o
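The -DDISABLE_BRANCH_PROFILING define gives every file in this directory the plain likely()/unlikely() annotations, so the branch tracer never records its own branches. Roughly the shape of the header-side switch the define flips (profile_likely/profile_unlikely are stand-ins here, not real kernel identifiers):

```c
#if defined(DISABLE_BRANCH_PROFILING) || !defined(CONFIG_TRACE_BRANCH_PROFILING)
/* plain annotations: just a hint to the compiler */
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#else
/* instrumented annotations: also record hit/miss per call site */
# define likely(x)	profile_likely(x)
# define unlikely(x)	profile_unlikely(x)
#endif
```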
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 14fa52297b2..54cb9a7d15e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -47,6 +47,9 @@
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
+/* Quick disabling of function tracer. */
+int function_trace_stop;
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -63,6 +66,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
 
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
@@ -88,7 +92,22 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
+	__ftrace_trace_function = ftrace_stub;
+}
+
+#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+/*
+ * For those archs that do not test function_trace_stop in their
+ * mcount call site, we need to do it from C.
+ */
+static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
+{
+	if (function_trace_stop)
+		return;
+
+	__ftrace_trace_function(ip, parent_ip);
 }
+#endif
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
@@ -110,10 +129,18 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 		 * For one func, simply call it directly.
 		 * For more than one func, call the chain.
 		 */
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 		if (ops->next == &ftrace_list_end)
 			ftrace_trace_function = ops->func;
 		else
 			ftrace_trace_function = ftrace_list_func;
+#else
+		if (ops->next == &ftrace_list_end)
+			__ftrace_trace_function = ops->func;
+		else
+			__ftrace_trace_function = ftrace_list_func;
+		ftrace_trace_function = ftrace_test_stop_func;
+#endif
 	}
 
 	spin_unlock(&ftrace_lock);
@@ -152,8 +179,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 
 	if (ftrace_enabled) {
 		/* If we only have one func left, then call that directly */
-		if (ftrace_list == &ftrace_list_end ||
-		    ftrace_list->next == &ftrace_list_end)
+		if (ftrace_list->next == &ftrace_list_end)
 			ftrace_trace_function = ftrace_list->func;
 	}
 
@@ -522,7 +548,7 @@ static void ftrace_run_update_code(int command)
 }
 
 static ftrace_func_t saved_ftrace_func;
-static int ftrace_start;
+static int ftrace_start_up;
 static DEFINE_MUTEX(ftrace_start_lock);
 
 static void ftrace_startup(void)
@@ -533,8 +559,8 @@ static void ftrace_startup(void)
 		return;
 
 	mutex_lock(&ftrace_start_lock);
-	ftrace_start++;
-	if (ftrace_start == 1)
+	ftrace_start_up++;
+	if (ftrace_start_up == 1)
 		command |= FTRACE_ENABLE_CALLS;
 
 	if (saved_ftrace_func != ftrace_trace_function) {
@@ -558,8 +584,8 @@ static void ftrace_shutdown(void)
 		return;
 
 	mutex_lock(&ftrace_start_lock);
-	ftrace_start--;
-	if (!ftrace_start)
+	ftrace_start_up--;
+	if (!ftrace_start_up)
 		command |= FTRACE_DISABLE_CALLS;
 
 	if (saved_ftrace_func != ftrace_trace_function) {
@@ -585,8 +611,8 @@ static void ftrace_startup_sysctl(void)
 	mutex_lock(&ftrace_start_lock);
 	/* Force update next time */
 	saved_ftrace_func = NULL;
-	/* ftrace_start is true if we want ftrace running */
-	if (ftrace_start)
+	/* ftrace_start_up is true if we want ftrace running */
+	if (ftrace_start_up)
 		command |= FTRACE_ENABLE_CALLS;
 
 	ftrace_run_update_code(command);
@@ -601,8 +627,8 @@ static void ftrace_shutdown_sysctl(void)
 		return;
 
 	mutex_lock(&ftrace_start_lock);
-	/* ftrace_start is true if ftrace is running */
-	if (ftrace_start)
+	/* ftrace_start_up is true if ftrace is running */
+	if (ftrace_start_up)
 		command |= FTRACE_DISABLE_CALLS;
 
 	ftrace_run_update_code(command);
@@ -734,6 +760,9 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 		    ((iter->flags & FTRACE_ITER_FAILURES) &&
 		     !(rec->flags & FTRACE_FL_FAILED)) ||
 
+		    ((iter->flags & FTRACE_ITER_FILTER) &&
+		     !(rec->flags & FTRACE_FL_FILTER)) ||
+
 		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
 		     !(rec->flags & FTRACE_FL_NOTRACE))) {
 			rec = NULL;
@@ -1182,7 +1211,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 
 	mutex_lock(&ftrace_sysctl_lock);
 	mutex_lock(&ftrace_start_lock);
-	if (iter->filtered && ftrace_start && ftrace_enabled)
+	if (iter->filtered && ftrace_start_up && ftrace_enabled)
 		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
 	mutex_unlock(&ftrace_start_lock);
 	mutex_unlock(&ftrace_sysctl_lock);
@@ -1450,3 +1479,19 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 	return ret;
 }
 
+#ifdef CONFIG_FUNCTION_RET_TRACER
+trace_function_return_t ftrace_function_return =
+			(trace_function_return_t)ftrace_stub;
+void register_ftrace_return(trace_function_return_t func)
+{
+	ftrace_function_return = func;
+}
+
+void unregister_ftrace_return(void)
+{
+	ftrace_function_return = (trace_function_return_t)ftrace_stub;
+}
+#endif
+
+
+
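The function_trace_stop / __ftrace_trace_function pair added above implements a quick kill switch: architectures that test the flag in their mcount stub call the callback directly, while everyone else gets ftrace_test_stop_func() interposed in C. A runnable userspace sketch of the interposition (names mirror the patch, but this is only a demo, not kernel code):

```c
#include <stdio.h>

typedef void (*trace_func_t)(unsigned long ip, unsigned long parent_ip);

static int function_trace_stop;		/* quick global off switch */

static void real_tracer(unsigned long ip, unsigned long parent_ip)
{
	printf("trace %lx <- %lx\n", ip, parent_ip);
}

static trace_func_t __trace_function = real_tracer;

/* what ftrace_test_stop_func() does in the patch: test, then chain */
static void test_stop_then_trace(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__trace_function(ip, parent_ip);
}

static trace_func_t trace_function = test_stop_then_trace;

int main(void)
{
	trace_function(0x1000, 0x2000);	/* traced */
	function_trace_stop = 1;
	trace_function(0x3000, 0x4000);	/* suppressed */
	return 0;
}
```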
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 036456cbb4f..caa4fda50f8 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -45,6 +45,8 @@ void tracing_off(void)
 	ring_buffers_off = 1;
 }
 
+#include "trace.h"
+
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
@@ -187,7 +189,8 @@ static inline int test_time_stamp(u64 delta)
 struct ring_buffer_per_cpu {
 	int				cpu;
 	struct ring_buffer		*buffer;
-	spinlock_t			lock;
+	spinlock_t			reader_lock; /* serialize readers */
+	raw_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	struct list_head		pages;
 	struct buffer_page		*head_page;	/* read from head */
@@ -221,32 +224,16 @@ struct ring_buffer_iter {
 	u64			read_stamp;
 };
 
+/* buffer may be either ring_buffer or ring_buffer_per_cpu */
 #define RB_WARN_ON(buffer, cond)				\
-	do {							\
-		if (unlikely(cond)) {				\
-			atomic_inc(&buffer->record_disabled);	\
-			WARN_ON(1);				\
-		}						\
-	} while (0)
-
-#define RB_WARN_ON_RET(buffer, cond)				\
-	do {							\
-		if (unlikely(cond)) {				\
-			atomic_inc(&buffer->record_disabled);	\
-			WARN_ON(1);				\
-			return -1;				\
-		}						\
-	} while (0)
-
-#define RB_WARN_ON_ONCE(buffer, cond)				\
-	do {							\
-		static int once;				\
-		if (unlikely(cond) && !once) {			\
-			once++;					\
+	({							\
+		int _____ret = unlikely(cond);			\
+		if (_____ret) {					\
 			atomic_inc(&buffer->record_disabled);	\
 			WARN_ON(1);				\
 		}						\
-	} while (0)
+		_____ret;					\
+	})
 
 /**
  * check_pages - integrity check of buffer pages
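The consolidated RB_WARN_ON() leans on a GCC statement expression: `({ ...; last; })` evaluates to its last statement, so a single macro can disable recording, warn, and still hand the condition back to the caller, replacing the separate _RET and _ONCE variants. A minimal standalone demo of the idiom:

```c
#include <stdio.h>

/* one macro that both warns and reports the condition to the caller */
#define WARN_ON_RET(cond)						\
({									\
	int _____ret = !!(cond);					\
	if (_____ret)							\
		fprintf(stderr, "warning at %s:%d\n", __FILE__, __LINE__);\
	_____ret;							\
})

static int check(int x)
{
	if (WARN_ON_RET(x < 0))		/* warn and bail out in one line */
		return -1;
	return x;
}

int main(void)
{
	return check(-5) == -1 ? 0 : 1;
}
```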
@@ -260,14 +247,18 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 	struct list_head *head = &cpu_buffer->pages;
 	struct buffer_page *page, *tmp;
 
-	RB_WARN_ON_RET(cpu_buffer, head->next->prev != head);
-	RB_WARN_ON_RET(cpu_buffer, head->prev->next != head);
+	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
+		return -1;
+	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
+		return -1;
 
 	list_for_each_entry_safe(page, tmp, head, list) {
-		RB_WARN_ON_RET(cpu_buffer,
-			       page->list.next->prev != &page->list);
-		RB_WARN_ON_RET(cpu_buffer,
-			       page->list.prev->next != &page->list);
+		if (RB_WARN_ON(cpu_buffer,
+			       page->list.next->prev != &page->list))
+			return -1;
+		if (RB_WARN_ON(cpu_buffer,
+			       page->list.prev->next != &page->list))
+			return -1;
 	}
 
 	return 0;
@@ -324,7 +315,8 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 
 	cpu_buffer->cpu = cpu;
 	cpu_buffer->buffer = buffer;
-	spin_lock_init(&cpu_buffer->lock);
+	spin_lock_init(&cpu_buffer->reader_lock);
+	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	INIT_LIST_HEAD(&cpu_buffer->pages);
 
 	page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
@@ -473,13 +465,15 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	synchronize_sched();
 
 	for (i = 0; i < nr_pages; i++) {
-		BUG_ON(list_empty(&cpu_buffer->pages));
+		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
+			return;
 		p = cpu_buffer->pages.next;
 		page = list_entry(p, struct buffer_page, list);
 		list_del_init(&page->list);
 		free_buffer_page(page);
 	}
-	BUG_ON(list_empty(&cpu_buffer->pages));
+	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
+		return;
 
 	rb_reset_cpu(cpu_buffer);
 
@@ -501,7 +495,8 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	synchronize_sched();
 
 	for (i = 0; i < nr_pages; i++) {
-		BUG_ON(list_empty(pages));
+		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
+			return;
 		p = pages->next;
 		page = list_entry(p, struct buffer_page, list);
 		list_del_init(&page->list);
@@ -562,7 +557,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	if (size < buffer_size) {
 
 		/* easy case, just free pages */
-		BUG_ON(nr_pages >= buffer->pages);
+		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
+			mutex_unlock(&buffer->mutex);
+			return -1;
+		}
 
 		rm_pages = buffer->pages - nr_pages;
 
@@ -581,7 +579,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	 * add these pages to the cpu_buffers. Otherwise we just free
 	 * them all and return -ENOMEM;
 	 */
-	BUG_ON(nr_pages <= buffer->pages);
+	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
+		mutex_unlock(&buffer->mutex);
+		return -1;
+	}
+
 	new_pages = nr_pages - buffer->pages;
 
 	for_each_buffer_cpu(buffer, cpu) {
@@ -604,7 +606,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 		rb_insert_pages(cpu_buffer, &pages, new_pages);
 	}
 
-	BUG_ON(!list_empty(&pages));
+	if (RB_WARN_ON(buffer, !list_empty(&pages))) {
+		mutex_unlock(&buffer->mutex);
+		return -1;
+	}
 
  out:
 	buffer->pages = nr_pages;
@@ -692,7 +697,8 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
 	     head += rb_event_length(event)) {
 
 		event = __rb_page_index(cpu_buffer->head_page, head);
-		BUG_ON(rb_null_event(event));
+		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
+			return;
 		/* Only count data entries */
 		if (event->type != RINGBUF_TYPE_DATA)
 			continue;
@@ -745,8 +751,9 @@ rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
 	addr &= PAGE_MASK;
 
 	while (cpu_buffer->commit_page->page != (void *)addr) {
-		RB_WARN_ON(cpu_buffer,
-			   cpu_buffer->commit_page == cpu_buffer->tail_page);
+		if (RB_WARN_ON(cpu_buffer,
+			   cpu_buffer->commit_page == cpu_buffer->tail_page))
+			return;
 		cpu_buffer->commit_page->commit =
 			cpu_buffer->commit_page->write;
 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
@@ -893,7 +900,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	if (write > BUF_PAGE_SIZE) {
 		struct buffer_page *next_page = tail_page;
 
-		spin_lock_irqsave(&cpu_buffer->lock, flags);
+		local_irq_save(flags);
+		__raw_spin_lock(&cpu_buffer->lock);
 
 		rb_inc_page(cpu_buffer, &next_page);
 
@@ -901,7 +909,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		reader_page = cpu_buffer->reader_page;
 
 		/* we grabbed the lock before incrementing */
-		RB_WARN_ON(cpu_buffer, next_page == reader_page);
+		if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
+			goto out_unlock;
 
 		/*
 		 * If for some reason, we had an interrupt storm that made
@@ -969,7 +978,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 			rb_set_commit_to_write(cpu_buffer);
 		}
 
-		spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+		__raw_spin_unlock(&cpu_buffer->lock);
+		local_irq_restore(flags);
 
 		/* fail and let the caller try again */
 		return ERR_PTR(-EAGAIN);
@@ -977,7 +987,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
 	/* We reserved something on the buffer */
 
-	BUG_ON(write > BUF_PAGE_SIZE);
+	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
+		return NULL;
 
 	event = __rb_page_index(tail_page, tail);
 	rb_update_event(event, type, length);
@@ -992,7 +1003,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	return event;
 
  out_unlock:
-	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+	__raw_spin_unlock(&cpu_buffer->lock);
+	local_irq_restore(flags);
 	return NULL;
 }
 
@@ -1075,10 +1087,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 		 * storm or we have something buggy.
 		 * Bail!
 		 */
-		if (unlikely(++nr_loops > 1000)) {
-			RB_WARN_ON(cpu_buffer, 1);
+		if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
 			return NULL;
-		}
 
 		ts = ring_buffer_time_stamp(cpu_buffer->cpu);
 
@@ -1181,8 +1191,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 		return NULL;
 
 	/* If we are tracing schedule, we don't want to recurse */
-	resched = need_resched();
-	preempt_disable_notrace();
+	resched = ftrace_preempt_disable();
 
 	cpu = raw_smp_processor_id();
 
@@ -1213,10 +1222,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	return event;
 
  out:
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
+	ftrace_preempt_enable(resched);
 	return NULL;
 }
 
@@ -1258,12 +1264,9 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 	/*
 	 * Only the last preempt count needs to restore preemption.
 	 */
-	if (preempt_count() == 1) {
-		if (per_cpu(rb_need_resched, cpu))
-			preempt_enable_no_resched_notrace();
-		else
-			preempt_enable_notrace();
-	} else
+	if (preempt_count() == 1)
+		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
+	else
 		preempt_enable_no_resched_notrace();
 
 	return 0;
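The ftrace_preempt_disable()/ftrace_preempt_enable() pair used above comes from the trace.h this file now includes. The subtlety it encodes: NEED_RESCHED must be sampled before preemption is disabled, because if it was already set the tracer may be running inside the scheduler itself (runqueue lock held), and re-enabling preemption must then skip the schedule check. Reconstructed from the call sites, the helpers look approximately like this (a sketch, not the verbatim header):

```c
static inline int ftrace_preempt_disable(void)
{
	int resched;

	/* sample NEED_RESCHED *before* preemption is disabled ... */
	resched = need_resched();
	preempt_disable_notrace();

	/* ... and hand it to the matching enable below */
	return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		/* flag was already set: do not recurse into schedule() */
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
```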
@@ -1299,8 +1302,7 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
1299 | if (atomic_read(&buffer->record_disabled)) | 1302 | if (atomic_read(&buffer->record_disabled)) |
1300 | return -EBUSY; | 1303 | return -EBUSY; |
1301 | 1304 | ||
1302 | resched = need_resched(); | 1305 | resched = ftrace_preempt_disable(); |
1303 | preempt_disable_notrace(); | ||
1304 | 1306 | ||
1305 | cpu = raw_smp_processor_id(); | 1307 | cpu = raw_smp_processor_id(); |
1306 | 1308 | ||
@@ -1326,10 +1328,7 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
1326 | 1328 | ||
1327 | ret = 0; | 1329 | ret = 0; |
1328 | out: | 1330 | out: |
1329 | if (resched) | 1331 | ftrace_preempt_enable(resched); |
1330 | preempt_enable_no_resched_notrace(); | ||
1331 | else | ||
1332 | preempt_enable_notrace(); | ||
1333 | 1332 | ||
1334 | return ret; | 1333 | return ret; |
1335 | } | 1334 | } |
@@ -1488,14 +1487,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer) | |||
1488 | return overruns; | 1487 | return overruns; |
1489 | } | 1488 | } |
1490 | 1489 | ||
1491 | /** | 1490 | static void rb_iter_reset(struct ring_buffer_iter *iter) |
1492 | * ring_buffer_iter_reset - reset an iterator | ||
1493 | * @iter: The iterator to reset | ||
1494 | * | ||
1495 | * Resets the iterator, so that it will start from the beginning | ||
1496 | * again. | ||
1497 | */ | ||
1498 | void ring_buffer_iter_reset(struct ring_buffer_iter *iter) | ||
1499 | { | 1491 | { |
1500 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 1492 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
1501 | 1493 | ||
@@ -1514,6 +1506,23 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter) | |||
1514 | } | 1506 | } |
1515 | 1507 | ||
1516 | /** | 1508 | /** |
1509 | * ring_buffer_iter_reset - reset an iterator | ||
1510 | * @iter: The iterator to reset | ||
1511 | * | ||
1512 | * Resets the iterator, so that it will start from the beginning | ||
1513 | * again. | ||
1514 | */ | ||
1515 | void ring_buffer_iter_reset(struct ring_buffer_iter *iter) | ||
1516 | { | ||
1517 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | ||
1518 | unsigned long flags; | ||
1519 | |||
1520 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | ||
1521 | rb_iter_reset(iter); | ||
1522 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
1523 | } | ||
1524 | |||
1525 | /** | ||
1517 | * ring_buffer_iter_empty - check if an iterator has no more to read | 1526 | * ring_buffer_iter_empty - check if an iterator has no more to read |
1518 | * @iter: The iterator to check | 1527 | * @iter: The iterator to check |
1519 | */ | 1528 | */ |
@@ -1596,7 +1605,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
1596 | unsigned long flags; | 1605 | unsigned long flags; |
1597 | int nr_loops = 0; | 1606 | int nr_loops = 0; |
1598 | 1607 | ||
1599 | spin_lock_irqsave(&cpu_buffer->lock, flags); | 1608 | local_irq_save(flags); |
1609 | __raw_spin_lock(&cpu_buffer->lock); | ||
1600 | 1610 | ||
1601 | again: | 1611 | again: |
1602 | /* | 1612 | /* |
@@ -1605,8 +1615,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
1605 | * a case where we will loop three times. There should be no | 1615 | * a case where we will loop three times. There should be no |
1606 | * reason to loop four times (that I know of). | 1616 | * reason to loop four times (that I know of). |
1607 | */ | 1617 | */ |
1608 | if (unlikely(++nr_loops > 3)) { | 1618 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { |
1609 | RB_WARN_ON(cpu_buffer, 1); | ||
1610 | reader = NULL; | 1619 | reader = NULL; |
1611 | goto out; | 1620 | goto out; |
1612 | } | 1621 | } |
@@ -1618,8 +1627,9 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
1618 | goto out; | 1627 | goto out; |
1619 | 1628 | ||
1620 | /* Never should we have an index greater than the size */ | 1629 | /* Never should we have an index greater than the size */ |
1621 | RB_WARN_ON(cpu_buffer, | 1630 | if (RB_WARN_ON(cpu_buffer, |
1622 | cpu_buffer->reader_page->read > rb_page_size(reader)); | 1631 | cpu_buffer->reader_page->read > rb_page_size(reader))) |
1632 | goto out; | ||
1623 | 1633 | ||
1624 | /* check if we caught up to the tail */ | 1634 | /* check if we caught up to the tail */ |
1625 | reader = NULL; | 1635 | reader = NULL; |
@@ -1658,7 +1668,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
1658 | goto again; | 1668 | goto again; |
1659 | 1669 | ||
1660 | out: | 1670 | out: |
1661 | spin_unlock_irqrestore(&cpu_buffer->lock, flags); | 1671 | __raw_spin_unlock(&cpu_buffer->lock); |
1672 | local_irq_restore(flags); | ||
1662 | 1673 | ||
1663 | return reader; | 1674 | return reader; |
1664 | } | 1675 | } |
@@ -1672,7 +1683,8 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) | |||
1672 | reader = rb_get_reader_page(cpu_buffer); | 1683 | reader = rb_get_reader_page(cpu_buffer); |
1673 | 1684 | ||
1674 | /* This function should not be called when buffer is empty */ | 1685 | /* This function should not be called when buffer is empty */ |
1675 | BUG_ON(!reader); | 1686 | if (RB_WARN_ON(cpu_buffer, !reader)) |
1687 | return; | ||
1676 | 1688 | ||
1677 | event = rb_reader_event(cpu_buffer); | 1689 | event = rb_reader_event(cpu_buffer); |
1678 | 1690 | ||
@@ -1699,7 +1711,9 @@ static void rb_advance_iter(struct ring_buffer_iter *iter) | |||
1699 | * Check if we are at the end of the buffer. | 1711 | * Check if we are at the end of the buffer. |
1700 | */ | 1712 | */ |
1701 | if (iter->head >= rb_page_size(iter->head_page)) { | 1713 | if (iter->head >= rb_page_size(iter->head_page)) { |
1702 | BUG_ON(iter->head_page == cpu_buffer->commit_page); | 1714 | if (RB_WARN_ON(buffer, |
1715 | iter->head_page == cpu_buffer->commit_page)) | ||
1716 | return; | ||
1703 | rb_inc_iter(iter); | 1717 | rb_inc_iter(iter); |
1704 | return; | 1718 | return; |
1705 | } | 1719 | } |
@@ -1712,8 +1726,10 @@ static void rb_advance_iter(struct ring_buffer_iter *iter) | |||
1712 | * This should not be called to advance the header if we are | 1726 | * This should not be called to advance the header if we are |
1713 | * at the tail of the buffer. | 1727 | * at the tail of the buffer. |
1714 | */ | 1728 | */ |
1715 | BUG_ON((iter->head_page == cpu_buffer->commit_page) && | 1729 | if (RB_WARN_ON(cpu_buffer, |
1716 | (iter->head + length > rb_commit_index(cpu_buffer))); | 1730 | (iter->head_page == cpu_buffer->commit_page) && |
1731 | (iter->head + length > rb_commit_index(cpu_buffer)))) | ||
1732 | return; | ||
1717 | 1733 | ||
1718 | rb_update_iter_read_stamp(iter, event); | 1734 | rb_update_iter_read_stamp(iter, event); |
1719 | 1735 | ||
@@ -1725,17 +1741,8 @@ static void rb_advance_iter(struct ring_buffer_iter *iter) | |||
1725 | rb_advance_iter(iter); | 1741 | rb_advance_iter(iter); |
1726 | } | 1742 | } |
1727 | 1743 | ||
1728 | /** | 1744 | static struct ring_buffer_event * |
1729 | * ring_buffer_peek - peek at the next event to be read | 1745 | rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) |
1730 | * @buffer: The ring buffer to read | ||
1731 | * @cpu: The cpu to peak at | ||
1732 | * @ts: The timestamp counter of this event. | ||
1733 | * | ||
1734 | * This will return the event that will be read next, but does | ||
1735 | * not consume the data. | ||
1736 | */ | ||
1737 | struct ring_buffer_event * | ||
1738 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | ||
1739 | { | 1746 | { |
1740 | struct ring_buffer_per_cpu *cpu_buffer; | 1747 | struct ring_buffer_per_cpu *cpu_buffer; |
1741 | struct ring_buffer_event *event; | 1748 | struct ring_buffer_event *event; |
@@ -1756,10 +1763,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
1756 | * can have. Nesting 10 deep of interrupts is clearly | 1763 | * can have. Nesting 10 deep of interrupts is clearly |
1757 | * an anomaly. | 1764 | * an anomaly. |
1758 | */ | 1765 | */ |
1759 | if (unlikely(++nr_loops > 10)) { | 1766 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10)) |
1760 | RB_WARN_ON(cpu_buffer, 1); | ||
1761 | return NULL; | 1767 | return NULL; |
1762 | } | ||
1763 | 1768 | ||
1764 | reader = rb_get_reader_page(cpu_buffer); | 1769 | reader = rb_get_reader_page(cpu_buffer); |
1765 | if (!reader) | 1770 | if (!reader) |
@@ -1797,16 +1802,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
1797 | return NULL; | 1802 | return NULL; |
1798 | } | 1803 | } |
1799 | 1804 | ||
1800 | /** | 1805 | static struct ring_buffer_event * |
1801 | * ring_buffer_iter_peek - peek at the next event to be read | 1806 | rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) |
1802 | * @iter: The ring buffer iterator | ||
1803 | * @ts: The timestamp counter of this event. | ||
1804 | * | ||
1805 | * This will return the event that will be read next, but does | ||
1806 | * not increment the iterator. | ||
1807 | */ | ||
1808 | struct ring_buffer_event * | ||
1809 | ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | ||
1810 | { | 1807 | { |
1811 | struct ring_buffer *buffer; | 1808 | struct ring_buffer *buffer; |
1812 | struct ring_buffer_per_cpu *cpu_buffer; | 1809 | struct ring_buffer_per_cpu *cpu_buffer; |
@@ -1828,10 +1825,8 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
1828 | * can have. Nesting 10 deep of interrupts is clearly | 1825 | * can have. Nesting 10 deep of interrupts is clearly |
1829 | * an anomaly. | 1826 | * an anomaly. |
1830 | */ | 1827 | */ |
1831 | if (unlikely(++nr_loops > 10)) { | 1828 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10)) |
1832 | RB_WARN_ON(cpu_buffer, 1); | ||
1833 | return NULL; | 1829 | return NULL; |
1834 | } | ||
1835 | 1830 | ||
1836 | if (rb_per_cpu_empty(cpu_buffer)) | 1831 | if (rb_per_cpu_empty(cpu_buffer)) |
1837 | return NULL; | 1832 | return NULL; |
@@ -1868,6 +1863,51 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
1868 | } | 1863 | } |
1869 | 1864 | ||
1870 | /** | 1865 | /** |
1866 | * ring_buffer_peek - peek at the next event to be read | ||
1867 | * @buffer: The ring buffer to read | ||
1868 | * @cpu: The cpu to peak at | ||
1869 | * @ts: The timestamp counter of this event. | ||
1870 | * | ||
1871 | * This will return the event that will be read next, but does | ||
1872 | * not consume the data. | ||
1873 | */ | ||
1874 | struct ring_buffer_event * | ||
1875 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | ||
1876 | { | ||
1877 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | ||
1878 | struct ring_buffer_event *event; | ||
1879 | unsigned long flags; | ||
1880 | |||
1881 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | ||
1882 | event = rb_buffer_peek(buffer, cpu, ts); | ||
1883 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
1884 | |||
1885 | return event; | ||
1886 | } | ||
1887 | |||
1888 | /** | ||
1889 | * ring_buffer_iter_peek - peek at the next event to be read | ||
1890 | * @iter: The ring buffer iterator | ||
1891 | * @ts: The timestamp counter of this event. | ||
1892 | * | ||
1893 | * This will return the event that will be read next, but does | ||
1894 | * not increment the iterator. | ||
1895 | */ | ||
1896 | struct ring_buffer_event * | ||
1897 | ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | ||
1898 | { | ||
1899 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | ||
1900 | struct ring_buffer_event *event; | ||
1901 | unsigned long flags; | ||
1902 | |||
1903 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | ||
1904 | event = rb_iter_peek(iter, ts); | ||
1905 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
1906 | |||
1907 | return event; | ||
1908 | } | ||
1909 | |||
1910 | /** | ||
1871 | * ring_buffer_consume - return an event and consume it | 1911 | * ring_buffer_consume - return an event and consume it |
1872 | * @buffer: The ring buffer to get the next event from | 1912 | * @buffer: The ring buffer to get the next event from |
1873 | * | 1913 | * |
@@ -1878,19 +1918,24 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
1878 | struct ring_buffer_event * | 1918 | struct ring_buffer_event * |
1879 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) | 1919 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) |
1880 | { | 1920 | { |
1881 | struct ring_buffer_per_cpu *cpu_buffer; | 1921 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; |
1882 | struct ring_buffer_event *event; | 1922 | struct ring_buffer_event *event; |
1923 | unsigned long flags; | ||
1883 | 1924 | ||
1884 | if (!cpu_isset(cpu, buffer->cpumask)) | 1925 | if (!cpu_isset(cpu, buffer->cpumask)) |
1885 | return NULL; | 1926 | return NULL; |
1886 | 1927 | ||
1887 | event = ring_buffer_peek(buffer, cpu, ts); | 1928 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
1929 | |||
1930 | event = rb_buffer_peek(buffer, cpu, ts); | ||
1888 | if (!event) | 1931 | if (!event) |
1889 | return NULL; | 1932 | goto out; |
1890 | 1933 | ||
1891 | cpu_buffer = buffer->buffers[cpu]; | ||
1892 | rb_advance_reader(cpu_buffer); | 1934 | rb_advance_reader(cpu_buffer); |
1893 | 1935 | ||
1936 | out: | ||
1937 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
1938 | |||
1894 | return event; | 1939 | return event; |
1895 | } | 1940 | } |
1896 | 1941 | ||
@@ -1927,9 +1972,11 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu) | |||
1927 | atomic_inc(&cpu_buffer->record_disabled); | 1972 | atomic_inc(&cpu_buffer->record_disabled); |
1928 | synchronize_sched(); | 1973 | synchronize_sched(); |
1929 | 1974 | ||
1930 | spin_lock_irqsave(&cpu_buffer->lock, flags); | 1975 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
1931 | ring_buffer_iter_reset(iter); | 1976 | __raw_spin_lock(&cpu_buffer->lock); |
1932 | spin_unlock_irqrestore(&cpu_buffer->lock, flags); | 1977 | rb_iter_reset(iter); |
1978 | __raw_spin_unlock(&cpu_buffer->lock); | ||
1979 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
1933 | 1980 | ||
1934 | return iter; | 1981 | return iter; |
1935 | } | 1982 | } |
@@ -1961,12 +2008,17 @@ struct ring_buffer_event * | |||
1961 | ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) | 2008 | ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) |
1962 | { | 2009 | { |
1963 | struct ring_buffer_event *event; | 2010 | struct ring_buffer_event *event; |
2011 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | ||
2012 | unsigned long flags; | ||
1964 | 2013 | ||
1965 | event = ring_buffer_iter_peek(iter, ts); | 2014 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
2015 | event = rb_iter_peek(iter, ts); | ||
1966 | if (!event) | 2016 | if (!event) |
1967 | return NULL; | 2017 | goto out; |
1968 | 2018 | ||
1969 | rb_advance_iter(iter); | 2019 | rb_advance_iter(iter); |
2020 | out: | ||
2021 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
1970 | 2022 | ||
1971 | return event; | 2023 | return event; |
1972 | } | 2024 | } |
@@ -2015,11 +2067,15 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) | |||
2015 | if (!cpu_isset(cpu, buffer->cpumask)) | 2067 | if (!cpu_isset(cpu, buffer->cpumask)) |
2016 | return; | 2068 | return; |
2017 | 2069 | ||
2018 | spin_lock_irqsave(&cpu_buffer->lock, flags); | 2070 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
2071 | |||
2072 | __raw_spin_lock(&cpu_buffer->lock); | ||
2019 | 2073 | ||
2020 | rb_reset_cpu(cpu_buffer); | 2074 | rb_reset_cpu(cpu_buffer); |
2021 | 2075 | ||
2022 | spin_unlock_irqrestore(&cpu_buffer->lock, flags); | 2076 | __raw_spin_unlock(&cpu_buffer->lock); |
2077 | |||
2078 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
2023 | } | 2079 | } |
2024 | 2080 | ||
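All three read-side paths above now follow the same nesting discipline: take the IRQ-disabling reader_lock first, then the raw per-cpu lock shared with the writer side. Distilled, the pattern this patch establishes is (a sketch, not new code):

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	__raw_spin_lock(&cpu_buffer->lock);

	/* ... manipulate the reader page or reset per-cpu state ... */

	__raw_spin_unlock(&cpu_buffer->lock);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

Keeping the order fixed in every path is what makes the two-lock scheme deadlock-free.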
2025 | /** | 2081 | /** |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 697eda36b86..4a904623e05 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -43,6 +43,15 @@ | |||
43 | unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; | 43 | unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; |
44 | unsigned long __read_mostly tracing_thresh; | 44 | unsigned long __read_mostly tracing_thresh; |
45 | 45 | ||
46 | |||
47 | /* | ||
48 | * Kill all tracing for good (never come back). | ||
49 | * It is initialized to 1 but is set to zero once the initialization | ||
50 | * of the tracer succeeds. That is the only place that sets | ||
51 | * it back to zero. | ||
52 | */ | ||
53 | int tracing_disabled = 1; | ||
54 | |||
46 | static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); | 55 | static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); |
47 | 56 | ||
48 | static inline void ftrace_disable_cpu(void) | 57 | static inline void ftrace_disable_cpu(void) |
@@ -62,7 +71,36 @@ static cpumask_t __read_mostly tracing_buffer_mask; | |||
62 | #define for_each_tracing_cpu(cpu) \ | 71 | #define for_each_tracing_cpu(cpu) \ |
63 | for_each_cpu_mask(cpu, tracing_buffer_mask) | 72 | for_each_cpu_mask(cpu, tracing_buffer_mask) |
64 | 73 | ||
65 | static int tracing_disabled = 1; | 74 | /* |
75 | * ftrace_dump_on_oops - variable to dump ftrace buffer on oops | ||
76 | * | ||
77 | * If there is an oops (or kernel panic) and the ftrace_dump_on_oops | ||
78 | * is set, then ftrace_dump is called. This will output the contents | ||
79 | * of the ftrace buffers to the console. This is very useful for | ||
80 | * capturing traces that lead to crashes and outputting them to a | ||
81 | * serial console. | ||
82 | * | ||
83 | * It is off by default, but you can enable it either by specifying | ||
84 | * "ftrace_dump_on_oops" on the kernel command line or by setting | ||
85 | * /proc/sys/kernel/ftrace_dump_on_oops to true. | ||
86 | */ | ||
87 | int ftrace_dump_on_oops; | ||
88 | |||
89 | static int tracing_set_tracer(char *buf); | ||
90 | |||
91 | static int __init set_ftrace(char *str) | ||
92 | { | ||
93 | tracing_set_tracer(str); | ||
94 | return 1; | ||
95 | } | ||
96 | __setup("ftrace", set_ftrace); | ||
97 | |||
98 | static int __init set_ftrace_dump_on_oops(char *str) | ||
99 | { | ||
100 | ftrace_dump_on_oops = 1; | ||
101 | return 1; | ||
102 | } | ||
103 | __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); | ||
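Both __setup() hooks give boot-time control over tracing. An illustrative workflow (the tracer name is an example; any registered tracer works):

	/*
	 * Boot with:      ftrace=sched_switch ftrace_dump_on_oops
	 * or at runtime:  echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
	 *
	 * The first selects a tracer before user space is up; the second
	 * arms the panic/die notifiers below to call ftrace_dump(), which
	 * spills the buffers to the (serial) console.
	 */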
66 | 104 | ||
67 | long | 105 | long |
68 | ns2usecs(cycle_t nsec) | 106 | ns2usecs(cycle_t nsec) |
@@ -112,6 +150,19 @@ static DEFINE_PER_CPU(struct trace_array_cpu, max_data); | |||
112 | /* tracer_enabled is used to toggle activation of a tracer */ | 150 | /* tracer_enabled is used to toggle activation of a tracer */ |
113 | static int tracer_enabled = 1; | 151 | static int tracer_enabled = 1; |
114 | 152 | ||
153 | /** | ||
154 | * tracing_is_enabled - return tracer_enabled status | ||
155 | * | ||
156 | * This function is used by other tracers to know the status | ||
157 | * of the tracer_enabled flag. Tracers may use this function | ||
158 | * to know if they should enable their features when starting | ||
159 | * up. See irqsoff tracer for an example (start_irqsoff_tracer). | ||
160 | */ | ||
161 | int tracing_is_enabled(void) | ||
162 | { | ||
163 | return tracer_enabled; | ||
164 | } | ||
165 | |||
115 | /* function tracing enabled */ | 166 | /* function tracing enabled */ |
116 | int ftrace_function_enabled; | 167 | int ftrace_function_enabled; |
117 | 168 | ||
@@ -153,8 +204,9 @@ static DEFINE_MUTEX(trace_types_lock); | |||
153 | /* trace_wait is a waitqueue for tasks blocked on trace_poll */ | 204 | /* trace_wait is a waitqueue for tasks blocked on trace_poll */ |
154 | static DECLARE_WAIT_QUEUE_HEAD(trace_wait); | 205 | static DECLARE_WAIT_QUEUE_HEAD(trace_wait); |
155 | 206 | ||
156 | /* trace_flags holds iter_ctrl options */ | 207 | /* trace_flags holds trace_options default values */ |
157 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT; | 208 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | |
209 | TRACE_ITER_ANNOTATE; | ||
158 | 210 | ||
159 | /** | 211 | /** |
160 | * trace_wake_up - wake up tasks waiting for trace input | 212 | * trace_wake_up - wake up tasks waiting for trace input |
@@ -193,13 +245,6 @@ unsigned long nsecs_to_usecs(unsigned long nsecs) | |||
193 | return nsecs / 1000; | 245 | return nsecs / 1000; |
194 | } | 246 | } |
195 | 247 | ||
196 | /* | ||
197 | * TRACE_ITER_SYM_MASK masks the options in trace_flags that | ||
198 | * control the output of kernel symbols. | ||
199 | */ | ||
200 | #define TRACE_ITER_SYM_MASK \ | ||
201 | (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR) | ||
202 | |||
203 | /* These must match the bit positions in trace_iterator_flags */ | 248 | /* These must match the bit positions in trace_iterator_flags */ |
204 | static const char *trace_options[] = { | 249 | static const char *trace_options[] = { |
205 | "print-parent", | 250 | "print-parent", |
@@ -213,6 +258,11 @@ static const char *trace_options[] = { | |||
213 | "stacktrace", | 258 | "stacktrace", |
214 | "sched-tree", | 259 | "sched-tree", |
215 | "ftrace_printk", | 260 | "ftrace_printk", |
261 | "ftrace_preempt", | ||
262 | #ifdef CONFIG_BRANCH_TRACER | ||
263 | "branch", | ||
264 | #endif | ||
265 | "annotate", | ||
216 | NULL | 266 | NULL |
217 | }; | 267 | }; |
218 | 268 | ||
@@ -485,7 +535,6 @@ int register_tracer(struct tracer *type) | |||
485 | if (type->selftest) { | 535 | if (type->selftest) { |
486 | struct tracer *saved_tracer = current_trace; | 536 | struct tracer *saved_tracer = current_trace; |
487 | struct trace_array *tr = &global_trace; | 537 | struct trace_array *tr = &global_trace; |
488 | int saved_ctrl = tr->ctrl; | ||
489 | int i; | 538 | int i; |
490 | /* | 539 | /* |
491 | * Run a selftest on this tracer. | 540 | * Run a selftest on this tracer. |
@@ -498,13 +547,11 @@ int register_tracer(struct tracer *type) | |||
498 | tracing_reset(tr, i); | 547 | tracing_reset(tr, i); |
499 | } | 548 | } |
500 | current_trace = type; | 549 | current_trace = type; |
501 | tr->ctrl = 0; | ||
502 | /* the test is responsible for initializing and enabling */ | 550 | /* the test is responsible for initializing and enabling */ |
503 | pr_info("Testing tracer %s: ", type->name); | 551 | pr_info("Testing tracer %s: ", type->name); |
504 | ret = type->selftest(type, tr); | 552 | ret = type->selftest(type, tr); |
505 | /* the test is responsible for resetting too */ | 553 | /* the test is responsible for resetting too */ |
506 | current_trace = saved_tracer; | 554 | current_trace = saved_tracer; |
507 | tr->ctrl = saved_ctrl; | ||
508 | if (ret) { | 555 | if (ret) { |
509 | printk(KERN_CONT "FAILED!\n"); | 556 | printk(KERN_CONT "FAILED!\n"); |
510 | goto out; | 557 | goto out; |
@@ -581,6 +628,76 @@ static void trace_init_cmdlines(void) | |||
581 | cmdline_idx = 0; | 628 | cmdline_idx = 0; |
582 | } | 629 | } |
583 | 630 | ||
631 | static int trace_stop_count; | ||
632 | static DEFINE_SPINLOCK(tracing_start_lock); | ||
633 | |||
634 | /** | ||
635 | * tracing_start - quick start of the tracer | ||
636 | * | ||
637 | * If tracing is enabled but was stopped by tracing_stop, | ||
638 | * this will start the tracer back up. | ||
639 | */ | ||
640 | void tracing_start(void) | ||
641 | { | ||
642 | struct ring_buffer *buffer; | ||
643 | unsigned long flags; | ||
644 | |||
645 | if (tracing_disabled) | ||
646 | return; | ||
647 | |||
648 | spin_lock_irqsave(&tracing_start_lock, flags); | ||
649 | if (--trace_stop_count) | ||
650 | goto out; | ||
651 | |||
652 | if (trace_stop_count < 0) { | ||
653 | /* Someone screwed up their debugging */ | ||
654 | WARN_ON_ONCE(1); | ||
655 | trace_stop_count = 0; | ||
656 | goto out; | ||
657 | } | ||
658 | |||
659 | |||
660 | buffer = global_trace.buffer; | ||
661 | if (buffer) | ||
662 | ring_buffer_record_enable(buffer); | ||
663 | |||
664 | buffer = max_tr.buffer; | ||
665 | if (buffer) | ||
666 | ring_buffer_record_enable(buffer); | ||
667 | |||
668 | ftrace_start(); | ||
669 | out: | ||
670 | spin_unlock_irqrestore(&tracing_start_lock, flags); | ||
671 | } | ||
672 | |||
673 | /** | ||
674 | * tracing_stop - quick stop of the tracer | ||
675 | * | ||
676 | * Lightweight way to stop tracing. Use in conjunction with | ||
677 | * tracing_start. | ||
678 | */ | ||
679 | void tracing_stop(void) | ||
680 | { | ||
681 | struct ring_buffer *buffer; | ||
682 | unsigned long flags; | ||
683 | |||
684 | ftrace_stop(); | ||
685 | spin_lock_irqsave(&tracing_start_lock, flags); | ||
686 | if (trace_stop_count++) | ||
687 | goto out; | ||
688 | |||
689 | buffer = global_trace.buffer; | ||
690 | if (buffer) | ||
691 | ring_buffer_record_disable(buffer); | ||
692 | |||
693 | buffer = max_tr.buffer; | ||
694 | if (buffer) | ||
695 | ring_buffer_record_disable(buffer); | ||
696 | |||
697 | out: | ||
698 | spin_unlock_irqrestore(&tracing_start_lock, flags); | ||
699 | } | ||
700 | |||
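The two calls nest through trace_stop_count, so tracing only resumes once every tracing_stop() has been matched. A usage sketch (illustrative):

	tracing_stop();		/* first stop disables the ring buffers */

	/* ... dump, resize, or inspect the buffers race-free ... */

	tracing_start();	/* matching start re-enables recording */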
584 | void trace_stop_cmdline_recording(void); | 701 | void trace_stop_cmdline_recording(void); |
585 | 702 | ||
586 | static void trace_save_cmdline(struct task_struct *tsk) | 703 | static void trace_save_cmdline(struct task_struct *tsk) |
@@ -691,6 +808,35 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data, | |||
691 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 808 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
692 | } | 809 | } |
693 | 810 | ||
811 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
812 | static void __trace_function_return(struct trace_array *tr, | ||
813 | struct trace_array_cpu *data, | ||
814 | struct ftrace_retfunc *trace, | ||
815 | unsigned long flags, | ||
816 | int pc) | ||
817 | { | ||
818 | struct ring_buffer_event *event; | ||
819 | struct ftrace_ret_entry *entry; | ||
820 | unsigned long irq_flags; | ||
821 | |||
822 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | ||
823 | return; | ||
824 | |||
825 | event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry), | ||
826 | &irq_flags); | ||
827 | if (!event) | ||
828 | return; | ||
829 | entry = ring_buffer_event_data(event); | ||
830 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
831 | entry->ent.type = TRACE_FN_RET; | ||
832 | entry->ip = trace->func; | ||
833 | entry->parent_ip = trace->ret; | ||
834 | entry->rettime = trace->rettime; | ||
835 | entry->calltime = trace->calltime; | ||
836 | ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags); | ||
837 | } | ||
838 | #endif | ||
839 | |||
694 | void | 840 | void |
695 | ftrace(struct trace_array *tr, struct trace_array_cpu *data, | 841 | ftrace(struct trace_array *tr, struct trace_array_cpu *data, |
696 | unsigned long ip, unsigned long parent_ip, unsigned long flags, | 842 | unsigned long ip, unsigned long parent_ip, unsigned long flags, |
@@ -841,26 +987,28 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) | |||
841 | { | 987 | { |
842 | struct trace_array *tr = &global_trace; | 988 | struct trace_array *tr = &global_trace; |
843 | struct trace_array_cpu *data; | 989 | struct trace_array_cpu *data; |
990 | unsigned long flags; | ||
844 | int cpu; | 991 | int cpu; |
845 | int pc; | 992 | int pc; |
846 | 993 | ||
847 | if (tracing_disabled || !tr->ctrl) | 994 | if (tracing_disabled) |
848 | return; | 995 | return; |
849 | 996 | ||
850 | pc = preempt_count(); | 997 | pc = preempt_count(); |
851 | preempt_disable_notrace(); | 998 | local_irq_save(flags); |
852 | cpu = raw_smp_processor_id(); | 999 | cpu = raw_smp_processor_id(); |
853 | data = tr->data[cpu]; | 1000 | data = tr->data[cpu]; |
854 | 1001 | ||
855 | if (likely(!atomic_read(&data->disabled))) | 1002 | if (likely(atomic_inc_return(&data->disabled) == 1)) |
856 | ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); | 1003 | ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); |
857 | 1004 | ||
858 | preempt_enable_notrace(); | 1005 | atomic_dec(&data->disabled); |
1006 | local_irq_restore(flags); | ||
859 | } | 1007 | } |
860 | 1008 | ||
861 | #ifdef CONFIG_FUNCTION_TRACER | 1009 | #ifdef CONFIG_FUNCTION_TRACER |
862 | static void | 1010 | static void |
863 | function_trace_call(unsigned long ip, unsigned long parent_ip) | 1011 | function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) |
864 | { | 1012 | { |
865 | struct trace_array *tr = &global_trace; | 1013 | struct trace_array *tr = &global_trace; |
866 | struct trace_array_cpu *data; | 1014 | struct trace_array_cpu *data; |
@@ -873,8 +1021,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) | |||
873 | return; | 1021 | return; |
874 | 1022 | ||
875 | pc = preempt_count(); | 1023 | pc = preempt_count(); |
876 | resched = need_resched(); | 1024 | resched = ftrace_preempt_disable(); |
877 | preempt_disable_notrace(); | ||
878 | local_save_flags(flags); | 1025 | local_save_flags(flags); |
879 | cpu = raw_smp_processor_id(); | 1026 | cpu = raw_smp_processor_id(); |
880 | data = tr->data[cpu]; | 1027 | data = tr->data[cpu]; |
@@ -884,11 +1031,62 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) | |||
884 | trace_function(tr, data, ip, parent_ip, flags, pc); | 1031 | trace_function(tr, data, ip, parent_ip, flags, pc); |
885 | 1032 | ||
886 | atomic_dec(&data->disabled); | 1033 | atomic_dec(&data->disabled); |
887 | if (resched) | 1034 | ftrace_preempt_enable(resched); |
888 | preempt_enable_no_resched_notrace(); | 1035 | } |
889 | else | 1036 | |
890 | preempt_enable_notrace(); | 1037 | static void |
1038 | function_trace_call(unsigned long ip, unsigned long parent_ip) | ||
1039 | { | ||
1040 | struct trace_array *tr = &global_trace; | ||
1041 | struct trace_array_cpu *data; | ||
1042 | unsigned long flags; | ||
1043 | long disabled; | ||
1044 | int cpu; | ||
1045 | int pc; | ||
1046 | |||
1047 | if (unlikely(!ftrace_function_enabled)) | ||
1048 | return; | ||
1049 | |||
1050 | /* | ||
1051 | * Need to use raw, since this must be called before the | ||
1052 | * recursive protection is performed. | ||
1053 | */ | ||
1054 | raw_local_irq_save(flags); | ||
1055 | cpu = raw_smp_processor_id(); | ||
1056 | data = tr->data[cpu]; | ||
1057 | disabled = atomic_inc_return(&data->disabled); | ||
1058 | |||
1059 | if (likely(disabled == 1)) { | ||
1060 | pc = preempt_count(); | ||
1061 | trace_function(tr, data, ip, parent_ip, flags, pc); | ||
1062 | } | ||
1063 | |||
1064 | atomic_dec(&data->disabled); | ||
1065 | raw_local_irq_restore(flags); | ||
1066 | } | ||
1067 | |||
1068 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
1069 | void trace_function_return(struct ftrace_retfunc *trace) | ||
1070 | { | ||
1071 | struct trace_array *tr = &global_trace; | ||
1072 | struct trace_array_cpu *data; | ||
1073 | unsigned long flags; | ||
1074 | long disabled; | ||
1075 | int cpu; | ||
1076 | int pc; | ||
1077 | |||
1078 | raw_local_irq_save(flags); | ||
1079 | cpu = raw_smp_processor_id(); | ||
1080 | data = tr->data[cpu]; | ||
1081 | disabled = atomic_inc_return(&data->disabled); | ||
1082 | if (likely(disabled == 1)) { | ||
1083 | pc = preempt_count(); | ||
1084 | __trace_function_return(tr, data, trace, flags, pc); | ||
1085 | } | ||
1086 | atomic_dec(&data->disabled); | ||
1087 | raw_local_irq_restore(flags); | ||
891 | } | 1088 | } |
1089 | #endif /* CONFIG_FUNCTION_RET_TRACER */ | ||
892 | 1090 | ||
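Both new callbacks rely on the same per-cpu recursion guard: atomic_inc_return(&data->disabled) returning 1 means this is the only active tracer entry on the cpu. In sketch form:

	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		/* sole entry on this cpu: safe to write the ring buffer */
	}
	atomic_dec(&data->disabled);	/* always undo the increment */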
893 | static struct ftrace_ops trace_ops __read_mostly = | 1091 | static struct ftrace_ops trace_ops __read_mostly = |
894 | { | 1092 | { |
@@ -898,9 +1096,14 @@ static struct ftrace_ops trace_ops __read_mostly = | |||
898 | void tracing_start_function_trace(void) | 1096 | void tracing_start_function_trace(void) |
899 | { | 1097 | { |
900 | ftrace_function_enabled = 0; | 1098 | ftrace_function_enabled = 0; |
1099 | |||
1100 | if (trace_flags & TRACE_ITER_PREEMPTONLY) | ||
1101 | trace_ops.func = function_trace_call_preempt_only; | ||
1102 | else | ||
1103 | trace_ops.func = function_trace_call; | ||
1104 | |||
901 | register_ftrace_function(&trace_ops); | 1105 | register_ftrace_function(&trace_ops); |
902 | if (tracer_enabled) | 1106 | ftrace_function_enabled = 1; |
903 | ftrace_function_enabled = 1; | ||
904 | } | 1107 | } |
905 | 1108 | ||
906 | void tracing_stop_function_trace(void) | 1109 | void tracing_stop_function_trace(void) |
@@ -912,6 +1115,7 @@ void tracing_stop_function_trace(void) | |||
912 | 1115 | ||
913 | enum trace_file_type { | 1116 | enum trace_file_type { |
914 | TRACE_FILE_LAT_FMT = 1, | 1117 | TRACE_FILE_LAT_FMT = 1, |
1118 | TRACE_FILE_ANNOTATE = 2, | ||
915 | }; | 1119 | }; |
916 | 1120 | ||
917 | static void trace_iterator_increment(struct trace_iterator *iter, int cpu) | 1121 | static void trace_iterator_increment(struct trace_iterator *iter, int cpu) |
@@ -1047,10 +1251,6 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1047 | 1251 | ||
1048 | atomic_inc(&trace_record_cmdline_disabled); | 1252 | atomic_inc(&trace_record_cmdline_disabled); |
1049 | 1253 | ||
1050 | /* let the tracer grab locks here if needed */ | ||
1051 | if (current_trace->start) | ||
1052 | current_trace->start(iter); | ||
1053 | |||
1054 | if (*pos != iter->pos) { | 1254 | if (*pos != iter->pos) { |
1055 | iter->ent = NULL; | 1255 | iter->ent = NULL; |
1056 | iter->cpu = 0; | 1256 | iter->cpu = 0; |
@@ -1077,14 +1277,7 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1077 | 1277 | ||
1078 | static void s_stop(struct seq_file *m, void *p) | 1278 | static void s_stop(struct seq_file *m, void *p) |
1079 | { | 1279 | { |
1080 | struct trace_iterator *iter = m->private; | ||
1081 | |||
1082 | atomic_dec(&trace_record_cmdline_disabled); | 1280 | atomic_dec(&trace_record_cmdline_disabled); |
1083 | |||
1084 | /* let the tracer release locks here if needed */ | ||
1085 | if (current_trace && current_trace == iter->trace && iter->trace->stop) | ||
1086 | iter->trace->stop(iter); | ||
1087 | |||
1088 | mutex_unlock(&trace_types_lock); | 1281 | mutex_unlock(&trace_types_lock); |
1089 | } | 1282 | } |
1090 | 1283 | ||
@@ -1143,7 +1336,7 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt, | |||
1143 | # define IP_FMT "%016lx" | 1336 | # define IP_FMT "%016lx" |
1144 | #endif | 1337 | #endif |
1145 | 1338 | ||
1146 | static int | 1339 | int |
1147 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) | 1340 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) |
1148 | { | 1341 | { |
1149 | int ret; | 1342 | int ret; |
@@ -1338,6 +1531,23 @@ void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) | |||
1338 | trace_seq_putc(s, '\n'); | 1531 | trace_seq_putc(s, '\n'); |
1339 | } | 1532 | } |
1340 | 1533 | ||
1534 | static void test_cpu_buff_start(struct trace_iterator *iter) | ||
1535 | { | ||
1536 | struct trace_seq *s = &iter->seq; | ||
1537 | |||
1538 | if (!(trace_flags & TRACE_ITER_ANNOTATE)) | ||
1539 | return; | ||
1540 | |||
1541 | if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) | ||
1542 | return; | ||
1543 | |||
1544 | if (cpu_isset(iter->cpu, iter->started)) | ||
1545 | return; | ||
1546 | |||
1547 | cpu_set(iter->cpu, iter->started); | ||
1548 | trace_seq_printf(s, "#### CPU %u buffer started ####\n", iter->cpu); | ||
1549 | } | ||
1550 | |||
1341 | static enum print_line_t | 1551 | static enum print_line_t |
1342 | print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) | 1552 | print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) |
1343 | { | 1553 | { |
@@ -1357,6 +1567,8 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) | |||
1357 | if (entry->type == TRACE_CONT) | 1567 | if (entry->type == TRACE_CONT) |
1358 | return TRACE_TYPE_HANDLED; | 1568 | return TRACE_TYPE_HANDLED; |
1359 | 1569 | ||
1570 | test_cpu_buff_start(iter); | ||
1571 | |||
1360 | next_entry = find_next_entry(iter, NULL, &next_ts); | 1572 | next_entry = find_next_entry(iter, NULL, &next_ts); |
1361 | if (!next_entry) | 1573 | if (!next_entry) |
1362 | next_ts = iter->ts; | 1574 | next_ts = iter->ts; |
@@ -1448,6 +1660,18 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) | |||
1448 | trace_seq_print_cont(s, iter); | 1660 | trace_seq_print_cont(s, iter); |
1449 | break; | 1661 | break; |
1450 | } | 1662 | } |
1663 | case TRACE_BRANCH: { | ||
1664 | struct trace_branch *field; | ||
1665 | |||
1666 | trace_assign_type(field, entry); | ||
1667 | |||
1668 | trace_seq_printf(s, "[%s] %s:%s:%d\n", | ||
1669 | field->correct ? " ok " : " MISS ", | ||
1670 | field->func, | ||
1671 | field->file, | ||
1672 | field->line); | ||
1673 | break; | ||
1674 | } | ||
1451 | default: | 1675 | default: |
1452 | trace_seq_printf(s, "Unknown type %d\n", entry->type); | 1676 | trace_seq_printf(s, "Unknown type %d\n", entry->type); |
1453 | } | 1677 | } |
@@ -1472,6 +1696,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | |||
1472 | if (entry->type == TRACE_CONT) | 1696 | if (entry->type == TRACE_CONT) |
1473 | return TRACE_TYPE_HANDLED; | 1697 | return TRACE_TYPE_HANDLED; |
1474 | 1698 | ||
1699 | test_cpu_buff_start(iter); | ||
1700 | |||
1475 | comm = trace_find_cmdline(iter->ent->pid); | 1701 | comm = trace_find_cmdline(iter->ent->pid); |
1476 | 1702 | ||
1477 | t = ns2usecs(iter->ts); | 1703 | t = ns2usecs(iter->ts); |
@@ -1581,6 +1807,22 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | |||
1581 | trace_seq_print_cont(s, iter); | 1807 | trace_seq_print_cont(s, iter); |
1582 | break; | 1808 | break; |
1583 | } | 1809 | } |
1810 | case TRACE_FN_RET: { | ||
1811 | return print_return_function(iter); | ||
1812 | break; | ||
1813 | } | ||
1814 | case TRACE_BRANCH: { | ||
1815 | struct trace_branch *field; | ||
1816 | |||
1817 | trace_assign_type(field, entry); | ||
1818 | |||
1819 | trace_seq_printf(s, "[%s] %s:%s:%d\n", | ||
1820 | field->correct ? " ok " : " MISS ", | ||
1821 | field->func, | ||
1822 | field->file, | ||
1823 | field->line); | ||
1824 | break; | ||
1825 | } | ||
1584 | } | 1826 | } |
1585 | return TRACE_TYPE_HANDLED; | 1827 | return TRACE_TYPE_HANDLED; |
1586 | } | 1828 | } |
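Branch entries thus render the same way in both output paths: a hit/miss tag followed by the branch's source location, e.g. (illustrative values):

	/*
	 *   [ ok ] cache_alloc_refill:slab.c:2958
	 *   [ MISS ] do_page_fault:fault.c:328
	 */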
@@ -1899,6 +2141,11 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) | |||
1899 | iter->trace = current_trace; | 2141 | iter->trace = current_trace; |
1900 | iter->pos = -1; | 2142 | iter->pos = -1; |
1901 | 2143 | ||
2144 | /* Annotate start of buffers if we had overruns */ | ||
2145 | if (ring_buffer_overruns(iter->tr->buffer)) | ||
2146 | iter->iter_flags |= TRACE_FILE_ANNOTATE; | ||
2147 | |||
2148 | |||
1902 | for_each_tracing_cpu(cpu) { | 2149 | for_each_tracing_cpu(cpu) { |
1903 | 2150 | ||
1904 | iter->buffer_iter[cpu] = | 2151 | iter->buffer_iter[cpu] = |
@@ -1917,10 +2164,7 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) | |||
1917 | m->private = iter; | 2164 | m->private = iter; |
1918 | 2165 | ||
1919 | /* stop the trace while dumping */ | 2166 | /* stop the trace while dumping */ |
1920 | if (iter->tr->ctrl) { | 2167 | tracing_stop(); |
1921 | tracer_enabled = 0; | ||
1922 | ftrace_function_enabled = 0; | ||
1923 | } | ||
1924 | 2168 | ||
1925 | if (iter->trace && iter->trace->open) | 2169 | if (iter->trace && iter->trace->open) |
1926 | iter->trace->open(iter); | 2170 | iter->trace->open(iter); |
@@ -1965,14 +2209,7 @@ int tracing_release(struct inode *inode, struct file *file) | |||
1965 | iter->trace->close(iter); | 2209 | iter->trace->close(iter); |
1966 | 2210 | ||
1967 | /* reenable tracing if it was previously enabled */ | 2211 | /* reenable tracing if it was previously enabled */ |
1968 | if (iter->tr->ctrl) { | 2212 | tracing_start(); |
1969 | tracer_enabled = 1; | ||
1970 | /* | ||
1971 | * It is safe to enable function tracing even if it | ||
1972 | * isn't used | ||
1973 | */ | ||
1974 | ftrace_function_enabled = 1; | ||
1975 | } | ||
1976 | mutex_unlock(&trace_types_lock); | 2213 | mutex_unlock(&trace_types_lock); |
1977 | 2214 | ||
1978 | seq_release(inode, file); | 2215 | seq_release(inode, file); |
@@ -2188,7 +2425,7 @@ static struct file_operations tracing_cpumask_fops = { | |||
2188 | }; | 2425 | }; |
2189 | 2426 | ||
2190 | static ssize_t | 2427 | static ssize_t |
2191 | tracing_iter_ctrl_read(struct file *filp, char __user *ubuf, | 2428 | tracing_trace_options_read(struct file *filp, char __user *ubuf, |
2192 | size_t cnt, loff_t *ppos) | 2429 | size_t cnt, loff_t *ppos) |
2193 | { | 2430 | { |
2194 | char *buf; | 2431 | char *buf; |
@@ -2225,7 +2462,7 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf, | |||
2225 | } | 2462 | } |
2226 | 2463 | ||
2227 | static ssize_t | 2464 | static ssize_t |
2228 | tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf, | 2465 | tracing_trace_options_write(struct file *filp, const char __user *ubuf, |
2229 | size_t cnt, loff_t *ppos) | 2466 | size_t cnt, loff_t *ppos) |
2230 | { | 2467 | { |
2231 | char buf[64]; | 2468 | char buf[64]; |
@@ -2270,8 +2507,8 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf, | |||
2270 | 2507 | ||
2271 | static struct file_operations tracing_iter_fops = { | 2508 | static struct file_operations tracing_iter_fops = { |
2272 | .open = tracing_open_generic, | 2509 | .open = tracing_open_generic, |
2273 | .read = tracing_iter_ctrl_read, | 2510 | .read = tracing_trace_options_read, |
2274 | .write = tracing_iter_ctrl_write, | 2511 | .write = tracing_trace_options_write, |
2275 | }; | 2512 | }; |
2276 | 2513 | ||
2277 | static const char readme_msg[] = | 2514 | static const char readme_msg[] = |
@@ -2285,9 +2522,9 @@ static const char readme_msg[] = | |||
2285 | "# echo sched_switch > /debug/tracing/current_tracer\n" | 2522 | "# echo sched_switch > /debug/tracing/current_tracer\n" |
2286 | "# cat /debug/tracing/current_tracer\n" | 2523 | "# cat /debug/tracing/current_tracer\n" |
2287 | "sched_switch\n" | 2524 | "sched_switch\n" |
2288 | "# cat /debug/tracing/iter_ctrl\n" | 2525 | "# cat /debug/tracing/trace_options\n" |
2289 | "noprint-parent nosym-offset nosym-addr noverbose\n" | 2526 | "noprint-parent nosym-offset nosym-addr noverbose\n" |
2290 | "# echo print-parent > /debug/tracing/iter_ctrl\n" | 2527 | "# echo print-parent > /debug/tracing/trace_options\n" |
2291 | "# echo 1 > /debug/tracing/tracing_enabled\n" | 2528 | "# echo 1 > /debug/tracing/tracing_enabled\n" |
2292 | "# cat /debug/tracing/trace > /tmp/trace.txt\n" | 2529 | "# cat /debug/tracing/trace > /tmp/trace.txt\n" |
2293 | "echo 0 > /debug/tracing/tracing_enabled\n" | 2530 | "echo 0 > /debug/tracing/tracing_enabled\n" |
@@ -2310,11 +2547,10 @@ static ssize_t | |||
2310 | tracing_ctrl_read(struct file *filp, char __user *ubuf, | 2547 | tracing_ctrl_read(struct file *filp, char __user *ubuf, |
2311 | size_t cnt, loff_t *ppos) | 2548 | size_t cnt, loff_t *ppos) |
2312 | { | 2549 | { |
2313 | struct trace_array *tr = filp->private_data; | ||
2314 | char buf[64]; | 2550 | char buf[64]; |
2315 | int r; | 2551 | int r; |
2316 | 2552 | ||
2317 | r = sprintf(buf, "%ld\n", tr->ctrl); | 2553 | r = sprintf(buf, "%u\n", tracer_enabled); |
2318 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2554 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2319 | } | 2555 | } |
2320 | 2556 | ||
@@ -2342,16 +2578,18 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf, | |||
2342 | val = !!val; | 2578 | val = !!val; |
2343 | 2579 | ||
2344 | mutex_lock(&trace_types_lock); | 2580 | mutex_lock(&trace_types_lock); |
2345 | if (tr->ctrl ^ val) { | 2581 | if (tracer_enabled ^ val) { |
2346 | if (val) | 2582 | if (val) { |
2347 | tracer_enabled = 1; | 2583 | tracer_enabled = 1; |
2348 | else | 2584 | if (current_trace->start) |
2585 | current_trace->start(tr); | ||
2586 | tracing_start(); | ||
2587 | } else { | ||
2349 | tracer_enabled = 0; | 2588 | tracer_enabled = 0; |
2350 | 2589 | tracing_stop(); | |
2351 | tr->ctrl = val; | 2590 | if (current_trace->stop) |
2352 | 2591 | current_trace->stop(tr); | |
2353 | if (current_trace && current_trace->ctrl_update) | 2592 | } |
2354 | current_trace->ctrl_update(tr); | ||
2355 | } | 2593 | } |
2356 | mutex_unlock(&trace_types_lock); | 2594 | mutex_unlock(&trace_types_lock); |
2357 | 2595 | ||
@@ -2377,29 +2615,11 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf, | |||
2377 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2615 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2378 | } | 2616 | } |
2379 | 2617 | ||
2380 | static ssize_t | 2618 | static int tracing_set_tracer(char *buf) |
2381 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, | ||
2382 | size_t cnt, loff_t *ppos) | ||
2383 | { | 2619 | { |
2384 | struct trace_array *tr = &global_trace; | 2620 | struct trace_array *tr = &global_trace; |
2385 | struct tracer *t; | 2621 | struct tracer *t; |
2386 | char buf[max_tracer_type_len+1]; | 2622 | int ret = 0; |
2387 | int i; | ||
2388 | size_t ret; | ||
2389 | |||
2390 | ret = cnt; | ||
2391 | |||
2392 | if (cnt > max_tracer_type_len) | ||
2393 | cnt = max_tracer_type_len; | ||
2394 | |||
2395 | if (copy_from_user(&buf, ubuf, cnt)) | ||
2396 | return -EFAULT; | ||
2397 | |||
2398 | buf[cnt] = 0; | ||
2399 | |||
2400 | /* strip ending whitespace. */ | ||
2401 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) | ||
2402 | buf[i] = 0; | ||
2403 | 2623 | ||
2404 | mutex_lock(&trace_types_lock); | 2624 | mutex_lock(&trace_types_lock); |
2405 | for (t = trace_types; t; t = t->next) { | 2625 | for (t = trace_types; t; t = t->next) { |
@@ -2413,6 +2633,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
2413 | if (t == current_trace) | 2633 | if (t == current_trace) |
2414 | goto out; | 2634 | goto out; |
2415 | 2635 | ||
2636 | trace_branch_disable(); | ||
2416 | if (current_trace && current_trace->reset) | 2637 | if (current_trace && current_trace->reset) |
2417 | current_trace->reset(tr); | 2638 | current_trace->reset(tr); |
2418 | 2639 | ||
@@ -2420,9 +2641,37 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
2420 | if (t->init) | 2641 | if (t->init) |
2421 | t->init(tr); | 2642 | t->init(tr); |
2422 | 2643 | ||
2644 | trace_branch_enable(tr); | ||
2423 | out: | 2645 | out: |
2424 | mutex_unlock(&trace_types_lock); | 2646 | mutex_unlock(&trace_types_lock); |
2425 | 2647 | ||
2648 | return ret; | ||
2649 | } | ||
2650 | |||
2651 | static ssize_t | ||
2652 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, | ||
2653 | size_t cnt, loff_t *ppos) | ||
2654 | { | ||
2655 | char buf[max_tracer_type_len+1]; | ||
2656 | int i; | ||
2657 | size_t ret; | ||
2658 | |||
2659 | if (cnt > max_tracer_type_len) | ||
2660 | cnt = max_tracer_type_len; | ||
2661 | |||
2662 | if (copy_from_user(&buf, ubuf, cnt)) | ||
2663 | return -EFAULT; | ||
2664 | |||
2665 | buf[cnt] = 0; | ||
2666 | |||
2667 | /* strip ending whitespace. */ | ||
2668 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) | ||
2669 | buf[i] = 0; | ||
2670 | |||
2671 | ret = tracing_set_tracer(buf); | ||
2672 | if (!ret) | ||
2673 | ret = cnt; | ||
2674 | |||
2426 | if (ret > 0) | 2675 | if (ret > 0) |
2427 | filp->f_pos += ret; | 2676 | filp->f_pos += ret; |
2428 | 2677 | ||
@@ -2491,6 +2740,10 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) | |||
2491 | return -ENOMEM; | 2740 | return -ENOMEM; |
2492 | 2741 | ||
2493 | mutex_lock(&trace_types_lock); | 2742 | mutex_lock(&trace_types_lock); |
2743 | |||
2744 | /* trace pipe does not show start of buffer */ | ||
2745 | cpus_setall(iter->started); | ||
2746 | |||
2494 | iter->tr = &global_trace; | 2747 | iter->tr = &global_trace; |
2495 | iter->trace = current_trace; | 2748 | iter->trace = current_trace; |
2496 | filp->private_data = iter; | 2749 | filp->private_data = iter; |
@@ -2666,7 +2919,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf, | |||
2666 | char buf[64]; | 2919 | char buf[64]; |
2667 | int r; | 2920 | int r; |
2668 | 2921 | ||
2669 | r = sprintf(buf, "%lu\n", tr->entries); | 2922 | r = sprintf(buf, "%lu\n", tr->entries >> 10); |
2670 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2923 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2671 | } | 2924 | } |
2672 | 2925 | ||
@@ -2677,7 +2930,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
2677 | unsigned long val; | 2930 | unsigned long val; |
2678 | char buf[64]; | 2931 | char buf[64]; |
2679 | int ret, cpu; | 2932 | int ret, cpu; |
2680 | struct trace_array *tr = filp->private_data; | ||
2681 | 2933 | ||
2682 | if (cnt >= sizeof(buf)) | 2934 | if (cnt >= sizeof(buf)) |
2683 | return -EINVAL; | 2935 | return -EINVAL; |
@@ -2697,12 +2949,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
2697 | 2949 | ||
2698 | mutex_lock(&trace_types_lock); | 2950 | mutex_lock(&trace_types_lock); |
2699 | 2951 | ||
2700 | if (tr->ctrl) { | 2952 | tracing_stop(); |
2701 | cnt = -EBUSY; | ||
2702 | pr_info("ftrace: please disable tracing" | ||
2703 | " before modifying buffer size\n"); | ||
2704 | goto out; | ||
2705 | } | ||
2706 | 2953 | ||
2707 | /* disable all cpu buffers */ | 2954 | /* disable all cpu buffers */ |
2708 | for_each_tracing_cpu(cpu) { | 2955 | for_each_tracing_cpu(cpu) { |
@@ -2712,6 +2959,9 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
2712 | atomic_inc(&max_tr.data[cpu]->disabled); | 2959 | atomic_inc(&max_tr.data[cpu]->disabled); |
2713 | } | 2960 | } |
2714 | 2961 | ||
2962 | /* value is in KB */ | ||
2963 | val <<= 10; | ||
2964 | |||
2715 | if (val != global_trace.entries) { | 2965 | if (val != global_trace.entries) { |
2716 | ret = ring_buffer_resize(global_trace.buffer, val); | 2966 | ret = ring_buffer_resize(global_trace.buffer, val); |
2717 | if (ret < 0) { | 2967 | if (ret < 0) { |
@@ -2750,6 +3000,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
2750 | atomic_dec(&max_tr.data[cpu]->disabled); | 3000 | atomic_dec(&max_tr.data[cpu]->disabled); |
2751 | } | 3001 | } |
2752 | 3002 | ||
3003 | tracing_start(); | ||
2753 | max_tr.entries = global_trace.entries; | 3004 | max_tr.entries = global_trace.entries; |
2754 | mutex_unlock(&trace_types_lock); | 3005 | mutex_unlock(&trace_types_lock); |
2755 | 3006 | ||
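The interface now works in kibibytes, matching the buffer_size_kb file created further down. Illustrative usage (debugfs path as in the readme above):

	/*
	 *   # echo 2048 > /debug/tracing/buffer_size_kb    (2 MB per cpu)
	 *   # cat /debug/tracing/buffer_size_kb
	 *   2048
	 */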
@@ -2772,9 +3023,8 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
2772 | { | 3023 | { |
2773 | char *buf; | 3024 | char *buf; |
2774 | char *end; | 3025 | char *end; |
2775 | struct trace_array *tr = &global_trace; | ||
2776 | 3026 | ||
2777 | if (!tr->ctrl || tracing_disabled) | 3027 | if (tracing_disabled) |
2778 | return -EINVAL; | 3028 | return -EINVAL; |
2779 | 3029 | ||
2780 | if (cnt > TRACE_BUF_SIZE) | 3030 | if (cnt > TRACE_BUF_SIZE) |
@@ -2840,22 +3090,38 @@ static struct file_operations tracing_mark_fops = { | |||
2840 | 3090 | ||
2841 | #ifdef CONFIG_DYNAMIC_FTRACE | 3091 | #ifdef CONFIG_DYNAMIC_FTRACE |
2842 | 3092 | ||
3093 | int __weak ftrace_arch_read_dyn_info(char *buf, int size) | ||
3094 | { | ||
3095 | return 0; | ||
3096 | } | ||
3097 | |||
2843 | static ssize_t | 3098 | static ssize_t |
2844 | tracing_read_long(struct file *filp, char __user *ubuf, | 3099 | tracing_read_dyn_info(struct file *filp, char __user *ubuf, |
2845 | size_t cnt, loff_t *ppos) | 3100 | size_t cnt, loff_t *ppos) |
2846 | { | 3101 | { |
3102 | static char ftrace_dyn_info_buffer[1024]; | ||
3103 | static DEFINE_MUTEX(dyn_info_mutex); | ||
2847 | unsigned long *p = filp->private_data; | 3104 | unsigned long *p = filp->private_data; |
2848 | char buf[64]; | 3105 | char *buf = ftrace_dyn_info_buffer; |
3106 | int size = ARRAY_SIZE(ftrace_dyn_info_buffer); | ||
2849 | int r; | 3107 | int r; |
2850 | 3108 | ||
2851 | r = sprintf(buf, "%ld\n", *p); | 3109 | mutex_lock(&dyn_info_mutex); |
3110 | r = sprintf(buf, "%ld ", *p); | ||
2852 | 3111 | ||
2853 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 3112 | r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r); |
3113 | buf[r++] = '\n'; | ||
3114 | |||
3115 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
3116 | |||
3117 | mutex_unlock(&dyn_info_mutex); | ||
3118 | |||
3119 | return r; | ||
2854 | } | 3120 | } |
2855 | 3121 | ||
2856 | static struct file_operations tracing_read_long_fops = { | 3122 | static struct file_operations tracing_dyn_info_fops = { |
2857 | .open = tracing_open_generic, | 3123 | .open = tracing_open_generic, |
2858 | .read = tracing_read_long, | 3124 | .read = tracing_read_dyn_info, |
2859 | }; | 3125 | }; |
2860 | #endif | 3126 | #endif |
2861 | 3127 | ||
@@ -2896,10 +3162,10 @@ static __init int tracer_init_debugfs(void) | |||
2896 | if (!entry) | 3162 | if (!entry) |
2897 | pr_warning("Could not create debugfs 'tracing_enabled' entry\n"); | 3163 | pr_warning("Could not create debugfs 'tracing_enabled' entry\n"); |
2898 | 3164 | ||
2899 | entry = debugfs_create_file("iter_ctrl", 0644, d_tracer, | 3165 | entry = debugfs_create_file("trace_options", 0644, d_tracer, |
2900 | NULL, &tracing_iter_fops); | 3166 | NULL, &tracing_iter_fops); |
2901 | if (!entry) | 3167 | if (!entry) |
2902 | pr_warning("Could not create debugfs 'iter_ctrl' entry\n"); | 3168 | pr_warning("Could not create debugfs 'trace_options' entry\n"); |
2903 | 3169 | ||
2904 | entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, | 3170 | entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, |
2905 | NULL, &tracing_cpumask_fops); | 3171 | NULL, &tracing_cpumask_fops); |
@@ -2949,11 +3215,11 @@ static __init int tracer_init_debugfs(void) | |||
2949 | pr_warning("Could not create debugfs " | 3215 | pr_warning("Could not create debugfs " |
2950 | "'trace_pipe' entry\n"); | 3216 | "'trace_pipe' entry\n"); |
2951 | 3217 | ||
2952 | entry = debugfs_create_file("trace_entries", 0644, d_tracer, | 3218 | entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer, |
2953 | &global_trace, &tracing_entries_fops); | 3219 | &global_trace, &tracing_entries_fops); |
2954 | if (!entry) | 3220 | if (!entry) |
2955 | pr_warning("Could not create debugfs " | 3221 | pr_warning("Could not create debugfs " |
2956 | "'trace_entries' entry\n"); | 3222 | "'buffer_size_kb' entry\n"); |
2957 | 3223 | ||
2958 | entry = debugfs_create_file("trace_marker", 0220, d_tracer, | 3224 | entry = debugfs_create_file("trace_marker", 0220, d_tracer, |
2959 | NULL, &tracing_mark_fops); | 3225 | NULL, &tracing_mark_fops); |
@@ -2964,7 +3230,7 @@ static __init int tracer_init_debugfs(void) | |||
2964 | #ifdef CONFIG_DYNAMIC_FTRACE | 3230 | #ifdef CONFIG_DYNAMIC_FTRACE |
2965 | entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, | 3231 | entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, |
2966 | &ftrace_update_tot_cnt, | 3232 | &ftrace_update_tot_cnt, |
2967 | &tracing_read_long_fops); | 3233 | &tracing_dyn_info_fops); |
2968 | if (!entry) | 3234 | if (!entry) |
2969 | pr_warning("Could not create debugfs " | 3235 | pr_warning("Could not create debugfs " |
2970 | "'dyn_ftrace_total_info' entry\n"); | 3236 | "'dyn_ftrace_total_info' entry\n"); |
@@ -2987,7 +3253,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
2987 | unsigned long flags, irq_flags; | 3253 | unsigned long flags, irq_flags; |
2988 | int cpu, len = 0, size, pc; | 3254 | int cpu, len = 0, size, pc; |
2989 | 3255 | ||
2990 | if (!tr->ctrl || tracing_disabled) | 3256 | if (tracing_disabled) |
2991 | return 0; | 3257 | return 0; |
2992 | 3258 | ||
2993 | pc = preempt_count(); | 3259 | pc = preempt_count(); |
@@ -3045,7 +3311,8 @@ EXPORT_SYMBOL_GPL(__ftrace_printk); | |||
3045 | static int trace_panic_handler(struct notifier_block *this, | 3311 | static int trace_panic_handler(struct notifier_block *this, |
3046 | unsigned long event, void *unused) | 3312 | unsigned long event, void *unused) |
3047 | { | 3313 | { |
3048 | ftrace_dump(); | 3314 | if (ftrace_dump_on_oops) |
3315 | ftrace_dump(); | ||
3049 | return NOTIFY_OK; | 3316 | return NOTIFY_OK; |
3050 | } | 3317 | } |
3051 | 3318 | ||
@@ -3061,7 +3328,8 @@ static int trace_die_handler(struct notifier_block *self, | |||
3061 | { | 3328 | { |
3062 | switch (val) { | 3329 | switch (val) { |
3063 | case DIE_OOPS: | 3330 | case DIE_OOPS: |
3064 | ftrace_dump(); | 3331 | if (ftrace_dump_on_oops) |
3332 | ftrace_dump(); | ||
3065 | break; | 3333 | break; |
3066 | default: | 3334 | default: |
3067 | break; | 3335 | break; |
@@ -3102,7 +3370,6 @@ trace_printk_seq(struct trace_seq *s) | |||
3102 | trace_seq_reset(s); | 3370 | trace_seq_reset(s); |
3103 | } | 3371 | } |
3104 | 3372 | ||
3105 | |||
3106 | void ftrace_dump(void) | 3373 | void ftrace_dump(void) |
3107 | { | 3374 | { |
3108 | static DEFINE_SPINLOCK(ftrace_dump_lock); | 3375 | static DEFINE_SPINLOCK(ftrace_dump_lock); |
@@ -3220,7 +3487,6 @@ __init static int tracer_alloc_buffers(void) | |||
3220 | #endif | 3487 | #endif |
3221 | 3488 | ||
3222 | /* All seems OK, enable tracing */ | 3489 | /* All seems OK, enable tracing */ |
3223 | global_trace.ctrl = tracer_enabled; | ||
3224 | tracing_disabled = 0; | 3490 | tracing_disabled = 0; |
3225 | 3491 | ||
3226 | atomic_notifier_chain_register(&panic_notifier_list, | 3492 | atomic_notifier_chain_register(&panic_notifier_list, |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 8465ad05270..790ea8c0e1f 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/ring_buffer.h> | 8 | #include <linux/ring_buffer.h> |
9 | #include <linux/mmiotrace.h> | 9 | #include <linux/mmiotrace.h> |
10 | #include <linux/ftrace.h> | 10 | #include <linux/ftrace.h> |
11 | #include <trace/boot.h> | ||
11 | 12 | ||
12 | enum trace_type { | 13 | enum trace_type { |
13 | __TRACE_FIRST_TYPE = 0, | 14 | __TRACE_FIRST_TYPE = 0, |
@@ -21,7 +22,10 @@ enum trace_type { | |||
21 | TRACE_SPECIAL, | 22 | TRACE_SPECIAL, |
22 | TRACE_MMIO_RW, | 23 | TRACE_MMIO_RW, |
23 | TRACE_MMIO_MAP, | 24 | TRACE_MMIO_MAP, |
24 | TRACE_BOOT, | 25 | TRACE_BRANCH, |
26 | TRACE_BOOT_CALL, | ||
27 | TRACE_BOOT_RET, | ||
28 | TRACE_FN_RET, | ||
25 | 29 | ||
26 | __TRACE_LAST_TYPE | 30 | __TRACE_LAST_TYPE |
27 | }; | 31 | }; |
@@ -48,6 +52,15 @@ struct ftrace_entry { | |||
48 | unsigned long ip; | 52 | unsigned long ip; |
49 | unsigned long parent_ip; | 53 | unsigned long parent_ip; |
50 | }; | 54 | }; |
55 | |||
56 | /* Function return entry */ | ||
57 | struct ftrace_ret_entry { | ||
58 | struct trace_entry ent; | ||
59 | unsigned long ip; | ||
60 | unsigned long parent_ip; | ||
61 | unsigned long long calltime; | ||
62 | unsigned long long rettime; | ||
63 | }; | ||
51 | extern struct tracer boot_tracer; | 64 | extern struct tracer boot_tracer; |
52 | 65 | ||
53 | /* | 66 | /* |
@@ -112,9 +125,24 @@ struct trace_mmiotrace_map { | |||
112 | struct mmiotrace_map map; | 125 | struct mmiotrace_map map; |
113 | }; | 126 | }; |
114 | 127 | ||
115 | struct trace_boot { | 128 | struct trace_boot_call { |
129 | struct trace_entry ent; | ||
130 | struct boot_trace_call boot_call; | ||
131 | }; | ||
132 | |||
133 | struct trace_boot_ret { | ||
116 | struct trace_entry ent; | 134 | struct trace_entry ent; |
117 | struct boot_trace initcall; | 135 | struct boot_trace_ret boot_ret; |
136 | }; | ||
137 | |||
138 | #define TRACE_FUNC_SIZE 30 | ||
139 | #define TRACE_FILE_SIZE 20 | ||
140 | struct trace_branch { | ||
141 | struct trace_entry ent; | ||
142 | unsigned line; | ||
143 | char func[TRACE_FUNC_SIZE+1]; | ||
144 | char file[TRACE_FILE_SIZE+1]; | ||
145 | char correct; | ||
118 | }; | 146 | }; |
119 | 147 | ||
120 | /* | 148 | /* |
@@ -172,7 +200,6 @@ struct trace_iterator; | |||
172 | struct trace_array { | 200 | struct trace_array { |
173 | struct ring_buffer *buffer; | 201 | struct ring_buffer *buffer; |
174 | unsigned long entries; | 202 | unsigned long entries; |
175 | long ctrl; | ||
176 | int cpu; | 203 | int cpu; |
177 | cycle_t time_start; | 204 | cycle_t time_start; |
178 | struct task_struct *waiter; | 205 | struct task_struct *waiter; |
@@ -218,7 +245,10 @@ extern void __ftrace_bad_type(void); | |||
218 | TRACE_MMIO_RW); \ | 245 | TRACE_MMIO_RW); \ |
219 | IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ | 246 | IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ |
220 | TRACE_MMIO_MAP); \ | 247 | TRACE_MMIO_MAP); \ |
221 | IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT); \ | 248 | IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\ |
249 | IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\ | ||
250 | IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \ | ||
251 | IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\ | ||
222 | __ftrace_bad_type(); \ | 252 | __ftrace_bad_type(); \ |
223 | } while (0) | 253 | } while (0) |
224 | 254 | ||
@@ -236,15 +266,14 @@ struct tracer { | |||
236 | const char *name; | 266 | const char *name; |
237 | void (*init)(struct trace_array *tr); | 267 | void (*init)(struct trace_array *tr); |
238 | void (*reset)(struct trace_array *tr); | 268 | void (*reset)(struct trace_array *tr); |
269 | void (*start)(struct trace_array *tr); | ||
270 | void (*stop)(struct trace_array *tr); | ||
239 | void (*open)(struct trace_iterator *iter); | 271 | void (*open)(struct trace_iterator *iter); |
240 | void (*pipe_open)(struct trace_iterator *iter); | 272 | void (*pipe_open)(struct trace_iterator *iter); |
241 | void (*close)(struct trace_iterator *iter); | 273 | void (*close)(struct trace_iterator *iter); |
242 | void (*start)(struct trace_iterator *iter); | ||
243 | void (*stop)(struct trace_iterator *iter); | ||
244 | ssize_t (*read)(struct trace_iterator *iter, | 274 | ssize_t (*read)(struct trace_iterator *iter, |
245 | struct file *filp, char __user *ubuf, | 275 | struct file *filp, char __user *ubuf, |
246 | size_t cnt, loff_t *ppos); | 276 | size_t cnt, loff_t *ppos); |
247 | void (*ctrl_update)(struct trace_array *tr); | ||
248 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 277 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
249 | int (*selftest)(struct tracer *trace, | 278 | int (*selftest)(struct tracer *trace, |
250 | struct trace_array *tr); | 279 | struct trace_array *tr); |
@@ -279,8 +308,11 @@ struct trace_iterator { | |||
279 | unsigned long iter_flags; | 308 | unsigned long iter_flags; |
280 | loff_t pos; | 309 | loff_t pos; |
281 | long idx; | 310 | long idx; |
311 | |||
312 | cpumask_t started; | ||
282 | }; | 313 | }; |
283 | 314 | ||
315 | int tracing_is_enabled(void); | ||
284 | void trace_wake_up(void); | 316 | void trace_wake_up(void); |
285 | void tracing_reset(struct trace_array *tr, int cpu); | 317 | void tracing_reset(struct trace_array *tr, int cpu); |
286 | int tracing_open_generic(struct inode *inode, struct file *filp); | 318 | int tracing_open_generic(struct inode *inode, struct file *filp); |
@@ -320,9 +352,14 @@ void trace_function(struct trace_array *tr, | |||
320 | unsigned long ip, | 352 | unsigned long ip, |
321 | unsigned long parent_ip, | 353 | unsigned long parent_ip, |
322 | unsigned long flags, int pc); | 354 | unsigned long flags, int pc); |
355 | void | ||
356 | trace_function_return(struct ftrace_retfunc *trace); | ||
323 | 357 | ||
324 | void tracing_start_cmdline_record(void); | 358 | void tracing_start_cmdline_record(void); |
325 | void tracing_stop_cmdline_record(void); | 359 | void tracing_stop_cmdline_record(void); |
360 | void tracing_sched_switch_assign_trace(struct trace_array *tr); | ||
361 | void tracing_stop_sched_switch_record(void); | ||
362 | void tracing_start_sched_switch_record(void); | ||
326 | int register_tracer(struct tracer *type); | 363 | int register_tracer(struct tracer *type); |
327 | void unregister_tracer(struct tracer *type); | 364 | void unregister_tracer(struct tracer *type); |
328 | 365 | ||
@@ -383,12 +420,18 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace, | |||
383 | struct trace_array *tr); | 420 | struct trace_array *tr); |
384 | extern int trace_selftest_startup_sysprof(struct tracer *trace, | 421 | extern int trace_selftest_startup_sysprof(struct tracer *trace, |
385 | struct trace_array *tr); | 422 | struct trace_array *tr); |
423 | extern int trace_selftest_startup_branch(struct tracer *trace, | ||
424 | struct trace_array *tr); | ||
386 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ | 425 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ |
387 | 426 | ||
388 | extern void *head_page(struct trace_array_cpu *data); | 427 | extern void *head_page(struct trace_array_cpu *data); |
389 | extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); | 428 | extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); |
390 | extern void trace_seq_print_cont(struct trace_seq *s, | 429 | extern void trace_seq_print_cont(struct trace_seq *s, |
391 | struct trace_iterator *iter); | 430 | struct trace_iterator *iter); |
431 | |||
432 | extern int | ||
433 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, | ||
434 | unsigned long sym_flags); | ||
392 | extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | 435 | extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, |
393 | size_t cnt); | 436 | size_t cnt); |
394 | extern long ns2usecs(cycle_t nsec); | 437 | extern long ns2usecs(cycle_t nsec); |
@@ -396,6 +439,17 @@ extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args); | |||
396 | 439 | ||
397 | extern unsigned long trace_flags; | 440 | extern unsigned long trace_flags; |
398 | 441 | ||
442 | /* Standard output formatting function used for function return traces */ | ||
443 | #ifdef CONFIG_FUNCTION_RET_TRACER | ||
444 | extern enum print_line_t print_return_function(struct trace_iterator *iter); | ||
445 | #else | ||
446 | static inline enum print_line_t | ||
447 | print_return_function(struct trace_iterator *iter) | ||
448 | { | ||
449 | return TRACE_TYPE_UNHANDLED; | ||
450 | } | ||
451 | #endif | ||
452 | |||
399 | /* | 453 | /* |
400 | * trace_iterator_flags is an enumeration that defines bit | 454 | * trace_iterator_flags is an enumeration that defines bit |
401 | * positions into trace_flags that controls the output. | 455 | * positions into trace_flags that controls the output. |
@@ -415,8 +469,92 @@ enum trace_iterator_flags { | |||
415 | TRACE_ITER_STACKTRACE = 0x100, | 469 | TRACE_ITER_STACKTRACE = 0x100, |
416 | TRACE_ITER_SCHED_TREE = 0x200, | 470 | TRACE_ITER_SCHED_TREE = 0x200, |
417 | TRACE_ITER_PRINTK = 0x400, | 471 | TRACE_ITER_PRINTK = 0x400, |
472 | TRACE_ITER_PREEMPTONLY = 0x800, | ||
473 | #ifdef CONFIG_BRANCH_TRACER | ||
474 | TRACE_ITER_BRANCH = 0x1000, | ||
475 | #endif | ||
476 | TRACE_ITER_ANNOTATE = 0x2000, | ||
418 | }; | 477 | }; |
419 | 478 | ||
479 | /* | ||
480 | * TRACE_ITER_SYM_MASK masks the options in trace_flags that | ||
481 | * control the output of kernel symbols. | ||
482 | */ | ||
483 | #define TRACE_ITER_SYM_MASK \ | ||
484 | (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR) | ||
485 | |||
420 | extern struct tracer nop_trace; | 486 | extern struct tracer nop_trace; |
421 | 487 | ||
488 | /** | ||
489 | * ftrace_preempt_disable - disable preemption scheduler safe | ||
490 | * | ||
491 | * When tracing can happen inside the scheduler, there exist | ||
492 | * cases where the tracing might happen before the need_resched | ||
493 | * flag is checked. If this happens and the tracer calls | ||
494 | * preempt_enable (after a disable), a schedule might take place | ||
495 | * causing an infinite recursion. | ||
496 | * | ||
497 | * To prevent this, we read the need_resched flag before | ||
498 | * disabling preemption. When we want to enable preemption we | ||
499 | * check the flag; if it is set, we call preempt_enable_no_resched. | ||
500 | * Otherwise, we call preempt_enable. | ||
501 | * | ||
502 | * The rationale for doing the above is that if need_resched is set | ||
503 | * and we have yet to reschedule, we are either in an atomic location | ||
504 | * (where we do not need to check for scheduling) or we are inside | ||
505 | * the scheduler and do not want to resched. | ||
506 | */ | ||
507 | static inline int ftrace_preempt_disable(void) | ||
508 | { | ||
509 | int resched; | ||
510 | |||
511 | resched = need_resched(); | ||
512 | preempt_disable_notrace(); | ||
513 | |||
514 | return resched; | ||
515 | } | ||
516 | |||
517 | /** | ||
518 | * ftrace_preempt_enable - enable preemption scheduler safe | ||
519 | * @resched: the return value from ftrace_preempt_disable | ||
520 | * | ||
521 | * This is a scheduler safe way to enable preemption and not miss | ||
522 | * any preemption checks. The disable call saved the preemption state. | ||
523 | * If resched is set, then we were either inside an atomic section or | ||
524 | * inside the scheduler (we would have already scheduled | ||
525 | * otherwise). In this case, we do not want to call normal | ||
526 | * preempt_enable, but preempt_enable_no_resched instead. | ||
527 | */ | ||
528 | static inline void ftrace_preempt_enable(int resched) | ||
529 | { | ||
530 | if (resched) | ||
531 | preempt_enable_no_resched_notrace(); | ||
532 | else | ||
533 | preempt_enable_notrace(); | ||
534 | } | ||
535 | |||
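A sketch of how a tracer callback pairs the two helpers (my_trace_hook is hypothetical; function_trace_call_preempt_only earlier in this patch is the in-tree user):

	static void my_trace_hook(unsigned long ip, unsigned long parent_ip)
	{
		int resched = ftrace_preempt_disable();

		/* ... record the event; no reschedule can recurse here ... */

		ftrace_preempt_enable(resched);
	}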
536 | #ifdef CONFIG_BRANCH_TRACER | ||
537 | extern int enable_branch_tracing(struct trace_array *tr); | ||
538 | extern void disable_branch_tracing(void); | ||
539 | static inline int trace_branch_enable(struct trace_array *tr) | ||
540 | { | ||
541 | if (trace_flags & TRACE_ITER_BRANCH) | ||
542 | return enable_branch_tracing(tr); | ||
543 | return 0; | ||
544 | } | ||
545 | static inline void trace_branch_disable(void) | ||
546 | { | ||
547 | /* due to races, always disable */ | ||
548 | disable_branch_tracing(); | ||
549 | } | ||
550 | #else | ||
551 | static inline int trace_branch_enable(struct trace_array *tr) | ||
552 | { | ||
553 | return 0; | ||
554 | } | ||
555 | static inline void trace_branch_disable(void) | ||
556 | { | ||
557 | } | ||
558 | #endif /* CONFIG_BRANCH_TRACER */ | ||
559 | |||
422 | #endif /* _LINUX_KERNEL_TRACE_H */ | 560 | #endif /* _LINUX_KERNEL_TRACE_H */ |
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index d0a5e50eeff..cb333b7fd11 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c | |||
@@ -13,23 +13,38 @@ | |||
13 | #include "trace.h" | 13 | #include "trace.h" |
14 | 14 | ||
15 | static struct trace_array *boot_trace; | 15 | static struct trace_array *boot_trace; |
16 | static int trace_boot_enabled; | 16 | static bool pre_initcalls_finished; |
17 | 17 | ||
18 | 18 | /* Tells the boot tracer that the pre_smp_initcalls are finished. | |
19 | /* Should be started after do_pre_smp_initcalls() in init/main.c */ | 19 | * So we are ready. |
20 | * It doesn't enable sched events tracing, however. | ||
21 | * You have to call enable_boot_trace to do so. | ||
22 | */ | ||
20 | void start_boot_trace(void) | 23 | void start_boot_trace(void) |
21 | { | 24 | { |
22 | trace_boot_enabled = 1; | 25 | pre_initcalls_finished = true; |
23 | } | 26 | } |
24 | 27 | ||
25 | void stop_boot_trace(void) | 28 | void enable_boot_trace(void) |
26 | { | 29 | { |
27 | trace_boot_enabled = 0; | 30 | if (pre_initcalls_finished) |
31 | tracing_start_sched_switch_record(); | ||
28 | } | 32 | } |
29 | 33 | ||
30 | void reset_boot_trace(struct trace_array *tr) | 34 | void disable_boot_trace(void) |
31 | { | 35 | { |
32 | stop_boot_trace(); | 36 | if (pre_initcalls_finished) |
37 | tracing_stop_sched_switch_record(); | ||
38 | } | ||
39 | |||
40 | static void reset_boot_trace(struct trace_array *tr) | ||
41 | { | ||
42 | int cpu; | ||
43 | |||
44 | tr->time_start = ftrace_now(tr->cpu); | ||
45 | |||
46 | for_each_online_cpu(cpu) | ||
47 | tracing_reset(tr, cpu); | ||
33 | } | 48 | } |
34 | 49 | ||
35 | static void boot_trace_init(struct trace_array *tr) | 50 | static void boot_trace_init(struct trace_array *tr) |
@@ -37,49 +52,77 @@ static void boot_trace_init(struct trace_array *tr) | |||
37 | int cpu; | 52 | int cpu; |
38 | boot_trace = tr; | 53 | boot_trace = tr; |
39 | 54 | ||
40 | trace_boot_enabled = 0; | ||
41 | |||
42 | for_each_cpu_mask(cpu, cpu_possible_map) | 55 | for_each_cpu_mask(cpu, cpu_possible_map) |
43 | tracing_reset(tr, cpu); | 56 | tracing_reset(tr, cpu); |
57 | |||
58 | tracing_sched_switch_assign_trace(tr); | ||
44 | } | 59 | } |
45 | 60 | ||
46 | static void boot_trace_ctrl_update(struct trace_array *tr) | 61 | static enum print_line_t |
62 | initcall_call_print_line(struct trace_iterator *iter) | ||
47 | { | 63 | { |
48 | if (tr->ctrl) | 64 | struct trace_entry *entry = iter->ent; |
49 | start_boot_trace(); | 65 | struct trace_seq *s = &iter->seq; |
66 | struct trace_boot_call *field; | ||
67 | struct boot_trace_call *call; | ||
68 | u64 ts; | ||
69 | unsigned long nsec_rem; | ||
70 | int ret; | ||
71 | |||
72 | trace_assign_type(field, entry); | ||
73 | call = &field->boot_call; | ||
74 | ts = iter->ts; | ||
75 | nsec_rem = do_div(ts, 1000000000); | ||
76 | |||
77 | ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n", | ||
78 | (unsigned long)ts, nsec_rem, call->func, call->caller); | ||
79 | |||
80 | if (!ret) | ||
81 | return TRACE_TYPE_PARTIAL_LINE; | ||
50 | else | 82 | else |
51 | stop_boot_trace(); | 83 | return TRACE_TYPE_HANDLED; |
52 | } | 84 | } |
53 | 85 | ||
54 | static enum print_line_t initcall_print_line(struct trace_iterator *iter) | 86 | static enum print_line_t |
87 | initcall_ret_print_line(struct trace_iterator *iter) | ||
55 | { | 88 | { |
56 | int ret; | ||
57 | struct trace_entry *entry = iter->ent; | 89 | struct trace_entry *entry = iter->ent; |
58 | struct trace_boot *field = (struct trace_boot *)entry; | ||
59 | struct boot_trace *it = &field->initcall; | ||
60 | struct trace_seq *s = &iter->seq; | 90 | struct trace_seq *s = &iter->seq; |
61 | struct timespec calltime = ktime_to_timespec(it->calltime); | 91 | struct trace_boot_ret *field; |
62 | struct timespec rettime = ktime_to_timespec(it->rettime); | 92 | struct boot_trace_ret *init_ret; |
63 | 93 | u64 ts; | |
64 | if (entry->type == TRACE_BOOT) { | 94 | unsigned long nsec_rem; |
65 | ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n", | 95 | int ret; |
66 | calltime.tv_sec, | 96 | |
67 | calltime.tv_nsec, | 97 | trace_assign_type(field, entry); |
68 | it->func, it->caller); | 98 | init_ret = &field->boot_ret; |
69 | if (!ret) | 99 | ts = iter->ts; |
70 | return TRACE_TYPE_PARTIAL_LINE; | 100 | nsec_rem = do_div(ts, 1000000000); |
71 | 101 | ||
72 | ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s " | 102 | ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s " |
73 | "returned %d after %lld msecs\n", | 103 | "returned %d after %llu msecs\n", |
74 | rettime.tv_sec, | 104 | (unsigned long) ts, |
75 | rettime.tv_nsec, | 105 | nsec_rem, |
76 | it->func, it->result, it->duration); | 106 | init_ret->func, init_ret->result, init_ret->duration); |
77 | 107 | ||
78 | if (!ret) | 108 | if (!ret) |
79 | return TRACE_TYPE_PARTIAL_LINE; | 109 | return TRACE_TYPE_PARTIAL_LINE; |
110 | else | ||
80 | return TRACE_TYPE_HANDLED; | 111 | return TRACE_TYPE_HANDLED; |
112 | } | ||
113 | |||
114 | static enum print_line_t initcall_print_line(struct trace_iterator *iter) | ||
115 | { | ||
116 | struct trace_entry *entry = iter->ent; | ||
117 | |||
118 | switch (entry->type) { | ||
119 | case TRACE_BOOT_CALL: | ||
120 | return initcall_call_print_line(iter); | ||
121 | case TRACE_BOOT_RET: | ||
122 | return initcall_ret_print_line(iter); | ||
123 | default: | ||
124 | return TRACE_TYPE_UNHANDLED; | ||
81 | } | 125 | } |
82 | return TRACE_TYPE_UNHANDLED; | ||
83 | } | 126 | } |
84 | 127 | ||
85 | struct tracer boot_tracer __read_mostly = | 128 | struct tracer boot_tracer __read_mostly = |
@@ -87,27 +130,53 @@ struct tracer boot_tracer __read_mostly = | |||
87 | .name = "initcall", | 130 | .name = "initcall", |
88 | .init = boot_trace_init, | 131 | .init = boot_trace_init, |
89 | .reset = reset_boot_trace, | 132 | .reset = reset_boot_trace, |
90 | .ctrl_update = boot_trace_ctrl_update, | ||
91 | .print_line = initcall_print_line, | 133 | .print_line = initcall_print_line, |
92 | }; | 134 | }; |
93 | 135 | ||
94 | void trace_boot(struct boot_trace *it, initcall_t fn) | 136 | void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) |
95 | { | 137 | { |
96 | struct ring_buffer_event *event; | 138 | struct ring_buffer_event *event; |
97 | struct trace_boot *entry; | 139 | struct trace_boot_call *entry; |
98 | struct trace_array_cpu *data; | ||
99 | unsigned long irq_flags; | 140 | unsigned long irq_flags; |
100 | struct trace_array *tr = boot_trace; | 141 | struct trace_array *tr = boot_trace; |
101 | 142 | ||
102 | if (!trace_boot_enabled) | 143 | if (!pre_initcalls_finished) |
103 | return; | 144 | return; |
104 | 145 | ||
105 | /* Get its name now since this function could | 146 | /* Get its name now since this function could |
106 | * disappear because it is in the .init section. | 147 | * disappear because it is in the .init section. |
107 | */ | 148 | */ |
108 | sprint_symbol(it->func, (unsigned long)fn); | 149 | sprint_symbol(bt->func, (unsigned long)fn); |
150 | preempt_disable(); | ||
151 | |||
152 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
153 | &irq_flags); | ||
154 | if (!event) | ||
155 | goto out; | ||
156 | entry = ring_buffer_event_data(event); | ||
157 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
158 | entry->ent.type = TRACE_BOOT_CALL; | ||
159 | entry->boot_call = *bt; | ||
160 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
161 | |||
162 | trace_wake_up(); | ||
163 | |||
164 | out: | ||
165 | preempt_enable(); | ||
166 | } | ||
167 | |||
168 | void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) | ||
169 | { | ||
170 | struct ring_buffer_event *event; | ||
171 | struct trace_boot_ret *entry; | ||
172 | unsigned long irq_flags; | ||
173 | struct trace_array *tr = boot_trace; | ||
174 | |||
175 | if (!pre_initcalls_finished) | ||
176 | return; | ||
177 | |||
178 | sprint_symbol(bt->func, (unsigned long)fn); | ||
109 | preempt_disable(); | 179 | preempt_disable(); |
110 | data = tr->data[smp_processor_id()]; | ||
111 | 180 | ||
112 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 181 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), |
113 | &irq_flags); | 182 | &irq_flags); |
@@ -115,8 +184,8 @@ void trace_boot(struct boot_trace *it, initcall_t fn) | |||
115 | goto out; | 184 | goto out; |
116 | entry = ring_buffer_event_data(event); | 185 | entry = ring_buffer_event_data(event); |
117 | tracing_generic_entry_update(&entry->ent, 0, 0); | 186 | tracing_generic_entry_update(&entry->ent, 0, 0); |
118 | entry->ent.type = TRACE_BOOT; | 187 | entry->ent.type = TRACE_BOOT_RET; |
119 | entry->initcall = *it; | 188 | entry->boot_ret = *bt; |
120 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 189 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
121 | 190 | ||
122 | trace_wake_up(); | 191 | trace_wake_up(); |
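
With trace_boot() split into trace_boot_call() and trace_boot_ret(), the hook in init/main.c (not part of this diff) records each initcall twice: once on entry and once on return. A hedged sketch of such a caller; the struct fields follow the print routines above, while the helper name and the timing arithmetic are assumptions:

	static int __init run_traced_initcall(initcall_t fn)
	{
		struct boot_trace_call call;
		struct boot_trace_ret ret;
		ktime_t t0;

		call.caller = task_pid_nr(current);	/* printed as "@ %i" */
		trace_boot_call(&call, fn);	/* fills call.func via sprint_symbol() */

		t0 = ktime_get();
		ret.result = fn();
		ret.duration = ktime_to_ms(ktime_sub(ktime_get(), t0));
		trace_boot_ret(&ret, fn);	/* fills ret.func likewise */

		return ret.result;
	}
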
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c new file mode 100644 index 00000000000..85265553918 --- /dev/null +++ b/kernel/trace/trace_branch.c | |||
@@ -0,0 +1,320 @@ | |||
1 | /* | ||
2 | * unlikely profiler | ||
3 | * | ||
4 | * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> | ||
5 | */ | ||
6 | #include <linux/kallsyms.h> | ||
7 | #include <linux/seq_file.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <linux/debugfs.h> | ||
10 | #include <linux/uaccess.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/ftrace.h> | ||
13 | #include <linux/hash.h> | ||
14 | #include <linux/fs.h> | ||
15 | #include <asm/local.h> | ||
16 | #include "trace.h" | ||
17 | |||
18 | #ifdef CONFIG_BRANCH_TRACER | ||
19 | |||
20 | static int branch_tracing_enabled __read_mostly; | ||
21 | static DEFINE_MUTEX(branch_tracing_mutex); | ||
22 | static struct trace_array *branch_tracer; | ||
23 | |||
24 | static void | ||
25 | probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) | ||
26 | { | ||
27 | struct trace_array *tr = branch_tracer; | ||
28 | struct ring_buffer_event *event; | ||
29 | struct trace_branch *entry; | ||
30 | unsigned long flags, irq_flags; | ||
31 | int cpu, pc; | ||
32 | const char *p; | ||
33 | |||
34 | /* | ||
35 | * I would love to save just the ftrace_likely_data pointer, but | ||
36 | * this code can also be used by modules. Ugly things can happen | ||
37 | * if the module is unloaded, and then we go and read the | ||
38 | * pointer. This is slower, but much safer. | ||
39 | */ | ||
40 | |||
41 | if (unlikely(!tr)) | ||
42 | return; | ||
43 | |||
44 | local_irq_save(flags); | ||
45 | cpu = raw_smp_processor_id(); | ||
46 | if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) | ||
47 | goto out; | ||
48 | |||
49 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
50 | &irq_flags); | ||
51 | if (!event) | ||
52 | goto out; | ||
53 | |||
54 | pc = preempt_count(); | ||
55 | entry = ring_buffer_event_data(event); | ||
56 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
57 | entry->ent.type = TRACE_BRANCH; | ||
58 | |||
59 | /* Strip off the path, only save the file */ | ||
60 | p = f->file + strlen(f->file); | ||
61 | while (p >= f->file && *p != '/') | ||
62 | p--; | ||
63 | p++; | ||
64 | |||
65 | strncpy(entry->func, f->func, TRACE_FUNC_SIZE); | ||
66 | strncpy(entry->file, p, TRACE_FILE_SIZE); | ||
67 | entry->func[TRACE_FUNC_SIZE] = 0; | ||
68 | entry->file[TRACE_FILE_SIZE] = 0; | ||
69 | entry->line = f->line; | ||
70 | entry->correct = val == expect; | ||
71 | |||
72 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
73 | |||
74 | out: | ||
75 | atomic_dec(&tr->data[cpu]->disabled); | ||
76 | local_irq_restore(flags); | ||
77 | } | ||
78 | |||
79 | static inline | ||
80 | void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect) | ||
81 | { | ||
82 | if (!branch_tracing_enabled) | ||
83 | return; | ||
84 | |||
85 | probe_likely_condition(f, val, expect); | ||
86 | } | ||
87 | |||
88 | int enable_branch_tracing(struct trace_array *tr) | ||
89 | { | ||
90 | int ret = 0; | ||
91 | |||
92 | mutex_lock(&branch_tracing_mutex); | ||
93 | branch_tracer = tr; | ||
94 | /* | ||
95 | * branch_tracer must be visible before enabling. The read | ||
96 | * side is a plain conditional, so no matching rmb() is needed. | ||
97 | */ | ||
98 | smp_wmb(); | ||
99 | branch_tracing_enabled++; | ||
100 | mutex_unlock(&branch_tracing_mutex); | ||
101 | |||
102 | return ret; | ||
103 | } | ||
104 | |||
105 | void disable_branch_tracing(void) | ||
106 | { | ||
107 | mutex_lock(&branch_tracing_mutex); | ||
108 | |||
109 | if (!branch_tracing_enabled) | ||
110 | goto out_unlock; | ||
111 | |||
112 | branch_tracing_enabled--; | ||
113 | |||
114 | out_unlock: | ||
115 | mutex_unlock(&branch_tracing_mutex); | ||
116 | } | ||
117 | |||
118 | static void start_branch_trace(struct trace_array *tr) | ||
119 | { | ||
120 | enable_branch_tracing(tr); | ||
121 | } | ||
122 | |||
123 | static void stop_branch_trace(struct trace_array *tr) | ||
124 | { | ||
125 | disable_branch_tracing(); | ||
126 | } | ||
127 | |||
128 | static void branch_trace_init(struct trace_array *tr) | ||
129 | { | ||
130 | int cpu; | ||
131 | |||
132 | for_each_online_cpu(cpu) | ||
133 | tracing_reset(tr, cpu); | ||
134 | |||
135 | start_branch_trace(tr); | ||
136 | } | ||
137 | |||
138 | static void branch_trace_reset(struct trace_array *tr) | ||
139 | { | ||
140 | stop_branch_trace(tr); | ||
141 | } | ||
142 | |||
143 | struct tracer branch_trace __read_mostly = | ||
144 | { | ||
145 | .name = "branch", | ||
146 | .init = branch_trace_init, | ||
147 | .reset = branch_trace_reset, | ||
148 | #ifdef CONFIG_FTRACE_SELFTEST | ||
149 | .selftest = trace_selftest_startup_branch, | ||
150 | #endif | ||
151 | }; | ||
152 | |||
153 | __init static int init_branch_trace(void) | ||
154 | { | ||
155 | return register_tracer(&branch_trace); | ||
156 | } | ||
157 | |||
158 | device_initcall(init_branch_trace); | ||
159 | #else | ||
160 | static inline | ||
161 | void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect) | ||
162 | { | ||
163 | } | ||
164 | #endif /* CONFIG_BRANCH_TRACER */ | ||
165 | |||
166 | void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect) | ||
167 | { | ||
168 | /* | ||
169 | * I would love to have a trace point here instead, but the | ||
170 | * trace point code is so inundated with unlikely and likely | ||
171 | * conditions that the recursive nightmare that exists is too | ||
172 | * much to try to get working. At least for now. | ||
173 | */ | ||
174 | trace_likely_condition(f, val, expect); | ||
175 | |||
176 | /* FIXME: Make this atomic! */ | ||
177 | if (val == expect) | ||
178 | f->correct++; | ||
179 | else | ||
180 | f->incorrect++; | ||
181 | } | ||
182 | EXPORT_SYMBOL(ftrace_likely_update); | ||
183 | |||
184 | struct ftrace_pointer { | ||
185 | void *start; | ||
186 | void *stop; | ||
187 | }; | ||
188 | |||
189 | static void * | ||
190 | t_next(struct seq_file *m, void *v, loff_t *pos) | ||
191 | { | ||
192 | struct ftrace_pointer *f = m->private; | ||
193 | struct ftrace_branch_data *p = v; | ||
194 | |||
195 | (*pos)++; | ||
196 | |||
197 | if (v == (void *)1) | ||
198 | return f->start; | ||
199 | |||
200 | ++p; | ||
201 | |||
202 | if ((void *)p >= (void *)f->stop) | ||
203 | return NULL; | ||
204 | |||
205 | return p; | ||
206 | } | ||
207 | |||
208 | static void *t_start(struct seq_file *m, loff_t *pos) | ||
209 | { | ||
210 | void *t = (void *)1; | ||
211 | loff_t l = 0; | ||
212 | |||
213 | for (; t && l < *pos; t = t_next(m, t, &l)) | ||
214 | ; | ||
215 | |||
216 | return t; | ||
217 | } | ||
218 | |||
219 | static void t_stop(struct seq_file *m, void *p) | ||
220 | { | ||
221 | } | ||
222 | |||
223 | static int t_show(struct seq_file *m, void *v) | ||
224 | { | ||
225 | struct ftrace_branch_data *p = v; | ||
226 | const char *f; | ||
227 | unsigned long percent; | ||
228 | |||
229 | if (v == (void *)1) { | ||
230 | seq_printf(m, " correct incorrect %% " | ||
231 | " Function " | ||
232 | " File Line\n" | ||
233 | " ------- --------- - " | ||
234 | " -------- " | ||
235 | " ---- ----\n"); | ||
236 | return 0; | ||
237 | } | ||
238 | |||
239 | /* Only print the file, not the path */ | ||
240 | f = p->file + strlen(p->file); | ||
241 | while (f >= p->file && *f != '/') | ||
242 | f--; | ||
243 | f++; | ||
244 | |||
245 | if (p->correct) { | ||
246 | percent = p->incorrect * 100; | ||
247 | percent /= p->correct + p->incorrect; | ||
248 | } else | ||
249 | percent = p->incorrect ? 100 : 0; | ||
250 | |||
251 | seq_printf(m, "%8lu %8lu %3lu ", p->correct, p->incorrect, percent); | ||
252 | seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line); | ||
253 | return 0; | ||
254 | } | ||
255 | |||
256 | static struct seq_operations tracing_likely_seq_ops = { | ||
257 | .start = t_start, | ||
258 | .next = t_next, | ||
259 | .stop = t_stop, | ||
260 | .show = t_show, | ||
261 | }; | ||
262 | |||
263 | static int tracing_likely_open(struct inode *inode, struct file *file) | ||
264 | { | ||
265 | int ret; | ||
266 | |||
267 | ret = seq_open(file, &tracing_likely_seq_ops); | ||
268 | if (!ret) { | ||
269 | struct seq_file *m = file->private_data; | ||
270 | m->private = (void *)inode->i_private; | ||
271 | } | ||
272 | |||
273 | return ret; | ||
274 | } | ||
275 | |||
276 | static struct file_operations tracing_likely_fops = { | ||
277 | .open = tracing_likely_open, | ||
278 | .read = seq_read, | ||
279 | .llseek = seq_lseek, | ||
280 | }; | ||
281 | |||
282 | extern unsigned long __start_likely_profile[]; | ||
283 | extern unsigned long __stop_likely_profile[]; | ||
284 | extern unsigned long __start_unlikely_profile[]; | ||
285 | extern unsigned long __stop_unlikely_profile[]; | ||
286 | |||
287 | static struct ftrace_pointer ftrace_likely_pos = { | ||
288 | .start = __start_likely_profile, | ||
289 | .stop = __stop_likely_profile, | ||
290 | }; | ||
291 | |||
292 | static struct ftrace_pointer ftrace_unlikely_pos = { | ||
293 | .start = __start_unlikely_profile, | ||
294 | .stop = __stop_unlikely_profile, | ||
295 | }; | ||
296 | |||
297 | static __init int ftrace_branch_init(void) | ||
298 | { | ||
299 | struct dentry *d_tracer; | ||
300 | struct dentry *entry; | ||
301 | |||
302 | d_tracer = tracing_init_dentry(); | ||
303 | |||
304 | entry = debugfs_create_file("profile_likely", 0444, d_tracer, | ||
305 | &ftrace_likely_pos, | ||
306 | &tracing_likely_fops); | ||
307 | if (!entry) | ||
308 | pr_warning("Could not create debugfs 'profile_likely' entry\n"); | ||
309 | |||
310 | entry = debugfs_create_file("profile_unlikely", 0444, d_tracer, | ||
311 | &ftrace_unlikely_pos, | ||
312 | &tracing_likely_fops); | ||
313 | if (!entry) | ||
314 | pr_warning("Could not create debugfs" | ||
315 | " 'profile_unlikely' entry\n"); | ||
316 | |||
317 | return 0; | ||
318 | } | ||
319 | |||
320 | device_initcall(ftrace_branch_init); | ||
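
ftrace_likely_update() is the funnel for the annotations themselves; the include/linux/compiler.h side is not in this file. Roughly, each likely()/unlikely() site gets a static struct ftrace_branch_data placed in one of the profile sections bounded by the __start_*/__stop_* symbols declared above. The macro shape below is an approximation and the section name is an assumption:

	#define unlikely(cond) ({						\
		static struct ftrace_branch_data ______f			\
			__attribute__((section("_ftrace_unlikely"))) = {	\
			.func = __func__,					\
			.file = __FILE__,					\
			.line = __LINE__,					\
		};								\
		int ______r = !!(cond);						\
		ftrace_likely_update(&______f, ______r, 0 /* expect */);	\
		______r;							\
	})
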
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 0f85a64003d..8693b7a0a5b 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -44,22 +44,17 @@ static void stop_function_trace(struct trace_array *tr) | |||
44 | 44 | ||
45 | static void function_trace_init(struct trace_array *tr) | 45 | static void function_trace_init(struct trace_array *tr) |
46 | { | 46 | { |
47 | if (tr->ctrl) | 47 | start_function_trace(tr); |
48 | start_function_trace(tr); | ||
49 | } | 48 | } |
50 | 49 | ||
51 | static void function_trace_reset(struct trace_array *tr) | 50 | static void function_trace_reset(struct trace_array *tr) |
52 | { | 51 | { |
53 | if (tr->ctrl) | 52 | stop_function_trace(tr); |
54 | stop_function_trace(tr); | ||
55 | } | 53 | } |
56 | 54 | ||
57 | static void function_trace_ctrl_update(struct trace_array *tr) | 55 | static void function_trace_start(struct trace_array *tr) |
58 | { | 56 | { |
59 | if (tr->ctrl) | 57 | function_reset(tr); |
60 | start_function_trace(tr); | ||
61 | else | ||
62 | stop_function_trace(tr); | ||
63 | } | 58 | } |
64 | 59 | ||
65 | static struct tracer function_trace __read_mostly = | 60 | static struct tracer function_trace __read_mostly = |
@@ -67,7 +62,7 @@ static struct tracer function_trace __read_mostly = | |||
67 | .name = "function", | 62 | .name = "function", |
68 | .init = function_trace_init, | 63 | .init = function_trace_init, |
69 | .reset = function_trace_reset, | 64 | .reset = function_trace_reset, |
70 | .ctrl_update = function_trace_ctrl_update, | 65 | .start = function_trace_start, |
71 | #ifdef CONFIG_FTRACE_SELFTEST | 66 | #ifdef CONFIG_FTRACE_SELFTEST |
72 | .selftest = trace_selftest_startup_function, | 67 | .selftest = trace_selftest_startup_function, |
73 | #endif | 68 | #endif |
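
This conversion is the pattern repeated across the whole patch: the single ->ctrl_update callback, which had to branch on the now-removed tr->ctrl flag, is split into ->start and ->stop callbacks that the tracing core invokes directly. Schematically (generic tracer names):

	/* Before: one callback, branching on a flag. */
	static void foo_ctrl_update(struct trace_array *tr)
	{
		if (tr->ctrl)
			start_foo_trace(tr);
		else
			stop_foo_trace(tr);
	}

	/* After: the core calls these directly when tracing is
	 * (re)started or stopped; there is no flag to inspect. */
	static void foo_start(struct trace_array *tr)
	{
		start_foo_trace(tr);
	}

	static void foo_stop(struct trace_array *tr)
	{
		stop_foo_trace(tr);
	}
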
diff --git a/kernel/trace/trace_functions_return.c b/kernel/trace/trace_functions_return.c new file mode 100644 index 00000000000..7680b21537d --- /dev/null +++ b/kernel/trace/trace_functions_return.c | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * | ||
3 | * Function return tracer. | ||
4 | * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
5 | * Mostly borrowed from function tracer which | ||
6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> | ||
7 | * | ||
8 | */ | ||
9 | #include <linux/debugfs.h> | ||
10 | #include <linux/uaccess.h> | ||
11 | #include <linux/ftrace.h> | ||
12 | #include <linux/fs.h> | ||
13 | |||
14 | #include "trace.h" | ||
15 | |||
16 | |||
17 | static void start_return_trace(struct trace_array *tr) | ||
18 | { | ||
19 | register_ftrace_return(&trace_function_return); | ||
20 | } | ||
21 | |||
22 | static void stop_return_trace(struct trace_array *tr) | ||
23 | { | ||
24 | unregister_ftrace_return(); | ||
25 | } | ||
26 | |||
27 | static void return_trace_init(struct trace_array *tr) | ||
28 | { | ||
29 | int cpu; | ||
30 | for_each_online_cpu(cpu) | ||
31 | tracing_reset(tr, cpu); | ||
32 | |||
33 | start_return_trace(tr); | ||
34 | } | ||
35 | |||
36 | static void return_trace_reset(struct trace_array *tr) | ||
37 | { | ||
38 | stop_return_trace(tr); | ||
39 | } | ||
40 | |||
41 | |||
42 | enum print_line_t | ||
43 | print_return_function(struct trace_iterator *iter) | ||
44 | { | ||
45 | struct trace_seq *s = &iter->seq; | ||
46 | struct trace_entry *entry = iter->ent; | ||
47 | struct ftrace_ret_entry *field; | ||
48 | int ret; | ||
49 | |||
50 | if (entry->type == TRACE_FN_RET) { | ||
51 | trace_assign_type(field, entry); | ||
52 | ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip); | ||
53 | if (!ret) | ||
54 | return TRACE_TYPE_PARTIAL_LINE; | ||
55 | ret = seq_print_ip_sym(s, field->ip, | ||
56 | trace_flags & TRACE_ITER_SYM_MASK); | ||
57 | if (!ret) | ||
58 | return TRACE_TYPE_PARTIAL_LINE; | ||
59 | ret = trace_seq_printf(s, " (%llu ns)\n", | ||
60 | field->rettime - field->calltime); | ||
61 | if (!ret) | ||
62 | return TRACE_TYPE_PARTIAL_LINE; | ||
63 | else | ||
64 | return TRACE_TYPE_HANDLED; | ||
65 | } | ||
66 | return TRACE_TYPE_UNHANDLED; | ||
67 | } | ||
68 | |||
69 | static struct tracer return_trace __read_mostly = | ||
70 | { | ||
71 | .name = "return", | ||
72 | .init = return_trace_init, | ||
73 | .reset = return_trace_reset, | ||
74 | .print_line = print_return_function | ||
75 | }; | ||
76 | |||
77 | static __init int init_return_trace(void) | ||
78 | { | ||
79 | return register_tracer(&return_trace); | ||
80 | } | ||
81 | |||
82 | device_initcall(init_return_trace); | ||
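
Given the format strings in print_return_function(), each entry renders as caller -> callee followed by the time spent in the callee. An illustrative output line (symbols and numbers invented):

	sys_read+0x4c/0x90 -> vfs_read (2376 ns)
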
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 9c74071c10e..d919d4eaa7c 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -353,15 +353,28 @@ void trace_preempt_off(unsigned long a0, unsigned long a1) | |||
353 | } | 353 | } |
354 | #endif /* CONFIG_PREEMPT_TRACER */ | 354 | #endif /* CONFIG_PREEMPT_TRACER */ |
355 | 355 | ||
356 | /* | ||
357 | * save_tracer_enabled is used to save the state of the tracer_enabled | ||
358 | * variable when we disable it when we open a trace output file. | ||
359 | */ | ||
360 | static int save_tracer_enabled; | ||
361 | |||
356 | static void start_irqsoff_tracer(struct trace_array *tr) | 362 | static void start_irqsoff_tracer(struct trace_array *tr) |
357 | { | 363 | { |
358 | register_ftrace_function(&trace_ops); | 364 | register_ftrace_function(&trace_ops); |
359 | tracer_enabled = 1; | 365 | if (tracing_is_enabled()) { |
366 | tracer_enabled = 1; | ||
367 | save_tracer_enabled = 1; | ||
368 | } else { | ||
369 | tracer_enabled = 0; | ||
370 | save_tracer_enabled = 0; | ||
371 | } | ||
360 | } | 372 | } |
361 | 373 | ||
362 | static void stop_irqsoff_tracer(struct trace_array *tr) | 374 | static void stop_irqsoff_tracer(struct trace_array *tr) |
363 | { | 375 | { |
364 | tracer_enabled = 0; | 376 | tracer_enabled = 0; |
377 | save_tracer_enabled = 0; | ||
365 | unregister_ftrace_function(&trace_ops); | 378 | unregister_ftrace_function(&trace_ops); |
366 | } | 379 | } |
367 | 380 | ||
@@ -370,36 +383,36 @@ static void __irqsoff_tracer_init(struct trace_array *tr) | |||
370 | irqsoff_trace = tr; | 383 | irqsoff_trace = tr; |
371 | /* make sure that the tracer is visible */ | 384 | /* make sure that the tracer is visible */ |
372 | smp_wmb(); | 385 | smp_wmb(); |
373 | 386 | start_irqsoff_tracer(tr); | |
374 | if (tr->ctrl) | ||
375 | start_irqsoff_tracer(tr); | ||
376 | } | 387 | } |
377 | 388 | ||
378 | static void irqsoff_tracer_reset(struct trace_array *tr) | 389 | static void irqsoff_tracer_reset(struct trace_array *tr) |
379 | { | 390 | { |
380 | if (tr->ctrl) | 391 | stop_irqsoff_tracer(tr); |
381 | stop_irqsoff_tracer(tr); | ||
382 | } | 392 | } |
383 | 393 | ||
384 | static void irqsoff_tracer_ctrl_update(struct trace_array *tr) | 394 | static void irqsoff_tracer_start(struct trace_array *tr) |
385 | { | 395 | { |
386 | if (tr->ctrl) | 396 | tracer_enabled = 1; |
387 | start_irqsoff_tracer(tr); | 397 | save_tracer_enabled = 1; |
388 | else | 398 | } |
389 | stop_irqsoff_tracer(tr); | 399 | |
400 | static void irqsoff_tracer_stop(struct trace_array *tr) | ||
401 | { | ||
402 | tracer_enabled = 0; | ||
403 | save_tracer_enabled = 0; | ||
390 | } | 404 | } |
391 | 405 | ||
392 | static void irqsoff_tracer_open(struct trace_iterator *iter) | 406 | static void irqsoff_tracer_open(struct trace_iterator *iter) |
393 | { | 407 | { |
394 | /* stop the trace while dumping */ | 408 | /* stop the trace while dumping */ |
395 | if (iter->tr->ctrl) | 409 | tracer_enabled = 0; |
396 | stop_irqsoff_tracer(iter->tr); | ||
397 | } | 410 | } |
398 | 411 | ||
399 | static void irqsoff_tracer_close(struct trace_iterator *iter) | 412 | static void irqsoff_tracer_close(struct trace_iterator *iter) |
400 | { | 413 | { |
401 | if (iter->tr->ctrl) | 414 | /* restart tracing */ |
402 | start_irqsoff_tracer(iter->tr); | 415 | tracer_enabled = save_tracer_enabled; |
403 | } | 416 | } |
404 | 417 | ||
405 | #ifdef CONFIG_IRQSOFF_TRACER | 418 | #ifdef CONFIG_IRQSOFF_TRACER |
@@ -414,9 +427,10 @@ static struct tracer irqsoff_tracer __read_mostly = | |||
414 | .name = "irqsoff", | 427 | .name = "irqsoff", |
415 | .init = irqsoff_tracer_init, | 428 | .init = irqsoff_tracer_init, |
416 | .reset = irqsoff_tracer_reset, | 429 | .reset = irqsoff_tracer_reset, |
430 | .start = irqsoff_tracer_start, | ||
431 | .stop = irqsoff_tracer_stop, | ||
417 | .open = irqsoff_tracer_open, | 432 | .open = irqsoff_tracer_open, |
418 | .close = irqsoff_tracer_close, | 433 | .close = irqsoff_tracer_close, |
419 | .ctrl_update = irqsoff_tracer_ctrl_update, | ||
420 | .print_max = 1, | 434 | .print_max = 1, |
421 | #ifdef CONFIG_FTRACE_SELFTEST | 435 | #ifdef CONFIG_FTRACE_SELFTEST |
422 | .selftest = trace_selftest_startup_irqsoff, | 436 | .selftest = trace_selftest_startup_irqsoff, |
@@ -440,9 +454,10 @@ static struct tracer preemptoff_tracer __read_mostly = | |||
440 | .name = "preemptoff", | 454 | .name = "preemptoff", |
441 | .init = preemptoff_tracer_init, | 455 | .init = preemptoff_tracer_init, |
442 | .reset = irqsoff_tracer_reset, | 456 | .reset = irqsoff_tracer_reset, |
457 | .start = irqsoff_tracer_start, | ||
458 | .stop = irqsoff_tracer_stop, | ||
443 | .open = irqsoff_tracer_open, | 459 | .open = irqsoff_tracer_open, |
444 | .close = irqsoff_tracer_close, | 460 | .close = irqsoff_tracer_close, |
445 | .ctrl_update = irqsoff_tracer_ctrl_update, | ||
446 | .print_max = 1, | 461 | .print_max = 1, |
447 | #ifdef CONFIG_FTRACE_SELFTEST | 462 | #ifdef CONFIG_FTRACE_SELFTEST |
448 | .selftest = trace_selftest_startup_preemptoff, | 463 | .selftest = trace_selftest_startup_preemptoff, |
@@ -468,9 +483,10 @@ static struct tracer preemptirqsoff_tracer __read_mostly = | |||
468 | .name = "preemptirqsoff", | 483 | .name = "preemptirqsoff", |
469 | .init = preemptirqsoff_tracer_init, | 484 | .init = preemptirqsoff_tracer_init, |
470 | .reset = irqsoff_tracer_reset, | 485 | .reset = irqsoff_tracer_reset, |
486 | .start = irqsoff_tracer_start, | ||
487 | .stop = irqsoff_tracer_stop, | ||
471 | .open = irqsoff_tracer_open, | 488 | .open = irqsoff_tracer_open, |
472 | .close = irqsoff_tracer_close, | 489 | .close = irqsoff_tracer_close, |
473 | .ctrl_update = irqsoff_tracer_ctrl_update, | ||
474 | .print_max = 1, | 490 | .print_max = 1, |
475 | #ifdef CONFIG_FTRACE_SELFTEST | 491 | #ifdef CONFIG_FTRACE_SELFTEST |
476 | .selftest = trace_selftest_startup_preemptirqsoff, | 492 | .selftest = trace_selftest_startup_preemptirqsoff, |
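
Both halves of the open/close pair now only toggle tracer_enabled instead of tearing the tracer down: the ftrace hook stays registered while a trace file is being read, and close restores whatever start/stop last saved. The lifecycle, as a sketch (illustrative call sequence, not core code):

	irqsoff_tracer_init(tr);	/* register hook; tracer_enabled
					 * follows tracing_is_enabled() */
	irqsoff_tracer_open(iter);	/* pause recording while dumping */
	/* ... userspace reads out the max-latency trace ... */
	irqsoff_tracer_close(iter);	/* tracer_enabled = save_tracer_enabled */
	irqsoff_tracer_reset(tr);	/* unregister the hook */
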
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index f28484618ff..51bcf370215 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c | |||
@@ -34,30 +34,24 @@ static void mmio_trace_init(struct trace_array *tr) | |||
34 | { | 34 | { |
35 | pr_debug("in %s\n", __func__); | 35 | pr_debug("in %s\n", __func__); |
36 | mmio_trace_array = tr; | 36 | mmio_trace_array = tr; |
37 | if (tr->ctrl) { | 37 | |
38 | mmio_reset_data(tr); | 38 | mmio_reset_data(tr); |
39 | enable_mmiotrace(); | 39 | enable_mmiotrace(); |
40 | } | ||
41 | } | 40 | } |
42 | 41 | ||
43 | static void mmio_trace_reset(struct trace_array *tr) | 42 | static void mmio_trace_reset(struct trace_array *tr) |
44 | { | 43 | { |
45 | pr_debug("in %s\n", __func__); | 44 | pr_debug("in %s\n", __func__); |
46 | if (tr->ctrl) | 45 | |
47 | disable_mmiotrace(); | 46 | disable_mmiotrace(); |
48 | mmio_reset_data(tr); | 47 | mmio_reset_data(tr); |
49 | mmio_trace_array = NULL; | 48 | mmio_trace_array = NULL; |
50 | } | 49 | } |
51 | 50 | ||
52 | static void mmio_trace_ctrl_update(struct trace_array *tr) | 51 | static void mmio_trace_start(struct trace_array *tr) |
53 | { | 52 | { |
54 | pr_debug("in %s\n", __func__); | 53 | pr_debug("in %s\n", __func__); |
55 | if (tr->ctrl) { | 54 | mmio_reset_data(tr); |
56 | mmio_reset_data(tr); | ||
57 | enable_mmiotrace(); | ||
58 | } else { | ||
59 | disable_mmiotrace(); | ||
60 | } | ||
61 | } | 55 | } |
62 | 56 | ||
63 | static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) | 57 | static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) |
@@ -298,10 +292,10 @@ static struct tracer mmio_tracer __read_mostly = | |||
298 | .name = "mmiotrace", | 292 | .name = "mmiotrace", |
299 | .init = mmio_trace_init, | 293 | .init = mmio_trace_init, |
300 | .reset = mmio_trace_reset, | 294 | .reset = mmio_trace_reset, |
295 | .start = mmio_trace_start, | ||
301 | .pipe_open = mmio_pipe_open, | 296 | .pipe_open = mmio_pipe_open, |
302 | .close = mmio_close, | 297 | .close = mmio_close, |
303 | .read = mmio_read, | 298 | .read = mmio_read, |
304 | .ctrl_update = mmio_trace_ctrl_update, | ||
305 | .print_line = mmio_print_line, | 299 | .print_line = mmio_print_line, |
306 | }; | 300 | }; |
307 | 301 | ||
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c index 4592b486251..2ef1d227e7d 100644 --- a/kernel/trace/trace_nop.c +++ b/kernel/trace/trace_nop.c | |||
@@ -32,23 +32,12 @@ static void nop_trace_init(struct trace_array *tr) | |||
32 | for_each_online_cpu(cpu) | 32 | for_each_online_cpu(cpu) |
33 | tracing_reset(tr, cpu); | 33 | tracing_reset(tr, cpu); |
34 | 34 | ||
35 | if (tr->ctrl) | 35 | start_nop_trace(tr); |
36 | start_nop_trace(tr); | ||
37 | } | 36 | } |
38 | 37 | ||
39 | static void nop_trace_reset(struct trace_array *tr) | 38 | static void nop_trace_reset(struct trace_array *tr) |
40 | { | 39 | { |
41 | if (tr->ctrl) | 40 | stop_nop_trace(tr); |
42 | stop_nop_trace(tr); | ||
43 | } | ||
44 | |||
45 | static void nop_trace_ctrl_update(struct trace_array *tr) | ||
46 | { | ||
47 | /* When starting a new trace, reset the buffers */ | ||
48 | if (tr->ctrl) | ||
49 | start_nop_trace(tr); | ||
50 | else | ||
51 | stop_nop_trace(tr); | ||
52 | } | 41 | } |
53 | 42 | ||
54 | struct tracer nop_trace __read_mostly = | 43 | struct tracer nop_trace __read_mostly = |
@@ -56,7 +45,6 @@ struct tracer nop_trace __read_mostly = | |||
56 | .name = "nop", | 45 | .name = "nop", |
57 | .init = nop_trace_init, | 46 | .init = nop_trace_init, |
58 | .reset = nop_trace_reset, | 47 | .reset = nop_trace_reset, |
59 | .ctrl_update = nop_trace_ctrl_update, | ||
60 | #ifdef CONFIG_FTRACE_SELFTEST | 48 | #ifdef CONFIG_FTRACE_SELFTEST |
61 | .selftest = trace_selftest_startup_nop, | 49 | .selftest = trace_selftest_startup_nop, |
62 | #endif | 50 | #endif |
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index b8f56beb1a6..be35bdfe2e3 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c | |||
@@ -16,7 +16,8 @@ | |||
16 | 16 | ||
17 | static struct trace_array *ctx_trace; | 17 | static struct trace_array *ctx_trace; |
18 | static int __read_mostly tracer_enabled; | 18 | static int __read_mostly tracer_enabled; |
19 | static atomic_t sched_ref; | 19 | static int sched_ref; |
20 | static DEFINE_MUTEX(sched_register_mutex); | ||
20 | 21 | ||
21 | static void | 22 | static void |
22 | probe_sched_switch(struct rq *__rq, struct task_struct *prev, | 23 | probe_sched_switch(struct rq *__rq, struct task_struct *prev, |
@@ -27,7 +28,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev, | |||
27 | int cpu; | 28 | int cpu; |
28 | int pc; | 29 | int pc; |
29 | 30 | ||
30 | if (!atomic_read(&sched_ref)) | 31 | if (!sched_ref) |
31 | return; | 32 | return; |
32 | 33 | ||
33 | tracing_record_cmdline(prev); | 34 | tracing_record_cmdline(prev); |
@@ -123,20 +124,18 @@ static void tracing_sched_unregister(void) | |||
123 | 124 | ||
124 | static void tracing_start_sched_switch(void) | 125 | static void tracing_start_sched_switch(void) |
125 | { | 126 | { |
126 | long ref; | 127 | mutex_lock(&sched_register_mutex); |
127 | 128 | if (!(sched_ref++)) | |
128 | ref = atomic_inc_return(&sched_ref); | ||
129 | if (ref == 1) | ||
130 | tracing_sched_register(); | 129 | tracing_sched_register(); |
130 | mutex_unlock(&sched_register_mutex); | ||
131 | } | 131 | } |
132 | 132 | ||
133 | static void tracing_stop_sched_switch(void) | 133 | static void tracing_stop_sched_switch(void) |
134 | { | 134 | { |
135 | long ref; | 135 | mutex_lock(&sched_register_mutex); |
136 | 136 | if (!(--sched_ref)) | |
137 | ref = atomic_dec_and_test(&sched_ref); | ||
138 | if (ref) | ||
139 | tracing_sched_unregister(); | 137 | tracing_sched_unregister(); |
138 | mutex_unlock(&sched_register_mutex); | ||
140 | } | 139 | } |
141 | 140 | ||
142 | void tracing_start_cmdline_record(void) | 141 | void tracing_start_cmdline_record(void) |
@@ -149,40 +148,85 @@ void tracing_stop_cmdline_record(void) | |||
149 | tracing_stop_sched_switch(); | 148 | tracing_stop_sched_switch(); |
150 | } | 149 | } |
151 | 150 | ||
151 | /** | ||
152 | * tracing_start_sched_switch_record - start tracing context switches | ||
153 | * | ||
154 | * Turns on context switch tracing for a tracer. | ||
155 | */ | ||
156 | void tracing_start_sched_switch_record(void) | ||
157 | { | ||
158 | if (unlikely(!ctx_trace)) { | ||
159 | WARN_ON(1); | ||
160 | return; | ||
161 | } | ||
162 | |||
163 | tracing_start_sched_switch(); | ||
164 | |||
165 | mutex_lock(&sched_register_mutex); | ||
166 | tracer_enabled++; | ||
167 | mutex_unlock(&sched_register_mutex); | ||
168 | } | ||
169 | |||
170 | /** | ||
171 | * tracing_stop_sched_switch_record - stop tracing context switches | ||
172 | * | ||
173 | * Turns off context switch tracing for a tracer. | ||
174 | */ | ||
175 | void tracing_stop_sched_switch_record(void) | ||
176 | { | ||
177 | mutex_lock(&sched_register_mutex); | ||
178 | tracer_enabled--; | ||
179 | WARN_ON(tracer_enabled < 0); | ||
180 | mutex_unlock(&sched_register_mutex); | ||
181 | |||
182 | tracing_stop_sched_switch(); | ||
183 | } | ||
184 | |||
185 | /** | ||
186 | * tracing_sched_switch_assign_trace - assign a trace array for ctx switch | ||
187 | * @tr: trace array pointer to assign | ||
188 | * | ||
189 | * Some tracers might want to record the context switches in their | ||
190 | * trace. This function lets those tracers assign the trace array | ||
191 | * to use. | ||
192 | */ | ||
193 | void tracing_sched_switch_assign_trace(struct trace_array *tr) | ||
194 | { | ||
195 | ctx_trace = tr; | ||
196 | } | ||
197 | |||
152 | static void start_sched_trace(struct trace_array *tr) | 198 | static void start_sched_trace(struct trace_array *tr) |
153 | { | 199 | { |
154 | sched_switch_reset(tr); | 200 | sched_switch_reset(tr); |
155 | tracing_start_cmdline_record(); | 201 | tracing_start_sched_switch_record(); |
156 | tracer_enabled = 1; | ||
157 | } | 202 | } |
158 | 203 | ||
159 | static void stop_sched_trace(struct trace_array *tr) | 204 | static void stop_sched_trace(struct trace_array *tr) |
160 | { | 205 | { |
161 | tracer_enabled = 0; | 206 | tracing_stop_sched_switch_record(); |
162 | tracing_stop_cmdline_record(); | ||
163 | } | 207 | } |
164 | 208 | ||
165 | static void sched_switch_trace_init(struct trace_array *tr) | 209 | static void sched_switch_trace_init(struct trace_array *tr) |
166 | { | 210 | { |
167 | ctx_trace = tr; | 211 | ctx_trace = tr; |
168 | 212 | start_sched_trace(tr); | |
169 | if (tr->ctrl) | ||
170 | start_sched_trace(tr); | ||
171 | } | 213 | } |
172 | 214 | ||
173 | static void sched_switch_trace_reset(struct trace_array *tr) | 215 | static void sched_switch_trace_reset(struct trace_array *tr) |
174 | { | 216 | { |
175 | if (tr->ctrl) | 217 | if (sched_ref) |
176 | stop_sched_trace(tr); | 218 | stop_sched_trace(tr); |
177 | } | 219 | } |
178 | 220 | ||
179 | static void sched_switch_trace_ctrl_update(struct trace_array *tr) | 221 | static void sched_switch_trace_start(struct trace_array *tr) |
180 | { | 222 | { |
181 | /* When starting a new trace, reset the buffers */ | 223 | sched_switch_reset(tr); |
182 | if (tr->ctrl) | 224 | tracing_start_sched_switch(); |
183 | start_sched_trace(tr); | 225 | } |
184 | else | 226 | |
185 | stop_sched_trace(tr); | 227 | static void sched_switch_trace_stop(struct trace_array *tr) |
228 | { | ||
229 | tracing_stop_sched_switch(); | ||
186 | } | 230 | } |
187 | 231 | ||
188 | static struct tracer sched_switch_trace __read_mostly = | 232 | static struct tracer sched_switch_trace __read_mostly = |
@@ -190,7 +234,8 @@ static struct tracer sched_switch_trace __read_mostly = | |||
190 | .name = "sched_switch", | 234 | .name = "sched_switch", |
191 | .init = sched_switch_trace_init, | 235 | .init = sched_switch_trace_init, |
192 | .reset = sched_switch_trace_reset, | 236 | .reset = sched_switch_trace_reset, |
193 | .ctrl_update = sched_switch_trace_ctrl_update, | 237 | .start = sched_switch_trace_start, |
238 | .stop = sched_switch_trace_stop, | ||
194 | #ifdef CONFIG_FTRACE_SELFTEST | 239 | #ifdef CONFIG_FTRACE_SELFTEST |
195 | .selftest = trace_selftest_startup_sched_switch, | 240 | .selftest = trace_selftest_startup_sched_switch, |
196 | #endif | 241 | #endif |
@@ -198,14 +243,6 @@ static struct tracer sched_switch_trace __read_mostly = | |||
198 | 243 | ||
199 | __init static int init_sched_switch_trace(void) | 244 | __init static int init_sched_switch_trace(void) |
200 | { | 245 | { |
201 | int ret = 0; | ||
202 | |||
203 | if (atomic_read(&sched_ref)) | ||
204 | ret = tracing_sched_register(); | ||
205 | if (ret) { | ||
206 | pr_info("error registering scheduler trace\n"); | ||
207 | return ret; | ||
208 | } | ||
209 | return register_tracer(&sched_switch_trace); | 246 | return register_tracer(&sched_switch_trace); |
210 | } | 247 | } |
211 | device_initcall(init_sched_switch_trace); | 248 | device_initcall(init_sched_switch_trace); |
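
tracing_start_sched_switch_record(), tracing_stop_sched_switch_record() and tracing_sched_switch_assign_trace() form the interface other tracers use to pull context-switch events into their own buffer; the boot tracer above is the first user. A usage sketch with generic callback names:

	static void example_tracer_init(struct trace_array *tr)
	{
		/* route sched_switch/wakeup events into tr's buffer */
		tracing_sched_switch_assign_trace(tr);
		tracing_start_sched_switch_record();
	}

	static void example_tracer_reset(struct trace_array *tr)
	{
		tracing_stop_sched_switch_record();
	}
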
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 3ae93f16b56..983f2b1478c 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -50,8 +50,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
50 | return; | 50 | return; |
51 | 51 | ||
52 | pc = preempt_count(); | 52 | pc = preempt_count(); |
53 | resched = need_resched(); | 53 | resched = ftrace_preempt_disable(); |
54 | preempt_disable_notrace(); | ||
55 | 54 | ||
56 | cpu = raw_smp_processor_id(); | 55 | cpu = raw_smp_processor_id(); |
57 | data = tr->data[cpu]; | 56 | data = tr->data[cpu]; |
@@ -81,15 +80,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
81 | out: | 80 | out: |
82 | atomic_dec(&data->disabled); | 81 | atomic_dec(&data->disabled); |
83 | 82 | ||
84 | /* | 83 | ftrace_preempt_enable(resched); |
85 | * To prevent recursion from the scheduler, if the | ||
86 | * resched flag was set before we entered, then | ||
87 | * don't reschedule. | ||
88 | */ | ||
89 | if (resched) | ||
90 | preempt_enable_no_resched_notrace(); | ||
91 | else | ||
92 | preempt_enable_notrace(); | ||
93 | } | 84 | } |
94 | 85 | ||
95 | static struct ftrace_ops trace_ops __read_mostly = | 86 | static struct ftrace_ops trace_ops __read_mostly = |
@@ -271,6 +262,12 @@ out: | |||
271 | atomic_dec(&wakeup_trace->data[cpu]->disabled); | 262 | atomic_dec(&wakeup_trace->data[cpu]->disabled); |
272 | } | 263 | } |
273 | 264 | ||
265 | /* | ||
266 | * save_tracer_enabled is used to save the state of the tracer_enabled | ||
267 | * variable when we disable it when we open a trace output file. | ||
268 | */ | ||
269 | static int save_tracer_enabled; | ||
270 | |||
274 | static void start_wakeup_tracer(struct trace_array *tr) | 271 | static void start_wakeup_tracer(struct trace_array *tr) |
275 | { | 272 | { |
276 | int ret; | 273 | int ret; |
@@ -309,7 +306,13 @@ static void start_wakeup_tracer(struct trace_array *tr) | |||
309 | 306 | ||
310 | register_ftrace_function(&trace_ops); | 307 | register_ftrace_function(&trace_ops); |
311 | 308 | ||
312 | tracer_enabled = 1; | 309 | if (tracing_is_enabled()) { |
310 | tracer_enabled = 1; | ||
311 | save_tracer_enabled = 1; | ||
312 | } else { | ||
313 | tracer_enabled = 0; | ||
314 | save_tracer_enabled = 0; | ||
315 | } | ||
313 | 316 | ||
314 | return; | 317 | return; |
315 | fail_deprobe_wake_new: | 318 | fail_deprobe_wake_new: |
@@ -321,6 +324,7 @@ fail_deprobe: | |||
321 | static void stop_wakeup_tracer(struct trace_array *tr) | 324 | static void stop_wakeup_tracer(struct trace_array *tr) |
322 | { | 325 | { |
323 | tracer_enabled = 0; | 326 | tracer_enabled = 0; |
327 | save_tracer_enabled = 0; | ||
324 | unregister_ftrace_function(&trace_ops); | 328 | unregister_ftrace_function(&trace_ops); |
325 | unregister_trace_sched_switch(probe_wakeup_sched_switch); | 329 | unregister_trace_sched_switch(probe_wakeup_sched_switch); |
326 | unregister_trace_sched_wakeup_new(probe_wakeup); | 330 | unregister_trace_sched_wakeup_new(probe_wakeup); |
@@ -330,40 +334,42 @@ static void stop_wakeup_tracer(struct trace_array *tr) | |||
330 | static void wakeup_tracer_init(struct trace_array *tr) | 334 | static void wakeup_tracer_init(struct trace_array *tr) |
331 | { | 335 | { |
332 | wakeup_trace = tr; | 336 | wakeup_trace = tr; |
333 | 337 | start_wakeup_tracer(tr); | |
334 | if (tr->ctrl) | ||
335 | start_wakeup_tracer(tr); | ||
336 | } | 338 | } |
337 | 339 | ||
338 | static void wakeup_tracer_reset(struct trace_array *tr) | 340 | static void wakeup_tracer_reset(struct trace_array *tr) |
339 | { | 341 | { |
340 | if (tr->ctrl) { | 342 | stop_wakeup_tracer(tr); |
341 | stop_wakeup_tracer(tr); | 343 | /* make sure we put back any tasks we are tracing */ |
342 | /* make sure we put back any tasks we are tracing */ | 344 | wakeup_reset(tr); |
343 | wakeup_reset(tr); | ||
344 | } | ||
345 | } | 345 | } |
346 | 346 | ||
347 | static void wakeup_tracer_ctrl_update(struct trace_array *tr) | 347 | static void wakeup_tracer_start(struct trace_array *tr) |
348 | { | 348 | { |
349 | if (tr->ctrl) | 349 | wakeup_reset(tr); |
350 | start_wakeup_tracer(tr); | 350 | tracer_enabled = 1; |
351 | else | 351 | save_tracer_enabled = 1; |
352 | stop_wakeup_tracer(tr); | 352 | } |
353 | |||
354 | static void wakeup_tracer_stop(struct trace_array *tr) | ||
355 | { | ||
356 | tracer_enabled = 0; | ||
357 | save_tracer_enabled = 0; | ||
353 | } | 358 | } |
354 | 359 | ||
355 | static void wakeup_tracer_open(struct trace_iterator *iter) | 360 | static void wakeup_tracer_open(struct trace_iterator *iter) |
356 | { | 361 | { |
357 | /* stop the trace while dumping */ | 362 | /* stop the trace while dumping */ |
358 | if (iter->tr->ctrl) | 363 | tracer_enabled = 0; |
359 | stop_wakeup_tracer(iter->tr); | ||
360 | } | 364 | } |
361 | 365 | ||
362 | static void wakeup_tracer_close(struct trace_iterator *iter) | 366 | static void wakeup_tracer_close(struct trace_iterator *iter) |
363 | { | 367 | { |
364 | /* forget about any processes we were recording */ | 368 | /* forget about any processes we were recording */ |
365 | if (iter->tr->ctrl) | 369 | if (save_tracer_enabled) { |
366 | start_wakeup_tracer(iter->tr); | 370 | wakeup_reset(iter->tr); |
371 | tracer_enabled = 1; | ||
372 | } | ||
367 | } | 373 | } |
368 | 374 | ||
369 | static struct tracer wakeup_tracer __read_mostly = | 375 | static struct tracer wakeup_tracer __read_mostly = |
@@ -371,9 +377,10 @@ static struct tracer wakeup_tracer __read_mostly = | |||
371 | .name = "wakeup", | 377 | .name = "wakeup", |
372 | .init = wakeup_tracer_init, | 378 | .init = wakeup_tracer_init, |
373 | .reset = wakeup_tracer_reset, | 379 | .reset = wakeup_tracer_reset, |
380 | .start = wakeup_tracer_start, | ||
381 | .stop = wakeup_tracer_stop, | ||
374 | .open = wakeup_tracer_open, | 382 | .open = wakeup_tracer_open, |
375 | .close = wakeup_tracer_close, | 383 | .close = wakeup_tracer_close, |
376 | .ctrl_update = wakeup_tracer_ctrl_update, | ||
377 | .print_max = 1, | 384 | .print_max = 1, |
378 | #ifdef CONFIG_FTRACE_SELFTEST | 385 | #ifdef CONFIG_FTRACE_SELFTEST |
379 | .selftest = trace_selftest_startup_wakeup, | 386 | .selftest = trace_selftest_startup_wakeup, |
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 90bc752a758..24e6e075e6d 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
@@ -13,6 +13,7 @@ static inline int trace_valid_entry(struct trace_entry *entry) | |||
13 | case TRACE_STACK: | 13 | case TRACE_STACK: |
14 | case TRACE_PRINT: | 14 | case TRACE_PRINT: |
15 | case TRACE_SPECIAL: | 15 | case TRACE_SPECIAL: |
16 | case TRACE_BRANCH: | ||
16 | return 1; | 17 | return 1; |
17 | } | 18 | } |
18 | return 0; | 19 | return 0; |
@@ -110,7 +111,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
110 | ftrace_set_filter(func_name, strlen(func_name), 1); | 111 | ftrace_set_filter(func_name, strlen(func_name), 1); |
111 | 112 | ||
112 | /* enable tracing */ | 113 | /* enable tracing */ |
113 | tr->ctrl = 1; | ||
114 | trace->init(tr); | 114 | trace->init(tr); |
115 | 115 | ||
116 | /* Sleep for a 1/10 of a second */ | 116 | /* Sleep for a 1/10 of a second */ |
@@ -134,13 +134,13 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
134 | msleep(100); | 134 | msleep(100); |
135 | 135 | ||
136 | /* stop the tracing. */ | 136 | /* stop the tracing. */ |
137 | tr->ctrl = 0; | 137 | tracing_stop(); |
138 | trace->ctrl_update(tr); | ||
139 | ftrace_enabled = 0; | 138 | ftrace_enabled = 0; |
140 | 139 | ||
141 | /* check the trace buffer */ | 140 | /* check the trace buffer */ |
142 | ret = trace_test_buffer(tr, &count); | 141 | ret = trace_test_buffer(tr, &count); |
143 | trace->reset(tr); | 142 | trace->reset(tr); |
143 | tracing_start(); | ||
144 | 144 | ||
145 | /* we should only have one item */ | 145 | /* we should only have one item */ |
146 | if (!ret && count != 1) { | 146 | if (!ret && count != 1) { |
@@ -148,6 +148,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
148 | ret = -1; | 148 | ret = -1; |
149 | goto out; | 149 | goto out; |
150 | } | 150 | } |
151 | |||
151 | out: | 152 | out: |
152 | ftrace_enabled = save_ftrace_enabled; | 153 | ftrace_enabled = save_ftrace_enabled; |
153 | tracer_enabled = save_tracer_enabled; | 154 | tracer_enabled = save_tracer_enabled; |
@@ -180,18 +181,17 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) | |||
180 | ftrace_enabled = 1; | 181 | ftrace_enabled = 1; |
181 | tracer_enabled = 1; | 182 | tracer_enabled = 1; |
182 | 183 | ||
183 | tr->ctrl = 1; | ||
184 | trace->init(tr); | 184 | trace->init(tr); |
185 | /* Sleep for a 1/10 of a second */ | 185 | /* Sleep for a 1/10 of a second */ |
186 | msleep(100); | 186 | msleep(100); |
187 | /* stop the tracing. */ | 187 | /* stop the tracing. */ |
188 | tr->ctrl = 0; | 188 | tracing_stop(); |
189 | trace->ctrl_update(tr); | ||
190 | ftrace_enabled = 0; | 189 | ftrace_enabled = 0; |
191 | 190 | ||
192 | /* check the trace buffer */ | 191 | /* check the trace buffer */ |
193 | ret = trace_test_buffer(tr, &count); | 192 | ret = trace_test_buffer(tr, &count); |
194 | trace->reset(tr); | 193 | trace->reset(tr); |
194 | tracing_start(); | ||
195 | 195 | ||
196 | if (!ret && !count) { | 196 | if (!ret && !count) { |
197 | printk(KERN_CONT ".. no entries found .."); | 197 | printk(KERN_CONT ".. no entries found .."); |
@@ -223,7 +223,6 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr) | |||
223 | int ret; | 223 | int ret; |
224 | 224 | ||
225 | /* start the tracing */ | 225 | /* start the tracing */ |
226 | tr->ctrl = 1; | ||
227 | trace->init(tr); | 226 | trace->init(tr); |
228 | /* reset the max latency */ | 227 | /* reset the max latency */ |
229 | tracing_max_latency = 0; | 228 | tracing_max_latency = 0; |
@@ -232,13 +231,13 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr) | |||
232 | udelay(100); | 231 | udelay(100); |
233 | local_irq_enable(); | 232 | local_irq_enable(); |
234 | /* stop the tracing. */ | 233 | /* stop the tracing. */ |
235 | tr->ctrl = 0; | 234 | tracing_stop(); |
236 | trace->ctrl_update(tr); | ||
237 | /* check both trace buffers */ | 235 | /* check both trace buffers */ |
238 | ret = trace_test_buffer(tr, NULL); | 236 | ret = trace_test_buffer(tr, NULL); |
239 | if (!ret) | 237 | if (!ret) |
240 | ret = trace_test_buffer(&max_tr, &count); | 238 | ret = trace_test_buffer(&max_tr, &count); |
241 | trace->reset(tr); | 239 | trace->reset(tr); |
240 | tracing_start(); | ||
242 | 241 | ||
243 | if (!ret && !count) { | 242 | if (!ret && !count) { |
244 | printk(KERN_CONT ".. no entries found .."); | 243 | printk(KERN_CONT ".. no entries found .."); |
@@ -259,8 +258,20 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr) | |||
259 | unsigned long count; | 258 | unsigned long count; |
260 | int ret; | 259 | int ret; |
261 | 260 | ||
261 | /* | ||
262 | * Now that the big kernel lock is no longer preemptible, | ||
263 | * and this is called with the BKL held, it will always | ||
264 | * fail. If preemption is already disabled, simply | ||
265 | * pass the test. When the BKL is removed, or becomes | ||
266 | * preemptible again, we will once again test this, | ||
267 | * so keep it in. | ||
268 | */ | ||
269 | if (preempt_count()) { | ||
270 | printk(KERN_CONT "cannot test ... force "); | ||
271 | return 0; | ||
272 | } | ||
273 | |||
262 | /* start the tracing */ | 274 | /* start the tracing */ |
263 | tr->ctrl = 1; | ||
264 | trace->init(tr); | 275 | trace->init(tr); |
265 | /* reset the max latency */ | 276 | /* reset the max latency */ |
266 | tracing_max_latency = 0; | 277 | tracing_max_latency = 0; |
@@ -269,13 +280,13 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr) | |||
269 | udelay(100); | 280 | udelay(100); |
270 | preempt_enable(); | 281 | preempt_enable(); |
271 | /* stop the tracing. */ | 282 | /* stop the tracing. */ |
272 | tr->ctrl = 0; | 283 | tracing_stop(); |
273 | trace->ctrl_update(tr); | ||
274 | /* check both trace buffers */ | 284 | /* check both trace buffers */ |
275 | ret = trace_test_buffer(tr, NULL); | 285 | ret = trace_test_buffer(tr, NULL); |
276 | if (!ret) | 286 | if (!ret) |
277 | ret = trace_test_buffer(&max_tr, &count); | 287 | ret = trace_test_buffer(&max_tr, &count); |
278 | trace->reset(tr); | 288 | trace->reset(tr); |
289 | tracing_start(); | ||
279 | 290 | ||
280 | if (!ret && !count) { | 291 | if (!ret && !count) { |
281 | printk(KERN_CONT ".. no entries found .."); | 292 | printk(KERN_CONT ".. no entries found .."); |
@@ -296,8 +307,20 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array * | |||
296 | unsigned long count; | 307 | unsigned long count; |
297 | int ret; | 308 | int ret; |
298 | 309 | ||
310 | /* | ||
311 | * Now that the big kernel lock is no longer preemptible, | ||
312 | * and this is called with the BKL held, it will always | ||
313 | * fail. If preemption is already disabled, simply | ||
314 | * pass the test. When the BKL is removed, or becomes | ||
315 | * preemptible again, we will once again test this, | ||
316 | * so keep it in. | ||
317 | */ | ||
318 | if (preempt_count()) { | ||
319 | printk(KERN_CONT "cannot test ... force "); | ||
320 | return 0; | ||
321 | } | ||
322 | |||
299 | /* start the tracing */ | 323 | /* start the tracing */ |
300 | tr->ctrl = 1; | ||
301 | trace->init(tr); | 324 | trace->init(tr); |
302 | 325 | ||
303 | /* reset the max latency */ | 326 | /* reset the max latency */ |
@@ -312,27 +335,30 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array * | |||
312 | local_irq_enable(); | 335 | local_irq_enable(); |
313 | 336 | ||
314 | /* stop the tracing. */ | 337 | /* stop the tracing. */ |
315 | tr->ctrl = 0; | 338 | tracing_stop(); |
316 | trace->ctrl_update(tr); | ||
317 | /* check both trace buffers */ | 339 | /* check both trace buffers */ |
318 | ret = trace_test_buffer(tr, NULL); | 340 | ret = trace_test_buffer(tr, NULL); |
319 | if (ret) | 341 | if (ret) { |
342 | tracing_start(); | ||
320 | goto out; | 343 | goto out; |
344 | } | ||
321 | 345 | ||
322 | ret = trace_test_buffer(&max_tr, &count); | 346 | ret = trace_test_buffer(&max_tr, &count); |
323 | if (ret) | 347 | if (ret) { |
348 | tracing_start(); | ||
324 | goto out; | 349 | goto out; |
350 | } | ||
325 | 351 | ||
326 | if (!ret && !count) { | 352 | if (!ret && !count) { |
327 | printk(KERN_CONT ".. no entries found .."); | 353 | printk(KERN_CONT ".. no entries found .."); |
328 | ret = -1; | 354 | ret = -1; |
355 | tracing_start(); | ||
329 | goto out; | 356 | goto out; |
330 | } | 357 | } |
331 | 358 | ||
332 | /* do the test by disabling interrupts first this time */ | 359 | /* do the test by disabling interrupts first this time */ |
333 | tracing_max_latency = 0; | 360 | tracing_max_latency = 0; |
334 | tr->ctrl = 1; | 361 | tracing_start(); |
335 | trace->ctrl_update(tr); | ||
336 | preempt_disable(); | 362 | preempt_disable(); |
337 | local_irq_disable(); | 363 | local_irq_disable(); |
338 | udelay(100); | 364 | udelay(100); |
@@ -341,8 +367,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array * | |||
341 | local_irq_enable(); | 367 | local_irq_enable(); |
342 | 368 | ||
343 | /* stop the tracing. */ | 369 | /* stop the tracing. */ |
344 | tr->ctrl = 0; | 370 | tracing_stop(); |
345 | trace->ctrl_update(tr); | ||
346 | /* check both trace buffers */ | 371 | /* check both trace buffers */ |
347 | ret = trace_test_buffer(tr, NULL); | 372 | ret = trace_test_buffer(tr, NULL); |
348 | if (ret) | 373 | if (ret) |
@@ -358,6 +383,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array * | |||
358 | 383 | ||
359 | out: | 384 | out: |
360 | trace->reset(tr); | 385 | trace->reset(tr); |
386 | tracing_start(); | ||
361 | tracing_max_latency = save_max; | 387 | tracing_max_latency = save_max; |
362 | 388 | ||
363 | return ret; | 389 | return ret; |
@@ -423,7 +449,6 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) | |||
423 | wait_for_completion(&isrt); | 449 | wait_for_completion(&isrt); |
424 | 450 | ||
425 | /* start the tracing */ | 451 | /* start the tracing */ |
426 | tr->ctrl = 1; | ||
427 | trace->init(tr); | 452 | trace->init(tr); |
428 | /* reset the max latency */ | 453 | /* reset the max latency */ |
429 | tracing_max_latency = 0; | 454 | tracing_max_latency = 0; |
@@ -448,8 +473,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) | |||
448 | msleep(100); | 473 | msleep(100); |
449 | 474 | ||
450 | /* stop the tracing. */ | 475 | /* stop the tracing. */ |
451 | tr->ctrl = 0; | 476 | tracing_stop(); |
452 | trace->ctrl_update(tr); | ||
453 | /* check both trace buffers */ | 477 | /* check both trace buffers */ |
454 | ret = trace_test_buffer(tr, NULL); | 478 | ret = trace_test_buffer(tr, NULL); |
455 | if (!ret) | 479 | if (!ret) |
@@ -457,6 +481,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) | |||
457 | 481 | ||
458 | 482 | ||
459 | trace->reset(tr); | 483 | trace->reset(tr); |
484 | tracing_start(); | ||
460 | 485 | ||
461 | tracing_max_latency = save_max; | 486 | tracing_max_latency = save_max; |
462 | 487 | ||
@@ -480,16 +505,15 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr | |||
480 | int ret; | 505 | int ret; |
481 | 506 | ||
482 | /* start the tracing */ | 507 | /* start the tracing */ |
483 | tr->ctrl = 1; | ||
484 | trace->init(tr); | 508 | trace->init(tr); |
485 | /* Sleep for a 1/10 of a second */ | 509 | /* Sleep for a 1/10 of a second */ |
486 | msleep(100); | 510 | msleep(100); |
487 | /* stop the tracing. */ | 511 | /* stop the tracing. */ |
488 | tr->ctrl = 0; | 512 | tracing_stop(); |
489 | trace->ctrl_update(tr); | ||
490 | /* check the trace buffer */ | 513 | /* check the trace buffer */ |
491 | ret = trace_test_buffer(tr, &count); | 514 | ret = trace_test_buffer(tr, &count); |
492 | trace->reset(tr); | 515 | trace->reset(tr); |
516 | tracing_start(); | ||
493 | 517 | ||
494 | if (!ret && !count) { | 518 | if (!ret && !count) { |
495 | printk(KERN_CONT ".. no entries found .."); | 519 | printk(KERN_CONT ".. no entries found .."); |
@@ -508,17 +532,38 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr) | |||
508 | int ret; | 532 | int ret; |
509 | 533 | ||
510 | /* start the tracing */ | 534 | /* start the tracing */ |
511 | tr->ctrl = 1; | ||
512 | trace->init(tr); | 535 | trace->init(tr); |
513 | /* Sleep for a 1/10 of a second */ | 536 | /* Sleep for a 1/10 of a second */ |
514 | msleep(100); | 537 | msleep(100); |
515 | /* stop the tracing. */ | 538 | /* stop the tracing. */ |
516 | tr->ctrl = 0; | 539 | tracing_stop(); |
517 | trace->ctrl_update(tr); | ||
518 | /* check the trace buffer */ | 540 | /* check the trace buffer */ |
519 | ret = trace_test_buffer(tr, &count); | 541 | ret = trace_test_buffer(tr, &count); |
520 | trace->reset(tr); | 542 | trace->reset(tr); |
543 | tracing_start(); | ||
521 | 544 | ||
522 | return ret; | 545 | return ret; |
523 | } | 546 | } |
524 | #endif /* CONFIG_SYSPROF_TRACER */ | 547 | #endif /* CONFIG_SYSPROF_TRACER */ |
548 | |||
549 | #ifdef CONFIG_BRANCH_TRACER | ||
550 | int | ||
551 | trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) | ||
552 | { | ||
553 | unsigned long count; | ||
554 | int ret; | ||
555 | |||
556 | /* start the tracing */ | ||
557 | trace->init(tr); | ||
558 | /* Sleep for a 1/10 of a second */ | ||
559 | msleep(100); | ||
560 | /* stop the tracing. */ | ||
561 | tracing_stop(); | ||
562 | /* check the trace buffer */ | ||
563 | ret = trace_test_buffer(tr, &count); | ||
564 | trace->reset(tr); | ||
565 | tracing_start(); | ||
566 | |||
567 | return ret; | ||
568 | } | ||
569 | #endif /* CONFIG_BRANCH_TRACER */ | ||
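
Every converted selftest, including the new branch-tracer one, now shares the same shape. A condensed sketch of that pattern, using the helpers visible above (the empty-buffer failure convention follows the sched_switch test; demo_selftest is a hypothetical name):

    static int demo_selftest(struct tracer *trace, struct trace_array *tr)
    {
            unsigned long count;
            int ret;

            trace->init(tr);        /* arm the tracer under test */
            msleep(100);            /* generate ~1/10s worth of events */
            tracing_stop();         /* freeze buffers globally, not per tracer */

            ret = trace_test_buffer(tr, &count);    /* walk and sanity-check */
            trace->reset(tr);
            tracing_start();        /* rearm tracing for subsequent tests */

            if (!ret && !count)
                    ret = -1;       /* ran, but recorded nothing: fail */
            return ret;
    }
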
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index be682b62fe5..d39e8b7de6a 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
@@ -107,8 +107,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) | |||
107 | if (unlikely(!ftrace_enabled || stack_trace_disabled)) | 107 | if (unlikely(!ftrace_enabled || stack_trace_disabled)) |
108 | return; | 108 | return; |
109 | 109 | ||
110 | resched = need_resched(); | 110 | resched = ftrace_preempt_disable(); |
111 | preempt_disable_notrace(); | ||
112 | 111 | ||
113 | cpu = raw_smp_processor_id(); | 112 | cpu = raw_smp_processor_id(); |
114 | /* no atomic needed, we only modify this variable by this cpu */ | 113 | /* no atomic needed, we only modify this variable by this cpu */ |
@@ -120,10 +119,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) | |||
120 | out: | 119 | out: |
121 | per_cpu(trace_active, cpu)--; | 120 | per_cpu(trace_active, cpu)--; |
122 | /* prevent recursion in schedule */ | 121 | /* prevent recursion in schedule */ |
123 | if (resched) | 122 | ftrace_preempt_enable(resched); |
124 | preempt_enable_no_resched_notrace(); | ||
125 | else | ||
126 | preempt_enable_notrace(); | ||
127 | } | 123 | } |
128 | 124 | ||
129 | static struct ftrace_ops trace_ops __read_mostly = | 125 | static struct ftrace_ops trace_ops __read_mostly = |
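
The open-coded need_resched()/preempt_disable_notrace() pair is folded into ftrace_preempt_disable()/ftrace_preempt_enable(). Their definition is not part of this hunk (it lives in kernel/trace/trace.h in this series), but it is presumably along these lines, matching the sequence being removed:

    static inline int ftrace_preempt_disable(void)
    {
            int resched = need_resched();

            preempt_disable_notrace();
            return resched;
    }

    static inline void ftrace_preempt_enable(int resched)
    {
            /* honor a pending reschedule without recursing into schedule() */
            if (resched)
                    preempt_enable_no_resched_notrace();
            else
                    preempt_enable_notrace();
    }

Centralizing the pair keeps every tracing callback from re-deriving the "did we owe a reschedule?" bookkeeping by hand.
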
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c index 9587d3bcba5..05f753422ae 100644 --- a/kernel/trace/trace_sysprof.c +++ b/kernel/trace/trace_sysprof.c | |||
@@ -265,23 +265,12 @@ static void stack_trace_init(struct trace_array *tr) | |||
265 | { | 265 | { |
266 | sysprof_trace = tr; | 266 | sysprof_trace = tr; |
267 | 267 | ||
268 | if (tr->ctrl) | 268 | start_stack_trace(tr); |
269 | start_stack_trace(tr); | ||
270 | } | 269 | } |
271 | 270 | ||
272 | static void stack_trace_reset(struct trace_array *tr) | 271 | static void stack_trace_reset(struct trace_array *tr) |
273 | { | 272 | { |
274 | if (tr->ctrl) | 273 | stop_stack_trace(tr); |
275 | stop_stack_trace(tr); | ||
276 | } | ||
277 | |||
278 | static void stack_trace_ctrl_update(struct trace_array *tr) | ||
279 | { | ||
280 | /* When starting a new trace, reset the buffers */ | ||
281 | if (tr->ctrl) | ||
282 | start_stack_trace(tr); | ||
283 | else | ||
284 | stop_stack_trace(tr); | ||
285 | } | 274 | } |
286 | 275 | ||
287 | static struct tracer stack_trace __read_mostly = | 276 | static struct tracer stack_trace __read_mostly = |
@@ -289,7 +278,6 @@ static struct tracer stack_trace __read_mostly = | |||
289 | .name = "sysprof", | 278 | .name = "sysprof", |
290 | .init = stack_trace_init, | 279 | .init = stack_trace_init, |
291 | .reset = stack_trace_reset, | 280 | .reset = stack_trace_reset, |
292 | .ctrl_update = stack_trace_ctrl_update, | ||
293 | #ifdef CONFIG_FTRACE_SELFTEST | 281 | #ifdef CONFIG_FTRACE_SELFTEST |
294 | .selftest = trace_selftest_startup_sysprof, | 282 | .selftest = trace_selftest_startup_sysprof, |
295 | #endif | 283 | #endif |
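
With tr->ctrl gone, ->init and ->reset become unconditional start/stop hooks and ->ctrl_update disappears from every tracer, as in the sysprof conversion above. A tracer definition therefore reduces to the following shape (example_* names are hypothetical):

    static void example_trace_init(struct trace_array *tr)
    {
            start_example_trace(tr);        /* no tr->ctrl check anymore */
    }

    static void example_trace_reset(struct trace_array *tr)
    {
            stop_example_trace(tr);
    }

    static struct tracer example_trace __read_mostly =
    {
            .name   = "example",
            .init   = example_trace_init,
            .reset  = example_trace_reset,
            /* no .ctrl_update: starting and stopping is driven by the core */
    };
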
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index af8c8566488..e96590f17de 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c | |||
@@ -43,6 +43,7 @@ static DEFINE_MUTEX(tracepoints_mutex); | |||
43 | */ | 43 | */ |
44 | #define TRACEPOINT_HASH_BITS 6 | 44 | #define TRACEPOINT_HASH_BITS 6 |
45 | #define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS) | 45 | #define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS) |
46 | static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE]; | ||
46 | 47 | ||
47 | /* | 48 | /* |
48 | * Note about RCU : | 49 | * Note about RCU : |
@@ -54,40 +55,43 @@ struct tracepoint_entry { | |||
54 | struct hlist_node hlist; | 55 | struct hlist_node hlist; |
55 | void **funcs; | 56 | void **funcs; |
56 | int refcount; /* Number of times armed. 0 if disarmed. */ | 57 | int refcount; /* Number of times armed. 0 if disarmed. */ |
57 | struct rcu_head rcu; | ||
58 | void *oldptr; | ||
59 | unsigned char rcu_pending:1; | ||
60 | char name[0]; | 58 | char name[0]; |
61 | }; | 59 | }; |
62 | 60 | ||
63 | static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE]; | 61 | struct tp_probes { |
62 | union { | ||
63 | struct rcu_head rcu; | ||
64 | struct list_head list; | ||
65 | } u; | ||
66 | void *probes[0]; | ||
67 | }; | ||
64 | 68 | ||
65 | static void free_old_closure(struct rcu_head *head) | 69 | static inline void *allocate_probes(int count) |
66 | { | 70 | { |
67 | struct tracepoint_entry *entry = container_of(head, | 71 | struct tp_probes *p = kmalloc(count * sizeof(void *) |
68 | struct tracepoint_entry, rcu); | 72 | + sizeof(struct tp_probes), GFP_KERNEL); |
69 | kfree(entry->oldptr); | 73 | return p == NULL ? NULL : p->probes; |
70 | /* Make sure we free the data before setting the pending flag to 0 */ | ||
71 | smp_wmb(); | ||
72 | entry->rcu_pending = 0; | ||
73 | } | 74 | } |
74 | 75 | ||
75 | static void tracepoint_entry_free_old(struct tracepoint_entry *entry, void *old) | 76 | static void rcu_free_old_probes(struct rcu_head *head) |
76 | { | 77 | { |
77 | if (!old) | 78 | kfree(container_of(head, struct tp_probes, u.rcu)); |
78 | return; | 79 | } |
79 | entry->oldptr = old; | 80 | |
80 | entry->rcu_pending = 1; | 81 | static inline void release_probes(void *old) |
81 | /* write rcu_pending before calling the RCU callback */ | 82 | { |
82 | smp_wmb(); | 83 | if (old) { |
83 | call_rcu_sched(&entry->rcu, free_old_closure); | 84 | struct tp_probes *tp_probes = container_of(old, |
85 | struct tp_probes, probes[0]); | ||
86 | call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes); | ||
87 | } | ||
84 | } | 88 | } |
85 | 89 | ||
86 | static void debug_print_probes(struct tracepoint_entry *entry) | 90 | static void debug_print_probes(struct tracepoint_entry *entry) |
87 | { | 91 | { |
88 | int i; | 92 | int i; |
89 | 93 | ||
90 | if (!tracepoint_debug) | 94 | if (!tracepoint_debug || !entry->funcs) |
91 | return; | 95 | return; |
92 | 96 | ||
93 | for (i = 0; entry->funcs[i]; i++) | 97 | for (i = 0; entry->funcs[i]; i++) |
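
The per-entry oldptr/rcu_pending machinery is replaced by allocating the probe array inside a tp_probes header, so a bare probes pointer can be mapped back to its allocation when it is handed to RCU. A standalone illustration of that round trip (demo_* names are stand-ins for the functions above):

    struct demo_probes {
            union {
                    struct rcu_head rcu;    /* while waiting for the grace period */
                    struct list_head list;  /* while queued for deferred release */
            } u;
            void *probes[0];                /* what callers actually see */
    };

    static void *demo_allocate(int count)
    {
            struct demo_probes *p = kmalloc(sizeof(*p) + count * sizeof(void *),
                                            GFP_KERNEL);
            return p ? p->probes : NULL;
    }

    static struct demo_probes *demo_header(void *probes)
    {
            /* inverse of demo_allocate(): probes == &p->probes[0] */
            return container_of(probes, struct demo_probes, probes[0]);
    }

The union works because the two fields are never live at the same time: rcu is used by release_probes(), list by the batched-update path added later in this patch.
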
@@ -111,12 +115,13 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe) | |||
111 | return ERR_PTR(-EEXIST); | 115 | return ERR_PTR(-EEXIST); |
112 | } | 116 | } |
113 | /* + 2 : one for new probe, one for NULL func */ | 117 | /* + 2 : one for new probe, one for NULL func */ |
114 | new = kzalloc((nr_probes + 2) * sizeof(void *), GFP_KERNEL); | 118 | new = allocate_probes(nr_probes + 2); |
115 | if (new == NULL) | 119 | if (new == NULL) |
116 | return ERR_PTR(-ENOMEM); | 120 | return ERR_PTR(-ENOMEM); |
117 | if (old) | 121 | if (old) |
118 | memcpy(new, old, nr_probes * sizeof(void *)); | 122 | memcpy(new, old, nr_probes * sizeof(void *)); |
119 | new[nr_probes] = probe; | 123 | new[nr_probes] = probe; |
124 | new[nr_probes + 1] = NULL; | ||
120 | entry->refcount = nr_probes + 1; | 125 | entry->refcount = nr_probes + 1; |
121 | entry->funcs = new; | 126 | entry->funcs = new; |
122 | debug_print_probes(entry); | 127 | debug_print_probes(entry); |
@@ -132,7 +137,7 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe) | |||
132 | old = entry->funcs; | 137 | old = entry->funcs; |
133 | 138 | ||
134 | if (!old) | 139 | if (!old) |
135 | return NULL; | 140 | return ERR_PTR(-ENOENT); |
136 | 141 | ||
137 | debug_print_probes(entry); | 142 | debug_print_probes(entry); |
138 | /* (N -> M), (N > 1, M >= 0) probes */ | 143 | /* (N -> M), (N > 1, M >= 0) probes */ |
@@ -151,13 +156,13 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe) | |||
151 | int j = 0; | 156 | int j = 0; |
152 | /* N -> M, (N > 1, M > 0) */ | 157 | /* N -> M, (N > 1, M > 0) */ |
153 | /* + 1 for NULL */ | 158 | /* + 1 for NULL */ |
154 | new = kzalloc((nr_probes - nr_del + 1) | 159 | new = allocate_probes(nr_probes - nr_del + 1); |
155 | * sizeof(void *), GFP_KERNEL); | ||
156 | if (new == NULL) | 160 | if (new == NULL) |
157 | return ERR_PTR(-ENOMEM); | 161 | return ERR_PTR(-ENOMEM); |
158 | for (i = 0; old[i]; i++) | 162 | for (i = 0; old[i]; i++) |
159 | if ((probe && old[i] != probe)) | 163 | if ((probe && old[i] != probe)) |
160 | new[j++] = old[i]; | 164 | new[j++] = old[i]; |
165 | new[nr_probes - nr_del] = NULL; | ||
161 | entry->refcount = nr_probes - nr_del; | 166 | entry->refcount = nr_probes - nr_del; |
162 | entry->funcs = new; | 167 | entry->funcs = new; |
163 | } | 168 | } |
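
Note the two new terminator stores (new[nr_probes + 1] = NULL and new[nr_probes - nr_del] = NULL): allocate_probes() uses kmalloc() where the old code used kzalloc(), so the NULL sentinel that every iteration over entry->funcs relies on must now be written explicitly. A reduction of the remove path's copy loop showing why (demo_copy_without() is hypothetical and omits the probe == NULL "remove all" case handled above):

    static void **demo_copy_without(void **old, int nr_probes, int nr_del,
                                    void *probe)
    {
            void **new = allocate_probes(nr_probes - nr_del + 1);
            int i, j = 0;

            if (!new)
                    return NULL;
            for (i = 0; old[i]; i++)
                    if (old[i] != probe)
                            new[j++] = old[i];
            /* kmalloc'd memory: terminate by hand or iteration runs off the end */
            new[nr_probes - nr_del] = NULL;
            return new;
    }
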
@@ -215,7 +220,6 @@ static struct tracepoint_entry *add_tracepoint(const char *name) | |||
215 | memcpy(&e->name[0], name, name_len); | 220 | memcpy(&e->name[0], name, name_len); |
216 | e->funcs = NULL; | 221 | e->funcs = NULL; |
217 | e->refcount = 0; | 222 | e->refcount = 0; |
218 | e->rcu_pending = 0; | ||
219 | hlist_add_head(&e->hlist, head); | 223 | hlist_add_head(&e->hlist, head); |
220 | return e; | 224 | return e; |
221 | } | 225 | } |
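
remove_tracepoint() no longer re-hashes the name to find its victim; callers now pass in the entry they already hold, and the refcount/-EBUSY policing moves to those callers. For reference, the bucket computation that get_tracepoint() and add_tracepoint() still perform matches the code being deleted below (demo_bucket() is a stand-in):

    static struct hlist_head *demo_bucket(const char *name)
    {
            u32 hash = jhash(name, strlen(name), 0);  /* hash excludes the '\0' */

            return &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
    }
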
@@ -224,32 +228,10 @@ static struct tracepoint_entry *add_tracepoint(const char *name) | |||
224 | * Remove the tracepoint from the tracepoint hash table. Must be called with | 228 | * Remove the tracepoint from the tracepoint hash table. Must be called with |
225 | * mutex_lock held. | 229 | * mutex_lock held. |
226 | */ | 230 | */ |
227 | static int remove_tracepoint(const char *name) | 231 | static inline void remove_tracepoint(struct tracepoint_entry *e) |
228 | { | 232 | { |
229 | struct hlist_head *head; | ||
230 | struct hlist_node *node; | ||
231 | struct tracepoint_entry *e; | ||
232 | int found = 0; | ||
233 | size_t len = strlen(name) + 1; | ||
234 | u32 hash = jhash(name, len-1, 0); | ||
235 | |||
236 | head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)]; | ||
237 | hlist_for_each_entry(e, node, head, hlist) { | ||
238 | if (!strcmp(name, e->name)) { | ||
239 | found = 1; | ||
240 | break; | ||
241 | } | ||
242 | } | ||
243 | if (!found) | ||
244 | return -ENOENT; | ||
245 | if (e->refcount) | ||
246 | return -EBUSY; | ||
247 | hlist_del(&e->hlist); | 233 | hlist_del(&e->hlist); |
248 | /* Make sure the call_rcu_sched has been executed */ | ||
249 | if (e->rcu_pending) | ||
250 | rcu_barrier_sched(); | ||
251 | kfree(e); | 234 | kfree(e); |
252 | return 0; | ||
253 | } | 235 | } |
254 | 236 | ||
255 | /* | 237 | /* |
@@ -320,6 +302,23 @@ static void tracepoint_update_probes(void) | |||
320 | module_update_tracepoints(); | 302 | module_update_tracepoints(); |
321 | } | 303 | } |
322 | 304 | ||
305 | static void *tracepoint_add_probe(const char *name, void *probe) | ||
306 | { | ||
307 | struct tracepoint_entry *entry; | ||
308 | void *old; | ||
309 | |||
310 | entry = get_tracepoint(name); | ||
311 | if (!entry) { | ||
312 | entry = add_tracepoint(name); | ||
313 | if (IS_ERR(entry)) | ||
314 | return entry; | ||
315 | } | ||
316 | old = tracepoint_entry_add_probe(entry, probe); | ||
317 | if (IS_ERR(old) && !entry->refcount) | ||
318 | remove_tracepoint(entry); | ||
319 | return old; | ||
320 | } | ||
321 | |||
323 | /** | 322 | /** |
324 | * tracepoint_probe_register - Connect a probe to a tracepoint | 323 | * tracepoint_probe_register - Connect a probe to a tracepoint |
325 | * @name: tracepoint name | 324 | * @name: tracepoint name |
@@ -330,44 +329,36 @@ static void tracepoint_update_probes(void) | |||
330 | */ | 329 | */ |
331 | int tracepoint_probe_register(const char *name, void *probe) | 330 | int tracepoint_probe_register(const char *name, void *probe) |
332 | { | 331 | { |
333 | struct tracepoint_entry *entry; | ||
334 | int ret = 0; | ||
335 | void *old; | 332 | void *old; |
336 | 333 | ||
337 | mutex_lock(&tracepoints_mutex); | 334 | mutex_lock(&tracepoints_mutex); |
338 | entry = get_tracepoint(name); | 335 | old = tracepoint_add_probe(name, probe); |
339 | if (!entry) { | ||
340 | entry = add_tracepoint(name); | ||
341 | if (IS_ERR(entry)) { | ||
342 | ret = PTR_ERR(entry); | ||
343 | goto end; | ||
344 | } | ||
345 | } | ||
346 | /* | ||
347 | * If we detect that a call_rcu_sched is pending for this tracepoint, | ||
348 | * make sure it's executed now. | ||
349 | */ | ||
350 | if (entry->rcu_pending) | ||
351 | rcu_barrier_sched(); | ||
352 | old = tracepoint_entry_add_probe(entry, probe); | ||
353 | if (IS_ERR(old)) { | ||
354 | ret = PTR_ERR(old); | ||
355 | goto end; | ||
356 | } | ||
357 | mutex_unlock(&tracepoints_mutex); | 336 | mutex_unlock(&tracepoints_mutex); |
337 | if (IS_ERR(old)) | ||
338 | return PTR_ERR(old); | ||
339 | |||
358 | tracepoint_update_probes(); /* may update entry */ | 340 | tracepoint_update_probes(); /* may update entry */ |
359 | mutex_lock(&tracepoints_mutex); | 341 | release_probes(old); |
360 | entry = get_tracepoint(name); | 342 | return 0; |
361 | WARN_ON(!entry); | ||
362 | if (entry->rcu_pending) | ||
363 | rcu_barrier_sched(); | ||
364 | tracepoint_entry_free_old(entry, old); | ||
365 | end: | ||
366 | mutex_unlock(&tracepoints_mutex); | ||
367 | return ret; | ||
368 | } | 343 | } |
369 | EXPORT_SYMBOL_GPL(tracepoint_probe_register); | 344 | EXPORT_SYMBOL_GPL(tracepoint_probe_register); |
370 | 345 | ||
346 | static void *tracepoint_remove_probe(const char *name, void *probe) | ||
347 | { | ||
348 | struct tracepoint_entry *entry; | ||
349 | void *old; | ||
350 | |||
351 | entry = get_tracepoint(name); | ||
352 | if (!entry) | ||
353 | return ERR_PTR(-ENOENT); | ||
354 | old = tracepoint_entry_remove_probe(entry, probe); | ||
355 | if (IS_ERR(old)) | ||
356 | return old; | ||
357 | if (!entry->refcount) | ||
358 | remove_tracepoint(entry); | ||
359 | return old; | ||
360 | } | ||
361 | |||
371 | /** | 362 | /** |
372 | * tracepoint_probe_unregister - Disconnect a probe from a tracepoint | 363 | * tracepoint_probe_unregister - Disconnect a probe from a tracepoint |
373 | * @name: tracepoint name | 364 | * @name: tracepoint name |
@@ -380,38 +371,104 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_register); | |||
380 | */ | 371 | */ |
381 | int tracepoint_probe_unregister(const char *name, void *probe) | 372 | int tracepoint_probe_unregister(const char *name, void *probe) |
382 | { | 373 | { |
383 | struct tracepoint_entry *entry; | ||
384 | void *old; | 374 | void *old; |
385 | int ret = -ENOENT; | ||
386 | 375 | ||
387 | mutex_lock(&tracepoints_mutex); | 376 | mutex_lock(&tracepoints_mutex); |
388 | entry = get_tracepoint(name); | 377 | old = tracepoint_remove_probe(name, probe); |
389 | if (!entry) | ||
390 | goto end; | ||
391 | if (entry->rcu_pending) | ||
392 | rcu_barrier_sched(); | ||
393 | old = tracepoint_entry_remove_probe(entry, probe); | ||
394 | if (!old) { | ||
395 | printk(KERN_WARNING "Warning: Trying to unregister a probe" | ||
396 | "that doesn't exist\n"); | ||
397 | goto end; | ||
398 | } | ||
399 | mutex_unlock(&tracepoints_mutex); | 378 | mutex_unlock(&tracepoints_mutex); |
379 | if (IS_ERR(old)) | ||
380 | return PTR_ERR(old); | ||
381 | |||
400 | tracepoint_update_probes(); /* may update entry */ | 382 | tracepoint_update_probes(); /* may update entry */ |
383 | release_probes(old); | ||
384 | return 0; | ||
385 | } | ||
386 | EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); | ||
387 | |||
388 | static LIST_HEAD(old_probes); | ||
389 | static int need_update; | ||
390 | |||
391 | static void tracepoint_add_old_probes(void *old) | ||
392 | { | ||
393 | need_update = 1; | ||
394 | if (old) { | ||
395 | struct tp_probes *tp_probes = container_of(old, | ||
396 | struct tp_probes, probes[0]); | ||
397 | list_add(&tp_probes->u.list, &old_probes); | ||
398 | } | ||
399 | } | ||
400 | |||
401 | /** | ||
402 | * tracepoint_probe_register_noupdate - register a probe but not connect | ||
403 | * @name: tracepoint name | ||
404 | * @probe: probe handler | ||
405 | * | ||
406 | * caller must call tracepoint_probe_update_all() | ||
407 | */ | ||
408 | int tracepoint_probe_register_noupdate(const char *name, void *probe) | ||
409 | { | ||
410 | void *old; | ||
411 | |||
401 | mutex_lock(&tracepoints_mutex); | 412 | mutex_lock(&tracepoints_mutex); |
402 | entry = get_tracepoint(name); | 413 | old = tracepoint_add_probe(name, probe); |
403 | if (!entry) | 414 | if (IS_ERR(old)) { |
404 | goto end; | 415 | mutex_unlock(&tracepoints_mutex); |
405 | if (entry->rcu_pending) | 416 | return PTR_ERR(old); |
406 | rcu_barrier_sched(); | 417 | } |
407 | tracepoint_entry_free_old(entry, old); | 418 | tracepoint_add_old_probes(old); |
408 | remove_tracepoint(name); /* Ignore busy error message */ | ||
409 | ret = 0; | ||
410 | end: | ||
411 | mutex_unlock(&tracepoints_mutex); | 419 | mutex_unlock(&tracepoints_mutex); |
412 | return ret; | 420 | return 0; |
413 | } | 421 | } |
414 | EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); | 422 | EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate); |
423 | |||
424 | /** | ||
425 | * tracepoint_probe_unregister_noupdate - remove a probe but not disconnect | ||
426 | * @name: tracepoint name | ||
427 | * @probe: probe function pointer | ||
428 | * | ||
429 | * caller must call tracepoint_probe_update_all() | ||
430 | */ | ||
431 | int tracepoint_probe_unregister_noupdate(const char *name, void *probe) | ||
432 | { | ||
433 | void *old; | ||
434 | |||
435 | mutex_lock(&tracepoints_mutex); | ||
436 | old = tracepoint_remove_probe(name, probe); | ||
437 | if (IS_ERR(old)) { | ||
438 | mutex_unlock(&tracepoints_mutex); | ||
439 | return PTR_ERR(old); | ||
440 | } | ||
441 | tracepoint_add_old_probes(old); | ||
442 | mutex_unlock(&tracepoints_mutex); | ||
443 | return 0; | ||
444 | } | ||
445 | EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate); | ||
446 | |||
447 | /** | ||
448 | * tracepoint_probe_update_all - update tracepoints | ||
449 | */ | ||
450 | void tracepoint_probe_update_all(void) | ||
451 | { | ||
452 | LIST_HEAD(release_probes); | ||
453 | struct tp_probes *pos, *next; | ||
454 | |||
455 | mutex_lock(&tracepoints_mutex); | ||
456 | if (!need_update) { | ||
457 | mutex_unlock(&tracepoints_mutex); | ||
458 | return; | ||
459 | } | ||
460 | if (!list_empty(&old_probes)) | ||
461 | list_replace_init(&old_probes, &release_probes); | ||
462 | need_update = 0; | ||
463 | mutex_unlock(&tracepoints_mutex); | ||
464 | |||
465 | tracepoint_update_probes(); | ||
466 | list_for_each_entry_safe(pos, next, &release_probes, u.list) { | ||
467 | list_del(&pos->u.list); | ||
468 | call_rcu_sched(&pos->u.rcu, rcu_free_old_probes); | ||
469 | } | ||
470 | } | ||
471 | EXPORT_SYMBOL_GPL(tracepoint_probe_update_all); | ||
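
The point of the _noupdate variants is amortization: N probe changes used to cost N tracepoint_update_probes() passes and N RCU deferrals, whereas a caller wiring up several probes can now pay for a single update pass and a single flush of deferred frees. A hedged usage sketch (the probe functions and the error handling are illustrative; the tracepoint names follow the sched tracers):

    static int demo_connect_sched_probes(void)
    {
            int ret;

            ret = tracepoint_probe_register_noupdate("sched_switch",
                                                     demo_switch_probe);
            if (ret)
                    return ret;
            ret = tracepoint_probe_register_noupdate("sched_wakeup",
                                                     demo_wakeup_probe);
            if (ret)
                    tracepoint_probe_unregister_noupdate("sched_switch",
                                                         demo_switch_probe);
            /* one site-update pass and one deferred-free flush for everything */
            tracepoint_probe_update_all();
            return ret;
    }

tracepoint_probe_update_all() swaps the accumulated old_probes list for a local one under the mutex, so the call_rcu_sched() batch runs without holding tracepoints_mutex.
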
415 | 472 | ||
416 | /** | 473 | /** |
417 | * tracepoint_get_iter_range - Get a next tracepoint iterator given a range. | 474 | * tracepoint_get_iter_range - Get a next tracepoint iterator given a range. |