Diffstat (limited to 'kernel/trace/trace_event_profile.c')
 -rw-r--r--  kernel/trace/trace_event_profile.c | 43
 1 file changed, 20 insertions, 23 deletions
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 8d5c171cc998..d9c60f80aa0d 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -8,17 +8,14 @@
 #include <linux/module.h>
 #include "trace.h"
 
-/*
- * We can't use a size but a type in alloc_percpu()
- * So let's create a dummy type that matches the desired size
- */
-typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t;
 
-char *trace_profile_buf;
-EXPORT_SYMBOL_GPL(trace_profile_buf);
+char *perf_trace_buf;
+EXPORT_SYMBOL_GPL(perf_trace_buf);
+
+char *perf_trace_buf_nmi;
+EXPORT_SYMBOL_GPL(perf_trace_buf_nmi);
 
-char *trace_profile_buf_nmi;
-EXPORT_SYMBOL_GPL(trace_profile_buf_nmi);
+typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;
 
 /* Count the events in use (per event id, not per instance) */
 static int total_profile_count;
@@ -32,20 +29,20 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
 		return 0;
 
 	if (!total_profile_count) {
-		buf = (char *)alloc_percpu(profile_buf_t);
+		buf = (char *)alloc_percpu(perf_trace_t);
 		if (!buf)
 			goto fail_buf;
 
-		rcu_assign_pointer(trace_profile_buf, buf);
+		rcu_assign_pointer(perf_trace_buf, buf);
 
-		buf = (char *)alloc_percpu(profile_buf_t);
+		buf = (char *)alloc_percpu(perf_trace_t);
 		if (!buf)
 			goto fail_buf_nmi;
 
-		rcu_assign_pointer(trace_profile_buf_nmi, buf);
+		rcu_assign_pointer(perf_trace_buf_nmi, buf);
 	}
 
-	ret = event->profile_enable();
+	ret = event->profile_enable(event);
 	if (!ret) {
 		total_profile_count++;
 		return 0;
@@ -53,10 +50,10 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
 
 fail_buf_nmi:
 	if (!total_profile_count) {
-		free_percpu(trace_profile_buf_nmi);
-		free_percpu(trace_profile_buf);
-		trace_profile_buf_nmi = NULL;
-		trace_profile_buf = NULL;
+		free_percpu(perf_trace_buf_nmi);
+		free_percpu(perf_trace_buf);
+		perf_trace_buf_nmi = NULL;
+		perf_trace_buf = NULL;
 	}
 fail_buf:
 	atomic_dec(&event->profile_count);
@@ -89,14 +86,14 @@ static void ftrace_profile_disable_event(struct ftrace_event_call *event)
 	if (!atomic_add_negative(-1, &event->profile_count))
 		return;
 
-	event->profile_disable();
+	event->profile_disable(event);
 
 	if (!--total_profile_count) {
-		buf = trace_profile_buf;
-		rcu_assign_pointer(trace_profile_buf, NULL);
+		buf = perf_trace_buf;
+		rcu_assign_pointer(perf_trace_buf, NULL);
 
-		nmi_buf = trace_profile_buf_nmi;
-		rcu_assign_pointer(trace_profile_buf_nmi, NULL);
+		nmi_buf = perf_trace_buf_nmi;
+		rcu_assign_pointer(perf_trace_buf_nmi, NULL);
 
 		/*
 		 * Ensure every events in profiling have finished before
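
Note on the perf_trace_t typedef in the first hunk: alloc_percpu() takes a type rather than a byte count, so the removed dummy struct profile_buf_t and the new typeof()-based perf_trace_t both exist only to hand the allocator a type whose size is FTRACE_MAX_PROFILE_SIZE. The stand-alone sketch below illustrates that sized-array-type trick outside the kernel; the FTRACE_MAX_PROFILE_SIZE value is a placeholder rather than the tree's definition, and plain sizeof() stands in for the per-cpu allocator.

#include <stdio.h>

/*
 * Placeholder value for illustration only; the real FTRACE_MAX_PROFILE_SIZE
 * is defined in the kernel's tracing headers.
 */
#define FTRACE_MAX_PROFILE_SIZE 2048

/*
 * Same trick as the patch: a typedef for a fixed-size char array lets the
 * type itself carry the buffer size. typeof() is a GNU C extension, as used
 * throughout the kernel.
 */
typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t;

int main(void)
{
	/* The type encodes the buffer size, so sizeof() recovers it. */
	printf("sizeof(perf_trace_t) = %zu\n", sizeof(perf_trace_t));
	return 0;
}

Compared with the removed struct wrapper, the typeof() form drops the extra member name while keeping the same size and alignment as a plain char[FTRACE_MAX_PROFILE_SIZE] array.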
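
Note on the rcu_assign_pointer() calls: the enable path publishes the freshly allocated per-cpu buffers, and the disable path unpublishes them by assigning NULL before freeing; the comment truncated at the end of the last hunk indicates the code waits for in-flight tracing first. The fragment below is a condensed, kernel-style sketch of that publish/unpublish pattern, not the file's actual code: the sketch_* names and function bodies are made up for illustration, the buffer size is a placeholder, and the synchronize_sched() call is inferred from the truncated comment rather than taken from the lines shown above.

#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>

/* Illustrative sketch only; names mirror the patch, bodies are assumed. */
typedef typeof(char [2048]) sketch_trace_t;	/* size is a placeholder */

static char *sketch_buf;			/* published buffer pointer */

static int sketch_enable(void)
{
	char *buf = (char *)alloc_percpu(sketch_trace_t);

	if (!buf)
		return -ENOMEM;

	/* Publish: readers that see the pointer also see the allocation. */
	rcu_assign_pointer(sketch_buf, buf);
	return 0;
}

static void sketch_disable(void)
{
	char *buf = sketch_buf;

	/* Unpublish, wait for current users to finish, then free. */
	rcu_assign_pointer(sketch_buf, NULL);
	synchronize_sched();
	free_percpu(buf);
}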
