Diffstat (limited to 'kernel/trace/trace_event_profile.c')
 -rw-r--r--  kernel/trace/trace_event_profile.c | 92
 1 file changed, 89 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 11ba5bb4ed0a..8d5c171cc998 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -5,8 +5,65 @@
  *
  */
 
+#include <linux/module.h>
 #include "trace.h"
 
+/*
+ * alloc_percpu() takes a type rather than a size, so create a
+ * dummy type whose size matches the desired buffer size.
+ */
+typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t;
+
+char *trace_profile_buf;
+EXPORT_SYMBOL_GPL(trace_profile_buf);
+
+char *trace_profile_buf_nmi;
+EXPORT_SYMBOL_GPL(trace_profile_buf_nmi);
+
+/* Count the events in use (per event id, not per instance) */
+static int total_profile_count;
+
+static int ftrace_profile_enable_event(struct ftrace_event_call *event)
+{
+    char *buf;
+    int ret = -ENOMEM;
+
+    if (atomic_inc_return(&event->profile_count))
+        return 0;
+
+    if (!total_profile_count) {
+        buf = (char *)alloc_percpu(profile_buf_t);
+        if (!buf)
+            goto fail_buf;
+
+        rcu_assign_pointer(trace_profile_buf, buf);
+
+        buf = (char *)alloc_percpu(profile_buf_t);
+        if (!buf)
+            goto fail_buf_nmi;
+
+        rcu_assign_pointer(trace_profile_buf_nmi, buf);
+    }
+
+    ret = event->profile_enable();
+    if (!ret) {
+        total_profile_count++;
+        return 0;
+    }
+
+fail_buf_nmi:
+    if (!total_profile_count) {
+        free_percpu(trace_profile_buf_nmi);
+        free_percpu(trace_profile_buf);
+        trace_profile_buf_nmi = NULL;
+        trace_profile_buf = NULL;
+    }
+fail_buf:
+    atomic_dec(&event->profile_count);
+
+    return ret;
+}
+
 int ftrace_profile_enable(int event_id)
 {
     struct ftrace_event_call *event;
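
The two exported buffers give event probes a preallocated per-cpu scratch
area, so the profiling fast path never allocates memory; the separate NMI
buffer prevents an NMI that fires inside another event from scribbling over
the same storage. A minimal sketch of the consumer side, assuming the
rcu_assign_pointer() publication above; the helper name is hypothetical:

    /*
     * Hypothetical helper: pick the per-cpu scratch buffer for the
     * current context. The caller must have interrupts disabled so
     * the CPU cannot change, and must be in a sched-RCU read-side
     * section (preemption off), pairing with the synchronize_sched()
     * in the disable path below.
     */
    static char *get_profile_buf(void)
    {
        char *raw_data;

        if (in_nmi())
            raw_data = rcu_dereference(trace_profile_buf_nmi);
        else
            raw_data = rcu_dereference(trace_profile_buf);

        if (!raw_data)
            return NULL;    /* profiling was concurrently disabled */

        return per_cpu_ptr(raw_data, smp_processor_id());
    }
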
@@ -14,8 +71,9 @@ int ftrace_profile_enable(int event_id)
 
     mutex_lock(&event_mutex);
     list_for_each_entry(event, &ftrace_events, list) {
-        if (event->id == event_id && event->profile_enable) {
-            ret = event->profile_enable(event);
+        if (event->id == event_id && event->profile_enable &&
+            try_module_get(event->mod)) {
+            ret = ftrace_profile_enable_event(event);
             break;
         }
     }
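
try_module_get() pins the module that owns the event, so the
profile_enable()/profile_disable() callbacks cannot be unloaded while
profiling is active; the matching module_put() is added to
ftrace_profile_disable() below. For built-in events event->mod is NULL and
try_module_get(NULL) succeeds, so the check costs nothing there. The idiom
in isolation, as a sketch (the wrapper and error code are hypothetical):

    /* Sketch of the module-pinning idiom around a module-owned callback. */
    static int profile_enable_pinned(struct ftrace_event_call *event)
    {
        if (!try_module_get(event->mod))
            return -ENODEV;    /* hypothetical: module is unloading */

        /*
         * The module cannot be unloaded from here on; the reference
         * is held until the disable path calls module_put().
         */
        return ftrace_profile_enable_event(event);
    }
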
@@ -24,6 +82,33 @@ int ftrace_profile_enable(int event_id)
     return ret;
 }
 
+static void ftrace_profile_disable_event(struct ftrace_event_call *event)
+{
+    char *buf, *nmi_buf;
+
+    if (!atomic_add_negative(-1, &event->profile_count))
+        return;
+
+    event->profile_disable();
+
+    if (!--total_profile_count) {
+        buf = trace_profile_buf;
+        rcu_assign_pointer(trace_profile_buf, NULL);
+
+        nmi_buf = trace_profile_buf_nmi;
+        rcu_assign_pointer(trace_profile_buf_nmi, NULL);
+
+        /*
+         * Ensure every event currently profiling has finished
+         * before releasing the buffers.
+         */
+        synchronize_sched();
+
+        free_percpu(buf);
+        free_percpu(nmi_buf);
+    }
+}
+
 void ftrace_profile_disable(int event_id)
 {
     struct ftrace_event_call *event;
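
The teardown is the canonical RCU unpublish, wait, free sequence: clear the
global pointers so new probes see NULL, wait out a grace period, then free.
synchronize_sched() is the right flavor here because probes run with
preemption disabled, so once it returns, every probe that could still hold
the old pointers has finished. The idiom in isolation, as a sketch with a
hypothetical slot pointer:

    /* Sketch of the unpublish/wait/free idiom used above. */
    static void release_percpu_buf(char **slot)    /* hypothetical helper */
    {
        char *buf = *slot;

        rcu_assign_pointer(*slot, NULL);    /* new readers now see NULL */
        synchronize_sched();                /* in-flight readers finish  */
        free_percpu(buf);                   /* safe: no reader holds buf */
    }
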
@@ -31,7 +116,8 @@ void ftrace_profile_disable(int event_id)
     mutex_lock(&event_mutex);
     list_for_each_entry(event, &ftrace_events, list) {
         if (event->id == event_id) {
-            event->profile_disable(event);
+            ftrace_profile_disable_event(event);
+            module_put(event->mod);
             break;
         }
     }
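
Two counters cooperate here. event->profile_count does per-event accounting
and, judging by the checks above, starts at -1: atomic_inc_return() yields 0
only for the first enabler, and atomic_add_negative(-1, ...) is true only
when the last disabler drops it back to -1. total_profile_count tracks how
many event ids are profiled overall and needs no atomicity because both
paths run under event_mutex. A sketch of the atomic pair, assuming the -1
initial value and balanced enable/disable calls:

    #include <linux/atomic.h>

    /* Assumes the counter is defined with ATOMIC_INIT(-1). */
    static atomic_t profile_count = ATOMIC_INIT(-1);

    static bool first_enable(void)
    {
        /* -1 -> 0: only the first enabler sees a zero result. */
        return atomic_inc_return(&profile_count) == 0;
    }

    static bool last_disable(void)
    {
        /* 0 -> -1: only the last disabler drives the count negative. */
        return atomic_add_negative(-1, &profile_count);
    }
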