author     Frederic Weisbecker <fweisbec@gmail.com>    2010-03-04 23:35:37 -0500
committer  Frederic Weisbecker <fweisbec@gmail.com>    2010-03-10 08:47:18 -0500
commit     97d5a22005f38057b4bc0d95f81cd26510268794 (patch)
tree       b981789b1cec8ac36527e52204e407b32efa0ea6 /kernel
parent     c530665c31c0140b74ca7689e7f836177796e5bd (diff)
perf: Drop the obsolete profile naming for trace events
Drop the obsolete "profile" naming used by perf for trace events.
Perf can now do more than simple events counting, so generalize
the API naming.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Jason Baron <jbaron@redhat.com>
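For reference, the renames this patch applies (most definitions appear in the diff below; perf_trace_buf_submit() and PERF_MAX_TRACE_SIZE are declared in include/linux/ftrace_event.h, outside the 'kernel' diffstat shown here, so those two entries are inferred from the call sites):

    ftrace_profile_enable()        -> perf_trace_enable()
    ftrace_profile_disable()       -> perf_trace_disable()
    ftrace_profile_enable_event()  -> perf_trace_event_enable()
    ftrace_profile_disable_event() -> perf_trace_event_disable()
    ftrace_perf_buf_prepare()      -> perf_trace_buf_prepare()
    ftrace_perf_buf_submit()       -> perf_trace_buf_submit()
    FTRACE_MAX_PROFILE_SIZE        -> PERF_MAX_TRACE_SIZE
    total_profile_count            -> total_ref_count
    struct ftrace_event_call: profile_enable/profile_disable/profile_count
                               -> perf_event_enable/perf_event_disable/perf_refcount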
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_event.c                                |  4
-rw-r--r--  kernel/trace/Makefile                              |  2
-rw-r--r--  kernel/trace/trace_event_perf.c                    | 44
            (renamed from kernel/trace/trace_event_profile.c)
-rw-r--r--  kernel/trace/trace_events.c                        |  2
-rw-r--r--  kernel/trace/trace_kprobe.c                        | 28
-rw-r--r--  kernel/trace/trace_syscalls.c                      | 72
6 files changed, 76 insertions(+), 76 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 45b4b6e55891..c502b18594cc 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -4347,7 +4347,7 @@ static int perf_tp_event_match(struct perf_event *event,
 
 static void tp_perf_event_destroy(struct perf_event *event)
 {
-	ftrace_profile_disable(event->attr.config);
+	perf_trace_disable(event->attr.config);
 }
 
 static const struct pmu *tp_perf_event_init(struct perf_event *event)
@@ -4361,7 +4361,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
 	    !capable(CAP_SYS_ADMIN))
 		return ERR_PTR(-EPERM);
 
-	if (ftrace_profile_enable(event->attr.config))
+	if (perf_trace_enable(event->attr.config))
 		return NULL;
 
 	event->destroy = tp_perf_event_destroy;
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index d00c6fe23f54..78edc6490038 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events.o
 obj-$(CONFIG_EVENT_TRACING) += trace_export.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
 ifeq ($(CONFIG_PERF_EVENTS),y)
-obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o
+obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
 endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_perf.c
index e66d21e15a0f..f315b12a41d8 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_perf.c
@@ -1,5 +1,5 @@
 /*
- * trace event based perf counter profiling
+ * trace event based perf event profiling/tracing
  *
  * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
  * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
@@ -14,20 +14,20 @@ DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
 static char *perf_trace_buf;
 static char *perf_trace_buf_nmi;
 
-typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;
+typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
 
 /* Count the events in use (per event id, not per instance) */
-static int total_profile_count;
+static int total_ref_count;
 
-static int ftrace_profile_enable_event(struct ftrace_event_call *event)
+static int perf_trace_event_enable(struct ftrace_event_call *event)
 {
 	char *buf;
 	int ret = -ENOMEM;
 
-	if (event->profile_count++ > 0)
+	if (event->perf_refcount++ > 0)
 		return 0;
 
-	if (!total_profile_count) {
+	if (!total_ref_count) {
 		buf = (char *)alloc_percpu(perf_trace_t);
 		if (!buf)
 			goto fail_buf;
@@ -41,35 +41,35 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
 		rcu_assign_pointer(perf_trace_buf_nmi, buf);
 	}
 
-	ret = event->profile_enable(event);
+	ret = event->perf_event_enable(event);
 	if (!ret) {
-		total_profile_count++;
+		total_ref_count++;
 		return 0;
 	}
 
 fail_buf_nmi:
-	if (!total_profile_count) {
+	if (!total_ref_count) {
 		free_percpu(perf_trace_buf_nmi);
 		free_percpu(perf_trace_buf);
 		perf_trace_buf_nmi = NULL;
 		perf_trace_buf = NULL;
 	}
 fail_buf:
-	event->profile_count--;
+	event->perf_refcount--;
 
 	return ret;
 }
 
-int ftrace_profile_enable(int event_id)
+int perf_trace_enable(int event_id)
 {
 	struct ftrace_event_call *event;
 	int ret = -EINVAL;
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->id == event_id && event->profile_enable &&
+		if (event->id == event_id && event->perf_event_enable &&
 		    try_module_get(event->mod)) {
-			ret = ftrace_profile_enable_event(event);
+			ret = perf_trace_event_enable(event);
 			break;
 		}
 	}
@@ -78,16 +78,16 @@ int ftrace_profile_enable(int event_id)
 	return ret;
 }
 
-static void ftrace_profile_disable_event(struct ftrace_event_call *event)
+static void perf_trace_event_disable(struct ftrace_event_call *event)
 {
 	char *buf, *nmi_buf;
 
-	if (--event->profile_count > 0)
+	if (--event->perf_refcount > 0)
 		return;
 
-	event->profile_disable(event);
+	event->perf_event_disable(event);
 
-	if (!--total_profile_count) {
+	if (!--total_ref_count) {
 		buf = perf_trace_buf;
 		rcu_assign_pointer(perf_trace_buf, NULL);
 
@@ -105,14 +105,14 @@ static void ftrace_profile_disable_event(struct ftrace_event_call *event)
 	}
 }
 
-void ftrace_profile_disable(int event_id)
+void perf_trace_disable(int event_id)
 {
 	struct ftrace_event_call *event;
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
 		if (event->id == event_id) {
-			ftrace_profile_disable_event(event);
+			perf_trace_event_disable(event);
 			module_put(event->mod);
 			break;
 		}
@@ -120,8 +120,8 @@ void ftrace_profile_disable(int event_id)
 	mutex_unlock(&event_mutex);
 }
 
-__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
+__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 					int *rctxp, unsigned long *irq_flags)
 {
 	struct trace_entry *entry;
 	char *trace_buf, *raw_data;
@@ -162,4 +162,4 @@ err_recursion:
 	local_irq_restore(*irq_flags);
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
+EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
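The enable/disable paths in this file implement a two-level reference count: each event's perf_refcount gates its own enable/disable callback, while the global total_ref_count gates allocation and teardown of the per-CPU buffers shared by every event. A minimal user-space sketch of the same pattern (hypothetical names; plain malloc() stands in for alloc_percpu() and there is no event_mutex equivalent):

#include <stdio.h>
#include <stdlib.h>

struct event {
        int perf_refcount;              /* users of this particular event */
};

static char *shared_buf;                /* models perf_trace_buf */
static int total_ref_count;             /* events enabled system-wide */

static int event_enable(struct event *ev)
{
        if (ev->perf_refcount++ > 0)
                return 0;                       /* already enabled */

        if (!total_ref_count) {
                shared_buf = malloc(4096);      /* first user allocates */
                if (!shared_buf) {
                        ev->perf_refcount--;
                        return -1;
                }
        }
        total_ref_count++;
        return 0;
}

static void event_disable(struct event *ev)
{
        if (--ev->perf_refcount > 0)
                return;                         /* other users remain */

        if (!--total_ref_count) {               /* last user frees */
                free(shared_buf);
                shared_buf = NULL;
        }
}

int main(void)
{
        struct event a = { 0 }, b = { 0 };

        event_enable(&a);
        event_enable(&b);       /* buffer already allocated, only counted */
        event_disable(&a);      /* buffer stays: b still holds a reference */
        event_disable(&b);      /* last disable frees the buffer */
        printf("buf=%p refs=%d\n", (void *)shared_buf, total_ref_count);
        return 0;
}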
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 3f972ad98d04..beab8bf2f310 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -938,7 +938,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 	trace_create_file("enable", 0644, call->dir, call,
 			  enable);
 
-	if (call->id && call->profile_enable)
+	if (call->id && call->perf_event_enable)
 		trace_create_file("id", 0444, call->dir, call,
 				  id);
 
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index f7a20a8bfb31..1251e367bae9 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1214,7 +1214,7 @@ static int set_print_fmt(struct trace_probe *tp)
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static __kprobes void kprobe_profile_func(struct kprobe *kp,
+static __kprobes void kprobe_perf_func(struct kprobe *kp,
 					  struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
@@ -1227,11 +1227,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
 	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
 		      "profile buffer not large enough"))
 		return;
 
-	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
 	if (!entry)
 		return;
 
@@ -1240,11 +1240,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 
-	ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
+	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
 }
 
 /* Kretprobe profile handler */
-static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
+static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
 					  struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
@@ -1257,11 +1257,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
 	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
 		      "profile buffer not large enough"))
 		return;
 
-	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
 	if (!entry)
 		return;
 
@@ -1271,11 +1271,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 
-	ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1,
+	perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
 			       irq_flags, regs);
 }
 
-static int probe_profile_enable(struct ftrace_event_call *call)
+static int probe_perf_enable(struct ftrace_event_call *call)
 {
 	struct trace_probe *tp = (struct trace_probe *)call->data;
 
@@ -1287,7 +1287,7 @@ static int probe_profile_enable(struct ftrace_event_call *call)
 	return enable_kprobe(&tp->rp.kp);
 }
 
-static void probe_profile_disable(struct ftrace_event_call *call)
+static void probe_perf_disable(struct ftrace_event_call *call)
 {
 	struct trace_probe *tp = (struct trace_probe *)call->data;
 
@@ -1312,7 +1312,7 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 		kprobe_trace_func(kp, regs);
 #ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
-		kprobe_profile_func(kp, regs);
+		kprobe_perf_func(kp, regs);
 #endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
@@ -1326,7 +1326,7 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
 		kretprobe_trace_func(ri, regs);
 #ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
-		kretprobe_profile_func(ri, regs);
+		kretprobe_perf_func(ri, regs);
 #endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
@@ -1359,8 +1359,8 @@ static int register_probe_event(struct trace_probe *tp)
 	call->unregfunc = probe_event_disable;
 
 #ifdef CONFIG_PERF_EVENTS
-	call->profile_enable = probe_profile_enable;
-	call->profile_disable = probe_profile_disable;
+	call->perf_event_enable = probe_perf_enable;
+	call->perf_event_disable = probe_perf_disable;
 #endif
 	call->data = tp;
 	ret = trace_add_event_call(call);
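Both probe handlers above shape the record size the same way: size = ALIGN(__size + sizeof(u32), sizeof(u64)), then size -= sizeof(u32). Perf prepends a u32 size field to each raw sample, so the payload is padded until header plus payload lands on a u64 boundary. A small stand-alone check of that arithmetic (ALIGN() reimplemented here; in the kernel it comes from include/linux/kernel.h):

#include <stdio.h>
#include <stdint.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        for (uint32_t __size = 16; __size <= 24; __size += 4) {
                /* room for the payload such that u32 header + payload
                 * stays a multiple of 8 */
                size_t size = ALIGN(__size + sizeof(uint32_t),
                                    sizeof(uint64_t)) - sizeof(uint32_t);
                printf("payload %2u -> buffer %2zu (header+payload = %2zu, mod 8 = %zu)\n",
                       __size, size, size + sizeof(uint32_t),
                       (size + sizeof(uint32_t)) % 8);
        }
        return 0;
}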
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 7e6e84fb7b6c..33c2a5b769dc 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -428,12 +428,12 @@ core_initcall(init_ftrace_syscalls);
 
 #ifdef CONFIG_PERF_EVENTS
 
-static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
-static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
-static int sys_prof_refcount_enter;
-static int sys_prof_refcount_exit;
+static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
+static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
+static int sys_perf_refcount_enter;
+static int sys_perf_refcount_exit;
 
-static void prof_syscall_enter(struct pt_regs *regs, long id)
+static void perf_syscall_enter(struct pt_regs *regs, long id)
 {
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_enter *rec;
@@ -443,7 +443,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
-	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
+	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -455,11 +455,11 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	size = ALIGN(size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-		      "profile buffer not large enough"))
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+		      "perf buffer not large enough"))
 		return;
 
-	rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size,
+	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
 				sys_data->enter_event->id, &rctx, &flags);
 	if (!rec)
 		return;
@@ -467,10 +467,10 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	rec->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
 			       (unsigned long *)&rec->args);
-	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysenter_enable(struct ftrace_event_call *call)
+int perf_sysenter_enable(struct ftrace_event_call *call)
 {
 	int ret = 0;
 	int num;
@@ -478,34 +478,34 @@ int prof_sysenter_enable(struct ftrace_event_call *call)
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	if (!sys_prof_refcount_enter)
-		ret = register_trace_sys_enter(prof_syscall_enter);
+	if (!sys_perf_refcount_enter)
+		ret = register_trace_sys_enter(perf_syscall_enter);
 	if (ret) {
 		pr_info("event trace: Could not activate"
 				"syscall entry trace point");
 	} else {
-		set_bit(num, enabled_prof_enter_syscalls);
-		sys_prof_refcount_enter++;
+		set_bit(num, enabled_perf_enter_syscalls);
+		sys_perf_refcount_enter++;
 	}
 	mutex_unlock(&syscall_trace_lock);
 	return ret;
 }
 
-void prof_sysenter_disable(struct ftrace_event_call *call)
+void perf_sysenter_disable(struct ftrace_event_call *call)
 {
 	int num;
 
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	sys_prof_refcount_enter--;
-	clear_bit(num, enabled_prof_enter_syscalls);
-	if (!sys_prof_refcount_enter)
-		unregister_trace_sys_enter(prof_syscall_enter);
+	sys_perf_refcount_enter--;
+	clear_bit(num, enabled_perf_enter_syscalls);
+	if (!sys_perf_refcount_enter)
+		unregister_trace_sys_enter(perf_syscall_enter);
 	mutex_unlock(&syscall_trace_lock);
 }
 
-static void prof_syscall_exit(struct pt_regs *regs, long ret)
+static void perf_syscall_exit(struct pt_regs *regs, long ret)
 {
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_exit *rec;
@@ -515,7 +515,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
-	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
+	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -530,11 +530,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	 * Impossible, but be paranoid with the future
 	 * How to put this check outside runtime?
 	 */
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-		      "exit event has grown above profile buffer size"))
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+		      "exit event has grown above perf buffer size"))
 		return;
 
-	rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size,
+	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
 				sys_data->exit_event->id, &rctx, &flags);
 	if (!rec)
 		return;
@@ -542,10 +542,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	rec->nr = syscall_nr;
 	rec->ret = syscall_get_return_value(current, regs);
 
-	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysexit_enable(struct ftrace_event_call *call)
+int perf_sysexit_enable(struct ftrace_event_call *call)
 {
 	int ret = 0;
 	int num;
@@ -553,30 +553,30 @@ int prof_sysexit_enable(struct ftrace_event_call *call)
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	if (!sys_prof_refcount_exit)
-		ret = register_trace_sys_exit(prof_syscall_exit);
+	if (!sys_perf_refcount_exit)
+		ret = register_trace_sys_exit(perf_syscall_exit);
 	if (ret) {
 		pr_info("event trace: Could not activate"
 				"syscall exit trace point");
 	} else {
-		set_bit(num, enabled_prof_exit_syscalls);
-		sys_prof_refcount_exit++;
+		set_bit(num, enabled_perf_exit_syscalls);
+		sys_perf_refcount_exit++;
 	}
 	mutex_unlock(&syscall_trace_lock);
 	return ret;
 }
 
-void prof_sysexit_disable(struct ftrace_event_call *call)
+void perf_sysexit_disable(struct ftrace_event_call *call)
 {
 	int num;
 
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	sys_prof_refcount_exit--;
-	clear_bit(num, enabled_prof_exit_syscalls);
-	if (!sys_prof_refcount_exit)
-		unregister_trace_sys_exit(prof_syscall_exit);
+	sys_perf_refcount_exit--;
+	clear_bit(num, enabled_perf_exit_syscalls);
+	if (!sys_perf_refcount_exit)
+		unregister_trace_sys_exit(perf_syscall_exit);
 	mutex_unlock(&syscall_trace_lock);
 }
 
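In the syscall code the reference counts do double duty: sys_perf_refcount_enter/exit keep the sys_enter/sys_exit tracepoint registered only while at least one syscall event is live, and the enabled_perf_* bitmaps let that single shared callback bail out early for syscalls perf is not watching. A rough user-space sketch of that gating (hypothetical names; no syscall_trace_lock equivalent):

#include <stdbool.h>
#include <stdio.h>

#define NR_SYSCALLS 512

static unsigned char enabled[NR_SYSCALLS / 8];  /* models the enabled bitmap */
static int refcount;                            /* models sys_perf_refcount_enter */
static bool hook_registered;                    /* models register_trace_sys_enter() */

static void enable_syscall(int nr)
{
        if (!refcount)
                hook_registered = true;         /* first user registers the hook */
        enabled[nr / 8] |= 1u << (nr % 8);
        refcount++;
}

static void disable_syscall(int nr)
{
        refcount--;
        enabled[nr / 8] &= ~(1u << (nr % 8));
        if (!refcount)
                hook_registered = false;        /* last user unregisters */
}

/* The shared callback: cheap early exit unless this syscall is watched. */
static void on_syscall_enter(int nr)
{
        if (!(enabled[nr / 8] & (1u << (nr % 8))))
                return;
        printf("record syscall %d\n", nr);
}

int main(void)
{
        enable_syscall(42);
        printf("hook registered: %d\n", hook_registered);       /* 1 */
        on_syscall_enter(41);   /* not watched: skipped */
        on_syscall_enter(42);   /* watched: recorded */
        disable_syscall(42);
        printf("hook registered: %d\n", hook_registered);       /* 0 */
        return 0;
}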