author	Frederic Weisbecker <fweisbec@gmail.com>	2010-03-04 23:35:37 -0500
committer	Frederic Weisbecker <fweisbec@gmail.com>	2010-03-10 08:47:18 -0500
commit	97d5a22005f38057b4bc0d95f81cd26510268794
tree	b981789b1cec8ac36527e52204e407b32efa0ea6	/kernel/trace/trace_syscalls.c
parent	c530665c31c0140b74ca7689e7f836177796e5bd
perf: Drop the obsolete profile naming for trace events
Drop the obsolete "profile" naming used by perf for trace events.
Perf can now do more than simple event counting, so generalize
the API naming.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Jason Baron <jbaron@redhat.com>
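For orientation, the sketch below summarizes how the rename plays out in this file. It is derived solely from the hunks shown here and is not a complete map of the rename across the whole series.

/* Illustrative rename map, limited to kernel/trace/trace_syscalls.c: */
/*   prof_syscall_enter / prof_syscall_exit         -> perf_syscall_enter / perf_syscall_exit         */
/*   prof_sysenter_enable / prof_sysenter_disable   -> perf_sysenter_enable / perf_sysenter_disable   */
/*   prof_sysexit_enable / prof_sysexit_disable     -> perf_sysexit_enable / perf_sysexit_disable     */
/*   ftrace_perf_buf_prepare / ftrace_perf_buf_submit -> perf_trace_buf_prepare / perf_trace_buf_submit */
/*   FTRACE_MAX_PROFILE_SIZE                        -> PERF_MAX_TRACE_SIZE                            */
/*   enabled_prof_{enter,exit}_syscalls             -> enabled_perf_{enter,exit}_syscalls             */
/*   sys_prof_refcount_{enter,exit}                 -> sys_perf_refcount_{enter,exit}                 */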
Diffstat (limited to 'kernel/trace/trace_syscalls.c')
-rw-r--r--	kernel/trace/trace_syscalls.c	72
1 file changed, 36 insertions(+), 36 deletions(-)
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 7e6e84fb7b6c..33c2a5b769dc 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -428,12 +428,12 @@ core_initcall(init_ftrace_syscalls);
 
 #ifdef CONFIG_PERF_EVENTS
 
-static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
-static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
-static int sys_prof_refcount_enter;
-static int sys_prof_refcount_exit;
+static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
+static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
+static int sys_perf_refcount_enter;
+static int sys_perf_refcount_exit;
 
-static void prof_syscall_enter(struct pt_regs *regs, long id)
+static void perf_syscall_enter(struct pt_regs *regs, long id)
 {
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_enter *rec;
@@ -443,7 +443,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
-	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
+	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -455,11 +455,11 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	size = ALIGN(size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-		      "profile buffer not large enough"))
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+		      "perf buffer not large enough"))
 		return;
 
-	rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size,
+	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
 				sys_data->enter_event->id, &rctx, &flags);
 	if (!rec)
 		return;
@@ -467,10 +467,10 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	rec->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
 			       (unsigned long *)&rec->args);
-	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysenter_enable(struct ftrace_event_call *call)
+int perf_sysenter_enable(struct ftrace_event_call *call)
 {
 	int ret = 0;
 	int num;
@@ -478,34 +478,34 @@ int prof_sysenter_enable(struct ftrace_event_call *call)
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	if (!sys_prof_refcount_enter)
-		ret = register_trace_sys_enter(prof_syscall_enter);
+	if (!sys_perf_refcount_enter)
+		ret = register_trace_sys_enter(perf_syscall_enter);
 	if (ret) {
 		pr_info("event trace: Could not activate"
 				"syscall entry trace point");
 	} else {
-		set_bit(num, enabled_prof_enter_syscalls);
-		sys_prof_refcount_enter++;
+		set_bit(num, enabled_perf_enter_syscalls);
+		sys_perf_refcount_enter++;
 	}
 	mutex_unlock(&syscall_trace_lock);
 	return ret;
 }
 
-void prof_sysenter_disable(struct ftrace_event_call *call)
+void perf_sysenter_disable(struct ftrace_event_call *call)
 {
 	int num;
 
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	sys_prof_refcount_enter--;
-	clear_bit(num, enabled_prof_enter_syscalls);
-	if (!sys_prof_refcount_enter)
-		unregister_trace_sys_enter(prof_syscall_enter);
+	sys_perf_refcount_enter--;
+	clear_bit(num, enabled_perf_enter_syscalls);
+	if (!sys_perf_refcount_enter)
+		unregister_trace_sys_enter(perf_syscall_enter);
 	mutex_unlock(&syscall_trace_lock);
 }
 
-static void prof_syscall_exit(struct pt_regs *regs, long ret)
+static void perf_syscall_exit(struct pt_regs *regs, long ret)
 {
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_exit *rec;
@@ -515,7 +515,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
-	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
+	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -530,11 +530,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	 * Impossible, but be paranoid with the future
 	 * How to put this check outside runtime?
 	 */
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-		"exit event has grown above profile buffer size"))
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+		"exit event has grown above perf buffer size"))
 		return;
 
-	rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size,
+	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
 				sys_data->exit_event->id, &rctx, &flags);
 	if (!rec)
 		return;
@@ -542,10 +542,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	rec->nr = syscall_nr;
 	rec->ret = syscall_get_return_value(current, regs);
 
-	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysexit_enable(struct ftrace_event_call *call)
+int perf_sysexit_enable(struct ftrace_event_call *call)
 {
 	int ret = 0;
 	int num;
@@ -553,30 +553,30 @@ int prof_sysexit_enable(struct ftrace_event_call *call)
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	if (!sys_prof_refcount_exit)
-		ret = register_trace_sys_exit(prof_syscall_exit);
+	if (!sys_perf_refcount_exit)
+		ret = register_trace_sys_exit(perf_syscall_exit);
 	if (ret) {
 		pr_info("event trace: Could not activate"
 				"syscall exit trace point");
 	} else {
-		set_bit(num, enabled_prof_exit_syscalls);
-		sys_prof_refcount_exit++;
+		set_bit(num, enabled_perf_exit_syscalls);
+		sys_perf_refcount_exit++;
 	}
 	mutex_unlock(&syscall_trace_lock);
 	return ret;
 }
 
-void prof_sysexit_disable(struct ftrace_event_call *call)
+void perf_sysexit_disable(struct ftrace_event_call *call)
 {
 	int num;
 
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	sys_prof_refcount_exit--;
-	clear_bit(num, enabled_prof_exit_syscalls);
-	if (!sys_prof_refcount_exit)
-		unregister_trace_sys_exit(prof_syscall_exit);
+	sys_perf_refcount_exit--;
+	clear_bit(num, enabled_perf_exit_syscalls);
+	if (!sys_perf_refcount_exit)
+		unregister_trace_sys_exit(perf_syscall_exit);
 	mutex_unlock(&syscall_trace_lock);
 }
 