diff options
Diffstat (limited to 'kernel/trace/trace_syscalls.c')
| -rw-r--r-- | kernel/trace/trace_syscalls.c | 73 |
1 file changed, 37 insertions, 36 deletions
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index cba47d7935cc..4d6d711717f2 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
| @@ -1,5 +1,6 @@ | |||
| 1 | #include <trace/syscall.h> | 1 | #include <trace/syscall.h> |
| 2 | #include <trace/events/syscalls.h> | 2 | #include <trace/events/syscalls.h> |
| 3 | #include <linux/slab.h> | ||
| 3 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
| 4 | #include <linux/ftrace.h> | 5 | #include <linux/ftrace.h> |
| 5 | #include <linux/perf_event.h> | 6 | #include <linux/perf_event.h> |
| @@ -428,12 +429,12 @@ core_initcall(init_ftrace_syscalls); | |||
| 428 | 429 | ||
| 429 | #ifdef CONFIG_PERF_EVENTS | 430 | #ifdef CONFIG_PERF_EVENTS |
| 430 | 431 | ||
| 431 | static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls); | 432 | static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls); |
| 432 | static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls); | 433 | static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls); |
| 433 | static int sys_prof_refcount_enter; | 434 | static int sys_perf_refcount_enter; |
| 434 | static int sys_prof_refcount_exit; | 435 | static int sys_perf_refcount_exit; |
| 435 | 436 | ||
| 436 | static void prof_syscall_enter(struct pt_regs *regs, long id) | 437 | static void perf_syscall_enter(struct pt_regs *regs, long id) |
| 437 | { | 438 | { |
| 438 | struct syscall_metadata *sys_data; | 439 | struct syscall_metadata *sys_data; |
| 439 | struct syscall_trace_enter *rec; | 440 | struct syscall_trace_enter *rec; |
| @@ -443,7 +444,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) | |||
| 443 | int size; | 444 | int size; |
| 444 | 445 | ||
| 445 | syscall_nr = syscall_get_nr(current, regs); | 446 | syscall_nr = syscall_get_nr(current, regs); |
| 446 | if (!test_bit(syscall_nr, enabled_prof_enter_syscalls)) | 447 | if (!test_bit(syscall_nr, enabled_perf_enter_syscalls)) |
| 447 | return; | 448 | return; |
| 448 | 449 | ||
| 449 | sys_data = syscall_nr_to_meta(syscall_nr); | 450 | sys_data = syscall_nr_to_meta(syscall_nr); |
| @@ -455,11 +456,11 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) | |||
| 455 | size = ALIGN(size + sizeof(u32), sizeof(u64)); | 456 | size = ALIGN(size + sizeof(u32), sizeof(u64)); |
| 456 | size -= sizeof(u32); | 457 | size -= sizeof(u32); |
| 457 | 458 | ||
| 458 | if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, | 459 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, |
| 459 | "profile buffer not large enough")) | 460 | "perf buffer not large enough")) |
| 460 | return; | 461 | return; |
| 461 | 462 | ||
| 462 | rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size, | 463 | rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, |
| 463 | sys_data->enter_event->id, &rctx, &flags); | 464 | sys_data->enter_event->id, &rctx, &flags); |
| 464 | if (!rec) | 465 | if (!rec) |
| 465 | return; | 466 | return; |
| @@ -467,10 +468,10 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) | |||
| 467 | rec->nr = syscall_nr; | 468 | rec->nr = syscall_nr; |
| 468 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, | 469 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, |
| 469 | (unsigned long *)&rec->args); | 470 | (unsigned long *)&rec->args); |
| 470 | ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags); | 471 | perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs); |
| 471 | } | 472 | } |
| 472 | 473 | ||
| 473 | int prof_sysenter_enable(struct ftrace_event_call *call) | 474 | int perf_sysenter_enable(struct ftrace_event_call *call) |
| 474 | { | 475 | { |
| 475 | int ret = 0; | 476 | int ret = 0; |
| 476 | int num; | 477 | int num; |
| @@ -478,34 +479,34 @@ int prof_sysenter_enable(struct ftrace_event_call *call) | |||
| 478 | num = ((struct syscall_metadata *)call->data)->syscall_nr; | 479 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
| 479 | 480 | ||
| 480 | mutex_lock(&syscall_trace_lock); | 481 | mutex_lock(&syscall_trace_lock); |
| 481 | if (!sys_prof_refcount_enter) | 482 | if (!sys_perf_refcount_enter) |
| 482 | ret = register_trace_sys_enter(prof_syscall_enter); | 483 | ret = register_trace_sys_enter(perf_syscall_enter); |
| 483 | if (ret) { | 484 | if (ret) { |
| 484 | pr_info("event trace: Could not activate" | 485 | pr_info("event trace: Could not activate" |
| 485 | "syscall entry trace point"); | 486 | "syscall entry trace point"); |
| 486 | } else { | 487 | } else { |
| 487 | set_bit(num, enabled_prof_enter_syscalls); | 488 | set_bit(num, enabled_perf_enter_syscalls); |
| 488 | sys_prof_refcount_enter++; | 489 | sys_perf_refcount_enter++; |
| 489 | } | 490 | } |
| 490 | mutex_unlock(&syscall_trace_lock); | 491 | mutex_unlock(&syscall_trace_lock); |
| 491 | return ret; | 492 | return ret; |
| 492 | } | 493 | } |
| 493 | 494 | ||
| 494 | void prof_sysenter_disable(struct ftrace_event_call *call) | 495 | void perf_sysenter_disable(struct ftrace_event_call *call) |
| 495 | { | 496 | { |
| 496 | int num; | 497 | int num; |
| 497 | 498 | ||
| 498 | num = ((struct syscall_metadata *)call->data)->syscall_nr; | 499 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
| 499 | 500 | ||
| 500 | mutex_lock(&syscall_trace_lock); | 501 | mutex_lock(&syscall_trace_lock); |
| 501 | sys_prof_refcount_enter--; | 502 | sys_perf_refcount_enter--; |
| 502 | clear_bit(num, enabled_prof_enter_syscalls); | 503 | clear_bit(num, enabled_perf_enter_syscalls); |
| 503 | if (!sys_prof_refcount_enter) | 504 | if (!sys_perf_refcount_enter) |
| 504 | unregister_trace_sys_enter(prof_syscall_enter); | 505 | unregister_trace_sys_enter(perf_syscall_enter); |
| 505 | mutex_unlock(&syscall_trace_lock); | 506 | mutex_unlock(&syscall_trace_lock); |
| 506 | } | 507 | } |
| 507 | 508 | ||
| 508 | static void prof_syscall_exit(struct pt_regs *regs, long ret) | 509 | static void perf_syscall_exit(struct pt_regs *regs, long ret) |
| 509 | { | 510 | { |
| 510 | struct syscall_metadata *sys_data; | 511 | struct syscall_metadata *sys_data; |
| 511 | struct syscall_trace_exit *rec; | 512 | struct syscall_trace_exit *rec; |
| @@ -515,7 +516,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) | |||
| 515 | int size; | 516 | int size; |
| 516 | 517 | ||
| 517 | syscall_nr = syscall_get_nr(current, regs); | 518 | syscall_nr = syscall_get_nr(current, regs); |
| 518 | if (!test_bit(syscall_nr, enabled_prof_exit_syscalls)) | 519 | if (!test_bit(syscall_nr, enabled_perf_exit_syscalls)) |
| 519 | return; | 520 | return; |
| 520 | 521 | ||
| 521 | sys_data = syscall_nr_to_meta(syscall_nr); | 522 | sys_data = syscall_nr_to_meta(syscall_nr); |
| @@ -530,11 +531,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) | |||
| 530 | * Impossible, but be paranoid with the future | 531 | * Impossible, but be paranoid with the future |
| 531 | * How to put this check outside runtime? | 532 | * How to put this check outside runtime? |
| 532 | */ | 533 | */ |
| 533 | if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, | 534 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, |
| 534 | "exit event has grown above profile buffer size")) | 535 | "exit event has grown above perf buffer size")) |
| 535 | return; | 536 | return; |
| 536 | 537 | ||
| 537 | rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size, | 538 | rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, |
| 538 | sys_data->exit_event->id, &rctx, &flags); | 539 | sys_data->exit_event->id, &rctx, &flags); |
| 539 | if (!rec) | 540 | if (!rec) |
| 540 | return; | 541 | return; |
| @@ -542,10 +543,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) | |||
| 542 | rec->nr = syscall_nr; | 543 | rec->nr = syscall_nr; |
| 543 | rec->ret = syscall_get_return_value(current, regs); | 544 | rec->ret = syscall_get_return_value(current, regs); |
| 544 | 545 | ||
| 545 | ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags); | 546 | perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs); |
| 546 | } | 547 | } |
| 547 | 548 | ||
| 548 | int prof_sysexit_enable(struct ftrace_event_call *call) | 549 | int perf_sysexit_enable(struct ftrace_event_call *call) |
| 549 | { | 550 | { |
| 550 | int ret = 0; | 551 | int ret = 0; |
| 551 | int num; | 552 | int num; |
| @@ -553,30 +554,30 @@ int prof_sysexit_enable(struct ftrace_event_call *call) | |||
| 553 | num = ((struct syscall_metadata *)call->data)->syscall_nr; | 554 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
| 554 | 555 | ||
| 555 | mutex_lock(&syscall_trace_lock); | 556 | mutex_lock(&syscall_trace_lock); |
| 556 | if (!sys_prof_refcount_exit) | 557 | if (!sys_perf_refcount_exit) |
| 557 | ret = register_trace_sys_exit(prof_syscall_exit); | 558 | ret = register_trace_sys_exit(perf_syscall_exit); |
| 558 | if (ret) { | 559 | if (ret) { |
| 559 | pr_info("event trace: Could not activate" | 560 | pr_info("event trace: Could not activate" |
| 560 | "syscall exit trace point"); | 561 | "syscall exit trace point"); |
| 561 | } else { | 562 | } else { |
| 562 | set_bit(num, enabled_prof_exit_syscalls); | 563 | set_bit(num, enabled_perf_exit_syscalls); |
| 563 | sys_prof_refcount_exit++; | 564 | sys_perf_refcount_exit++; |
| 564 | } | 565 | } |
| 565 | mutex_unlock(&syscall_trace_lock); | 566 | mutex_unlock(&syscall_trace_lock); |
| 566 | return ret; | 567 | return ret; |
| 567 | } | 568 | } |
| 568 | 569 | ||
| 569 | void prof_sysexit_disable(struct ftrace_event_call *call) | 570 | void perf_sysexit_disable(struct ftrace_event_call *call) |
| 570 | { | 571 | { |
| 571 | int num; | 572 | int num; |
| 572 | 573 | ||
| 573 | num = ((struct syscall_metadata *)call->data)->syscall_nr; | 574 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
| 574 | 575 | ||
| 575 | mutex_lock(&syscall_trace_lock); | 576 | mutex_lock(&syscall_trace_lock); |
| 576 | sys_prof_refcount_exit--; | 577 | sys_perf_refcount_exit--; |
| 577 | clear_bit(num, enabled_prof_exit_syscalls); | 578 | clear_bit(num, enabled_perf_exit_syscalls); |
| 578 | if (!sys_prof_refcount_exit) | 579 | if (!sys_perf_refcount_exit) |
| 579 | unregister_trace_sys_exit(prof_syscall_exit); | 580 | unregister_trace_sys_exit(perf_syscall_exit); |
| 580 | mutex_unlock(&syscall_trace_lock); | 581 | mutex_unlock(&syscall_trace_lock); |
| 581 | } | 582 | } |
| 582 | 583 | ||
