Diffstat (limited to 'kernel/trace/trace_syscalls.c')
-rw-r--r--	kernel/trace/trace_syscalls.c	47
1 file changed, 17 insertions(+), 30 deletions(-)
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 51213b0aa81b..0bb934875263 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -477,10 +477,11 @@ static int sys_prof_refcount_exit;
 static void prof_syscall_enter(struct pt_regs *regs, long id)
 {
 	struct syscall_metadata *sys_data;
-	struct perf_trace_buf *trace_buf;
 	struct syscall_trace_enter *rec;
 	unsigned long flags;
+	char *trace_buf;
 	char *raw_data;
+	int *recursion;
 	int syscall_nr;
 	int size;
 	int cpu;
@@ -505,6 +506,9 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	/* Protect the per cpu buffer, begin the rcu read side */
 	local_irq_save(flags);
 
+	if (perf_swevent_get_recursion_context(&recursion))
+		goto end_recursion;
+
 	cpu = smp_processor_id();
 
 	if (in_nmi())
@@ -515,18 +519,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	if (!trace_buf)
 		goto end;
 
-	trace_buf = per_cpu_ptr(trace_buf, cpu);
-
-	if (trace_buf->recursion++)
-		goto end_recursion;
-
-	/*
-	 * Make recursion update visible before entering perf_tp_event
-	 * so that we protect from perf recursions.
-	 */
-	barrier();
-
-	raw_data = trace_buf->buf;
+	raw_data = per_cpu_ptr(trace_buf, cpu);
 
 	/* zero the dead bytes from align to not leak stack to user */
 	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -539,9 +532,9 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 			       (unsigned long *)&rec->args);
 	perf_tp_event(sys_data->enter_id, 0, 1, rec, size);
 
-end_recursion:
-	trace_buf->recursion--;
 end:
+	perf_swevent_put_recursion_context(recursion);
+end_recursion:
 	local_irq_restore(flags);
 }
 
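Note that the two exit labels trade places in the last hunk: in the old code the counter was incremented even on the recursing path (trace_buf->recursion++ post-increments unconditionally), so end_recursion: had to undo it with trace_buf->recursion-- before falling through to end:. With the new helpers a failed perf_swevent_get_recursion_context() leaves nothing to undo, so end: now falls through into perf_swevent_put_recursion_context() and end_recursion: lands below it, going straight to local_irq_restore(). prof_syscall_exit() gets the identical treatment below.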
@@ -588,10 +581,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 {
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_exit *rec;
-	struct perf_trace_buf *trace_buf;
 	unsigned long flags;
 	int syscall_nr;
+	char *trace_buf;
 	char *raw_data;
+	int *recursion;
 	int size;
 	int cpu;
 
@@ -617,6 +611,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 
 	/* Protect the per cpu buffer, begin the rcu read side */
 	local_irq_save(flags);
+
+	if (perf_swevent_get_recursion_context(&recursion))
+		goto end_recursion;
+
 	cpu = smp_processor_id();
 
 	if (in_nmi())
@@ -627,18 +625,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	if (!trace_buf)
 		goto end;
 
-	trace_buf = per_cpu_ptr(trace_buf, cpu);
-
-	if (trace_buf->recursion++)
-		goto end_recursion;
-
-	/*
-	 * Make recursion update visible before entering perf_tp_event
-	 * so that we protect from perf recursions.
-	 */
-	barrier();
-
-	raw_data = trace_buf->buf;
+	raw_data = per_cpu_ptr(trace_buf, cpu);
 
 	/* zero the dead bytes from align to not leak stack to user */
 	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -652,9 +639,9 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 
 	perf_tp_event(sys_data->exit_id, 0, 1, rec, size);
 
-end_recursion:
-	trace_buf->recursion--;
 end:
+	perf_swevent_put_recursion_context(recursion);
+end_recursion:
 	local_irq_restore(flags);
 }
 
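Condensed, both functions now share the same acquire/use/release bracket. The sketch below is a reader's reconstruction of that shape, not the verbatim kernel code: prof_syscall_event() is a hypothetical name standing in for either function, the perf_swevent_*_recursion_context() prototypes are inferred from the call sites in this diff, and the perf_trace_buf global plus the elided record-building step are assumptions based on the unchanged context lines.

/*
 * Sketch of the post-patch control flow shared by prof_syscall_enter()
 * and prof_syscall_exit().  Prototypes inferred from the call sites in
 * this diff (assumption, not quoted from a header):
 *
 *   int  perf_swevent_get_recursion_context(int **recursion);
 *   void perf_swevent_put_recursion_context(int *recursion);
 */
static void prof_syscall_event(struct pt_regs *regs, long id)	/* hypothetical */
{
	unsigned long flags;
	char *trace_buf;
	char *raw_data;
	int *recursion;

	/* IRQs off: pins the CPU and opens the RCU read side */
	local_irq_save(flags);

	/*
	 * Replaces the open-coded trace_buf->recursion++/-- counter and
	 * its barrier(); the recursion state now lives in the perf core.
	 */
	if (perf_swevent_get_recursion_context(&recursion))
		goto end_recursion;

	/* perf_trace_buf: plain per-cpu char buffer (NMI variant elided) */
	trace_buf = rcu_dereference(perf_trace_buf);
	if (!trace_buf)
		goto end;

	/* the per-cpu slot is the raw buffer itself, no ->buf indirection */
	raw_data = per_cpu_ptr(trace_buf, smp_processor_id());

	/* ... build the record in raw_data and call perf_tp_event() ... */

end:
	perf_swevent_put_recursion_context(recursion);	/* release the slot */
end_recursion:
	local_irq_restore(flags);	/* reached directly if nothing was acquired */
}

Centralizing the counter behind perf_swevent_get_recursion_context() gives every software event a single recursion policy and retires the per-callsite barrier() that previously had to order the counter update against perf_tp_event().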
