 kernel/perf_event.c | 76 +++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 63 insertions(+), 13 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index e7eeba1794fd..634f86a4b2f9 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -412,8 +412,8 @@ event_filter_match(struct perf_event *event)
 	return event->cpu == -1 || event->cpu == smp_processor_id();
 }
 
-static void
-event_sched_out(struct perf_event *event,
+static int
+__event_sched_out(struct perf_event *event,
 		  struct perf_cpu_context *cpuctx,
 		  struct perf_event_context *ctx)
 {
@@ -432,14 +432,13 @@ event_sched_out(struct perf_event *event,
 	}
 
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
-		return;
+		return 0;
 
 	event->state = PERF_EVENT_STATE_INACTIVE;
 	if (event->pending_disable) {
 		event->pending_disable = 0;
 		event->state = PERF_EVENT_STATE_OFF;
 	}
-	event->tstamp_stopped = ctx->time;
 	event->pmu->del(event, 0);
 	event->oncpu = -1;
 
@@ -448,6 +447,19 @@ event_sched_out(struct perf_event *event,
 	ctx->nr_active--;
 	if (event->attr.exclusive || !cpuctx->active_oncpu)
 		cpuctx->exclusive = 0;
+	return 1;
+}
+
+static void
+event_sched_out(struct perf_event *event,
+		struct perf_cpu_context *cpuctx,
+		struct perf_event_context *ctx)
+{
+	int ret;
+
+	ret = __event_sched_out(event, cpuctx, ctx);
+	if (ret)
+		event->tstamp_stopped = ctx->time;
 }
 
 static void
@@ -647,7 +659,7 @@ retry:
 }
 
 static int
-event_sched_in(struct perf_event *event,
+__event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
 		 struct perf_event_context *ctx)
 {
@@ -667,8 +679,6 @@ event_sched_in(struct perf_event *event,
 		return -EAGAIN;
 	}
 
-	event->tstamp_running += ctx->time - event->tstamp_stopped;
-
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
 	ctx->nr_active++;
@@ -679,6 +689,35 @@ event_sched_in(struct perf_event *event,
 	return 0;
 }
 
+static inline int
+event_sched_in(struct perf_event *event,
+	       struct perf_cpu_context *cpuctx,
+	       struct perf_event_context *ctx)
+{
+	int ret = __event_sched_in(event, cpuctx, ctx);
+	if (ret)
+		return ret;
+	event->tstamp_running += ctx->time - event->tstamp_stopped;
+	return 0;
+}
+
+static void
+group_commit_event_sched_in(struct perf_event *group_event,
+	       struct perf_cpu_context *cpuctx,
+	       struct perf_event_context *ctx)
+{
+	struct perf_event *event;
+	u64 now = ctx->time;
+
+	group_event->tstamp_running += now - group_event->tstamp_stopped;
+	/*
+	 * Schedule in siblings as one group (if any):
+	 */
+	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
+		event->tstamp_running += now - event->tstamp_stopped;
+	}
+}
+
 static int
 group_sched_in(struct perf_event *group_event,
 	       struct perf_cpu_context *cpuctx,
@@ -692,7 +731,13 @@ group_sched_in(struct perf_event *group_event,
 
 	pmu->start_txn(pmu);
 
-	if (event_sched_in(group_event, cpuctx, ctx)) {
+	/*
+	 * use __event_sched_in() to delay updating tstamp_running
+	 * until the transaction is committed. In case of failure
+	 * we will keep an unmodified tstamp_running which is a
+	 * requirement to get correct timing information
+	 */
+	if (__event_sched_in(group_event, cpuctx, ctx)) {
 		pmu->cancel_txn(pmu);
 		return -EAGAIN;
 	}
@@ -701,26 +746,31 @@ group_sched_in(struct perf_event *group_event,
 	 * Schedule in siblings as one group (if any):
 	 */
 	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
-		if (event_sched_in(event, cpuctx, ctx)) {
+		if (__event_sched_in(event, cpuctx, ctx)) {
 			partial_group = event;
 			goto group_error;
 		}
 	}
 
-	if (!pmu->commit_txn(pmu))
+	if (!pmu->commit_txn(pmu)) {
+		/* commit tstamp_running */
+		group_commit_event_sched_in(group_event, cpuctx, ctx);
 		return 0;
-
+	}
 group_error:
 	/*
 	 * Groups can be scheduled in as one unit only, so undo any
 	 * partial group before returning:
+	 *
+	 * use __event_sched_out() to avoid updating tstamp_stopped
+	 * because the event never actually ran
 	 */
 	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
 		if (event == partial_group)
 			break;
-		event_sched_out(event, cpuctx, ctx);
+		__event_sched_out(event, cpuctx, ctx);
 	}
-	event_sched_out(group_event, cpuctx, ctx);
+	__event_sched_out(group_event, cpuctx, ctx);
 
 	pmu->cancel_txn(pmu);
 
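The patch works because no timing state is touched before pmu->commit_txn() succeeds: if the transaction is cancelled, every event in the group keeps its old tstamp_running and tstamp_stopped, as if it had never been scheduled in. The following standalone sketch illustrates that deferred-commit pattern in user-space C. It is not kernel code and is only loosely modelled on the functions above; every name in it (fake_event, fake_pmu, fake_group_sched_in, ...) is hypothetical.

/*
 * Illustration only: defer the tstamp_running update until the
 * "transaction" has committed, so a failed schedule-in leaves the
 * timing fields untouched.
 */
#include <stdio.h>
#include <stdbool.h>

struct fake_event {
	unsigned long long tstamp_running;
	unsigned long long tstamp_stopped;
};

struct fake_pmu {
	bool commit_should_fail;	/* simulate hardware rejecting the group */
};

/* counterpart of __event_sched_in(): no timestamp side effects yet */
static int fake_sched_in(struct fake_event *e)
{
	(void)e;
	return 0;			/* pretend the counter was programmed */
}

/* counterpart of group_commit_event_sched_in(): commit timing state */
static void fake_commit(struct fake_event *e, unsigned long long now)
{
	e->tstamp_running += now - e->tstamp_stopped;
}

static int fake_group_sched_in(struct fake_event *e, struct fake_pmu *pmu,
			       unsigned long long now)
{
	if (fake_sched_in(e))
		return -1;
	if (pmu->commit_should_fail)
		return -1;		/* "cancel_txn" path: timing untouched */
	fake_commit(e, now);		/* "commit_txn" succeeded: update timing */
	return 0;
}

int main(void)
{
	struct fake_event e = { .tstamp_running = 0, .tstamp_stopped = 100 };
	struct fake_pmu bad = { .commit_should_fail = true };
	struct fake_pmu good = { .commit_should_fail = false };

	fake_group_sched_in(&e, &bad, 150);
	printf("after failed txn:    tstamp_running=%llu\n", e.tstamp_running);

	fake_group_sched_in(&e, &good, 150);
	printf("after committed txn: tstamp_running=%llu\n", e.tstamp_running);
	return 0;
}

In the failed case tstamp_running stays 0; only the committed case accumulates the 50 units of running time, which mirrors the guarantee the patch needs for correct timing information.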