author    Stephane Eranian <eranian@google.com>    2010-10-20 09:25:01 -0400
committer Ingo Molnar <mingo@elte.hu>              2010-10-22 08:18:27 -0400
commit    9ffcfa6f1f63eeac15555b745c292eb9f59130f6 (patch)
tree      803cc11f312da98713fdfc366b7ec79351dffa55 /kernel/perf_event.c
parent    96681fc3c9e7d1f89ab64e5eec40b6467c97680f (diff)
perf_events: Revert: Fix transaction recovery in group_sched_in()
This patch reverts commit 8e5fc1a (perf_events: Fix transaction recovery
in group_sched_in()) because it had a flaw: in the case where the group
could never be scheduled, it caused time_enabled to go negative.

Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <4cbeeeb7.0aefd80a.6e40.0e2f@mx.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
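To see how a negative time_enabled can arise from the timestamps this patch
touches, here is a rough, self-contained sketch; it is not the in-tree code.
The stand-in types, the helper name and main() are invented for illustration,
and only the field names and the run_end arithmetic follow the
update_event_times() idea in kernel/perf_event.c of this period.
time_enabled is a u64 difference of timestamps, so if tstamp_stopped is never
advanced past tstamp_enabled for a group that never gets scheduled, the
subtraction wraps and is read back as a negative value.

	/*
	 * Rough illustration only -- NOT the in-tree code.  Minimal stand-in
	 * types so the arithmetic can be compiled and read on its own; only
	 * the field names follow kernel/perf_event.c of this period.
	 */
	#include <stdio.h>
	#include <stdint.h>

	struct sketch_event {
		uint64_t tstamp_enabled;	/* ctx->time when the event was enabled    */
		uint64_t tstamp_running;	/* accumulates while the event is on a PMU */
		uint64_t tstamp_stopped;	/* ctx->time at the last sched-out         */
		uint64_t total_time_enabled;
	};

	/* condensed version of the update_event_times() idea */
	static void sketch_update_times(struct sketch_event *event,
					uint64_t ctx_time, int ctx_is_active)
	{
		/* while the context is inactive, time stops at tstamp_stopped */
		uint64_t run_end = ctx_is_active ? ctx_time : event->tstamp_stopped;

		/*
		 * u64 subtraction: if tstamp_stopped was never advanced past
		 * tstamp_enabled, this wraps around and userspace reads it
		 * back as a negative time_enabled.
		 */
		event->total_time_enabled = run_end - event->tstamp_enabled;
	}

	int main(void)
	{
		/* stale tstamp_stopped (100) behind a later tstamp_enabled (500) */
		struct sketch_event ev = { .tstamp_enabled = 500, .tstamp_stopped = 100 };

		sketch_update_times(&ev, 1000, /* ctx_is_active = */ 0);
		printf("time_enabled = %lld\n", (long long)ev.total_time_enabled);
		return 0;
	}

With the example values this prints time_enabled = -400, which is the symptom
the changelog describes.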
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--  kernel/perf_event.c  76
1 file changed, 13 insertions(+), 63 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index f309e8014c78..39afdb07d758 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -417,8 +417,8 @@ event_filter_match(struct perf_event *event)
 	return event->cpu == -1 || event->cpu == smp_processor_id();
 }
 
-static int
-__event_sched_out(struct perf_event *event,
+static void
+event_sched_out(struct perf_event *event,
 		  struct perf_cpu_context *cpuctx,
 		  struct perf_event_context *ctx)
 {
@@ -437,13 +437,14 @@ __event_sched_out(struct perf_event *event,
 	}
 
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
-		return 0;
+		return;
 
 	event->state = PERF_EVENT_STATE_INACTIVE;
 	if (event->pending_disable) {
 		event->pending_disable = 0;
 		event->state = PERF_EVENT_STATE_OFF;
 	}
+	event->tstamp_stopped = ctx->time;
 	event->pmu->del(event, 0);
 	event->oncpu = -1;
 
@@ -452,19 +453,6 @@ __event_sched_out(struct perf_event *event,
 	ctx->nr_active--;
 	if (event->attr.exclusive || !cpuctx->active_oncpu)
 		cpuctx->exclusive = 0;
-	return 1;
-}
-
-static void
-event_sched_out(struct perf_event *event,
-		  struct perf_cpu_context *cpuctx,
-		  struct perf_event_context *ctx)
-{
-	int ret;
-
-	ret = __event_sched_out(event, cpuctx, ctx);
-	if (ret)
-		event->tstamp_stopped = ctx->time;
 }
 
 static void
@@ -664,7 +652,7 @@ retry:
 }
 
 static int
-__event_sched_in(struct perf_event *event,
+event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
 		 struct perf_event_context *ctx)
 {
@@ -684,6 +672,8 @@ __event_sched_in(struct perf_event *event,
 		return -EAGAIN;
 	}
 
+	event->tstamp_running += ctx->time - event->tstamp_stopped;
+
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
 	ctx->nr_active++;
@@ -694,35 +684,6 @@ __event_sched_in(struct perf_event *event,
 	return 0;
 }
 
-static inline int
-event_sched_in(struct perf_event *event,
-		 struct perf_cpu_context *cpuctx,
-		 struct perf_event_context *ctx)
-{
-	int ret = __event_sched_in(event, cpuctx, ctx);
-	if (ret)
-		return ret;
-	event->tstamp_running += ctx->time - event->tstamp_stopped;
-	return 0;
-}
-
-static void
-group_commit_event_sched_in(struct perf_event *group_event,
-	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx)
-{
-	struct perf_event *event;
-	u64 now = ctx->time;
-
-	group_event->tstamp_running += now - group_event->tstamp_stopped;
-	/*
-	 * Schedule in siblings as one group (if any):
-	 */
-	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
-		event->tstamp_running += now - event->tstamp_stopped;
-	}
-}
-
 static int
 group_sched_in(struct perf_event *group_event,
 	       struct perf_cpu_context *cpuctx,
@@ -736,13 +697,7 @@ group_sched_in(struct perf_event *group_event,
 
 	pmu->start_txn(pmu);
 
-	/*
-	 * use __event_sched_in() to delay updating tstamp_running
-	 * until the transaction is committed. In case of failure
-	 * we will keep an unmodified tstamp_running which is a
-	 * requirement to get correct timing information
-	 */
-	if (__event_sched_in(group_event, cpuctx, ctx)) {
+	if (event_sched_in(group_event, cpuctx, ctx)) {
 		pmu->cancel_txn(pmu);
 		return -EAGAIN;
 	}
@@ -751,31 +706,26 @@ group_sched_in(struct perf_event *group_event,
 	 * Schedule in siblings as one group (if any):
 	 */
 	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
-		if (__event_sched_in(event, cpuctx, ctx)) {
+		if (event_sched_in(event, cpuctx, ctx)) {
 			partial_group = event;
 			goto group_error;
 		}
 	}
 
-	if (!pmu->commit_txn(pmu)) {
-		/* commit tstamp_running */
-		group_commit_event_sched_in(group_event, cpuctx, ctx);
+	if (!pmu->commit_txn(pmu))
 		return 0;
-	}
+
 group_error:
 	/*
 	 * Groups can be scheduled in as one unit only, so undo any
 	 * partial group before returning:
-	 *
-	 * use __event_sched_out() to avoid updating tstamp_stopped
-	 * because the event never actually ran
 	 */
 	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
 		if (event == partial_group)
 			break;
-		__event_sched_out(event, cpuctx, ctx);
+		event_sched_out(event, cpuctx, ctx);
 	}
-	__event_sched_out(group_event, cpuctx, ctx);
+	event_sched_out(group_event, cpuctx, ctx);
 
 	pmu->cancel_txn(pmu);
 