Diffstat (limited to 'kernel/perf_counter.c')
 -rw-r--r--  kernel/perf_counter.c  99
 1 file changed, 51 insertions(+), 48 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index d850a1fb8d4c..a5bdc93ac477 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -419,6 +419,54 @@ counter_sched_in(struct perf_counter *counter,
         return 0;
 }
 
+static int
+group_sched_in(struct perf_counter *group_counter,
+               struct perf_cpu_context *cpuctx,
+               struct perf_counter_context *ctx,
+               int cpu)
+{
+        struct perf_counter *counter, *partial_group;
+        int ret;
+
+        if (group_counter->state == PERF_COUNTER_STATE_OFF)
+                return 0;
+
+        ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
+        if (ret)
+                return ret < 0 ? ret : 0;
+
+        group_counter->prev_state = group_counter->state;
+        if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
+                return -EAGAIN;
+
+        /*
+         * Schedule in siblings as one group (if any):
+         */
+        list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+                counter->prev_state = counter->state;
+                if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
+                        partial_group = counter;
+                        goto group_error;
+                }
+        }
+
+        return 0;
+
+group_error:
+        /*
+         * Groups can be scheduled in as one unit only, so undo any
+         * partial group before returning:
+         */
+        list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+                if (counter == partial_group)
+                        break;
+                counter_sched_out(counter, cpuctx, ctx);
+        }
+        counter_sched_out(group_counter, cpuctx, ctx);
+
+        return -EAGAIN;
+}
+
 /*
  * Return 1 for a group consisting entirely of software counters,
  * 0 if the group contains any hardware counters.
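The group_sched_in() added above is all-or-nothing: the group leader and every sibling must go onto the PMU together, and if any member fails to schedule, the members that already succeeded are torn down again before -EAGAIN is returned. The following minimal userspace sketch illustrates that rollback pattern in isolation; struct member and the activate()/deactivate() helpers are hypothetical stand-ins, not kernel APIs.

#include <errno.h>
#include <stdio.h>

/*
 * Hypothetical userspace sketch of the all-or-nothing pattern used by
 * group_sched_in(): activate every member of a group, and on the first
 * failure undo the members that already succeeded before reporting
 * -EAGAIN.
 */
struct member {
        const char *name;
        int active;
        int will_fail;          /* simulates a counter the PMU rejects */
};

static int activate(struct member *m)
{
        if (m->will_fail)
                return -1;
        m->active = 1;
        return 0;
}

static void deactivate(struct member *m)
{
        m->active = 0;
}

static int group_activate(struct member *group, int n)
{
        int i, j;

        for (i = 0; i < n; i++) {
                if (activate(&group[i])) {
                        /* Undo the partial group before returning. */
                        for (j = 0; j < i; j++)
                                deactivate(&group[j]);
                        return -EAGAIN;
                }
        }
        return 0;
}

int main(void)
{
        struct member group[] = {
                { "leader",   0, 0 },
                { "sibling1", 0, 0 },
                { "sibling2", 0, 1 },   /* forces the rollback path */
        };

        if (group_activate(group, 3) == -EAGAIN)
                printf("group rejected; no member left active\n");
        return 0;
}

Built with a plain C compiler, the sketch prints the rejection message and leaves every member inactive, mirroring how a partially scheduled counter group is fully undone.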
@@ -643,6 +691,9 @@ static void __perf_counter_enable(void *info)
 
         if (!group_can_go_on(counter, cpuctx, 1))
                 err = -EEXIST;
+        else if (counter == leader)
+                err = group_sched_in(counter, cpuctx, ctx,
+                                     smp_processor_id());
         else
                 err = counter_sched_in(counter, cpuctx, ctx,
                                        smp_processor_id());
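In __perf_counter_enable(), leader appears to be set from counter->group_leader earlier in the function (outside this hunk), so the new branch routes a counter that leads its own group through group_sched_in() while ordinary group members stay on the single-counter path. A sketch of that leader test, assuming the group_leader field of struct perf_counter:

/* Sketch only: a counter leads its group when group_leader points back at itself. */
static inline int is_group_leader(struct perf_counter *counter)
{
        return counter->group_leader == counter;
}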
@@ -791,54 +842,6 @@ static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
         __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
 }
 
-static int
-group_sched_in(struct perf_counter *group_counter,
-               struct perf_cpu_context *cpuctx,
-               struct perf_counter_context *ctx,
-               int cpu)
-{
-        struct perf_counter *counter, *partial_group;
-        int ret;
-
-        if (group_counter->state == PERF_COUNTER_STATE_OFF)
-                return 0;
-
-        ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
-        if (ret)
-                return ret < 0 ? ret : 0;
-
-        group_counter->prev_state = group_counter->state;
-        if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
-                return -EAGAIN;
-
-        /*
-         * Schedule in siblings as one group (if any):
-         */
-        list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
-                counter->prev_state = counter->state;
-                if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
-                        partial_group = counter;
-                        goto group_error;
-                }
-        }
-
-        return 0;
-
-group_error:
-        /*
-         * Groups can be scheduled in as one unit only, so undo any
-         * partial group before returning:
-         */
-        list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
-                if (counter == partial_group)
-                        break;
-                counter_sched_out(counter, cpuctx, ctx);
-        }
-        counter_sched_out(group_counter, cpuctx, ctx);
-
-        return -EAGAIN;
-}
-
 static void
 __perf_counter_sched_in(struct perf_counter_context *ctx,
                         struct perf_cpu_context *cpuctx, int cpu)
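The last hunk only removes the original definition; the function body itself is unchanged. Relocating it is needed so the definition precedes its new caller, __perf_counter_enable(); a forward declaration near the top of the file would have been a hypothetical alternative with the same effect:

/* Hypothetical alternative to moving the body: declare it before __perf_counter_enable(). */
static int group_sched_in(struct perf_counter *group_counter,
                          struct perf_cpu_context *cpuctx,
                          struct perf_counter_context *ctx,
                          int cpu);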