author     Matt Fleming <matt.fleming@intel.com>   2015-01-23 13:45:48 -0500
committer  Ingo Molnar <mingo@kernel.org>          2015-02-25 07:53:36 -0500
commit     59bf7fd45c90a8fde22a7717b5413e4ed9666c32 (patch)
tree       c7a8eaa6b4cd6e1323164a7aa6ff0eb375a20a61 /arch/x86/kernel/cpu
parent     bff671dba7981195a644a5dc210d65de8ae2d251 (diff)
perf/x86/intel: Enable conflicting event scheduling for CQM
We can leverage the workqueue that we use for RMID rotation to support
scheduling of conflicting monitoring events. Events that monitor
conflicting things are already allowed elsewhere in the perf subsystem,
so there is precedent for this.
An example of two conflicting events would be monitoring a cgroup and
simultaneously monitoring a task within that cgroup.
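As a purely illustrative user-space sketch (not part of the patch), the snippet below opens exactly such a pair via perf_event_open(): one llc_occupancy event bound to a cgroup and one bound to a task assumed to live inside that cgroup. The sysfs path, cgroup path and PID are made-up placeholders. Before this change the second open would fail with -EBUSY; with it, the conflicting event is accepted and later scheduled in by the rotation worker.

```c
/*
 * Illustrative only -- not part of the patch. Paths and the PID below
 * are placeholders; error handling is minimal.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int cqm_llc_occupancy_open(pid_t pid, int cpu, unsigned long flags)
{
	struct perf_event_attr attr;
	int type = -1;
	FILE *f;

	/* The intel_cqm PMU has a dynamic type; read it from sysfs. */
	f = fopen("/sys/bus/event_source/devices/intel_cqm/type", "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &type) != 1)
		type = -1;
	fclose(f);
	if (type < 0)
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.size = sizeof(attr);
	attr.config = 0x01;	/* llc_occupancy (event=0x01) */

	return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, flags);
}

int main(void)
{
	/* Cgroup-wide event: pid is a perf_event cgroup directory fd. */
	int cgrp_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY);
	int ev_cgrp = cqm_llc_occupancy_open(cgrp_fd, 0, PERF_FLAG_PID_CGROUP);

	/* Task event for a task inside that cgroup: previously -EBUSY. */
	int ev_task = cqm_llc_occupancy_open(1234, -1, 0);

	printf("cgroup event fd=%d, task event fd=%d\n", ev_cgrp, ev_task);
	return 0;
}
```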
This uses the cache_groups list as a queuing mechanism, where every
event that reaches the front of the list gets the chance to be scheduled
in, possibly descheduling any conflicting events that are running.
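To make the queueing idea concrete, here is a small user-space model of the rotation (a simplified sketch under assumed parameters, not the kernel code; the real implementation is in the diff below). It assumes four groups competing for two RMIDs and ignores the RMID recycling/stabilization delay the real code handles: each rotation deschedules the head group, moves it to the tail, and hands the freed RMID to the first waiting group.

```c
#include <stdbool.h>
#include <stdio.h>

#define NR_GROUPS 4

/* One monitoring group; has_rmid models "currently scheduled in". */
struct group {
	int id;
	bool has_rmid;
};

/* Two of the four groups start out holding the two available RMIDs. */
static struct group queue[NR_GROUPS] = {
	{ 0, true }, { 1, true }, { 2, false }, { 3, false },
};

/*
 * One rotation step: deschedule the group at the head (freeing its RMID),
 * move it to the tail, then hand the freed RMID to the first waiting group.
 */
static void rotate_once(void)
{
	struct group old_head = queue[0];
	int i;

	old_head.has_rmid = false;

	for (i = 0; i < NR_GROUPS - 1; i++)
		queue[i] = queue[i + 1];
	queue[NR_GROUPS - 1] = old_head;

	for (i = 0; i < NR_GROUPS; i++) {
		if (!queue[i].has_rmid) {
			queue[i].has_rmid = true;
			break;
		}
	}
}

int main(void)
{
	for (int round = 1; round <= NR_GROUPS; round++) {
		rotate_once();
		printf("after rotation %d, RMIDs held by:", round);
		for (int i = 0; i < NR_GROUPS; i++) {
			if (queue[i].has_rmid)
				printf(" group %d", queue[i].id);
		}
		printf("\n");
	}
	return 0;
}
```

Every group cycles through the front of the list and gets monitored for a while, which is the fairness property the rotation relies on.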
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kanaka Juvva <kanaka.d.juvva@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Vikas Shivappa <vikas.shivappa@linux.intel.com>
Link: http://lkml.kernel.org/r/1422038748-21397-10-git-send-email-matt@codeblueprint.co.uk
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--   arch/x86/kernel/cpu/perf_event_intel_cqm.c   130
1 file changed, 84 insertions, 46 deletions
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index e31f5086f2b5..9a8ef8376fcd 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -507,7 +507,6 @@ static unsigned int __rmid_queue_time_ms = RMID_DEFAULT_QUEUE_TIME;
 static bool intel_cqm_rmid_stabilize(unsigned int *available)
 {
 	struct cqm_rmid_entry *entry, *tmp;
-	struct perf_event *event;
 
 	lockdep_assert_held(&cache_mutex);
 
@@ -577,19 +576,9 @@ static bool intel_cqm_rmid_stabilize(unsigned int *available)
 
 		/*
 		 * If we have groups waiting for RMIDs, hand
-		 * them one now.
+		 * them one now provided they don't conflict.
 		 */
-		list_for_each_entry(event, &cache_groups,
-				    hw.cqm_groups_entry) {
-			if (__rmid_valid(event->hw.cqm_rmid))
-				continue;
-
-			intel_cqm_xchg_rmid(event, entry->rmid);
-			entry = NULL;
-			break;
-		}
-
-		if (!entry)
+		if (intel_cqm_sched_in_event(entry->rmid))
 			continue;
 
 		/*
@@ -604,25 +593,73 @@ static bool intel_cqm_rmid_stabilize(unsigned int *available)
 
 /*
  * Pick a victim group and move it to the tail of the group list.
+ * @next: The first group without an RMID
  */
-static struct perf_event *
-__intel_cqm_pick_and_rotate(void)
+static void __intel_cqm_pick_and_rotate(struct perf_event *next)
 {
 	struct perf_event *rotor;
+	unsigned int rmid;
 
 	lockdep_assert_held(&cache_mutex);
-	lockdep_assert_held(&cache_lock);
 
 	rotor = list_first_entry(&cache_groups, struct perf_event,
 				 hw.cqm_groups_entry);
+
+	/*
+	 * The group at the front of the list should always have a valid
+	 * RMID. If it doesn't then no groups have RMIDs assigned and we
+	 * don't need to rotate the list.
+	 */
+	if (next == rotor)
+		return;
+
+	rmid = intel_cqm_xchg_rmid(rotor, INVALID_RMID);
+	__put_rmid(rmid);
+
 	list_rotate_left(&cache_groups);
+}
+
+/*
+ * Deallocate the RMIDs from any events that conflict with @event, and
+ * place them on the back of the group list.
+ */
+static void intel_cqm_sched_out_conflicting_events(struct perf_event *event)
+{
+	struct perf_event *group, *g;
+	unsigned int rmid;
+
+	lockdep_assert_held(&cache_mutex);
+
+	list_for_each_entry_safe(group, g, &cache_groups, hw.cqm_groups_entry) {
+		if (group == event)
+			continue;
+
+		rmid = group->hw.cqm_rmid;
+
+		/*
+		 * Skip events that don't have a valid RMID.
+		 */
+		if (!__rmid_valid(rmid))
+			continue;
+
+		/*
+		 * No conflict? No problem! Leave the event alone.
+		 */
+		if (!__conflict_event(group, event))
+			continue;
 
-	return rotor;
+		intel_cqm_xchg_rmid(group, INVALID_RMID);
+		__put_rmid(rmid);
+	}
 }
 
 /*
  * Attempt to rotate the groups and assign new RMIDs.
  *
+ * We rotate for two reasons,
+ *   1. To handle the scheduling of conflicting events
+ *   2. To recycle RMIDs
+ *
  * Rotating RMIDs is complicated because the hardware doesn't give us
  * any clues.
  *
@@ -642,11 +679,10 @@ __intel_cqm_pick_and_rotate(void)
  */
 static bool __intel_cqm_rmid_rotate(void)
 {
-	struct perf_event *group, *rotor, *start = NULL;
+	struct perf_event *group, *start = NULL;
 	unsigned int threshold_limit;
 	unsigned int nr_needed = 0;
 	unsigned int nr_available;
-	unsigned int rmid;
 	bool rotated = false;
 
 	mutex_lock(&cache_mutex);
@@ -678,7 +714,9 @@ again:
 		goto stabilize;
 
 	/*
-	 * We have more event groups without RMIDs than available RMIDs.
+	 * We have more event groups without RMIDs than available RMIDs,
+	 * or we have event groups that conflict with the ones currently
+	 * scheduled.
 	 *
 	 * We force deallocate the rmid of the group at the head of
 	 * cache_groups. The first event group without an RMID then gets
@@ -688,15 +726,7 @@ again:
 	 * Rotate the cache_groups list so the previous head is now the
 	 * tail.
 	 */
-	rotor = __intel_cqm_pick_and_rotate();
-	rmid = intel_cqm_xchg_rmid(rotor, INVALID_RMID);
-
-	/*
-	 * The group at the front of the list should always have a valid
-	 * RMID. If it doesn't then no groups have RMIDs assigned.
-	 */
-	if (!__rmid_valid(rmid))
-		goto stabilize;
+	__intel_cqm_pick_and_rotate(start);
 
 	/*
 	 * If the rotation is going to succeed, reduce the threshold so
@@ -704,14 +734,14 @@ again:
 	 */
 	if (__rmid_valid(intel_cqm_rotation_rmid)) {
 		intel_cqm_xchg_rmid(start, intel_cqm_rotation_rmid);
-		intel_cqm_rotation_rmid = INVALID_RMID;
+		intel_cqm_rotation_rmid = __get_rmid();
+
+		intel_cqm_sched_out_conflicting_events(start);
 
 		if (__intel_cqm_threshold)
 			__intel_cqm_threshold--;
 	}
 
-	__put_rmid(rmid);
-
 	rotated = true;
 
 stabilize:
@@ -794,25 +824,37 @@ static void intel_cqm_rmid_rotate(struct work_struct *work)
  *
  * If we're part of a group, we use the group's RMID.
  */
-static int intel_cqm_setup_event(struct perf_event *event,
-				 struct perf_event **group)
+static void intel_cqm_setup_event(struct perf_event *event,
+				  struct perf_event **group)
 {
 	struct perf_event *iter;
+	unsigned int rmid;
+	bool conflict = false;
 
 	list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
+		rmid = iter->hw.cqm_rmid;
+
 		if (__match_event(iter, event)) {
 			/* All tasks in a group share an RMID */
-			event->hw.cqm_rmid = iter->hw.cqm_rmid;
+			event->hw.cqm_rmid = rmid;
 			*group = iter;
-			return 0;
+			return;
 		}
 
-		if (__conflict_event(iter, event))
-			return -EBUSY;
+		/*
+		 * We only care about conflicts for events that are
+		 * actually scheduled in (and hence have a valid RMID).
+		 */
+		if (__conflict_event(iter, event) && __rmid_valid(rmid))
+			conflict = true;
 	}
 
-	event->hw.cqm_rmid = __get_rmid();
-	return 0;
+	if (conflict)
+		rmid = INVALID_RMID;
+	else
+		rmid = __get_rmid();
+
+	event->hw.cqm_rmid = rmid;
 }
 
 static void intel_cqm_event_read(struct perf_event *event)
@@ -1030,7 +1072,6 @@ static int intel_cqm_event_init(struct perf_event *event)
 {
 	struct perf_event *group = NULL;
 	bool rotate = false;
-	int err;
 
 	if (event->attr.type != intel_cqm_pmu.type)
 		return -ENOENT;
@@ -1056,9 +1097,7 @@ static int intel_cqm_event_init(struct perf_event *event)
 	mutex_lock(&cache_mutex);
 
 	/* Will also set rmid */
-	err = intel_cqm_setup_event(event, &group);
-	if (err)
-		goto out;
+	intel_cqm_setup_event(event, &group);
 
 	if (group) {
 		list_add_tail(&event->hw.cqm_group_entry,
@@ -1078,13 +1117,12 @@ static int intel_cqm_event_init(struct perf_event *event)
 		rotate = true;
 	}
 
-out:
 	mutex_unlock(&cache_mutex);
 
 	if (rotate)
 		schedule_delayed_work(&intel_cqm_rmid_work, 0);
 
-	return err;
+	return 0;
 }
 
 EVENT_ATTR_STR(llc_occupancy, intel_cqm_llc, "event=0x01");