aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVikas Shivappa <vikas.shivappa@linux.intel.com>2016-03-10 18:32:11 -0500
committerIngo Molnar <mingo@kernel.org>2016-03-21 04:08:20 -0400
commit2d4de8376ff1d94a5070cfa9092c59bfdc4e693e (patch)
tree988a8c083d7acc793e760738284aa0e8bb4b6ebd
parent87f01cc2a2914b61ade5ec834377fa7819484173 (diff)
perf/x86/mbm: Implement RMID recycling
RMID could be allocated or deallocated as part of RMID recycling. When an RMID is allocated for MBM event, the MBM counter needs to be initialized because next time we read the counter we need the previous value to account for total bytes that went to the memory controller. Similarly, when RMID is deallocated we need to update the ->count variable. Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Tony Luck <tony.luck@intel.com> Acked-by: Thomas Gleixner <tglx@linutronix.de> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Andy Lutomirski <luto@amacapital.net> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Gerst <brgerst@gmail.com> Cc: David Ahern <dsahern@gmail.com> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Matt Fleming <matt@codeblueprint.co.uk> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Stephane Eranian <eranian@google.com> Cc: Vince Weaver <vincent.weaver@maine.edu> Cc: fenghua.yu@intel.com Cc: h.peter.anvin@intel.com Cc: ravi.v.shankar@intel.com Cc: vikas.shivappa@intel.com Link: http://lkml.kernel.org/r/1457652732-4499-6-git-send-email-vikas.shivappa@linux.intel.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- arch/x86/events/intel/cqm.c | 31
1 file changed, 27 insertions(+), 4 deletions(-)
diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 610bd8ab37e4..a98f472bf6b2 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -450,6 +450,7 @@ struct rmid_read {
450 450
451static void __intel_cqm_event_count(void *info); 451static void __intel_cqm_event_count(void *info);
452static void init_mbm_sample(u32 rmid, u32 evt_type); 452static void init_mbm_sample(u32 rmid, u32 evt_type);
453static void __intel_mbm_event_count(void *info);
453 454
454static bool is_mbm_event(int e) 455static bool is_mbm_event(int e)
455{ 456{
@@ -476,8 +477,14 @@ static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
476 .rmid = old_rmid, 477 .rmid = old_rmid,
477 }; 478 };
478 479
479 on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, 480 if (is_mbm_event(group->attr.config)) {
480 &rr, 1); 481 rr.evt_type = group->attr.config;
482 on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_count,
483 &rr, 1);
484 } else {
485 on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count,
486 &rr, 1);
487 }
481 local64_set(&group->count, atomic64_read(&rr.value)); 488 local64_set(&group->count, atomic64_read(&rr.value));
482 } 489 }
483 490
@@ -489,6 +496,22 @@ static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
489 496
490 raw_spin_unlock_irq(&cache_lock); 497 raw_spin_unlock_irq(&cache_lock);
491 498
499 /*
500 * If the allocation is for mbm, init the mbm stats.
501 * Need to check if each event in the group is mbm event
502 * because there could be multiple type of events in the same group.
503 */
504 if (__rmid_valid(rmid)) {
505 event = group;
506 if (is_mbm_event(event->attr.config))
507 init_mbm_sample(rmid, event->attr.config);
508
509 list_for_each_entry(event, head, hw.cqm_group_entry) {
510 if (is_mbm_event(event->attr.config))
511 init_mbm_sample(rmid, event->attr.config);
512 }
513 }
514
492 return old_rmid; 515 return old_rmid;
493} 516}
494 517
@@ -978,7 +1001,7 @@ static void intel_cqm_setup_event(struct perf_event *event,
978 /* All tasks in a group share an RMID */ 1001 /* All tasks in a group share an RMID */
979 event->hw.cqm_rmid = rmid; 1002 event->hw.cqm_rmid = rmid;
980 *group = iter; 1003 *group = iter;
981 if (is_mbm_event(event->attr.config)) 1004 if (is_mbm_event(event->attr.config) && __rmid_valid(rmid))
982 init_mbm_sample(rmid, event->attr.config); 1005 init_mbm_sample(rmid, event->attr.config);
983 return; 1006 return;
984 } 1007 }
@@ -996,7 +1019,7 @@ static void intel_cqm_setup_event(struct perf_event *event,
996 else 1019 else
997 rmid = __get_rmid(); 1020 rmid = __get_rmid();
998 1021
999 if (is_mbm_event(event->attr.config)) 1022 if (is_mbm_event(event->attr.config) && __rmid_valid(rmid))
1000 init_mbm_sample(rmid, event->attr.config); 1023 init_mbm_sample(rmid, event->attr.config);
1001 1024
1002 event->hw.cqm_rmid = rmid; 1025 event->hw.cqm_rmid = rmid;