Diffstat (limited to 'arch/x86/kernel/cpu/perf_event_intel.c')
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	30
1 file changed, 10 insertions, 20 deletions
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 7a58fb5df15c..a1e35c9f06b9 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1923,7 +1923,6 @@ intel_start_scheduling(struct cpu_hw_events *cpuc)
 	xl = &excl_cntrs->states[tid];
 
 	xl->sched_started = true;
-	xl->num_alloc_cntrs = 0;
 	/*
 	 * lock shared state until we are done scheduling
 	 * in stop_event_scheduling()
@@ -2000,6 +1999,11 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
 	 * across HT threads
 	 */
 	is_excl = c->flags & PERF_X86_EVENT_EXCL;
+	if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
+		event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
+		if (!cpuc->n_excl++)
+			WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
+	}
 
 	/*
 	 * xl = state of current HT
@@ -2008,18 +2012,6 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
 	xl = &excl_cntrs->states[tid];
 	xlo = &excl_cntrs->states[o_tid];
 
-	/*
-	 * do not allow scheduling of more than max_alloc_cntrs
-	 * which is set to half the available generic counters.
-	 * this helps avoid counter starvation of sibling thread
-	 * by ensuring at most half the counters cannot be in
-	 * exclusive mode. There is not designated counters for the
-	 * limits. Any N/2 counters can be used. This helps with
-	 * events with specifix counter constraints
-	 */
-	if (xl->num_alloc_cntrs++ == xl->max_alloc_cntrs)
-		return &emptyconstraint;
-
 	cx = c;
 
 	/*
@@ -2150,6 +2142,11 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
 
 	xl = &excl_cntrs->states[tid];
 	xlo = &excl_cntrs->states[o_tid];
+	if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
+		hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
+		if (!--cpuc->n_excl)
+			WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
+	}
 
 	/*
 	 * put_constraint may be called from x86_schedule_events()
@@ -2632,8 +2629,6 @@ static void intel_pmu_cpu_starting(int cpu)
 	cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
 
 	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
-		int h = x86_pmu.num_counters >> 1;
-
 		for_each_cpu(i, topology_thread_cpumask(cpu)) {
 			struct intel_excl_cntrs *c;
 
@@ -2647,11 +2642,6 @@ static void intel_pmu_cpu_starting(int cpu)
 		}
 		cpuc->excl_cntrs->core_id = core_id;
 		cpuc->excl_cntrs->refcnt++;
-		/*
-		 * set hard limit to half the number of generic counters
-		 */
-		cpuc->excl_cntrs->states[0].max_alloc_cntrs = h;
-		cpuc->excl_cntrs->states[1].max_alloc_cntrs = h;
 	}
 }
 
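For context, the accounting added above follows a simple reference-count pattern: the first exclusive event scheduled on a hyperthread publishes has_exclusive[tid], and releasing the last one clears it, replacing the fixed N/2 counter cap removed in this patch. Below is a stand-alone, hedged sketch of that pattern only; it is not kernel code, the structures and helper names (get_excl, put_excl, cpu_events) are simplified placeholders, and the real consumers of has_exclusive are elsewhere in the scheduler, not shown in these hunks.

	/* Minimal user-space illustration of the EXCL_ACCT-style accounting. */
	#include <stdio.h>
	#include <stdbool.h>

	struct excl_cntrs {
		bool has_exclusive[2];	/* one flag per hyperthread */
	};

	struct cpu_events {
		int tid;		/* hyperthread id: 0 or 1 */
		int n_excl;		/* exclusive events currently accounted */
		struct excl_cntrs *excl;
	};

	/* Mirrors the get-constraint side: account each exclusive event once. */
	static void get_excl(struct cpu_events *c, bool *acct)
	{
		if (!*acct) {
			*acct = true;
			if (!c->n_excl++)
				c->excl->has_exclusive[c->tid] = true;
		}
	}

	/* Mirrors the put-constraint side: drop the accounting on release. */
	static void put_excl(struct cpu_events *c, bool *acct)
	{
		if (*acct) {
			*acct = false;
			if (!--c->n_excl)
				c->excl->has_exclusive[c->tid] = false;
		}
	}

	int main(void)
	{
		struct excl_cntrs shared = { { false, false } };
		struct cpu_events cpu0 = { .tid = 0, .excl = &shared };
		bool ev_a = false, ev_b = false;	/* per-event "EXCL_ACCT" flags */

		get_excl(&cpu0, &ev_a);
		get_excl(&cpu0, &ev_b);
		printf("after 2 gets: n_excl=%d has_exclusive=%d\n",
		       cpu0.n_excl, shared.has_exclusive[0]);

		put_excl(&cpu0, &ev_a);
		put_excl(&cpu0, &ev_b);
		printf("after 2 puts: n_excl=%d has_exclusive=%d\n",
		       cpu0.n_excl, shared.has_exclusive[0]);
		return 0;
	}

The per-event flag guards against double accounting when constraints are recomputed for the same event, which is why both the kernel hunks and this sketch check the flag before touching the counter.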