author	Yan, Zheng <zheng.z.yan@intel.com>	2012-06-15 02:31:30 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-06-18 06:13:20 -0400
commit	4b4969b14490a4f65b572b8f180164181104b5e1 (patch)
tree	b7319d7695639b79d21fbc190feca46f526ae454
parent	d1ece0998eeb7b6543044f32b7d9bcbaf6dc294a (diff)
perf: Export perf_assign_events()
Export perf_assign_events() so the uncore code can use it to schedule events.

Signed-off-by: Zheng Yan <zheng.z.yan@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1339741902-8449-2-git-send-email-zheng.z.yan@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	6
-rw-r--r--	arch/x86/kernel/cpu/perf_event.h	2
2 files changed, 5 insertions, 3 deletions
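For context, a minimal sketch (not part of this patch) of how a caller outside perf_event.c, such as the uncore scheduling code this export is meant for, might invoke the now-global perf_assign_events(). The uncore_schedule_events() wrapper name is hypothetical, and the caller is assumed to have already collected one event_constraint per event:

/* Hypothetical caller sketch; builds against arch/x86/kernel/cpu/perf_event.h. */
#include "perf_event.h"

static int uncore_schedule_events(struct event_constraint **constraints,
				  int n, int wmin, int wmax, int *assign)
{
	/*
	 * assign[i] receives the counter index chosen for event i;
	 * a return value of 0 means every event could be scheduled,
	 * non-zero means scheduling failed.
	 */
	return perf_assign_events(constraints, n, wmin, wmax, assign);
}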
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index d1f38c9509d0..6d32aefc9dbd 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -626,7 +626,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
 	c = sched->constraints[sched->state.event];

 	/* Prefer fixed purpose counters */
-	if (x86_pmu.num_counters_fixed) {
+	if (c->idxmsk64 & (~0ULL << X86_PMC_IDX_FIXED)) {
 		idx = X86_PMC_IDX_FIXED;
 		for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
 			if (!__test_and_set_bit(idx, sched->state.used))
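The hunk above switches the fixed-counter preference from the global x86_pmu.num_counters_fixed to the event's own constraint mask, so the scheduler no longer depends on x86_pmu state. A standalone sketch of that bit test, assuming X86_PMC_IDX_FIXED is 32 as in the x86 perf code of this period:

/* Standalone illustration only; the value of X86_PMC_IDX_FIXED is assumed. */
#include <stdbool.h>
#include <stdint.h>

#define X86_PMC_IDX_FIXED	32

/* True if the constraint's 64-bit index mask allows any fixed-purpose counter. */
static bool constraint_wants_fixed_counter(uint64_t idxmsk64)
{
	return (idxmsk64 & (~0ULL << X86_PMC_IDX_FIXED)) != 0;
}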
@@ -693,8 +693,8 @@ static bool perf_sched_next_event(struct perf_sched *sched)
 /*
  * Assign a counter for each event.
  */
-static int perf_assign_events(struct event_constraint **constraints, int n,
-			int wmin, int wmax, int *assign)
+int perf_assign_events(struct event_constraint **constraints, int n,
+			int wmin, int wmax, int *assign)
 {
 	struct perf_sched sched;

diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 3df3de9452a9..83238f2a12b2 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -481,6 +481,8 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,

 void x86_pmu_enable_all(int added);

+int perf_assign_events(struct event_constraint **constraints, int n,
+			int wmin, int wmax, int *assign);
 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

 void x86_pmu_stop(struct perf_event *event, int flags);