Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c  45
1 file changed, 43 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 5a469d3d0c6..fa6fdec5afb 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -499,11 +499,16 @@ struct sched_state {
         unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 };
 
+/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
+#define SCHED_STATES_MAX 2
+
 struct perf_sched {
         int max_weight;
         int max_events;
         struct event_constraint **constraints;
         struct sched_state state;
+        int saved_states;
+        struct sched_state saved[SCHED_STATES_MAX];
 };
 
 /*
@@ -529,11 +534,34 @@ static void perf_sched_init(struct perf_sched *sched, struct event_constraint **
         sched->state.unassigned = num;
 }
 
+static void perf_sched_save_state(struct perf_sched *sched)
+{
+        if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
+                return;
+
+        sched->saved[sched->saved_states] = sched->state;
+        sched->saved_states++;
+}
+
+static bool perf_sched_restore_state(struct perf_sched *sched)
+{
+        if (!sched->saved_states)
+                return false;
+
+        sched->saved_states--;
+        sched->state = sched->saved[sched->saved_states];
+
+        /* continue with next counter: */
+        clear_bit(sched->state.counter++, sched->state.used);
+
+        return true;
+}
+
 /*
  * Select a counter for the current event to schedule. Return true on
  * success.
  */
-static bool perf_sched_find_counter(struct perf_sched *sched)
+static bool __perf_sched_find_counter(struct perf_sched *sched)
 {
         struct event_constraint *c;
         int idx;
@@ -557,6 +585,19 @@ static bool perf_sched_find_counter(struct perf_sched *sched)
         if (idx >= X86_PMC_IDX_MAX)
                 return false;
 
+        if (c->overlap)
+                perf_sched_save_state(sched);
+
+        return true;
+}
+
+static bool perf_sched_find_counter(struct perf_sched *sched)
+{
+        while (!__perf_sched_find_counter(sched)) {
+                if (!perf_sched_restore_state(sched))
+                        return false;
+        }
+
         return true;
 }
 
@@ -1250,7 +1291,7 @@ static int __init init_hw_perf_events(void)
 
         unconstrained = (struct event_constraint)
                 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
-                                   0, x86_pmu.num_counters);
+                                   0, x86_pmu.num_counters, 0);
 
         if (x86_pmu.event_constraints) {
                 for_each_event_constraint(c, x86_pmu.event_constraints) {
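
The patch adds bounded backtracking to the x86 counter scheduler. When an event whose constraint has the overlap flag set claims a counter, perf_sched_save_state() checkpoints the scheduler state; if a later event then finds no free counter, perf_sched_restore_state() pops the checkpoint, releases the previously chosen counter, and the retry loop in perf_sched_find_counter() resumes the search from the next counter. The checkpoint stack is capped at SCHED_STATES_MAX (2) because, as the new comment notes, unbounded retries are O(n!); the extra trailing 0 passed to __EVENT_CONSTRAINT in the last hunk appears to initialize the new flag for the unconstrained case. The user-space sketch below illustrates the same save/restore idea under simplified assumptions; the types, masks, and helper names (struct state, find_counter, schedule_events, NUM_COUNTERS) are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

#define NUM_COUNTERS 4
#define STATES_MAX   2          /* mirrors SCHED_STATES_MAX */
#define MAX_EVENTS   8

struct constraint {
        unsigned int idxmsk;    /* bitmask of counters this event may use */
        bool overlap;           /* mask overlaps another constraint's mask */
};

struct state {
        int event;                      /* next event to place */
        int counter;                    /* first counter to try for it */
        unsigned int used;              /* bitmask of occupied counters */
        int assign[MAX_EVENTS];         /* chosen counter per event */
};

static struct state saved[STATES_MAX];
static int saved_states;

static void save_state(const struct state *s)
{
        if (saved_states < STATES_MAX)
                saved[saved_states++] = *s;
}

static bool restore_state(struct state *s)
{
        if (!saved_states)
                return false;
        *s = saved[--saved_states];
        /* free the counter picked last time and retry with the next one */
        s->used &= ~(1u << s->counter);
        s->counter++;
        return true;
}

/* greedily try counters [s->counter, NUM_COUNTERS) permitted by c */
static bool find_counter(struct state *s, const struct constraint *c)
{
        for (; s->counter < NUM_COUNTERS; s->counter++) {
                unsigned int bit = 1u << s->counter;

                if ((c->idxmsk & bit) && !(s->used & bit)) {
                        s->used |= bit;
                        s->assign[s->event] = s->counter;
                        if (c->overlap)         /* checkpoint, as in the patch */
                                save_state(s);
                        return true;
                }
        }
        return false;
}

static bool schedule_events(const struct constraint *c, int nr, int *assign)
{
        struct state s = { 0 };

        while (s.event < nr) {
                while (!find_counter(&s, &c[s.event])) {
                        if (!restore_state(&s))
                                return false;   /* no checkpoint left: fail */
                }
                s.event++;
                s.counter = 0;
        }
        for (int i = 0; i < nr; i++)
                assign[i] = s.assign[i];
        return true;
}

int main(void)
{
        /* event 0 may use counters 0-1; event 1 needs counter 0 */
        struct constraint c[] = {
                { .idxmsk = 0x3, .overlap = true },
                { .idxmsk = 0x1, .overlap = false },
        };
        int assign[2];

        if (schedule_events(c, 2, assign))
                printf("event0 -> %d, event1 -> %d\n", assign[0], assign[1]);
        return 0;
}

Compiled with gcc -std=c99, the sketch prints "event0 -> 1, event1 -> 0": the first greedy pick (event 0 on counter 0) is undone via the checkpoint once event 1, which can only use counter 0, fails to place.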