aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2015-05-21 04:57:39 -0400
committerIngo Molnar <mingo@kernel.org>2015-05-27 03:17:45 -0400
commit43ef205bded025432f5eeeb3503c11fe5cd1913e (patch)
tree5232d5c0ceba464c361860d3d814bcef01e219e1
parent1fe684e349e904adeed2883cfdeef259a21c94f4 (diff)
perf/x86/intel: Remove intel_excl_states::init_state
For some obscure reason intel_{start,stop}_scheduling() copy the HT state to an intermediate array. This would make sense if we were ever to make changes to it which we'd have to discard. Except we don't. By the time we call intel_commit_scheduling() we're, as the name implies, committed to them. We'll never back out. A further hint that it's pointless is that intel_stop_scheduling() unconditionally publishes the state. So the intermediate array is pointless; modify the state in place and kill the extra array. And remove the pointless array initialization: INTEL_EXCL_UNUSED == 0. Note: all of this is serialized by intel_excl_cntr::lock. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Stephane Eranian <eranian@google.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vince Weaver <vincent.weaver@maine.edu> Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/x86/kernel/cpu/perf_event.c1
-rw-r--r--arch/x86/kernel/cpu/perf_event.h1
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c22
3 files changed, 2 insertions, 22 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4f7001f28936..d275da3d81dd 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -884,7 +884,6 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
884 } 884 }
885 885
886 if (!assign || unsched) { 886 if (!assign || unsched) {
887
888 for (i = 0; i < n; i++) { 887 for (i = 0; i < n; i++) {
889 e = cpuc->event_list[i]; 888 e = cpuc->event_list[i];
890 /* 889 /*
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index e5609522255c..89e6cd61e6ae 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -133,7 +133,6 @@ enum intel_excl_state_type {
133}; 133};
134 134
135struct intel_excl_states { 135struct intel_excl_states {
136 enum intel_excl_state_type init_state[X86_PMC_IDX_MAX];
137 enum intel_excl_state_type state[X86_PMC_IDX_MAX]; 136 enum intel_excl_state_type state[X86_PMC_IDX_MAX];
138 bool sched_started; /* true if scheduling has started */ 137 bool sched_started; /* true if scheduling has started */
139}; 138};
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 6a3e794cdc06..f3201439031d 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1927,11 +1927,6 @@ intel_start_scheduling(struct cpu_hw_events *cpuc)
1927 * makes scheduling appear as a transaction 1927 * makes scheduling appear as a transaction
1928 */ 1928 */
1929 raw_spin_lock(&excl_cntrs->lock); 1929 raw_spin_lock(&excl_cntrs->lock);
1930
1931 /*
1932 * Save a copy of our state to work on.
1933 */
1934 memcpy(xl->init_state, xl->state, sizeof(xl->init_state));
1935} 1930}
1936 1931
1937static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) 1932static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
@@ -1955,9 +1950,9 @@ static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cnt
1955 lockdep_assert_held(&excl_cntrs->lock); 1950 lockdep_assert_held(&excl_cntrs->lock);
1956 1951
1957 if (c->flags & PERF_X86_EVENT_EXCL) 1952 if (c->flags & PERF_X86_EVENT_EXCL)
1958 xl->init_state[cntr] = INTEL_EXCL_EXCLUSIVE; 1953 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
1959 else 1954 else
1960 xl->init_state[cntr] = INTEL_EXCL_SHARED; 1955 xl->state[cntr] = INTEL_EXCL_SHARED;
1961} 1956}
1962 1957
1963static void 1958static void
@@ -1980,11 +1975,6 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
1980 1975
1981 xl = &excl_cntrs->states[tid]; 1976 xl = &excl_cntrs->states[tid];
1982 1977
1983 /*
1984 * Commit the working state.
1985 */
1986 memcpy(xl->state, xl->init_state, sizeof(xl->state));
1987
1988 xl->sched_started = false; 1978 xl->sched_started = false;
1989 /* 1979 /*
1990 * release shared state lock (acquired in intel_start_scheduling()) 1980 * release shared state lock (acquired in intel_start_scheduling())
@@ -2519,19 +2509,11 @@ struct intel_shared_regs *allocate_shared_regs(int cpu)
2519static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu) 2509static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
2520{ 2510{
2521 struct intel_excl_cntrs *c; 2511 struct intel_excl_cntrs *c;
2522 int i;
2523 2512
2524 c = kzalloc_node(sizeof(struct intel_excl_cntrs), 2513 c = kzalloc_node(sizeof(struct intel_excl_cntrs),
2525 GFP_KERNEL, cpu_to_node(cpu)); 2514 GFP_KERNEL, cpu_to_node(cpu));
2526 if (c) { 2515 if (c) {
2527 raw_spin_lock_init(&c->lock); 2516 raw_spin_lock_init(&c->lock);
2528 for (i = 0; i < X86_PMC_IDX_MAX; i++) {
2529 c->states[0].state[i] = INTEL_EXCL_UNUSED;
2530 c->states[0].init_state[i] = INTEL_EXCL_UNUSED;
2531
2532 c->states[1].state[i] = INTEL_EXCL_UNUSED;
2533 c->states[1].init_state[i] = INTEL_EXCL_UNUSED;
2534 }
2535 c->core_id = -1; 2517 c->core_id = -1;
2536 } 2518 }
2537 return c; 2519 return c;