about summary refs log tree commit diff stats
path: root/arch/x86
diff options
context:
space:
mode:
author Stephane Eranian <eranian@google.com> 2014-11-17 14:06:54 -0500
committer Ingo Molnar <mingo@kernel.org> 2015-04-02 11:33:08 -0400
commit 90413464313e00fe4975f4a0ebf25fe31d01f793 (patch)
tree cad15b8e8c58af9597b6525f32f7bede0ea14b2f /arch/x86
parent 9a5e3fb52ae5458c8bf1a67129b96c39b541a582 (diff)
perf/x86: Vectorize cpuc->kfree_on_online
Make the cpuc->kfree_on_online a vector to accommodate more than one entry and add the second entry to be used by a later patch. Signed-off-by: Stephane Eranian <eranian@google.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Maria Dimakopoulou <maria.n.dimakopoulou@gmail.com> Cc: bp@alien8.de Cc: jolsa@redhat.com Cc: kan.liang@intel.com Link: http://lkml.kernel.org/r/1416251225-17721-3-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/kernel/cpu/perf_event.c10
-rw-r--r--arch/x86/kernel/cpu/perf_event.h8
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c3
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c4
4 files changed, 19 insertions, 6 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 549d01d6d996..682ef00727e7 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1373,11 +1373,12 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1373{ 1373{
1374 unsigned int cpu = (long)hcpu; 1374 unsigned int cpu = (long)hcpu;
1375 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 1375 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1376 int ret = NOTIFY_OK; 1376 int i, ret = NOTIFY_OK;
1377 1377
1378 switch (action & ~CPU_TASKS_FROZEN) { 1378 switch (action & ~CPU_TASKS_FROZEN) {
1379 case CPU_UP_PREPARE: 1379 case CPU_UP_PREPARE:
1380 cpuc->kfree_on_online = NULL; 1380 for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
1381 cpuc->kfree_on_online[i] = NULL;
1381 if (x86_pmu.cpu_prepare) 1382 if (x86_pmu.cpu_prepare)
1382 ret = x86_pmu.cpu_prepare(cpu); 1383 ret = x86_pmu.cpu_prepare(cpu);
1383 break; 1384 break;
@@ -1388,7 +1389,10 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1388 break; 1389 break;
1389 1390
1390 case CPU_ONLINE: 1391 case CPU_ONLINE:
1391 kfree(cpuc->kfree_on_online); 1392 for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
1393 kfree(cpuc->kfree_on_online[i]);
1394 cpuc->kfree_on_online[i] = NULL;
1395 }
1392 break; 1396 break;
1393 1397
1394 case CPU_DYING: 1398 case CPU_DYING:
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 5264010c9a08..55b915511e53 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -125,6 +125,12 @@ struct intel_shared_regs {
125 125
126#define MAX_LBR_ENTRIES 16 126#define MAX_LBR_ENTRIES 16
127 127
128enum {
129 X86_PERF_KFREE_SHARED = 0,
130 X86_PERF_KFREE_EXCL = 1,
131 X86_PERF_KFREE_MAX
132};
133
128struct cpu_hw_events { 134struct cpu_hw_events {
129 /* 135 /*
130 * Generic x86 PMC bits 136 * Generic x86 PMC bits
@@ -187,7 +193,7 @@ struct cpu_hw_events {
187 /* Inverted mask of bits to clear in the perf_ctr ctrl registers */ 193 /* Inverted mask of bits to clear in the perf_ctr ctrl registers */
188 u64 perf_ctr_virt_mask; 194 u64 perf_ctr_virt_mask;
189 195
190 void *kfree_on_online; 196 void *kfree_on_online[X86_PERF_KFREE_MAX];
191}; 197};
192 198
193#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\ 199#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 28926311aac1..e4302b8fed2a 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -382,6 +382,7 @@ static int amd_pmu_cpu_prepare(int cpu)
382static void amd_pmu_cpu_starting(int cpu) 382static void amd_pmu_cpu_starting(int cpu)
383{ 383{
384 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 384 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
385 void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
385 struct amd_nb *nb; 386 struct amd_nb *nb;
386 int i, nb_id; 387 int i, nb_id;
387 388
@@ -399,7 +400,7 @@ static void amd_pmu_cpu_starting(int cpu)
399 continue; 400 continue;
400 401
401 if (nb->nb_id == nb_id) { 402 if (nb->nb_id == nb_id) {
402 cpuc->kfree_on_online = cpuc->amd_nb; 403 *onln = cpuc->amd_nb;
403 cpuc->amd_nb = nb; 404 cpuc->amd_nb = nb;
404 break; 405 break;
405 } 406 }
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index e85988e2ecc7..c0ed5a4b9537 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2251,12 +2251,14 @@ static void intel_pmu_cpu_starting(int cpu)
2251 return; 2251 return;
2252 2252
2253 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) { 2253 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
2254 void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
2255
2254 for_each_cpu(i, topology_thread_cpumask(cpu)) { 2256 for_each_cpu(i, topology_thread_cpumask(cpu)) {
2255 struct intel_shared_regs *pc; 2257 struct intel_shared_regs *pc;
2256 2258
2257 pc = per_cpu(cpu_hw_events, i).shared_regs; 2259 pc = per_cpu(cpu_hw_events, i).shared_regs;
2258 if (pc && pc->core_id == core_id) { 2260 if (pc && pc->core_id == core_id) {
2259 cpuc->kfree_on_online = cpuc->shared_regs; 2261 *onln = cpuc->shared_regs;
2260 cpuc->shared_regs = pc; 2262 cpuc->shared_regs = pc;
2261 break; 2263 break;
2262 } 2264 }