aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2011-07-22 07:41:54 -0400
committerIngo Molnar <mingo@elte.hu>2011-08-14 05:53:02 -0400
commit7fdba1ca10462f42ad2246b918fe6368f5ce488e (patch)
tree6cdb2d8c57da4dde9df3aa22bd4839151884dba6
parentbdc2209fd246820de2816691ac0e82600885e1a8 (diff)
perf, x86: Avoid kfree() in CPU_STARTING
On -rt kfree() can schedule, but CPU_STARTING is before the CPU is
fully up and running. These are contradictory, so avoid it. Instead
push the kfree() to CPU_ONLINE where we're free to schedule.

Reported-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-kwd4j6ayld5thrscvaxgjquv@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--arch/x86/kernel/cpu/perf_event.c8
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c2
3 files changed, 10 insertions, 2 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4ee3abf20ed6..594d42513e60 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -129,6 +129,8 @@ struct cpu_hw_events {
 	 * AMD specific bits
 	 */
 	struct amd_nb		*amd_nb;
+
+	void			*kfree_on_online;
 };
 
 #define __EVENT_CONSTRAINT(c, n, m, w) {\
@@ -1466,10 +1468,12 @@ static int __cpuinit
 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 	int ret = NOTIFY_OK;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
+		cpuc->kfree_on_online = NULL;
 		if (x86_pmu.cpu_prepare)
 			ret = x86_pmu.cpu_prepare(cpu);
 		break;
@@ -1479,6 +1483,10 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 		x86_pmu.cpu_starting(cpu);
 		break;
 
+	case CPU_ONLINE:
+		kfree(cpuc->kfree_on_online);
+		break;
+
 	case CPU_DYING:
 		if (x86_pmu.cpu_dying)
 			x86_pmu.cpu_dying(cpu);
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 941caa2e449b..ee9436c3e5d6 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -350,7 +350,7 @@ static void amd_pmu_cpu_starting(int cpu)
 			continue;
 
 		if (nb->nb_id == nb_id) {
-			kfree(cpuc->amd_nb);
+			cpuc->kfree_on_online = cpuc->amd_nb;
 			cpuc->amd_nb = nb;
 			break;
 		}
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index f88af2c2a561..3751494e70f5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1362,7 +1362,7 @@ static void intel_pmu_cpu_starting(int cpu)
 
 		pc = per_cpu(cpu_hw_events, i).shared_regs;
 		if (pc && pc->core_id == core_id) {
-			kfree(cpuc->shared_regs);
+			cpuc->kfree_on_online = cpuc->shared_regs;
 			cpuc->shared_regs = pc;
 			break;
 		}