author    Stephen Boyd <sboyd@codeaurora.org>    2014-09-11 18:25:30 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>    2014-09-16 11:09:33 -0400
commit    505013bc9065391f09a51d51cd3bf0b06dfb570a
tree      8f3434796acf5dc39e1c982d2d817993c7ee4f22 /arch
parent    fbfb872f5f417cea48760c535e0ff027c88b507a
ARM: 8149/1: perf: Don't sleep while atomic when enabling per-cpu interrupts
Rob Clark reports a sleeping while atomic bug when using perf.
BUG: sleeping function called from invalid context at ../kernel/locking/mutex.c:583
in_atomic(): 1, irqs_disabled(): 128, pid: 0, name: swapper/0
------------[ cut here ]------------
WARNING: CPU: 2 PID: 4828 at ../kernel/locking/mutex.c:479 mutex_lock_nested+0x3a0/0x3e8()
DEBUG_LOCKS_WARN_ON(in_interrupt())
Modules linked in:
CPU: 2 PID: 4828 Comm: Xorg.bin Tainted: G W 3.17.0-rc3-00234-gd535c45-dirty #819
[<c0216690>] (unwind_backtrace) from [<c0212174>] (show_stack+0x10/0x14)
[<c0212174>] (show_stack) from [<c0867cc0>] (dump_stack+0x98/0xb8)
[<c0867cc0>] (dump_stack) from [<c02492a4>] (warn_slowpath_common+0x70/0x8c)
[<c02492a4>] (warn_slowpath_common) from [<c02492f0>] (warn_slowpath_fmt+0x30/0x40)
[<c02492f0>] (warn_slowpath_fmt) from [<c086a3f8>] (mutex_lock_nested+0x3a0/0x3e8)
[<c086a3f8>] (mutex_lock_nested) from [<c0294d08>] (irq_find_host+0x20/0x9c)
[<c0294d08>] (irq_find_host) from [<c0769d50>] (of_irq_get+0x28/0x48)
[<c0769d50>] (of_irq_get) from [<c057d104>] (platform_get_irq+0x1c/0x8c)
[<c057d104>] (platform_get_irq) from [<c021a06c>] (cpu_pmu_enable_percpu_irq+0x14/0x38)
[<c021a06c>] (cpu_pmu_enable_percpu_irq) from [<c02b1634>] (flush_smp_call_function_queue+0x88/0x178)
[<c02b1634>] (flush_smp_call_function_queue) from [<c0214dc0>] (handle_IPI+0x88/0x160)
[<c0214dc0>] (handle_IPI) from [<c0208930>] (gic_handle_irq+0x64/0x68)
[<c0208930>] (gic_handle_irq) from [<c0212d04>] (__irq_svc+0x44/0x5c)
Exception stack(0xe63ddea0 to 0xe63ddee8)
dea0: 00000001 00000001 00000000 c2f3b200 c16db380 c032d4a0 e63ddf40 60010013
dec0: 00000000 001fbfd4 00000100 00000000 00000001 e63ddee8 c0284770 c02a2e30
dee0: 20010013 ffffffff
[<c0212d04>] (__irq_svc) from [<c02a2e30>] (ktime_get_ts64+0x1c8/0x200)
[<c02a2e30>] (ktime_get_ts64) from [<c032d4a0>] (poll_select_set_timeout+0x60/0xa8)
[<c032d4a0>] (poll_select_set_timeout) from [<c032df64>] (SyS_select+0xa8/0x118)
[<c032df64>] (SyS_select) from [<c020e8e0>] (ret_fast_syscall+0x0/0x48)
---[ end trace 0bb583b46342da6f ]---
INFO: lockdep is turned off.
We don't really need to get the platform irq again when we're
enabling or disabling the per-cpu irq. Furthermore, we don't
really need to set and clear bits in the active_irqs bitmask
because that's only used in the non-percpu irq case to figure out
when the last CPU PMU has been disabled. Just pass the irq
directly to the enable/disable functions to clean all this up.
This should be slightly more efficient and also fix the
scheduling while atomic bug.
Fixes: bbd64559376f "ARM: perf: support percpu irqs for the CPU PMU"
Reported-by: Rob Clark <robdclark@gmail.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: stable@vger.kernel.org
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch')
 arch/arm/kernel/perf_event_cpu.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index e6a6edbec613..4bf4cce759fe 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -76,21 +76,15 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 
 static void cpu_pmu_enable_percpu_irq(void *data)
 {
-	struct arm_pmu *cpu_pmu = data;
-	struct platform_device *pmu_device = cpu_pmu->plat_device;
-	int irq = platform_get_irq(pmu_device, 0);
+	int irq = *(int *)data;
 
 	enable_percpu_irq(irq, IRQ_TYPE_NONE);
-	cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
 }
 
 static void cpu_pmu_disable_percpu_irq(void *data)
 {
-	struct arm_pmu *cpu_pmu = data;
-	struct platform_device *pmu_device = cpu_pmu->plat_device;
-	int irq = platform_get_irq(pmu_device, 0);
+	int irq = *(int *)data;
 
-	cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
 	disable_percpu_irq(irq);
 }
 
@@ -103,7 +97,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 
 	irq = platform_get_irq(pmu_device, 0);
 	if (irq >= 0 && irq_is_percpu(irq)) {
-		on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1);
+		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
 		free_percpu_irq(irq, &percpu_pmu);
 	} else {
 		for (i = 0; i < irqs; ++i) {
@@ -138,7 +132,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 			irq);
 			return err;
 		}
-		on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1);
+		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
 	} else {
 		for (i = 0; i < irqs; ++i) {
 			err = 0;
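For illustration only, here is a minimal, self-contained sketch of the pattern the patch adopts. The example_* names and standalone functions are hypothetical, not the driver code above: resolve the IRQ once via platform_get_irq() in process context, where taking a mutex is allowed, then hand only the resolved number to the cross-call callback, which runs in IPI (hardirq) context and must not sleep.

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/smp.h>

/* Runs on every CPU in IPI context: no sleeping, so no IRQ lookups here. */
static void example_enable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static int example_setup(struct platform_device *pdev)
{
	/* Process context: platform_get_irq() may sleep safely here. */
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/*
	 * wait=1 makes on_each_cpu() block until every CPU has run the
	 * callback, so passing the address of this stack variable is safe.
	 */
	on_each_cpu(example_enable_percpu_irq, &irq, 1);
	return 0;
}

The same reasoning covers the teardown path, where the patch passes &irq to cpu_pmu_disable_percpu_irq() before calling free_percpu_irq().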