diff options
author | Russell King <rmk+kernel@arm.linux.org.uk> | 2010-07-10 05:10:18 -0400 |
---|---|---|
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2010-07-10 05:53:13 -0400 |
commit | ac78884e6d89714d18b32b5b7d574116ecfb7c88 (patch) | |
tree | 4e2d9d3106f10f5ce7ae3c9469444cab8ec75024 /arch/arm/kernel/entry-armv.S | |
parent | d9e38040ccf9eb06b9b41c393c512ceb23f51a7f (diff) |
ARM: lockdep: fix unannotated irqs-on
CPU: Testing write buffer coherency: ok
------------[ cut here ]------------
WARNING: at kernel/lockdep.c:3145 check_flags+0xcc/0x1dc()
Modules linked in:
[<c0035120>] (unwind_backtrace+0x0/0xf8) from [<c0355374>] (dump_stack+0x20/0x24)
[<c0355374>] (dump_stack+0x20/0x24) from [<c0060c04>] (warn_slowpath_common+0x58/0x70)
[<c0060c04>] (warn_slowpath_common+0x58/0x70) from [<c0060c3c>] (warn_slowpath_null+0x20/0x24)
[<c0060c3c>] (warn_slowpath_null+0x20/0x24) from [<c008f224>] (check_flags+0xcc/0x1dc)
[<c008f224>] (check_flags+0xcc/0x1dc) from [<c00945dc>] (lock_acquire+0x50/0x140)
[<c00945dc>] (lock_acquire+0x50/0x140) from [<c0358434>] (_raw_spin_lock+0x50/0x88)
[<c0358434>] (_raw_spin_lock+0x50/0x88) from [<c00fd114>] (set_task_comm+0x2c/0x60)
[<c00fd114>] (set_task_comm+0x2c/0x60) from [<c007e184>] (kthreadd+0x30/0x108)
[<c007e184>] (kthreadd+0x30/0x108) from [<c0030104>] (kernel_thread_exit+0x0/0x8)
---[ end trace 1b75b31a2719ed1c ]---
possible reason: unannotated irqs-on.
irq event stamp: 3
hardirqs last enabled at (2): [<c0059bb0>] finish_task_switch+0x48/0xb0
hardirqs last disabled at (3): [<c002f0b0>] ret_slow_syscall+0xc/0x1c
softirqs last enabled at (0): [<c005f3e0>] copy_process+0x394/0xe5c
softirqs last disabled at (0): [<(null)>] (null)
Fix this by ensuring that the lockdep interrupt state is manipulated in
the appropriate places. We essentially treat userspace as an entirely
separate environment which isn't relevant to lockdep (lockdep doesn't
monitor userspace.) We don't tell lockdep that IRQs will be enabled
in that environment.
Instead, when creating kernel threads (which is a rare event compared
to entering/leaving userspace) we have to update the lockdep state. Do
this by starting threads with IRQs disabled, and in the kthread helper,
tell lockdep that IRQs are enabled, and enable them.
This provides lockdep with a consistent view of the current IRQ state
in kernel space.
This also reverts portions of 0d928b0b616d1c5c5fe76019a87cba171ca91633,
which didn't fix the problem.
Tested-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/kernel/entry-armv.S')
-rw-r--r-- | arch/arm/kernel/entry-armv.S | 16 |
1 file changed, 6 insertions, 10 deletions
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 7ee48e7f8f31..3fd7861de4d1 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S | |||
@@ -162,8 +162,6 @@ ENDPROC(__und_invalid) | |||
162 | @ r4 - orig_r0 (see pt_regs definition in ptrace.h) | 162 | @ r4 - orig_r0 (see pt_regs definition in ptrace.h) |
163 | @ | 163 | @ |
164 | stmia r5, {r0 - r4} | 164 | stmia r5, {r0 - r4} |
165 | |||
166 | asm_trace_hardirqs_off | ||
167 | .endm | 165 | .endm |
168 | 166 | ||
169 | .align 5 | 167 | .align 5 |
@@ -204,7 +202,7 @@ __dabt_svc: | |||
204 | @ | 202 | @ |
205 | @ IRQs off again before pulling preserved data off the stack | 203 | @ IRQs off again before pulling preserved data off the stack |
206 | @ | 204 | @ |
207 | disable_irq | 205 | disable_irq_notrace |
208 | 206 | ||
209 | @ | 207 | @ |
210 | @ restore SPSR and restart the instruction | 208 | @ restore SPSR and restart the instruction |
@@ -218,6 +216,9 @@ ENDPROC(__dabt_svc) | |||
218 | __irq_svc: | 216 | __irq_svc: |
219 | svc_entry | 217 | svc_entry |
220 | 218 | ||
219 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
220 | bl trace_hardirqs_off | ||
221 | #endif | ||
221 | #ifdef CONFIG_PREEMPT | 222 | #ifdef CONFIG_PREEMPT |
222 | get_thread_info tsk | 223 | get_thread_info tsk |
223 | ldr r8, [tsk, #TI_PREEMPT] @ get preempt count | 224 | ldr r8, [tsk, #TI_PREEMPT] @ get preempt count |
@@ -291,7 +292,7 @@ __und_svc: | |||
291 | @ | 292 | @ |
292 | @ IRQs off again before pulling preserved data off the stack | 293 | @ IRQs off again before pulling preserved data off the stack |
293 | @ | 294 | @ |
294 | 1: disable_irq | 295 | 1: disable_irq_notrace |
295 | 296 | ||
296 | @ | 297 | @ |
297 | @ restore SPSR and restart the instruction | 298 | @ restore SPSR and restart the instruction |
@@ -327,7 +328,7 @@ __pabt_svc: | |||
327 | @ | 328 | @ |
328 | @ IRQs off again before pulling preserved data off the stack | 329 | @ IRQs off again before pulling preserved data off the stack |
329 | @ | 330 | @ |
330 | disable_irq | 331 | disable_irq_notrace |
331 | 332 | ||
332 | @ | 333 | @ |
333 | @ restore SPSR and restart the instruction | 334 | @ restore SPSR and restart the instruction |
@@ -393,8 +394,6 @@ ENDPROC(__pabt_svc) | |||
393 | @ Clear FP to mark the first stack frame | 394 | @ Clear FP to mark the first stack frame |
394 | @ | 395 | @ |
395 | zero_fp | 396 | zero_fp |
396 | |||
397 | asm_trace_hardirqs_off | ||
398 | .endm | 397 | .endm |
399 | 398 | ||
400 | .macro kuser_cmpxchg_check | 399 | .macro kuser_cmpxchg_check |
@@ -465,9 +464,6 @@ __irq_usr: | |||
465 | THUMB( movne r0, #0 ) | 464 | THUMB( movne r0, #0 ) |
466 | THUMB( strne r0, [r0] ) | 465 | THUMB( strne r0, [r0] ) |
467 | #endif | 466 | #endif |
468 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
469 | bl trace_hardirqs_on | ||
470 | #endif | ||
471 | 467 | ||
472 | mov why, #0 | 468 | mov why, #0 |
473 | b ret_to_user | 469 | b ret_to_user |