-rw-r--r--  MAINTAINERS                   |  9
-rw-r--r--  arch/s390/kernel/entry.S      | 12
-rw-r--r--  arch/s390/kernel/entry64.S    |  9
-rw-r--r--  arch/x86/kernel/entry_32.S    |  4
-rw-r--r--  arch/x86/kernel/entry_64.S    | 23
-rw-r--r--  arch/x86/kernel/kprobes_32.c  |  7
-rw-r--r--  arch/x86/kernel/kprobes_64.c  |  7
-rw-r--r--  arch/x86/lib/thunk_64.S       |  4
-rw-r--r--  fs/inode.c                    | 24
-rw-r--r--  fs/jbd/transaction.c          |  9
-rw-r--r--  include/asm-x86/irqflags_32.h | 13
-rw-r--r--  include/asm-x86/irqflags_64.h | 14
-rw-r--r--  include/linux/fs.h            |  6
-rw-r--r--  include/linux/jbd.h           |  5
-rw-r--r--  include/linux/lockdep.h       |  9
-rw-r--r--  include/linux/mutex.h         |  9
-rw-r--r--  include/linux/rcupdate.h      | 14
-rw-r--r--  kernel/lockdep.c              | 26
-rw-r--r--  kernel/lockdep_proc.c         | 61
-rw-r--r--  kernel/mutex.c                | 35
-rw-r--r--  kernel/rcupdate.c             |  8

21 files changed, 251 insertions, 57 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 12cee3da2625..c7355e7f09ff 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2404,6 +2404,15 @@ M: khali@linux-fr.org
 L: lm-sensors@lm-sensors.org
 S: Maintained
 
+LOCKDEP AND LOCKSTAT
+P: Peter Zijlstra
+M: peterz@infradead.org
+P: Ingo Molnar
+M: mingo@redhat.com
+L: linux-kernel@vger.kernel.org
+T: git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep.git
+S: Maintained
+
 LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks)
 P: Richard Russon (FlatCap)
 M: ldm@flatcap.org
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index f3bceb165321..139ca153d5cc 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -68,9 +68,15 @@ STACK_SIZE = 1 << STACK_SHIFT
         l       %r1,BASED(.Ltrace_irq_off)
         basr    %r14,%r1
         .endm
+
+        .macro  LOCKDEP_SYS_EXIT
+        l       %r1,BASED(.Llockdep_sys_exit)
+        basr    %r14,%r1
+        .endm
 #else
 #define TRACE_IRQS_ON
 #define TRACE_IRQS_OFF
+#define LOCKDEP_SYS_EXIT
 #endif
 
 /*
@@ -260,6 +266,7 @@ sysc_return:
         bno     BASED(sysc_leave)
         tm      __TI_flags+3(%r9),_TIF_WORK_SVC
         bnz     BASED(sysc_work)        # there is work to do (signals etc.)
+        LOCKDEP_SYS_EXIT
 sysc_leave:
         RESTORE_ALL __LC_RETURN_PSW,1
 
@@ -283,6 +290,7 @@ sysc_work:
         bo      BASED(sysc_restart)
         tm      __TI_flags+3(%r9),_TIF_SINGLE_STEP
         bo      BASED(sysc_singlestep)
+        LOCKDEP_SYS_EXIT
         b       BASED(sysc_leave)
 
 #
@@ -572,6 +580,7 @@ io_return:
 #endif
         tm      __TI_flags+3(%r9),_TIF_WORK_INT
         bnz     BASED(io_work)          # there is work to do (signals etc.)
+        LOCKDEP_SYS_EXIT
 io_leave:
         RESTORE_ALL __LC_RETURN_PSW,0
 io_done:
@@ -618,6 +627,7 @@ io_work_loop:
         bo      BASED(io_reschedule)
         tm      __TI_flags+3(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
         bnz     BASED(io_sigpending)
+        LOCKDEP_SYS_EXIT
         b       BASED(io_leave)
 
 #
@@ -1040,6 +1050,8 @@ cleanup_io_leave_insn:
 .Ltrace_irq_on: .long trace_hardirqs_on
 .Ltrace_irq_off:
         .long trace_hardirqs_off
+.Llockdep_sys_exit:
+        .long lockdep_sys_exit
 #endif
 .Lcritical_start:
         .long __critical_start + 0x80000000
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 9c0d5cc8269d..05e26d1fdf40 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -66,9 +66,14 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
         .macro  TRACE_IRQS_OFF
         brasl   %r14,trace_hardirqs_off
         .endm
+
+        .macro  LOCKDEP_SYS_EXIT
+        brasl   %r14,lockdep_sys_exit
+        .endm
 #else
 #define TRACE_IRQS_ON
 #define TRACE_IRQS_OFF
+#define LOCKDEP_SYS_EXIT
 #endif
 
         .macro  STORE_TIMER lc_offset
@@ -255,6 +260,7 @@ sysc_return:
         jno     sysc_leave
         tm      __TI_flags+7(%r9),_TIF_WORK_SVC
         jnz     sysc_work       # there is work to do (signals etc.)
+        LOCKDEP_SYS_EXIT
 sysc_leave:
         RESTORE_ALL __LC_RETURN_PSW,1
 
@@ -278,6 +284,7 @@ sysc_work:
         jo      sysc_restart
         tm      __TI_flags+7(%r9),_TIF_SINGLE_STEP
         jo      sysc_singlestep
+        LOCKDEP_SYS_EXIT
         j       sysc_leave
 
 #
@@ -558,6 +565,7 @@ io_return:
 #endif
         tm      __TI_flags+7(%r9),_TIF_WORK_INT
         jnz     io_work         # there is work to do (signals etc.)
+        LOCKDEP_SYS_EXIT
 io_leave:
         RESTORE_ALL __LC_RETURN_PSW,0
 io_done:
@@ -605,6 +613,7 @@ io_work_loop:
         jo      io_reschedule
         tm      __TI_flags+7(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
         jnz     io_sigpending
+        LOCKDEP_SYS_EXIT
         j       io_leave
 
 #
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 290b7bc82da3..8099fea0a72f 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -251,6 +251,7 @@ check_userspace:
         jb resume_kernel                # not returning to v8086 or userspace
 
 ENTRY(resume_userspace)
+        LOCKDEP_SYS_EXIT
         DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
                                         # setting need_resched or sigpending
                                         # between sampling and the iret
@@ -338,6 +339,7 @@ sysenter_past_esp:
         jae syscall_badsys
         call *sys_call_table(,%eax,4)
         movl %eax,PT_EAX(%esp)
+        LOCKDEP_SYS_EXIT
         DISABLE_INTERRUPTS(CLBR_ANY)
         TRACE_IRQS_OFF
         movl TI_flags(%ebp), %ecx
@@ -377,6 +379,7 @@ syscall_call:
         call *sys_call_table(,%eax,4)
         movl %eax,PT_EAX(%esp)          # store the return value
syscall_exit:
+        LOCKDEP_SYS_EXIT
         DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
                                         # setting need_resched or sigpending
                                         # between sampling and the iret
@@ -467,6 +470,7 @@ work_pending:
         jz work_notifysig
work_resched:
         call schedule
+        LOCKDEP_SYS_EXIT
         DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
                                         # setting need_resched or sigpending
                                         # between sampling and the iret
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1d232e5f5658..f1cacd4897f7 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -244,6 +244,7 @@ ret_from_sys_call:
         movl $_TIF_ALLWORK_MASK,%edi
         /* edi: flagmask */
sysret_check:
+        LOCKDEP_SYS_EXIT
         GET_THREAD_INFO(%rcx)
         cli
         TRACE_IRQS_OFF
@@ -333,6 +334,7 @@ int_ret_from_sys_call:
         movl $_TIF_ALLWORK_MASK,%edi
         /* edi: mask to check */
int_with_check:
+        LOCKDEP_SYS_EXIT_IRQ
         GET_THREAD_INFO(%rcx)
         movl threadinfo_flags(%rcx),%edx
         andl %edi,%edx
@@ -544,11 +546,13 @@ exit_intr:
retint_with_reschedule:
         movl $_TIF_WORK_MASK,%edi
retint_check:
+        LOCKDEP_SYS_EXIT_IRQ
         movl threadinfo_flags(%rcx),%edx
         andl %edi,%edx
         CFI_REMEMBER_STATE
         jnz  retint_careful
-retint_swapgs:
+
+retint_swapgs:          /* return to user-space */
         /*
          * The iretq could re-enable interrupts:
          */
@@ -557,7 +561,7 @@ retint_swapgs:
         swapgs
         jmp restore_args
 
-retint_restore_args:
+retint_restore_args:    /* return to kernel space */
         cli
         /*
          * The iretq could re-enable interrupts:
@@ -866,26 +870,21 @@ error_sti:
         movq ORIG_RAX(%rsp),%rsi        /* get error code */
         movq $-1,ORIG_RAX(%rsp)
         call *%rax
         /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
         movl %ebx,%eax
         RESTORE_REST
         cli
         TRACE_IRQS_OFF
         GET_THREAD_INFO(%rcx)
         testl %eax,%eax
         jne  retint_kernel
+        LOCKDEP_SYS_EXIT_IRQ
         movl threadinfo_flags(%rcx),%edx
         movl $_TIF_WORK_MASK,%edi
         andl %edi,%edx
         jnz  retint_careful
-        /*
-         * The iret might restore flags:
-         */
-        TRACE_IRQS_IRETQ
-        swapgs
-        RESTORE_ARGS 0,8,0
-        jmp iret_label
+        jmp  retint_swapgs
         CFI_ENDPROC
 
error_kernelspace:
diff --git a/arch/x86/kernel/kprobes_32.c b/arch/x86/kernel/kprobes_32.c
index c2d03e96ae9f..e7d0d3c2ef64 100644
--- a/arch/x86/kernel/kprobes_32.c
+++ b/arch/x86/kernel/kprobes_32.c
@@ -557,6 +557,12 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
 
         resume_execution(cur, regs, kcb);
         regs->eflags |= kcb->kprobe_saved_eflags;
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+        if (raw_irqs_disabled_flags(regs->eflags))
+                trace_hardirqs_off();
+        else
+                trace_hardirqs_on();
+#endif
 
         /*Restore back the original saved kprobes variables and continue. */
         if (kcb->kprobe_status == KPROBE_REENTER) {
@@ -694,6 +700,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
         memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
                MIN_STACK_SIZE(addr));
         regs->eflags &= ~IF_MASK;
+        trace_hardirqs_off();
         regs->eip = (unsigned long)(jp->entry);
         return 1;
 }
diff --git a/arch/x86/kernel/kprobes_64.c b/arch/x86/kernel/kprobes_64.c
index 1df17a0ec0c9..62e28e52d784 100644
--- a/arch/x86/kernel/kprobes_64.c
+++ b/arch/x86/kernel/kprobes_64.c
@@ -544,6 +544,12 @@ int __kprobes post_kprobe_handler(struct pt_regs *regs)
 
         resume_execution(cur, regs, kcb);
         regs->eflags |= kcb->kprobe_saved_rflags;
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+        if (raw_irqs_disabled_flags(regs->eflags))
+                trace_hardirqs_off();
+        else
+                trace_hardirqs_on();
+#endif
 
         /* Restore the original saved kprobes variables and continue. */
         if (kcb->kprobe_status == KPROBE_REENTER) {
@@ -684,6 +690,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
         memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
                MIN_STACK_SIZE(addr));
         regs->eflags &= ~IF_MASK;
+        trace_hardirqs_off();
         regs->rip = (unsigned long)(jp->entry);
         return 1;
 }
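Both kprobes files get the same treatment: once the saved flags image is written back into regs->eflags, lockdep's view of the hardirq state may no longer match reality, so it is resynchronized; likewise, a jprobe that clears IF must report that to lockdep. The general idiom for any code that restores a saved flags word behind lockdep's back looks like this (an illustrative sketch, not part of the patch):

        /* after writing a saved flags image back into regs->eflags: */
        if (raw_irqs_disabled_flags(regs->eflags))
                trace_hardirqs_off();   /* tell lockdep irqs are now off */
        else
                trace_hardirqs_on();    /* tell lockdep irqs are now on */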
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
index 55e586d352d3..6ea73f3de567 100644
--- a/arch/x86/lib/thunk_64.S
+++ b/arch/x86/lib/thunk_64.S
@@ -50,6 +50,10 @@
         thunk trace_hardirqs_on_thunk,trace_hardirqs_on
         thunk trace_hardirqs_off_thunk,trace_hardirqs_off
 #endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+        thunk lockdep_sys_exit_thunk,lockdep_sys_exit
+#endif
 
         /* SAVE_ARGS below is used only for the .cfi directives it contains. */
         CFI_STARTPROC
diff --git a/fs/inode.c b/fs/inode.c
index 29f5068f819b..f97de0aeb3b6 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -142,6 +142,15 @@ static struct inode *alloc_inode(struct super_block *sb)
                         return NULL;
         }
 
+        spin_lock_init(&inode->i_lock);
+        lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
+
+        mutex_init(&inode->i_mutex);
+        lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);
+
+        init_rwsem(&inode->i_alloc_sem);
+        lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);
+
         mapping->a_ops = &empty_aops;
         mapping->host = inode;
         mapping->flags = 0;
@@ -190,8 +199,6 @@ void inode_init_once(struct inode *inode)
         INIT_HLIST_NODE(&inode->i_hash);
         INIT_LIST_HEAD(&inode->i_dentry);
         INIT_LIST_HEAD(&inode->i_devices);
-        mutex_init(&inode->i_mutex);
-        init_rwsem(&inode->i_alloc_sem);
         INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
         rwlock_init(&inode->i_data.tree_lock);
         spin_lock_init(&inode->i_data.i_mmap_lock);
@@ -199,7 +206,6 @@ void inode_init_once(struct inode *inode)
         spin_lock_init(&inode->i_data.private_lock);
         INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
         INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
-        spin_lock_init(&inode->i_lock);
         i_size_ordered_init(inode);
 #ifdef CONFIG_INOTIFY
         INIT_LIST_HEAD(&inode->inotify_watches);
@@ -561,6 +567,18 @@ EXPORT_SYMBOL(new_inode);
 
 void unlock_new_inode(struct inode *inode)
 {
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+        struct file_system_type *type = inode->i_sb->s_type;
+        /*
+         * ensure nobody is actually holding i_mutex
+         */
+        mutex_destroy(&inode->i_mutex);
+        mutex_init(&inode->i_mutex);
+        if (inode->i_mode & S_IFDIR)
+                lockdep_set_class(&inode->i_mutex, &type->i_mutex_dir_key);
+        else
+                lockdep_set_class(&inode->i_mutex, &type->i_mutex_key);
+#endif
         /*
          * This is special!  We do not need the spinlock
          * when clearing I_LOCK, because we're guaranteed
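Moving this initialization from inode_init_once() into alloc_inode() puts the lock_class_key members added to struct file_system_type (see the include/linux/fs.h hunk below) in scope: each registered filesystem type supplies distinct key addresses, so lockdep sorts every filesystem's inode locks into classes of their own, and unlock_new_inode() further splits directory i_mutex from non-directory i_mutex. The underlying idiom, sketched with hypothetical names:

        /* one static key per class; its address is the class identity */
        static struct lock_class_key myfs_i_mutex_key;

        mutex_init(&inode->i_mutex);
        lockdep_set_class(&inode->i_mutex, &myfs_i_mutex_key);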
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 772b6531a2a2..8df5bac0b7a5 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -233,6 +233,8 @@ out:
         return ret;
 }
 
+static struct lock_class_key jbd_handle_key;
+
 /* Allocate a new handle.  This should probably be in a slab... */
 static handle_t *new_handle(int nblocks)
 {
@@ -243,6 +245,8 @@ static handle_t *new_handle(int nblocks)
         handle->h_buffer_credits = nblocks;
         handle->h_ref = 1;
 
+        lockdep_init_map(&handle->h_lockdep_map, "jbd_handle", &jbd_handle_key, 0);
+
         return handle;
 }
 
@@ -286,6 +290,9 @@ handle_t *journal_start(journal_t *journal, int nblocks)
                 current->journal_info = NULL;
                 handle = ERR_PTR(err);
         }
+
+        lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+
         return handle;
 }
 
@@ -1411,6 +1418,8 @@ int journal_stop(handle_t *handle)
                 spin_unlock(&journal->j_state_lock);
         }
 
+        lock_release(&handle->h_lockdep_map, 1, _THIS_IP_);
+
         jbd_free_handle(handle);
         return err;
 }
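These hunks teach lockdep to treat a jbd handle as a lock-like object held from journal_start() to journal_stop(), so deadlock-prone orderings between transactions and real locks become visible. The same three-step recipe can annotate any resource with acquire/release semantics; a sketch with hypothetical names, where the acquire arguments mirror the jbd usage (read == 2 marks the acquisition as recursive, since handles may nest):

        static struct lock_class_key my_res_key;
        struct lockdep_map my_res_map;

        lockdep_init_map(&my_res_map, "my_res", &my_res_key, 0);

        lock_acquire(&my_res_map, 0, 0, 0, 2, _THIS_IP_);  /* on "acquire" */
        /* ... use the resource ... */
        lock_release(&my_res_map, 1, _THIS_IP_);           /* on "release" */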
diff --git a/include/asm-x86/irqflags_32.h b/include/asm-x86/irqflags_32.h
index eff8585cb741..d058b04e0083 100644
--- a/include/asm-x86/irqflags_32.h
+++ b/include/asm-x86/irqflags_32.h
@@ -160,4 +160,17 @@ static inline int raw_irqs_disabled(void)
 # define TRACE_IRQS_OFF
 #endif
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCKDEP_SYS_EXIT                       \
+        pushl %eax;                             \
+        pushl %ecx;                             \
+        pushl %edx;                             \
+        call lockdep_sys_exit;                  \
+        popl %edx;                              \
+        popl %ecx;                              \
+        popl %eax;
+#else
+# define LOCKDEP_SYS_EXIT
+#endif
+
 #endif
diff --git a/include/asm-x86/irqflags_64.h b/include/asm-x86/irqflags_64.h
index 86e70fe23659..5341ea1f815a 100644
--- a/include/asm-x86/irqflags_64.h
+++ b/include/asm-x86/irqflags_64.h
@@ -137,6 +137,20 @@ static inline void halt(void)
 #  define TRACE_IRQS_ON
 #  define TRACE_IRQS_OFF
 # endif
+# ifdef CONFIG_DEBUG_LOCK_ALLOC
+#  define LOCKDEP_SYS_EXIT      call lockdep_sys_exit_thunk
+#  define LOCKDEP_SYS_EXIT_IRQ                  \
+        TRACE_IRQS_ON;                          \
+        sti;                                    \
+        SAVE_REST;                              \
+        LOCKDEP_SYS_EXIT;                       \
+        RESTORE_REST;                           \
+        cli;                                    \
+        TRACE_IRQS_OFF;
+# else
+#  define LOCKDEP_SYS_EXIT
+#  define LOCKDEP_SYS_EXIT_IRQ
+# endif
 #endif
 
 #endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 16421f662a7a..6d760f1ad875 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1302,8 +1302,14 @@ struct file_system_type {
         struct module *owner;
         struct file_system_type * next;
         struct list_head fs_supers;
+
         struct lock_class_key s_lock_key;
         struct lock_class_key s_umount_key;
+
+        struct lock_class_key i_lock_key;
+        struct lock_class_key i_mutex_key;
+        struct lock_class_key i_mutex_dir_key;
+        struct lock_class_key i_alloc_sem_key;
 };
 
 extern int get_sb_bdev(struct file_system_type *fs_type,
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 452737551260..700a93b79189 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -30,6 +30,7 @@
 #include <linux/bit_spinlock.h>
 #include <linux/mutex.h>
 #include <linux/timer.h>
+#include <linux/lockdep.h>
 
 #include <asm/semaphore.h>
 #endif
@@ -396,6 +397,10 @@ struct handle_s
         unsigned int    h_sync:         1;      /* sync-on-close */
         unsigned int    h_jdata:        1;      /* force data journaling */
         unsigned int    h_aborted:      1;      /* fatal error on handle */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+        struct lockdep_map      h_lockdep_map;
+#endif
 };
 
 
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 0e843bf65877..f6279f68a827 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -238,6 +238,7 @@ extern void lockdep_info(void);
 extern void lockdep_reset(void);
 extern void lockdep_reset_lock(struct lockdep_map *lock);
 extern void lockdep_free_key_range(void *start, unsigned long size);
+extern void lockdep_sys_exit(void);
 
 extern void lockdep_off(void);
 extern void lockdep_on(void);
@@ -252,6 +253,13 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
                              struct lock_class_key *key, int subclass);
 
 /*
+ * To initialize a lockdep_map statically use this macro.
+ * Note that _name must not be NULL.
+ */
+#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
+        { .name = (_name), .key = (void *)(_key), }
+
+/*
  * Reinitialize a lock key - for cases where there is special locking or
  * special initialization of locks so that the validator gets the scope
  * of dependencies wrong: they are either too broad (they need a class-split)
@@ -317,6 +325,7 @@ static inline void lockdep_on(void)
 # define INIT_LOCKDEP
 # define lockdep_reset()                do { debug_locks = 1; } while (0)
 # define lockdep_free_key_range(start, size)    do { } while (0)
+# define lockdep_sys_exit()             do { } while (0)
 /*
  * The class key takes no space if lockdep is disabled:
  */
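STATIC_LOCKDEP_MAP_INIT() lets a lockdep_map live in static storage without a runtime lockdep_init_map() call; kernel/rcupdate.c below uses it for rcu_lock_map. The pattern, shown here with hypothetical names:

        static struct lock_class_key my_key;
        static struct lockdep_map my_map =
                STATIC_LOCKDEP_MAP_INIT("my_map", &my_key);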
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 0d50ea3df689..6a735c72f23f 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -120,14 +120,17 @@ static inline int fastcall mutex_is_locked(struct mutex *lock)
  * See kernel/mutex.c for detailed documentation of these APIs.
  * Also see Documentation/mutex-design.txt.
  */
-extern void fastcall mutex_lock(struct mutex *lock);
-extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
                                         unsigned int subclass);
+
+#define mutex_lock(lock) mutex_lock_nested(lock, 0)
+#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
 #else
+extern void fastcall mutex_lock(struct mutex *lock);
+extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
+
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
 #endif
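With CONFIG_DEBUG_LOCK_ALLOC enabled, mutex_lock() and mutex_lock_interruptible() become plain macros onto their _nested variants with subclass 0, leaving a single entry point into kernel/mutex.c. A call site therefore compiles as follows (with a hypothetical mutex m):

        mutex_lock(&m);
        /* expands, under CONFIG_DEBUG_LOCK_ALLOC, to: */
        mutex_lock_nested(&m, 0);

This is what lets _RET_IP_, captured once in mutex_lock_nested() (see the kernel/mutex.c hunks below), point at the real caller.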
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index fe17d7d750c2..76c1a530edc5 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -41,6 +41,7 @@
 #include <linux/percpu.h>
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
+#include <linux/lockdep.h>
 
 /**
  * struct rcu_head - callback structure for use with RCU
@@ -133,6 +134,15 @@ static inline void rcu_bh_qsctr_inc(int cpu)
 extern int rcu_pending(int cpu);
 extern int rcu_needs_cpu(int cpu);
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern struct lockdep_map rcu_lock_map;
+# define rcu_read_acquire()     lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
+# define rcu_read_release()     lock_release(&rcu_lock_map, 1, _THIS_IP_)
+#else
+# define rcu_read_acquire()     do { } while (0)
+# define rcu_read_release()     do { } while (0)
+#endif
+
 /**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
  *
@@ -166,6 +176,7 @@ extern int rcu_needs_cpu(int cpu);
         do { \
                 preempt_disable(); \
                 __acquire(RCU); \
+                rcu_read_acquire(); \
         } while(0)
 
 /**
@@ -175,6 +186,7 @@ extern int rcu_needs_cpu(int cpu);
  */
 #define rcu_read_unlock() \
         do { \
+                rcu_read_release(); \
                 __release(RCU); \
                 preempt_enable(); \
         } while(0)
@@ -204,6 +216,7 @@ extern int rcu_needs_cpu(int cpu);
         do { \
                 local_bh_disable(); \
                 __acquire(RCU_BH); \
+                rcu_read_acquire(); \
         } while(0)
 
 /*
@@ -213,6 +226,7 @@ extern int rcu_needs_cpu(int cpu);
  */
 #define rcu_read_unlock_bh() \
         do { \
+                rcu_read_release(); \
                 __release(RCU_BH); \
                 local_bh_enable(); \
         } while(0)
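rcu_read_lock() and its _bh variant now register with lockdep as a recursive read acquisition of the global rcu_lock_map (read == 2, since RCU readers nest freely), and the unlock paths release it, so an unbalanced RCU read-side critical section is tracked like any other held lock. Combined with lockdep_sys_exit (kernel/lockdep.c below), this catches bugs of the following hypothetical shape:

        static long buggy_syscall(void)
        {
                rcu_read_lock();        /* acquires rcu_lock_map for lockdep */
                return 0;               /* missing rcu_read_unlock(): flagged
                                         * on the return to user space */
        }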
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 734da579ad13..a6f1ee9c92d9 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1521,7 +1521,7 @@ cache_hit:
 }
 
 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
-                struct held_lock *hlock, int chain_head)
+                struct held_lock *hlock, int chain_head, u64 chain_key)
 {
         /*
          * Trylock needs to maintain the stack of held locks, but it
@@ -1534,7 +1534,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
          * graph_lock for us)
          */
         if (!hlock->trylock && (hlock->check == 2) &&
-            lookup_chain_cache(curr->curr_chain_key, hlock->class)) {
+            lookup_chain_cache(chain_key, hlock->class)) {
                 /*
                  * Check whether last held lock:
                  *
@@ -1576,7 +1576,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
 #else
 static inline int validate_chain(struct task_struct *curr,
                 struct lockdep_map *lock, struct held_lock *hlock,
-                int chain_head)
+                int chain_head, u64 chain_key)
 {
         return 1;
 }
@@ -2450,11 +2450,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                 chain_head = 1;
         }
         chain_key = iterate_chain_key(chain_key, id);
-        curr->curr_chain_key = chain_key;
 
-        if (!validate_chain(curr, lock, hlock, chain_head))
+        if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
                 return 0;
 
+        curr->curr_chain_key = chain_key;
         curr->lockdep_depth++;
         check_chain_key(curr);
 #ifdef CONFIG_DEBUG_LOCKDEP
@@ -3199,3 +3199,19 @@ void debug_show_held_locks(struct task_struct *task)
 }
 
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
+
+void lockdep_sys_exit(void)
+{
+        struct task_struct *curr = current;
+
+        if (unlikely(curr->lockdep_depth)) {
+                if (!debug_locks_off())
+                        return;
+                printk("\n================================================\n");
+                printk(  "[ BUG: lock held when returning to user space! ]\n");
+                printk(  "------------------------------------------------\n");
+                printk("%s/%d is leaving the kernel with locks still held!\n",
+                                curr->comm, curr->pid);
+                lockdep_print_held_locks(curr);
+        }
+}
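Given the printk() calls above, a task heading back to user space with locks still held now disables lockdep and produces a report of this shape (process name and pid illustrative):

        ================================================
        [ BUG: lock held when returning to user space! ]
        ------------------------------------------------
        foo/1234 is leaving the kernel with locks still held!

followed by the usual held-lock dump from lockdep_print_held_locks(). The validate_chain() changes in the same file are a supporting cleanup: curr->curr_chain_key is committed only after validation succeeds, so a rejected acquisition no longer leaves a stale chain key behind.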
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index c851b2dcc685..8a135bd163c2 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -25,28 +25,38 @@
 
 static void *l_next(struct seq_file *m, void *v, loff_t *pos)
 {
-        struct lock_class *class = v;
+        struct lock_class *class;
 
         (*pos)++;
 
-        if (class->lock_entry.next != &all_lock_classes)
-                class = list_entry(class->lock_entry.next, struct lock_class,
-                                  lock_entry);
-        else
-                class = NULL;
-        m->private = class;
+        if (v == SEQ_START_TOKEN)
+                class = m->private;
+        else {
+                class = v;
+
+                if (class->lock_entry.next != &all_lock_classes)
+                        class = list_entry(class->lock_entry.next,
+                                           struct lock_class, lock_entry);
+                else
+                        class = NULL;
+        }
 
         return class;
 }
 
 static void *l_start(struct seq_file *m, loff_t *pos)
 {
-        struct lock_class *class = m->private;
+        struct lock_class *class;
+        loff_t i = 0;
 
-        if (&class->lock_entry == all_lock_classes.next)
-                seq_printf(m, "all lock classes:\n");
+        if (*pos == 0)
+                return SEQ_START_TOKEN;
 
-        return class;
+        list_for_each_entry(class, &all_lock_classes, lock_entry) {
+                if (++i == *pos)
+                        return class;
+        }
+        return NULL;
 }
 
 static void l_stop(struct seq_file *m, void *v)
@@ -101,10 +111,15 @@ static void print_name(struct seq_file *m, struct lock_class *class)
 static int l_show(struct seq_file *m, void *v)
 {
         unsigned long nr_forward_deps, nr_backward_deps;
-        struct lock_class *class = m->private;
+        struct lock_class *class = v;
         struct lock_list *entry;
         char c1, c2, c3, c4;
 
+        if (v == SEQ_START_TOKEN) {
+                seq_printf(m, "all lock classes:\n");
+                return 0;
+        }
+
         seq_printf(m, "%p", class->key);
 #ifdef CONFIG_DEBUG_LOCKDEP
         seq_printf(m, " OPS:%8ld", class->ops);
@@ -523,10 +538,11 @@ static void *ls_start(struct seq_file *m, loff_t *pos)
 {
         struct lock_stat_seq *data = m->private;
 
-        if (data->iter == data->stats)
-                seq_header(m);
+        if (*pos == 0)
+                return SEQ_START_TOKEN;
 
-        if (data->iter == data->iter_end)
+        data->iter = data->stats + *pos;
+        if (data->iter >= data->iter_end)
                 data->iter = NULL;
 
         return data->iter;
@@ -538,8 +554,13 @@ static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
 
         (*pos)++;
 
-        data->iter = v;
-        data->iter++;
+        if (v == SEQ_START_TOKEN)
+                data->iter = data->stats;
+        else {
+                data->iter = v;
+                data->iter++;
+        }
+
         if (data->iter == data->iter_end)
                 data->iter = NULL;
 
@@ -552,9 +573,11 @@ static void ls_stop(struct seq_file *m, void *v)
 
 static int ls_show(struct seq_file *m, void *v)
 {
-        struct lock_stat_seq *data = m->private;
+        if (v == SEQ_START_TOKEN)
+                seq_header(m);
+        else
+                seq_stats(m, v);
 
-        seq_stats(m, data->iter);
         return 0;
 }
 
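Both /proc iterators are converted to the standard seq_file SEQ_START_TOKEN idiom: ->start() returns the token at *pos == 0, ->show() prints the header when handed the token, and ->next() maps the token to the first real record. In outline (a generic sketch, with find_element()/print_element() as hypothetical helpers):

        static void *x_start(struct seq_file *m, loff_t *pos)
        {
                if (*pos == 0)
                        return SEQ_START_TOKEN; /* slot 0 is the header */
                return find_element(*pos);
        }

        static int x_show(struct seq_file *m, void *v)
        {
                if (v == SEQ_START_TOKEN)
                        seq_puts(m, "header:\n");
                else
                        print_element(m, v);
                return 0;
        }

The point of the rework is correctness under chunked reads: ->start() at a given *pos now always lands on the same record, instead of trusting whatever iterator state a previous read left cached in m->private.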
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 691b86564dd9..d7fe50cc556f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -51,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 
 EXPORT_SYMBOL(__mutex_init);
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * We split the mutex lock/unlock logic into separate fastpath and
  * slowpath functions, to reduce the register pressure on the fastpath.
@@ -92,6 +93,7 @@ void inline fastcall __sched mutex_lock(struct mutex *lock)
 }
 
 EXPORT_SYMBOL(mutex_lock);
+#endif
 
 static void fastcall noinline __sched
 __mutex_unlock_slowpath(atomic_t *lock_count);
@@ -122,7 +124,8 @@ EXPORT_SYMBOL(mutex_unlock);
  * Lock a mutex (possibly interruptible), slowpath:
  */
 static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
+__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+                unsigned long ip)
 {
         struct task_struct *task = current;
         struct mutex_waiter waiter;
@@ -132,7 +135,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
         spin_lock_mutex(&lock->wait_lock, flags);
 
         debug_mutex_lock_common(lock, &waiter);
-        mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+        mutex_acquire(&lock->dep_map, subclass, 0, ip);
         debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
         /* add waiting tasks to the end of the waitqueue (FIFO): */
@@ -143,7 +146,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
         if (old_val == 1)
                 goto done;
 
-        lock_contended(&lock->dep_map, _RET_IP_);
+        lock_contended(&lock->dep_map, ip);
 
         for (;;) {
                 /*
@@ -166,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
                 if (unlikely(state == TASK_INTERRUPTIBLE &&
                                                 signal_pending(task))) {
                         mutex_remove_waiter(lock, &waiter, task_thread_info(task));
-                        mutex_release(&lock->dep_map, 1, _RET_IP_);
+                        mutex_release(&lock->dep_map, 1, ip);
                         spin_unlock_mutex(&lock->wait_lock, flags);
 
                         debug_mutex_free_waiter(&waiter);
@@ -197,20 +200,12 @@ done:
         return 0;
 }
 
-static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count)
-{
-        struct mutex *lock = container_of(lock_count, struct mutex, count);
-
-        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
-}
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
         might_sleep();
-        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
+        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -219,7 +214,7 @@ int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
         might_sleep();
-        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass);
+        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -271,6 +266,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
         __mutex_unlock_common_slowpath(lock_count, 1);
 }
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * Here come the less common (and hence less performance-critical) APIs:
  * mutex_lock_interruptible() and mutex_trylock().
@@ -298,13 +294,22 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
+static void fastcall noinline __sched
+__mutex_lock_slowpath(atomic_t *lock_count)
+{
+        struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
+}
+
 static int fastcall noinline __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
         struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
+        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
 }
+#endif
 
 /*
  * Spinlock based trylock, we take the spinlock and check whether we
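Threading the caller's instruction pointer through __mutex_lock_common() pairs with the include/linux/mutex.h change above: since mutex_lock() now enters through mutex_lock_nested(), _RET_IP_ is evaluated exactly one call above the slowpath and handed down as ip, so mutex_acquire(), lock_contended() and mutex_release() all attribute events to the real call site in /proc/lock_stat. The call flow, roughly:

        mutex_lock(&m)                          /* macro, see mutex.h */
          -> mutex_lock_nested(&m, 0)           /* _RET_IP_ = actual caller */
            -> __mutex_lock_common(&m, TASK_UNINTERRUPTIBLE, 0, ip)
              -> lock_contended(&m.dep_map, ip) /* on contention */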
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 2c2dd8410dc4..130214f3d229 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -49,6 +49,14 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key rcu_lock_key;
+struct lockdep_map rcu_lock_map =
+        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
+
+EXPORT_SYMBOL_GPL(rcu_lock_map);
+#endif
+
 /* Definition for rcupdate control block. */
 static struct rcu_ctrlblk rcu_ctrlblk = {
         .cur = -300,