author		Linus Torvalds <torvalds@linux-foundation.org>	2009-09-11 16:17:24 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-11 16:17:24 -0400
commit		4e3408d9f71a70316ebe844c20ef0d7715281f84 (patch)
tree		365f67fbcbe8e047a5fbead3db5d2e7ac20b3618
parent		a66a50054e46ec2a03244bc14c48b9125fcd75a7 (diff)
parent		96910b6dc8a4fdb75e69f09f47b62d41743d36ba (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (32 commits)
locking, m68k/asm-offsets: Rename signal defines
locking: Inline spinlock code for all locking variants on s390
locking: Simplify spinlock inlining
locking: Allow arch-inlined spinlocks
locking: Move spinlock function bodies to header file
locking, m68k: Calculate thread_info offset with asm offset
locking, m68k/asm-offsets: Rename pt_regs offset defines
locking, sparc: Rename __spin_try_lock() and friends
locking, powerpc: Rename __spin_try_lock() and friends
lockdep: Remove recursion statistics
lockdep: Simplify lock_stat seqfile code
lockdep: Simplify lockdep_chains seqfile code
lockdep: Simplify lockdep seqfile code
lockdep: Fix missing entries in /proc/lock_chains
lockdep: Fix missing entry in /proc/lock_stat
lockdep: Fix memory usage info of BFS
lockdep: Reintroduce generation count to make BFS faster
lockdep: Deal with many similar locks
lockdep: Introduce lockdep_assert_held()
lockdep: Fix style nits
...
-rw-r--r--	arch/m68k/include/asm/entry_mm.h	|    4
-rw-r--r--	arch/m68k/include/asm/entry_no.h	|    8
-rw-r--r--	arch/m68k/include/asm/math-emu.h	|   20
-rw-r--r--	arch/m68k/include/asm/thread_info_mm.h	|   11
-rw-r--r--	arch/m68k/kernel/asm-offsets.c		|   39
-rw-r--r--	arch/m68k/kernel/entry.S		|   22
-rw-r--r--	arch/m68k/math-emu/fp_entry.S		|   38
-rw-r--r--	arch/powerpc/include/asm/spinlock.h	|   20
-rw-r--r--	arch/s390/include/asm/spinlock.h	|   29
-rw-r--r--	arch/sparc/include/asm/spinlock_32.h	|   12
-rw-r--r--	arch/sparc/include/asm/spinlock_64.h	|   28
-rw-r--r--	include/linux/lockdep.h			|   18
-rw-r--r--	include/linux/spinlock.h		|   64
-rw-r--r--	include/linux/spinlock_api_smp.h	|  394
-rw-r--r--	kernel/lockdep.c			|  792
-rw-r--r--	kernel/lockdep_internals.h		|    2
-rw-r--r--	kernel/lockdep_proc.c			|  128
-rw-r--r--	kernel/sched.c				|    2
-rw-r--r--	kernel/spinlock.c			|  230
19 files changed, 1209 insertions, 652 deletions
diff --git a/arch/m68k/include/asm/entry_mm.h b/arch/m68k/include/asm/entry_mm.h
index 5202f5a5b420..474125886218 100644
--- a/arch/m68k/include/asm/entry_mm.h
+++ b/arch/m68k/include/asm/entry_mm.h
@@ -46,7 +46,6 @@
 #define curptr a2
 
 LFLUSH_I_AND_D = 0x00000808
-LSIGTRAP = 5
 
 /* process bits for task_struct.ptrace */
 PT_TRACESYS_OFF = 3
@@ -118,9 +117,6 @@ PT_DTRACE_BIT = 2
 #define STR(X) STR1(X)
 #define STR1(X) #X
 
-#define PT_OFF_ORIG_D0	 0x24
-#define PT_OFF_FORMATVEC 0x32
-#define PT_OFF_SR	 0x2C
 #define SAVE_ALL_INT \
 	"clrl %%sp@-;"    /* stk_adj */ \
 	"pea -1:w;"       /* orig d0 = -1 */ \
diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h
index c2553d26273d..907ed03d792f 100644
--- a/arch/m68k/include/asm/entry_no.h
+++ b/arch/m68k/include/asm/entry_no.h
@@ -72,8 +72,8 @@ LENOSYS = 38
 	lea	%sp@(-32),%sp	/* space for 8 regs */
 	moveml	%d1-%d5/%a0-%a2,%sp@
 	movel	sw_usp,%a0	/* get usp */
-	movel	%a0@-,%sp@(PT_PC)	/* copy exception program counter */
-	movel	%a0@-,%sp@(PT_FORMATVEC)/* copy exception format/vector/sr */
+	movel	%a0@-,%sp@(PT_OFF_PC)	/* copy exception program counter */
+	movel	%a0@-,%sp@(PT_OFF_FORMATVEC)/*copy exception format/vector/sr */
 	bra	7f
 6:
 	clrl	%sp@-		/* stkadj */
@@ -89,8 +89,8 @@ LENOSYS = 38
 	bnes	8f		/* no, skip */
 	move	#0x2700,%sr	/* disable intrs */
 	movel	sw_usp,%a0	/* get usp */
-	movel	%sp@(PT_PC),%a0@-	/* copy exception program counter */
-	movel	%sp@(PT_FORMATVEC),%a0@-/* copy exception format/vector/sr */
+	movel	%sp@(PT_OFF_PC),%a0@-	/* copy exception program counter */
+	movel	%sp@(PT_OFF_FORMATVEC),%a0@-/*copy exception format/vector/sr */
 	moveml	%sp@,%d1-%d5/%a0-%a2
 	lea	%sp@(32),%sp	/* space for 8 regs */
 	movel	%sp@+,%d0
diff --git a/arch/m68k/include/asm/math-emu.h b/arch/m68k/include/asm/math-emu.h
index ddfab96403cb..5e9249b0014c 100644
--- a/arch/m68k/include/asm/math-emu.h
+++ b/arch/m68k/include/asm/math-emu.h
@@ -145,16 +145,16 @@ extern unsigned int fp_debugprint;
  * these are only used during instruction decoding
  * where we always know how deep we're on the stack.
  */
-#define FPS_DO		(PT_D0)
-#define FPS_D1		(PT_D1)
-#define FPS_D2		(PT_D2)
-#define FPS_A0		(PT_A0)
-#define FPS_A1		(PT_A1)
-#define FPS_A2		(PT_A2)
-#define FPS_SR		(PT_SR)
-#define FPS_PC		(PT_PC)
-#define FPS_EA		(PT_PC+6)
-#define FPS_PC2		(PT_PC+10)
+#define FPS_DO		(PT_OFF_D0)
+#define FPS_D1		(PT_OFF_D1)
+#define FPS_D2		(PT_OFF_D2)
+#define FPS_A0		(PT_OFF_A0)
+#define FPS_A1		(PT_OFF_A1)
+#define FPS_A2		(PT_OFF_A2)
+#define FPS_SR		(PT_OFF_SR)
+#define FPS_PC		(PT_OFF_PC)
+#define FPS_EA		(PT_OFF_PC+6)
+#define FPS_PC2		(PT_OFF_PC+10)
 
 .macro	fp_get_fp_reg
 	lea	(FPD_FPREG,FPDATA,%d0.w*4),%a0
diff --git a/arch/m68k/include/asm/thread_info_mm.h b/arch/m68k/include/asm/thread_info_mm.h
index 6ea5c33b3c56..b6da3882be9b 100644
--- a/arch/m68k/include/asm/thread_info_mm.h
+++ b/arch/m68k/include/asm/thread_info_mm.h
@@ -1,6 +1,10 @@
 #ifndef _ASM_M68K_THREAD_INFO_H
 #define _ASM_M68K_THREAD_INFO_H
 
+#ifndef ASM_OFFSETS_C
+#include <asm/asm-offsets.h>
+#endif
+#include <asm/current.h>
 #include <asm/types.h>
 #include <asm/page.h>
 
@@ -31,7 +35,12 @@ struct thread_info {
 #define init_thread_info	(init_task.thread.info)
 #define init_stack		(init_thread_union.stack)
 
-#define task_thread_info(tsk)	(&(tsk)->thread.info)
+#ifdef ASM_OFFSETS_C
+#define task_thread_info(tsk)	((struct thread_info *) NULL)
+#else
+#define task_thread_info(tsk)	((struct thread_info *)((char *)tsk+TASK_TINFO))
+#endif
+
 #define task_stack_page(tsk)	((tsk)->stack)
 #define current_thread_info()	task_thread_info(current)
 
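
A note on the ASM_OFFSETS_C dance above: asm-offsets.h is generated by compiling asm-offsets.c, but asm-offsets.c itself drags this header in via linux/sched.h, at a point where the generated file does not exist yet. The guard breaks that cycle. A minimal standalone sketch of the pattern (hypothetical names, not the kernel build system):

    /* While the offsets generator itself is being compiled
     * (GEN_OFFSETS_C defined), the generated header does not exist yet,
     * so the accessor degrades to a dummy that only has to type-check;
     * it is never executed in that translation unit. */
    #ifndef GEN_OFFSETS_C
    #include "generated-offsets.h"	/* provides TASK_TINFO */
    #define task_thread_info(tsk) \
    	((struct thread_info *)((char *)(tsk) + TASK_TINFO))
    #else
    #define task_thread_info(tsk)	((struct thread_info *)NULL)
    #endif
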
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
index b1f012f6c493..73e5e581245b 100644
--- a/arch/m68k/kernel/asm-offsets.c
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -8,6 +8,8 @@
  * #defines from the assembly-language output.
  */
 
+#define ASM_OFFSETS_C
+
 #include <linux/stddef.h>
 #include <linux/sched.h>
 #include <linux/kernel_stat.h>
@@ -27,6 +29,9 @@ int main(void)
 	DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
 	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
 	DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+#ifdef CONFIG_MMU
+	DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info));
+#endif
 
 	/* offsets into the thread struct */
 	DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
@@ -44,20 +49,20 @@ int main(void)
 	DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags));
 
 	/* offsets into the pt_regs */
-	DEFINE(PT_D0, offsetof(struct pt_regs, d0));
-	DEFINE(PT_ORIG_D0, offsetof(struct pt_regs, orig_d0));
-	DEFINE(PT_D1, offsetof(struct pt_regs, d1));
-	DEFINE(PT_D2, offsetof(struct pt_regs, d2));
-	DEFINE(PT_D3, offsetof(struct pt_regs, d3));
-	DEFINE(PT_D4, offsetof(struct pt_regs, d4));
-	DEFINE(PT_D5, offsetof(struct pt_regs, d5));
-	DEFINE(PT_A0, offsetof(struct pt_regs, a0));
-	DEFINE(PT_A1, offsetof(struct pt_regs, a1));
-	DEFINE(PT_A2, offsetof(struct pt_regs, a2));
-	DEFINE(PT_PC, offsetof(struct pt_regs, pc));
-	DEFINE(PT_SR, offsetof(struct pt_regs, sr));
+	DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
+	DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
+	DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
+	DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
+	DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
+	DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
+	DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
+	DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
+	DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
+	DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
+	DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
+	DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
 	/* bitfields are a bit difficult */
-	DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4);
+	DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
 
 	/* offsets into the irq_handler struct */
 	DEFINE(IRQ_HANDLER, offsetof(struct irq_node, handler));
@@ -84,10 +89,10 @@ int main(void)
 	DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref));
 
 	/* signal defines */
-	DEFINE(SIGSEGV, SIGSEGV);
-	DEFINE(SEGV_MAPERR, SEGV_MAPERR);
-	DEFINE(SIGTRAP, SIGTRAP);
-	DEFINE(TRAP_TRACE, TRAP_TRACE);
+	DEFINE(LSIGSEGV, SIGSEGV);
+	DEFINE(LSEGV_MAPERR, SEGV_MAPERR);
+	DEFINE(LSIGTRAP, SIGTRAP);
+	DEFINE(LTRAP_TRACE, TRAP_TRACE);
 
 	/* offsets into the custom struct */
 	DEFINE(CUSTOMBASE, &amiga_custom);
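
The DEFINE() entries above are not ordinary constants: asm-offsets.c is only ever compiled to assembly, and kbuild greps marker lines out of the .s output to generate asm-offsets.h. A standalone sketch of the mechanism (simplified; the real DEFINE() lives in the kernel's kbuild headers):

    /* offsets-sketch.c: build with "gcc -S offsets-sketch.c" and inspect
     * offsets-sketch.s. Each DEFINE() emits a "->NAME value" marker into
     * the assembly, which a sed script rewrites to "#define NAME value".
     * The file is never assembled or linked. */
    #include <stddef.h>

    struct pt_regs_sketch {
    	long d0;
    	long orig_d0;
    	long d1;
    };

    #define DEFINE(sym, val) \
    	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    void gen(void)
    {
    	DEFINE(PT_OFF_D1_SKETCH, offsetof(struct pt_regs_sketch, d1));
    }

This is why renaming PT_D0 to PT_OFF_D0 here automatically renames the constants that the assembly sources below consume.
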
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index c3735cd6207e..922f52e7ed1a 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -77,17 +77,17 @@ ENTRY(ret_from_fork)
 	jra	.Lret_from_exception
 
 do_trace_entry:
-	movel	#-ENOSYS,%sp@(PT_D0)	| needed for strace
+	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
 	subql	#4,%sp
 	SAVE_SWITCH_STACK
 	jbsr	syscall_trace
 	RESTORE_SWITCH_STACK
 	addql	#4,%sp
-	movel	%sp@(PT_ORIG_D0),%d0
+	movel	%sp@(PT_OFF_ORIG_D0),%d0
 	cmpl	#NR_syscalls,%d0
 	jcs	syscall
 badsys:
-	movel	#-ENOSYS,%sp@(PT_D0)
+	movel	#-ENOSYS,%sp@(PT_OFF_D0)
 	jra	ret_from_syscall
 
 do_trace_exit:
@@ -103,7 +103,7 @@ ENTRY(ret_from_signal)
 	addql	#4,%sp
 /* on 68040 complete pending writebacks if any */
 #ifdef CONFIG_M68040
-	bfextu	%sp@(PT_VECTOR){#0,#4},%d0
+	bfextu	%sp@(PT_OFF_FORMATVEC){#0,#4},%d0
 	subql	#7,%d0				| bus error frame ?
 	jbne	1f
 	movel	%sp,%sp@-
@@ -127,7 +127,7 @@ ENTRY(system_call)
 	jcc	badsys
 syscall:
 	jbsr	@(sys_call_table,%d0:l:4)@(0)
-	movel	%d0,%sp@(PT_D0)		| save the return value
+	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
 ret_from_syscall:
 	|oriw	#0x0700,%sr
 	movew	%curptr@(TASK_INFO+TINFO_FLAGS+2),%d0
@@ -135,7 +135,7 @@ ret_from_syscall:
 1:	RESTORE_ALL
 
 syscall_exit_work:
-	btst	#5,%sp@(PT_SR)		| check if returning to kernel
+	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
 	bnes	1b			| if so, skip resched, signals
 	lslw	#1,%d0
 	jcs	do_trace_exit
@@ -148,7 +148,7 @@ syscall_exit_work:
 
 ENTRY(ret_from_exception)
 .Lret_from_exception:
-	btst	#5,%sp@(PT_SR)		| check if returning to kernel
+	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
 	bnes	1f			| if so, skip resched, signals
 	| only allow interrupts when we are really the last one on the
 	| kernel stack, otherwise stack overflow can occur during
@@ -182,7 +182,7 @@ do_signal_return:
 	jbra	resume_userspace
 
 do_delayed_trace:
-	bclr	#7,%sp@(PT_SR)		| clear trace bit in SR
+	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
 	pea	1			| send SIGTRAP
 	movel	%curptr,%sp@-
 	pea	LSIGTRAP
@@ -199,7 +199,7 @@ ENTRY(auto_inthandler)
 	GET_CURRENT(%d0)
 	addqb	#1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
 					|  put exception # in d0
-	bfextu	%sp@(PT_VECTOR){#4,#10},%d0
+	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
 	subw	#VEC_SPUR,%d0
 
 	movel	%sp,%sp@-
@@ -216,7 +216,7 @@ ret_from_interrupt:
 	ALIGN
 ret_from_last_interrupt:
 	moveq	#(~ALLOWINT>>8)&0xff,%d0
-	andb	%sp@(PT_SR),%d0
+	andb	%sp@(PT_OFF_SR),%d0
 	jne	2b
 
 	/* check if we need to do software interrupts */
@@ -232,7 +232,7 @@ ENTRY(user_inthandler)
 	GET_CURRENT(%d0)
 	addqb	#1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
 					|  put exception # in d0
-	bfextu	%sp@(PT_VECTOR){#4,#10},%d0
+	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
 user_irqvec_fixup = . + 2
 	subw	#VEC_USER,%d0
 
diff --git a/arch/m68k/math-emu/fp_entry.S b/arch/m68k/math-emu/fp_entry.S
index 954b4f304a7d..a3fe1f348dfe 100644
--- a/arch/m68k/math-emu/fp_entry.S
+++ b/arch/m68k/math-emu/fp_entry.S
@@ -85,8 +85,8 @@ fp_err_ua2:
 fp_err_ua1:
 	addq.l	#4,%sp
 	move.l	%a0,-(%sp)
-	pea	SEGV_MAPERR
-	pea	SIGSEGV
+	pea	LSEGV_MAPERR
+	pea	LSIGSEGV
 	jsr	fpemu_signal
 	add.w	#12,%sp
 	jra	ret_from_exception
@@ -96,8 +96,8 @@ fp_err_ua1:
 | it does not really belong here, but...
 fp_sendtrace060:
 	move.l	(FPS_PC,%sp),-(%sp)
-	pea	TRAP_TRACE
-	pea	SIGTRAP
+	pea	LTRAP_TRACE
+	pea	LSIGTRAP
 	jsr	fpemu_signal
 	add.w	#12,%sp
 	jra	ret_from_exception
@@ -122,17 +122,17 @@ fp_get_data_reg:
 	.long	fp_get_d6, fp_get_d7
 
 fp_get_d0:
-	move.l	(PT_D0+8,%sp),%d0
+	move.l	(PT_OFF_D0+8,%sp),%d0
 	printf	PREGISTER,"{d0->%08x}",1,%d0
 	rts
 
 fp_get_d1:
-	move.l	(PT_D1+8,%sp),%d0
+	move.l	(PT_OFF_D1+8,%sp),%d0
 	printf	PREGISTER,"{d1->%08x}",1,%d0
 	rts
 
 fp_get_d2:
-	move.l	(PT_D2+8,%sp),%d0
+	move.l	(PT_OFF_D2+8,%sp),%d0
 	printf	PREGISTER,"{d2->%08x}",1,%d0
 	rts
 
@@ -173,35 +173,35 @@ fp_put_data_reg:
 
 fp_put_d0:
 	printf	PREGISTER,"{d0<-%08x}",1,%d0
-	move.l	%d0,(PT_D0+8,%sp)
+	move.l	%d0,(PT_OFF_D0+8,%sp)
 	rts
 
 fp_put_d1:
 	printf	PREGISTER,"{d1<-%08x}",1,%d0
-	move.l	%d0,(PT_D1+8,%sp)
+	move.l	%d0,(PT_OFF_D1+8,%sp)
 	rts
 
 fp_put_d2:
 	printf	PREGISTER,"{d2<-%08x}",1,%d0
-	move.l	%d0,(PT_D2+8,%sp)
+	move.l	%d0,(PT_OFF_D2+8,%sp)
 	rts
 
 fp_put_d3:
 	printf	PREGISTER,"{d3<-%08x}",1,%d0
 |	move.l	%d0,%d3
-	move.l	%d0,(PT_D3+8,%sp)
+	move.l	%d0,(PT_OFF_D3+8,%sp)
 	rts
 
 fp_put_d4:
 	printf	PREGISTER,"{d4<-%08x}",1,%d0
 |	move.l	%d0,%d4
-	move.l	%d0,(PT_D4+8,%sp)
+	move.l	%d0,(PT_OFF_D4+8,%sp)
 	rts
 
 fp_put_d5:
 	printf	PREGISTER,"{d5<-%08x}",1,%d0
 |	move.l	%d0,%d5
-	move.l	%d0,(PT_D5+8,%sp)
+	move.l	%d0,(PT_OFF_D5+8,%sp)
 	rts
 
 fp_put_d6:
@@ -225,17 +225,17 @@ fp_get_addr_reg:
 	.long	fp_get_a6, fp_get_a7
 
 fp_get_a0:
-	move.l	(PT_A0+8,%sp),%a0
+	move.l	(PT_OFF_A0+8,%sp),%a0
 	printf	PREGISTER,"{a0->%08x}",1,%a0
 	rts
 
 fp_get_a1:
-	move.l	(PT_A1+8,%sp),%a0
+	move.l	(PT_OFF_A1+8,%sp),%a0
 	printf	PREGISTER,"{a1->%08x}",1,%a0
 	rts
 
 fp_get_a2:
-	move.l	(PT_A2+8,%sp),%a0
+	move.l	(PT_OFF_A2+8,%sp),%a0
 	printf	PREGISTER,"{a2->%08x}",1,%a0
 	rts
 
@@ -276,17 +276,17 @@ fp_put_addr_reg:
 
 fp_put_a0:
 	printf	PREGISTER,"{a0<-%08x}",1,%a0
-	move.l	%a0,(PT_A0+8,%sp)
+	move.l	%a0,(PT_OFF_A0+8,%sp)
 	rts
 
 fp_put_a1:
 	printf	PREGISTER,"{a1<-%08x}",1,%a0
-	move.l	%a0,(PT_A1+8,%sp)
+	move.l	%a0,(PT_OFF_A1+8,%sp)
 	rts
 
 fp_put_a2:
 	printf	PREGISTER,"{a2<-%08x}",1,%a0
-	move.l	%a0,(PT_A2+8,%sp)
+	move.l	%a0,(PT_OFF_A2+8,%sp)
 	rts
 
 fp_put_a3:
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index c3b193121f81..198266cf9e2d 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -54,7 +54,7 @@
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
  */
-static inline unsigned long __spin_trylock(raw_spinlock_t *lock)
+static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
 {
 	unsigned long tmp, token;
 
@@ -76,7 +76,7 @@ static inline unsigned long __spin_trylock(raw_spinlock_t *lock)
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
-	return __spin_trylock(lock) == 0;
+	return arch_spin_trylock(lock) == 0;
 }
 
 /*
@@ -108,7 +108,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
 	while (1) {
-		if (likely(__spin_trylock(lock) == 0))
+		if (likely(arch_spin_trylock(lock) == 0))
 			break;
 		do {
 			HMT_low();
@@ -126,7 +126,7 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 
 	CLEAR_IO_SYNC;
 	while (1) {
-		if (likely(__spin_trylock(lock) == 0))
+		if (likely(arch_spin_trylock(lock) == 0))
 			break;
 		local_save_flags(flags_dis);
 		local_irq_restore(flags);
@@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
  * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
-static inline long __read_trylock(raw_rwlock_t *rw)
+static inline long arch_read_trylock(raw_rwlock_t *rw)
 {
 	long tmp;
 
@@ -205,7 +205,7 @@ static inline long __read_trylock(raw_rwlock_t *rw)
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
-static inline long __write_trylock(raw_rwlock_t *rw)
+static inline long arch_write_trylock(raw_rwlock_t *rw)
 {
 	long tmp, token;
 
@@ -228,7 +228,7 @@ static inline long __write_trylock(raw_rwlock_t *rw)
 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
 	while (1) {
-		if (likely(__read_trylock(rw) > 0))
+		if (likely(arch_read_trylock(rw) > 0))
 			break;
 		do {
 			HMT_low();
@@ -242,7 +242,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
 	while (1) {
-		if (likely(__write_trylock(rw) == 0))
+		if (likely(arch_write_trylock(rw) == 0))
 			break;
 		do {
 			HMT_low();
@@ -255,12 +255,12 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 
 static inline int __raw_read_trylock(raw_rwlock_t *rw)
 {
-	return __read_trylock(rw) > 0;
+	return arch_read_trylock(rw) > 0;
 }
 
 static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
-	return __write_trylock(rw) == 0;
+	return arch_write_trylock(rw) == 0;
 }
 
 static inline void __raw_read_unlock(raw_rwlock_t *rw)
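
The arch_*() renames in this patch (and the sparc ones below) exist because the generic __spin_trylock(), __read_trylock() and friends now become static inline functions in include/linux/spinlock_api_smp.h (further down), so arch headers defining static inline helpers with those exact names would collide with them. The layering after the rename, as a standalone sketch with stand-in types rather than the kernel's real definitions:

    #include <stdio.h>

    typedef struct { volatile int slock; } raw_spinlock_sketch_t;

    /* arch layer: returns the old lock value; 0 means the lock was taken
     * (the same convention the powerpc comment above documents) */
    static inline unsigned long arch_spin_trylock(raw_spinlock_sketch_t *lock)
    {
    	unsigned long old = lock->slock;

    	if (old == 0)
    		lock->slock = 1;
    	return old;
    }

    /* generic layer: in the kernel this also handles preemption and lockdep */
    static inline int generic_spin_trylock(raw_spinlock_sketch_t *lock)
    {
    	return arch_spin_trylock(lock) == 0;
    }

    int main(void)
    {
    	raw_spinlock_sketch_t l = { 0 };

    	printf("first try:  %d\n", generic_spin_trylock(&l));	/* 1: acquired */
    	printf("second try: %d\n", generic_spin_trylock(&l));	/* 0: already held */
    	return 0;
    }
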
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index c9af0d19c7ab..41ce6861174e 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -191,4 +191,33 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()
 
+#define __always_inline__spin_lock
+#define __always_inline__read_lock
+#define __always_inline__write_lock
+#define __always_inline__spin_lock_bh
+#define __always_inline__read_lock_bh
+#define __always_inline__write_lock_bh
+#define __always_inline__spin_lock_irq
+#define __always_inline__read_lock_irq
+#define __always_inline__write_lock_irq
+#define __always_inline__spin_lock_irqsave
+#define __always_inline__read_lock_irqsave
+#define __always_inline__write_lock_irqsave
+#define __always_inline__spin_trylock
+#define __always_inline__read_trylock
+#define __always_inline__write_trylock
+#define __always_inline__spin_trylock_bh
+#define __always_inline__spin_unlock
+#define __always_inline__read_unlock
+#define __always_inline__write_unlock
+#define __always_inline__spin_unlock_bh
+#define __always_inline__read_unlock_bh
+#define __always_inline__write_unlock_bh
+#define __always_inline__spin_unlock_irq
+#define __always_inline__read_unlock_irq
+#define __always_inline__write_unlock_irq
+#define __always_inline__spin_unlock_irqrestore
+#define __always_inline__read_unlock_irqrestore
+#define __always_inline__write_unlock_irqrestore
+
 #endif /* __ASM_SPINLOCK_H */
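
Each __always_inline__* define above is an opt-in switch, one per lock operation: if an arch defines it, spinlock_api_smp.h (below) maps the _spin_*()/_read_*()/_write_*() API name straight onto the inline __*() body instead of the out-of-line copy in kernel/spinlock.c; s390 opts in for everything. A compilable sketch of the plumbing for a single operation, with simplified stand-ins for the kernel types:

    #include <stdio.h>

    /* arch header (sketch): opt in to inlining spin_lock */
    #define __always_inline__spin_lock

    /* generic API header (sketch) */
    static inline void __spin_lock(int *lock)
    {
    	*lock = 1;	/* the real body also disables preemption etc. */
    }

    #ifdef __always_inline__spin_lock
    #define _spin_lock(lock)	__spin_lock(lock)
    #else
    void _spin_lock(int *lock);	/* out-of-line version, defined elsewhere */
    #endif

    int main(void)
    {
    	int lock = 0;

    	_spin_lock(&lock);	/* expands to the inline body on this "arch" */
    	printf("lock = %d\n", lock);
    	return 0;
    }
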
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 46f91ab66a50..857630cff636 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -76,7 +76,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  *
  * Unfortunately this scheme limits us to ~16,000,000 cpus.
  */
-static inline void __read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(raw_rwlock_t *rw)
 {
 	register raw_rwlock_t *lp asm("g1");
 	lp = rw;
@@ -92,11 +92,11 @@ static inline void __read_lock(raw_rwlock_t *rw)
 #define __raw_read_lock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	__read_lock(lock); \
+	arch_read_lock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
-static inline void __read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(raw_rwlock_t *rw)
 {
 	register raw_rwlock_t *lp asm("g1");
 	lp = rw;
@@ -112,7 +112,7 @@ static inline void __read_unlock(raw_rwlock_t *rw)
 #define __raw_read_unlock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	__read_unlock(lock); \
+	arch_read_unlock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
@@ -150,7 +150,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return (val == 0);
 }
 
-static inline int __read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(raw_rwlock_t *rw)
 {
 	register raw_rwlock_t *lp asm("g1");
 	register int res asm("o0");
@@ -169,7 +169,7 @@ static inline int __read_trylock(raw_rwlock_t *rw)
 ({	unsigned long flags; \
 	int res; \
 	local_irq_save(flags); \
-	res = __read_trylock(lock); \
+	res = arch_read_trylock(lock); \
 	local_irq_restore(flags); \
 	res; \
 })
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index f6b2b92ad8d2..43e514783582 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
 
 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
 
-static void inline __read_lock(raw_rwlock_t *lock)
+static void inline arch_read_lock(raw_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
 
@@ -115,7 +115,7 @@ static void inline __read_lock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static int inline __read_trylock(raw_rwlock_t *lock)
+static int inline arch_read_trylock(raw_rwlock_t *lock)
 {
 	int tmp1, tmp2;
 
@@ -136,7 +136,7 @@ static int inline __read_trylock(raw_rwlock_t *lock)
 	return tmp1;
 }
 
-static void inline __read_unlock(raw_rwlock_t *lock)
+static void inline arch_read_unlock(raw_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
 
@@ -152,7 +152,7 @@ static void inline __read_unlock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static void inline __write_lock(raw_rwlock_t *lock)
+static void inline arch_write_lock(raw_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2;
 
@@ -177,7 +177,7 @@ static void inline __write_lock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static void inline __write_unlock(raw_rwlock_t *lock)
+static void inline arch_write_unlock(raw_rwlock_t *lock)
 {
 	__asm__ __volatile__(
 "	stw		%%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline __write_unlock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static int inline __write_trylock(raw_rwlock_t *lock)
+static int inline arch_write_trylock(raw_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2, result;
 
@@ -210,14 +210,14 @@ static int inline __write_trylock(raw_rwlock_t *lock)
 	return result;
 }
 
-#define __raw_read_lock(p)	__read_lock(p)
-#define __raw_read_lock_flags(p, f) __read_lock(p)
-#define __raw_read_trylock(p)	__read_trylock(p)
-#define __raw_read_unlock(p)	__read_unlock(p)
-#define __raw_write_lock(p)	__write_lock(p)
-#define __raw_write_lock_flags(p, f) __write_lock(p)
-#define __raw_write_unlock(p)	__write_unlock(p)
-#define __raw_write_trylock(p)	__write_trylock(p)
+#define __raw_read_lock(p)	arch_read_lock(p)
+#define __raw_read_lock_flags(p, f) arch_read_lock(p)
+#define __raw_read_trylock(p)	arch_read_trylock(p)
+#define __raw_read_unlock(p)	arch_read_unlock(p)
+#define __raw_write_lock(p)	arch_write_lock(p)
+#define __raw_write_lock_flags(p, f) arch_write_lock(p)
+#define __raw_write_unlock(p)	arch_write_unlock(p)
+#define __raw_write_trylock(p)	arch_write_trylock(p)
 
 #define __raw_read_can_lock(rw)		(!((rw)->lock & 0x80000000UL))
 #define __raw_write_can_lock(rw)	(!(rw)->lock)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b25d1b53df0d..9ccf0e286b2a 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -149,6 +149,12 @@ struct lock_list {
 	struct lock_class		*class;
 	struct stack_trace		trace;
 	int				distance;
+
+	/*
+	 * The parent field is used to implement breadth-first search, and the
+	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
+	 */
+	struct lock_list		*parent;
 };
 
 /*
@@ -208,10 +214,12 @@
 	 * interrupt context:
 	 */
 	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
-	unsigned int trylock:1;
+	unsigned int trylock:1;						/* 16 bits */
+
 	unsigned int read:2;        /* see lock_acquire() comment */
 	unsigned int check:2;       /* see lock_acquire() comment */
 	unsigned int hardirqs_off:1;
+	unsigned int references:11;					/* 32 bits */
 };
 
 /*
@@ -291,6 +299,10 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 extern void lock_release(struct lockdep_map *lock, int nested,
 			 unsigned long ip);
 
+#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)
+
+extern int lock_is_held(struct lockdep_map *lock);
+
 extern void lock_set_class(struct lockdep_map *lock, const char *name,
 			   struct lock_class_key *key, unsigned int subclass,
 			   unsigned long ip);
@@ -309,6 +321,8 @@ extern void lockdep_trace_alloc(gfp_t mask);
 
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
 
+#define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))
+
 #else /* !LOCKDEP */
 
 static inline void lockdep_off(void)
@@ -353,6 +367,8 @@ struct lock_class_key { };
 
 #define lockdep_depth(tsk)	(0)
 
+#define lockdep_assert_held(l)			do { } while (0)
+
 #endif /* !LOCKDEP */
 
 #ifdef CONFIG_LOCK_STAT
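
The new lockdep_assert_held() lets code document, and with CONFIG_LOCKDEP verify at runtime, that a lock is held on entry; in !LOCKDEP builds it compiles away entirely. Hypothetical usage (my_dev and my_dev_bump() are invented for illustration, not part of this patch):

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    struct my_dev {
    	spinlock_t	lock;
    	int		count;
    };

    /* Caller must hold dev->lock; under CONFIG_LOCKDEP a violation
     * trips the WARN_ON inside lockdep_assert_held(). */
    static void my_dev_bump(struct my_dev *dev)
    {
    	lockdep_assert_held(&dev->lock);
    	dev->count++;
    }
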
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 4be57ab03478..f0ca7a7a1757 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -143,15 +143,6 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
  */
 #define spin_unlock_wait(lock)	__raw_spin_unlock_wait(&(lock)->raw_lock)
 
-/*
- * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-# include <linux/spinlock_api_smp.h>
-#else
-# include <linux/spinlock_api_up.h>
-#endif
-
 #ifdef CONFIG_DEBUG_SPINLOCK
  extern void _raw_spin_lock(spinlock_t *lock);
 #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
@@ -268,50 +259,16 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
 
 #define spin_lock_irq(lock)		_spin_lock_irq(lock)
 #define spin_lock_bh(lock)		_spin_lock_bh(lock)
-
 #define read_lock_irq(lock)		_read_lock_irq(lock)
 #define read_lock_bh(lock)		_read_lock_bh(lock)
-
 #define write_lock_irq(lock)		_write_lock_irq(lock)
 #define write_lock_bh(lock)		_write_lock_bh(lock)
-
-/*
- * We inline the unlock functions in the nondebug case:
- */
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
-	!defined(CONFIG_SMP)
-# define spin_unlock(lock)		_spin_unlock(lock)
-# define read_unlock(lock)		_read_unlock(lock)
-# define write_unlock(lock)		_write_unlock(lock)
-# define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
-# define read_unlock_irq(lock)		_read_unlock_irq(lock)
-# define write_unlock_irq(lock)		_write_unlock_irq(lock)
-#else
-# define spin_unlock(lock) \
-    do {__raw_spin_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define read_unlock(lock) \
-    do {__raw_read_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define write_unlock(lock) \
-    do {__raw_write_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define spin_unlock_irq(lock) \
-do { \
-	__raw_spin_unlock(&(lock)->raw_lock); \
-	__release(lock); \
-	local_irq_enable(); \
-} while (0)
-# define read_unlock_irq(lock) \
-do { \
-	__raw_read_unlock(&(lock)->raw_lock); \
-	__release(lock); \
-	local_irq_enable(); \
-} while (0)
-# define write_unlock_irq(lock) \
-do { \
-	__raw_write_unlock(&(lock)->raw_lock); \
-	__release(lock); \
-	local_irq_enable(); \
-} while (0)
-#endif
+#define spin_unlock(lock)		_spin_unlock(lock)
+#define read_unlock(lock)		_read_unlock(lock)
+#define write_unlock(lock)		_write_unlock(lock)
+#define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
+#define read_unlock_irq(lock)		_read_unlock_irq(lock)
+#define write_unlock_irq(lock)		_write_unlock_irq(lock)
 
 #define spin_unlock_irqrestore(lock, flags) \
 	do { \
@@ -380,4 +337,13 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
  */
 #define spin_can_lock(lock)	(!spin_is_locked(lock))
 
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
 #endif /* __LINUX_SPINLOCK_H */
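
Moving the spinlock_api_{smp,up}.h include from near the top of the file to the very bottom is what makes the scheme work: the inline bodies that spinlock_api_smp.h now carries expand names such as _raw_spin_trylock() and the unlock wrappers that spinlock.h only defines further down, so the API header has to be pulled in after them. A toy illustration of that ordering constraint, using stand-in definitions:

    #include <stdio.h>

    /* definitions that "spinlock.h" must provide first */
    typedef struct { int slock; } spinlock_sketch_t;
    #define _raw_spin_unlock(l)	((l)->slock = 0)

    /* body that "spinlock_api_smp.h" supplies, included last: the macro
     * above must already be visible when this function is parsed */
    static inline void __spin_unlock(spinlock_sketch_t *lock)
    {
    	_raw_spin_unlock(lock);
    }

    int main(void)
    {
    	spinlock_sketch_t l = { 1 };

    	__spin_unlock(&l);
    	printf("slock = %d\n", l.slock);	/* 0 */
    	return 0;
    }
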
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index d79845d034b5..7a7e18fc2415 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h | |||
@@ -60,4 +60,398 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | |||
60 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 60 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
61 | __releases(lock); | 61 | __releases(lock); |
62 | 62 | ||
63 | /* | ||
64 | * We inline the unlock functions in the nondebug case: | ||
65 | */ | ||
66 | #if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT) | ||
67 | #define __always_inline__spin_unlock | ||
68 | #define __always_inline__read_unlock | ||
69 | #define __always_inline__write_unlock | ||
70 | #define __always_inline__spin_unlock_irq | ||
71 | #define __always_inline__read_unlock_irq | ||
72 | #define __always_inline__write_unlock_irq | ||
73 | #endif | ||
74 | |||
75 | #ifndef CONFIG_DEBUG_SPINLOCK | ||
76 | #ifndef CONFIG_GENERIC_LOCKBREAK | ||
77 | |||
78 | #ifdef __always_inline__spin_lock | ||
79 | #define _spin_lock(lock) __spin_lock(lock) | ||
80 | #endif | ||
81 | |||
82 | #ifdef __always_inline__read_lock | ||
83 | #define _read_lock(lock) __read_lock(lock) | ||
84 | #endif | ||
85 | |||
86 | #ifdef __always_inline__write_lock | ||
87 | #define _write_lock(lock) __write_lock(lock) | ||
88 | #endif | ||
89 | |||
90 | #ifdef __always_inline__spin_lock_bh | ||
91 | #define _spin_lock_bh(lock) __spin_lock_bh(lock) | ||
92 | #endif | ||
93 | |||
94 | #ifdef __always_inline__read_lock_bh | ||
95 | #define _read_lock_bh(lock) __read_lock_bh(lock) | ||
96 | #endif | ||
97 | |||
98 | #ifdef __always_inline__write_lock_bh | ||
99 | #define _write_lock_bh(lock) __write_lock_bh(lock) | ||
100 | #endif | ||
101 | |||
102 | #ifdef __always_inline__spin_lock_irq | ||
103 | #define _spin_lock_irq(lock) __spin_lock_irq(lock) | ||
104 | #endif | ||
105 | |||
106 | #ifdef __always_inline__read_lock_irq | ||
107 | #define _read_lock_irq(lock) __read_lock_irq(lock) | ||
108 | #endif | ||
109 | |||
110 | #ifdef __always_inline__write_lock_irq | ||
111 | #define _write_lock_irq(lock) __write_lock_irq(lock) | ||
112 | #endif | ||
113 | |||
114 | #ifdef __always_inline__spin_lock_irqsave | ||
115 | #define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) | ||
116 | #endif | ||
117 | |||
118 | #ifdef __always_inline__read_lock_irqsave | ||
119 | #define _read_lock_irqsave(lock) __read_lock_irqsave(lock) | ||
120 | #endif | ||
121 | |||
122 | #ifdef __always_inline__write_lock_irqsave | ||
123 | #define _write_lock_irqsave(lock) __write_lock_irqsave(lock) | ||
124 | #endif | ||
125 | |||
126 | #endif /* !CONFIG_GENERIC_LOCKBREAK */ | ||
127 | |||
128 | #ifdef __always_inline__spin_trylock | ||
129 | #define _spin_trylock(lock) __spin_trylock(lock) | ||
130 | #endif | ||
131 | |||
132 | #ifdef __always_inline__read_trylock | ||
133 | #define _read_trylock(lock) __read_trylock(lock) | ||
134 | #endif | ||
135 | |||
136 | #ifdef __always_inline__write_trylock | ||
137 | #define _write_trylock(lock) __write_trylock(lock) | ||
138 | #endif | ||
139 | |||
140 | #ifdef __always_inline__spin_trylock_bh | ||
141 | #define _spin_trylock_bh(lock) __spin_trylock_bh(lock) | ||
142 | #endif | ||
143 | |||
144 | #ifdef __always_inline__spin_unlock | ||
145 | #define _spin_unlock(lock) __spin_unlock(lock) | ||
146 | #endif | ||
147 | |||
148 | #ifdef __always_inline__read_unlock | ||
149 | #define _read_unlock(lock) __read_unlock(lock) | ||
150 | #endif | ||
151 | |||
152 | #ifdef __always_inline__write_unlock | ||
153 | #define _write_unlock(lock) __write_unlock(lock) | ||
154 | #endif | ||
155 | |||
156 | #ifdef __always_inline__spin_unlock_bh | ||
157 | #define _spin_unlock_bh(lock) __spin_unlock_bh(lock) | ||
158 | #endif | ||
159 | |||
160 | #ifdef __always_inline__read_unlock_bh | ||
161 | #define _read_unlock_bh(lock) __read_unlock_bh(lock) | ||
162 | #endif | ||
163 | |||
164 | #ifdef __always_inline__write_unlock_bh | ||
165 | #define _write_unlock_bh(lock) __write_unlock_bh(lock) | ||
166 | #endif | ||
167 | |||
168 | #ifdef __always_inline__spin_unlock_irq | ||
169 | #define _spin_unlock_irq(lock) __spin_unlock_irq(lock) | ||
170 | #endif | ||
171 | |||
172 | #ifdef __always_inline__read_unlock_irq | ||
173 | #define _read_unlock_irq(lock) __read_unlock_irq(lock) | ||
174 | #endif | ||
175 | |||
176 | #ifdef __always_inline__write_unlock_irq | ||
177 | #define _write_unlock_irq(lock) __write_unlock_irq(lock) | ||
178 | #endif | ||
179 | |||
180 | #ifdef __always_inline__spin_unlock_irqrestore | ||
181 | #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) | ||
182 | #endif | ||
183 | |||
184 | #ifdef __always_inline__read_unlock_irqrestore | ||
185 | #define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) | ||
186 | #endif | ||
187 | |||
188 | #ifdef __always_inline__write_unlock_irqrestore | ||
189 | #define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) | ||
190 | #endif | ||
191 | |||
192 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
193 | |||
194 | static inline int __spin_trylock(spinlock_t *lock) | ||
195 | { | ||
196 | preempt_disable(); | ||
197 | if (_raw_spin_trylock(lock)) { | ||
198 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
199 | return 1; | ||
200 | } | ||
201 | preempt_enable(); | ||
202 | return 0; | ||
203 | } | ||
204 | |||
205 | static inline int __read_trylock(rwlock_t *lock) | ||
206 | { | ||
207 | preempt_disable(); | ||
208 | if (_raw_read_trylock(lock)) { | ||
209 | rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); | ||
210 | return 1; | ||
211 | } | ||
212 | preempt_enable(); | ||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | static inline int __write_trylock(rwlock_t *lock) | ||
217 | { | ||
218 | preempt_disable(); | ||
219 | if (_raw_write_trylock(lock)) { | ||
220 | rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
221 | return 1; | ||
222 | } | ||
223 | preempt_enable(); | ||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | /* | ||
228 | * If lockdep is enabled then we use the non-preemption spin-ops | ||
229 | * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are | ||
230 | * not re-enabled during lock-acquire (which the preempt-spin-ops do): | ||
231 | */ | ||
232 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) | ||
233 | |||
234 | static inline void __read_lock(rwlock_t *lock) | ||
235 | { | ||
236 | preempt_disable(); | ||
237 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
238 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
239 | } | ||
240 | |||
241 | static inline unsigned long __spin_lock_irqsave(spinlock_t *lock) | ||
242 | { | ||
243 | unsigned long flags; | ||
244 | |||
245 | local_irq_save(flags); | ||
246 | preempt_disable(); | ||
247 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
248 | /* | ||
249 | * On lockdep we dont want the hand-coded irq-enable of | ||
250 | * _raw_spin_lock_flags() code, because lockdep assumes | ||
251 | * that interrupts are not re-enabled during lock-acquire: | ||
252 | */ | ||
253 | #ifdef CONFIG_LOCKDEP | ||
254 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
255 | #else | ||
256 | _raw_spin_lock_flags(lock, &flags); | ||
257 | #endif | ||
258 | return flags; | ||
259 | } | ||
260 | |||
261 | static inline void __spin_lock_irq(spinlock_t *lock) | ||
262 | { | ||
263 | local_irq_disable(); | ||
264 | preempt_disable(); | ||
265 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
266 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
267 | } | ||
268 | |||
269 | static inline void __spin_lock_bh(spinlock_t *lock) | ||
270 | { | ||
271 | local_bh_disable(); | ||
272 | preempt_disable(); | ||
273 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
274 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
275 | } | ||
276 | |||
277 | static inline unsigned long __read_lock_irqsave(rwlock_t *lock) | ||
278 | { | ||
279 | unsigned long flags; | ||
280 | |||
281 | local_irq_save(flags); | ||
282 | preempt_disable(); | ||
283 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
284 | LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock, | ||
285 | _raw_read_lock_flags, &flags); | ||
286 | return flags; | ||
287 | } | ||
288 | |||
289 | static inline void __read_lock_irq(rwlock_t *lock) | ||
290 | { | ||
291 | local_irq_disable(); | ||
292 | preempt_disable(); | ||
293 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
294 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
295 | } | ||
296 | |||
297 | static inline void __read_lock_bh(rwlock_t *lock) | ||
298 | { | ||
299 | local_bh_disable(); | ||
300 | preempt_disable(); | ||
301 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
302 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
303 | } | ||
304 | |||
305 | static inline unsigned long __write_lock_irqsave(rwlock_t *lock) | ||
306 | { | ||
307 | unsigned long flags; | ||
308 | |||
309 | local_irq_save(flags); | ||
310 | preempt_disable(); | ||
311 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
312 | LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock, | ||
313 | _raw_write_lock_flags, &flags); | ||
314 | return flags; | ||
315 | } | ||
316 | |||
317 | static inline void __write_lock_irq(rwlock_t *lock) | ||
318 | { | ||
319 | local_irq_disable(); | ||
320 | preempt_disable(); | ||
321 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
322 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
323 | } | ||
324 | |||
325 | static inline void __write_lock_bh(rwlock_t *lock) | ||
326 | { | ||
327 | local_bh_disable(); | ||
328 | preempt_disable(); | ||
329 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
330 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
331 | } | ||
332 | |||
333 | static inline void __spin_lock(spinlock_t *lock) | ||
334 | { | ||
335 | preempt_disable(); | ||
336 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
337 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
338 | } | ||
339 | |||
340 | static inline void __write_lock(rwlock_t *lock) | ||
341 | { | ||
342 | preempt_disable(); | ||
343 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
344 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
345 | } | ||
346 | |||
347 | #endif /* CONFIG_PREEMPT */ | ||
348 | |||
349 | static inline void __spin_unlock(spinlock_t *lock) | ||
350 | { | ||
351 | spin_release(&lock->dep_map, 1, _RET_IP_); | ||
352 | _raw_spin_unlock(lock); | ||
353 | preempt_enable(); | ||
354 | } | ||
355 | |||
356 | static inline void __write_unlock(rwlock_t *lock) | ||
357 | { | ||
358 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
359 | _raw_write_unlock(lock); | ||
360 | preempt_enable(); | ||
361 | } | ||
362 | |||
363 | static inline void __read_unlock(rwlock_t *lock) | ||
364 | { | ||
365 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
366 | _raw_read_unlock(lock); | ||
367 | preempt_enable(); | ||
368 | } | ||
369 | |||
370 | static inline void __spin_unlock_irqrestore(spinlock_t *lock, | ||
371 | unsigned long flags) | ||
372 | { | ||
373 | spin_release(&lock->dep_map, 1, _RET_IP_); | ||
374 | _raw_spin_unlock(lock); | ||
375 | local_irq_restore(flags); | ||
376 | preempt_enable(); | ||
377 | } | ||
378 | |||
379 | static inline void __spin_unlock_irq(spinlock_t *lock) | ||
380 | { | ||
381 | spin_release(&lock->dep_map, 1, _RET_IP_); | ||
382 | _raw_spin_unlock(lock); | ||
383 | local_irq_enable(); | ||
384 | preempt_enable(); | ||
385 | } | ||
386 | |||
387 | static inline void __spin_unlock_bh(spinlock_t *lock) | ||
388 | { | ||
389 | spin_release(&lock->dep_map, 1, _RET_IP_); | ||
390 | _raw_spin_unlock(lock); | ||
391 | preempt_enable_no_resched(); | ||
392 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
393 | } | ||
394 | |||
395 | static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | ||
396 | { | ||
397 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
398 | _raw_read_unlock(lock); | ||
399 | local_irq_restore(flags); | ||
400 | preempt_enable(); | ||
401 | } | ||
402 | |||
403 | static inline void __read_unlock_irq(rwlock_t *lock) | ||
404 | { | ||
405 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
406 | _raw_read_unlock(lock); | ||
407 | local_irq_enable(); | ||
408 | preempt_enable(); | ||
409 | } | ||
410 | |||
411 | static inline void __read_unlock_bh(rwlock_t *lock) | ||
412 | { | ||
413 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
414 | _raw_read_unlock(lock); | ||
415 | preempt_enable_no_resched(); | ||
416 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
417 | } | ||
418 | |||
419 | static inline void __write_unlock_irqrestore(rwlock_t *lock, | ||
420 | unsigned long flags) | ||
421 | { | ||
422 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
423 | _raw_write_unlock(lock); | ||
424 | local_irq_restore(flags); | ||
425 | preempt_enable(); | ||
426 | } | ||
427 | |||
428 | static inline void __write_unlock_irq(rwlock_t *lock) | ||
429 | { | ||
430 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
431 | _raw_write_unlock(lock); | ||
432 | local_irq_enable(); | ||
433 | preempt_enable(); | ||
434 | } | ||
435 | |||
436 | static inline void __write_unlock_bh(rwlock_t *lock) | ||
437 | { | ||
438 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
439 | _raw_write_unlock(lock); | ||
440 | preempt_enable_no_resched(); | ||
441 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
442 | } | ||
443 | |||
444 | static inline int __spin_trylock_bh(spinlock_t *lock) | ||
445 | { | ||
446 | local_bh_disable(); | ||
447 | preempt_disable(); | ||
448 | if (_raw_spin_trylock(lock)) { | ||
449 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
450 | return 1; | ||
451 | } | ||
452 | preempt_enable_no_resched(); | ||
453 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
454 | return 0; | ||
455 | } | ||
456 | |||
63 | #endif /* __LINUX_SPINLOCK_API_SMP_H */ | 457 | #endif /* __LINUX_SPINLOCK_API_SMP_H */ |
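A caller-side note on __spin_trylock_bh(): on failure it has already restored both preemption and bottom halves, so the matching unlock is only ever issued on success. A minimal sketch (my_lock and my_count are invented names, not from this patch):

        /* illustrative sketch only */
        static DEFINE_SPINLOCK(my_lock);
        static unsigned long my_count;

        static int my_try_bump(void)
        {
                if (!spin_trylock_bh(&my_lock)) /* __spin_trylock_bh() on SMP */
                        return 0;       /* BHs and preemption already re-enabled */
                my_count++;
                spin_unlock_bh(&my_lock);       /* pairs with __spin_unlock_bh() */
                return 1;
        }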
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 8bbeef996c76..f74d2d7aa605 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/hash.h> | 42 | #include <linux/hash.h> |
43 | #include <linux/ftrace.h> | 43 | #include <linux/ftrace.h> |
44 | #include <linux/stringify.h> | 44 | #include <linux/stringify.h> |
45 | #include <linux/bitops.h> | ||
45 | 46 | ||
46 | #include <asm/sections.h> | 47 | #include <asm/sections.h> |
47 | 48 | ||
@@ -366,11 +367,21 @@ static int save_trace(struct stack_trace *trace) | |||
366 | 367 | ||
367 | save_stack_trace(trace); | 368 | save_stack_trace(trace); |
368 | 369 | ||
370 | /* | ||
371 | * Some daft arches put -1 at the end to indicate it's a full trace. | ||
372 | * | ||
373 | * <rant> this is buggy anyway, since it takes up a whole extra entry, so a | ||
374 | * complete trace that maxes out the provided entries will be reported | ||
375 | * as incomplete, friggin useless </rant> | ||
376 | */ | ||
377 | if (trace->entries[trace->nr_entries-1] == ULONG_MAX) | ||
378 | trace->nr_entries--; | ||
379 | |||
369 | trace->max_entries = trace->nr_entries; | 380 | trace->max_entries = trace->nr_entries; |
370 | 381 | ||
371 | nr_stack_trace_entries += trace->nr_entries; | 382 | nr_stack_trace_entries += trace->nr_entries; |
372 | 383 | ||
373 | if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) { | 384 | if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) { |
374 | if (!debug_locks_off_graph_unlock()) | 385 | if (!debug_locks_off_graph_unlock()) |
375 | return 0; | 386 | return 0; |
376 | 387 | ||
@@ -388,20 +399,6 @@ unsigned int nr_hardirq_chains; | |||
388 | unsigned int nr_softirq_chains; | 399 | unsigned int nr_softirq_chains; |
389 | unsigned int nr_process_chains; | 400 | unsigned int nr_process_chains; |
390 | unsigned int max_lockdep_depth; | 401 | unsigned int max_lockdep_depth; |
391 | unsigned int max_recursion_depth; | ||
392 | |||
393 | static unsigned int lockdep_dependency_gen_id; | ||
394 | |||
395 | static bool lockdep_dependency_visit(struct lock_class *source, | ||
396 | unsigned int depth) | ||
397 | { | ||
398 | if (!depth) | ||
399 | lockdep_dependency_gen_id++; | ||
400 | if (source->dep_gen_id == lockdep_dependency_gen_id) | ||
401 | return true; | ||
402 | source->dep_gen_id = lockdep_dependency_gen_id; | ||
403 | return false; | ||
404 | } | ||
405 | 402 | ||
406 | #ifdef CONFIG_DEBUG_LOCKDEP | 403 | #ifdef CONFIG_DEBUG_LOCKDEP |
407 | /* | 404 | /* |
@@ -431,11 +428,8 @@ atomic_t redundant_softirqs_on; | |||
431 | atomic_t redundant_softirqs_off; | 428 | atomic_t redundant_softirqs_off; |
432 | atomic_t nr_unused_locks; | 429 | atomic_t nr_unused_locks; |
433 | atomic_t nr_cyclic_checks; | 430 | atomic_t nr_cyclic_checks; |
434 | atomic_t nr_cyclic_check_recursions; | ||
435 | atomic_t nr_find_usage_forwards_checks; | 431 | atomic_t nr_find_usage_forwards_checks; |
436 | atomic_t nr_find_usage_forwards_recursions; | ||
437 | atomic_t nr_find_usage_backwards_checks; | 432 | atomic_t nr_find_usage_backwards_checks; |
438 | atomic_t nr_find_usage_backwards_recursions; | ||
439 | #endif | 433 | #endif |
440 | 434 | ||
441 | /* | 435 | /* |
@@ -551,58 +545,6 @@ static void lockdep_print_held_locks(struct task_struct *curr) | |||
551 | } | 545 | } |
552 | } | 546 | } |
553 | 547 | ||
554 | static void print_lock_class_header(struct lock_class *class, int depth) | ||
555 | { | ||
556 | int bit; | ||
557 | |||
558 | printk("%*s->", depth, ""); | ||
559 | print_lock_name(class); | ||
560 | printk(" ops: %lu", class->ops); | ||
561 | printk(" {\n"); | ||
562 | |||
563 | for (bit = 0; bit < LOCK_USAGE_STATES; bit++) { | ||
564 | if (class->usage_mask & (1 << bit)) { | ||
565 | int len = depth; | ||
566 | |||
567 | len += printk("%*s %s", depth, "", usage_str[bit]); | ||
568 | len += printk(" at:\n"); | ||
569 | print_stack_trace(class->usage_traces + bit, len); | ||
570 | } | ||
571 | } | ||
572 | printk("%*s }\n", depth, ""); | ||
573 | |||
574 | printk("%*s ... key at: ",depth,""); | ||
575 | print_ip_sym((unsigned long)class->key); | ||
576 | } | ||
577 | |||
578 | /* | ||
579 | * printk all lock dependencies starting at <entry>: | ||
580 | */ | ||
581 | static void __used | ||
582 | print_lock_dependencies(struct lock_class *class, int depth) | ||
583 | { | ||
584 | struct lock_list *entry; | ||
585 | |||
586 | if (lockdep_dependency_visit(class, depth)) | ||
587 | return; | ||
588 | |||
589 | if (DEBUG_LOCKS_WARN_ON(depth >= 20)) | ||
590 | return; | ||
591 | |||
592 | print_lock_class_header(class, depth); | ||
593 | |||
594 | list_for_each_entry(entry, &class->locks_after, entry) { | ||
595 | if (DEBUG_LOCKS_WARN_ON(!entry->class)) | ||
596 | return; | ||
597 | |||
598 | print_lock_dependencies(entry->class, depth + 1); | ||
599 | |||
600 | printk("%*s ... acquired at:\n",depth,""); | ||
601 | print_stack_trace(&entry->trace, 2); | ||
602 | printk("\n"); | ||
603 | } | ||
604 | } | ||
605 | |||
606 | static void print_kernel_version(void) | 548 | static void print_kernel_version(void) |
607 | { | 549 | { |
608 | printk("%s %.*s\n", init_utsname()->release, | 550 | printk("%s %.*s\n", init_utsname()->release, |
@@ -898,22 +840,203 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this, | |||
898 | } | 840 | } |
899 | 841 | ||
900 | /* | 842 | /* |
843 | * For good efficiency of the modulo operation, we use a power of 2 | ||
844 | */ | ||
845 | #define MAX_CIRCULAR_QUEUE_SIZE 4096UL | ||
846 | #define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1) | ||
847 | |||
848 | /* | ||
849 | * The circular_queue and its helpers are used to implement the | ||
850 | * breadth-first search (BFS) algorithm, by which we can build | ||
851 | * the shortest path from the next lock to be acquired back to a | ||
852 | * previously held lock, if there is a circular dependency between them. | ||
853 | */ | ||
854 | struct circular_queue { | ||
855 | unsigned long element[MAX_CIRCULAR_QUEUE_SIZE]; | ||
856 | unsigned int front, rear; | ||
857 | }; | ||
858 | |||
859 | static struct circular_queue lock_cq; | ||
860 | |||
861 | unsigned int max_bfs_queue_depth; | ||
862 | |||
863 | static unsigned int lockdep_dependency_gen_id; | ||
864 | |||
865 | static inline void __cq_init(struct circular_queue *cq) | ||
866 | { | ||
867 | cq->front = cq->rear = 0; | ||
868 | lockdep_dependency_gen_id++; | ||
869 | } | ||
870 | |||
871 | static inline int __cq_empty(struct circular_queue *cq) | ||
872 | { | ||
873 | return (cq->front == cq->rear); | ||
874 | } | ||
875 | |||
876 | static inline int __cq_full(struct circular_queue *cq) | ||
877 | { | ||
878 | return ((cq->rear + 1) & CQ_MASK) == cq->front; | ||
879 | } | ||
880 | |||
881 | static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem) | ||
882 | { | ||
883 | if (__cq_full(cq)) | ||
884 | return -1; | ||
885 | |||
886 | cq->element[cq->rear] = elem; | ||
887 | cq->rear = (cq->rear + 1) & CQ_MASK; | ||
888 | return 0; | ||
889 | } | ||
890 | |||
891 | static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem) | ||
892 | { | ||
893 | if (__cq_empty(cq)) | ||
894 | return -1; | ||
895 | |||
896 | *elem = cq->element[cq->front]; | ||
897 | cq->front = (cq->front + 1) & CQ_MASK; | ||
898 | return 0; | ||
899 | } | ||
900 | |||
901 | static inline unsigned int __cq_get_elem_count(struct circular_queue *cq) | ||
902 | { | ||
903 | return (cq->rear - cq->front) & CQ_MASK; | ||
904 | } | ||
905 | |||
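The power-of-2 size is what makes CQ_MASK work: every index update wraps with a single AND instead of a modulo divide, at the cost of one permanently free slot so that full and empty stay distinguishable. The same arithmetic in a standalone userspace sketch (all names illustrative):

        /* userspace sketch of the masked ring-buffer arithmetic; not kernel code */
        #define QSIZE   8UL             /* must be a power of 2 */
        #define QMASK   (QSIZE - 1)

        struct ring { unsigned long elem[QSIZE]; unsigned int front, rear; };

        static int ring_put(struct ring *r, unsigned long e)
        {
                if (((r->rear + 1) & QMASK) == r->front)
                        return -1;                      /* full: one slot stays free */
                r->elem[r->rear] = e;
                r->rear = (r->rear + 1) & QMASK;        /* wrap via AND, no '%' */
                return 0;
        }

        static int ring_get(struct ring *r, unsigned long *e)
        {
                if (r->front == r->rear)
                        return -1;                      /* empty */
                *e = r->elem[r->front];
                r->front = (r->front + 1) & QMASK;
                return 0;
        }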
906 | static inline void mark_lock_accessed(struct lock_list *lock, | ||
907 | struct lock_list *parent) | ||
908 | { | ||
909 | unsigned long nr; | ||
910 | |||
911 | nr = lock - list_entries; | ||
912 | WARN_ON(nr >= nr_list_entries); | ||
913 | lock->parent = parent; | ||
914 | lock->class->dep_gen_id = lockdep_dependency_gen_id; | ||
915 | } | ||
916 | |||
917 | static inline unsigned long lock_accessed(struct lock_list *lock) | ||
918 | { | ||
919 | unsigned long nr; | ||
920 | |||
921 | nr = lock - list_entries; | ||
922 | WARN_ON(nr >= nr_list_entries); | ||
923 | return lock->class->dep_gen_id == lockdep_dependency_gen_id; | ||
924 | } | ||
925 | |||
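mark_lock_accessed()/lock_accessed() lean on the generation counter that __cq_init() bumps: a class only counts as visited when its dep_gen_id equals the current generation, so starting a new search invalidates every old mark in O(1) without walking the classes. The trick in isolation (a sketch, assuming the counter is bumped before the first search so that zero-initialized marks read as unseen):

        /* generation-count visited marks; names illustrative */
        static unsigned int gen;                        /* one shared counter */

        struct node { unsigned int seen_gen; };

        static void search_begin(void) { gen++; }      /* "clears" all marks */
        static void mark_seen(struct node *n) { n->seen_gen = gen; }
        static int was_seen(struct node *n) { return n->seen_gen == gen; }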
926 | static inline struct lock_list *get_lock_parent(struct lock_list *child) | ||
927 | { | ||
928 | return child->parent; | ||
929 | } | ||
930 | |||
931 | static inline int get_lock_depth(struct lock_list *child) | ||
932 | { | ||
933 | int depth = 0; | ||
934 | struct lock_list *parent; | ||
935 | |||
936 | while ((parent = get_lock_parent(child))) { | ||
937 | child = parent; | ||
938 | depth++; | ||
939 | } | ||
940 | return depth; | ||
941 | } | ||
942 | |||
943 | static int __bfs(struct lock_list *source_entry, | ||
944 | void *data, | ||
945 | int (*match)(struct lock_list *entry, void *data), | ||
946 | struct lock_list **target_entry, | ||
947 | int forward) | ||
948 | { | ||
949 | struct lock_list *entry; | ||
950 | struct list_head *head; | ||
951 | struct circular_queue *cq = &lock_cq; | ||
952 | int ret = 1; | ||
953 | |||
954 | if (match(source_entry, data)) { | ||
955 | *target_entry = source_entry; | ||
956 | ret = 0; | ||
957 | goto exit; | ||
958 | } | ||
959 | |||
960 | if (forward) | ||
961 | head = &source_entry->class->locks_after; | ||
962 | else | ||
963 | head = &source_entry->class->locks_before; | ||
964 | |||
965 | if (list_empty(head)) | ||
966 | goto exit; | ||
967 | |||
968 | __cq_init(cq); | ||
969 | __cq_enqueue(cq, (unsigned long)source_entry); | ||
970 | |||
971 | while (!__cq_empty(cq)) { | ||
972 | struct lock_list *lock; | ||
973 | |||
974 | __cq_dequeue(cq, (unsigned long *)&lock); | ||
975 | |||
976 | if (!lock->class) { | ||
977 | ret = -2; | ||
978 | goto exit; | ||
979 | } | ||
980 | |||
981 | if (forward) | ||
982 | head = &lock->class->locks_after; | ||
983 | else | ||
984 | head = &lock->class->locks_before; | ||
985 | |||
986 | list_for_each_entry(entry, head, entry) { | ||
987 | if (!lock_accessed(entry)) { | ||
988 | unsigned int cq_depth; | ||
989 | mark_lock_accessed(entry, lock); | ||
990 | if (match(entry, data)) { | ||
991 | *target_entry = entry; | ||
992 | ret = 0; | ||
993 | goto exit; | ||
994 | } | ||
995 | |||
996 | if (__cq_enqueue(cq, (unsigned long)entry)) { | ||
997 | ret = -1; | ||
998 | goto exit; | ||
999 | } | ||
1000 | cq_depth = __cq_get_elem_count(cq); | ||
1001 | if (max_bfs_queue_depth < cq_depth) | ||
1002 | max_bfs_queue_depth = cq_depth; | ||
1003 | } | ||
1004 | } | ||
1005 | } | ||
1006 | exit: | ||
1007 | return ret; | ||
1008 | } | ||
1009 | |||
1010 | static inline int __bfs_forwards(struct lock_list *src_entry, | ||
1011 | void *data, | ||
1012 | int (*match)(struct lock_list *entry, void *data), | ||
1013 | struct lock_list **target_entry) | ||
1014 | { | ||
1015 | return __bfs(src_entry, data, match, target_entry, 1); | ||
1016 | |||
1017 | } | ||
1018 | |||
1019 | static inline int __bfs_backwards(struct lock_list *src_entry, | ||
1020 | void *data, | ||
1021 | int (*match)(struct lock_list *entry, void *data), | ||
1022 | struct lock_list **target_entry) | ||
1023 | { | ||
1024 | return __bfs(src_entry, data, match, target_entry, 0); | ||
1025 | |||
1026 | } | ||
1027 | |||
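The shape of the search, condensed: a FIFO of pointers, a match() callback threaded through unchanged, and parent links recorded when a node is first reached, so that any hit can be unwound into the shortest path (which is exactly what print_circular_bug() does further down). A self-contained userspace analogue, with every name invented and the queue assumed large enough for all reachable nodes:

        /* userspace analogue of __bfs(); illustrative only */
        #include <stddef.h>

        #define NCHILD 4

        struct node {
                struct node *child[NCHILD];     /* NULL-terminated fan-out */
                struct node *parent;            /* written by the search */
                int seen;
        };

        /* Returns the first matching node, or NULL if none is reachable. */
        static struct node *bfs(struct node *root, int (*match)(struct node *),
                                struct node **queue)
        {
                unsigned int front = 0, rear = 0;
                int i;

                root->parent = NULL;
                root->seen = 1;
                queue[rear++] = root;

                while (front != rear) {
                        struct node *n = queue[front++];

                        if (match(n))
                                return n;       /* walk ->parent for the path */
                        for (i = 0; i < NCHILD && n->child[i]; i++) {
                                if (!n->child[i]->seen) {
                                        n->child[i]->seen = 1;
                                        n->child[i]->parent = n;
                                        queue[rear++] = n->child[i];
                                }
                        }
                }
                return NULL;
        }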
1028 | /* | ||
901 | * Recursive, forwards-direction lock-dependency checking, used for | 1029 | * Recursive, forwards-direction lock-dependency checking, used for |
902 | * both noncyclic checking and for hardirq-unsafe/softirq-unsafe | 1030 | * both noncyclic checking and for hardirq-unsafe/softirq-unsafe |
903 | * checking. | 1031 | * checking. |
904 | * | ||
905 | * (to keep the stackframe of the recursive functions small we | ||
906 | * use these global variables, and we also mark various helper | ||
907 | * functions as noinline.) | ||
908 | */ | 1032 | */ |
909 | static struct held_lock *check_source, *check_target; | ||
910 | 1033 | ||
911 | /* | 1034 | /* |
912 | * Print a dependency chain entry (this is only done when a deadlock | 1035 | * Print a dependency chain entry (this is only done when a deadlock |
913 | * has been detected): | 1036 | * has been detected): |
914 | */ | 1037 | */ |
915 | static noinline int | 1038 | static noinline int |
916 | print_circular_bug_entry(struct lock_list *target, unsigned int depth) | 1039 | print_circular_bug_entry(struct lock_list *target, int depth) |
917 | { | 1040 | { |
918 | if (debug_locks_silent) | 1041 | if (debug_locks_silent) |
919 | return 0; | 1042 | return 0; |
@@ -930,11 +1053,13 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth) | |||
930 | * header first: | 1053 | * header first: |
931 | */ | 1054 | */ |
932 | static noinline int | 1055 | static noinline int |
933 | print_circular_bug_header(struct lock_list *entry, unsigned int depth) | 1056 | print_circular_bug_header(struct lock_list *entry, unsigned int depth, |
1057 | struct held_lock *check_src, | ||
1058 | struct held_lock *check_tgt) | ||
934 | { | 1059 | { |
935 | struct task_struct *curr = current; | 1060 | struct task_struct *curr = current; |
936 | 1061 | ||
937 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) | 1062 | if (debug_locks_silent) |
938 | return 0; | 1063 | return 0; |
939 | 1064 | ||
940 | printk("\n=======================================================\n"); | 1065 | printk("\n=======================================================\n"); |
@@ -943,9 +1068,9 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth) | |||
943 | printk( "-------------------------------------------------------\n"); | 1068 | printk( "-------------------------------------------------------\n"); |
944 | printk("%s/%d is trying to acquire lock:\n", | 1069 | printk("%s/%d is trying to acquire lock:\n", |
945 | curr->comm, task_pid_nr(curr)); | 1070 | curr->comm, task_pid_nr(curr)); |
946 | print_lock(check_source); | 1071 | print_lock(check_src); |
947 | printk("\nbut task is already holding lock:\n"); | 1072 | printk("\nbut task is already holding lock:\n"); |
948 | print_lock(check_target); | 1073 | print_lock(check_tgt); |
949 | printk("\nwhich lock already depends on the new lock.\n\n"); | 1074 | printk("\nwhich lock already depends on the new lock.\n\n"); |
950 | printk("\nthe existing dependency chain (in reverse order) is:\n"); | 1075 | printk("\nthe existing dependency chain (in reverse order) is:\n"); |
951 | 1076 | ||
@@ -954,19 +1079,36 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth) | |||
954 | return 0; | 1079 | return 0; |
955 | } | 1080 | } |
956 | 1081 | ||
957 | static noinline int print_circular_bug_tail(void) | 1082 | static inline int class_equal(struct lock_list *entry, void *data) |
1083 | { | ||
1084 | return entry->class == data; | ||
1085 | } | ||
1086 | |||
1087 | static noinline int print_circular_bug(struct lock_list *this, | ||
1088 | struct lock_list *target, | ||
1089 | struct held_lock *check_src, | ||
1090 | struct held_lock *check_tgt) | ||
958 | { | 1091 | { |
959 | struct task_struct *curr = current; | 1092 | struct task_struct *curr = current; |
960 | struct lock_list this; | 1093 | struct lock_list *parent; |
1094 | int depth; | ||
961 | 1095 | ||
962 | if (debug_locks_silent) | 1096 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) |
963 | return 0; | 1097 | return 0; |
964 | 1098 | ||
965 | this.class = hlock_class(check_source); | 1099 | if (!save_trace(&this->trace)) |
966 | if (!save_trace(&this.trace)) | ||
967 | return 0; | 1100 | return 0; |
968 | 1101 | ||
969 | print_circular_bug_entry(&this, 0); | 1102 | depth = get_lock_depth(target); |
1103 | |||
1104 | print_circular_bug_header(target, depth, check_src, check_tgt); | ||
1105 | |||
1106 | parent = get_lock_parent(target); | ||
1107 | |||
1108 | while (parent) { | ||
1109 | print_circular_bug_entry(parent, --depth); | ||
1110 | parent = get_lock_parent(parent); | ||
1111 | } | ||
970 | 1112 | ||
971 | printk("\nother info that might help us debug this:\n\n"); | 1113 | printk("\nother info that might help us debug this:\n\n"); |
972 | lockdep_print_held_locks(curr); | 1114 | lockdep_print_held_locks(curr); |
@@ -977,73 +1119,69 @@ static noinline int print_circular_bug_tail(void) | |||
977 | return 0; | 1119 | return 0; |
978 | } | 1120 | } |
979 | 1121 | ||
980 | #define RECURSION_LIMIT 40 | 1122 | static noinline int print_bfs_bug(int ret) |
981 | |||
982 | static int noinline print_infinite_recursion_bug(void) | ||
983 | { | 1123 | { |
984 | if (!debug_locks_off_graph_unlock()) | 1124 | if (!debug_locks_off_graph_unlock()) |
985 | return 0; | 1125 | return 0; |
986 | 1126 | ||
987 | WARN_ON(1); | 1127 | WARN(1, "lockdep bfs error:%d\n", ret); |
988 | 1128 | ||
989 | return 0; | 1129 | return 0; |
990 | } | 1130 | } |
991 | 1131 | ||
992 | unsigned long __lockdep_count_forward_deps(struct lock_class *class, | 1132 | static int noop_count(struct lock_list *entry, void *data) |
993 | unsigned int depth) | ||
994 | { | 1133 | { |
995 | struct lock_list *entry; | 1134 | (*(unsigned long *)data)++; |
996 | unsigned long ret = 1; | 1135 | return 0; |
1136 | } | ||
997 | 1137 | ||
998 | if (lockdep_dependency_visit(class, depth)) | 1138 | unsigned long __lockdep_count_forward_deps(struct lock_list *this) |
999 | return 0; | 1139 | { |
1140 | unsigned long count = 0; | ||
1141 | struct lock_list *uninitialized_var(target_entry); | ||
1000 | 1142 | ||
1001 | /* | 1143 | __bfs_forwards(this, (void *)&count, noop_count, &target_entry); |
1002 | * Recurse this class's dependency list: | ||
1003 | */ | ||
1004 | list_for_each_entry(entry, &class->locks_after, entry) | ||
1005 | ret += __lockdep_count_forward_deps(entry->class, depth + 1); | ||
1006 | 1144 | ||
1007 | return ret; | 1145 | return count; |
1008 | } | 1146 | } |
1009 | |||
1010 | unsigned long lockdep_count_forward_deps(struct lock_class *class) | 1147 | unsigned long lockdep_count_forward_deps(struct lock_class *class) |
1011 | { | 1148 | { |
1012 | unsigned long ret, flags; | 1149 | unsigned long ret, flags; |
1150 | struct lock_list this; | ||
1151 | |||
1152 | this.parent = NULL; | ||
1153 | this.class = class; | ||
1013 | 1154 | ||
1014 | local_irq_save(flags); | 1155 | local_irq_save(flags); |
1015 | __raw_spin_lock(&lockdep_lock); | 1156 | __raw_spin_lock(&lockdep_lock); |
1016 | ret = __lockdep_count_forward_deps(class, 0); | 1157 | ret = __lockdep_count_forward_deps(&this); |
1017 | __raw_spin_unlock(&lockdep_lock); | 1158 | __raw_spin_unlock(&lockdep_lock); |
1018 | local_irq_restore(flags); | 1159 | local_irq_restore(flags); |
1019 | 1160 | ||
1020 | return ret; | 1161 | return ret; |
1021 | } | 1162 | } |
1022 | 1163 | ||
1023 | unsigned long __lockdep_count_backward_deps(struct lock_class *class, | 1164 | unsigned long __lockdep_count_backward_deps(struct lock_list *this) |
1024 | unsigned int depth) | ||
1025 | { | 1165 | { |
1026 | struct lock_list *entry; | 1166 | unsigned long count = 0; |
1027 | unsigned long ret = 1; | 1167 | struct lock_list *uninitialized_var(target_entry); |
1028 | 1168 | ||
1029 | if (lockdep_dependency_visit(class, depth)) | 1169 | __bfs_backwards(this, (void *)&count, noop_count, &target_entry); |
1030 | return 0; | ||
1031 | /* | ||
1032 | * Recurse this class's dependency list: | ||
1033 | */ | ||
1034 | list_for_each_entry(entry, &class->locks_before, entry) | ||
1035 | ret += __lockdep_count_backward_deps(entry->class, depth + 1); | ||
1036 | 1170 | ||
1037 | return ret; | 1171 | return count; |
1038 | } | 1172 | } |
1039 | 1173 | ||
1040 | unsigned long lockdep_count_backward_deps(struct lock_class *class) | 1174 | unsigned long lockdep_count_backward_deps(struct lock_class *class) |
1041 | { | 1175 | { |
1042 | unsigned long ret, flags; | 1176 | unsigned long ret, flags; |
1177 | struct lock_list this; | ||
1178 | |||
1179 | this.parent = NULL; | ||
1180 | this.class = class; | ||
1043 | 1181 | ||
1044 | local_irq_save(flags); | 1182 | local_irq_save(flags); |
1045 | __raw_spin_lock(&lockdep_lock); | 1183 | __raw_spin_lock(&lockdep_lock); |
1046 | ret = __lockdep_count_backward_deps(class, 0); | 1184 | ret = __lockdep_count_backward_deps(&this); |
1047 | __raw_spin_unlock(&lockdep_lock); | 1185 | __raw_spin_unlock(&lockdep_lock); |
1048 | local_irq_restore(flags); | 1186 | local_irq_restore(flags); |
1049 | 1187 | ||
@@ -1055,29 +1193,16 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class) | |||
1055 | * lead to <target>. Print an error and return 0 if it does. | 1193 | * lead to <target>. Print an error and return 0 if it does. |
1056 | */ | 1194 | */ |
1057 | static noinline int | 1195 | static noinline int |
1058 | check_noncircular(struct lock_class *source, unsigned int depth) | 1196 | check_noncircular(struct lock_list *root, struct lock_class *target, |
1197 | struct lock_list **target_entry) | ||
1059 | { | 1198 | { |
1060 | struct lock_list *entry; | 1199 | int result; |
1061 | 1200 | ||
1062 | if (lockdep_dependency_visit(source, depth)) | 1201 | debug_atomic_inc(&nr_cyclic_checks); |
1063 | return 1; | ||
1064 | 1202 | ||
1065 | debug_atomic_inc(&nr_cyclic_check_recursions); | 1203 | result = __bfs_forwards(root, target, class_equal, target_entry); |
1066 | if (depth > max_recursion_depth) | 1204 | |
1067 | max_recursion_depth = depth; | 1205 | return result; |
1068 | if (depth >= RECURSION_LIMIT) | ||
1069 | return print_infinite_recursion_bug(); | ||
1070 | /* | ||
1071 | * Check this lock's dependency list: | ||
1072 | */ | ||
1073 | list_for_each_entry(entry, &source->locks_after, entry) { | ||
1074 | if (entry->class == hlock_class(check_target)) | ||
1075 | return print_circular_bug_header(entry, depth+1); | ||
1076 | debug_atomic_inc(&nr_cyclic_checks); | ||
1077 | if (!check_noncircular(entry->class, depth+1)) | ||
1078 | return print_circular_bug_entry(entry, depth+1); | ||
1079 | } | ||
1080 | return 1; | ||
1081 | } | 1206 | } |
1082 | 1207 | ||
1083 | #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) | 1208 | #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) |
@@ -1086,103 +1211,121 @@ check_noncircular(struct lock_class *source, unsigned int depth) | |||
1086 | * proving that two subgraphs can be connected by a new dependency | 1211 | * proving that two subgraphs can be connected by a new dependency |
1087 | * without creating any illegal irq-safe -> irq-unsafe lock dependency. | 1212 | * without creating any illegal irq-safe -> irq-unsafe lock dependency. |
1088 | */ | 1213 | */ |
1089 | static enum lock_usage_bit find_usage_bit; | 1214 | |
1090 | static struct lock_class *forwards_match, *backwards_match; | 1215 | static inline int usage_match(struct lock_list *entry, void *bit) |
1216 | { | ||
1217 | return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit); | ||
1218 | } | ||
1219 | |||
1220 | |||
1091 | 1221 | ||
1092 | /* | 1222 | /* |
1093 | * Find a node in the forwards-direction dependency sub-graph starting | 1223 | * Find a node in the forwards-direction dependency sub-graph starting |
1094 | * at <source> that matches <find_usage_bit>. | 1224 | * at @root->class that matches @bit. |
1095 | * | 1225 | * |
1096 | * Return 2 if such a node exists in the subgraph, and put that node | 1226 | * Return 0 if such a node exists in the subgraph, and put that node |
1097 | * into <forwards_match>. | 1227 | * into *@target_entry. |
1098 | * | 1228 | * |
1099 | * Return 1 otherwise and keep <forwards_match> unchanged. | 1229 | * Return 1 otherwise and keep *@target_entry unchanged. |
1100 | * Return 0 on error. | 1230 | * Return <0 on error. |
1101 | */ | 1231 | */ |
1102 | static noinline int | 1232 | static int |
1103 | find_usage_forwards(struct lock_class *source, unsigned int depth) | 1233 | find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit, |
1234 | struct lock_list **target_entry) | ||
1104 | { | 1235 | { |
1105 | struct lock_list *entry; | 1236 | int result; |
1106 | int ret; | ||
1107 | |||
1108 | if (lockdep_dependency_visit(source, depth)) | ||
1109 | return 1; | ||
1110 | |||
1111 | if (depth > max_recursion_depth) | ||
1112 | max_recursion_depth = depth; | ||
1113 | if (depth >= RECURSION_LIMIT) | ||
1114 | return print_infinite_recursion_bug(); | ||
1115 | 1237 | ||
1116 | debug_atomic_inc(&nr_find_usage_forwards_checks); | 1238 | debug_atomic_inc(&nr_find_usage_forwards_checks); |
1117 | if (source->usage_mask & (1 << find_usage_bit)) { | ||
1118 | forwards_match = source; | ||
1119 | return 2; | ||
1120 | } | ||
1121 | 1239 | ||
1122 | /* | 1240 | result = __bfs_forwards(root, (void *)bit, usage_match, target_entry); |
1123 | * Check this lock's dependency list: | 1241 | |
1124 | */ | 1242 | return result; |
1125 | list_for_each_entry(entry, &source->locks_after, entry) { | ||
1126 | debug_atomic_inc(&nr_find_usage_forwards_recursions); | ||
1127 | ret = find_usage_forwards(entry->class, depth+1); | ||
1128 | if (ret == 2 || ret == 0) | ||
1129 | return ret; | ||
1130 | } | ||
1131 | return 1; | ||
1132 | } | 1243 | } |
1133 | 1244 | ||
1134 | /* | 1245 | /* |
1135 | * Find a node in the backwards-direction dependency sub-graph starting | 1246 | * Find a node in the backwards-direction dependency sub-graph starting |
1136 | * at <source> that matches <find_usage_bit>. | 1247 | * at @root->class that matches @bit. |
1137 | * | 1248 | * |
1138 | * Return 2 if such a node exists in the subgraph, and put that node | 1249 | * Return 0 if such a node exists in the subgraph, and put that node |
1139 | * into <backwards_match>. | 1250 | * into *@target_entry. |
1140 | * | 1251 | * |
1141 | * Return 1 otherwise and keep <backwards_match> unchanged. | 1252 | * Return 1 otherwise and keep *@target_entry unchanged. |
1142 | * Return 0 on error. | 1253 | * Return <0 on error. |
1143 | */ | 1254 | */ |
1144 | static noinline int | 1255 | static int |
1145 | find_usage_backwards(struct lock_class *source, unsigned int depth) | 1256 | find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit, |
1257 | struct lock_list **target_entry) | ||
1146 | { | 1258 | { |
1147 | struct lock_list *entry; | 1259 | int result; |
1148 | int ret; | ||
1149 | 1260 | ||
1150 | if (lockdep_dependency_visit(source, depth)) | 1261 | debug_atomic_inc(&nr_find_usage_backwards_checks); |
1151 | return 1; | ||
1152 | 1262 | ||
1153 | if (!__raw_spin_is_locked(&lockdep_lock)) | 1263 | result = __bfs_backwards(root, (void *)bit, usage_match, target_entry); |
1154 | return DEBUG_LOCKS_WARN_ON(1); | ||
1155 | 1264 | ||
1156 | if (depth > max_recursion_depth) | 1265 | return result; |
1157 | max_recursion_depth = depth; | 1266 | } |
1158 | if (depth >= RECURSION_LIMIT) | ||
1159 | return print_infinite_recursion_bug(); | ||
1160 | 1267 | ||
1161 | debug_atomic_inc(&nr_find_usage_backwards_checks); | 1268 | static void print_lock_class_header(struct lock_class *class, int depth) |
1162 | if (source->usage_mask & (1 << find_usage_bit)) { | 1269 | { |
1163 | backwards_match = source; | 1270 | int bit; |
1164 | return 2; | ||
1165 | } | ||
1166 | 1271 | ||
1167 | if (!source && debug_locks_off_graph_unlock()) { | 1272 | printk("%*s->", depth, ""); |
1168 | WARN_ON(1); | 1273 | print_lock_name(class); |
1169 | return 0; | 1274 | printk(" ops: %lu", class->ops); |
1170 | } | 1275 | printk(" {\n"); |
1171 | 1276 | ||
1172 | /* | 1277 | for (bit = 0; bit < LOCK_USAGE_STATES; bit++) { |
1173 | * Check this lock's dependency list: | 1278 | if (class->usage_mask & (1 << bit)) { |
1174 | */ | 1279 | int len = depth; |
1175 | list_for_each_entry(entry, &source->locks_before, entry) { | 1280 | |
1176 | debug_atomic_inc(&nr_find_usage_backwards_recursions); | 1281 | len += printk("%*s %s", depth, "", usage_str[bit]); |
1177 | ret = find_usage_backwards(entry->class, depth+1); | 1282 | len += printk(" at:\n"); |
1178 | if (ret == 2 || ret == 0) | 1283 | print_stack_trace(class->usage_traces + bit, len); |
1179 | return ret; | 1284 | } |
1180 | } | 1285 | } |
1181 | return 1; | 1286 | printk("%*s }\n", depth, ""); |
1287 | |||
1288 | printk("%*s ... key at: ",depth,""); | ||
1289 | print_ip_sym((unsigned long)class->key); | ||
1290 | } | ||
1291 | |||
1292 | /* | ||
1293 | * printk the shortest lock dependencies from @leaf to @root, in reverse order: | ||
1294 | */ | ||
1295 | static void __used | ||
1296 | print_shortest_lock_dependencies(struct lock_list *leaf, | ||
1297 | struct lock_list *root) | ||
1298 | { | ||
1299 | struct lock_list *entry = leaf; | ||
1300 | int depth; | ||
1301 | |||
1302 | /* compute the depth from the tree generated by BFS */ | ||
1303 | depth = get_lock_depth(leaf); | ||
1304 | |||
1305 | do { | ||
1306 | print_lock_class_header(entry->class, depth); | ||
1307 | printk("%*s ... acquired at:\n", depth, ""); | ||
1308 | print_stack_trace(&entry->trace, 2); | ||
1309 | printk("\n"); | ||
1310 | |||
1311 | if (depth == 0 && (entry != root)) { | ||
1312 | printk("lockdep:%s bad BFS generated tree\n", __func__); | ||
1313 | break; | ||
1314 | } | ||
1315 | |||
1316 | entry = get_lock_parent(entry); | ||
1317 | depth--; | ||
1318 | } while (entry && (depth >= 0)); | ||
1319 | |||
1320 | return; | ||
1182 | } | 1321 | } |
1183 | 1322 | ||
1184 | static int | 1323 | static int |
1185 | print_bad_irq_dependency(struct task_struct *curr, | 1324 | print_bad_irq_dependency(struct task_struct *curr, |
1325 | struct lock_list *prev_root, | ||
1326 | struct lock_list *next_root, | ||
1327 | struct lock_list *backwards_entry, | ||
1328 | struct lock_list *forwards_entry, | ||
1186 | struct held_lock *prev, | 1329 | struct held_lock *prev, |
1187 | struct held_lock *next, | 1330 | struct held_lock *next, |
1188 | enum lock_usage_bit bit1, | 1331 | enum lock_usage_bit bit1, |
@@ -1215,26 +1358,32 @@ print_bad_irq_dependency(struct task_struct *curr, | |||
1215 | 1358 | ||
1216 | printk("\nbut this new dependency connects a %s-irq-safe lock:\n", | 1359 | printk("\nbut this new dependency connects a %s-irq-safe lock:\n", |
1217 | irqclass); | 1360 | irqclass); |
1218 | print_lock_name(backwards_match); | 1361 | print_lock_name(backwards_entry->class); |
1219 | printk("\n... which became %s-irq-safe at:\n", irqclass); | 1362 | printk("\n... which became %s-irq-safe at:\n", irqclass); |
1220 | 1363 | ||
1221 | print_stack_trace(backwards_match->usage_traces + bit1, 1); | 1364 | print_stack_trace(backwards_entry->class->usage_traces + bit1, 1); |
1222 | 1365 | ||
1223 | printk("\nto a %s-irq-unsafe lock:\n", irqclass); | 1366 | printk("\nto a %s-irq-unsafe lock:\n", irqclass); |
1224 | print_lock_name(forwards_match); | 1367 | print_lock_name(forwards_entry->class); |
1225 | printk("\n... which became %s-irq-unsafe at:\n", irqclass); | 1368 | printk("\n... which became %s-irq-unsafe at:\n", irqclass); |
1226 | printk("..."); | 1369 | printk("..."); |
1227 | 1370 | ||
1228 | print_stack_trace(forwards_match->usage_traces + bit2, 1); | 1371 | print_stack_trace(forwards_entry->class->usage_traces + bit2, 1); |
1229 | 1372 | ||
1230 | printk("\nother info that might help us debug this:\n\n"); | 1373 | printk("\nother info that might help us debug this:\n\n"); |
1231 | lockdep_print_held_locks(curr); | 1374 | lockdep_print_held_locks(curr); |
1232 | 1375 | ||
1233 | printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass); | 1376 | printk("\nthe dependencies between %s-irq-safe lock", irqclass); |
1234 | print_lock_dependencies(backwards_match, 0); | 1377 | printk(" and the holding lock:\n"); |
1378 | if (!save_trace(&prev_root->trace)) | ||
1379 | return 0; | ||
1380 | print_shortest_lock_dependencies(backwards_entry, prev_root); | ||
1235 | 1381 | ||
1236 | printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass); | 1382 | printk("\nthe dependencies between the lock to be acquired"); |
1237 | print_lock_dependencies(forwards_match, 0); | 1383 | printk(" and %s-irq-unsafe lock:\n", irqclass); |
1384 | if (!save_trace(&next_root->trace)) | ||
1385 | return 0; | ||
1386 | print_shortest_lock_dependencies(forwards_entry, next_root); | ||
1238 | 1387 | ||
1239 | printk("\nstack backtrace:\n"); | 1388 | printk("\nstack backtrace:\n"); |
1240 | dump_stack(); | 1389 | dump_stack(); |
@@ -1248,19 +1397,30 @@ check_usage(struct task_struct *curr, struct held_lock *prev, | |||
1248 | enum lock_usage_bit bit_forwards, const char *irqclass) | 1397 | enum lock_usage_bit bit_forwards, const char *irqclass) |
1249 | { | 1398 | { |
1250 | int ret; | 1399 | int ret; |
1400 | struct lock_list this, that; | ||
1401 | struct lock_list *uninitialized_var(target_entry); | ||
1402 | struct lock_list *uninitialized_var(target_entry1); | ||
1251 | 1403 | ||
1252 | find_usage_bit = bit_backwards; | 1404 | this.parent = NULL; |
1253 | /* fills in <backwards_match> */ | 1405 | |
1254 | ret = find_usage_backwards(hlock_class(prev), 0); | 1406 | this.class = hlock_class(prev); |
1255 | if (!ret || ret == 1) | 1407 | ret = find_usage_backwards(&this, bit_backwards, &target_entry); |
1408 | if (ret < 0) | ||
1409 | return print_bfs_bug(ret); | ||
1410 | if (ret == 1) | ||
1256 | return ret; | 1411 | return ret; |
1257 | 1412 | ||
1258 | find_usage_bit = bit_forwards; | 1413 | that.parent = NULL; |
1259 | ret = find_usage_forwards(hlock_class(next), 0); | 1414 | that.class = hlock_class(next); |
1260 | if (!ret || ret == 1) | 1415 | ret = find_usage_forwards(&that, bit_forwards, &target_entry1); |
1416 | if (ret < 0) | ||
1417 | return print_bfs_bug(ret); | ||
1418 | if (ret == 1) | ||
1261 | return ret; | 1419 | return ret; |
1262 | /* ret == 2 */ | 1420 | |
1263 | return print_bad_irq_dependency(curr, prev, next, | 1421 | return print_bad_irq_dependency(curr, &this, &that, |
1422 | target_entry, target_entry1, | ||
1423 | prev, next, | ||
1264 | bit_backwards, bit_forwards, irqclass); | 1424 | bit_backwards, bit_forwards, irqclass); |
1265 | } | 1425 | } |
1266 | 1426 | ||
@@ -1472,6 +1632,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
1472 | { | 1632 | { |
1473 | struct lock_list *entry; | 1633 | struct lock_list *entry; |
1474 | int ret; | 1634 | int ret; |
1635 | struct lock_list this; | ||
1636 | struct lock_list *uninitialized_var(target_entry); | ||
1475 | 1637 | ||
1476 | /* | 1638 | /* |
1477 | * Prove that the new <prev> -> <next> dependency would not | 1639 | * Prove that the new <prev> -> <next> dependency would not |
@@ -1482,10 +1644,13 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
1482 | * We are using global variables to control the recursion, to | 1644 | * We are using global variables to control the recursion, to |
1483 | * keep the stackframe size of the recursive functions low: | 1645 | * keep the stackframe size of the recursive functions low: |
1484 | */ | 1646 | */ |
1485 | check_source = next; | 1647 | this.class = hlock_class(next); |
1486 | check_target = prev; | 1648 | this.parent = NULL; |
1487 | if (!(check_noncircular(hlock_class(next), 0))) | 1649 | ret = check_noncircular(&this, hlock_class(prev), &target_entry); |
1488 | return print_circular_bug_tail(); | 1650 | if (unlikely(!ret)) |
1651 | return print_circular_bug(&this, target_entry, next, prev); | ||
1652 | else if (unlikely(ret < 0)) | ||
1653 | return print_bfs_bug(ret); | ||
1489 | 1654 | ||
1490 | if (!check_prev_add_irq(curr, prev, next)) | 1655 | if (!check_prev_add_irq(curr, prev, next)) |
1491 | return 0; | 1656 | return 0; |
@@ -1884,7 +2049,8 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, | |||
1884 | * print irq inversion bug: | 2049 | * print irq inversion bug: |
1885 | */ | 2050 | */ |
1886 | static int | 2051 | static int |
1887 | print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other, | 2052 | print_irq_inversion_bug(struct task_struct *curr, |
2053 | struct lock_list *root, struct lock_list *other, | ||
1888 | struct held_lock *this, int forwards, | 2054 | struct held_lock *this, int forwards, |
1889 | const char *irqclass) | 2055 | const char *irqclass) |
1890 | { | 2056 | { |
@@ -1902,17 +2068,16 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other, | |||
1902 | printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass); | 2068 | printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass); |
1903 | else | 2069 | else |
1904 | printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass); | 2070 | printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass); |
1905 | print_lock_name(other); | 2071 | print_lock_name(other->class); |
1906 | printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); | 2072 | printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); |
1907 | 2073 | ||
1908 | printk("\nother info that might help us debug this:\n"); | 2074 | printk("\nother info that might help us debug this:\n"); |
1909 | lockdep_print_held_locks(curr); | 2075 | lockdep_print_held_locks(curr); |
1910 | 2076 | ||
1911 | printk("\nthe first lock's dependencies:\n"); | 2077 | printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n"); |
1912 | print_lock_dependencies(hlock_class(this), 0); | 2078 | if (!save_trace(&root->trace)) |
1913 | 2079 | return 0; | |
1914 | printk("\nthe second lock's dependencies:\n"); | 2080 | print_shortest_lock_dependencies(other, root); |
1915 | print_lock_dependencies(other, 0); | ||
1916 | 2081 | ||
1917 | printk("\nstack backtrace:\n"); | 2082 | printk("\nstack backtrace:\n"); |
1918 | dump_stack(); | 2083 | dump_stack(); |
@@ -1929,14 +2094,19 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this, | |||
1929 | enum lock_usage_bit bit, const char *irqclass) | 2094 | enum lock_usage_bit bit, const char *irqclass) |
1930 | { | 2095 | { |
1931 | int ret; | 2096 | int ret; |
1932 | 2097 | struct lock_list root; | |
1933 | find_usage_bit = bit; | 2098 | struct lock_list *uninitialized_var(target_entry); |
1934 | /* fills in <forwards_match> */ | 2099 | |
1935 | ret = find_usage_forwards(hlock_class(this), 0); | 2100 | root.parent = NULL; |
1936 | if (!ret || ret == 1) | 2101 | root.class = hlock_class(this); |
2102 | ret = find_usage_forwards(&root, bit, &target_entry); | ||
2103 | if (ret < 0) | ||
2104 | return print_bfs_bug(ret); | ||
2105 | if (ret == 1) | ||
1937 | return ret; | 2106 | return ret; |
1938 | 2107 | ||
1939 | return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass); | 2108 | return print_irq_inversion_bug(curr, &root, target_entry, |
2109 | this, 1, irqclass); | ||
1940 | } | 2110 | } |
1941 | 2111 | ||
1942 | /* | 2112 | /* |
@@ -1948,14 +2118,19 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this, | |||
1948 | enum lock_usage_bit bit, const char *irqclass) | 2118 | enum lock_usage_bit bit, const char *irqclass) |
1949 | { | 2119 | { |
1950 | int ret; | 2120 | int ret; |
1951 | 2121 | struct lock_list root; | |
1952 | find_usage_bit = bit; | 2122 | struct lock_list *uninitialized_var(target_entry); |
1953 | /* fills in <backwards_match> */ | 2123 | |
1954 | ret = find_usage_backwards(hlock_class(this), 0); | 2124 | root.parent = NULL; |
1955 | if (!ret || ret == 1) | 2125 | root.class = hlock_class(this); |
2126 | ret = find_usage_backwards(&root, bit, &target_entry); | ||
2127 | if (ret < 0) | ||
2128 | return print_bfs_bug(ret); | ||
2129 | if (ret == 1) | ||
1956 | return ret; | 2130 | return ret; |
1957 | 2131 | ||
1958 | return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass); | 2132 | return print_irq_inversion_bug(curr, &root, target_entry, |
2133 | this, 0, irqclass); | ||
1959 | } | 2134 | } |
1960 | 2135 | ||
1961 | void print_irqtrace_events(struct task_struct *curr) | 2136 | void print_irqtrace_events(struct task_struct *curr) |
@@ -2530,13 +2705,15 @@ EXPORT_SYMBOL_GPL(lockdep_init_map); | |||
2530 | */ | 2705 | */ |
2531 | static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | 2706 | static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, |
2532 | int trylock, int read, int check, int hardirqs_off, | 2707 | int trylock, int read, int check, int hardirqs_off, |
2533 | struct lockdep_map *nest_lock, unsigned long ip) | 2708 | struct lockdep_map *nest_lock, unsigned long ip, |
2709 | int references) | ||
2534 | { | 2710 | { |
2535 | struct task_struct *curr = current; | 2711 | struct task_struct *curr = current; |
2536 | struct lock_class *class = NULL; | 2712 | struct lock_class *class = NULL; |
2537 | struct held_lock *hlock; | 2713 | struct held_lock *hlock; |
2538 | unsigned int depth, id; | 2714 | unsigned int depth, id; |
2539 | int chain_head = 0; | 2715 | int chain_head = 0; |
2716 | int class_idx; | ||
2540 | u64 chain_key; | 2717 | u64 chain_key; |
2541 | 2718 | ||
2542 | if (!prove_locking) | 2719 | if (!prove_locking) |
@@ -2584,10 +2761,24 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
2584 | if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) | 2761 | if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) |
2585 | return 0; | 2762 | return 0; |
2586 | 2763 | ||
2764 | class_idx = class - lock_classes + 1; | ||
2765 | |||
2766 | if (depth) { | ||
2767 | hlock = curr->held_locks + depth - 1; | ||
2768 | if (hlock->class_idx == class_idx && nest_lock) { | ||
2769 | if (hlock->references) | ||
2770 | hlock->references++; | ||
2771 | else | ||
2772 | hlock->references = 2; | ||
2773 | |||
2774 | return 1; | ||
2775 | } | ||
2776 | } | ||
2777 | |||
2587 | hlock = curr->held_locks + depth; | 2778 | hlock = curr->held_locks + depth; |
2588 | if (DEBUG_LOCKS_WARN_ON(!class)) | 2779 | if (DEBUG_LOCKS_WARN_ON(!class)) |
2589 | return 0; | 2780 | return 0; |
2590 | hlock->class_idx = class - lock_classes + 1; | 2781 | hlock->class_idx = class_idx; |
2591 | hlock->acquire_ip = ip; | 2782 | hlock->acquire_ip = ip; |
2592 | hlock->instance = lock; | 2783 | hlock->instance = lock; |
2593 | hlock->nest_lock = nest_lock; | 2784 | hlock->nest_lock = nest_lock; |
@@ -2595,6 +2786,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
2595 | hlock->read = read; | 2786 | hlock->read = read; |
2596 | hlock->check = check; | 2787 | hlock->check = check; |
2597 | hlock->hardirqs_off = !!hardirqs_off; | 2788 | hlock->hardirqs_off = !!hardirqs_off; |
2789 | hlock->references = references; | ||
2598 | #ifdef CONFIG_LOCK_STAT | 2790 | #ifdef CONFIG_LOCK_STAT |
2599 | hlock->waittime_stamp = 0; | 2791 | hlock->waittime_stamp = 0; |
2600 | hlock->holdtime_stamp = sched_clock(); | 2792 | hlock->holdtime_stamp = sched_clock(); |
@@ -2703,6 +2895,30 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock, | |||
2703 | return 1; | 2895 | return 1; |
2704 | } | 2896 | } |
2705 | 2897 | ||
2898 | static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock) | ||
2899 | { | ||
2900 | if (hlock->instance == lock) | ||
2901 | return 1; | ||
2902 | |||
2903 | if (hlock->references) { | ||
2904 | struct lock_class *class = lock->class_cache; | ||
2905 | |||
2906 | if (!class) | ||
2907 | class = look_up_lock_class(lock, 0); | ||
2908 | |||
2909 | if (DEBUG_LOCKS_WARN_ON(!class)) | ||
2910 | return 0; | ||
2911 | |||
2912 | if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock)) | ||
2913 | return 0; | ||
2914 | |||
2915 | if (hlock->class_idx == class - lock_classes + 1) | ||
2916 | return 1; | ||
2917 | } | ||
2918 | |||
2919 | return 0; | ||
2920 | } | ||
2921 | |||
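What match_held_lock() buys: when many same-class locks are taken under one outer "nest" lock, a repeat acquisition no longer pushes a fresh held_lock entry (which would quickly hit MAX_LOCK_DEPTH); the existing entry's reference count is bumped instead, and releases are matched back by class. A hedged usage sketch, with every name invented for illustration:

        /* sketch of nest-lock reference counting; all names hypothetical */
        #include <linux/mutex.h>
        #include <linux/spinlock.h>

        struct obj { spinlock_t lock; };

        static DEFINE_MUTEX(big_lock);

        static void obj_init(struct obj *o)
        {
                spin_lock_init(&o->lock);       /* one call site => one lock class */
        }

        static void lock_two(struct obj *a, struct obj *b)
        {
                mutex_lock(&big_lock);                          /* the nest lock */
                spin_lock_nest_lock(&a->lock, &big_lock);       /* new held_lock entry */
                spin_lock_nest_lock(&b->lock, &big_lock);       /* same class, same nest
                                                                 * lock: no new entry,
                                                                 * references becomes 2 */
        }

        static void unlock_two(struct obj *a, struct obj *b)
        {
                spin_unlock(&b->lock);          /* references drops back to 1 */
                spin_unlock(&a->lock);          /* entry actually popped */
                mutex_unlock(&big_lock);
        }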
2706 | static int | 2922 | static int |
2707 | __lock_set_class(struct lockdep_map *lock, const char *name, | 2923 | __lock_set_class(struct lockdep_map *lock, const char *name, |
2708 | struct lock_class_key *key, unsigned int subclass, | 2924 | struct lock_class_key *key, unsigned int subclass, |
@@ -2726,7 +2942,7 @@ __lock_set_class(struct lockdep_map *lock, const char *name, | |||
2726 | */ | 2942 | */ |
2727 | if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) | 2943 | if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) |
2728 | break; | 2944 | break; |
2729 | if (hlock->instance == lock) | 2945 | if (match_held_lock(hlock, lock)) |
2730 | goto found_it; | 2946 | goto found_it; |
2731 | prev_hlock = hlock; | 2947 | prev_hlock = hlock; |
2732 | } | 2948 | } |
@@ -2745,7 +2961,8 @@ found_it: | |||
2745 | if (!__lock_acquire(hlock->instance, | 2961 | if (!__lock_acquire(hlock->instance, |
2746 | hlock_class(hlock)->subclass, hlock->trylock, | 2962 | hlock_class(hlock)->subclass, hlock->trylock, |
2747 | hlock->read, hlock->check, hlock->hardirqs_off, | 2963 | hlock->read, hlock->check, hlock->hardirqs_off, |
2748 | hlock->nest_lock, hlock->acquire_ip)) | 2964 | hlock->nest_lock, hlock->acquire_ip, |
2965 | hlock->references)) | ||
2749 | return 0; | 2966 | return 0; |
2750 | } | 2967 | } |
2751 | 2968 | ||
@@ -2784,20 +3001,34 @@ lock_release_non_nested(struct task_struct *curr, | |||
2784 | */ | 3001 | */ |
2785 | if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) | 3002 | if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) |
2786 | break; | 3003 | break; |
2787 | if (hlock->instance == lock) | 3004 | if (match_held_lock(hlock, lock)) |
2788 | goto found_it; | 3005 | goto found_it; |
2789 | prev_hlock = hlock; | 3006 | prev_hlock = hlock; |
2790 | } | 3007 | } |
2791 | return print_unlock_inbalance_bug(curr, lock, ip); | 3008 | return print_unlock_inbalance_bug(curr, lock, ip); |
2792 | 3009 | ||
2793 | found_it: | 3010 | found_it: |
2794 | lock_release_holdtime(hlock); | 3011 | if (hlock->instance == lock) |
3012 | lock_release_holdtime(hlock); | ||
3013 | |||
3014 | if (hlock->references) { | ||
3015 | hlock->references--; | ||
3016 | if (hlock->references) { | ||
3017 | /* | ||
3018 | * We had, and after removing one, still have | ||
3019 | * references, the current lock stack is still | ||
3020 | * valid. We're done! | ||
3021 | */ | ||
3022 | return 1; | ||
3023 | } | ||
3024 | } | ||
2795 | 3025 | ||
2796 | /* | 3026 | /* |
2797 | * We have the right lock to unlock, 'hlock' points to it. | 3027 | * We have the right lock to unlock, 'hlock' points to it. |
2798 | * Now we remove it from the stack, and add back the other | 3028 | * Now we remove it from the stack, and add back the other |
2799 | * entries (if any), recalculating the hash along the way: | 3029 | * entries (if any), recalculating the hash along the way: |
2800 | */ | 3030 | */ |
3031 | |||
2801 | curr->lockdep_depth = i; | 3032 | curr->lockdep_depth = i; |
2802 | curr->curr_chain_key = hlock->prev_chain_key; | 3033 | curr->curr_chain_key = hlock->prev_chain_key; |
2803 | 3034 | ||
@@ -2806,7 +3037,8 @@ found_it: | |||
2806 | if (!__lock_acquire(hlock->instance, | 3037 | if (!__lock_acquire(hlock->instance, |
2807 | hlock_class(hlock)->subclass, hlock->trylock, | 3038 | hlock_class(hlock)->subclass, hlock->trylock, |
2808 | hlock->read, hlock->check, hlock->hardirqs_off, | 3039 | hlock->read, hlock->check, hlock->hardirqs_off, |
2809 | hlock->nest_lock, hlock->acquire_ip)) | 3040 | hlock->nest_lock, hlock->acquire_ip, |
3041 | hlock->references)) | ||
2810 | return 0; | 3042 | return 0; |
2811 | } | 3043 | } |
2812 | 3044 | ||
@@ -2836,7 +3068,7 @@ static int lock_release_nested(struct task_struct *curr, | |||
2836 | /* | 3068 | /* |
2837 | * Is the unlock non-nested: | 3069 | * Is the unlock non-nested: |
2838 | */ | 3070 | */ |
2839 | if (hlock->instance != lock) | 3071 | if (hlock->instance != lock || hlock->references) |
2840 | return lock_release_non_nested(curr, lock, ip); | 3072 | return lock_release_non_nested(curr, lock, ip); |
2841 | curr->lockdep_depth--; | 3073 | curr->lockdep_depth--; |
2842 | 3074 | ||
@@ -2881,6 +3113,21 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip) | |||
2881 | check_chain_key(curr); | 3113 | check_chain_key(curr); |
2882 | } | 3114 | } |
2883 | 3115 | ||
3116 | static int __lock_is_held(struct lockdep_map *lock) | ||
3117 | { | ||
3118 | struct task_struct *curr = current; | ||
3119 | int i; | ||
3120 | |||
3121 | for (i = 0; i < curr->lockdep_depth; i++) { | ||
3122 | struct held_lock *hlock = curr->held_locks + i; | ||
3123 | |||
3124 | if (match_held_lock(hlock, lock)) | ||
3125 | return 1; | ||
3126 | } | ||
3127 | |||
3128 | return 0; | ||
3129 | } | ||
3130 | |||
2884 | /* | 3131 | /* |
2885 | * Check whether we follow the irq-flags state precisely: | 3132 | * Check whether we follow the irq-flags state precisely: |
2886 | */ | 3133 | */ |
@@ -2957,7 +3204,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
2957 | 3204 | ||
2958 | current->lockdep_recursion = 1; | 3205 | current->lockdep_recursion = 1; |
2959 | __lock_acquire(lock, subclass, trylock, read, check, | 3206 | __lock_acquire(lock, subclass, trylock, read, check, |
2960 | irqs_disabled_flags(flags), nest_lock, ip); | 3207 | irqs_disabled_flags(flags), nest_lock, ip, 0); |
2961 | current->lockdep_recursion = 0; | 3208 | current->lockdep_recursion = 0; |
2962 | raw_local_irq_restore(flags); | 3209 | raw_local_irq_restore(flags); |
2963 | } | 3210 | } |
@@ -2982,6 +3229,26 @@ void lock_release(struct lockdep_map *lock, int nested, | |||
2982 | } | 3229 | } |
2983 | EXPORT_SYMBOL_GPL(lock_release); | 3230 | EXPORT_SYMBOL_GPL(lock_release); |
2984 | 3231 | ||
3232 | int lock_is_held(struct lockdep_map *lock) | ||
3233 | { | ||
3234 | unsigned long flags; | ||
3235 | int ret = 0; | ||
3236 | |||
3237 | if (unlikely(current->lockdep_recursion)) | ||
3238 | return ret; | ||
3239 | |||
3240 | raw_local_irq_save(flags); | ||
3241 | check_flags(flags); | ||
3242 | |||
3243 | current->lockdep_recursion = 1; | ||
3244 | ret = __lock_is_held(lock); | ||
3245 | current->lockdep_recursion = 0; | ||
3246 | raw_local_irq_restore(flags); | ||
3247 | |||
3248 | return ret; | ||
3249 | } | ||
3250 | EXPORT_SYMBOL_GPL(lock_is_held); | ||
3251 | |||
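lock_is_held() is the query primitive behind assertions such as lockdep_assert_held() elsewhere in this series, turning a "caller must hold the lock" comment into a runtime check under lockdep. A minimal sketch, assuming a hypothetical driver structure:

        /* illustrative sketch; struct my_dev and its users are invented */
        #include <linux/lockdep.h>
        #include <linux/spinlock.h>

        struct my_dev {
                spinlock_t lock;
                int state;
        };

        /* contract: caller holds dev->lock; lockdep WARNs if it does not */
        static void my_dev_bump(struct my_dev *dev)
        {
                lockdep_assert_held(&dev->lock);
                dev->state++;
        }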
2985 | void lockdep_set_current_reclaim_state(gfp_t gfp_mask) | 3252 | void lockdep_set_current_reclaim_state(gfp_t gfp_mask) |
2986 | { | 3253 | { |
2987 | current->lockdep_reclaim_gfp = gfp_mask; | 3254 | current->lockdep_reclaim_gfp = gfp_mask; |
@@ -3041,7 +3308,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip) | |||
3041 | */ | 3308 | */ |
3042 | if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) | 3309 | if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) |
3043 | break; | 3310 | break; |
3044 | if (hlock->instance == lock) | 3311 | if (match_held_lock(hlock, lock)) |
3045 | goto found_it; | 3312 | goto found_it; |
3046 | prev_hlock = hlock; | 3313 | prev_hlock = hlock; |
3047 | } | 3314 | } |
@@ -3049,6 +3316,9 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip) | |||
3049 | return; | 3316 | return; |
3050 | 3317 | ||
3051 | found_it: | 3318 | found_it: |
3319 | if (hlock->instance != lock) | ||
3320 | return; | ||
3321 | |||
3052 | hlock->waittime_stamp = sched_clock(); | 3322 | hlock->waittime_stamp = sched_clock(); |
3053 | 3323 | ||
3054 | contention_point = lock_point(hlock_class(hlock)->contention_point, ip); | 3324 | contention_point = lock_point(hlock_class(hlock)->contention_point, ip); |
@@ -3088,7 +3358,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip) | |||
3088 | */ | 3358 | */ |
3089 | if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) | 3359 | if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) |
3090 | break; | 3360 | break; |
3091 | if (hlock->instance == lock) | 3361 | if (match_held_lock(hlock, lock)) |
3092 | goto found_it; | 3362 | goto found_it; |
3093 | prev_hlock = hlock; | 3363 | prev_hlock = hlock; |
3094 | } | 3364 | } |
@@ -3096,6 +3366,9 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip) | |||
3096 | return; | 3366 | return; |
3097 | 3367 | ||
3098 | found_it: | 3368 | found_it: |
3369 | if (hlock->instance != lock) | ||
3370 | return; | ||
3371 | |||
3099 | cpu = smp_processor_id(); | 3372 | cpu = smp_processor_id(); |
3100 | if (hlock->waittime_stamp) { | 3373 | if (hlock->waittime_stamp) { |
3101 | now = sched_clock(); | 3374 | now = sched_clock(); |
@@ -3326,7 +3599,12 @@ void __init lockdep_info(void) | |||
3326 | sizeof(struct list_head) * CLASSHASH_SIZE + | 3599 | sizeof(struct list_head) * CLASSHASH_SIZE + |
3327 | sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES + | 3600 | sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES + |
3328 | sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS + | 3601 | sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS + |
3329 | sizeof(struct list_head) * CHAINHASH_SIZE) / 1024); | 3602 | sizeof(struct list_head) * CHAINHASH_SIZE |
3603 | #ifdef CONFIG_PROVE_LOCKING | ||
3604 | + sizeof(struct circular_queue) | ||
3605 | #endif | ||
3606 | ) / 1024 | ||
3607 | ); | ||
3330 | 3608 | ||
3331 | printk(" per task-struct memory footprint: %lu bytes\n", | 3609 | printk(" per task-struct memory footprint: %lu bytes\n", |
3332 | sizeof(struct held_lock) * MAX_LOCK_DEPTH); | 3610 | sizeof(struct held_lock) * MAX_LOCK_DEPTH); |
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h index 699a2ac3a0d7..a2ee95ad1313 100644 --- a/kernel/lockdep_internals.h +++ b/kernel/lockdep_internals.h | |||
@@ -91,6 +91,8 @@ extern unsigned int nr_process_chains; | |||
91 | extern unsigned int max_lockdep_depth; | 91 | extern unsigned int max_lockdep_depth; |
92 | extern unsigned int max_recursion_depth; | 92 | extern unsigned int max_recursion_depth; |
93 | 93 | ||
94 | extern unsigned int max_bfs_queue_depth; | ||
95 | |||
94 | #ifdef CONFIG_PROVE_LOCKING | 96 | #ifdef CONFIG_PROVE_LOCKING |
95 | extern unsigned long lockdep_count_forward_deps(struct lock_class *); | 97 | extern unsigned long lockdep_count_forward_deps(struct lock_class *); |
96 | extern unsigned long lockdep_count_backward_deps(struct lock_class *); | 98 | extern unsigned long lockdep_count_backward_deps(struct lock_class *); |
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c index e94caa666dba..d4b3dbc79fdb 100644 --- a/kernel/lockdep_proc.c +++ b/kernel/lockdep_proc.c | |||
@@ -25,38 +25,12 @@ | |||
25 | 25 | ||
26 | static void *l_next(struct seq_file *m, void *v, loff_t *pos) | 26 | static void *l_next(struct seq_file *m, void *v, loff_t *pos) |
27 | { | 27 | { |
28 | struct lock_class *class; | 28 | return seq_list_next(v, &all_lock_classes, pos); |
29 | |||
30 | (*pos)++; | ||
31 | |||
32 | if (v == SEQ_START_TOKEN) | ||
33 | class = m->private; | ||
34 | else { | ||
35 | class = v; | ||
36 | |||
37 | if (class->lock_entry.next != &all_lock_classes) | ||
38 | class = list_entry(class->lock_entry.next, | ||
39 | struct lock_class, lock_entry); | ||
40 | else | ||
41 | class = NULL; | ||
42 | } | ||
43 | |||
44 | return class; | ||
45 | } | 29 | } |
46 | 30 | ||
47 | static void *l_start(struct seq_file *m, loff_t *pos) | 31 | static void *l_start(struct seq_file *m, loff_t *pos) |
48 | { | 32 | { |
49 | struct lock_class *class; | 33 | return seq_list_start_head(&all_lock_classes, *pos); |
50 | loff_t i = 0; | ||
51 | |||
52 | if (*pos == 0) | ||
53 | return SEQ_START_TOKEN; | ||
54 | |||
55 | list_for_each_entry(class, &all_lock_classes, lock_entry) { | ||
56 | if (++i == *pos) | ||
57 | return class; | ||
58 | } | ||
59 | return NULL; | ||
60 | } | 34 | } |
61 | 35 | ||
62 | static void l_stop(struct seq_file *m, void *v) | 36 | static void l_stop(struct seq_file *m, void *v) |
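The rewrite above leans on the generic seq_list helpers, which reuse the list head itself as the position-0 "header" token (hence the v == &all_lock_classes test in the l_show() hunk below). The generic shape of the pattern, sketched with invented names:

        /* sketch of the seq_list_* iterator pattern; my_* names invented */
        #include <linux/list.h>
        #include <linux/seq_file.h>

        struct my_item {
                struct list_head link;
                int value;
        };

        static LIST_HEAD(my_list);

        static void *my_start(struct seq_file *m, loff_t *pos)
        {
                /* *pos == 0 yields &my_list itself: the header token */
                return seq_list_start_head(&my_list, *pos);
        }

        static void *my_next(struct seq_file *m, void *v, loff_t *pos)
        {
                return seq_list_next(v, &my_list, pos);
        }

        static int my_show(struct seq_file *m, void *v)
        {
                struct my_item *item;

                if (v == &my_list) {            /* header token, not an element */
                        seq_printf(m, "my items:\n");
                        return 0;
                }
                item = list_entry(v, struct my_item, link);
                seq_printf(m, "%d\n", item->value);
                return 0;
        }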
@@ -82,11 +56,11 @@ static void print_name(struct seq_file *m, struct lock_class *class) | |||
82 | 56 | ||
83 | static int l_show(struct seq_file *m, void *v) | 57 | static int l_show(struct seq_file *m, void *v) |
84 | { | 58 | { |
85 | struct lock_class *class = v; | 59 | struct lock_class *class = list_entry(v, struct lock_class, lock_entry); |
86 | struct lock_list *entry; | 60 | struct lock_list *entry; |
87 | char usage[LOCK_USAGE_CHARS]; | 61 | char usage[LOCK_USAGE_CHARS]; |
88 | 62 | ||
89 | if (v == SEQ_START_TOKEN) { | 63 | if (v == &all_lock_classes) { |
90 | seq_printf(m, "all lock classes:\n"); | 64 | seq_printf(m, "all lock classes:\n"); |
91 | return 0; | 65 | return 0; |
92 | } | 66 | } |
@@ -128,17 +102,7 @@ static const struct seq_operations lockdep_ops = { | |||
128 | 102 | ||
129 | static int lockdep_open(struct inode *inode, struct file *file) | 103 | static int lockdep_open(struct inode *inode, struct file *file) |
130 | { | 104 | { |
131 | int res = seq_open(file, &lockdep_ops); | 105 | return seq_open(file, &lockdep_ops); |
132 | if (!res) { | ||
133 | struct seq_file *m = file->private_data; | ||
134 | |||
135 | if (!list_empty(&all_lock_classes)) | ||
136 | m->private = list_entry(all_lock_classes.next, | ||
137 | struct lock_class, lock_entry); | ||
138 | else | ||
139 | m->private = NULL; | ||
140 | } | ||
141 | return res; | ||
142 | } | 106 | } |
143 | 107 | ||
144 | static const struct file_operations proc_lockdep_operations = { | 108 | static const struct file_operations proc_lockdep_operations = { |
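With the iterators deriving everything from *pos, open no longer needs to stash a starting element in m->private, so it collapses to a bare seq_open(). The minimal wiring for such a stateless seq_file interface, sketched with hypothetical names:

	static int foo_open(struct inode *inode, struct file *file)
	{
		return seq_open(file, &foo_seq_ops);	/* no per-open state */
	}

	static const struct file_operations foo_fops = {
		.open		= foo_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= seq_release,
	};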
@@ -149,37 +113,23 @@ static const struct file_operations proc_lockdep_operations = { | |||
149 | }; | 113 | }; |
150 | 114 | ||
151 | #ifdef CONFIG_PROVE_LOCKING | 115 | #ifdef CONFIG_PROVE_LOCKING |
152 | static void *lc_next(struct seq_file *m, void *v, loff_t *pos) | ||
153 | { | ||
154 | struct lock_chain *chain; | ||
155 | |||
156 | (*pos)++; | ||
157 | |||
158 | if (v == SEQ_START_TOKEN) | ||
159 | chain = m->private; | ||
160 | else { | ||
161 | chain = v; | ||
162 | |||
163 | if (*pos < nr_lock_chains) | ||
164 | chain = lock_chains + *pos; | ||
165 | else | ||
166 | chain = NULL; | ||
167 | } | ||
168 | |||
169 | return chain; | ||
170 | } | ||
171 | |||
172 | static void *lc_start(struct seq_file *m, loff_t *pos) | 116 | static void *lc_start(struct seq_file *m, loff_t *pos) |
173 | { | 117 | { |
174 | if (*pos == 0) | 118 | if (*pos == 0) |
175 | return SEQ_START_TOKEN; | 119 | return SEQ_START_TOKEN; |
176 | 120 | ||
177 | if (*pos < nr_lock_chains) | 121 | if (*pos - 1 < nr_lock_chains) |
178 | return lock_chains + *pos; | 122 | return lock_chains + (*pos - 1); |
179 | 123 | ||
180 | return NULL; | 124 | return NULL; |
181 | } | 125 | } |
182 | 126 | ||
127 | static void *lc_next(struct seq_file *m, void *v, loff_t *pos) | ||
128 | { | ||
129 | (*pos)++; | ||
130 | return lc_start(m, pos); | ||
131 | } | ||
132 | |||
183 | static void lc_stop(struct seq_file *m, void *v) | 133 | static void lc_stop(struct seq_file *m, void *v) |
184 | { | 134 | { |
185 | } | 135 | } |
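This hunk is the substance of the /proc/lock_chains fix: with SEQ_START_TOKEN occupying position 0, array element n has to live at position n + 1, so lc_start() indexes with *pos - 1 (the old code returned lock_chains + *pos and therefore never emitted chain 0). Defining lc_next() as "advance *pos, rerun lc_start()" keeps the bounds check in one place. The same pattern works for any static array exported through a seq_file; a sketch with hypothetical names:

	static struct item items[NR_ITEMS];	/* hypothetical array */
	static unsigned long nr_items;

	static void *item_start(struct seq_file *m, loff_t *pos)
	{
		if (*pos == 0)
			return SEQ_START_TOKEN;	/* header line */
		if (*pos - 1 < nr_items)	/* element n sits at position n + 1 */
			return items + (*pos - 1);
		return NULL;
	}

	static void *item_next(struct seq_file *m, void *v, loff_t *pos)
	{
		(*pos)++;
		return item_start(m, pos);	/* one source of truth for bounds */
	}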
@@ -220,16 +170,7 @@ static const struct seq_operations lockdep_chains_ops = { | |||
220 | 170 | ||
221 | static int lockdep_chains_open(struct inode *inode, struct file *file) | 171 | static int lockdep_chains_open(struct inode *inode, struct file *file) |
222 | { | 172 | { |
223 | int res = seq_open(file, &lockdep_chains_ops); | 173 | return seq_open(file, &lockdep_chains_ops); |
224 | if (!res) { | ||
225 | struct seq_file *m = file->private_data; | ||
226 | |||
227 | if (nr_lock_chains) | ||
228 | m->private = lock_chains; | ||
229 | else | ||
230 | m->private = NULL; | ||
231 | } | ||
232 | return res; | ||
233 | } | 174 | } |
234 | 175 | ||
235 | static const struct file_operations proc_lockdep_chains_operations = { | 176 | static const struct file_operations proc_lockdep_chains_operations = { |
@@ -258,16 +199,10 @@ static void lockdep_stats_debug_show(struct seq_file *m) | |||
258 | debug_atomic_read(&chain_lookup_hits)); | 199 | debug_atomic_read(&chain_lookup_hits)); |
259 | seq_printf(m, " cyclic checks: %11u\n", | 200 | seq_printf(m, " cyclic checks: %11u\n", |
260 | debug_atomic_read(&nr_cyclic_checks)); | 201 | debug_atomic_read(&nr_cyclic_checks)); |
261 | seq_printf(m, " cyclic-check recursions: %11u\n", | ||
262 | debug_atomic_read(&nr_cyclic_check_recursions)); | ||
263 | seq_printf(m, " find-mask forwards checks: %11u\n", | 202 | seq_printf(m, " find-mask forwards checks: %11u\n", |
264 | debug_atomic_read(&nr_find_usage_forwards_checks)); | 203 | debug_atomic_read(&nr_find_usage_forwards_checks)); |
265 | seq_printf(m, " find-mask forwards recursions: %11u\n", | ||
266 | debug_atomic_read(&nr_find_usage_forwards_recursions)); | ||
267 | seq_printf(m, " find-mask backwards checks: %11u\n", | 204 | seq_printf(m, " find-mask backwards checks: %11u\n", |
268 | debug_atomic_read(&nr_find_usage_backwards_checks)); | 205 | debug_atomic_read(&nr_find_usage_backwards_checks)); |
269 | seq_printf(m, " find-mask backwards recursions:%11u\n", | ||
270 | debug_atomic_read(&nr_find_usage_backwards_recursions)); | ||
271 | 206 | ||
272 | seq_printf(m, " hardirq on events: %11u\n", hi1); | 207 | seq_printf(m, " hardirq on events: %11u\n", hi1); |
273 | seq_printf(m, " hardirq off events: %11u\n", hi2); | 208 | seq_printf(m, " hardirq off events: %11u\n", hi2); |
@@ -409,8 +344,10 @@ static int lockdep_stats_show(struct seq_file *m, void *v) | |||
409 | nr_unused); | 344 | nr_unused); |
410 | seq_printf(m, " max locking depth: %11u\n", | 345 | seq_printf(m, " max locking depth: %11u\n", |
411 | max_lockdep_depth); | 346 | max_lockdep_depth); |
412 | seq_printf(m, " max recursion depth: %11u\n", | 347 | #ifdef CONFIG_PROVE_LOCKING |
413 | max_recursion_depth); | 348 | seq_printf(m, " max bfs queue depth: %11u\n", |
349 | max_bfs_queue_depth); | ||
350 | #endif | ||
414 | lockdep_stats_debug_show(m); | 351 | lockdep_stats_debug_show(m); |
415 | seq_printf(m, " debug_locks: %11u\n", | 352 | seq_printf(m, " debug_locks: %11u\n", |
416 | debug_locks); | 353 | debug_locks); |
@@ -438,7 +375,6 @@ struct lock_stat_data { | |||
438 | }; | 375 | }; |
439 | 376 | ||
440 | struct lock_stat_seq { | 377 | struct lock_stat_seq { |
441 | struct lock_stat_data *iter; | ||
442 | struct lock_stat_data *iter_end; | 378 | struct lock_stat_data *iter_end; |
443 | struct lock_stat_data stats[MAX_LOCKDEP_KEYS]; | 379 | struct lock_stat_data stats[MAX_LOCKDEP_KEYS]; |
444 | }; | 380 | }; |
@@ -626,34 +562,22 @@ static void seq_header(struct seq_file *m) | |||
626 | static void *ls_start(struct seq_file *m, loff_t *pos) | 562 | static void *ls_start(struct seq_file *m, loff_t *pos) |
627 | { | 563 | { |
628 | struct lock_stat_seq *data = m->private; | 564 | struct lock_stat_seq *data = m->private; |
565 | struct lock_stat_data *iter; | ||
629 | 566 | ||
630 | if (*pos == 0) | 567 | if (*pos == 0) |
631 | return SEQ_START_TOKEN; | 568 | return SEQ_START_TOKEN; |
632 | 569 | ||
633 | data->iter = data->stats + *pos; | 570 | iter = data->stats + (*pos - 1); |
634 | if (data->iter >= data->iter_end) | 571 | if (iter >= data->iter_end) |
635 | data->iter = NULL; | 572 | iter = NULL; |
636 | 573 | ||
637 | return data->iter; | 574 | return iter; |
638 | } | 575 | } |
639 | 576 | ||
640 | static void *ls_next(struct seq_file *m, void *v, loff_t *pos) | 577 | static void *ls_next(struct seq_file *m, void *v, loff_t *pos) |
641 | { | 578 | { |
642 | struct lock_stat_seq *data = m->private; | ||
643 | |||
644 | (*pos)++; | 579 | (*pos)++; |
645 | 580 | return ls_start(m, pos); | |
646 | if (v == SEQ_START_TOKEN) | ||
647 | data->iter = data->stats; | ||
648 | else { | ||
649 | data->iter = v; | ||
650 | data->iter++; | ||
651 | } | ||
652 | |||
653 | if (data->iter == data->iter_end) | ||
654 | data->iter = NULL; | ||
655 | |||
656 | return data->iter; | ||
657 | } | 581 | } |
658 | 582 | ||
659 | static void ls_stop(struct seq_file *m, void *v) | 583 | static void ls_stop(struct seq_file *m, void *v) |
@@ -691,7 +615,6 @@ static int lock_stat_open(struct inode *inode, struct file *file) | |||
691 | struct lock_stat_data *iter = data->stats; | 615 | struct lock_stat_data *iter = data->stats; |
692 | struct seq_file *m = file->private_data; | 616 | struct seq_file *m = file->private_data; |
693 | 617 | ||
694 | data->iter = iter; | ||
695 | list_for_each_entry(class, &all_lock_classes, lock_entry) { | 618 | list_for_each_entry(class, &all_lock_classes, lock_entry) { |
696 | iter->class = class; | 619 | iter->class = class; |
697 | iter->stats = lock_stats(class); | 620 | iter->stats = lock_stats(class); |
@@ -699,7 +622,7 @@ static int lock_stat_open(struct inode *inode, struct file *file) | |||
699 | } | 622 | } |
700 | data->iter_end = iter; | 623 | data->iter_end = iter; |
701 | 624 | ||
702 | sort(data->stats, data->iter_end - data->iter, | 625 | sort(data->stats, data->iter_end - data->stats, |
703 | sizeof(struct lock_stat_data), | 626 | sizeof(struct lock_stat_data), |
704 | lock_stat_cmp, NULL); | 627 | lock_stat_cmp, NULL); |
705 | 628 | ||
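ls_start()/ls_next() get the same restart-based treatment, and once the iter cursor is dropped from struct lock_stat_seq, the element count handed to sort() must be recomputed as iter_end - data->stats: pointer subtraction yields a count of elements, not bytes, which is what lib/sort.c expects alongside the per-element size. A sketch of the call with a hypothetical comparison function (the real lock_stat_cmp uses its own ordering):

	#include <linux/sort.h>

	static int stat_cmp(const void *a, const void *b)
	{
		const struct lock_stat_data *la = a, *lb = b;

		/* hypothetical ordering; return <0, 0, >0, qsort-style */
		return la->class < lb->class ? -1 : la->class > lb->class;
	}

	/* nelems = one-past-the-end pointer minus base pointer, in elements */
	sort(data->stats, data->iter_end - data->stats,
	     sizeof(struct lock_stat_data), stat_cmp, NULL);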
@@ -734,7 +657,6 @@ static int lock_stat_release(struct inode *inode, struct file *file) | |||
734 | struct seq_file *seq = file->private_data; | 657 | struct seq_file *seq = file->private_data; |
735 | 658 | ||
736 | vfree(seq->private); | 659 | vfree(seq->private); |
737 | seq->private = NULL; | ||
738 | return seq_release(inode, file); | 660 | return seq_release(inode, file); |
739 | } | 661 | } |
740 | 662 | ||
diff --git a/kernel/sched.c b/kernel/sched.c
index 1b59e265273b..2c75f7daa439 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6609,6 +6609,8 @@ int cond_resched_lock(spinlock_t *lock) | |||
6609 | int resched = should_resched(); | 6609 | int resched = should_resched(); |
6610 | int ret = 0; | 6610 | int ret = 0; |
6611 | 6611 | ||
6612 | lockdep_assert_held(lock); | ||
6613 | |||
6612 | if (spin_needbreak(lock) || resched) { | 6614 | if (spin_needbreak(lock) || resched) { |
6613 | spin_unlock(lock); | 6615 | spin_unlock(lock); |
6614 | if (resched) | 6616 | if (resched) |
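cond_resched_lock() is a natural user of the lockdep_assert_held() annotation introduced in this series: it documents, and under lockdep enforces, that the caller actually holds the lock about to be dropped and retaken. The same annotation suits any function with a "caller holds the lock" contract; a sketch with hypothetical names:

	static DEFINE_SPINLOCK(foo_lock);
	static int foo_count;

	/* must be called with foo_lock held */
	static void foo_bump(void)
	{
		lockdep_assert_held(&foo_lock);	/* warns if the contract is violated */
		foo_count++;
	}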
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 7932653c4ebd..5ddab730cb2f 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -21,44 +21,29 @@ | |||
21 | #include <linux/debug_locks.h> | 21 | #include <linux/debug_locks.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | 23 | ||
24 | #ifndef _spin_trylock | ||
24 | int __lockfunc _spin_trylock(spinlock_t *lock) | 25 | int __lockfunc _spin_trylock(spinlock_t *lock) |
25 | { | 26 | { |
26 | preempt_disable(); | 27 | return __spin_trylock(lock); |
27 | if (_raw_spin_trylock(lock)) { | ||
28 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
29 | return 1; | ||
30 | } | ||
31 | |||
32 | preempt_enable(); | ||
33 | return 0; | ||
34 | } | 28 | } |
35 | EXPORT_SYMBOL(_spin_trylock); | 29 | EXPORT_SYMBOL(_spin_trylock); |
30 | #endif | ||
36 | 31 | ||
32 | #ifndef _read_trylock | ||
37 | int __lockfunc _read_trylock(rwlock_t *lock) | 33 | int __lockfunc _read_trylock(rwlock_t *lock) |
38 | { | 34 | { |
39 | preempt_disable(); | 35 | return __read_trylock(lock); |
40 | if (_raw_read_trylock(lock)) { | ||
41 | rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); | ||
42 | return 1; | ||
43 | } | ||
44 | |||
45 | preempt_enable(); | ||
46 | return 0; | ||
47 | } | 36 | } |
48 | EXPORT_SYMBOL(_read_trylock); | 37 | EXPORT_SYMBOL(_read_trylock); |
38 | #endif | ||
49 | 39 | ||
40 | #ifndef _write_trylock | ||
50 | int __lockfunc _write_trylock(rwlock_t *lock) | 41 | int __lockfunc _write_trylock(rwlock_t *lock) |
51 | { | 42 | { |
52 | preempt_disable(); | 43 | return __write_trylock(lock); |
53 | if (_raw_write_trylock(lock)) { | ||
54 | rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
55 | return 1; | ||
56 | } | ||
57 | |||
58 | preempt_enable(); | ||
59 | return 0; | ||
60 | } | 44 | } |
61 | EXPORT_SYMBOL(_write_trylock); | 45 | EXPORT_SYMBOL(_write_trylock); |
46 | #endif | ||
62 | 47 | ||
63 | /* | 48 | /* |
64 | * If lockdep is enabled then we use the non-preemption spin-ops | 49 | * If lockdep is enabled then we use the non-preemption spin-ops |
@@ -67,132 +52,101 @@ EXPORT_SYMBOL(_write_trylock); | |||
67 | */ | 52 | */ |
68 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) | 53 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) |
69 | 54 | ||
55 | #ifndef _read_lock | ||
70 | void __lockfunc _read_lock(rwlock_t *lock) | 56 | void __lockfunc _read_lock(rwlock_t *lock) |
71 | { | 57 | { |
72 | preempt_disable(); | 58 | __read_lock(lock); |
73 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
74 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
75 | } | 59 | } |
76 | EXPORT_SYMBOL(_read_lock); | 60 | EXPORT_SYMBOL(_read_lock); |
61 | #endif | ||
77 | 62 | ||
63 | #ifndef _spin_lock_irqsave | ||
78 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) | 64 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) |
79 | { | 65 | { |
80 | unsigned long flags; | 66 | return __spin_lock_irqsave(lock); |
81 | |||
82 | local_irq_save(flags); | ||
83 | preempt_disable(); | ||
84 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
85 | /* | ||
86 | * On lockdep we dont want the hand-coded irq-enable of | ||
87 | * _raw_spin_lock_flags() code, because lockdep assumes | ||
88 | * that interrupts are not re-enabled during lock-acquire: | ||
89 | */ | ||
90 | #ifdef CONFIG_LOCKDEP | ||
91 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
92 | #else | ||
93 | _raw_spin_lock_flags(lock, &flags); | ||
94 | #endif | ||
95 | return flags; | ||
96 | } | 67 | } |
97 | EXPORT_SYMBOL(_spin_lock_irqsave); | 68 | EXPORT_SYMBOL(_spin_lock_irqsave); |
69 | #endif | ||
98 | 70 | ||
71 | #ifndef _spin_lock_irq | ||
99 | void __lockfunc _spin_lock_irq(spinlock_t *lock) | 72 | void __lockfunc _spin_lock_irq(spinlock_t *lock) |
100 | { | 73 | { |
101 | local_irq_disable(); | 74 | __spin_lock_irq(lock); |
102 | preempt_disable(); | ||
103 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
104 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
105 | } | 75 | } |
106 | EXPORT_SYMBOL(_spin_lock_irq); | 76 | EXPORT_SYMBOL(_spin_lock_irq); |
77 | #endif | ||
107 | 78 | ||
79 | #ifndef _spin_lock_bh | ||
108 | void __lockfunc _spin_lock_bh(spinlock_t *lock) | 80 | void __lockfunc _spin_lock_bh(spinlock_t *lock) |
109 | { | 81 | { |
110 | local_bh_disable(); | 82 | __spin_lock_bh(lock); |
111 | preempt_disable(); | ||
112 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
113 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
114 | } | 83 | } |
115 | EXPORT_SYMBOL(_spin_lock_bh); | 84 | EXPORT_SYMBOL(_spin_lock_bh); |
85 | #endif | ||
116 | 86 | ||
87 | #ifndef _read_lock_irqsave | ||
117 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) | 88 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) |
118 | { | 89 | { |
119 | unsigned long flags; | 90 | return __read_lock_irqsave(lock); |
120 | |||
121 | local_irq_save(flags); | ||
122 | preempt_disable(); | ||
123 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
124 | LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock, | ||
125 | _raw_read_lock_flags, &flags); | ||
126 | return flags; | ||
127 | } | 91 | } |
128 | EXPORT_SYMBOL(_read_lock_irqsave); | 92 | EXPORT_SYMBOL(_read_lock_irqsave); |
93 | #endif | ||
129 | 94 | ||
95 | #ifndef _read_lock_irq | ||
130 | void __lockfunc _read_lock_irq(rwlock_t *lock) | 96 | void __lockfunc _read_lock_irq(rwlock_t *lock) |
131 | { | 97 | { |
132 | local_irq_disable(); | 98 | __read_lock_irq(lock); |
133 | preempt_disable(); | ||
134 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
135 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
136 | } | 99 | } |
137 | EXPORT_SYMBOL(_read_lock_irq); | 100 | EXPORT_SYMBOL(_read_lock_irq); |
101 | #endif | ||
138 | 102 | ||
103 | #ifndef _read_lock_bh | ||
139 | void __lockfunc _read_lock_bh(rwlock_t *lock) | 104 | void __lockfunc _read_lock_bh(rwlock_t *lock) |
140 | { | 105 | { |
141 | local_bh_disable(); | 106 | __read_lock_bh(lock); |
142 | preempt_disable(); | ||
143 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
144 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
145 | } | 107 | } |
146 | EXPORT_SYMBOL(_read_lock_bh); | 108 | EXPORT_SYMBOL(_read_lock_bh); |
109 | #endif | ||
147 | 110 | ||
111 | #ifndef _write_lock_irqsave | ||
148 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) | 112 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) |
149 | { | 113 | { |
150 | unsigned long flags; | 114 | return __write_lock_irqsave(lock); |
151 | |||
152 | local_irq_save(flags); | ||
153 | preempt_disable(); | ||
154 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
155 | LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock, | ||
156 | _raw_write_lock_flags, &flags); | ||
157 | return flags; | ||
158 | } | 115 | } |
159 | EXPORT_SYMBOL(_write_lock_irqsave); | 116 | EXPORT_SYMBOL(_write_lock_irqsave); |
117 | #endif | ||
160 | 118 | ||
119 | #ifndef _write_lock_irq | ||
161 | void __lockfunc _write_lock_irq(rwlock_t *lock) | 120 | void __lockfunc _write_lock_irq(rwlock_t *lock) |
162 | { | 121 | { |
163 | local_irq_disable(); | 122 | __write_lock_irq(lock); |
164 | preempt_disable(); | ||
165 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
166 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
167 | } | 123 | } |
168 | EXPORT_SYMBOL(_write_lock_irq); | 124 | EXPORT_SYMBOL(_write_lock_irq); |
125 | #endif | ||
169 | 126 | ||
127 | #ifndef _write_lock_bh | ||
170 | void __lockfunc _write_lock_bh(rwlock_t *lock) | 128 | void __lockfunc _write_lock_bh(rwlock_t *lock) |
171 | { | 129 | { |
172 | local_bh_disable(); | 130 | __write_lock_bh(lock); |
173 | preempt_disable(); | ||
174 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
175 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
176 | } | 131 | } |
177 | EXPORT_SYMBOL(_write_lock_bh); | 132 | EXPORT_SYMBOL(_write_lock_bh); |
133 | #endif | ||
178 | 134 | ||
135 | #ifndef _spin_lock | ||
179 | void __lockfunc _spin_lock(spinlock_t *lock) | 136 | void __lockfunc _spin_lock(spinlock_t *lock) |
180 | { | 137 | { |
181 | preempt_disable(); | 138 | __spin_lock(lock); |
182 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
183 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
184 | } | 139 | } |
185 | |||
186 | EXPORT_SYMBOL(_spin_lock); | 140 | EXPORT_SYMBOL(_spin_lock); |
141 | #endif | ||
187 | 142 | ||
143 | #ifndef _write_lock | ||
188 | void __lockfunc _write_lock(rwlock_t *lock) | 144 | void __lockfunc _write_lock(rwlock_t *lock) |
189 | { | 145 | { |
190 | preempt_disable(); | 146 | __write_lock(lock); |
191 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
192 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
193 | } | 147 | } |
194 | |||
195 | EXPORT_SYMBOL(_write_lock); | 148 | EXPORT_SYMBOL(_write_lock); |
149 | #endif | ||
196 | 150 | ||
197 | #else /* CONFIG_PREEMPT: */ | 151 | #else /* CONFIG_PREEMPT: */ |
198 | 152 | ||
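The #ifndef guards sprinkled through this file are the hook for arch-inlined spinlocks: the function bodies have moved to include/linux/spinlock_api_smp.h as __spin_lock() and friends, and an architecture that wants a variant inlined arranges for the _spin_lock name to become a macro, which compiles out the out-of-line copy here. A sketch of the opt-in chain, assuming the marker-macro naming this series uses on s390:

	/* arch/<arch>/include/asm/spinlock.h: opt in to inlining */
	#define __always_inline__spin_lock

	/* include/linux/spinlock_api_smp.h (simplified): map the API to the inline body */
	#ifdef __always_inline__spin_lock
	#define _spin_lock(lock) __spin_lock(lock)
	#endif

	/* kernel/spinlock.c: the out-of-line fallback exists only if nobody inlined it */
	#ifndef _spin_lock
	void __lockfunc _spin_lock(spinlock_t *lock)
	{
		__spin_lock(lock);
	}
	EXPORT_SYMBOL(_spin_lock);
	#endif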
@@ -318,125 +272,109 @@ EXPORT_SYMBOL(_spin_lock_nest_lock); | |||
318 | 272 | ||
319 | #endif | 273 | #endif |
320 | 274 | ||
275 | #ifndef _spin_unlock | ||
321 | void __lockfunc _spin_unlock(spinlock_t *lock) | 276 | void __lockfunc _spin_unlock(spinlock_t *lock) |
322 | { | 277 | { |
323 | spin_release(&lock->dep_map, 1, _RET_IP_); | 278 | __spin_unlock(lock); |
324 | _raw_spin_unlock(lock); | ||
325 | preempt_enable(); | ||
326 | } | 279 | } |
327 | EXPORT_SYMBOL(_spin_unlock); | 280 | EXPORT_SYMBOL(_spin_unlock); |
281 | #endif | ||
328 | 282 | ||
283 | #ifndef _write_unlock | ||
329 | void __lockfunc _write_unlock(rwlock_t *lock) | 284 | void __lockfunc _write_unlock(rwlock_t *lock) |
330 | { | 285 | { |
331 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 286 | __write_unlock(lock); |
332 | _raw_write_unlock(lock); | ||
333 | preempt_enable(); | ||
334 | } | 287 | } |
335 | EXPORT_SYMBOL(_write_unlock); | 288 | EXPORT_SYMBOL(_write_unlock); |
289 | #endif | ||
336 | 290 | ||
291 | #ifndef _read_unlock | ||
337 | void __lockfunc _read_unlock(rwlock_t *lock) | 292 | void __lockfunc _read_unlock(rwlock_t *lock) |
338 | { | 293 | { |
339 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 294 | __read_unlock(lock); |
340 | _raw_read_unlock(lock); | ||
341 | preempt_enable(); | ||
342 | } | 295 | } |
343 | EXPORT_SYMBOL(_read_unlock); | 296 | EXPORT_SYMBOL(_read_unlock); |
297 | #endif | ||
344 | 298 | ||
299 | #ifndef _spin_unlock_irqrestore | ||
345 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) | 300 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) |
346 | { | 301 | { |
347 | spin_release(&lock->dep_map, 1, _RET_IP_); | 302 | __spin_unlock_irqrestore(lock, flags); |
348 | _raw_spin_unlock(lock); | ||
349 | local_irq_restore(flags); | ||
350 | preempt_enable(); | ||
351 | } | 303 | } |
352 | EXPORT_SYMBOL(_spin_unlock_irqrestore); | 304 | EXPORT_SYMBOL(_spin_unlock_irqrestore); |
305 | #endif | ||
353 | 306 | ||
307 | #ifndef _spin_unlock_irq | ||
354 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) | 308 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) |
355 | { | 309 | { |
356 | spin_release(&lock->dep_map, 1, _RET_IP_); | 310 | __spin_unlock_irq(lock); |
357 | _raw_spin_unlock(lock); | ||
358 | local_irq_enable(); | ||
359 | preempt_enable(); | ||
360 | } | 311 | } |
361 | EXPORT_SYMBOL(_spin_unlock_irq); | 312 | EXPORT_SYMBOL(_spin_unlock_irq); |
313 | #endif | ||
362 | 314 | ||
315 | #ifndef _spin_unlock_bh | ||
363 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) | 316 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) |
364 | { | 317 | { |
365 | spin_release(&lock->dep_map, 1, _RET_IP_); | 318 | __spin_unlock_bh(lock); |
366 | _raw_spin_unlock(lock); | ||
367 | preempt_enable_no_resched(); | ||
368 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
369 | } | 319 | } |
370 | EXPORT_SYMBOL(_spin_unlock_bh); | 320 | EXPORT_SYMBOL(_spin_unlock_bh); |
321 | #endif | ||
371 | 322 | ||
323 | #ifndef _read_unlock_irqrestore | ||
372 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 324 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
373 | { | 325 | { |
374 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 326 | __read_unlock_irqrestore(lock, flags); |
375 | _raw_read_unlock(lock); | ||
376 | local_irq_restore(flags); | ||
377 | preempt_enable(); | ||
378 | } | 327 | } |
379 | EXPORT_SYMBOL(_read_unlock_irqrestore); | 328 | EXPORT_SYMBOL(_read_unlock_irqrestore); |
329 | #endif | ||
380 | 330 | ||
331 | #ifndef _read_unlock_irq | ||
381 | void __lockfunc _read_unlock_irq(rwlock_t *lock) | 332 | void __lockfunc _read_unlock_irq(rwlock_t *lock) |
382 | { | 333 | { |
383 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 334 | __read_unlock_irq(lock); |
384 | _raw_read_unlock(lock); | ||
385 | local_irq_enable(); | ||
386 | preempt_enable(); | ||
387 | } | 335 | } |
388 | EXPORT_SYMBOL(_read_unlock_irq); | 336 | EXPORT_SYMBOL(_read_unlock_irq); |
337 | #endif | ||
389 | 338 | ||
339 | #ifndef _read_unlock_bh | ||
390 | void __lockfunc _read_unlock_bh(rwlock_t *lock) | 340 | void __lockfunc _read_unlock_bh(rwlock_t *lock) |
391 | { | 341 | { |
392 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 342 | __read_unlock_bh(lock); |
393 | _raw_read_unlock(lock); | ||
394 | preempt_enable_no_resched(); | ||
395 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
396 | } | 343 | } |
397 | EXPORT_SYMBOL(_read_unlock_bh); | 344 | EXPORT_SYMBOL(_read_unlock_bh); |
345 | #endif | ||
398 | 346 | ||
347 | #ifndef _write_unlock_irqrestore | ||
399 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 348 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
400 | { | 349 | { |
401 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 350 | __write_unlock_irqrestore(lock, flags); |
402 | _raw_write_unlock(lock); | ||
403 | local_irq_restore(flags); | ||
404 | preempt_enable(); | ||
405 | } | 351 | } |
406 | EXPORT_SYMBOL(_write_unlock_irqrestore); | 352 | EXPORT_SYMBOL(_write_unlock_irqrestore); |
353 | #endif | ||
407 | 354 | ||
355 | #ifndef _write_unlock_irq | ||
408 | void __lockfunc _write_unlock_irq(rwlock_t *lock) | 356 | void __lockfunc _write_unlock_irq(rwlock_t *lock) |
409 | { | 357 | { |
410 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 358 | __write_unlock_irq(lock); |
411 | _raw_write_unlock(lock); | ||
412 | local_irq_enable(); | ||
413 | preempt_enable(); | ||
414 | } | 359 | } |
415 | EXPORT_SYMBOL(_write_unlock_irq); | 360 | EXPORT_SYMBOL(_write_unlock_irq); |
361 | #endif | ||
416 | 362 | ||
363 | #ifndef _write_unlock_bh | ||
417 | void __lockfunc _write_unlock_bh(rwlock_t *lock) | 364 | void __lockfunc _write_unlock_bh(rwlock_t *lock) |
418 | { | 365 | { |
419 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 366 | __write_unlock_bh(lock); |
420 | _raw_write_unlock(lock); | ||
421 | preempt_enable_no_resched(); | ||
422 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
423 | } | 367 | } |
424 | EXPORT_SYMBOL(_write_unlock_bh); | 368 | EXPORT_SYMBOL(_write_unlock_bh); |
369 | #endif | ||
425 | 370 | ||
371 | #ifndef _spin_trylock_bh | ||
426 | int __lockfunc _spin_trylock_bh(spinlock_t *lock) | 372 | int __lockfunc _spin_trylock_bh(spinlock_t *lock) |
427 | { | 373 | { |
428 | local_bh_disable(); | 374 | return __spin_trylock_bh(lock); |
429 | preempt_disable(); | ||
430 | if (_raw_spin_trylock(lock)) { | ||
431 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
432 | return 1; | ||
433 | } | ||
434 | |||
435 | preempt_enable_no_resched(); | ||
436 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
437 | return 0; | ||
438 | } | 375 | } |
439 | EXPORT_SYMBOL(_spin_trylock_bh); | 376 | EXPORT_SYMBOL(_spin_trylock_bh); |
377 | #endif | ||
440 | 378 | ||
441 | notrace int in_lock_functions(unsigned long addr) | 379 | notrace int in_lock_functions(unsigned long addr) |
442 | { | 380 | { |