| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-15 12:02:01 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-15 12:02:01 -0500 |
| commit | 8f0ddf91f2aeb09602373e400cf8b403e9017210 (patch) | |
| tree | b907c35c79caadafff6ad46a91614e30afd2f967 /kernel/lockdep.c | |
| parent | 050cbb09dac0402672edeaeac06094ef8ff1749a (diff) | |
| parent | b5f91da0a6973bb6f9ff3b91b0e92c0773a458f3 (diff) | |
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (26 commits)
clockevents: Convert to raw_spinlock
clockevents: Make tick_device_lock static
debugobjects: Convert to raw_spinlocks
perf_event: Convert to raw_spinlock
hrtimers: Convert to raw_spinlocks
genirq: Convert irq_desc.lock to raw_spinlock
smp: Convert smplocks to raw_spinlocks
rtmutex: Convert rtmutex.lock to raw_spinlock
sched: Convert pi_lock to raw_spinlock
sched: Convert cpupri lock to raw_spinlock
sched: Convert rt_runtime_lock to raw_spinlock
sched: Convert rq->lock to raw_spinlock
plist: Make plist debugging raw_spinlock aware
bkl: Fixup core_lock fallout
locking: Cleanup the name space completely
locking: Further name space cleanups
alpha: Fix fallout from locking changes
locking: Implement new raw_spinlock
locking: Convert raw_rwlock functions to arch_rwlock
locking: Convert raw_rwlock to arch_rwlock
...
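The bulk of the series is a rename at the architecture boundary: the old low-level lock type raw_spinlock_t and its __raw_spin_*() helpers become arch_spinlock_t and arch_spin_*(), freeing the raw_spinlock name for the new lockdep-aware lock type introduced by "locking: Implement new raw_spinlock". Below is a minimal sketch of the renamed API, mirroring the lockdep.c usage in the diff that follows; demo_lock and demo_critical_section are hypothetical names for illustration, and the snippet only builds inside the kernel tree:

#include <linux/spinlock.h>	/* arch_spinlock_t, arch_spin_lock() */
#include <linux/irqflags.h>	/* local_irq_save()/local_irq_restore() */

/* Hypothetical lock, initialized the same way as lockdep_lock below. */
static arch_spinlock_t demo_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void demo_critical_section(void)
{
	unsigned long flags;

	/*
	 * arch_spin_lock() is the bare architecture lock: it performs no
	 * lockdep tracking and does not touch the interrupt state, so the
	 * caller must disable interrupts itself.
	 */
	local_irq_save(flags);
	arch_spin_lock(&demo_lock);
	/* ... critical section ... */
	arch_spin_unlock(&demo_lock);
	local_irq_restore(flags);
}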
Diffstat (limited to 'kernel/lockdep.c')
| -rw-r--r-- | kernel/lockdep.c | 20 |
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 429540c70d3f..5feaddcdbe49 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -73,11 +73,11 @@ module_param(lock_stat, int, 0644);
  * to use a raw spinlock - we really dont want the spinlock
  * code to recurse back into the lockdep code...
  */
-static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static int graph_lock(void)
 {
-	__raw_spin_lock(&lockdep_lock);
+	arch_spin_lock(&lockdep_lock);
 	/*
 	 * Make sure that if another CPU detected a bug while
 	 * walking the graph we dont change it (while the other
@@ -85,7 +85,7 @@ static int graph_lock(void)
 	 * dropped already)
 	 */
 	if (!debug_locks) {
-		__raw_spin_unlock(&lockdep_lock);
+		arch_spin_unlock(&lockdep_lock);
 		return 0;
 	}
 	/* prevent any recursions within lockdep from causing deadlocks */
@@ -95,11 +95,11 @@ static int graph_lock(void)
 
 static inline int graph_unlock(void)
 {
-	if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
+	if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
 		return DEBUG_LOCKS_WARN_ON(1);
 
 	current->lockdep_recursion--;
-	__raw_spin_unlock(&lockdep_lock);
+	arch_spin_unlock(&lockdep_lock);
 	return 0;
 }
 
@@ -111,7 +111,7 @@ static inline int debug_locks_off_graph_unlock(void)
 {
 	int ret = debug_locks_off();
 
-	__raw_spin_unlock(&lockdep_lock);
+	arch_spin_unlock(&lockdep_lock);
 
 	return ret;
 }
@@ -1170,9 +1170,9 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
 	this.class = class;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&lockdep_lock);
+	arch_spin_lock(&lockdep_lock);
 	ret = __lockdep_count_forward_deps(&this);
-	__raw_spin_unlock(&lockdep_lock);
+	arch_spin_unlock(&lockdep_lock);
 	local_irq_restore(flags);
 
 	return ret;
@@ -1197,9 +1197,9 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
 	this.class = class;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&lockdep_lock);
+	arch_spin_lock(&lockdep_lock);
 	ret = __lockdep_count_backward_deps(&this);
-	__raw_spin_unlock(&lockdep_lock);
+	arch_spin_unlock(&lockdep_lock);
 	local_irq_restore(flags);
 
 	return ret;
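As the matched 10 insertions and 10 deletions suggest, the lockdep.c part of this merge is purely mechanical: lockdep_lock was already the arch-level lock (the comment above its definition notes that the spinlock code must not recurse back into lockdep), so only the type and function names change. The existing local_irq_save()/local_irq_restore() brackets in the dependency-counting paths stay in place, since they supply the interrupt disabling that arch_spin_lock() does not do on its own.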