diff options
Diffstat (limited to 'kernel/lockdep.c')
| -rw-r--r-- | kernel/lockdep.c | 20 | 
1 file changed, 10 insertions, 10 deletions
| diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 429540c70d3f..5feaddcdbe49 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
| @@ -73,11 +73,11 @@ module_param(lock_stat, int, 0644); | |||
| 73 | * to use a raw spinlock - we really dont want the spinlock | 73 | * to use a raw spinlock - we really dont want the spinlock | 
| 74 | * code to recurse back into the lockdep code... | 74 | * code to recurse back into the lockdep code... | 
| 75 | */ | 75 | */ | 
| 76 | static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 76 | static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; | 
| 77 | 77 | ||
| 78 | static int graph_lock(void) | 78 | static int graph_lock(void) | 
| 79 | { | 79 | { | 
| 80 | __raw_spin_lock(&lockdep_lock); | 80 | arch_spin_lock(&lockdep_lock); | 
| 81 | /* | 81 | /* | 
| 82 | * Make sure that if another CPU detected a bug while | 82 | * Make sure that if another CPU detected a bug while | 
| 83 | * walking the graph we dont change it (while the other | 83 | * walking the graph we dont change it (while the other | 
| @@ -85,7 +85,7 @@ static int graph_lock(void) | |||
| 85 | * dropped already) | 85 | * dropped already) | 
| 86 | */ | 86 | */ | 
| 87 | if (!debug_locks) { | 87 | if (!debug_locks) { | 
| 88 | __raw_spin_unlock(&lockdep_lock); | 88 | arch_spin_unlock(&lockdep_lock); | 
| 89 | return 0; | 89 | return 0; | 
| 90 | } | 90 | } | 
| 91 | /* prevent any recursions within lockdep from causing deadlocks */ | 91 | /* prevent any recursions within lockdep from causing deadlocks */ | 
| @@ -95,11 +95,11 @@ static int graph_lock(void) | |||
| 95 | 95 | ||
| 96 | static inline int graph_unlock(void) | 96 | static inline int graph_unlock(void) | 
| 97 | { | 97 | { | 
| 98 | if (debug_locks && !__raw_spin_is_locked(&lockdep_lock)) | 98 | if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) | 
| 99 | return DEBUG_LOCKS_WARN_ON(1); | 99 | return DEBUG_LOCKS_WARN_ON(1); | 
| 100 | 100 | ||
| 101 | current->lockdep_recursion--; | 101 | current->lockdep_recursion--; | 
| 102 | __raw_spin_unlock(&lockdep_lock); | 102 | arch_spin_unlock(&lockdep_lock); | 
| 103 | return 0; | 103 | return 0; | 
| 104 | } | 104 | } | 
| 105 | 105 | ||
| @@ -111,7 +111,7 @@ static inline int debug_locks_off_graph_unlock(void) | |||
| 111 | { | 111 | { | 
| 112 | int ret = debug_locks_off(); | 112 | int ret = debug_locks_off(); | 
| 113 | 113 | ||
| 114 | __raw_spin_unlock(&lockdep_lock); | 114 | arch_spin_unlock(&lockdep_lock); | 
| 115 | 115 | ||
| 116 | return ret; | 116 | return ret; | 
| 117 | } | 117 | } | 
| @@ -1170,9 +1170,9 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class) | |||
| 1170 | this.class = class; | 1170 | this.class = class; | 
| 1171 | 1171 | ||
| 1172 | local_irq_save(flags); | 1172 | local_irq_save(flags); | 
| 1173 | __raw_spin_lock(&lockdep_lock); | 1173 | arch_spin_lock(&lockdep_lock); | 
| 1174 | ret = __lockdep_count_forward_deps(&this); | 1174 | ret = __lockdep_count_forward_deps(&this); | 
| 1175 | __raw_spin_unlock(&lockdep_lock); | 1175 | arch_spin_unlock(&lockdep_lock); | 
| 1176 | local_irq_restore(flags); | 1176 | local_irq_restore(flags); | 
| 1177 | 1177 | ||
| 1178 | return ret; | 1178 | return ret; | 
| @@ -1197,9 +1197,9 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class) | |||
| 1197 | this.class = class; | 1197 | this.class = class; | 
| 1198 | 1198 | ||
| 1199 | local_irq_save(flags); | 1199 | local_irq_save(flags); | 
| 1200 | __raw_spin_lock(&lockdep_lock); | 1200 | arch_spin_lock(&lockdep_lock); | 
| 1201 | ret = __lockdep_count_backward_deps(&this); | 1201 | ret = __lockdep_count_backward_deps(&this); | 
| 1202 | __raw_spin_unlock(&lockdep_lock); | 1202 | arch_spin_unlock(&lockdep_lock); | 
| 1203 | local_irq_restore(flags); | 1203 | local_irq_restore(flags); | 
| 1204 | 1204 | ||
| 1205 | return ret; | 1205 | return ret; | 
