Diffstat (limited to 'lib')
-rw-r--r--	lib/debugobjects.c   | 74
-rw-r--r--	lib/kernel_lock.c    | 22
-rw-r--r--	lib/plist.c          |  8
-rw-r--r--	lib/spinlock_debug.c | 64

4 files changed, 85 insertions(+), 83 deletions(-)
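The changes below are part of the locking-namespace cleanup: locks that must stay true spinning locks move from the spinlock_t API to the raw_spinlock_t API, and the old _raw_*/__raw_* names are freed up for the do_raw_* debug layer and the arch_* architecture layer, both visible in the hunks that follow. A minimal sketch of the conversion pattern, assuming hypothetical names (example_lock and example_critical_section are illustrative, not from this patch):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);	/* was: DEFINE_SPINLOCK(example_lock) */

static void example_critical_section(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);	/* was: spin_lock_irqsave() */
	/* ... data shared with hard-irq context; must not sleep here ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}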
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index eae56fddfa3b..a9a8996d286a 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -26,14 +26,14 @@
 
 struct debug_bucket {
 	struct hlist_head	list;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 };
 
 static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
 
 static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
 
-static DEFINE_SPINLOCK(pool_lock);
+static DEFINE_RAW_SPINLOCK(pool_lock);
 
 static HLIST_HEAD(obj_pool);
 
@@ -96,10 +96,10 @@ static int fill_pool(void)
 		if (!new)
 			return obj_pool_free;
 
-		spin_lock_irqsave(&pool_lock, flags);
+		raw_spin_lock_irqsave(&pool_lock, flags);
 		hlist_add_head(&new->node, &obj_pool);
 		obj_pool_free++;
-		spin_unlock_irqrestore(&pool_lock, flags);
+		raw_spin_unlock_irqrestore(&pool_lock, flags);
 	}
 	return obj_pool_free;
 }
@@ -133,7 +133,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 {
 	struct debug_obj *obj = NULL;
 
-	spin_lock(&pool_lock);
+	raw_spin_lock(&pool_lock);
 	if (obj_pool.first) {
 		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
 
@@ -152,7 +152,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 		if (obj_pool_free < obj_pool_min_free)
 			obj_pool_min_free = obj_pool_free;
 	}
-	spin_unlock(&pool_lock);
+	raw_spin_unlock(&pool_lock);
 
 	return obj;
 }
@@ -165,7 +165,7 @@ static void free_obj_work(struct work_struct *work)
 	struct debug_obj *obj;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pool_lock, flags);
+	raw_spin_lock_irqsave(&pool_lock, flags);
 	while (obj_pool_free > ODEBUG_POOL_SIZE) {
 		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
 		hlist_del(&obj->node);
@@ -174,11 +174,11 @@ static void free_obj_work(struct work_struct *work)
 		 * We release pool_lock across kmem_cache_free() to
 		 * avoid contention on pool_lock.
 		 */
-		spin_unlock_irqrestore(&pool_lock, flags);
+		raw_spin_unlock_irqrestore(&pool_lock, flags);
 		kmem_cache_free(obj_cache, obj);
-		spin_lock_irqsave(&pool_lock, flags);
+		raw_spin_lock_irqsave(&pool_lock, flags);
 	}
-	spin_unlock_irqrestore(&pool_lock, flags);
+	raw_spin_unlock_irqrestore(&pool_lock, flags);
 }
 
 /*
@@ -190,7 +190,7 @@ static void free_object(struct debug_obj *obj)
 	unsigned long flags;
 	int sched = 0;
 
-	spin_lock_irqsave(&pool_lock, flags);
+	raw_spin_lock_irqsave(&pool_lock, flags);
 	/*
 	 * schedule work when the pool is filled and the cache is
 	 * initialized:
@@ -200,7 +200,7 @@ static void free_object(struct debug_obj *obj)
 	hlist_add_head(&obj->node, &obj_pool);
 	obj_pool_free++;
 	obj_pool_used--;
-	spin_unlock_irqrestore(&pool_lock, flags);
+	raw_spin_unlock_irqrestore(&pool_lock, flags);
 	if (sched)
 		schedule_work(&debug_obj_work);
 }
@@ -221,9 +221,9 @@ static void debug_objects_oom(void)
 	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
 
 	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
-		spin_lock_irqsave(&db->lock, flags);
+		raw_spin_lock_irqsave(&db->lock, flags);
 		hlist_move_list(&db->list, &freelist);
-		spin_unlock_irqrestore(&db->lock, flags);
+		raw_spin_unlock_irqrestore(&db->lock, flags);
 
 		/* Now free them */
 		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -303,14 +303,14 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
 
 	db = get_bucket((unsigned long) addr);
 
-	spin_lock_irqsave(&db->lock, flags);
+	raw_spin_lock_irqsave(&db->lock, flags);
 
 	obj = lookup_object(addr, db);
 	if (!obj) {
 		obj = alloc_object(addr, db, descr);
 		if (!obj) {
 			debug_objects_enabled = 0;
-			spin_unlock_irqrestore(&db->lock, flags);
+			raw_spin_unlock_irqrestore(&db->lock, flags);
 			debug_objects_oom();
 			return;
 		}
@@ -327,7 +327,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
 	case ODEBUG_STATE_ACTIVE:
 		debug_print_object(obj, "init");
 		state = obj->state;
-		spin_unlock_irqrestore(&db->lock, flags);
+		raw_spin_unlock_irqrestore(&db->lock, flags);
 		debug_object_fixup(descr->fixup_init, addr, state);
 		return;
 
@@ -338,7 +338,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
 		break;
 	}
 
-	spin_unlock_irqrestore(&db->lock, flags);
+	raw_spin_unlock_irqrestore(&db->lock, flags);
 }
 
 /**
@@ -385,7 +385,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
 
 	db = get_bucket((unsigned long) addr);
 
-	spin_lock_irqsave(&db->lock, flags);
+	raw_spin_lock_irqsave(&db->lock, flags);
 
 	obj = lookup_object(addr, db);
 	if (obj) {
@@ -398,7 +398,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
 		case ODEBUG_STATE_ACTIVE:
 			debug_print_object(obj, "activate");
 			state = obj->state;
-			spin_unlock_irqrestore(&db->lock, flags);
+			raw_spin_unlock_irqrestore(&db->lock, flags);
 			debug_object_fixup(descr->fixup_activate, addr, state);
 			return;
 
@@ -408,11 +408,11 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
 		default:
 			break;
 		}
-		spin_unlock_irqrestore(&db->lock, flags);
+		raw_spin_unlock_irqrestore(&db->lock, flags);
 		return;
 	}
 
-	spin_unlock_irqrestore(&db->lock, flags);
+	raw_spin_unlock_irqrestore(&db->lock, flags);
 	/*
 	 * This happens when a static object is activated. We
 	 * let the type specific code decide whether this is
@@ -438,7 +438,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
 
 	db = get_bucket((unsigned long) addr);
 
-	spin_lock_irqsave(&db->lock, flags);
+	raw_spin_lock_irqsave(&db->lock, flags);
 
 	obj = lookup_object(addr, db);
 	if (obj) {
@@ -463,7 +463,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
 		debug_print_object(&o, "deactivate");
 	}
 
-	spin_unlock_irqrestore(&db->lock, flags);
+	raw_spin_unlock_irqrestore(&db->lock, flags);
 }
 
 /**
@@ -483,7 +483,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
 
 	db = get_bucket((unsigned long) addr);
 
-	spin_lock_irqsave(&db->lock, flags);
+	raw_spin_lock_irqsave(&db->lock, flags);
 
 	obj = lookup_object(addr, db);
 	if (!obj)
@@ -498,7 +498,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
 	case ODEBUG_STATE_ACTIVE:
 		debug_print_object(obj, "destroy");
 		state = obj->state;
-		spin_unlock_irqrestore(&db->lock, flags);
+		raw_spin_unlock_irqrestore(&db->lock, flags);
 		debug_object_fixup(descr->fixup_destroy, addr, state);
 		return;
 
@@ -509,7 +509,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
 		break;
 	}
 out_unlock:
-	spin_unlock_irqrestore(&db->lock, flags);
+	raw_spin_unlock_irqrestore(&db->lock, flags);
 }
 
 /**
@@ -529,7 +529,7 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
 
 	db = get_bucket((unsigned long) addr);
 
-	spin_lock_irqsave(&db->lock, flags);
+	raw_spin_lock_irqsave(&db->lock, flags);
 
 	obj = lookup_object(addr, db);
 	if (!obj)
@@ -539,17 +539,17 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
 	case ODEBUG_STATE_ACTIVE:
 		debug_print_object(obj, "free");
 		state = obj->state;
-		spin_unlock_irqrestore(&db->lock, flags);
+		raw_spin_unlock_irqrestore(&db->lock, flags);
 		debug_object_fixup(descr->fixup_free, addr, state);
 		return;
 	default:
 		hlist_del(&obj->node);
-		spin_unlock_irqrestore(&db->lock, flags);
+		raw_spin_unlock_irqrestore(&db->lock, flags);
 		free_object(obj);
 		return;
 	}
 out_unlock:
-	spin_unlock_irqrestore(&db->lock, flags);
+	raw_spin_unlock_irqrestore(&db->lock, flags);
 }
 
 #ifdef CONFIG_DEBUG_OBJECTS_FREE
@@ -575,7 +575,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 
 repeat:
 	cnt = 0;
-	spin_lock_irqsave(&db->lock, flags);
+	raw_spin_lock_irqsave(&db->lock, flags);
 	hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
 		cnt++;
 		oaddr = (unsigned long) obj->object;
@@ -587,7 +587,7 @@ repeat:
 			debug_print_object(obj, "free");
 			descr = obj->descr;
 			state = obj->state;
-			spin_unlock_irqrestore(&db->lock, flags);
+			raw_spin_unlock_irqrestore(&db->lock, flags);
 			debug_object_fixup(descr->fixup_free,
 					   (void *) oaddr, state);
 			goto repeat;
@@ -597,7 +597,7 @@ repeat:
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&db->lock, flags);
+	raw_spin_unlock_irqrestore(&db->lock, flags);
 
 	/* Now free them */
 	hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -783,7 +783,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
 
 	db = get_bucket((unsigned long) addr);
 
-	spin_lock_irqsave(&db->lock, flags);
+	raw_spin_lock_irqsave(&db->lock, flags);
 
 	obj = lookup_object(addr, db);
 	if (!obj && state != ODEBUG_STATE_NONE) {
@@ -807,7 +807,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
 	}
 	res = 0;
 out:
-	spin_unlock_irqrestore(&db->lock, flags);
+	raw_spin_unlock_irqrestore(&db->lock, flags);
 	if (res)
 		debug_objects_enabled = 0;
 	return res;
@@ -907,7 +907,7 @@ void __init debug_objects_early_init(void)
 	int i;
 
 	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
-		spin_lock_init(&obj_hash[i].lock);
+		raw_spin_lock_init(&obj_hash[i].lock);
 
 	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
 		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
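
Throughout debugobjects the locking pattern itself is unchanged; only the lock type and the function prefix change. A hedged sketch of the hash-bucket pattern the file uses, with illustrative names (example_bucket and example_walk_bucket are not from the patch):

#include <linux/spinlock.h>
#include <linux/list.h>

struct example_bucket {			/* mirrors struct debug_bucket above */
	struct hlist_head	list;
	raw_spinlock_t		lock;	/* was spinlock_t before this patch */
};

static void example_walk_bucket(struct example_bucket *b)
{
	struct hlist_node *pos;
	unsigned long flags;

	raw_spin_lock_irqsave(&b->lock, flags);
	hlist_for_each(pos, &b->list) {
		/* inspect entries; a raw-lock section must stay short
		 * and must not sleep or allocate */
	}
	raw_spin_unlock_irqrestore(&b->lock, flags);
}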
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 5526b46aba94..b135d04aa48a 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -23,7 +23,7 @@
  *
  * Don't use in new code.
  */
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
+static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
 
 
 /*
@@ -36,12 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
  * If it successfully gets the lock, it should increment
  * the preemption count like any spinlock does.
  *
- * (This works on UP too - _raw_spin_trylock will never
+ * (This works on UP too - do_raw_spin_trylock will never
  * return false in that case)
  */
 int __lockfunc __reacquire_kernel_lock(void)
 {
-	while (!_raw_spin_trylock(&kernel_flag)) {
+	while (!do_raw_spin_trylock(&kernel_flag)) {
 		if (need_resched())
 			return -EAGAIN;
 		cpu_relax();
@@ -52,27 +52,27 @@ int __lockfunc __reacquire_kernel_lock(void)
 
 void __lockfunc __release_kernel_lock(void)
 {
-	_raw_spin_unlock(&kernel_flag);
+	do_raw_spin_unlock(&kernel_flag);
 	preempt_enable_no_resched();
 }
 
 /*
  * These are the BKL spinlocks - we try to be polite about preemption.
  * If SMP is not on (ie UP preemption), this all goes away because the
- * _raw_spin_trylock() will always succeed.
+ * do_raw_spin_trylock() will always succeed.
  */
 #ifdef CONFIG_PREEMPT
 static inline void __lock_kernel(void)
 {
 	preempt_disable();
-	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
+	if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
 		/*
 		 * If preemption was disabled even before this
 		 * was called, there's nothing we can be polite
 		 * about - just spin.
 		 */
 		if (preempt_count() > 1) {
-			_raw_spin_lock(&kernel_flag);
+			do_raw_spin_lock(&kernel_flag);
 			return;
 		}
 
@@ -82,10 +82,10 @@ static inline void __lock_kernel(void)
 		 */
 		do {
 			preempt_enable();
-			while (spin_is_locked(&kernel_flag))
+			while (raw_spin_is_locked(&kernel_flag))
 				cpu_relax();
 			preempt_disable();
-		} while (!_raw_spin_trylock(&kernel_flag));
+		} while (!do_raw_spin_trylock(&kernel_flag));
 	}
 }
 
@@ -96,7 +96,7 @@ static inline void __lock_kernel(void)
  */
 static inline void __lock_kernel(void)
 {
-	_raw_spin_lock(&kernel_flag);
+	do_raw_spin_lock(&kernel_flag);
 }
 #endif
 
@@ -106,7 +106,7 @@ static inline void __unlock_kernel(void)
 	 * the BKL is not covered by lockdep, so we open-code the
 	 * unlocking sequence (and thus avoid the dep-chain ops):
 	 */
-	_raw_spin_unlock(&kernel_flag);
+	do_raw_spin_unlock(&kernel_flag);
 	preempt_enable();
 }
 
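
The BKL's polite-spin loop survives the conversion with its structure intact: wait on a plain read while preemptible, and only retry the atomic trylock once the lock looks free, so the owner's cacheline is not bounced. A condensed sketch of that loop under the renamed API (example_polite_lock is illustrative):

#include <linux/spinlock.h>

static inline void example_polite_lock(raw_spinlock_t *lock)
{
	preempt_disable();
	while (!do_raw_spin_trylock(lock)) {
		preempt_enable();		/* open a preemption window */
		while (raw_spin_is_locked(lock))
			cpu_relax();		/* read-only wait, no cacheline ping-pong */
		preempt_disable();
	}
}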
diff --git a/lib/plist.c b/lib/plist.c
index d6c64a824e1d..1471988d9190 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -54,9 +54,11 @@ static void plist_check_list(struct list_head *top)
 
 static void plist_check_head(struct plist_head *head)
 {
-	WARN_ON(!head->lock);
-	if (head->lock)
-		WARN_ON_SMP(!spin_is_locked(head->lock));
+	WARN_ON(!head->rawlock && !head->spinlock);
+	if (head->rawlock)
+		WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
+	if (head->spinlock)
+		WARN_ON_SMP(!spin_is_locked(head->spinlock));
 	plist_check_list(&head->prio_list);
 	plist_check_list(&head->node_list);
 }
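
A plist head can now be protected by either lock flavour while its users are converted, so the debug check tests whichever pointer is set. A hedged usage sketch; it assumes the matching plist.h change (outside this lib/-only diffstat) adds the rawlock and spinlock fields, and example_plist_add is illustrative:

#include <linux/plist.h>
#include <linux/spinlock.h>

/* Caller is expected to hold whichever lock the head was initialized
 * with; with CONFIG_DEBUG_PI_LIST, plist_add() runs the check above. */
static void example_plist_add(struct plist_head *head, struct plist_node *node)
{
	plist_add(node, head);	/* plist_check_head() asserts the lock is held */
}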
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 9c4b0256490b..4755b98b6dfb 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -13,8 +13,8 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 
-void __spin_lock_init(spinlock_t *lock, const char *name,
-		      struct lock_class_key *key)
+void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+			  struct lock_class_key *key)
 {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	/*
@@ -23,13 +23,13 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
-	lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	lock->magic = SPINLOCK_MAGIC;
 	lock->owner = SPINLOCK_OWNER_INIT;
 	lock->owner_cpu = -1;
 }
 
-EXPORT_SYMBOL(__spin_lock_init);
+EXPORT_SYMBOL(__raw_spin_lock_init);
 
 void __rwlock_init(rwlock_t *lock, const char *name,
 		   struct lock_class_key *key)
@@ -41,7 +41,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
-	lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
+	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
 	lock->magic = RWLOCK_MAGIC;
 	lock->owner = SPINLOCK_OWNER_INIT;
 	lock->owner_cpu = -1;
@@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
 
 EXPORT_SYMBOL(__rwlock_init);
 
-static void spin_bug(spinlock_t *lock, const char *msg)
+static void spin_bug(raw_spinlock_t *lock, const char *msg)
 {
 	struct task_struct *owner = NULL;
 
@@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, const char *msg)
 #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
 
 static inline void
-debug_spin_lock_before(spinlock_t *lock)
+debug_spin_lock_before(raw_spinlock_t *lock)
 {
 	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
 	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
@@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock)
 		     lock, "cpu recursion");
 }
 
-static inline void debug_spin_lock_after(spinlock_t *lock)
+static inline void debug_spin_lock_after(raw_spinlock_t *lock)
 {
 	lock->owner_cpu = raw_smp_processor_id();
 	lock->owner = current;
 }
 
-static inline void debug_spin_unlock(spinlock_t *lock)
+static inline void debug_spin_unlock(raw_spinlock_t *lock)
 {
 	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
-	SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
+	SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
 	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
 	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
 							lock, "wrong CPU");
@@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spinlock_t *lock)
 	lock->owner_cpu = -1;
 }
 
-static void __spin_lock_debug(spinlock_t *lock)
+static void __spin_lock_debug(raw_spinlock_t *lock)
 {
 	u64 i;
 	u64 loops = loops_per_jiffy * HZ;
@@ -106,7 +106,7 @@ static void __spin_lock_debug(spinlock_t *lock)
 
 	for (;;) {
 		for (i = 0; i < loops; i++) {
-			if (__raw_spin_trylock(&lock->raw_lock))
+			if (arch_spin_trylock(&lock->raw_lock))
 				return;
 			__delay(1);
 		}
@@ -125,17 +125,17 @@ static void __spin_lock_debug(spinlock_t *lock)
 	}
 }
 
-void _raw_spin_lock(spinlock_t *lock)
+void do_raw_spin_lock(raw_spinlock_t *lock)
 {
 	debug_spin_lock_before(lock);
-	if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
+	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
 		__spin_lock_debug(lock);
 	debug_spin_lock_after(lock);
 }
 
-int _raw_spin_trylock(spinlock_t *lock)
+int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
-	int ret = __raw_spin_trylock(&lock->raw_lock);
+	int ret = arch_spin_trylock(&lock->raw_lock);
 
 	if (ret)
 		debug_spin_lock_after(lock);
@@ -148,10 +148,10 @@ int _raw_spin_trylock(spinlock_t *lock)
 	return ret;
 }
 
-void _raw_spin_unlock(spinlock_t *lock)
+void do_raw_spin_unlock(raw_spinlock_t *lock)
 {
 	debug_spin_unlock(lock);
-	__raw_spin_unlock(&lock->raw_lock);
+	arch_spin_unlock(&lock->raw_lock);
 }
 
 static void rwlock_bug(rwlock_t *lock, const char *msg)
@@ -176,7 +176,7 @@ static void __read_lock_debug(rwlock_t *lock)
 
 	for (;;) {
 		for (i = 0; i < loops; i++) {
-			if (__raw_read_trylock(&lock->raw_lock))
+			if (arch_read_trylock(&lock->raw_lock))
 				return;
 			__delay(1);
 		}
@@ -193,15 +193,15 @@ static void __read_lock_debug(rwlock_t *lock)
 }
 #endif
 
-void _raw_read_lock(rwlock_t *lock)
+void do_raw_read_lock(rwlock_t *lock)
 {
 	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
-	__raw_read_lock(&lock->raw_lock);
+	arch_read_lock(&lock->raw_lock);
 }
 
-int _raw_read_trylock(rwlock_t *lock)
+int do_raw_read_trylock(rwlock_t *lock)
 {
-	int ret = __raw_read_trylock(&lock->raw_lock);
+	int ret = arch_read_trylock(&lock->raw_lock);
 
 #ifndef CONFIG_SMP
 	/*
@@ -212,10 +212,10 @@ int _raw_read_trylock(rwlock_t *lock)
 	return ret;
 }
 
-void _raw_read_unlock(rwlock_t *lock)
+void do_raw_read_unlock(rwlock_t *lock)
 {
 	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
-	__raw_read_unlock(&lock->raw_lock);
+	arch_read_unlock(&lock->raw_lock);
 }
 
 static inline void debug_write_lock_before(rwlock_t *lock)
@@ -251,7 +251,7 @@ static void __write_lock_debug(rwlock_t *lock)
 
 	for (;;) {
 		for (i = 0; i < loops; i++) {
-			if (__raw_write_trylock(&lock->raw_lock))
+			if (arch_write_trylock(&lock->raw_lock))
 				return;
 			__delay(1);
 		}
@@ -268,16 +268,16 @@ static void __write_lock_debug(rwlock_t *lock)
 }
 #endif
 
-void _raw_write_lock(rwlock_t *lock)
+void do_raw_write_lock(rwlock_t *lock)
 {
 	debug_write_lock_before(lock);
-	__raw_write_lock(&lock->raw_lock);
+	arch_write_lock(&lock->raw_lock);
 	debug_write_lock_after(lock);
 }
 
-int _raw_write_trylock(rwlock_t *lock)
+int do_raw_write_trylock(rwlock_t *lock)
 {
-	int ret = __raw_write_trylock(&lock->raw_lock);
+	int ret = arch_write_trylock(&lock->raw_lock);
 
 	if (ret)
 		debug_write_lock_after(lock);
@@ -290,8 +290,8 @@ int _raw_write_trylock(rwlock_t *lock)
 	return ret;
 }
 
-void _raw_write_unlock(rwlock_t *lock)
+void do_raw_write_unlock(rwlock_t *lock)
 {
 	debug_write_unlock(lock);
-	__raw_write_unlock(&lock->raw_lock);
+	arch_write_unlock(&lock->raw_lock);
 }
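
For reference, the type layering this rename establishes, as a simplified sketch (the real definitions in linux/spinlock_types.h carry magic, owner and lockdep fields behind config options; the slock field here is a stand-in):

typedef struct {
	volatile unsigned int slock;	/* stand-in for the arch lock word */
} arch_spinlock_t;

typedef struct raw_spinlock {
	arch_spinlock_t raw_lock;	/* arch_spin_lock() operates on this */
	/* debug builds add: magic, owner, owner_cpu (see above) */
} raw_spinlock_t;

typedef struct spinlock {
	struct raw_spinlock rlock;	/* spin_lock() forwards to raw_spin_lock() */
} spinlock_t;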