-rw-r--r--   include/linux/lockdep.h   92
-rw-r--r--   kernel/hung_task.c        13
-rw-r--r--   kernel/lglock.c           12
-rw-r--r--   kernel/mutex.c            43
-rw-r--r--   kernel/smp.c               2
5 files changed, 58 insertions, 104 deletions
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index f1e877b79ed8..cfc2f119779a 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -365,7 +365,7 @@ extern void lockdep_trace_alloc(gfp_t mask);
 
 #define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
 
-#else /* !LOCKDEP */
+#else /* !CONFIG_LOCKDEP */
 
 static inline void lockdep_off(void)
 {
@@ -479,82 +479,36 @@ static inline void print_irqtrace_events(struct task_struct *curr)
  * on the per lock-class debug mode:
  */
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
-#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
-# else
-#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
-#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
-# endif
-# define spin_release(l, n, i)			lock_release(l, n, i)
-#else
-# define spin_acquire(l, s, t, i)		do { } while (0)
-# define spin_release(l, n, i)			do { } while (0)
-#endif
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
-#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
-# else
-#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
-#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
-# endif
-# define rwlock_release(l, n, i)		lock_release(l, n, i)
-#else
-# define rwlock_acquire(l, s, t, i)		do { } while (0)
-# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
-# define rwlock_release(l, n, i)		do { } while (0)
-#endif
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
-#  define mutex_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
-# else
-#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
-#  define mutex_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, n, i)
-# endif
-# define mutex_release(l, n, i)			lock_release(l, n, i)
-#else
-# define mutex_acquire(l, s, t, i)		do { } while (0)
-# define mutex_acquire_nest(l, s, t, n, i)	do { } while (0)
-# define mutex_release(l, n, i)			do { } while (0)
-#endif
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
-#  define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
-#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
-# else
-#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
-#  define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, n, i)
-#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
-# endif
+#ifdef CONFIG_PROVE_LOCKING
+ #define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 2, n, i)
+ #define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 2, n, i)
+ #define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 2, n, i)
+#else
+ #define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
+ #define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
+ #define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)
+#endif
+
+#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
+#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
+#define spin_release(l, n, i)			lock_release(l, n, i)
+
+#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
+#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
+#define rwlock_release(l, n, i)			lock_release(l, n, i)
+
+#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
+#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
+#define mutex_release(l, n, i)			lock_release(l, n, i)
+
+#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
+#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
+#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
 # define rwsem_release(l, n, i)			lock_release(l, n, i)
-#else
-# define rwsem_acquire(l, s, t, i)		do { } while (0)
-# define rwsem_acquire_nest(l, s, t, n, i)	do { } while (0)
-# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
-# define rwsem_release(l, n, i)			do { } while (0)
-#endif
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
-#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
-# else
-#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
-#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
-# endif
+
+#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
 # define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
-#else
-# define lock_map_acquire(l)			do { } while (0)
-# define lock_map_acquire_read(l)		do { } while (0)
-# define lock_map_release(l)			do { } while (0)
-#endif
 
 #ifdef CONFIG_PROVE_LOCKING
 # define might_lock(lock) \
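The lockdep.h hunk above is a pure refactoring: each primitive's annotation now goes through one of three shared helpers (exclusive, shared, shared-recursive) instead of carrying its own CONFIG_DEBUG_LOCK_ALLOC / CONFIG_PROVE_LOCKING block. A minimal expansion sketch, assuming CONFIG_PROVE_LOCKING=y; the function and the rw_semaphore below are hypothetical and not part of the patch:

	#include <linux/lockdep.h>
	#include <linux/rwsem.h>

	/* Illustrative only: trace what the reworked read-side annotation
	 * expands to after this patch. */
	static void annotate_read_side(struct rw_semaphore *sem)
	{
		rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
		/*
		 * -> lock_acquire_shared(&sem->dep_map, 0, 0, NULL, _RET_IP_)
		 * -> lock_acquire(&sem->dep_map, 0, 0, 1, 2, NULL, _RET_IP_)
		 * i.e. the same call the old rwsem_acquire_read() emitted
		 * when lockdep was enabled.
		 */
		rwsem_release(&sem->dep_map, 1, _RET_IP_);
	}

The only knob that differs between the helpers is lock_acquire()'s fourth ("read") argument: 0 for exclusive, 1 for shared, 2 for shared-recursive.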
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 6df614912b9d..3e97fb126e6b 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -15,6 +15,7 @@
 #include <linux/lockdep.h>
 #include <linux/export.h>
 #include <linux/sysctl.h>
+#include <linux/utsname.h>
 
 /*
  * The number of tasks checked:
@@ -99,10 +100,14 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 	 * Ok, the task did not get scheduled for more than 2 minutes,
 	 * complain:
 	 */
-	printk(KERN_ERR "INFO: task %s:%d blocked for more than "
-			"%ld seconds.\n", t->comm, t->pid, timeout);
-	printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
-			" disables this message.\n");
+	pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
+		t->comm, t->pid, timeout);
+	pr_err("      %s %s %.*s\n",
+		print_tainted(), init_utsname()->release,
+		(int)strcspn(init_utsname()->version, " "),
+		init_utsname()->version);
+	pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
+		" disables this message.\n");
 	sched_show_task(t);
 	debug_show_held_locks(t);
 
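The reworked report adds the tainted flags, the kernel release, and just the first word of the uts version string. The truncation is the "%.*s" precision specifier fed by strcspn(); a standalone userspace sketch of that idiom (the version string below is made up):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* Hypothetical utsname()->version contents. */
		const char *version = "#1 SMP PREEMPT Thu Aug 1 12:00:00 UTC 2013";

		/*
		 * strcspn(version, " ") is the length of the initial segment
		 * containing no space, so "%.*s" prints just "#1" -- the same
		 * pair of arguments the pr_err() above passes for
		 * init_utsname()->version.
		 */
		printf("%.*s\n", (int)strcspn(version, " "), version);
		return 0;
	}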
diff --git a/kernel/lglock.c b/kernel/lglock.c
index 6535a667a5a7..86ae2aebf004 100644
--- a/kernel/lglock.c
+++ b/kernel/lglock.c
@@ -21,7 +21,7 @@ void lg_local_lock(struct lglock *lg)
 	arch_spinlock_t *lock;
 
 	preempt_disable();
-	rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	lock = this_cpu_ptr(lg->lock);
 	arch_spin_lock(lock);
 }
@@ -31,7 +31,7 @@ void lg_local_unlock(struct lglock *lg)
 {
 	arch_spinlock_t *lock;
 
-	rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	lock = this_cpu_ptr(lg->lock);
 	arch_spin_unlock(lock);
 	preempt_enable();
@@ -43,7 +43,7 @@ void lg_local_lock_cpu(struct lglock *lg, int cpu)
 	arch_spinlock_t *lock;
 
 	preempt_disable();
-	rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	lock = per_cpu_ptr(lg->lock, cpu);
 	arch_spin_lock(lock);
 }
@@ -53,7 +53,7 @@ void lg_local_unlock_cpu(struct lglock *lg, int cpu)
 {
 	arch_spinlock_t *lock;
 
-	rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	lock = per_cpu_ptr(lg->lock, cpu);
 	arch_spin_unlock(lock);
 	preempt_enable();
@@ -65,7 +65,7 @@ void lg_global_lock(struct lglock *lg)
 	int i;
 
 	preempt_disable();
-	rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_);
+	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	for_each_possible_cpu(i) {
 		arch_spinlock_t *lock;
 		lock = per_cpu_ptr(lg->lock, i);
@@ -78,7 +78,7 @@ void lg_global_unlock(struct lglock *lg)
 {
 	int i;
 
-	rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	for_each_possible_cpu(i) {
 		arch_spinlock_t *lock;
 		lock = per_cpu_ptr(lg->lock, i);
diff --git a/kernel/mutex.c b/kernel/mutex.c
index a52ee7bb830d..6d647aedffea 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -209,11 +209,13 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
  */
 static inline int mutex_can_spin_on_owner(struct mutex *lock)
 {
+	struct task_struct *owner;
 	int retval = 1;
 
 	rcu_read_lock();
-	if (lock->owner)
-		retval = lock->owner->on_cpu;
+	owner = ACCESS_ONCE(lock->owner);
+	if (owner)
+		retval = owner->on_cpu;
 	rcu_read_unlock();
 	/*
 	 * if lock->owner is not set, the mutex owner may have just acquired
@@ -461,7 +463,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			 * performed the optimistic spinning cannot be done.
 			 */
 			if (ACCESS_ONCE(ww->ctx))
-				break;
+				goto slowpath;
 		}
 
 		/*
@@ -472,7 +474,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		owner = ACCESS_ONCE(lock->owner);
 		if (owner && !mutex_spin_on_owner(lock, owner)) {
 			mspin_unlock(MLOCK(lock), &node);
-			break;
+			goto slowpath;
 		}
 
 		if ((atomic_read(&lock->count) == 1) &&
@@ -499,7 +501,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * the owner complete.
 		 */
 		if (!owner && (need_resched() || rt_task(task)))
-			break;
+			goto slowpath;
 
 		/*
 		 * The cpu_relax() call is a compiler barrier which forces
@@ -513,6 +515,10 @@ slowpath:
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);
 
+	/* once more, can we acquire the lock? */
+	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
+		goto skip_wait;
+
 	debug_mutex_lock_common(lock, &waiter);
 	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
@@ -520,9 +526,6 @@ slowpath:
 	list_add_tail(&waiter.list, &lock->wait_list);
 	waiter.task = task;
 
-	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
-		goto done;
-
 	lock_contended(&lock->dep_map, ip);
 
 	for (;;) {
@@ -536,7 +539,7 @@ slowpath:
 		 * other waiters:
 		 */
 		if (MUTEX_SHOW_NO_WAITER(lock) &&
-			(atomic_xchg(&lock->count, -1) == 1))
+		    (atomic_xchg(&lock->count, -1) == 1))
 			break;
 
 		/*
@@ -561,24 +564,25 @@ slowpath:
 		schedule_preempt_disabled();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
+	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	/* set it to 0 if there are no waiters left: */
+	if (likely(list_empty(&lock->wait_list)))
+		atomic_set(&lock->count, 0);
+	debug_mutex_free_waiter(&waiter);
 
-done:
+skip_wait:
+	/* got the lock - cleanup and rejoice! */
 	lock_acquired(&lock->dep_map, ip);
-	/* got the lock - rejoice! */
-	mutex_remove_waiter(lock, &waiter, current_thread_info());
 	mutex_set_owner(lock);
 
 	if (!__builtin_constant_p(ww_ctx == NULL)) {
-		struct ww_mutex *ww = container_of(lock,
-						   struct ww_mutex,
-						   base);
+		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 		struct mutex_waiter *cur;
 
 		/*
 		 * This branch gets optimized out for the common case,
 		 * and is only important for ww_mutex_lock.
 		 */
-
 		ww_mutex_lock_acquired(ww, ww_ctx);
 		ww->ctx = ww_ctx;
 
@@ -592,15 +596,8 @@ done:
 		}
 	}
 
-	/* set it to 0 if there are no waiters left: */
-	if (likely(list_empty(&lock->wait_list)))
-		atomic_set(&lock->count, 0);
-
 	spin_unlock_mutex(&lock->wait_lock, flags);
-
-	debug_mutex_free_waiter(&waiter);
 	preempt_enable();
-
 	return 0;
 
 err:
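The mutex.c hunks retry the acquisition once more, under wait_lock, before the task queues itself, so a lucky contender takes the skip_wait path without ever touching the wait list or having to unlink from it afterwards. A toy userspace model (not kernel code, deliberately simplified) of the counter convention this relies on -- 1 unlocked, 0 locked with no waiters, negative locked with waiters possibly queued:

	#include <stdatomic.h>
	#include <stdbool.h>

	struct toy_mutex {
		atomic_int count;	/* 1, 0 or -1, as in struct mutex */
	};

	/* Toy equivalent of MUTEX_SHOW_NO_WAITER(): the counter is not
	 * negative, i.e. nobody has marked the mutex as having waiters. */
	static bool toy_show_no_waiter(struct toy_mutex *m)
	{
		return atomic_load(&m->count) >= 0;
	}

	/*
	 * Toy equivalent of the new pre-queue check: try once more to take
	 * the lock before adding ourselves as a waiter.  Succeeds only if
	 * the old value was 1 (unlocked).
	 */
	static bool toy_try_grab_before_queueing(struct toy_mutex *m)
	{
		return toy_show_no_waiter(m) && atomic_exchange(&m->count, 0) == 1;
	}

Note how the pre-queue xchg in the hunk stores 0 rather than -1: no waiter has been added yet, whereas the xchg inside the wait loop keeps storing -1 so other contenders still see the mutex as contended.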
diff --git a/kernel/smp.c b/kernel/smp.c
index fe9f773d7114..b1c9034bdfcb 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -278,8 +278,6 @@ EXPORT_SYMBOL(smp_call_function_single);
  * @wait: If true, wait until function has completed.
  *
  * Returns 0 on success, else a negative status code (if no cpus were online).
- * Note that @wait will be implicitly turned on in case of allocation failures,
- * since we fall back to on-stack allocation.
  *
  * Selection preference:
  *	1) current cpu if in @mask