path: root/kernel/locking/lockdep.c
author		Linus Torvalds <torvalds@linux-foundation.org>	2016-05-16 17:47:16 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-05-16 17:47:16 -0400
commit		825a3b2605c3aa193e0075d0f9c72e33c17ab16a (patch)
tree		e8665c4cc20076ae53165475839d36b4bc641cd3 /kernel/locking/lockdep.c
parent		cf6ed9a6682d3f171cf9550d4bbe0ef31b768a7e (diff)
parent		ef0491ea17f8019821c7e9c8e801184ecf17f85a (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:

 - massive CPU hotplug rework (Thomas Gleixner)
 - improve migration fairness (Peter Zijlstra)
 - CPU load calculation updates/cleanups (Yuyang Du)
 - cpufreq updates (Steve Muckle)
 - nohz optimizations (Frederic Weisbecker)
 - switch_mm() micro-optimization on x86 (Andy Lutomirski)
 - ... lots of other enhancements, fixes and cleanups.

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (66 commits)
  ARM: Hide finish_arch_post_lock_switch() from modules
  sched/core: Provide a tsk_nr_cpus_allowed() helper
  sched/core: Use tsk_cpus_allowed() instead of accessing ->cpus_allowed
  sched/loadavg: Fix loadavg artifacts on fully idle and on fully loaded systems
  sched/fair: Correct unit of load_above_capacity
  sched/fair: Clean up scale confusion
  sched/nohz: Fix affine unpinned timers mess
  sched/fair: Fix fairness issue on migration
  sched/core: Kill sched_class::task_waking to clean up the migration logic
  sched/fair: Prepare to fix fairness problems on migration
  sched/fair: Move record_wakee()
  sched/core: Fix comment typo in wake_q_add()
  sched/core: Remove unused variable
  sched: Make hrtick_notifier an explicit call
  sched/fair: Make ilb_notifier an explicit call
  sched/hotplug: Make activate() the last hotplug step
  sched/hotplug: Move migration CPU_DYING to sched_cpu_dying()
  sched/migration: Move CPU_ONLINE into scheduler state
  sched/migration: Move calc_load_migrate() into CPU_DYING
  sched/migration: Move prepare transition to SCHED_STARTING state
  ...
Diffstat (limited to 'kernel/locking/lockdep.c')
-rw-r--r--	kernel/locking/lockdep.c	71
1 file changed, 62 insertions(+), 9 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 874d53eaf389..81f1a7107c0e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -45,6 +45,7 @@
 #include <linux/bitops.h>
 #include <linux/gfp.h>
 #include <linux/kmemcheck.h>
+#include <linux/random.h>
 
 #include <asm/sections.h>
 
@@ -3585,7 +3586,35 @@ static int __lock_is_held(struct lockdep_map *lock)
 	return 0;
 }
 
-static void __lock_pin_lock(struct lockdep_map *lock)
+static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
+{
+	struct pin_cookie cookie = NIL_COOKIE;
+	struct task_struct *curr = current;
+	int i;
+
+	if (unlikely(!debug_locks))
+		return cookie;
+
+	for (i = 0; i < curr->lockdep_depth; i++) {
+		struct held_lock *hlock = curr->held_locks + i;
+
+		if (match_held_lock(hlock, lock)) {
+			/*
+			 * Grab 16bits of randomness; this is sufficient to not
+			 * be guessable and still allows some pin nesting in
+			 * our u32 pin_count.
+			 */
+			cookie.val = 1 + (prandom_u32() >> 16);
+			hlock->pin_count += cookie.val;
+			return cookie;
+		}
+	}
+
+	WARN(1, "pinning an unheld lock\n");
+	return cookie;
+}
+
+static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 {
 	struct task_struct *curr = current;
 	int i;
@@ -3597,7 +3626,7 @@ static void __lock_pin_lock(struct lockdep_map *lock)
 		struct held_lock *hlock = curr->held_locks + i;
 
 		if (match_held_lock(hlock, lock)) {
-			hlock->pin_count++;
+			hlock->pin_count += cookie.val;
 			return;
 		}
 	}
@@ -3605,7 +3634,7 @@ static void __lock_pin_lock(struct lockdep_map *lock)
 	WARN(1, "pinning an unheld lock\n");
 }
 
-static void __lock_unpin_lock(struct lockdep_map *lock)
+static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 {
 	struct task_struct *curr = current;
 	int i;
@@ -3620,7 +3649,11 @@ static void __lock_unpin_lock(struct lockdep_map *lock)
 		if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
 			return;
 
-		hlock->pin_count--;
+		hlock->pin_count -= cookie.val;
+
+		if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
+			hlock->pin_count = 0;
+
 		return;
 	}
 }
@@ -3751,24 +3784,44 @@ int lock_is_held(struct lockdep_map *lock)
 }
 EXPORT_SYMBOL_GPL(lock_is_held);
 
-void lock_pin_lock(struct lockdep_map *lock)
+struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
 {
+	struct pin_cookie cookie = NIL_COOKIE;
 	unsigned long flags;
 
 	if (unlikely(current->lockdep_recursion))
-		return;
+		return cookie;
 
 	raw_local_irq_save(flags);
 	check_flags(flags);
 
 	current->lockdep_recursion = 1;
-	__lock_pin_lock(lock);
+	cookie = __lock_pin_lock(lock);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
+
+	return cookie;
 }
 EXPORT_SYMBOL_GPL(lock_pin_lock);
 
-void lock_unpin_lock(struct lockdep_map *lock)
+void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
+{
+	unsigned long flags;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	check_flags(flags);
+
+	current->lockdep_recursion = 1;
+	__lock_repin_lock(lock, cookie);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_repin_lock);
+
+void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 {
 	unsigned long flags;
 
@@ -3779,7 +3832,7 @@ void lock_unpin_lock(struct lockdep_map *lock)
 	check_flags(flags);
 
 	current->lockdep_recursion = 1;
-	__lock_unpin_lock(lock);
+	__lock_unpin_lock(lock, cookie);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
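
The net effect of the diff above is an API change: lock_pin_lock() now returns a struct pin_cookie, and the same cookie must be presented to lock_repin_lock()/lock_unpin_lock(), so an unpin can no longer silently pair with someone else's pin. A minimal caller sketch, assuming a lockdep-enabled build (CONFIG_DEBUG_LOCK_ALLOC) so the lock exposes a dep_map; the lock name and function below are hypothetical, not part of this commit:

    #include <linux/lockdep.h>
    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(my_lock);	/* hypothetical example lock */

    static void pinned_section(void)
    {
    	struct pin_cookie cookie;

    	raw_spin_lock(&my_lock);

    	/*
    	 * Pin the held lock. The cookie's ~16 bits of randomness make
    	 * it hard to forge, so unpinning with a stale or mismatched
    	 * cookie trips the WARN() paths added above.
    	 */
    	cookie = lock_pin_lock(&my_lock.dep_map);

    	/* ... code that relies on my_lock remaining held throughout ... */

    	lock_unpin_lock(&my_lock.dep_map, cookie);
    	raw_spin_unlock(&my_lock);
    }

In-tree callers normally go through the lockdep_pin_lock()/lockdep_repin_lock()/lockdep_unpin_lock() wrappers, which this merge updates to pass cookies through and which compile away when lockdep is disabled; the scheduler uses them to assert that rq->lock is not dropped unexpectedly across pick_next_task().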