author    Linus Torvalds <torvalds@linux-foundation.org>  2011-10-26 10:17:32 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-10-26 10:17:32 -0400
commit    3cfef9524677a4ecb392d6fbffe6ebce6302f1d4 (patch)
tree      88647d9dc50d634dee9cfeb7f354d620977a2f33 /kernel
parent    982653009b883ef1529089e3e6f1ae2fee41cbe2 (diff)
parent    68cc3990a545dc0da221b4844dd8b9c06623a6c5 (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
  rtmutex: Add missing rcu_read_unlock() in debug_rt_mutex_print_deadlock()
  lockdep: Comment all warnings
  lib: atomic64: Change the type of local lock to raw_spinlock_t
  locking, lib/atomic64: Annotate atomic64_lock::lock as raw
  locking, x86, iommu: Annotate qi->q_lock as raw
  locking, x86, iommu: Annotate irq_2_ir_lock as raw
  locking, x86, iommu: Annotate iommu->register_lock as raw
  locking, dma, ipu: Annotate bank_lock as raw
  locking, ARM: Annotate low level hw locks as raw
  locking, drivers/dca: Annotate dca_lock as raw
  locking, powerpc: Annotate uic->lock as raw
  locking, x86: mce: Annotate cmci_discover_lock as raw
  locking, ACPI: Annotate c3_lock as raw
  locking, oprofile: Annotate oprofilefs lock as raw
  locking, video: Annotate vga console lock as raw
  locking, latencytop: Annotate latency_lock as raw
  locking, timer_stats: Annotate table_lock as raw
  locking, rwsem: Annotate inner lock as raw
  locking, semaphores: Annotate inner lock as raw
  locking, sched: Annotate thread_group_cputimer as raw
  ...

Fix up conflicts in kernel/posix-cpu-timers.c manually: making
cputimer->cputime a raw lock conflicted with the ABBA fix in commit
bcd5cff7216f ("cputimer: Cure lock inversion").
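Editorial note: every annotation patch in this series applies the same mechanical substitution. A spinlock_t that is taken from contexts which can never sleep becomes a raw_spinlock_t, and each spin_*() call on it becomes the matching raw_spin_*() call; on PREEMPT_RT, spinlock_t turns into a sleeping lock, while raw_spinlock_t always busy-waits, so the annotation marks locks that must keep true spinlock semantics. A minimal sketch of the pattern follows, using a hypothetical lock named example_lock that is not part of this merge:

#include <linux/spinlock.h>

/* Hypothetical lock; before the conversion this would have been
 * "static DEFINE_SPINLOCK(example_lock);". */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_critical_section(void)
{
	unsigned long flags;

	/*
	 * Disables local IRQs and spins until the lock is free; never
	 * sleeps, even on PREEMPT_RT. Before the conversion this was
	 * spin_lock_irqsave(&example_lock, flags).
	 */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... short, bounded critical section ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}

The diffs below apply exactly this substitution to release_list_lock, the kretprobe hash locks, latency_lock, logbuf_lock, cputimer->lock, sem->lock, table_lock and the ring-buffer reader_lock; lockdep.c additionally gains a comment on every warning site, and rtmutex-debug.c drops its private TRACE_* machinery in favour of debug_locks.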
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c              |  18
-rw-r--r--  kernel/kprobes.c             |  34
-rw-r--r--  kernel/latencytop.c          |  14
-rw-r--r--  kernel/lockdep.c             | 156
-rw-r--r--  kernel/posix-cpu-timers.c    |  14
-rw-r--r--  kernel/printk.c              |  46
-rw-r--r--  kernel/rtmutex-debug.c       |  77
-rw-r--r--  kernel/sched_stats.h         |  12
-rw-r--r--  kernel/semaphore.c           |  28
-rw-r--r--  kernel/time/timer_stats.c    |   6
-rw-r--r--  kernel/trace/ring_buffer.c   |  52
-rw-r--r--  kernel/trace/trace.c         |  10
-rw-r--r--  kernel/trace/trace_irqsoff.c |   6
13 files changed, 279 insertions(+), 194 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 1d2b6ceea95d..453100a4159d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -265,7 +265,7 @@ list_for_each_entry(_root, &roots, root_list)
 /* the list of cgroups eligible for automatic release. Protected by
  * release_list_lock */
 static LIST_HEAD(release_list);
-static DEFINE_SPINLOCK(release_list_lock);
+static DEFINE_RAW_SPINLOCK(release_list_lock);
 static void cgroup_release_agent(struct work_struct *work);
 static DECLARE_WORK(release_agent_work, cgroup_release_agent);
 static void check_for_release(struct cgroup *cgrp);
@@ -4014,11 +4014,11 @@ again:
 	finish_wait(&cgroup_rmdir_waitq, &wait);
 	clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
 
-	spin_lock(&release_list_lock);
+	raw_spin_lock(&release_list_lock);
 	set_bit(CGRP_REMOVED, &cgrp->flags);
 	if (!list_empty(&cgrp->release_list))
 		list_del_init(&cgrp->release_list);
-	spin_unlock(&release_list_lock);
+	raw_spin_unlock(&release_list_lock);
 
 	cgroup_lock_hierarchy(cgrp->root);
 	/* delete this cgroup from parent->children */
@@ -4671,13 +4671,13 @@ static void check_for_release(struct cgroup *cgrp)
 		 * already queued for a userspace notification, queue
 		 * it now */
 		int need_schedule_work = 0;
-		spin_lock(&release_list_lock);
+		raw_spin_lock(&release_list_lock);
 		if (!cgroup_is_removed(cgrp) &&
 		    list_empty(&cgrp->release_list)) {
 			list_add(&cgrp->release_list, &release_list);
 			need_schedule_work = 1;
 		}
-		spin_unlock(&release_list_lock);
+		raw_spin_unlock(&release_list_lock);
 		if (need_schedule_work)
 			schedule_work(&release_agent_work);
 	}
@@ -4729,7 +4729,7 @@ static void cgroup_release_agent(struct work_struct *work)
 {
 	BUG_ON(work != &release_agent_work);
 	mutex_lock(&cgroup_mutex);
-	spin_lock(&release_list_lock);
+	raw_spin_lock(&release_list_lock);
 	while (!list_empty(&release_list)) {
 		char *argv[3], *envp[3];
 		int i;
@@ -4738,7 +4738,7 @@ static void cgroup_release_agent(struct work_struct *work)
 					    struct cgroup,
 					    release_list);
 		list_del_init(&cgrp->release_list);
-		spin_unlock(&release_list_lock);
+		raw_spin_unlock(&release_list_lock);
 		pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 		if (!pathbuf)
 			goto continue_free;
@@ -4768,9 +4768,9 @@ static void cgroup_release_agent(struct work_struct *work)
 continue_free:
 		kfree(pathbuf);
 		kfree(agentbuf);
-		spin_lock(&release_list_lock);
+		raw_spin_lock(&release_list_lock);
 	}
-	spin_unlock(&release_list_lock);
+	raw_spin_unlock(&release_list_lock);
 	mutex_unlock(&cgroup_mutex);
 }
 
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index b30fd54eb985..2f193d0ba7f2 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -78,10 +78,10 @@ static bool kprobes_all_disarmed;
 static DEFINE_MUTEX(kprobe_mutex);
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 static struct {
-	spinlock_t lock ____cacheline_aligned_in_smp;
+	raw_spinlock_t lock ____cacheline_aligned_in_smp;
 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
 
-static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
+static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
 {
 	return &(kretprobe_table_locks[hash].lock);
 }
@@ -1013,9 +1013,9 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
 	hlist_del(&ri->hlist);
 	INIT_HLIST_NODE(&ri->hlist);
 	if (likely(rp)) {
-		spin_lock(&rp->lock);
+		raw_spin_lock(&rp->lock);
 		hlist_add_head(&ri->hlist, &rp->free_instances);
-		spin_unlock(&rp->lock);
+		raw_spin_unlock(&rp->lock);
 	} else
 		/* Unregistering */
 		hlist_add_head(&ri->hlist, head);
@@ -1026,19 +1026,19 @@ void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
 __acquires(hlist_lock)
 {
 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-	spinlock_t *hlist_lock;
+	raw_spinlock_t *hlist_lock;
 
 	*head = &kretprobe_inst_table[hash];
 	hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_lock_irqsave(hlist_lock, *flags);
+	raw_spin_lock_irqsave(hlist_lock, *flags);
 }
 
 static void __kprobes kretprobe_table_lock(unsigned long hash,
 	unsigned long *flags)
 __acquires(hlist_lock)
 {
-	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_lock_irqsave(hlist_lock, *flags);
+	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+	raw_spin_lock_irqsave(hlist_lock, *flags);
 }
 
 void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
@@ -1046,18 +1046,18 @@ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
 __releases(hlist_lock)
 {
 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-	spinlock_t *hlist_lock;
+	raw_spinlock_t *hlist_lock;
 
 	hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_unlock_irqrestore(hlist_lock, *flags);
+	raw_spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
 static void __kprobes kretprobe_table_unlock(unsigned long hash,
 	unsigned long *flags)
 __releases(hlist_lock)
 {
-	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_unlock_irqrestore(hlist_lock, *flags);
+	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+	raw_spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
 /*
@@ -1663,12 +1663,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 
 	/*TODO: consider to only swap the RA after the last pre_handler fired */
 	hash = hash_ptr(current, KPROBE_HASH_BITS);
-	spin_lock_irqsave(&rp->lock, flags);
+	raw_spin_lock_irqsave(&rp->lock, flags);
 	if (!hlist_empty(&rp->free_instances)) {
 		ri = hlist_entry(rp->free_instances.first,
 				struct kretprobe_instance, hlist);
 		hlist_del(&ri->hlist);
-		spin_unlock_irqrestore(&rp->lock, flags);
+		raw_spin_unlock_irqrestore(&rp->lock, flags);
 
 		ri->rp = rp;
 		ri->task = current;
@@ -1685,7 +1685,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 		kretprobe_table_unlock(hash, &flags);
 	} else {
 		rp->nmissed++;
-		spin_unlock_irqrestore(&rp->lock, flags);
+		raw_spin_unlock_irqrestore(&rp->lock, flags);
 	}
 	return 0;
 }
@@ -1721,7 +1721,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 		rp->maxactive = num_possible_cpus();
 #endif
 	}
-	spin_lock_init(&rp->lock);
+	raw_spin_lock_init(&rp->lock);
 	INIT_HLIST_HEAD(&rp->free_instances);
 	for (i = 0; i < rp->maxactive; i++) {
 		inst = kmalloc(sizeof(struct kretprobe_instance) +
@@ -1959,7 +1959,7 @@ static int __init init_kprobes(void)
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		INIT_HLIST_HEAD(&kprobe_table[i]);
 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
-		spin_lock_init(&(kretprobe_table_locks[i].lock));
+		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
 	}
 
 	/*
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 376066e10413..4ac8ebfcab59 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -58,7 +58,7 @@
 #include <linux/list.h>
 #include <linux/stacktrace.h>
 
-static DEFINE_SPINLOCK(latency_lock);
+static DEFINE_RAW_SPINLOCK(latency_lock);
 
 #define MAXLR 128
 static struct latency_record latency_record[MAXLR];
@@ -72,19 +72,19 @@ void clear_all_latency_tracing(struct task_struct *p)
 	if (!latencytop_enabled)
 		return;
 
-	spin_lock_irqsave(&latency_lock, flags);
+	raw_spin_lock_irqsave(&latency_lock, flags);
 	memset(&p->latency_record, 0, sizeof(p->latency_record));
 	p->latency_record_count = 0;
-	spin_unlock_irqrestore(&latency_lock, flags);
+	raw_spin_unlock_irqrestore(&latency_lock, flags);
 }
 
 static void clear_global_latency_tracing(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&latency_lock, flags);
+	raw_spin_lock_irqsave(&latency_lock, flags);
 	memset(&latency_record, 0, sizeof(latency_record));
-	spin_unlock_irqrestore(&latency_lock, flags);
+	raw_spin_unlock_irqrestore(&latency_lock, flags);
 }
 
 static void __sched
@@ -190,7 +190,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
 	lat.max = usecs;
 	store_stacktrace(tsk, &lat);
 
-	spin_lock_irqsave(&latency_lock, flags);
+	raw_spin_lock_irqsave(&latency_lock, flags);
 
 	account_global_scheduler_latency(tsk, &lat);
 
@@ -231,7 +231,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
 	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
 
 out_unlock:
-	spin_unlock_irqrestore(&latency_lock, flags);
+	raw_spin_unlock_irqrestore(&latency_lock, flags);
 }
 
 static int lstats_show(struct seq_file *m, void *v)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 91d67ce3a8d5..c081fa967c8f 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -96,8 +96,13 @@ static int graph_lock(void)
 
 static inline int graph_unlock(void)
 {
-	if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
+	if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
+		/*
+		 * The lockdep graph lock isn't locked while we expect it to
+		 * be, we're confused now, bye!
+		 */
 		return DEBUG_LOCKS_WARN_ON(1);
+	}
 
 	current->lockdep_recursion--;
 	arch_spin_unlock(&lockdep_lock);
@@ -134,6 +139,9 @@ static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 static inline struct lock_class *hlock_class(struct held_lock *hlock)
 {
 	if (!hlock->class_idx) {
+		/*
+		 * Someone passed in garbage, we give up.
+		 */
 		DEBUG_LOCKS_WARN_ON(1);
 		return NULL;
 	}
@@ -687,6 +695,10 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	 */
 	list_for_each_entry(class, hash_head, hash_entry) {
 		if (class->key == key) {
+			/*
+			 * Huh! same key, different name? Did someone trample
+			 * on some memory? We're most confused.
+			 */
 			WARN_ON_ONCE(class->name != lock->name);
 			return class;
 		}
@@ -800,6 +812,10 @@ out_unlock_set:
 	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
 		lock->class_cache[subclass] = class;
 
+	/*
+	 * Hash collision, did we smoke some? We found a class with a matching
+	 * hash but the subclass -- which is hashed in -- didn't match.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
 		return NULL;
 
@@ -926,7 +942,7 @@ static inline void mark_lock_accessed(struct lock_list *lock,
 	unsigned long nr;
 
 	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries);
+	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
 	lock->parent = parent;
 	lock->class->dep_gen_id = lockdep_dependency_gen_id;
 }
@@ -936,7 +952,7 @@ static inline unsigned long lock_accessed(struct lock_list *lock)
 	unsigned long nr;
 
 	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries);
+	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
 	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
 }
 
@@ -1196,6 +1212,9 @@ static noinline int print_bfs_bug(int ret)
 	if (!debug_locks_off_graph_unlock())
 		return 0;
 
+	/*
+	 * Breadth-first-search failed, graph got corrupted?
+	 */
 	WARN(1, "lockdep bfs error:%d\n", ret);
 
 	return 0;
@@ -1944,6 +1963,11 @@ out_bug:
 	if (!debug_locks_off_graph_unlock())
 		return 0;
 
+	/*
+	 * Clearly we all shouldn't be here, but since we made it we
+	 * can reliable say we messed up our state. See the above two
+	 * gotos for reasons why we could possibly end up here.
+	 */
 	WARN_ON(1);
 
 	return 0;
@@ -1975,6 +1999,11 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 	struct held_lock *hlock_curr, *hlock_next;
 	int i, j;
 
+	/*
+	 * We might need to take the graph lock, ensure we've got IRQs
+	 * disabled to make this an IRQ-safe lock.. for recursion reasons
+	 * lockdep won't complain about its own locking errors.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return 0;
 	/*
@@ -2126,6 +2155,10 @@ static void check_chain_key(struct task_struct *curr)
 		hlock = curr->held_locks + i;
 		if (chain_key != hlock->prev_chain_key) {
 			debug_locks_off();
+			/*
+			 * We got mighty confused, our chain keys don't match
+			 * with what we expect, someone trample on our task state?
+			 */
 			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
 				curr->lockdep_depth, i,
 				(unsigned long long)chain_key,
@@ -2133,6 +2166,9 @@ static void check_chain_key(struct task_struct *curr)
 			return;
 		}
 		id = hlock->class_idx - 1;
+		/*
+		 * Whoops ran out of static storage again?
+		 */
 		if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
 			return;
 
@@ -2144,6 +2180,10 @@ static void check_chain_key(struct task_struct *curr)
 	}
 	if (chain_key != curr->curr_chain_key) {
 		debug_locks_off();
+		/*
+		 * More smoking hash instead of calculating it, damn see these
+		 * numbers float.. I bet that a pink elephant stepped on my memory.
+		 */
 		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
 			curr->lockdep_depth, i,
 			(unsigned long long)chain_key,
@@ -2525,12 +2565,24 @@ void trace_hardirqs_on_caller(unsigned long ip)
 		return;
 	}
 
+	/*
+	 * We're enabling irqs and according to our state above irqs weren't
+	 * already enabled, yet we find the hardware thinks they are in fact
+	 * enabled.. someone messed up their IRQ state tracing.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
+	/*
+	 * See the fine text that goes along with this variable definition.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
 		return;
 
+	/*
+	 * Can't allow enabling interrupts while in an interrupt handler,
+	 * that's general bad form and such. Recursion, limited stack etc..
+	 */
 	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
 		return;
 
@@ -2558,6 +2610,10 @@ void trace_hardirqs_off_caller(unsigned long ip)
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
+	/*
+	 * So we're supposed to get called after you mask local IRQs, but for
+	 * some reason the hardware doesn't quite think you did a proper job.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
@@ -2590,6 +2646,10 @@ void trace_softirqs_on(unsigned long ip)
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
+	/*
+	 * We fancy IRQs being disabled here, see softirq.c, avoids
+	 * funny state and nesting things.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
@@ -2626,6 +2686,9 @@ void trace_softirqs_off(unsigned long ip)
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
+	/*
+	 * We fancy IRQs being disabled here, see softirq.c
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
@@ -2637,6 +2700,9 @@ void trace_softirqs_off(unsigned long ip)
 		curr->softirq_disable_ip = ip;
 		curr->softirq_disable_event = ++curr->irq_events;
 		debug_atomic_inc(softirqs_off_events);
+		/*
+		 * Whoops, we wanted softirqs off, so why aren't they?
+		 */
 		DEBUG_LOCKS_WARN_ON(!softirq_count());
 	} else
 		debug_atomic_inc(redundant_softirqs_off);
@@ -2661,6 +2727,9 @@ static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
 	if (!(gfp_mask & __GFP_FS))
 		return;
 
+	/*
+	 * Oi! Can't be having __GFP_FS allocations with IRQs disabled.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
 		return;
 
@@ -2773,13 +2842,13 @@ static int separate_irq_context(struct task_struct *curr,
 	return 0;
 }
 
-#else
+#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
 
 static inline
 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		enum lock_usage_bit new_bit)
 {
-	WARN_ON(1);
+	WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
 	return 1;
 }
 
@@ -2799,7 +2868,7 @@ void lockdep_trace_alloc(gfp_t gfp_mask)
 {
 }
 
-#endif
+#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
 
 /*
  * Mark a lock with a usage bit, and validate the state transition:
@@ -2880,6 +2949,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	lock->cpu = raw_smp_processor_id();
 #endif
 
+	/*
+	 * Can't be having no nameless bastards around this place!
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!name)) {
 		lock->name = "NULL";
 		return;
@@ -2887,6 +2959,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 
 	lock->name = name;
 
+	/*
+	 * No key, no joy, we need to hash something.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!key))
 		return;
 	/*
@@ -2894,6 +2969,9 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	 */
 	if (!static_obj(key)) {
 		printk("BUG: key %p not in .data!\n", key);
+		/*
+		 * What it says above ^^^^^, I suggest you read it.
+		 */
 		DEBUG_LOCKS_WARN_ON(1);
 		return;
 	}
@@ -2932,6 +3010,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (unlikely(!debug_locks))
 		return 0;
 
+	/*
+	 * Lockdep should run with IRQs disabled, otherwise we could
+	 * get an interrupt which would want to take locks, which would
+	 * end up in lockdep and have you got a head-ache already?
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return 0;
 
@@ -2963,6 +3046,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	 * dependency checks are done)
 	 */
 	depth = curr->lockdep_depth;
+	/*
+	 * Ran out of static storage for our per-task lock stack again have we?
+	 */
 	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
 		return 0;
 
@@ -2981,6 +3067,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	}
 
 	hlock = curr->held_locks + depth;
+	/*
+	 * Plain impossible, we just registered it and checked it weren't no
+	 * NULL like.. I bet this mushroom I ate was good!
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!class))
 		return 0;
 	hlock->class_idx = class_idx;
@@ -3015,11 +3105,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	 * the hash, not class->key.
 	 */
 	id = class - lock_classes;
+	/*
+	 * Whoops, we did it again.. ran straight out of our static allocation.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
 		return 0;
 
 	chain_key = curr->curr_chain_key;
 	if (!depth) {
+		/*
+		 * How can we have a chain hash when we ain't got no keys?!
+		 */
 		if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
 			return 0;
 		chain_head = 1;
@@ -3091,6 +3187,9 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
 {
 	if (unlikely(!debug_locks))
 		return 0;
+	/*
+	 * Lockdep should run with IRQs disabled, recursion, head-ache, etc..
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return 0;
 
@@ -3120,6 +3219,11 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 		if (!class)
 			return 0;
 
+		/*
+		 * References, but not a lock we're actually ref-counting?
+		 * State got messed up, follow the sites that change ->references
+		 * and try to make sense of it.
+		 */
 		if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
 			return 0;
 
@@ -3142,6 +3246,10 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
 	int i;
 
 	depth = curr->lockdep_depth;
+	/*
+	 * This function is about (re)setting the class of a held lock,
+	 * yet we're not actually holding any locks. Naughty user!
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!depth))
 		return 0;
 
@@ -3177,6 +3285,10 @@ found_it:
 			return 0;
 	}
 
+	/*
+	 * I took it apart and put it back together again, except now I have
+	 * these 'spare' parts.. where shall I put them.
+	 */
 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
 		return 0;
 	return 1;
@@ -3201,6 +3313,10 @@ lock_release_non_nested(struct task_struct *curr,
 	 * of held locks:
 	 */
 	depth = curr->lockdep_depth;
+	/*
+	 * So we're all set to release this lock.. wait what lock? We don't
+	 * own any locks, you've been drinking again?
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!depth))
 		return 0;
 
@@ -3253,6 +3369,10 @@ found_it:
 			return 0;
 	}
 
+	/*
+	 * We had N bottles of beer on the wall, we drank one, but now
+	 * there's not N-1 bottles of beer left on the wall...
+	 */
 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
 		return 0;
 	return 1;
@@ -3283,6 +3403,9 @@ static int lock_release_nested(struct task_struct *curr,
 		return lock_release_non_nested(curr, lock, ip);
 	curr->lockdep_depth--;
 
+	/*
+	 * No more locks, but somehow we've got hash left over, who left it?
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
 		return 0;
 
@@ -3365,10 +3488,13 @@ static void check_flags(unsigned long flags)
 	 * check if not in hardirq contexts:
 	 */
 	if (!hardirq_count()) {
-		if (softirq_count())
+		if (softirq_count()) {
+			/* like the above, but with softirqs */
 			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
-		else
+		} else {
+			/* lick the above, does it taste good? */
 			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
+		}
 	}
 
 	if (!debug_locks)
@@ -3506,6 +3632,10 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 	int i, contention_point, contending_point;
 
 	depth = curr->lockdep_depth;
+	/*
+	 * Whee, we contended on this lock, except it seems we're not
+	 * actually trying to acquire anything much at all..
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!depth))
 		return;
 
@@ -3555,6 +3685,10 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 	int i, cpu;
 
 	depth = curr->lockdep_depth;
+	/*
+	 * Yay, we acquired ownership of this lock we didn't try to
+	 * acquire, how the heck did that happen?
+	 */
 	if (DEBUG_LOCKS_WARN_ON(!depth))
 		return;
 
@@ -3759,8 +3893,12 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 			match |= class == lock->class_cache[j];
 
 		if (unlikely(match)) {
-			if (debug_locks_off_graph_unlock())
+			if (debug_locks_off_graph_unlock()) {
+				/*
+				 * We all just reset everything, how did it match?
+				 */
 				WARN_ON(1);
+			}
 			goto out_restore;
 		}
 	}
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 640ded8f5c48..e7cb76dc18f5 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -282,13 +282,13 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 		 * it.
 		 */
 		thread_group_cputime(tsk, &sum);
-		spin_lock_irqsave(&cputimer->lock, flags);
+		raw_spin_lock_irqsave(&cputimer->lock, flags);
 		cputimer->running = 1;
 		update_gt_cputime(&cputimer->cputime, &sum);
 	} else
-		spin_lock_irqsave(&cputimer->lock, flags);
+		raw_spin_lock_irqsave(&cputimer->lock, flags);
 	*times = cputimer->cputime;
-	spin_unlock_irqrestore(&cputimer->lock, flags);
+	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
 }
 
 /*
@@ -999,9 +999,9 @@ static void stop_process_timers(struct signal_struct *sig)
 	struct thread_group_cputimer *cputimer = &sig->cputimer;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cputimer->lock, flags);
+	raw_spin_lock_irqsave(&cputimer->lock, flags);
 	cputimer->running = 0;
-	spin_unlock_irqrestore(&cputimer->lock, flags);
+	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
 }
 
 static u32 onecputick;
@@ -1291,9 +1291,9 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 	if (sig->cputimer.running) {
 		struct task_cputime group_sample;
 
-		spin_lock(&sig->cputimer.lock);
+		raw_spin_lock(&sig->cputimer.lock);
 		group_sample = sig->cputimer.cputime;
-		spin_unlock(&sig->cputimer.lock);
+		raw_spin_unlock(&sig->cputimer.lock);
 
 		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
 			return 1;
diff --git a/kernel/printk.c b/kernel/printk.c
index 28a40d8171b8..b7da18391c38 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -100,7 +100,7 @@ static int console_locked, console_suspended;
  * It is also used in interesting ways to provide interlocking in
  * console_unlock();.
  */
-static DEFINE_SPINLOCK(logbuf_lock);
+static DEFINE_RAW_SPINLOCK(logbuf_lock);
 
 #define LOG_BUF_MASK (log_buf_len-1)
 #define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK])
@@ -212,7 +212,7 @@ void __init setup_log_buf(int early)
 		return;
 	}
 
-	spin_lock_irqsave(&logbuf_lock, flags);
+	raw_spin_lock_irqsave(&logbuf_lock, flags);
 	log_buf_len = new_log_buf_len;
 	log_buf = new_log_buf;
 	new_log_buf_len = 0;
@@ -230,7 +230,7 @@ void __init setup_log_buf(int early)
 	log_start -= offset;
 	con_start -= offset;
 	log_end -= offset;
-	spin_unlock_irqrestore(&logbuf_lock, flags);
+	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
 
 	pr_info("log_buf_len: %d\n", log_buf_len);
 	pr_info("early log buf free: %d(%d%%)\n",
@@ -365,18 +365,18 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
 		if (error)
 			goto out;
 		i = 0;
-		spin_lock_irq(&logbuf_lock);
+		raw_spin_lock_irq(&logbuf_lock);
 		while (!error && (log_start != log_end) && i < len) {
 			c = LOG_BUF(log_start);
 			log_start++;
-			spin_unlock_irq(&logbuf_lock);
+			raw_spin_unlock_irq(&logbuf_lock);
 			error = __put_user(c,buf);
 			buf++;
 			i++;
 			cond_resched();
-			spin_lock_irq(&logbuf_lock);
+			raw_spin_lock_irq(&logbuf_lock);
 		}
-		spin_unlock_irq(&logbuf_lock);
+		raw_spin_unlock_irq(&logbuf_lock);
 		if (!error)
 			error = i;
 		break;
@@ -399,7 +399,7 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
 		count = len;
 		if (count > log_buf_len)
 			count = log_buf_len;
-		spin_lock_irq(&logbuf_lock);
+		raw_spin_lock_irq(&logbuf_lock);
 		if (count > logged_chars)
 			count = logged_chars;
 		if (do_clear)
@@ -416,12 +416,12 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
 			if (j + log_buf_len < log_end)
 				break;
 			c = LOG_BUF(j);
-			spin_unlock_irq(&logbuf_lock);
+			raw_spin_unlock_irq(&logbuf_lock);
 			error = __put_user(c,&buf[count-1-i]);
 			cond_resched();
-			spin_lock_irq(&logbuf_lock);
+			raw_spin_lock_irq(&logbuf_lock);
 		}
-		spin_unlock_irq(&logbuf_lock);
+		raw_spin_unlock_irq(&logbuf_lock);
 		if (error)
 			break;
 		error = i;
@@ -689,7 +689,7 @@ static void zap_locks(void)
 	oops_timestamp = jiffies;
 
 	/* If a crash is occurring, make sure we can't deadlock */
-	spin_lock_init(&logbuf_lock);
+	raw_spin_lock_init(&logbuf_lock);
 	/* And make sure that we print immediately */
 	sema_init(&console_sem, 1);
 }
@@ -802,9 +802,9 @@ static int console_trylock_for_printk(unsigned int cpu)
 		}
 	}
 	printk_cpu = UINT_MAX;
-	spin_unlock(&logbuf_lock);
 	if (wake)
 		up(&console_sem);
+	raw_spin_unlock(&logbuf_lock);
 	return retval;
 }
 static const char recursion_bug_msg [] =
@@ -864,7 +864,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 	}
 
 	lockdep_off();
-	spin_lock(&logbuf_lock);
+	raw_spin_lock(&logbuf_lock);
 	printk_cpu = this_cpu;
 
 	if (recursion_bug) {
@@ -1257,14 +1257,14 @@ void console_unlock(void)
 
 again:
 	for ( ; ; ) {
-		spin_lock_irqsave(&logbuf_lock, flags);
+		raw_spin_lock_irqsave(&logbuf_lock, flags);
 		wake_klogd |= log_start - log_end;
 		if (con_start == log_end)
 			break;			/* Nothing to print */
 		_con_start = con_start;
 		_log_end = log_end;
 		con_start = log_end;		/* Flush */
-		spin_unlock(&logbuf_lock);
+		raw_spin_unlock(&logbuf_lock);
 		stop_critical_timings();	/* don't trace print latency */
 		call_console_drivers(_con_start, _log_end);
 		start_critical_timings();
@@ -1276,7 +1276,7 @@ again:
 	if (unlikely(exclusive_console))
 		exclusive_console = NULL;
 
-	spin_unlock(&logbuf_lock);
+	raw_spin_unlock(&logbuf_lock);
 
 	up(&console_sem);
 
@@ -1286,13 +1286,13 @@ again:
 	 * there's a new owner and the console_unlock() from them will do the
 	 * flush, no worries.
	 */
-	spin_lock(&logbuf_lock);
+	raw_spin_lock(&logbuf_lock);
 	if (con_start != log_end)
 		retry = 1;
-	spin_unlock_irqrestore(&logbuf_lock, flags);
 	if (retry && console_trylock())
 		goto again;
 
+	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
 	if (wake_klogd)
 		wake_up_klogd();
 }
@@ -1522,9 +1522,9 @@ void register_console(struct console *newcon)
 		 * console_unlock(); will print out the buffered messages
 		 * for us.
 		 */
-		spin_lock_irqsave(&logbuf_lock, flags);
+		raw_spin_lock_irqsave(&logbuf_lock, flags);
 		con_start = log_start;
-		spin_unlock_irqrestore(&logbuf_lock, flags);
+		raw_spin_unlock_irqrestore(&logbuf_lock, flags);
 		/*
 		 * We're about to replay the log buffer. Only do this to the
 		 * just-registered console to avoid excessive message spam to
@@ -1731,10 +1731,10 @@ void kmsg_dump(enum kmsg_dump_reason reason)
 	/* Theoretically, the log could move on after we do this, but
 	   there's not a lot we can do about that. The new messages
 	   will overwrite the start of what we dump. */
-	spin_lock_irqsave(&logbuf_lock, flags);
+	raw_spin_lock_irqsave(&logbuf_lock, flags);
 	end = log_end & LOG_BUF_MASK;
 	chars = logged_chars;
-	spin_unlock_irqrestore(&logbuf_lock, flags);
+	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
 
 	if (chars > end) {
 		s1 = log_buf + log_buf_len - chars + end;
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 3c7cbc2c33be..a2e7e7210f3e 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -29,61 +29,6 @@
 
 #include "rtmutex_common.h"
 
-# define TRACE_WARN_ON(x)	WARN_ON(x)
-# define TRACE_BUG_ON(x)	BUG_ON(x)
-
-# define TRACE_OFF()						\
-do {								\
-	if (rt_trace_on) {					\
-		rt_trace_on = 0;				\
-		console_verbose();				\
-		if (raw_spin_is_locked(&current->pi_lock))	\
-			raw_spin_unlock(&current->pi_lock);	\
-	}							\
-} while (0)
-
-# define TRACE_OFF_NOLOCK()					\
-do {								\
-	if (rt_trace_on) {					\
-		rt_trace_on = 0;				\
-		console_verbose();				\
-	}							\
-} while (0)
-
-# define TRACE_BUG_LOCKED()			\
-do {						\
-	TRACE_OFF();				\
-	BUG();					\
-} while (0)
-
-# define TRACE_WARN_ON_LOCKED(c)		\
-do {						\
-	if (unlikely(c)) {			\
-		TRACE_OFF();			\
-		WARN_ON(1);			\
-	}					\
-} while (0)
-
-# define TRACE_BUG_ON_LOCKED(c)			\
-do {						\
-	if (unlikely(c))			\
-		TRACE_BUG_LOCKED();		\
-} while (0)
-
-#ifdef CONFIG_SMP
-# define SMP_TRACE_BUG_ON_LOCKED(c)	TRACE_BUG_ON_LOCKED(c)
-#else
-# define SMP_TRACE_BUG_ON_LOCKED(c)	do { } while (0)
-#endif
-
-/*
- * deadlock detection flag. We turn it off when we detect
- * the first problem because we dont want to recurse back
- * into the tracing code when doing error printk or
- * executing a BUG():
- */
-static int rt_trace_on = 1;
-
 static void printk_task(struct task_struct *p)
 {
 	if (p)
@@ -111,8 +56,8 @@ static void printk_lock(struct rt_mutex *lock, int print_owner)
 
 void rt_mutex_debug_task_free(struct task_struct *task)
 {
-	WARN_ON(!plist_head_empty(&task->pi_waiters));
-	WARN_ON(task->pi_blocked_on);
+	DEBUG_LOCKS_WARN_ON(!plist_head_empty(&task->pi_waiters));
+	DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
 }
 
 /*
@@ -125,7 +70,7 @@ void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter,
 {
 	struct task_struct *task;
 
-	if (!rt_trace_on || detect || !act_waiter)
+	if (!debug_locks || detect || !act_waiter)
 		return;
 
 	task = rt_mutex_owner(act_waiter->lock);
@@ -139,7 +84,7 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
 {
 	struct task_struct *task;
 
-	if (!waiter->deadlock_lock || !rt_trace_on)
+	if (!waiter->deadlock_lock || !debug_locks)
 		return;
 
 	rcu_read_lock();
@@ -149,7 +94,10 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
 		return;
 	}
 
-	TRACE_OFF_NOLOCK();
+	if (!debug_locks_off()) {
+		rcu_read_unlock();
+		return;
+	}
 
 	printk("\n============================================\n");
 	printk(  "[ BUG: circular locking deadlock detected! ]\n");
@@ -180,7 +128,6 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
 
 	printk("[ turning off deadlock detection."
	       "Please report this trace. ]\n\n");
-	local_irq_disable();
 }
 
 void debug_rt_mutex_lock(struct rt_mutex *lock)
@@ -189,7 +136,7 @@ void debug_rt_mutex_lock(struct rt_mutex *lock)
 
 void debug_rt_mutex_unlock(struct rt_mutex *lock)
 {
-	TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
+	DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
 }
 
 void
@@ -199,7 +146,7 @@ debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner)
 
 void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
 {
-	TRACE_WARN_ON_LOCKED(!rt_mutex_owner(lock));
+	DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
 }
 
 void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
@@ -213,8 +160,8 @@ void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
 void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
 {
 	put_pid(waiter->deadlock_task_pid);
-	TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry));
-	TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
+	DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->list_entry));
+	DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
 	memset(waiter, 0x22, sizeof(*waiter));
 }
 
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 331e01bcd026..87f9e36ea56e 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -282,10 +282,10 @@ static inline void account_group_user_time(struct task_struct *tsk,
 	if (!cputimer->running)
 		return;
 
-	spin_lock(&cputimer->lock);
+	raw_spin_lock(&cputimer->lock);
 	cputimer->cputime.utime =
 		cputime_add(cputimer->cputime.utime, cputime);
-	spin_unlock(&cputimer->lock);
+	raw_spin_unlock(&cputimer->lock);
 }
 
 /**
@@ -306,10 +306,10 @@ static inline void account_group_system_time(struct task_struct *tsk,
 	if (!cputimer->running)
 		return;
 
-	spin_lock(&cputimer->lock);
+	raw_spin_lock(&cputimer->lock);
 	cputimer->cputime.stime =
 		cputime_add(cputimer->cputime.stime, cputime);
-	spin_unlock(&cputimer->lock);
+	raw_spin_unlock(&cputimer->lock);
 }
 
 /**
@@ -330,7 +330,7 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
 	if (!cputimer->running)
 		return;
 
-	spin_lock(&cputimer->lock);
+	raw_spin_lock(&cputimer->lock);
 	cputimer->cputime.sum_exec_runtime += ns;
-	spin_unlock(&cputimer->lock);
+	raw_spin_unlock(&cputimer->lock);
 }
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 94a62c0d4ade..d831841e55a7 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -54,12 +54,12 @@ void down(struct semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
 		__down(sem);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 }
 EXPORT_SYMBOL(down);
 
@@ -77,12 +77,12 @@ int down_interruptible(struct semaphore *sem)
 	unsigned long flags;
 	int result = 0;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
 		result = __down_interruptible(sem);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
 }
@@ -103,12 +103,12 @@ int down_killable(struct semaphore *sem)
 	unsigned long flags;
 	int result = 0;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
 		result = __down_killable(sem);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
 }
@@ -132,11 +132,11 @@ int down_trylock(struct semaphore *sem)
 	unsigned long flags;
 	int count;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	count = sem->count - 1;
 	if (likely(count >= 0))
 		sem->count = count;
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 
 	return (count < 0);
 }
@@ -157,12 +157,12 @@ int down_timeout(struct semaphore *sem, long jiffies)
 	unsigned long flags;
 	int result = 0;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
 		result = __down_timeout(sem, jiffies);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
 }
@@ -179,12 +179,12 @@ void up(struct semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(list_empty(&sem->wait_list)))
 		sem->count++;
 	else
 		__up(sem);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 }
 EXPORT_SYMBOL(up);
 
@@ -217,9 +217,9 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
 		if (timeout <= 0)
 			goto timed_out;
 		__set_task_state(task, state);
-		spin_unlock_irq(&sem->lock);
+		raw_spin_unlock_irq(&sem->lock);
 		timeout = schedule_timeout(timeout);
-		spin_lock_irq(&sem->lock);
+		raw_spin_lock_irq(&sem->lock);
 		if (waiter.up)
 			return 0;
 	}
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index a5d0a3a85dd8..0b537f27b559 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -81,7 +81,7 @@ struct entry {
81/* 81/*
82 * Spinlock protecting the tables - not taken during lookup: 82 * Spinlock protecting the tables - not taken during lookup:
83 */ 83 */
84static DEFINE_SPINLOCK(table_lock); 84static DEFINE_RAW_SPINLOCK(table_lock);
85 85
86/* 86/*
87 * Per-CPU lookup locks for fast hash lookup: 87 * Per-CPU lookup locks for fast hash lookup:
@@ -188,7 +188,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
 	prev = NULL;
 	curr = *head;
 
-	spin_lock(&table_lock);
+	raw_spin_lock(&table_lock);
 	/*
 	 * Make sure we have not raced with another CPU:
 	 */
@@ -215,7 +215,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
 		*head = curr;
 	}
  out_unlock:
-	spin_unlock(&table_lock);
+	raw_spin_unlock(&table_lock);
 
 	return curr;
 }
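
timer_stats shows the minimal form of the conversion: change the definition macro, then switch each acquisition to the raw_ variant. A self-contained sketch of the idiom (names are hypothetical, not from this patch):

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(demo_lock);
	static unsigned long demo_events;

	static void demo_account_event(void)
	{
		unsigned long flags;

		/* spins with interrupts off even on PREEMPT_RT */
		raw_spin_lock_irqsave(&demo_lock, flags);
		demo_events++;
		raw_spin_unlock_irqrestore(&demo_lock, flags);
	}
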
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 731201bf4acc..f2f821acc597 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -478,7 +478,7 @@ struct ring_buffer_per_cpu {
 	int				cpu;
 	atomic_t			record_disabled;
 	struct ring_buffer		*buffer;
-	spinlock_t			reader_lock;	/* serialize readers */
+	raw_spinlock_t			reader_lock;	/* serialize readers */
 	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	struct list_head		*pages;
@@ -1062,7 +1062,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 
 	cpu_buffer->cpu = cpu;
 	cpu_buffer->buffer = buffer;
-	spin_lock_init(&cpu_buffer->reader_lock);
+	raw_spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
 	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
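
Note that raw_spin_lock_init() still pairs with lockdep_set_class() here: converting to raw keeps full lockdep coverage (unlike arch_spinlock_t), and the per-buffer key stops lockdep from lumping every ring buffer's reader_lock into one class. A sketch of that init pattern for a hypothetical object:

	#include <linux/lockdep.h>
	#include <linux/spinlock.h>

	struct demo_obj {
		raw_spinlock_t lock;
	};

	static void demo_obj_init(struct demo_obj *obj, struct lock_class_key *key)
	{
		raw_spin_lock_init(&obj->lock);
		/* a distinct class per owner avoids false lockdep reports */
		lockdep_set_class(&obj->lock, key);
	}
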
@@ -1259,7 +1259,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	struct list_head *p;
 	unsigned i;
 
-	spin_lock_irq(&cpu_buffer->reader_lock);
+	raw_spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
 	for (i = 0; i < nr_pages; i++) {
@@ -1277,7 +1277,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	rb_check_pages(cpu_buffer);
 
 out:
-	spin_unlock_irq(&cpu_buffer->reader_lock);
+	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 static void
@@ -1288,7 +1288,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	struct list_head *p;
 	unsigned i;
 
-	spin_lock_irq(&cpu_buffer->reader_lock);
+	raw_spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
 	for (i = 0; i < nr_pages; i++) {
@@ -1303,7 +1303,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	rb_check_pages(cpu_buffer);
 
 out:
-	spin_unlock_irq(&cpu_buffer->reader_lock);
+	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 /**
@@ -2804,9 +2804,9 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 
 	cpu_buffer = iter->cpu_buffer;
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	rb_iter_reset(iter);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 
@@ -3265,12 +3265,12 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
  again:
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		rb_advance_reader(cpu_buffer);
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
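
The peek/consume paths share a conditional-locking shape: interrupts are masked unconditionally with local_irq_save(), and reader_lock is taken only when the dolock flag (computed earlier in each function from the calling context) says locking is safe. A reduced sketch of that shape, all names hypothetical:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct demo_buf {
		raw_spinlock_t	lock;
		int		head;
	};

	static int demo_peek(struct demo_buf *buf, bool dolock)
	{
		unsigned long flags;
		int ret;

		local_irq_save(flags);			/* always mask interrupts */
		if (dolock)
			raw_spin_lock(&buf->lock);
		ret = buf->head;			/* stand-in for rb_buffer_peek() */
		if (dolock)
			raw_spin_unlock(&buf->lock);
		local_irq_restore(flags);

		return ret;
	}
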
@@ -3295,9 +3295,9 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	unsigned long flags;
 
  again:
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	event = rb_iter_peek(iter, ts);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		goto again;
@@ -3337,7 +3337,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 
 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
 	if (event) {
@@ -3346,7 +3346,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
 	}
 
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
  out:
@@ -3438,11 +3438,11 @@ ring_buffer_read_start(struct ring_buffer_iter *iter)
 
 	cpu_buffer = iter->cpu_buffer;
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	arch_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
 	arch_spin_unlock(&cpu_buffer->lock);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
 
@@ -3477,7 +3477,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  again:
 	event = rb_iter_peek(iter, ts);
 	if (!event)
@@ -3488,7 +3488,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 
 	rb_advance_iter(iter);
  out:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return event;
 }
@@ -3557,7 +3557,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 
 	atomic_inc(&cpu_buffer->record_disabled);
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
 		goto out;
@@ -3569,7 +3569,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	arch_spin_unlock(&cpu_buffer->lock);
 
  out:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	atomic_dec(&cpu_buffer->record_disabled);
 }
@@ -3607,10 +3607,10 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
 	if (!ret)
@@ -3641,10 +3641,10 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
 	return ret;
@@ -3841,7 +3841,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	if (!bpage)
 		goto out;
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	reader = rb_get_reader_page(cpu_buffer);
 	if (!reader)
@@ -3964,7 +3964,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
 
  out_unlock:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
  out:
 	return ret;
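
After this file's conversion the ring buffer spans all three tiers of the spinlock hierarchy: spinlock_t (may become a sleeping lock on PREEMPT_RT), raw_spinlock_t (always spins, still lockdep-tracked), and arch_spinlock_t (the bare architecture lock, with no debugging or lockdep at all). reader_lock moves from the first tier to the second; cpu_buffer->lock stays on the third. An illustrative declaration of the three tiers (not taken from the tree):

	#include <linux/spinlock.h>

	struct demo_tiers {
		spinlock_t	outer;	/* ordinary data; may sleep on RT */
		raw_spinlock_t	mid;	/* must truly spin; lockdep-covered */
		arch_spinlock_t	inner;	/* innermost; caller does the bookkeeping */
	};
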
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e5df02c69b1d..0c8bdeeb358b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -341,7 +341,7 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
 
 static int trace_stop_count;
-static DEFINE_SPINLOCK(tracing_start_lock);
+static DEFINE_RAW_SPINLOCK(tracing_start_lock);
 
 static void wakeup_work_handler(struct work_struct *work)
 {
@@ -960,7 +960,7 @@ void tracing_start(void)
 	if (tracing_disabled)
 		return;
 
-	spin_lock_irqsave(&tracing_start_lock, flags);
+	raw_spin_lock_irqsave(&tracing_start_lock, flags);
 	if (--trace_stop_count) {
 		if (trace_stop_count < 0) {
 			/* Someone screwed up their debugging */
@@ -985,7 +985,7 @@ void tracing_start(void)
 
 	ftrace_start();
  out:
-	spin_unlock_irqrestore(&tracing_start_lock, flags);
+	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
 
 /**
@@ -1000,7 +1000,7 @@ void tracing_stop(void)
 	unsigned long flags;
 
 	ftrace_stop();
-	spin_lock_irqsave(&tracing_start_lock, flags);
+	raw_spin_lock_irqsave(&tracing_start_lock, flags);
 	if (trace_stop_count++)
 		goto out;
 
@@ -1018,7 +1018,7 @@ void tracing_stop(void)
 	arch_spin_unlock(&ftrace_max_lock);
 
  out:
-	spin_unlock_irqrestore(&tracing_start_lock, flags);
+	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
 
 void trace_stop_cmdline_recording(void);
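
tracing_start()/tracing_stop() pair a nesting counter with the now-raw tracing_start_lock so the calls can nest from non-preemptible contexts; only the outermost start actually resumes tracing. A reduced sketch of that counter discipline (hypothetical names):

	#include <linux/kernel.h>
	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(demo_start_lock);
	static int demo_stop_count;

	static void demo_stop(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&demo_start_lock, flags);
		demo_stop_count++;			/* nested stops just count up */
		raw_spin_unlock_irqrestore(&demo_start_lock, flags);
	}

	static void demo_start(void)
	{
		unsigned long flags;
		bool resume;

		raw_spin_lock_irqsave(&demo_start_lock, flags);
		resume = demo_stop_count > 0 && --demo_stop_count == 0;
		raw_spin_unlock_irqrestore(&demo_start_lock, flags);

		if (resume)
			pr_info("demo: resumed\n");	/* outermost start only */
	}
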
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 667aa8cc0cfc..11186212068c 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -23,7 +23,7 @@ static int tracer_enabled __read_mostly;
 
 static DEFINE_PER_CPU(int, tracing_cpu);
 
-static DEFINE_SPINLOCK(max_trace_lock);
+static DEFINE_RAW_SPINLOCK(max_trace_lock);
 
 enum {
 	TRACER_IRQS_OFF		= (1 << 1),
@@ -321,7 +321,7 @@ check_critical_timing(struct trace_array *tr,
 	if (!report_latency(delta))
 		goto out;
 
-	spin_lock_irqsave(&max_trace_lock, flags);
+	raw_spin_lock_irqsave(&max_trace_lock, flags);
 
 	/* check if we are still the max latency */
 	if (!report_latency(delta))
@@ -344,7 +344,7 @@ check_critical_timing(struct trace_array *tr,
 	max_sequence++;
 
 out_unlock:
-	spin_unlock_irqrestore(&max_trace_lock, flags);
+	raw_spin_unlock_irqrestore(&max_trace_lock, flags);
 
 out:
 	data->critical_sequence = max_sequence;
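
max_trace_lock is taken from check_critical_timing(), which by design runs with interrupts already off on the irqsoff measurement path, so the lock must never sleep: hence raw. The hunks also show a double-checked update: report_latency() is tested cheaply before the lock and re-tested under it, since another CPU may have recorded a larger latency in between. A self-contained sketch of that pattern (hypothetical names):

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(demo_max_lock);
	static unsigned long demo_max_delta;

	static void demo_record_latency(unsigned long delta)
	{
		unsigned long flags;

		if (delta <= demo_max_delta)		/* cheap unlocked check */
			return;

		raw_spin_lock_irqsave(&demo_max_lock, flags);
		if (delta > demo_max_delta)		/* re-check under the lock */
			demo_max_delta = delta;
		raw_spin_unlock_irqrestore(&demo_max_lock, flags);
	}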