author     Linus Torvalds <torvalds@linux-foundation.org>   2010-03-13 17:43:01 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-03-13 17:43:01 -0500
commit     4e3eaddd142e2142c048c5052a0a9d2604fccfc6 (patch)
tree       5bc45a286502e54e790c54948f22364c5afd9d89 /include
parent     8655e7e3ddec60603c4f6c14cdf642e2ba198df8 (diff)
parent     b97c4bc16734a2e597dac7f91ee9eb78f4aeef9a (diff)
Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
locking: Make sparse work with inline spinlocks and rwlocks
x86/mce: Fix RCU lockdep splats
rcu: Increase RCU CPU stall timeouts if PROVE_RCU
ftrace: Replace read_barrier_depends() with rcu_dereference_raw()
rcu: Suppress RCU lockdep warnings during early boot
rcu, ftrace: Fix RCU lockdep splat in ftrace_perf_buf_prepare()
rcu: Suppress __mpol_dup() false positive from RCU lockdep
rcu: Make rcu_read_lock_sched_held() handle !PREEMPT
rcu: Add control variables to lockdep_rcu_dereference() diagnostics
rcu, cgroup: Relax the check in task_subsys_state() as early boot is now handled by lockdep-RCU
rcu: Use wrapper function instead of exporting tasklist_lock
sched, rcu: Fix rcu_dereference() for RCU-lockdep
rcu: Make task_subsys_state() RCU-lockdep checks handle boot-time use
rcu: Fix holdoff for accelerated GPs for last non-dynticked CPU
x86/gart: Unexport gart_iommu_aperture
Fix trivial conflicts in kernel/trace/ftrace.c
Diffstat (limited to 'include')
-rw-r--r--   include/linux/cred.h        2
-rw-r--r--   include/linux/rcupdate.h   45
-rw-r--r--   include/linux/rwlock.h     20
-rw-r--r--   include/linux/sched.h       4
-rw-r--r--   include/linux/spinlock.h   13
-rw-r--r--   include/trace/ftrace.h      4
6 files changed, 61 insertions, 27 deletions
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 4db09f89b637..52507c3e1387 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -280,7 +280,7 @@ static inline void put_cred(const struct cred *_cred)
  * task or by holding tasklist_lock to prevent it from being unlinked.
  */
 #define __task_cred(task) \
-	((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock))))
+	((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_tasklist_lock_is_held())))
 
 /**
  * get_task_cred - Get another task's objective credentials
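The old expression required tasklist_lock itself to be visible (and exported) everywhere __task_cred() was expanded; the wrapper removes that dependency. A minimal sketch of a caller that satisfies the new check (example_task_uid is a hypothetical name, not from this patch):

/* Holding rcu_read_lock() satisfies the rcu_read_lock_held() arm of
 * the rcu_dereference_check() above. */
static inline uid_t example_task_uid(struct task_struct *task)
{
	uid_t uid;

	rcu_read_lock();
	uid = __task_cred(task)->uid;
	rcu_read_unlock();

	return uid;
}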
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index a005cac5e302..3024050c82a1 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -101,6 +101,11 @@ extern struct lockdep_map rcu_sched_lock_map;
 # define rcu_read_release_sched() \
 		lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
 
+static inline int debug_lockdep_rcu_enabled(void)
+{
+	return likely(rcu_scheduler_active && debug_locks);
+}
+
 /**
  * rcu_read_lock_held - might we be in RCU read-side critical section?
  *
@@ -108,12 +113,14 @@ extern struct lockdep_map rcu_sched_lock_map;
  * an RCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
  * this assumes we are in an RCU read-side critical section unless it can
  * prove otherwise.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
 static inline int rcu_read_lock_held(void)
 {
-	if (debug_locks)
-		return lock_is_held(&rcu_lock_map);
-	return 1;
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
+	return lock_is_held(&rcu_lock_map);
 }
 
 /**
@@ -123,12 +130,14 @@ static inline int rcu_read_lock_held(void)
  * an RCU-bh read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
  * this assumes we are in an RCU-bh read-side critical section unless it can
  * prove otherwise.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
 static inline int rcu_read_lock_bh_held(void)
 {
-	if (debug_locks)
-		return lock_is_held(&rcu_bh_lock_map);
-	return 1;
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
+	return lock_is_held(&rcu_bh_lock_map);
 }
 
 /**
@@ -139,15 +148,26 @@ static inline int rcu_read_lock_bh_held(void)
  * this assumes we are in an RCU-sched read-side critical section unless it
  * can prove otherwise.  Note that disabling of preemption (including
  * disabling irqs) counts as an RCU-sched read-side critical section.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
+#ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
 	int lockdep_opinion = 0;
 
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
 	if (debug_locks)
 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
-	return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active;
+	return lockdep_opinion || preempt_count() != 0;
+}
+#else /* #ifdef CONFIG_PREEMPT */
+static inline int rcu_read_lock_sched_held(void)
+{
+	return 1;
 }
+#endif /* #else #ifdef CONFIG_PREEMPT */
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
@@ -168,10 +188,17 @@ static inline int rcu_read_lock_bh_held(void)
 	return 1;
 }
 
+#ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
-	return preempt_count() != 0 || !rcu_scheduler_active;
+	return !rcu_scheduler_active || preempt_count() != 0;
+}
+#else /* #ifdef CONFIG_PREEMPT */
+static inline int rcu_read_lock_sched_held(void)
+{
+	return 1;
 }
+#endif /* #else #ifdef CONFIG_PREEMPT */
 
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
@@ -188,7 +215,7 @@ static inline int rcu_read_lock_sched_held(void)
  */
 #define rcu_dereference_check(p, c) \
 	({ \
-		if (debug_locks && !(c)) \
+		if (debug_lockdep_rcu_enabled() && !(c)) \
 			lockdep_rcu_dereference(__FILE__, __LINE__); \
 		rcu_dereference_raw(p); \
 	})
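Taken together: every lockdep-RCU predicate is now gated on debug_lockdep_rcu_enabled(), so the checks stay quiet until rcu_scheduler_active is set during boot, and the new !CONFIG_PREEMPT variant of rcu_read_lock_sched_held() hard-wires 1, since with preemption impossible any code region is effectively an RCU-sched reader. A rough sketch of the reader-side pattern these predicates police (struct foo, foo_ptr, and foo_get_val are hypothetical names):

struct foo {
	int val;
};
static struct foo *foo_ptr;	/* published with rcu_assign_pointer() */

static int foo_get_val(void)
{
	int v;

	rcu_read_lock();
	/* If the condition is false while debug_lockdep_rcu_enabled(),
	 * lockdep_rcu_dereference() prints a splat for this site. */
	v = rcu_dereference_check(foo_ptr, rcu_read_lock_held())->val;
	rcu_read_unlock();

	return v;
}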
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 71e0b00b6f2c..bc2994ed66e1 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -29,25 +29,25 @@ do { \
 #endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK
-extern void do_raw_read_lock(rwlock_t *lock);
+extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
 #define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
 extern int do_raw_read_trylock(rwlock_t *lock);
-extern void do_raw_read_unlock(rwlock_t *lock);
-extern void do_raw_write_lock(rwlock_t *lock);
+extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
+extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
 #define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
 extern int do_raw_write_trylock(rwlock_t *lock);
-extern void do_raw_write_unlock(rwlock_t *lock);
+extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
 #else
-# define do_raw_read_lock(rwlock)	arch_read_lock(&(rwlock)->raw_lock)
+# define do_raw_read_lock(rwlock)	do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
 # define do_raw_read_lock_flags(lock, flags) \
-		arch_read_lock_flags(&(lock)->raw_lock, *(flags))
+		do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
 # define do_raw_read_trylock(rwlock)	arch_read_trylock(&(rwlock)->raw_lock)
-# define do_raw_read_unlock(rwlock)	arch_read_unlock(&(rwlock)->raw_lock)
-# define do_raw_write_lock(rwlock)	arch_write_lock(&(rwlock)->raw_lock)
+# define do_raw_read_unlock(rwlock)	do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
+# define do_raw_write_lock(rwlock)	do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
 # define do_raw_write_lock_flags(lock, flags) \
-		arch_write_lock_flags(&(lock)->raw_lock, *(flags))
+		do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
 # define do_raw_write_trylock(rwlock)	arch_write_trylock(&(rwlock)->raw_lock)
-# define do_raw_write_unlock(rwlock)	arch_write_unlock(&(rwlock)->raw_lock)
+# define do_raw_write_unlock(rwlock)	do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
 #endif
 
 #define read_can_lock(rwlock)		arch_read_can_lock(&(rwlock)->raw_lock)
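The declarations gain __acquires()/__releases() annotations and the inline macros gain explicit __acquire()/__release() statements, so sparse's context counter stays balanced even when the lock bodies are inlined. A hedged sketch of the kind of helper this lets sparse check under "make C=1" (stats_read_lock/stats_read_unlock are hypothetical):

/* Helpers that hand a held lock to their caller; the annotations tell
 * sparse the imbalance within each function is intentional. */
static void stats_read_lock(rwlock_t *lock) __acquires(lock)
{
	read_lock(lock);
}

static void stats_read_unlock(rwlock_t *lock) __releases(lock)
{
	read_unlock(lock);
}

Before this patch, inlined lock bodies were invisible to sparse, so annotated helpers like these produced spurious context-imbalance warnings.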
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8d70ff802da2..dad7f668ebf7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -258,6 +258,10 @@ extern spinlock_t mmlist_lock;
 
 struct task_struct;
 
+#ifdef CONFIG_PROVE_RCU
+extern int lockdep_tasklist_lock_is_held(void);
+#endif /* #ifdef CONFIG_PROVE_RCU */
+
 extern void sched_init(void);
 extern void sched_init_smp(void);
 extern asmlinkage void schedule_tail(struct task_struct *prev);
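The wrapper's definition lives outside include/ and so does not appear in this diffstat; per the summary line "rcu: Use wrapper function instead of exporting tasklist_lock", it presumably reduces to something like:

/* Sketch only, assumed to sit next to tasklist_lock's definition;
 * not part of the diff shown here. */
#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */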
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 86088213334a..89fac6a3f78b 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -128,19 +128,21 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
 #define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
 
 #ifdef CONFIG_DEBUG_SPINLOCK
-extern void do_raw_spin_lock(raw_spinlock_t *lock);
+extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
-extern void do_raw_spin_unlock(raw_spinlock_t *lock);
+extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
 #else
-static inline void do_raw_spin_lock(raw_spinlock_t *lock)
+static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
 {
+	__acquire(lock);
 	arch_spin_lock(&lock->raw_lock);
 }
 
 static inline void
-do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
 {
+	__acquire(lock);
 	arch_spin_lock_flags(&lock->raw_lock, *flags);
 }
 
@@ -149,9 +151,10 @@ static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
 	return arch_spin_trylock(&(lock)->raw_lock);
 }
 
-static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
+static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 {
 	arch_spin_unlock(&lock->raw_lock);
+	__release(lock);
 }
 #endif
 
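The added __acquire()/__release() statements cost nothing at runtime: outside a sparse run they compile to no-ops. The definitions in include/linux/compiler.h of this era are roughly (paraphrased from memory, not part of this diff):

#ifdef __CHECKER__
# define __acquire(x)	__context__(x, 1)
# define __release(x)	__context__(x, -1)
#else
# define __acquire(x)	(void)0
# define __release(x)	(void)0
#endif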
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 0804cd594803..601ad7744247 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -699,9 +699,9 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  *	__cpu = smp_processor_id();
  *
  *	if (in_nmi())
- *		trace_buf = rcu_dereference(perf_trace_buf_nmi);
+ *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
  *	else
- *		trace_buf = rcu_dereference(perf_trace_buf);
+ *		trace_buf = rcu_dereference_sched(perf_trace_buf);
  *
  *	if (!trace_buf)
  *		goto end;
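This hunk only touches documentation: the pseudo-code comment now names rcu_dereference_sched(), matching the fact that the buffer is protected by RCU-sched, where disabled preemption is the read-side critical section. The pattern, sketched with a hypothetical buffer pointer (example_buf, example_touch_buf are not kernel symbols):

static char *example_buf;	/* updated via RCU-sched grace periods */

static void example_touch_buf(void)
{
	char *buf;

	preempt_disable();	/* RCU-sched read-side critical section */
	buf = rcu_dereference_sched(example_buf);
	if (buf)
		buf[0] = 0;	/* must finish before preempt_enable() */
	preempt_enable();
}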