author     Linus Torvalds <torvalds@linux-foundation.org>   2010-03-13 17:43:01 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-03-13 17:43:01 -0500
commit     4e3eaddd142e2142c048c5052a0a9d2604fccfc6 (patch)
tree       5bc45a286502e54e790c54948f22364c5afd9d89
parent     8655e7e3ddec60603c4f6c14cdf642e2ba198df8 (diff)
parent     b97c4bc16734a2e597dac7f91ee9eb78f4aeef9a (diff)
Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  locking: Make sparse work with inline spinlocks and rwlocks
  x86/mce: Fix RCU lockdep splats
  rcu: Increase RCU CPU stall timeouts if PROVE_RCU
  ftrace: Replace read_barrier_depends() with rcu_dereference_raw()
  rcu: Suppress RCU lockdep warnings during early boot
  rcu, ftrace: Fix RCU lockdep splat in ftrace_perf_buf_prepare()
  rcu: Suppress __mpol_dup() false positive from RCU lockdep
  rcu: Make rcu_read_lock_sched_held() handle !PREEMPT
  rcu: Add control variables to lockdep_rcu_dereference() diagnostics
  rcu, cgroup: Relax the check in task_subsys_state() as early boot is now handled by lockdep-RCU
  rcu: Use wrapper function instead of exporting tasklist_lock
  sched, rcu: Fix rcu_dereference() for RCU-lockdep
  rcu: Make task_subsys_state() RCU-lockdep checks handle boot-time use
  rcu: Fix holdoff for accelerated GPs for last non-dynticked CPU
  x86/gart: Unexport gart_iommu_aperture

Fix trivial conflicts in kernel/trace/ftrace.c
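Nearly every fix in this pull revolves around one API: rcu_dereference_check(p, c),
which dereferences an RCU-protected pointer and, under CONFIG_PROVE_RCU, emits a
lockdep splat unless condition c (listing every legal protection for the access)
holds. A minimal sketch of the pattern; the foo structure, global_foo pointer, and
foo_mutex below are hypothetical, not part of this merge:

        #include <linux/rcupdate.h>
        #include <linux/mutex.h>

        struct foo {
                int value;
        };

        static struct foo *global_foo;  /* protected by RCU or foo_mutex */
        static DEFINE_MUTEX(foo_mutex);

        static int read_foo_value(void)
        {
                struct foo *p;
                int val;

                rcu_read_lock();
                /*
                 * Legal because we hold rcu_read_lock(); also legal from
                 * any caller holding foo_mutex.  Anything else splats
                 * under CONFIG_PROVE_RCU.
                 */
                p = rcu_dereference_check(global_foo,
                                          rcu_read_lock_held() ||
                                          lockdep_is_held(&foo_mutex));
                val = p ? p->value : -1;
                rcu_read_unlock();
                return val;
        }

The diffs below apply this same pattern to mcelog.next, task credentials, and
the pid hash.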
 arch/x86/kernel/aperture_64.c        |  1
 arch/x86/kernel/cpu/mcheck/mce.c     | 11
 include/linux/cred.h                 |  2
 include/linux/rcupdate.h             | 45
 include/linux/rwlock.h               | 20
 include/linux/sched.h                |  4
 include/linux/spinlock.h             | 13
 include/trace/ftrace.h               |  4
 kernel/exit.c                        |  2
 kernel/fork.c                        |  9
 kernel/lockdep.c                     |  1
 kernel/pid.c                         |  4
 kernel/rcutree.h                     | 21
 kernel/rcutree_plugin.h              |  8
 kernel/sched_fair.c                  |  2
 kernel/trace/ftrace.c                | 22
 kernel/trace/trace_event_profile.c   |  4
 mm/mempolicy.c                       |  2
 18 files changed, 120 insertions(+), 55 deletions(-)
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index f147a95fd84a..3704997e8b25 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -31,7 +31,6 @@
 #include <asm/x86_init.h>
 
 int gart_iommu_aperture;
-EXPORT_SYMBOL_GPL(gart_iommu_aperture);
 int gart_iommu_aperture_disabled __initdata;
 int gart_iommu_aperture_allowed __initdata;
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 28cba46bf32c..bd58de4d7a29 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -46,6 +46,11 @@
 
 #include "mce-internal.h"
 
+#define rcu_dereference_check_mce(p) \
+        rcu_dereference_check((p), \
+                              rcu_read_lock_sched_held() || \
+                              lockdep_is_held(&mce_read_mutex))
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/mce.h>
 
@@ -158,7 +163,7 @@ void mce_log(struct mce *mce)
         mce->finished = 0;
         wmb();
         for (;;) {
-                entry = rcu_dereference(mcelog.next);
+                entry = rcu_dereference_check_mce(mcelog.next);
                 for (;;) {
                         /*
                          * When the buffer fills up discard new entries.
@@ -1500,7 +1505,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
                 return -ENOMEM;
 
         mutex_lock(&mce_read_mutex);
-        next = rcu_dereference(mcelog.next);
+        next = rcu_dereference_check_mce(mcelog.next);
 
         /* Only supports full reads right now */
         if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
@@ -1565,7 +1570,7 @@ timeout:
 static unsigned int mce_poll(struct file *file, poll_table *wait)
 {
         poll_wait(file, &mce_wait, wait);
-        if (rcu_dereference(mcelog.next))
+        if (rcu_dereference_check_mce(mcelog.next))
                 return POLLIN | POLLRDNORM;
         return 0;
 }
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 4db09f89b637..52507c3e1387 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -280,7 +280,7 @@ static inline void put_cred(const struct cred *_cred)
  * task or by holding tasklist_lock to prevent it from being unlinked.
  */
 #define __task_cred(task) \
-        ((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock))))
+        ((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_tasklist_lock_is_held())))
 
 /**
  * get_task_cred - Get another task's objective credentials
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index a005cac5e302..3024050c82a1 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -101,6 +101,11 @@ extern struct lockdep_map rcu_sched_lock_map;
 # define rcu_read_release_sched() \
                 lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
 
+static inline int debug_lockdep_rcu_enabled(void)
+{
+        return likely(rcu_scheduler_active && debug_locks);
+}
+
 /**
  * rcu_read_lock_held - might we be in RCU read-side critical section?
  *
@@ -108,12 +113,14 @@ extern struct lockdep_map rcu_sched_lock_map;
  * an RCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
  * this assumes we are in an RCU read-side critical section unless it can
  * prove otherwise.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
 static inline int rcu_read_lock_held(void)
 {
-        if (debug_locks)
-                return lock_is_held(&rcu_lock_map);
-        return 1;
+        if (!debug_lockdep_rcu_enabled())
+                return 1;
+        return lock_is_held(&rcu_lock_map);
 }
 
 /**
@@ -123,12 +130,14 @@ static inline int rcu_read_lock_held(void)
  * an RCU-bh read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
  * this assumes we are in an RCU-bh read-side critical section unless it can
  * prove otherwise.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
 static inline int rcu_read_lock_bh_held(void)
 {
-        if (debug_locks)
-                return lock_is_held(&rcu_bh_lock_map);
-        return 1;
+        if (!debug_lockdep_rcu_enabled())
+                return 1;
+        return lock_is_held(&rcu_bh_lock_map);
 }
 
 /**
@@ -139,15 +148,26 @@ static inline int rcu_read_lock_bh_held(void)
  * this assumes we are in an RCU-sched read-side critical section unless it
  * can prove otherwise.  Note that disabling of preemption (including
  * disabling irqs) counts as an RCU-sched read-side critical section.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
+#ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
         int lockdep_opinion = 0;
 
+        if (!debug_lockdep_rcu_enabled())
+                return 1;
         if (debug_locks)
                 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
-        return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active;
+        return lockdep_opinion || preempt_count() != 0;
+}
+#else /* #ifdef CONFIG_PREEMPT */
+static inline int rcu_read_lock_sched_held(void)
+{
+        return 1;
 }
+#endif /* #else #ifdef CONFIG_PREEMPT */
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
@@ -168,10 +188,17 @@ static inline int rcu_read_lock_bh_held(void)
         return 1;
 }
 
+#ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
-        return preempt_count() != 0 || !rcu_scheduler_active;
+        return !rcu_scheduler_active || preempt_count() != 0;
+}
+#else /* #ifdef CONFIG_PREEMPT */
+static inline int rcu_read_lock_sched_held(void)
+{
+        return 1;
 }
+#endif /* #else #ifdef CONFIG_PREEMPT */
 
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
@@ -188,7 +215,7 @@ static inline int rcu_read_lock_sched_held(void)
  */
 #define rcu_dereference_check(p, c) \
         ({ \
-                if (debug_locks && !(c)) \
+                if (debug_lockdep_rcu_enabled() && !(c)) \
                         lockdep_rcu_dereference(__FILE__, __LINE__); \
                 rcu_dereference_raw(p); \
         })
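On the reader side, disabling preemption is what constitutes an RCU-sched
critical section, which is why preempt_count() != 0 satisfies the check above
(and why the !PREEMPT variant can simply return 1, since preemption can never
occur there). A hedged sketch of a reader these checks would accept, reusing
the hypothetical global_foo from the earlier sketch:

        static void example_sched_reader(void)
        {
                struct foo *p;

                preempt_disable();      /* equivalent to rcu_read_lock_sched() */
                /*
                 * rcu_dereference_sched() is shorthand for
                 * rcu_dereference_check(p, rcu_read_lock_sched_held()).
                 */
                p = rcu_dereference_sched(global_foo);
                if (p)
                        printk(KERN_INFO "foo value: %d\n", p->value);
                preempt_enable();
        }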
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 71e0b00b6f2c..bc2994ed66e1 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -29,25 +29,25 @@ do { \
 #endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK
- extern void do_raw_read_lock(rwlock_t *lock);
+ extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
 #define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
  extern int do_raw_read_trylock(rwlock_t *lock);
- extern void do_raw_read_unlock(rwlock_t *lock);
- extern void do_raw_write_lock(rwlock_t *lock);
+ extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
+ extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
 #define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
  extern int do_raw_write_trylock(rwlock_t *lock);
- extern void do_raw_write_unlock(rwlock_t *lock);
+ extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
 #else
-# define do_raw_read_lock(rwlock)       arch_read_lock(&(rwlock)->raw_lock)
+# define do_raw_read_lock(rwlock)       do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
 # define do_raw_read_lock_flags(lock, flags) \
-                arch_read_lock_flags(&(lock)->raw_lock, *(flags))
+                do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
 # define do_raw_read_trylock(rwlock)    arch_read_trylock(&(rwlock)->raw_lock)
-# define do_raw_read_unlock(rwlock)     arch_read_unlock(&(rwlock)->raw_lock)
-# define do_raw_write_lock(rwlock)      arch_write_lock(&(rwlock)->raw_lock)
+# define do_raw_read_unlock(rwlock)     do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
+# define do_raw_write_lock(rwlock)      do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
 # define do_raw_write_lock_flags(lock, flags) \
-                arch_write_lock_flags(&(lock)->raw_lock, *(flags))
+                do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
 # define do_raw_write_trylock(rwlock)   arch_write_trylock(&(rwlock)->raw_lock)
-# define do_raw_write_unlock(rwlock)    arch_write_unlock(&(rwlock)->raw_lock)
+# define do_raw_write_unlock(rwlock)    do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
 #endif
 
 #define read_can_lock(rwlock)           arch_read_can_lock(&(rwlock)->raw_lock)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8d70ff802da2..dad7f668ebf7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -258,6 +258,10 @@ extern spinlock_t mmlist_lock;
 
 struct task_struct;
 
+#ifdef CONFIG_PROVE_RCU
+extern int lockdep_tasklist_lock_is_held(void);
+#endif /* #ifdef CONFIG_PROVE_RCU */
+
 extern void sched_init(void);
 extern void sched_init_smp(void);
 extern asmlinkage void schedule_tail(struct task_struct *prev);
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 86088213334a..89fac6a3f78b 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -128,19 +128,21 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
 #define raw_spin_unlock_wait(lock)      arch_spin_unlock_wait(&(lock)->raw_lock)
 
 #ifdef CONFIG_DEBUG_SPINLOCK
- extern void do_raw_spin_lock(raw_spinlock_t *lock);
+ extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
- extern void do_raw_spin_unlock(raw_spinlock_t *lock);
+ extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
 #else
-static inline void do_raw_spin_lock(raw_spinlock_t *lock)
+static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
 {
+        __acquire(lock);
         arch_spin_lock(&lock->raw_lock);
 }
 
 static inline void
-do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
 {
+        __acquire(lock);
         arch_spin_lock_flags(&lock->raw_lock, *flags);
 }
 
@@ -149,9 +151,10 @@ static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
         return arch_spin_trylock(&(lock)->raw_lock);
 }
 
-static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
+static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 {
         arch_spin_unlock(&lock->raw_lock);
+        __release(lock);
 }
 #endif
 
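These annotations exist purely for sparse ('make C=1'): __acquires()/__releases()
on a declaration tell sparse that the function changes the lock context, while
__acquire()/__release() statements balance the context counter inside bodies
whose arch_* internals sparse cannot analyze. A sketch of the idiom on a
hypothetical wrapper pair, not part of this patch:

        /* Hypothetical wrappers; the annotations are no-ops outside sparse. */
        static inline void my_lock(raw_spinlock_t *lock) __acquires(lock)
        {
                __acquire(lock);        /* tell sparse the context count went up... */
                arch_spin_lock(&lock->raw_lock);
        }

        static inline void my_unlock(raw_spinlock_t *lock) __releases(lock)
        {
                arch_spin_unlock(&lock->raw_lock);
                __release(lock);        /* ...and back down */
        }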
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 0804cd594803..601ad7744247 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -699,9 +699,9 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  *      __cpu = smp_processor_id();
  *
  *      if (in_nmi())
- *              trace_buf = rcu_dereference(perf_trace_buf_nmi);
+ *              trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
  *      else
- *              trace_buf = rcu_dereference(perf_trace_buf);
+ *              trace_buf = rcu_dereference_sched(perf_trace_buf);
  *
  *      if (!trace_buf)
  *              goto end;
diff --git a/kernel/exit.c b/kernel/exit.c
index ce1e48c2d93d..cce59cb5ee6a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -87,7 +87,7 @@ static void __exit_signal(struct task_struct *tsk)
 
         sighand = rcu_dereference_check(tsk->sighand,
                                         rcu_read_lock_held() ||
-                                        lockdep_is_held(&tasklist_lock));
+                                        lockdep_tasklist_lock_is_held());
         spin_lock(&sighand->siglock);
 
         posix_cpu_timers_exit(tsk);
diff --git a/kernel/fork.c b/kernel/fork.c
index 1beb6c303c41..4799c5f0e6d0 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -86,7 +86,14 @@ int max_threads; /* tunable limit on nr_threads */
 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
-EXPORT_SYMBOL_GPL(tasklist_lock);
+
+#ifdef CONFIG_PROVE_RCU
+int lockdep_tasklist_lock_is_held(void)
+{
+        return lockdep_is_held(&tasklist_lock);
+}
+EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
+#endif /* #ifdef CONFIG_PROVE_RCU */
 
 int nr_processes(void)
 {
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 0c30d0455de1..681bc2e1e187 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3822,6 +3822,7 @@ void lockdep_rcu_dereference(const char *file, const int line)
         printk("%s:%d invoked rcu_dereference_check() without protection!\n",
                         file, line);
         printk("\nother info that might help us debug this:\n\n");
+        printk("\nrcu_scheduler_active = %d, debug_locks = %d\n", rcu_scheduler_active, debug_locks);
         lockdep_print_held_locks(curr);
         printk("\nstack backtrace:\n");
         dump_stack();
diff --git a/kernel/pid.c b/kernel/pid.c
index 86b296943e5f..aebb30d9c233 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -367,7 +367,9 @@ struct task_struct *pid_task(struct pid *pid, enum pid_type type)
         struct task_struct *result = NULL;
         if (pid) {
                 struct hlist_node *first;
-                first = rcu_dereference_check(pid->tasks[type].first, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock));
+                first = rcu_dereference_check(pid->tasks[type].first,
+                                              rcu_read_lock_held() ||
+                                              lockdep_tasklist_lock_is_held());
                 if (first)
                         result = hlist_entry(first, struct task_struct, pids[(type)].node);
         }
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 1439eb504c22..4a525a30e08e 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -246,12 +246,21 @@ struct rcu_data {
 
 #define RCU_JIFFIES_TILL_FORCE_QS        3      /* for rsp->jiffies_force_qs */
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK   (10 * HZ)  /* for rsp->jiffies_stall */
-#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ)  /* for rsp->jiffies_stall */
-#define RCU_STALL_RAT_DELAY             2         /* Allow other CPUs time */
-                                                  /*  to take at least one */
-                                                  /*  scheduling clock irq */
-                                                  /*  before ratting on them. */
+
+#ifdef CONFIG_PROVE_RCU
+#define RCU_STALL_DELAY_DELTA          (5 * HZ)
+#else
+#define RCU_STALL_DELAY_DELTA          0
+#endif
+
+#define RCU_SECONDS_TILL_STALL_CHECK   (10 * HZ + RCU_STALL_DELAY_DELTA)
+                                                /* for rsp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ + RCU_STALL_DELAY_DELTA)
+                                                /* for rsp->jiffies_stall */
+#define RCU_STALL_RAT_DELAY             2       /* Allow other CPUs time */
+                                                /*  to take at least one */
+                                                /*  scheduling clock irq */
+                                                /*  before ratting on them. */
 
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 464ad2cdee00..79b53bda8943 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1010,6 +1010,10 @@ int rcu_needs_cpu(int cpu)
         int c = 0;
         int thatcpu;
 
+        /* Check for being in the holdoff period. */
+        if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
+                return rcu_needs_cpu_quick_check(cpu);
+
         /* Don't bother unless we are the last non-dyntick-idle CPU. */
         for_each_cpu_not(thatcpu, nohz_cpu_mask)
                 if (thatcpu != cpu) {
@@ -1041,10 +1045,8 @@ int rcu_needs_cpu(int cpu)
         }
 
         /* If RCU callbacks are still pending, RCU still needs this CPU. */
-        if (c) {
+        if (c)
                 raise_softirq(RCU_SOFTIRQ);
-                per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-        }
         return c;
 }
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3e1fd96c6cf9..5a5ea2cd924f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3476,7 +3476,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 
 static inline int on_null_domain(int cpu)
 {
-        return !rcu_dereference(cpu_rq(cpu)->sd);
+        return !rcu_dereference_sched(cpu_rq(cpu)->sd);
 }
 
 /*
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index bb53edbb5c8c..d9062f5cc0c0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -27,6 +27,7 @@
 #include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/hash.h>
+#include <linux/rcupdate.h>
 
 #include <trace/events/sched.h>
 
@@ -84,18 +85,22 @@ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
+/*
+ * Traverse the ftrace_list, invoking all entries.  The reason that we
+ * can use rcu_dereference_raw() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism.  The rcu_dereference_raw() calls are needed to handle
+ * concurrent insertions into the ftrace_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
-        struct ftrace_ops *op = ftrace_list;
-
-        /* in case someone actually ports this to alpha! */
-        read_barrier_depends();
+        struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/
 
         while (op != &ftrace_list_end) {
-                /* silly alpha */
-                read_barrier_depends();
                 op->func(ip, parent_ip);
-                op = op->next;
+                op = rcu_dereference_raw(op->next); /*see above*/
         };
 }
 
@@ -150,8 +155,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
          * the ops->next pointer is valid before another CPU sees
          * the ops pointer included into the ftrace_list.
          */
-        smp_wmb();
-        ftrace_list = ops;
+        rcu_assign_pointer(ftrace_list, ops);
 
         if (ftrace_enabled) {
                 ftrace_func_t func;
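The ftrace change is the classic RCU publish pattern: writers publish with
rcu_assign_pointer(), which supplies the write barrier the removed smp_wmb()
used to provide, and readers pick the pointer up with an rcu_dereference()
variant (raw here, since removed entries are leaked rather than freed, so no
grace period is needed). A hedged sketch of the same pattern on a hypothetical
singly linked list of handlers, modeled on ftrace_list:

        struct handler {
                void (*func)(unsigned long ip);
                struct handler *next;
        };

        static struct handler *handler_list;    /* entries are never freed */

        /* Writer, serialized externally (e.g. by a mutex). */
        static void add_handler(struct handler *h)
        {
                h->next = handler_list;
                /* Orders h's initialization before its publication. */
                rcu_assign_pointer(handler_list, h);
        }

        /* Reader: raw dereference is safe because nothing is ever freed. */
        static void call_handlers(unsigned long ip)
        {
                struct handler *h = rcu_dereference_raw(handler_list);

                while (h) {
                        h->func(ip);
                        h = rcu_dereference_raw(h->next);
                }
        }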
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index f0d693005075..c1cc3ab633de 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -138,9 +138,9 @@ __kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
         cpu = smp_processor_id();
 
         if (in_nmi())
-                trace_buf = rcu_dereference(perf_trace_buf_nmi);
+                trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
         else
-                trace_buf = rcu_dereference(perf_trace_buf);
+                trace_buf = rcu_dereference_sched(perf_trace_buf);
 
         if (!trace_buf)
                 goto err;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index bda230e52acd..643f66e10187 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1756,10 +1756,12 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
 
         if (!new)
                 return ERR_PTR(-ENOMEM);
+        rcu_read_lock();
         if (current_cpuset_is_being_rebound()) {
                 nodemask_t mems = cpuset_mems_allowed(current);
                 mpol_rebind_policy(old, &mems);
         }
+        rcu_read_unlock();
         *new = *old;
         atomic_set(&new->refcnt, 1);
         return new;