author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2010-02-22 20:04:50 -0500
committer  Ingo Molnar <mingo@elte.hu>                     2010-02-25 04:34:26 -0500
commit     d11c563dd20ff35da5652c3e1c989d9e10e1d6d0
tree       b189f50de7a01d7603935d4da7e755d764dfe67e
parent     a898def29e4119bc01ebe7ca97423181f4c0ea2d
sched: Use lockdep-based checking on rcu_dereference()
Update the rcu_dereference() usages to take advantage of the new
lockdep-based checking.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1266887105-1528-6-git-send-email-paulmck@linux.vnet.ibm.com>
[ -v2: fix allmodconfig missing symbol export build failure on x86 ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
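For readers new to the API this patch uses, the following is a minimal, self-contained sketch (not part of this commit) of the rcu_dereference_check() pattern applied throughout the diff below: the second argument is a lockdep expression naming every context in which the dereference is legal. The my_data, my_head, my_mutex, and read_my_value names are hypothetical.

    #include <linux/mutex.h>
    #include <linux/rcupdate.h>
    #include <linux/lockdep.h>

    struct my_data {
    	int value;
    };

    static struct my_data *my_head;	/* published with rcu_assign_pointer() */
    static DEFINE_MUTEX(my_mutex);	/* updaters hold this while changing my_head */

    static int read_my_value(void)
    {
    	struct my_data *p;

    	/*
    	 * The dereference is legal either inside rcu_read_lock() or
    	 * with my_mutex held; under CONFIG_PROVE_RCU any other caller
    	 * triggers a lockdep splat instead of silently racing.
    	 */
    	p = rcu_dereference_check(my_head,
    				  rcu_read_lock_held() ||
    				  lockdep_is_held(&my_mutex));
    	return p ? p->value : -1;
    }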
-rw-r--r--  include/linux/cgroup.h  |  5
-rw-r--r--  include/linux/cred.h    |  2
-rw-r--r--  init/main.c             |  2
-rw-r--r--  kernel/cgroup.c         | 14
-rw-r--r--  kernel/exit.c           | 14
-rw-r--r--  kernel/fork.c           |  1
-rw-r--r--  kernel/notifier.c       |  6
-rw-r--r--  kernel/pid.c            |  2
-rw-r--r--  kernel/sched.c          | 11
9 files changed, 45 insertions, 12 deletions
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 0008dee66514..c9bbcb2a75ae 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -28,6 +28,7 @@ struct css_id;
 extern int cgroup_init_early(void);
 extern int cgroup_init(void);
 extern void cgroup_lock(void);
+extern int cgroup_lock_is_held(void);
 extern bool cgroup_lock_live_group(struct cgroup *cgrp);
 extern void cgroup_unlock(void);
 extern void cgroup_fork(struct task_struct *p);
@@ -486,7 +487,9 @@ static inline struct cgroup_subsys_state *cgroup_subsys_state(
 static inline struct cgroup_subsys_state *task_subsys_state(
 	struct task_struct *task, int subsys_id)
 {
-	return rcu_dereference(task->cgroups->subsys[subsys_id]);
+	return rcu_dereference_check(task->cgroups->subsys[subsys_id],
+				     rcu_read_lock_held() ||
+				     cgroup_lock_is_held());
 }
 
 static inline struct cgroup* task_cgroup(struct task_struct *task,
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 4e3387a89cb9..4db09f89b637 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -280,7 +280,7 @@ static inline void put_cred(const struct cred *_cred)
  * task or by holding tasklist_lock to prevent it from being unlinked.
  */
 #define __task_cred(task) \
-	((const struct cred *)(rcu_dereference((task)->real_cred)))
+	((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock))))
 
 /**
  * get_task_cred - Get another task's objective credentials
diff --git a/init/main.c b/init/main.c
index 4cb47a159f02..c75dcd6eef09 100644
--- a/init/main.c
+++ b/init/main.c
@@ -416,7 +416,9 @@ static noinline void __init_refok rest_init(void)
 	kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
 	numa_default_policy();
 	pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
+	rcu_read_lock();
 	kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
+	rcu_read_unlock();
 	unlock_kernel();
 
 	/*
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index aa3bee566446..b1a0f5a528fe 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -166,6 +166,20 @@ static DEFINE_SPINLOCK(hierarchy_id_lock);
  */
 static int need_forkexit_callback __read_mostly;
 
+#ifdef CONFIG_PROVE_LOCKING
+int cgroup_lock_is_held(void)
+{
+	return lockdep_is_held(&cgroup_mutex);
+}
+#else /* #ifdef CONFIG_PROVE_LOCKING */
+int cgroup_lock_is_held(void)
+{
+	return mutex_is_locked(&cgroup_mutex);
+}
+#endif /* #else #ifdef CONFIG_PROVE_LOCKING */
+
+EXPORT_SYMBOL_GPL(cgroup_lock_is_held);
+
 /* convenient tests for these bits */
 inline int cgroup_is_removed(const struct cgroup *cgrp)
 {
diff --git a/kernel/exit.c b/kernel/exit.c
index 546774a31a66..45ed043b8bf5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -85,7 +85,9 @@ static void __exit_signal(struct task_struct *tsk)
 	BUG_ON(!sig);
 	BUG_ON(!atomic_read(&sig->count));
 
-	sighand = rcu_dereference(tsk->sighand);
+	sighand = rcu_dereference_check(tsk->sighand,
+					rcu_read_lock_held() ||
+					lockdep_is_held(&tasklist_lock));
 	spin_lock(&sighand->siglock);
 
 	posix_cpu_timers_exit(tsk);
@@ -170,8 +172,10 @@ void release_task(struct task_struct * p)
 repeat:
 	tracehook_prepare_release_task(p);
 	/* don't need to get the RCU readlock here - the process is dead and
-	 * can't be modifying its own credentials */
+	 * can't be modifying its own credentials. But shut RCU-lockdep up */
+	rcu_read_lock();
 	atomic_dec(&__task_cred(p)->user->processes);
+	rcu_read_unlock();
 
 	proc_flush_task(p);
 
@@ -473,9 +477,11 @@ static void close_files(struct files_struct * files)
 	/*
 	 * It is safe to dereference the fd table without RCU or
 	 * ->file_lock because this is the last reference to the
-	 * files structure.
+	 * files structure. But use RCU to shut RCU-lockdep up.
 	 */
+	rcu_read_lock();
 	fdt = files_fdtable(files);
+	rcu_read_unlock();
 	for (;;) {
 		unsigned long set;
 		i = j * __NFDBITS;
@@ -521,10 +527,12 @@ void put_files_struct(struct files_struct *files)
 		 * at the end of the RCU grace period. Otherwise,
 		 * you can free files immediately.
 		 */
+		rcu_read_lock();
 		fdt = files_fdtable(files);
 		if (fdt != &files->fdtab)
 			kmem_cache_free(files_cachep, files);
 		free_fdtable(fdt);
+		rcu_read_unlock();
 	}
 }
 
diff --git a/kernel/fork.c b/kernel/fork.c
index f88bd984df35..17bbf093356d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -86,6 +86,7 @@ int max_threads; /* tunable limit on nr_threads */
 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
+EXPORT_SYMBOL_GPL(tasklist_lock);
 
 int nr_processes(void)
 {
diff --git a/kernel/notifier.c b/kernel/notifier.c
index acd24e7643eb..2488ba7eb568 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -78,10 +78,10 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl,
 	int ret = NOTIFY_DONE;
 	struct notifier_block *nb, *next_nb;
 
-	nb = rcu_dereference(*nl);
+	nb = rcu_dereference_raw(*nl);
 
 	while (nb && nr_to_call) {
-		next_nb = rcu_dereference(nb->next);
+		next_nb = rcu_dereference_raw(nb->next);
 
 #ifdef CONFIG_DEBUG_NOTIFIERS
 		if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
@@ -309,7 +309,7 @@ int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
 	 * racy then it does not matter what the result of the test
 	 * is, we re-check the list after having taken the lock anyway:
 	 */
-	if (rcu_dereference(nh->head)) {
+	if (rcu_dereference_raw(nh->head)) {
 		down_read(&nh->rwsem);
 		ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
 					nr_calls);
diff --git a/kernel/pid.c b/kernel/pid.c
index 2e17c9c92cbe..b08e697cd83f 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -367,7 +367,7 @@ struct task_struct *pid_task(struct pid *pid, enum pid_type type)
 	struct task_struct *result = NULL;
 	if (pid) {
 		struct hlist_node *first;
-		first = rcu_dereference(pid->tasks[type].first);
+		first = rcu_dereference_check(pid->tasks[type].first, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock));
 		if (first)
 			result = hlist_entry(first, struct task_struct, pids[(type)].node);
 	}
diff --git a/kernel/sched.c b/kernel/sched.c
index 3a8fb30a91b1..70ae68680d4c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -645,6 +645,11 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
+#define for_each_domain_rd(p) \
+	rcu_dereference_check((p), \
+			      rcu_read_lock_sched_held() || \
+			      lockdep_is_held(&sched_domains_mutex))
+
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.
@@ -653,7 +658,7 @@ static inline int cpu_of(struct rq *rq)
  * preempt-disabled sections.
  */
 #define for_each_domain(cpu, __sd) \
-	for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
+	for (__sd = for_each_domain_rd(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
 
 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
 #define this_rq()		(&__get_cpu_var(runqueues))
@@ -1531,7 +1536,7 @@ static unsigned long target_load(int cpu, int type)
 
 static struct sched_group *group_of(int cpu)
 {
-	struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+	struct sched_domain *sd = rcu_dereference_sched(cpu_rq(cpu)->sd);
 
 	if (!sd)
 		return NULL;
@@ -4888,7 +4893,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 
 static inline int on_null_domain(int cpu)
 {
-	return !rcu_dereference(cpu_rq(cpu)->sd);
+	return !rcu_dereference_sched(cpu_rq(cpu)->sd);
 }
 
 /*