| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-02-28 13:13:16 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-02-28 13:13:16 -0500 |
| commit | 642c4c75a765d7a3244ab39c8e6fb09be21eca5b | |
| tree | ce0be9b476f362835d3a3d6e4fd32801cd15c9fe /kernel/sched.c | |
| parent | f91b22c35f6b0ae06ec5b67922eca1999c3b6e0a | |
| parent | 71da81324c83ef65bb196c7f874ac1c6996d8287 | |
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (44 commits)
rcu: Fix accelerated GPs for last non-dynticked CPU
rcu: Make non-RCU_PROVE_LOCKING rcu_read_lock_sched_held() understand boot
rcu: Fix accelerated grace periods for last non-dynticked CPU
rcu: Export rcu_scheduler_active
rcu: Make rcu_read_lock_sched_held() take boot time into account
rcu: Make lockdep_rcu_dereference() message less alarmist
sched, cgroups: Fix module export
rcu: Add RCU_CPU_STALL_VERBOSE to dump detailed per-task information
rcu: Fix rcutorture mod_timer argument to delay one jiffy
rcu: Fix deadlock in TREE_PREEMPT_RCU CPU stall detection
rcu: Convert to raw_spinlocks
rcu: Stop overflowing signed integers
rcu: Use canonical URL for Mathieu's dissertation
rcu: Accelerate grace period if last non-dynticked CPU
rcu: Fix citation of Mathieu's dissertation
rcu: Documentation update for CONFIG_PROVE_RCU
security: Apply lockdep-based checking to rcu_dereference() uses
idr: Apply lockdep-based diagnostics to rcu_dereference() uses
radix-tree: Disable RCU lockdep checking in radix tree
vfs: Abstract rcu_dereference_check for files-fdtable use
...
Diffstat (limited to 'kernel/sched.c')
| -rw-r--r-- | kernel/sched.c | 11 |
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 3a8fb30a91b1..3218f5213717 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -645,6 +645,11 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
+#define rcu_dereference_check_sched_domain(p) \
+	rcu_dereference_check((p), \
+			      rcu_read_lock_sched_held() || \
+			      lockdep_is_held(&sched_domains_mutex))
+
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.
@@ -653,7 +658,7 @@ static inline int cpu_of(struct rq *rq)
  * preempt-disabled sections.
  */
 #define for_each_domain(cpu, __sd) \
-	for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
+	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
 
 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
 #define this_rq()		(&__get_cpu_var(runqueues))
@@ -1531,7 +1536,7 @@ static unsigned long target_load(int cpu, int type)
 
 static struct sched_group *group_of(int cpu)
 {
-	struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+	struct sched_domain *sd = rcu_dereference_sched(cpu_rq(cpu)->sd);
 
 	if (!sd)
 		return NULL;
@@ -4888,7 +4893,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 
 static inline int on_null_domain(int cpu)
 {
-	return !rcu_dereference_sched(cpu_rq(cpu)->sd);
+	return !rcu_dereference_sched(cpu_rq(cpu)->sd);
 }
 
 /*
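All of the hunks above follow the pattern this pull request applies across the tree: a bare rcu_dereference() is replaced either by rcu_dereference_sched(), whose read side is a preempt-disabled / rcu_read_lock_sched() section, or by a subsystem-specific wrapper around rcu_dereference_check(), whose second argument tells lockdep every context that may legitimately hold the pointer. The sketch below illustrates that wrapper pattern in isolation; it is not code from this commit, and the my_data / my_mutex / my_ptr names are made up for the example.

```c
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/lockdep.h>
#include <linux/slab.h>

struct my_data {
	int value;
};

/* Updaters take my_mutex; readers use rcu_read_lock(). (Hypothetical names.) */
static DEFINE_MUTEX(my_mutex);
static struct my_data *my_ptr;

/*
 * Dereference my_ptr with lockdep checking: legal inside an RCU read-side
 * critical section or while holding my_mutex, mirroring the
 * rcu_dereference_check_sched_domain() helper added in the diff above.
 */
#define my_ptr_deref() \
	rcu_dereference_check(my_ptr, \
			      rcu_read_lock_held() || \
			      lockdep_is_held(&my_mutex))

static int my_read_value(void)
{
	struct my_data *p;
	int v = -1;

	rcu_read_lock();		/* read side: RCU critical section */
	p = my_ptr_deref();		/* rcu_read_lock_held() is true, no splat */
	if (p)
		v = p->value;
	rcu_read_unlock();
	return v;
}

static void my_update(struct my_data *newp)
{
	struct my_data *old;

	mutex_lock(&my_mutex);		/* update side: hold the mutex */
	old = my_ptr_deref();		/* also fine: lockdep_is_held() is true */
	rcu_assign_pointer(my_ptr, newp);
	mutex_unlock(&my_mutex);

	if (old) {
		synchronize_rcu();	/* wait for readers before freeing */
		kfree(old);
	}
}
```

With CONFIG_PROVE_RCU disabled the lockdep condition is not evaluated and the wrapper behaves like a plain rcu_dereference(), so the extra checking only matters in debug builds.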
