path: root/kernel/sched_fair.c
author		Chris Friesen <cfriesen@nortel.com>	2008-09-22 13:06:09 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-09-22 13:43:10 -0400
commit		caea8a03702c147e8ae90da0801e7ba8297b1d46 (patch)
tree		7ff0746c3dbcc73bc241957c2b3149bcf8901fb2 /kernel/sched_fair.c
parent		f681bbd656b01439be904250a1581ca9c27505a1 (diff)
sched: fix list traversal to use _rcu variant
load_balance_fair() calls rcu_read_lock() but then traverses the list using
the regular list traversal routine. This patch converts the list traversal
to use the _rcu version.

Signed-off-by: Chris Friesen <cfriesen@nortel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	2
1 files changed, 1 insertions, 1 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7328383690f1..3b89aa6594a9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1521,7 +1521,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	rcu_read_lock();
 	update_h_load(busiest_cpu);
 
-	list_for_each_entry(tg, &task_groups, list) {
+	list_for_each_entry_rcu(tg, &task_groups, list) {
 		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
 		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
 		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
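For context, the requirement the patch enforces is the standard RCU list idiom: a traversal done under rcu_read_lock() must use the _rcu list primitives, and updaters must use list_add_rcu()/list_del_rcu() and wait for a grace period before freeing. The sketch below illustrates that idiom in kernel style; it is not part of this patch, and the names (my_entry, my_list, my_list_lock, my_list_sum, my_list_remove) are purely illustrative.

/*
 * Illustrative sketch only, not from this patch: a list read under
 * rcu_read_lock() and modified under a separate mutex.
 */
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct my_entry {
	int			value;
	struct list_head	list;
};

static LIST_HEAD(my_list);
static DEFINE_MUTEX(my_list_lock);

/* Reader: rcu_read_lock() must be paired with the _rcu traversal variant. */
static int my_list_sum(void)
{
	struct my_entry *e;
	int sum = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &my_list, list)
		sum += e->value;
	rcu_read_unlock();

	return sum;
}

/* Writer: unlink with list_del_rcu(), then wait for readers before freeing. */
static void my_list_remove(struct my_entry *e)
{
	mutex_lock(&my_list_lock);
	list_del_rcu(&e->list);
	mutex_unlock(&my_list_lock);

	synchronize_rcu();	/* wait for in-flight readers to finish */
	kfree(e);
}

Using the plain list_for_each_entry() inside an rcu_read_lock() section, as the old load_balance_fair() code did, gives no ordering guarantees against a concurrent list update, which is exactly what the one-line change above corrects.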