author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2008-04-19 13:45:00 -0400
committer  Ingo Molnar <mingo@elte.hu>              2008-04-19 13:45:00 -0400
commit     4a55bd5e97b1775913f88f11108a4f144f590e89 (patch)
tree       4514f2370d898b93086779c821023319fe4c8b9d
parent     ac884dec6d4a7df252150af875cffddf8f1d9c15 (diff)
sched: fair-group: de-couple load-balancing from the rb-trees
De-couple load-balancing from the rb-trees, so that I can change their
organization.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 include/linux/init_task.h |  3 +++
 include/linux/sched.h     |  1 +
 kernel/sched.c            | 10 ++++++++--
 kernel/sched_fair.c       | 21 +++++++++++++--------
 4 files changed, 25 insertions(+), 10 deletions(-)
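
The core of the change is visible in the hunks below: each enqueued sched_entity is additionally linked onto a plain list (cfs_rq->tasks, via se->group_node), and the load-balance iterator walks that list with a saved cursor (cfs_rq->balance_iterator) instead of walking the rb-tree with rb_next(). What follows is a minimal userspace sketch of that pattern, not kernel code: only the mirrored names (tasks, group_node, balance_iterator) come from the patch, everything else is invented for the example, and the patch's extra step of skipping non-task group entities is omitted here.

/*
 * Minimal userspace sketch (not kernel code) of the list-based balance
 * iterator introduced by this patch.  Only the mirrored names (tasks,
 * group_node, balance_iterator) come from the patch; list_node, entity,
 * run_queue and balance_first/balance_next are invented for the example.
 */
#include <stdio.h>
#include <stddef.h>

struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *head)
{
	head->prev = head->next = head;
}

static void list_add_tail_node(struct list_node *n, struct list_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

struct entity {
	int id;
	struct list_node group_node;	/* analogue of se->group_node */
};

struct run_queue {
	struct list_node tasks;		/* analogue of cfs_rq->tasks */
	struct list_node *balance_iterator;
};

/* container_of()-style conversion from a list node back to its entity */
#define node_to_entity(n) \
	((struct entity *)((char *)(n) - offsetof(struct entity, group_node)))

/* Return the entity at 'pos' (NULL at end of list) and remember its successor. */
static struct entity *balance_iterate(struct run_queue *rq, struct list_node *pos)
{
	if (pos == &rq->tasks)
		return NULL;
	rq->balance_iterator = pos->next;
	return node_to_entity(pos);
}

static struct entity *balance_first(struct run_queue *rq)
{
	return balance_iterate(rq, rq->tasks.next);
}

static struct entity *balance_next(struct run_queue *rq)
{
	return balance_iterate(rq, rq->balance_iterator);
}

int main(void)
{
	struct run_queue rq;
	struct entity e[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };
	struct entity *p;

	list_init(&rq.tasks);
	for (int i = 0; i < 3; i++)		/* "enqueue" three entities */
		list_add_tail_node(&e[i].group_node, &rq.tasks);

	for (p = balance_first(&rq); p; p = balance_next(&rq))
		printf("would consider entity %d for migration\n", p->id);

	return 0;
}

This sketch enqueues at the tail and therefore visits entities in enqueue order; the patch itself uses list_add(), i.e. head insertion, so its traversal order differs, but the balancing logic does not depend on that order.
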
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 1f74e1d7415f..37a6f5bc4a92 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -151,6 +151,9 @@ extern struct group_info init_groups;
 	.cpus_allowed	= CPU_MASK_ALL,					\
 	.mm		= NULL,						\
 	.active_mm	= &init_mm,					\
+	.se		= {						\
+		.group_node	= LIST_HEAD_INIT(tsk.se.group_node),	\
+	},								\
 	.rt		= {						\
 		.run_list	= LIST_HEAD_INIT(tsk.rt.run_list),	\
 		.time_slice	= HZ,					\
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 887f5db8942d..be6914014c70 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -946,6 +946,7 @@ struct load_weight {
 struct sched_entity {
 	struct load_weight	load;		/* for load-balancing */
 	struct rb_node		run_node;
+	struct list_head	group_node;
 	unsigned int		on_rq;
 
 	u64			exec_start;
diff --git a/kernel/sched.c b/kernel/sched.c
index ae1a3e936d28..3202462109f5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -384,8 +384,12 @@ struct cfs_rq {
 
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
-	struct rb_node *rb_load_balance_curr;
-	/* 'curr' points to currently running entity on this cfs_rq.
+
+	struct list_head tasks;
+	struct list_head *balance_iterator;
+
+	/*
+	 * 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
 	struct sched_entity *curr, *next;
@@ -2525,6 +2529,7 @@ static void __sched_fork(struct task_struct *p)
 
 	INIT_LIST_HEAD(&p->rt.run_list);
 	p->se.on_rq = 0;
+	INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	INIT_HLIST_HEAD(&p->preempt_notifiers);
@@ -7898,6 +7903,7 @@ int in_sched_functions(unsigned long addr)
 static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 {
 	cfs_rq->tasks_timeline = RB_ROOT;
+	INIT_LIST_HEAD(&cfs_rq->tasks);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	cfs_rq->rq = rq;
 #endif
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9e301a2bab6f..ed8ce329899b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -533,6 +533,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	add_cfs_task_weight(cfs_rq, se->load.weight);
 	cfs_rq->nr_running++;
 	se->on_rq = 1;
+	list_add(&se->group_node, &cfs_rq->tasks);
 }
 
 static void
@@ -545,6 +546,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	add_cfs_task_weight(cfs_rq, -se->load.weight);
 	cfs_rq->nr_running--;
 	se->on_rq = 0;
+	list_del_init(&se->group_node);
 }
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -1289,21 +1291,24 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
  * the current task:
  */
 static struct task_struct *
-__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
+__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
 {
 	struct task_struct *p = NULL;
 	struct sched_entity *se;
 
-	if (!curr)
+	if (next == &cfs_rq->tasks)
 		return NULL;
 
 	/* Skip over entities that are not tasks */
 	do {
-		se = rb_entry(curr, struct sched_entity, run_node);
-		curr = rb_next(curr);
-	} while (curr && !entity_is_task(se));
+		se = list_entry(next, struct sched_entity, group_node);
+		next = next->next;
+	} while (next != &cfs_rq->tasks && !entity_is_task(se));
 
-	cfs_rq->rb_load_balance_curr = curr;
+	if (next == &cfs_rq->tasks)
+		return NULL;
+
+	cfs_rq->balance_iterator = next;
 
 	if (entity_is_task(se))
 		p = task_of(se);
@@ -1315,14 +1320,14 @@ static struct task_struct *load_balance_start_fair(void *arg)
 {
 	struct cfs_rq *cfs_rq = arg;
 
-	return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
+	return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
 }
 
 static struct task_struct *load_balance_next_fair(void *arg)
 {
 	struct cfs_rq *cfs_rq = arg;
 
-	return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
+	return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
 }
 
 static unsigned long
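
A note on how these two hooks are consumed: load_balance_start_fair() and load_balance_next_fair() serve as the start/next callbacks that the generic load balancer in kernel/sched.c drives, and this patch leaves that caller-side protocol untouched; only the iterator's internal state changes from an rb-tree cursor to a list cursor. Below is a hedged, self-contained sketch of such a start/next protocol; rq_iterator_sketch, demo_queue and the other names are invented for the example and are not the kernel's.

/*
 * Illustrative sketch of a start/next iteration protocol like the one
 * load_balance_start_fair()/load_balance_next_fair() implement: the caller
 * sees only an opaque 'arg' plus two callbacks and pulls candidates until
 * next() returns NULL.  All names here are invented for the example.
 */
#include <stdio.h>

struct rq_iterator_sketch {
	void *arg;
	int *(*start)(void *arg);	/* int * stands in for task_struct * */
	int *(*next)(void *arg);
};

struct demo_queue {
	int tasks[3];
	int pos;
};

static int *demo_start(void *arg)
{
	struct demo_queue *q = arg;

	q->pos = 0;
	return q->pos < 3 ? &q->tasks[q->pos] : NULL;
}

static int *demo_next(void *arg)
{
	struct demo_queue *q = arg;

	q->pos++;
	return q->pos < 3 ? &q->tasks[q->pos] : NULL;
}

int main(void)
{
	struct demo_queue q = { .tasks = { 10, 20, 30 } };
	struct rq_iterator_sketch it = {
		.arg = &q, .start = demo_start, .next = demo_next,
	};

	for (int *p = it.start(it.arg); p; p = it.next(it.arg))
		printf("balancer would consider task %d\n", *p);

	return 0;
}
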