author		Dhaval Giani <dhaval@linux.vnet.ibm.com>	2008-04-19 13:44:59 -0400
committer	Ingo Molnar <mingo@elte.hu>			2008-04-19 13:44:59 -0400
commit		354d60c2ff72d86627dfe2089d186824abf4bb8e (patch)
tree		10cea61ce7036448ed7246820c5575df2a61bb3b /kernel/sched_fair.c
parent		ea736ed5d353d7a3aa1cf8ce4cf8d947bc353fb2 (diff)
sched: mix tasks and groups
This patch allows tasks and groups to coexist on the same cfs_rq. With this
change, CFS group scheduling follows a 1/(M+N) fairness model instead of the
previous 1/(1+N) model, where M tasks and N groups are runnable at the same
cfs_rq level (a worked example follows the sign-offs).
[a.p.zijlstra@chello.nl: rt bits and assorted fixes]
Signed-off-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
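
To make the new model concrete, a worked example under assumed equal, default
weights (the pre-patch reading below, in which all ungrouped tasks hang off a
single top-level entity, is an illustration, not taken from the changelog):
with M = 3 runnable tasks and N = 1 group on the same cfs_rq, each of the four
sibling entities now receives 1/(3+1) = 25% of the CPU. Previously the group
entity received 1/(1+1) = 50% and the three tasks split the remaining 50%,
roughly 16.7% each.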
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	51
1 file changed, 45 insertions(+), 6 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 022e036f2c3e..3dde0f0ec93a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1133,6 +1133,17 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 	return 0;
 }
 
+/* return depth at which a sched entity is present in the hierarchy */
+static inline int depth_se(struct sched_entity *se)
+{
+	int depth = 0;
+
+	for_each_sched_entity(se)
+		depth++;
+
+	return depth;
+}
+
 /*
  * Preempt the current task with a newly woken task if needed:
  */
@@ -1141,6 +1152,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 	struct task_struct *curr = rq->curr;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	struct sched_entity *se = &curr->se, *pse = &p->se;
+	int se_depth, pse_depth;
 
 	if (unlikely(rt_prio(p->prio))) {
 		update_rq_clock(rq);
@@ -1165,6 +1177,27 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
+	/*
+	 * preemption test can be made between sibling entities who are in the
+	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
+	 * both tasks until we find their ancestors who are siblings of common
+	 * parent.
+	 */
+
+	/* First walk up until both entities are at same depth */
+	se_depth = depth_se(se);
+	pse_depth = depth_se(pse);
+
+	while (se_depth > pse_depth) {
+		se_depth--;
+		se = parent_entity(se);
+	}
+
+	while (pse_depth > se_depth) {
+		pse_depth--;
+		pse = parent_entity(pse);
+	}
+
 	while (!is_same_group(se, pse)) {
 		se = parent_entity(se);
 		pse = parent_entity(pse);
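
The hunk above combines depth_se() with a lockstep climb: first equalize the
depths of the two entities, then walk both chains up together until they share
a parent. A minimal userspace sketch of the same lowest-common-ancestor
technique, where struct entity, depth() and find_siblings() are invented
stand-ins for struct sched_entity, depth_se() and the parent_entity() walk;
this models the idea, it is not kernel code:

#include <assert.h>
#include <stddef.h>

struct entity { struct entity *parent; };

/* Count the entity and its ancestors, as depth_se() does. */
static int depth(struct entity *e)
{
        int d = 0;

        for (; e; e = e->parent)
                d++;
        return d;
}

/* Walk *a and *b up until they are siblings under a common parent. */
static void find_siblings(struct entity **a, struct entity **b)
{
        int da = depth(*a), db = depth(*b);

        while (da > db) { da--; *a = (*a)->parent; }
        while (db > da) { db--; *b = (*b)->parent; }
        while ((*a)->parent != (*b)->parent) {
                *a = (*a)->parent;
                *b = (*b)->parent;
        }
}

int main(void)
{
        /* root <- g1 <- t1 and root <- t2: t1 must climb one level. */
        struct entity root = { NULL };
        struct entity g1 = { &root }, t2 = { &root };
        struct entity t1 = { &g1 };
        struct entity *a = &t1, *b = &t2;

        find_siblings(&a, &b);
        assert(a == &g1 && b == &t2);   /* siblings under root */
        return 0;
}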
@@ -1223,13 +1256,22 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
 static struct task_struct *
 __load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
 {
-	struct task_struct *p;
+	struct task_struct *p = NULL;
+	struct sched_entity *se;
 
 	if (!curr)
 		return NULL;
 
-	p = rb_entry(curr, struct task_struct, se.run_node);
-	cfs_rq->rb_load_balance_curr = rb_next(curr);
+	/* Skip over entities that are not tasks */
+	do {
+		se = rb_entry(curr, struct sched_entity, run_node);
+		curr = rb_next(curr);
+	} while (curr && !entity_is_task(se));
+
+	cfs_rq->rb_load_balance_curr = curr;
+
+	if (entity_is_task(se))
+		p = task_of(se);
 
 	return p;
 }
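
The rewritten iterator above filters as it advances: it remembers where the
rbtree cursor should resume, but only hands back entities that are actually
tasks, since the queue may now also hold group entities. A minimal standalone
sketch of the same pattern over a singly linked list, where struct ent,
next_task() and the is_task flag are invented stand-ins for the rbtree walk
and entity_is_task(); a model of the shape, not the kernel code:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ent { bool is_task; int id; struct ent *next; };

/* Return the next task at or after *cursor, advancing the cursor past
 * it, or NULL when the list is exhausted -- the same shape as the
 * patched __load_balance_iterator(). */
static struct ent *next_task(struct ent **cursor)
{
        struct ent *e;

        if (!*cursor)
                return NULL;

        /* Skip over entities that are not tasks. */
        do {
                e = *cursor;
                *cursor = e->next;
        } while (*cursor && !e->is_task);

        return e->is_task ? e : NULL;
}

int main(void)
{
        struct ent t2 = { true, 2, NULL };
        struct ent g1 = { false, 1, &t2 };      /* group entity: skipped */
        struct ent t0 = { true, 0, &g1 };
        struct ent *cursor = &t0, *e;

        while ((e = next_task(&cursor)))
                printf("task %d\n", e->id);     /* prints 0, then 2 */
        return 0;
}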
@@ -1489,9 +1531,6 @@ static void print_cfs_stats(struct seq_file *m, int cpu)
 {
 	struct cfs_rq *cfs_rq;
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
-#endif
 	rcu_read_lock();
 	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
 		print_cfs_rq(m, cpu, cfs_rq);