about summary refs log tree commit diff stats
path: root/kernel/sched_fair.c
diff options
context:
space:
mode:
author  Peter Zijlstra <a.p.zijlstra@chello.nl>  2008-10-24 05:06:15 -0400
committer  Ingo Molnar <mingo@elte.hu>  2008-10-24 06:51:02 -0400
commit  464b75273f64be7c81fee975bd6ca9593df3427b (patch)
tree  c7efd037f65129ba9bff5b8a4a61d506e904715f  /kernel/sched_fair.c
parent  0d13033bc9257fe65c1aa25e84568b1608da0901 (diff)
sched: re-instate vruntime based wakeup preemption
The advantage is that vruntime based wakeup preemption has a better conceptual model. Here wakeup_gran = 0 means: preempt when 'fair'. Therefore wakeup_gran is the granularity of unfairness we allow in order to make progress.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  |  98 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 92 insertions(+), 6 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index b71ee2c62297..7af17e04a6db 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -143,6 +143,49 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
143 return se->parent; 143 return se->parent;
144} 144}
145 145
146/* return depth at which a sched entity is present in the hierarchy */
147static inline int depth_se(struct sched_entity *se)
148{
149 int depth = 0;
150
151 for_each_sched_entity(se)
152 depth++;
153
154 return depth;
155}
156
157static void
158find_matching_se(struct sched_entity **se, struct sched_entity **pse)
159{
160 int se_depth, pse_depth;
161
162 /*
163 * preemption test can be made between sibling entities who are in the
164 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
165 * both tasks until we find their ancestors who are siblings of common
166 * parent.
167 */
168
169 /* First walk up until both entities are at same depth */
170 se_depth = depth_se(*se);
171 pse_depth = depth_se(*pse);
172
173 while (se_depth > pse_depth) {
174 se_depth--;
175 *se = parent_entity(*se);
176 }
177
178 while (pse_depth > se_depth) {
179 pse_depth--;
180 *pse = parent_entity(*pse);
181 }
182
183 while (!is_same_group(*se, *pse)) {
184 *se = parent_entity(*se);
185 *pse = parent_entity(*pse);
186 }
187}
188
146#else /* CONFIG_FAIR_GROUP_SCHED */ 189#else /* CONFIG_FAIR_GROUP_SCHED */
147 190
148static inline struct rq *rq_of(struct cfs_rq *cfs_rq) 191static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
@@ -193,6 +236,11 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
193 return NULL; 236 return NULL;
194} 237}
195 238
239static inline void
240find_matching_se(struct sched_entity **se, struct sched_entity **pse)
241{
242}
243
196#endif /* CONFIG_FAIR_GROUP_SCHED */ 244#endif /* CONFIG_FAIR_GROUP_SCHED */
197 245
198 246
@@ -1244,13 +1292,42 @@ static unsigned long wakeup_gran(struct sched_entity *se)
1244 * More easily preempt - nice tasks, while not making it harder for 1292 * More easily preempt - nice tasks, while not making it harder for
1245 * + nice tasks. 1293 * + nice tasks.
1246 */ 1294 */
1247 if (sched_feat(ASYM_GRAN)) 1295 if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD)
1248 gran = calc_delta_mine(gran, NICE_0_LOAD, &se->load); 1296 gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
1249 1297
1250 return gran; 1298 return gran;
1251} 1299}
1252 1300
1253/* 1301/*
1302 * Should 'se' preempt 'curr'.
1303 *
1304 * |s1
1305 * |s2
1306 * |s3
1307 * g
1308 * |<--->|c
1309 *
1310 * w(c, s1) = -1
1311 * w(c, s2) = 0
1312 * w(c, s3) = 1
1313 *
1314 */
1315static int
1316wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
1317{
1318 s64 gran, vdiff = curr->vruntime - se->vruntime;
1319
1320 if (vdiff <= 0)
1321 return -1;
1322
1323 gran = wakeup_gran(curr);
1324 if (vdiff > gran)
1325 return 1;
1326
1327 return 0;
1328}
1329
1330/*
1254 * Preempt the current task with a newly woken task if needed: 1331 * Preempt the current task with a newly woken task if needed:
1255 */ 1332 */
1256static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync) 1333static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
@@ -1258,7 +1335,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
1258 struct task_struct *curr = rq->curr; 1335 struct task_struct *curr = rq->curr;
1259 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 1336 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1260 struct sched_entity *se = &curr->se, *pse = &p->se; 1337 struct sched_entity *se = &curr->se, *pse = &p->se;
1261 s64 delta_exec;
1262 1338
1263 if (unlikely(rt_prio(p->prio))) { 1339 if (unlikely(rt_prio(p->prio))) {
1264 update_rq_clock(rq); 1340 update_rq_clock(rq);
@@ -1296,9 +1372,19 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
1296 return; 1372 return;
1297 } 1373 }
1298 1374
1299 delta_exec = se->sum_exec_runtime - se->prev_sum_exec_runtime; 1375 find_matching_se(&se, &pse);
1300 if (delta_exec > wakeup_gran(pse)) 1376
1301 resched_task(curr); 1377 while (se) {
1378 BUG_ON(!pse);
1379
1380 if (wakeup_preempt_entity(se, pse) == 1) {
1381 resched_task(curr);
1382 break;
1383 }
1384
1385 se = parent_entity(se);
1386 pse = parent_entity(pse);
1387 }
1302} 1388}
1303 1389
1304static struct task_struct *pick_next_task_fair(struct rq *rq) 1390static struct task_struct *pick_next_task_fair(struct rq *rq)