about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2007-10-15 11:00:04 -0400
committerIngo Molnar <mingo@elte.hu>2007-10-15 11:00:04 -0400
commit4d78e7b656aa6440c337302fe065338ce840a64e (patch)
tree3013b8b4971ba9b6e3e250d36404c89d7a7c107f /kernel
parent6cb58195143b55d4c427d92f8425bec2b0d9c56c (diff)
sched: new task placement for vruntime
add proper new task placement for the vruntime based math too. ( note: introduces a swap() macro, but the swap token is too widely used in the kernel namespace for a generic version to be added without changing non-scheduler code - so this cleanup will be done separately. ) Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Mike Galbraith <efault@gmx.de> Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched_fair.c36
1 file changed, 27 insertions, 9 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7041dc697855..95487e3c8b06 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -203,6 +203,20 @@ static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
203 * Scheduling class statistics methods:
204 */
205
206static u64 __sched_period(unsigned long nr_running)
207{
208 u64 period = sysctl_sched_latency;
209 unsigned long nr_latency =
210 sysctl_sched_latency / sysctl_sched_min_granularity;
211
212 if (unlikely(nr_running > nr_latency)) {
213 period *= nr_running;
214 do_div(period, nr_latency);
215 }
216
217 return period;
218}
219
206/*
207 * Calculate the preemption granularity needed to schedule every
208 * runnable task once per sysctl_sched_latency amount of time.
@@ -1103,6 +1117,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
1103 }
1104}
1105
1120#define swap(a,b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
1121
1106/* 1122/*
1107 * Share the fairness runtime between parent and child, thus the 1123 * Share the fairness runtime between parent and child, thus the
1108 * total amount of pressure for CPU stays equal - new tasks 1124 * total amount of pressure for CPU stays equal - new tasks
@@ -1118,14 +1134,9 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
1118 sched_info_queued(p);
1119
1120 update_curr(cfs_rq);
1137 se->vruntime = cfs_rq->min_vruntime;
1121 update_stats_enqueue(cfs_rq, se);
1122 /* 1139
1123 * Child runs first: we let it run before the parent
1124 * until it reschedules once. We set up the key so that
1125 * it will preempt the parent:
1126 */
1127 se->fair_key = curr->fair_key -
1128 niced_granularity(curr, sched_granularity(cfs_rq)) - 1;
1129 /*
1130 * The first wait is dominated by the child-runs-first logic,
1131 * so do not credit it with that waiting time yet:
@@ -1138,9 +1149,16 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
1138 * -granularity/2, so initialize the task with that:
1139 */
1140 if (sched_feat(START_DEBIT))
1141 se->wait_runtime = -(sched_granularity(cfs_rq) / 2); 1152 se->wait_runtime = -(__sched_period(cfs_rq->nr_running+1) / 2);
1153
1154 if (sysctl_sched_child_runs_first &&
1155 curr->vruntime < se->vruntime) {
1156
1157 dequeue_entity(cfs_rq, curr, 0);
1158 swap(curr->vruntime, se->vruntime);
1159 enqueue_entity(cfs_rq, curr, 0);
1160 }
1142
1143 se->vruntime = cfs_rq->min_vruntime;
1144 update_stats_enqueue(cfs_rq, se);
1145 __enqueue_entity(cfs_rq, se);
1146 resched_task(rq->curr);