diff options
-rw-r--r-- | kernel/sched_fair.c | 36 |
1 file changed, 27 insertions(+), 9 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 7041dc697855..95487e3c8b06 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -203,6 +203,20 @@ static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq) | |||
203 | * Scheduling class statistics methods: | 203 | * Scheduling class statistics methods: |
204 | */ | 204 | */ |
205 | 205 | ||
206 | static u64 __sched_period(unsigned long nr_running) | ||
207 | { | ||
208 | u64 period = sysctl_sched_latency; | ||
209 | unsigned long nr_latency = | ||
210 | sysctl_sched_latency / sysctl_sched_min_granularity; | ||
211 | |||
212 | if (unlikely(nr_running > nr_latency)) { | ||
213 | period *= nr_running; | ||
214 | do_div(period, nr_latency); | ||
215 | } | ||
216 | |||
217 | return period; | ||
218 | } | ||
219 | |||
206 | /* | 220 | /* |
207 | * Calculate the preemption granularity needed to schedule every | 221 | * Calculate the preemption granularity needed to schedule every |
208 | * runnable task once per sysctl_sched_latency amount of time. | 222 | * runnable task once per sysctl_sched_latency amount of time. |
@@ -1103,6 +1117,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr) | |||
1103 | } | 1117 | } |
1104 | } | 1118 | } |
1105 | 1119 | ||
/*
 * Swap two lvalues of the same type in place.
 *
 * The temporary is named _swap_tmp (not plain "tmp") so that a caller
 * variable named tmp does not collide with it: with the old name,
 * swap(tmp, x) expanded to "typeof(tmp) tmp = (tmp);" -- a shadowed
 * self-initialization that reads an indeterminate value.  __typeof__ is
 * the spelling accepted in both GNU and strict ISO compilation modes.
 */
#define swap(a, b) \
	do { __typeof__(a) _swap_tmp = (a); (a) = (b); (b) = _swap_tmp; } while (0)
1121 | |||
1106 | /* | 1122 | /* |
1107 | * Share the fairness runtime between parent and child, thus the | 1123 | * Share the fairness runtime between parent and child, thus the |
1108 | * total amount of pressure for CPU stays equal - new tasks | 1124 | * total amount of pressure for CPU stays equal - new tasks |
@@ -1118,14 +1134,9 @@ static void task_new_fair(struct rq *rq, struct task_struct *p) | |||
1118 | sched_info_queued(p); | 1134 | sched_info_queued(p); |
1119 | 1135 | ||
1120 | update_curr(cfs_rq); | 1136 | update_curr(cfs_rq); |
1137 | se->vruntime = cfs_rq->min_vruntime; | ||
1121 | update_stats_enqueue(cfs_rq, se); | 1138 | update_stats_enqueue(cfs_rq, se); |
1122 | /* | 1139 | |
1123 | * Child runs first: we let it run before the parent | ||
1124 | * until it reschedules once. We set up the key so that | ||
1125 | * it will preempt the parent: | ||
1126 | */ | ||
1127 | se->fair_key = curr->fair_key - | ||
1128 | niced_granularity(curr, sched_granularity(cfs_rq)) - 1; | ||
1129 | /* | 1140 | /* |
1130 | * The first wait is dominated by the child-runs-first logic, | 1141 | * The first wait is dominated by the child-runs-first logic, |
1131 | * so do not credit it with that waiting time yet: | 1142 | * so do not credit it with that waiting time yet: |
@@ -1138,9 +1149,16 @@ static void task_new_fair(struct rq *rq, struct task_struct *p) | |||
1138 | * -granularity/2, so initialize the task with that: | 1149 | * -granularity/2, so initialize the task with that: |
1139 | */ | 1150 | */ |
1140 | if (sched_feat(START_DEBIT)) | 1151 | if (sched_feat(START_DEBIT)) |
1141 | se->wait_runtime = -(sched_granularity(cfs_rq) / 2); | 1152 | se->wait_runtime = -(__sched_period(cfs_rq->nr_running+1) / 2); |
1153 | |||
1154 | if (sysctl_sched_child_runs_first && | ||
1155 | curr->vruntime < se->vruntime) { | ||
1156 | |||
1157 | dequeue_entity(cfs_rq, curr, 0); | ||
1158 | swap(curr->vruntime, se->vruntime); | ||
1159 | enqueue_entity(cfs_rq, curr, 0); | ||
1160 | } | ||
1142 | 1161 | ||
1143 | se->vruntime = cfs_rq->min_vruntime; | ||
1144 | update_stats_enqueue(cfs_rq, se); | 1162 | update_stats_enqueue(cfs_rq, se); |
1145 | __enqueue_entity(cfs_rq, se); | 1163 | __enqueue_entity(cfs_rq, se); |
1146 | resched_task(rq->curr); | 1164 | resched_task(rq->curr); |