-rw-r--r--  kernel/sched.c       13
-rw-r--r--  kernel/sched_fair.c  37
2 files changed, 37 insertions(+), 13 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index eb1931eef587..52bbf1c842a8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1323,8 +1323,8 @@ static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
  * slice expiry etc.
  */
 
-#define WEIGHT_IDLEPRIO		2
-#define WMULT_IDLEPRIO		(1 << 31)
+#define WEIGHT_IDLEPRIO		3
+#define WMULT_IDLEPRIO		1431655765
 
 /*
  * Nice levels are multiplicative, with a gentle 10% change for every
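
For reference: the WMULT_* constants are precomputed fixed-point inverses, WMULT(w) = 2^32 / w, which let the scheduler scale a delta by a weight using a multiply and a shift instead of a division. Bumping the SCHED_IDLE weight from 2 to 3 therefore turns the inverse from 1 << 31 (2^32 / 2) into 1431655765 (2^32 / 3, rounded down). A minimal standalone sketch of the arithmetic, with illustrative names rather than the kernel's:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Sketch of the fixed-point trick: delta / weight is computed as
	 * delta * (2^32 / weight) >> 32.  Assumes delta * inv_weight fits
	 * in 64 bits; the kernel handles the overflow case separately.
	 */
	static uint64_t scale_by_inverse_weight(uint64_t delta, uint32_t inv_weight)
	{
		return (delta * inv_weight) >> 32;
	}

	int main(void)
	{
		uint32_t wmult_idleprio = (uint32_t)((1ULL << 32) / 3);

		printf("%u\n", wmult_idleprio);	/* 1431655765 */
		printf("%llu\n", (unsigned long long)
		       scale_by_inverse_weight(3000, wmult_idleprio));	/* 999, i.e. ~3000/3 */
		return 0;
	}
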
@@ -4440,7 +4440,7 @@ void __kprobes sub_preempt_count(int val)
 	/*
 	 * Underflow?
 	 */
-	if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
+	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
 		return;
 	/*
 	 * Is the spinlock portion underflowing?
@@ -9050,6 +9050,13 @@ static int tg_schedulable(struct task_group *tg, void *data)
 		runtime = d->rt_runtime;
 	}
 
+#ifdef CONFIG_USER_SCHED
+	if (tg == &root_task_group) {
+		period = global_rt_period();
+		runtime = global_rt_runtime();
+	}
+#endif
+
 	/*
 	 * Cannot have more runtime than the period.
 	 */
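
For reference: under CONFIG_USER_SCHED the root task group's own rt_bandwidth does not reflect the global limits, so the hunk above pins the root group to the global sysctl values before the sanity checks run. In kernels of this era the defaults were a 1 s period (sysctl_sched_rt_period = 1000000 us) with 950 ms of runtime (sysctl_sched_rt_runtime = 950000 us). The check the comment refers to is, reconstructed as a sketch from the same era:

	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;
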
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8e1352c75557..5cc1c162044f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -283,7 +283,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
 				   struct sched_entity,
 				   run_node);
 
-		if (vruntime == cfs_rq->min_vruntime)
+		if (!cfs_rq->curr)
 			vruntime = se->vruntime;
 		else
 			vruntime = min_vruntime(vruntime, se->vruntime);
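
For reference: earlier in update_min_vruntime(), vruntime is initialized to cfs_rq->min_vruntime and then overwritten with cfs_rq->curr->vruntime when a task is current. The old test, vruntime == cfs_rq->min_vruntime, was an indirect way of asking "is there no current task?", and it misfires whenever curr's vruntime happens to equal min_vruntime; testing !cfs_rq->curr states the intent directly. The whole function, reconstructed as a sketch from the same kernel era:

	static void update_min_vruntime(struct cfs_rq *cfs_rq)
	{
		u64 vruntime = cfs_rq->min_vruntime;

		if (cfs_rq->curr)
			vruntime = cfs_rq->curr->vruntime;

		if (cfs_rq->rb_leftmost) {
			struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
							   struct sched_entity,
							   run_node);

			if (!cfs_rq->curr)
				vruntime = se->vruntime;
			else
				vruntime = min_vruntime(vruntime, se->vruntime);
		}

		/* never move min_vruntime backwards */
		cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
	}
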
@@ -429,7 +429,10 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
 	for_each_sched_entity(se) {
-		struct load_weight *load = &cfs_rq->load;
+		struct load_weight *load;
+
+		cfs_rq = cfs_rq_of(se);
+		load = &cfs_rq->load;
 
 		if (unlikely(!se->on_rq)) {
 			struct load_weight lw = cfs_rq->load;
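
For reference: with group scheduling, for_each_sched_entity() walks se up through its parent group entities, but the old body kept pointing load at the cfs_rq the function was called with, so every level of the hierarchy was scaled by the wrong queue's load. After the fix each level contributes its own weight ratio, so the slice works out to roughly:

	slice(se) = __sched_period(nr_running) *
	            product over hierarchy levels i of
	                se_i->load.weight / cfs_rq_of(se_i)->load.weight
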
@@ -677,9 +680,13 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 		unsigned long thresh = sysctl_sched_latency;
 
 		/*
-		 * convert the sleeper threshold into virtual time
+		 * Convert the sleeper threshold into virtual time.
+		 * SCHED_IDLE is a special sub-class.  We care about
+		 * fairness only relative to other SCHED_IDLE tasks,
+		 * all of which have the same weight.
 		 */
-		if (sched_feat(NORMALIZED_SLEEPER))
+		if (sched_feat(NORMALIZED_SLEEPER) &&
+				task_of(se)->policy != SCHED_IDLE)
 			thresh = calc_delta_fair(thresh, se);
 
 		vruntime -= thresh;
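
For reference: calc_delta_fair() scales a delta by NICE_0_LOAD / se->load.weight. Worked through for a SCHED_IDLE sleeper with the new weight of 3, assuming the common default sysctl_sched_latency of 20 ms (the value is tunable):

	thresh = 20,000,000 ns * 1024 / 3 ≈ 6.8 s

A waking idle task would be placed almost seven seconds of vruntime in credit, letting it run far ahead of its SCHED_IDLE peers, hence the policy check that skips normalization for SCHED_IDLE.
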
@@ -1340,14 +1347,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 
 static void set_last_buddy(struct sched_entity *se)
 {
-	for_each_sched_entity(se)
-		cfs_rq_of(se)->last = se;
+	if (likely(task_of(se)->policy != SCHED_IDLE)) {
+		for_each_sched_entity(se)
+			cfs_rq_of(se)->last = se;
+	}
 }
 
 static void set_next_buddy(struct sched_entity *se)
 {
-	for_each_sched_entity(se)
-		cfs_rq_of(se)->next = se;
+	if (likely(task_of(se)->policy != SCHED_IDLE)) {
+		for_each_sched_entity(se)
+			cfs_rq_of(se)->next = se;
+	}
 }
 
 /*
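
For reference: the last/next buddies are hints consumed by pick_next_entity(), which prefers a buddy over the leftmost entity whenever wakeup_preempt_entity() (see the hunk header above) says that would not be too unfair. A SCHED_IDLE buddy could therefore jump ahead of SCHED_NORMAL tasks, which is why buddy marking is now skipped for idle tasks. The consumer looked approximately like this at the time (a reconstruction, not guaranteed verbatim):

	static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
	{
		struct sched_entity *se = __pick_next_entity(cfs_rq);

		if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
			return cfs_rq->next;

		if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
			return cfs_rq->last;

		return se;
	}
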
@@ -1393,12 +1404,18 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 		return;
 
 	/*
-	 * Batch tasks do not preempt (their preemption is driven by
+	 * Batch and idle tasks do not preempt (their preemption is driven by
 	 * the tick):
 	 */
-	if (unlikely(p->policy == SCHED_BATCH))
+	if (unlikely(p->policy != SCHED_NORMAL))
 		return;
 
+	/* Idle tasks are by definition preempted by everybody. */
+	if (unlikely(curr->policy == SCHED_IDLE)) {
+		resched_task(curr);
+		return;
+	}
+
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
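
For reference, how a task ends up with these policies: userspace selects them via sched_setscheduler(2). A minimal usage sketch for opting into SCHED_IDLE:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		struct sched_param sp = { .sched_priority = 0 };	/* SCHED_IDLE requires priority 0 */

		/*
		 * Make the calling process SCHED_IDLE: with this patch it
		 * never wakeup-preempts anyone and is itself preempted by
		 * any waking task.
		 */
		if (sched_setscheduler(0, SCHED_IDLE, &sp) != 0)
			perror("sched_setscheduler");
		return 0;
	}
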