aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched.c        |  1 +
-rw-r--r--   kernel/sched_fair.c   | 26 ++++++++++++++++++++++----
2 files changed, 23 insertions(+), 4 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 9fe473a190de..b533d6db78aa 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1587,6 +1587,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.wait_start_fair		= 0;
 	p->se.exec_start		= 0;
 	p->se.sum_exec_runtime		= 0;
+	p->se.prev_sum_exec_runtime	= 0;
 	p->se.delta_exec		= 0;
 	p->se.delta_fair_run		= 0;
 	p->se.delta_fair_sleep		= 0;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9f53d49f3aab..721fe7744874 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -668,7 +668,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void
+static int
 __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			  struct sched_entity *curr, unsigned long granularity)
 {
@@ -679,8 +679,11 @@ __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	 * preempt the current task unless the best task has
 	 * a larger than sched_granularity fairness advantage:
 	 */
-	if (__delta > niced_granularity(curr, granularity))
+	if (__delta > niced_granularity(curr, granularity)) {
 		resched_task(rq_of(cfs_rq)->curr);
+		return 1;
+	}
+	return 0;
 }
 
 static inline void
@@ -725,6 +728,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 
 static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
+	unsigned long gran, ideal_runtime, delta_exec;
 	struct sched_entity *next;
 
 	/*
@@ -741,8 +745,22 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 	if (next == curr)
 		return;
 
-	__check_preempt_curr_fair(cfs_rq, next, curr,
-			sched_granularity(cfs_rq));
+	gran = sched_granularity(cfs_rq);
+	ideal_runtime = niced_granularity(curr,
+			max(sysctl_sched_latency / cfs_rq->nr_running,
+			    (unsigned long)sysctl_sched_min_granularity));
+	/*
+	 * If we executed more than what the latency constraint suggests,
+	 * reduce the rescheduling granularity. This way the total latency
+	 * of how much a task is not scheduled converges to
+	 * sysctl_sched_latency:
+	 */
+	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+	if (delta_exec > ideal_runtime)
+		gran = 0;
+
+	if (__check_preempt_curr_fair(cfs_rq, next, curr, gran))
+		curr->prev_sum_exec_runtime = curr->sum_exec_runtime;
 }
 
 /**************************************************