aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_fair.c	38
1 file changed, 22 insertions, 16 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 2d01bbc2d04a..892616bf2c77 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -678,11 +678,31 @@ __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
 		struct sched_entity *curr, unsigned long granularity)
 {
 	s64 __delta = curr->fair_key - se->fair_key;
+	unsigned long ideal_runtime, delta_exec;
+
+	/*
+	 * ideal_runtime is compared against sum_exec_runtime, which is
+	 * walltime, hence do not scale.
+	 */
+	ideal_runtime = max(sysctl_sched_latency / cfs_rq->nr_running,
+			(unsigned long)sysctl_sched_min_granularity);
+
+	/*
+	 * If we executed more than what the latency constraint suggests,
+	 * reduce the rescheduling granularity. This way the total latency
+	 * of how much a task is not scheduled converges to
+	 * sysctl_sched_latency:
+	 */
+	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+	if (delta_exec > ideal_runtime)
+		granularity = 0;
 
 	/*
 	 * Take scheduling granularity into account - do not
 	 * preempt the current task unless the best task has
 	 * a larger than sched_granularity fairness advantage:
+	 *
+	 * scale granularity as key space is in fair_clock.
 	 */
 	if (__delta > niced_granularity(curr, granularity))
 		resched_task(rq_of(cfs_rq)->curr);
@@ -731,7 +751,6 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 
 static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
-	unsigned long gran, ideal_runtime, delta_exec;
 	struct sched_entity *next;
 
 	/*
@@ -748,21 +767,8 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 	if (next == curr)
 		return;
 
-	gran = sched_granularity(cfs_rq);
-	ideal_runtime = niced_granularity(curr,
-		max(sysctl_sched_latency / cfs_rq->nr_running,
-		(unsigned long)sysctl_sched_min_granularity));
-	/*
-	 * If we executed more than what the latency constraint suggests,
-	 * reduce the rescheduling granularity. This way the total latency
-	 * of how much a task is not scheduled converges to
-	 * sysctl_sched_latency:
-	 */
-	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	if (delta_exec > ideal_runtime)
-		gran = 0;
-
-	__check_preempt_curr_fair(cfs_rq, next, curr, gran);
+	__check_preempt_curr_fair(cfs_rq, next, curr,
+				  sched_granularity(cfs_rq));
 }
 
 /**************************************************