Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  69
1 file changed, 65 insertions(+), 4 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index dfa18d55561d..3dab1ff83c4f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -642,13 +642,29 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 	cfs_rq->curr = NULL;
 }
 
-static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+static void
+entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 {
 	/*
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
 
+#ifdef CONFIG_SCHED_HRTICK
+	/*
+	 * queued ticks are scheduled to match the slice, so don't bother
+	 * validating it and just reschedule.
+	 */
+	if (queued)
+		return resched_task(rq_of(cfs_rq)->curr);
+	/*
+	 * don't let the period tick interfere with the hrtick preemption
+	 */
+	if (!sched_feat(DOUBLE_TICK) &&
+			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
+		return;
+#endif
+
 	if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
 		check_preempt_tick(cfs_rq, curr);
 }
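Note on the entity_tick() change above: a queued tick means the hrtick itself
fired at the end of the computed slice, so the task is rescheduled without
re-validating anything; an armed but not yet expired hrtick suppresses the
periodic preemption check, unless the DOUBLE_TICK feature deliberately lets
both ticks run; otherwise the ordinary check_preempt_tick() path is taken.
A minimal user-space sketch of that decision follows; tick_state and
tick_action() are hypothetical stand-ins for the kernel state, not kernel
API, and the !sched_feat(WAKEUP_PREEMPT) escape hatch is omitted for brevity.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the state entity_tick() consults. */
    struct tick_state {
        bool queued;             /* tick raised by the hrtick expiry  */
        bool double_tick;        /* sched_feat(DOUBLE_TICK) enabled   */
        bool hrtick_active;      /* hrtimer_active(&rq->hrtick_timer) */
        unsigned int nr_running; /* cfs_rq->nr_running                */
    };

    /* Mirrors the control flow of the new entity_tick(). */
    static const char *tick_action(const struct tick_state *s)
    {
        if (s->queued)
            return "resched";            /* hrtick fired at slice end */
        if (!s->double_tick && s->hrtick_active)
            return "skip";               /* hrtick owns preemption    */
        if (s->nr_running > 1)
            return "check_preempt_tick"; /* periodic tick path        */
        return "nothing";
    }

    int main(void)
    {
        struct tick_state s = { .queued = false, .double_tick = false,
                                .hrtick_active = true, .nr_running = 2 };
        printf("%s\n", tick_action(&s)); /* prints "skip" */
        return 0;
    }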
@@ -754,6 +770,43 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
+#ifdef CONFIG_SCHED_HRTICK
+static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
+{
+	int requeue = rq->curr == p;
+	struct sched_entity *se = &p->se;
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+	WARN_ON(task_rq(p) != rq);
+
+	if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
+		u64 slice = sched_slice(cfs_rq, se);
+		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
+		s64 delta = slice - ran;
+
+		if (delta < 0) {
+			if (rq->curr == p)
+				resched_task(p);
+			return;
+		}
+
+		/*
+		 * Don't schedule slices shorter than 10000ns, that just
+		 * doesn't make sense. Rely on vruntime for fairness.
+		 */
+		if (!requeue)
+			delta = max(10000LL, delta);
+
+		hrtick_start(rq, delta, requeue);
+	}
+}
+#else
+static inline void
+hrtick_start_fair(struct rq *rq, struct task_struct *p)
+{
+}
+#endif
+
 /*
  * The enqueue_task method is called before nr_running is
  * increased. Here we update the fair scheduling stats and
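The arithmetic in hrtick_start_fair() above: sched_slice() yields the wall
time this entity should own per period, sum_exec_runtime minus
prev_sum_exec_runtime is how much of that it has already consumed, and the
difference is how far in the future the preemption timer should fire. A
negative difference means the slice is already overrun, so the task is
rescheduled on the spot; otherwise, when the timer is being armed fresh
(!requeue), the delay is clamped to at least 10000ns. A worked example with
illustrative numbers (not taken from a real run):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t slice = 4000000;  /* sched_slice(): a 4ms slice       */
        uint64_t ran   = 1500000;  /* sum_exec - prev_sum_exec so far  */
        int64_t  delta = (int64_t)slice - (int64_t)ran;

        if (delta < 0) {
            puts("slice overrun: resched_task()");
            return 0;
        }
        if (delta < 10000)         /* the !requeue clamp: never arm    */
            delta = 10000;         /* the timer for less than 10us     */

        printf("hrtick fires in %lld ns\n", (long long)delta); /* 2500000 */
        return 0;
    }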
@@ -782,6 +835,8 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 	 */
 	if (incload)
 		inc_cpu_load(rq, topse->load.weight);
+
+	hrtick_start_fair(rq, rq->curr);
 }
 
 /*
@@ -814,6 +869,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 	 */
 	if (decload)
 		dec_cpu_load(rq, topse->load.weight);
+
+	hrtick_start_fair(rq, rq->curr);
 }
 
 /*
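The enqueue and dequeue hooks above both re-arm the hrtick for rq->curr
because changing nr_running changes what sched_slice() returns for the
currently running task, so a timer armed for the old slice length may now
fire too early or too late. A sketch of that dependence, mirroring the shape
of sched_slice() (period scaled by the entity's share of the queue weight);
the constants are illustrative defaults, not values read from the kernel:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch: slice = period * se_weight / rq_weight, where the period
     * stretches once more tasks run than the latency target can cover. */
    static uint64_t slice_ns(unsigned int nr_running,
                             unsigned int se_weight, unsigned int rq_weight)
    {
        uint64_t period = 20000000ULL;        /* ~20ms latency target */

        if (nr_running > 5)                   /* period grows with    */
            period = nr_running * 4000000ULL; /* ~4ms min granularity */
        return period * se_weight / rq_weight;
    }

    int main(void)
    {
        /* Equal-weight tasks: a third task shortens every slice, which
         * is why a previously armed hrtick must be reprogrammed. */
        printf("2 tasks: %llu ns\n",
               (unsigned long long)slice_ns(2, 1024, 2048)); /* 10000000 */
        printf("3 tasks: %llu ns\n",
               (unsigned long long)slice_ns(3, 1024, 3072)); /*  6666666 */
        return 0;
    }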
@@ -1049,6 +1106,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
 {
+	struct task_struct *p;
 	struct cfs_rq *cfs_rq = &rq->cfs;
 	struct sched_entity *se;
 
@@ -1060,7 +1118,10 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
 		cfs_rq = group_cfs_rq(se);
 	} while (cfs_rq);
 
-	return task_of(se);
+	p = task_of(se);
+	hrtick_start_fair(rq, p);
+
+	return p;
 }
 
 /*
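The pick_next_task_fair() change above makes every context switch program the
high-resolution tick for the incoming task, so preemption can land exactly at
the end of its slice instead of waiting for the next periodic tick. A toy
model of that pick-then-arm ordering; the types and helpers here (task, rq,
pick_entity, arm_hrtick) are stand-ins, not the kernel's:

    #include <stdio.h>

    struct task { const char *name; };
    struct rq   { struct task *next; };

    static struct task *pick_entity(struct rq *rq) { return rq->next; }

    static void arm_hrtick(struct rq *rq, struct task *p)
    {
        (void)rq;
        printf("hrtick armed for %s\n", p->name);
    }

    static struct task *pick_next(struct rq *rq)
    {
        struct task *p = pick_entity(rq); /* was: return task_of(se); */
        arm_hrtick(rq, p);                /* new: hrtick_start_fair() */
        return p;
    }

    int main(void)
    {
        struct task t = { "worker" };
        struct rq rq = { &t };
        pick_next(&rq);
        return 0;
    }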
@@ -1235,14 +1296,14 @@ move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 /*
  * scheduler tick hitting a task of our scheduling class:
  */
-static void task_tick_fair(struct rq *rq, struct task_struct *curr)
+static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &curr->se;
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
-		entity_tick(cfs_rq, se);
+		entity_tick(cfs_rq, se, queued);
 	}
 }
 
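With task_tick_fair() widened above, the periodic tick and the hrtick expiry
both funnel into the same handler and are told apart only by the queued flag,
which then propagates through entity_tick() for every entity in the
hierarchy. A toy model of that single entry point; task_tick() here is a
stand-in, not the kernel's sched_class hook:

    #include <stdio.h>

    static void task_tick(int queued)
    {
        if (queued) {                /* hrtick expiry: slice is over */
            puts("queued tick: reschedule");
            return;
        }
        puts("periodic tick: validate the running slice");
    }

    int main(void)
    {
        task_tick(0); /* scheduler_tick() path */
        task_tick(1); /* hrtick() expiry path  */
        return 0;
    }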