Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c | 50
1 file changed, 15 insertions(+), 35 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 89fa32b4edf2..e24ecd39c4b8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -662,10 +662,15 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	if (!initial) {
 		/* sleeps upto a single latency don't count. */
 		if (sched_feat(NEW_FAIR_SLEEPERS)) {
+			unsigned long thresh = sysctl_sched_latency;
+
+			/*
+			 * convert the sleeper threshold into virtual time
+			 */
 			if (sched_feat(NORMALIZED_SLEEPER))
-				vruntime -= calc_delta_weight(sysctl_sched_latency, se);
-			else
-				vruntime -= sysctl_sched_latency;
+				thresh = calc_delta_fair(thresh, se);
+
+			vruntime -= thresh;
 		}
 
 		/* ensure we never gain time by being placed backwards. */
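
The rewritten branch computes the sleeper credit in one place: thresh starts as the wall-clock sysctl_sched_latency and, when NORMALIZED_SLEEPER is set, calc_delta_fair() converts it into the entity's virtual time before it is subtracted from vruntime. Roughly, that conversion scales a delta by the nice-0 weight over the entity's own weight, so a heavily weighted task receives a smaller vruntime credit. A minimal userspace sketch of that scaling (hypothetical helper name; the kernel uses fixed-point inverse-weight arithmetic rather than this plain division):

#include <stdio.h>

#define NICE_0_LOAD 1024ULL	/* load weight of a nice-0 task */

/* Hypothetical stand-in for calc_delta_fair(): scale a wall-clock
 * delta to virtual time by the ratio NICE_0_LOAD / weight. */
static unsigned long long sketch_calc_delta_fair(unsigned long long delta_ns,
						 unsigned long long weight)
{
	return delta_ns * NICE_0_LOAD / weight;
}

int main(void)
{
	/* a 20ms sleeper threshold, for a nice-0 and a double-weight task */
	printf("%llu\n", sketch_calc_delta_fair(20000000ULL, 1024));	/* 20000000 */
	printf("%llu\n", sketch_calc_delta_fair(20000000ULL, 2048));	/* 10000000 */
	return 0;
}
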
@@ -682,6 +687,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
+	account_entity_enqueue(cfs_rq, se);
 
 	if (wakeup) {
 		place_entity(cfs_rq, se, 0);
@@ -692,7 +698,6 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	check_spread(cfs_rq, se);
 	if (se != cfs_rq->curr)
 		__enqueue_entity(cfs_rq, se);
-	account_entity_enqueue(cfs_rq, se);
 }
 
 static void update_avg(u64 *avg, u64 sample)
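
Taken together, this hunk and the previous one move account_entity_enqueue() from the tail of enqueue_entity() to directly after update_curr(), so the runqueue's load sums and task count already include the incoming entity by the time place_entity() computes its vruntime. A toy model of the reordering (all names here are illustrative, not the kernel's):

#include <stdio.h>

struct toy_rq {
	unsigned long nr_running;
	unsigned long load;
};

/* stand-in for account_entity_enqueue(): fold the entity's weight
 * into the runqueue totals */
static void toy_account(struct toy_rq *rq, unsigned long weight)
{
	rq->load += weight;
	rq->nr_running++;
}

/* stand-in for place_entity(): after the reordering, any math done
 * here sees totals that include the entity being enqueued */
static void toy_place(const struct toy_rq *rq)
{
	printf("placing with nr_running=%lu load=%lu\n",
	       rq->nr_running, rq->load);
}

int main(void)
{
	struct toy_rq rq = { .nr_running = 1, .load = 1024 };

	toy_account(&rq, 1024);	/* accounting now happens first */
	toy_place(&rq);		/* prints nr_running=2 load=2048 */
	return 0;
}
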
@@ -841,8 +846,10 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 	 * queued ticks are scheduled to match the slice, so don't bother
 	 * validating it and just reschedule.
 	 */
-	if (queued)
-		return resched_task(rq_of(cfs_rq)->curr);
+	if (queued) {
+		resched_task(rq_of(cfs_rq)->curr);
+		return;
+	}
 	/*
 	 * don't let the period tick interfere with the hrtick preemption
 	 */
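
The old form returned the void expression resched_task(...) straight from the void function entity_tick(). GCC accepts that as an extension (and C++ allows it), but ISO C forbids a return statement with an expression in a function returning void, so the patch splits it into a plain call followed by a bare return. A small self-contained illustration of the two styles (function names are made up for the example):

#include <stdio.h>

static void do_resched(void)
{
	puts("reschedule");
}

/* old style: returning a void expression; -Wpedantic warns in C */
static void tick_old(int queued)
{
	if (queued)
		return do_resched();
	puts("validate slice");
}

/* new style, as rewritten by the patch: portable and explicit */
static void tick_new(int queued)
{
	if (queued) {
		do_resched();
		return;
	}
	puts("validate slice");
}

int main(void)
{
	tick_old(1);
	tick_new(1);
	return 0;
}
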
@@ -957,7 +964,7 @@ static void yield_task_fair(struct rq *rq)
 		return;
 
 	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
-		__update_rq_clock(rq);
+		update_rq_clock(rq);
 		/*
 		 * Update run-time statistics of the 'current'.
 		 */
@@ -1007,7 +1014,7 @@ static int wake_idle(int cpu, struct task_struct *p)
 	 * sibling runqueue info. This will avoid the checks and cache miss
 	 * penalities associated with that.
 	 */
-	if (idle_cpu(cpu) || cpu_rq(cpu)->nr_running > 1)
+	if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1)
 		return cpu;
 
 	for_each_domain(cpu, sd) {
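
wake_idle()'s early-out now tests cfs.nr_running rather than the runqueue-wide nr_running: rq->nr_running counts runnable tasks of every scheduling class, real-time tasks included, while cfs.nr_running counts only fair-class tasks, which is what this fair-scheduler heuristic cares about. A self-contained sketch of the distinction (simplified stand-in structs, not the kernel's definitions):

#include <stdio.h>

struct toy_cfs_rq {
	unsigned long nr_running;	/* fair-class tasks only */
};

struct toy_rq {
	unsigned long nr_running;	/* all classes, RT included */
	struct toy_cfs_rq cfs;
};

int main(void)
{
	/* one RT task running, plus one fair task on the CFS queue */
	struct toy_rq rq = { .nr_running = 2, .cfs = { .nr_running = 1 } };

	printf("old check: %s\n", rq.nr_running > 1 ?
	       "busy, return this cpu" : "search for an idle sibling");
	printf("new check: %s\n", rq.cfs.nr_running > 1 ?
	       "busy, return this cpu" : "search for an idle sibling");
	return 0;
}
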
@@ -1611,30 +1618,6 @@ static const struct sched_class fair_sched_class = {
 };
 
 #ifdef CONFIG_SCHED_DEBUG
-static void
-print_cfs_rq_tasks(struct seq_file *m, struct cfs_rq *cfs_rq, int depth)
-{
-	struct sched_entity *se;
-
-	if (!cfs_rq)
-		return;
-
-	list_for_each_entry_rcu(se, &cfs_rq->tasks, group_node) {
-		int i;
-
-		for (i = depth; i; i--)
-			seq_puts(m, " ");
-
-		seq_printf(m, "%lu %s %lu\n",
-				se->load.weight,
-				entity_is_task(se) ? "T" : "G",
-				calc_delta_weight(SCHED_LOAD_SCALE, se)
-				);
-		if (!entity_is_task(se))
-			print_cfs_rq_tasks(m, group_cfs_rq(se), depth + 1);
-	}
-}
-
 static void print_cfs_stats(struct seq_file *m, int cpu)
 {
 	struct cfs_rq *cfs_rq;
@@ -1642,9 +1625,6 @@ static void print_cfs_stats(struct seq_file *m, int cpu)
 	rcu_read_lock();
 	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
 		print_cfs_rq(m, cpu, cfs_rq);
-
-	seq_printf(m, "\nWeight tree:\n");
-	print_cfs_rq_tasks(m, &cpu_rq(cpu)->cfs, 1);
 	rcu_read_unlock();
 }
 #endif