Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  39
1 file changed, 7 insertions, 32 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 89fa32b4edf2..c863663d204d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -682,6 +682,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
+	account_entity_enqueue(cfs_rq, se);
 
 	if (wakeup) {
 		place_entity(cfs_rq, se, 0);
@@ -692,7 +693,6 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	check_spread(cfs_rq, se);
 	if (se != cfs_rq->curr)
 		__enqueue_entity(cfs_rq, se);
-	account_entity_enqueue(cfs_rq, se);
 }
 
 static void update_avg(u64 *avg, u64 sample)
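These two hunks move the account_entity_enqueue() call from the tail of enqueue_entity() to the line right after update_curr(), so the entity's weight is folded into the cfs_rq load before place_entity() and __enqueue_entity() run. A minimal sketch of the resulting function shape follows; the enqueue_sleeper() call and the elided bodies are inferred from kernels of this vintage, not shown by the diff, so treat this as illustration rather than verbatim source:

/* Sketch of enqueue_entity() after the patch, reconstructed around
 * the diff context; details outside the hunks are assumptions. */
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
	update_curr(cfs_rq);			/* charge curr's runtime first */
	account_entity_enqueue(cfs_rq, se);	/* moved: account load before placement */

	if (wakeup) {
		place_entity(cfs_rq, se, 0);	/* choose the entity's initial vruntime */
		enqueue_sleeper(cfs_rq, se);	/* assumed: sleeper-statistics hook */
	}

	check_spread(cfs_rq, se);
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);	/* insert into the timeline rbtree */
}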
@@ -841,8 +841,10 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 	 * queued ticks are scheduled to match the slice, so don't bother
 	 * validating it and just reschedule.
 	 */
-	if (queued)
-		return resched_task(rq_of(cfs_rq)->curr);
+	if (queued) {
+		resched_task(rq_of(cfs_rq)->curr);
+		return;
+	}
 	/*
 	 * don't let the period tick interfere with the hrtick preemption
 	 */
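The entity_tick() hunk untangles a "return resched_task(...)" one-liner: resched_task() returns void, and ISO C forbids a return statement with an expression in a function whose return type is void (GCC only tolerates it as an extension), so the call and the early return are split. A compilable stand-alone illustration of the pattern, with stand-in names rather than the kernel's:

#include <stdio.h>

static void resched(void)		/* stand-in for resched_task() */
{
	puts("resched");
}

static void tick(int queued)		/* stand-in for entity_tick() */
{
	if (queued) {
		resched();
		return;		/* was: "return resched();" -- invalid ISO C */
	}
	/* normal periodic-tick work would continue here */
}

int main(void)
{
	tick(1);
	return 0;
}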
@@ -957,7 +959,7 @@ static void yield_task_fair(struct rq *rq)
 		return;
 
 	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
-		__update_rq_clock(rq);
+		update_rq_clock(rq);
 		/*
 		 * Update run-time statistics of the 'current'.
 		 */
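yield_task_fair() switches from the internal __update_rq_clock() to the un-prefixed update_rq_clock(), presumably consolidating callers onto the public rq-clock interface; a leading double underscore is the kernel's convention for a raw helper with extra caller obligations. A hedged, self-contained sketch of that convention only (the rq_stub type and both bodies are invented for illustration; the real kernel helpers differ in detail):

struct rq_stub {
	unsigned long long clock;	/* nanosecond timestamp, like rq->clock */
};

static void __update_clock(struct rq_stub *rq)
{
	/* raw update; assumes the caller holds the runqueue lock */
	rq->clock += 1;
}

static void update_clock(struct rq_stub *rq)
{
	/* public entry point -- the form ordinary callers should use */
	__update_clock(rq);
}

Refreshing the runqueue clock here matters because, as the context comment suggests, the run-time statistics of the yielding task are updated just below the hunk against that timestamp.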
@@ -1007,7 +1009,7 @@ static int wake_idle(int cpu, struct task_struct *p)
 	 * sibling runqueue info. This will avoid the checks and cache miss
 	 * penalities associated with that.
 	 */
-	if (idle_cpu(cpu) || cpu_rq(cpu)->nr_running > 1)
+	if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1)
 		return cpu;
 
 	for_each_domain(cpu, sd) {
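In wake_idle(), the busyness test narrows from rq->nr_running, which counts runnable tasks of every scheduling class, to rq->cfs.nr_running, which counts only fair-class entities. A compilable toy that captures the distinction; the stub types are simplified stand-ins for struct rq and struct cfs_rq:

struct cfs_rq_stub {
	unsigned long nr_running;	/* fair-class (CFS) tasks only */
};

struct rq_stub {
	unsigned long nr_running;	/* all classes: RT + fair + ... */
	struct cfs_rq_stub cfs;
};

/* After the patch, a CPU running one fair task plus, say, an RT
 * task no longer counts as "more than one" for this check. */
static int fair_busy(const struct rq_stub *rq)
{
	return rq->cfs.nr_running > 1;
}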
@@ -1611,30 +1613,6 @@ static const struct sched_class fair_sched_class = {
 };
 
 #ifdef CONFIG_SCHED_DEBUG
-static void
-print_cfs_rq_tasks(struct seq_file *m, struct cfs_rq *cfs_rq, int depth)
-{
-	struct sched_entity *se;
-
-	if (!cfs_rq)
-		return;
-
-	list_for_each_entry_rcu(se, &cfs_rq->tasks, group_node) {
-		int i;
-
-		for (i = depth; i; i--)
-			seq_puts(m, " ");
-
-		seq_printf(m, "%lu %s %lu\n",
-				se->load.weight,
-				entity_is_task(se) ? "T" : "G",
-				calc_delta_weight(SCHED_LOAD_SCALE, se)
-				);
-		if (!entity_is_task(se))
-			print_cfs_rq_tasks(m, group_cfs_rq(se), depth + 1);
-	}
-}
-
 static void print_cfs_stats(struct seq_file *m, int cpu)
 {
 	struct cfs_rq *cfs_rq;
@@ -1642,9 +1620,6 @@ static void print_cfs_stats(struct seq_file *m, int cpu)
 	rcu_read_lock();
 	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
 		print_cfs_rq(m, cpu, cfs_rq);
-
-	seq_printf(m, "\nWeight tree:\n");
-	print_cfs_rq_tasks(m, &cpu_rq(cpu)->cfs, 1);
 	rcu_read_unlock();
 }
 #endif
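With the recursive weight-tree dump of print_cfs_rq_tasks() gone, the right-hand column of the two hunks above fully determines the surviving function; after the patch, print_cfs_stats() reads:

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif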