 kernel/sched/fair.c  | 47 +++++++++++++++++++++++++++++------------------
 kernel/sched/sched.h |  7 +------
 2 files changed, 29 insertions(+), 25 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 280da893cd0f..40c758bbdd57 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2374,13 +2374,13 @@ static inline void __update_group_entity_contrib(struct sched_entity *se)
 		se->avg.load_avg_contrib >>= NICE_0_SHIFT;
 	}
 }
-#else
+#else /* CONFIG_FAIR_GROUP_SCHED */
 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
 						 int force_update) {}
 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
 						  struct cfs_rq *cfs_rq) {}
 static inline void __update_group_entity_contrib(struct sched_entity *se) {}
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 
 static inline void __update_task_entity_contrib(struct sched_entity *se)
 {
@@ -2571,6 +2571,8 @@ void idle_exit_fair(struct rq *this_rq)
 	update_rq_runnable_avg(this_rq, 0);
 }
 
+static int idle_balance(struct rq *this_rq);
+
 #else /* CONFIG_SMP */
 
 static inline void update_entity_load_avg(struct sched_entity *se,
@@ -2584,6 +2586,12 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
 					   int sleep) {}
 static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
 					      int force_update) {}
+
+static inline int idle_balance(struct rq *rq)
+{
+	return 0;
+}
+
 #endif /* CONFIG_SMP */
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -4677,7 +4685,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev)
 	struct sched_entity *se;
 	struct task_struct *p;
 
-again: __maybe_unused
+again:
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (!cfs_rq->nr_running)
 		goto idle;
@@ -4775,18 +4783,8 @@ simple:
 	return p;
 
 idle:
-#ifdef CONFIG_SMP
-	idle_enter_fair(rq);
-	/*
-	 * We must set idle_stamp _before_ calling idle_balance(), such that we
-	 * measure the duration of idle_balance() as idle time.
-	 */
-	rq->idle_stamp = rq_clock(rq);
-	if (idle_balance(rq)) { /* drops rq->lock */
-		rq->idle_stamp = 0;
+	if (idle_balance(rq)) /* drops rq->lock */
 		goto again;
-	}
-#endif
 
 	return NULL;
 }
@@ -6634,7 +6632,7 @@ out:
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-int idle_balance(struct rq *this_rq)
+static int idle_balance(struct rq *this_rq)
 {
 	struct sched_domain *sd;
 	int pulled_task = 0;
@@ -6642,8 +6640,15 @@ int idle_balance(struct rq *this_rq)
 	u64 curr_cost = 0;
 	int this_cpu = this_rq->cpu;
 
+	idle_enter_fair(this_rq);
+	/*
+	 * We must set idle_stamp _before_ calling idle_balance(), such that we
+	 * measure the duration of idle_balance() as idle time.
+	 */
+	this_rq->idle_stamp = rq_clock(this_rq);
+
 	if (this_rq->avg_idle < sysctl_sched_migration_cost)
-		return 0;
+		goto out;
 
 	/*
 	 * Drop the rq->lock, but keep IRQ/preempt disabled.
@@ -6692,8 +6697,10 @@ int idle_balance(struct rq *this_rq)
 	 * While browsing the domains, we released the rq lock.
 	 * A task could have be enqueued in the meantime
 	 */
-	if (this_rq->nr_running && !pulled_task)
-		return 1;
+	if (this_rq->nr_running && !pulled_task) {
+		pulled_task = 1;
+		goto out;
+	}
 
 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
 		/*
@@ -6706,6 +6713,10 @@ int idle_balance(struct rq *this_rq)
 	if (curr_cost > this_rq->max_idle_balance_cost)
 		this_rq->max_idle_balance_cost = curr_cost;
 
+out:
+	if (pulled_task)
+		this_rq->idle_stamp = 0;
+
 	return pulled_task;
 }
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1bf34c257d3b..92018f9821e8 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1163,17 +1163,10 @@ extern const struct sched_class idle_sched_class;
 extern void update_group_power(struct sched_domain *sd, int cpu);
 
 extern void trigger_load_balance(struct rq *rq);
-extern int idle_balance(struct rq *this_rq);
 
 extern void idle_enter_fair(struct rq *this_rq);
 extern void idle_exit_fair(struct rq *this_rq);
 
-#else /* CONFIG_SMP */
-
-static inline void idle_balance(int cpu, struct rq *rq)
-{
-}
-
 #endif
 
 extern void sysrq_sched_debug_show(void);