Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	33
1 file changed, 20 insertions(+), 13 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3f7ec9e27ee1..6fa833ab2cb8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -22,6 +22,7 @@
 
 #include <linux/latencytop.h>
 #include <linux/sched.h>
+#include <linux/cpumask.h>
 
 /*
  * Targeted preemption latency for CPU-bound tasks:
@@ -2103,21 +2104,20 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	      enum cpu_idle_type idle, int *all_pinned,
 	      int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
 {
-	int loops = 0, pulled = 0, pinned = 0;
+	int loops = 0, pulled = 0;
 	long rem_load_move = max_load_move;
 	struct task_struct *p, *n;
 
 	if (max_load_move == 0)
 		goto out;
 
-	pinned = 1;
-
 	list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
 		if (loops++ > sysctl_sched_nr_migrate)
 			break;
 
 		if ((p->se.load.weight >> 1) > rem_load_move ||
-		    !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
+		    !can_migrate_task(p, busiest, this_cpu, sd, idle,
+				      all_pinned))
 			continue;
 
 		pull_task(busiest, p, this_rq, this_cpu);
@@ -2152,9 +2152,6 @@ out:
 	 */
 	schedstat_add(sd, lb_gained[idle], pulled);
 
-	if (all_pinned)
-		*all_pinned = pinned;
-
 	return max_load_move - rem_load_move;
 }
 
@@ -3061,7 +3058,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 
 	/*
 	 * if *imbalance is less than the average load per runnable task
-	 * there is no gaurantee that any tasks will be moved so we'll have
+	 * there is no guarantee that any tasks will be moved so we'll have
 	 * a think about bumping its value to force at least one task to be
 	 * moved
 	 */
@@ -3126,6 +3123,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (!sds.busiest || sds.busiest_nr_running == 0)
 		goto out_balanced;
 
+	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
+
 	/*
 	 * If the busiest group is imbalanced the below checks don't
 	 * work because they assumes all things are equal, which typically
@@ -3150,7 +3149,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 * Don't pull any tasks if this group is already above the domain
 	 * average load.
 	 */
-	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
 	if (sds.this_load >= sds.avg_load)
 		goto out_balanced;
 
@@ -3339,6 +3337,7 @@ redo:
 		 * still unbalanced. ld_moved simply stays zero, so it is
 		 * correctly treated as an imbalance.
 		 */
+		all_pinned = 1;
 		local_irq_save(flags);
 		double_rq_lock(this_rq, busiest);
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
@@ -3819,6 +3818,17 @@ void select_nohz_load_balancer(int stop_tick)
 
 static DEFINE_SPINLOCK(balancing);
 
+static unsigned long __read_mostly max_load_balance_interval = HZ/10;
+
+/*
+ * Scale the max load_balance interval with the number of CPUs in the system.
+ * This trades load-balance latency on larger machines for less cross talk.
+ */
+static void update_max_interval(void)
+{
+	max_load_balance_interval = HZ*num_online_cpus()/10;
+}
+
 /*
  * It checks each scheduling domain to see if it is due to be balanced,
  * and initiates a balancing operation if so.
@@ -3848,10 +3858,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 
 		/* scale ms to jiffies */
 		interval = msecs_to_jiffies(interval);
-		if (unlikely(!interval))
-			interval = 1;
-		if (interval > HZ*NR_CPUS/10)
-			interval = HZ*NR_CPUS/10;
+		interval = clamp(interval, 1UL, max_load_balance_interval);
 
 		need_serialize = sd->flags & SD_SERIALIZE;
 
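
The all_pinned hunks above boil down to a caller-owned flag: load_balance() now sets all_pinned = 1 up front, balance_tasks() no longer keeps a private "pinned" copy, and can_migrate_task() clears the flag through the pointer as soon as any task is allowed to move. The snippet below is a minimal userspace sketch of that flow, using hypothetical stand-ins (can_migrate(), pull_movable()) rather than the kernel functions themselves.

/*
 * Hedged sketch of the caller-initializes / callee-clears pattern behind
 * the all_pinned change; stand-in names, not the kernel code.
 */
#include <stdio.h>

/* Deepest check: clears *all_pinned the moment one task is movable. */
static int can_migrate(int task_allowed_on_dst, int *all_pinned)
{
	if (!task_allowed_on_dst)
		return 0;
	*all_pinned = 0;
	return 1;
}

/* Mirrors balance_tasks(): threads all_pinned through, no local copy. */
static int pull_movable(const int *allowed, int n, int *all_pinned)
{
	int pulled = 0;

	for (int i = 0; i < n; i++)
		if (can_migrate(allowed[i], all_pinned))
			pulled++;
	return pulled;
}

int main(void)
{
	int allowed[] = { 0, 0, 1 };
	int all_pinned = 1;	/* caller sets it once, as load_balance() now does */
	int pulled = pull_movable(allowed, 3, &all_pinned);

	printf("pulled=%d all_pinned=%d\n", pulled, all_pinned);
	return 0;
}

Running this prints "pulled=1 all_pinned=0": one movable task is enough to clear the flag for the whole pass.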
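
The rebalance-interval hunks replace the two open-coded bounds with clamp() against max_load_balance_interval, which update_max_interval() scales by the number of online CPUs (HZ*num_online_cpus()/10, i.e. roughly 100 ms worth of jiffies per CPU) instead of the compile-time NR_CPUS. Below is a minimal userspace sketch of that arithmetic, assuming HZ = 1000 and a stubbed num_online_cpus() returning 8; clamp_ul() is a local helper standing in for the kernel's clamp() macro.

/* Minimal userspace sketch of the clamped rebalance interval; assumed values. */
#include <stdio.h>

#define HZ 1000UL

/* Stand-in for the kernel's num_online_cpus(). */
static unsigned long num_online_cpus(void) { return 8; }

/* Local scalar clamp, same shape as clamp(val, lo, hi) for this case. */
static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	/* HZ*ncpus/10: about 100 ms worth of jiffies per online CPU. */
	unsigned long max_load_balance_interval = HZ * num_online_cpus() / 10;
	unsigned long intervals[] = { 0, 64, 5000 };

	for (int i = 0; i < 3; i++)
		printf("interval %lu -> %lu jiffies\n", intervals[i],
		       clamp_ul(intervals[i], 1UL, max_load_balance_interval));
	return 0;
}

With 8 CPUs the ceiling works out to 800 jiffies, so the three sample intervals come out as 1, 64 and 800.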