author     Ingo Molnar <mingo@elte.hu>    2010-11-26 09:03:27 -0500
committer  Ingo Molnar <mingo@elte.hu>    2010-11-26 09:05:21 -0500
commit     22a867d81707b0a2720bb5f65255265b95d30526 (patch)
tree       7ec19b155b50b13ae95244c2bfa16aea4920c4f6 /kernel/sched_fair.c
parent     5bb6b1ea67a73f0665a41726dd7138977b992c6c (diff)
parent     3561d43fd289f590fdae672e5eb831b8d5cf0bf6 (diff)
Merge commit 'v2.6.37-rc3' into sched/core
Merge reason: Pick up latest fixes.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--   kernel/sched_fair.c   40
1 file changed, 31 insertions, 9 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 82fd884b4e33..fdbdb5084c49 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1799,12 +1799,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	int scale = cfs_rq->nr_running >= sched_nr_latency;
 
-	if (unlikely(rt_prio(p->prio)))
-		goto preempt;
-
-	if (unlikely(p->sched_class != &fair_sched_class))
-		return;
-
 	if (unlikely(se == pse))
 		return;
 
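The two checks dropped above had become redundant: by v2.6.37-rc the core wakeup path dispatches preemption per scheduling class, so check_preempt_wakeup() is only reached when both tasks are in the fair class, and a waking task of a higher class reschedules curr before the fair callback is ever consulted. A from-memory sketch of that era's check_preempt_curr() in kernel/sched.c, shown for context only and not part of this diff:

static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		/* Same class: let the class decide (fair -> check_preempt_wakeup). */
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		/* Walk the classes from highest priority down. */
		for_each_class(class) {
			if (class == rq->curr->sched_class)
				break;		/* curr outranks p: no preemption */
			if (class == p->sched_class) {
				resched_task(rq->curr);	/* p outranks curr */
				break;
			}
		}
	}
}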
@@ -2226,13 +2220,16 @@ struct sd_lb_stats {
 	unsigned long this_load_per_task;
 	unsigned long this_nr_running;
 	unsigned long this_has_capacity;
+	unsigned int  this_idle_cpus;
 
 	/* Statistics of the busiest group */
+	unsigned int  busiest_idle_cpus;
 	unsigned long max_load;
 	unsigned long busiest_load_per_task;
 	unsigned long busiest_nr_running;
 	unsigned long busiest_group_capacity;
 	unsigned long busiest_has_capacity;
+	unsigned int  busiest_group_weight;
 
 	int group_imb; /* Is there imbalance in this sd */
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -2254,6 +2251,8 @@ struct sg_lb_stats {
 	unsigned long sum_nr_running; /* Nr tasks running in the group */
 	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
 	unsigned long group_capacity;
+	unsigned long idle_cpus;
+	unsigned long group_weight;
 	int group_imb; /* Is there an imbalance in the group ? */
 	int group_has_capacity; /* Is there extra capacity in the group? */
 };
@@ -2622,7 +2621,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 		sgs->group_load += load;
 		sgs->sum_nr_running += rq->nr_running;
 		sgs->sum_weighted_load += weighted_cpuload(i);
-
+		if (idle_cpu(i))
+			sgs->idle_cpus++;
 	}
 
 	/*
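For reference, the idle_cpu() test used in the new accounting above was, in this era, essentially a check that the runqueue is currently running its idle task. The one-liner below is quoted from memory as an approximation and is not part of this diff:

/* A cpu counts as idle when its runqueue's current task is the idle task. */
int idle_cpu(int cpu)
{
	return cpu_curr(cpu) == cpu_rq(cpu)->idle;
}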
@@ -2660,6 +2660,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
 	if (!sgs->group_capacity)
 		sgs->group_capacity = fix_small_capacity(sd, group);
+	sgs->group_weight = group->group_weight;
 
 	if (sgs->group_capacity > sgs->sum_nr_running)
 		sgs->group_has_capacity = 1;
@@ -2767,13 +2768,16 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 		sds->this_nr_running = sgs.sum_nr_running;
 		sds->this_load_per_task = sgs.sum_weighted_load;
 		sds->this_has_capacity = sgs.group_has_capacity;
+		sds->this_idle_cpus = sgs.idle_cpus;
 	} else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
 		sds->max_load = sgs.avg_load;
 		sds->busiest = sg;
 		sds->busiest_nr_running = sgs.sum_nr_running;
+		sds->busiest_idle_cpus = sgs.idle_cpus;
 		sds->busiest_group_capacity = sgs.group_capacity;
 		sds->busiest_load_per_task = sgs.sum_weighted_load;
 		sds->busiest_has_capacity = sgs.group_has_capacity;
+		sds->busiest_group_weight = sgs.group_weight;
 		sds->group_imb = sgs.group_imb;
 	}
 
@@ -3051,8 +3055,26 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (sds.this_load >= sds.avg_load)
 		goto out_balanced;
 
-	if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
-		goto out_balanced;
+	/*
+	 * In the CPU_NEWLY_IDLE, use imbalance_pct to be conservative.
+	 * And to check for busy balance use !idle_cpu instead of
+	 * CPU_NOT_IDLE. This is because HT siblings will use CPU_NOT_IDLE
+	 * even when they are idle.
+	 */
+	if (idle == CPU_NEWLY_IDLE || !idle_cpu(this_cpu)) {
+		if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
+			goto out_balanced;
+	} else {
+		/*
+		 * This cpu is idle. If the busiest group load doesn't
+		 * have more tasks than the number of available cpu's and
+		 * there is no imbalance between this and busiest group
+		 * wrt to idle cpu's, it is balanced.
+		 */
+		if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
+		    sds.busiest_nr_running <= sds.busiest_group_weight)
+			goto out_balanced;
+	}
 
 force_balance:
 	/* Looks like there is an imbalance. Compute it */