author     Linus Torvalds <torvalds@linux-foundation.org>  2012-08-03 13:58:13 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-08-03 13:58:13 -0400
commit     fcc1d2a9cea4ba97c9800e1de0748003bba07335 (patch)
tree       c27c56275048e33e61097202cabaa48ef49f1314 /kernel
parent     bd463a06064c4bc8497f6aa6dfb4437be8f07a3b (diff)
parent     b9403130a5350fca59a50ed11c198cb8c7e54119 (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Fixes and two late cleanups"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/cleanups: Add load balance cpumask pointer to 'struct lb_env'
  sched: Fix comment about PREEMPT_ACTIVE bit location
  sched: Fix minor code style issues
  sched: Use task_rq_unlock() in __sched_setscheduler()
  sched/numa: Add SD_PERFER_SIBLING to CPU domain
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c    |  4
-rw-r--r--  kernel/sched/cpupri.c  | 10
-rw-r--r--  kernel/sched/fair.c    | 29
3 files changed, 20 insertions(+), 23 deletions(-)
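Most of the fair.c churn below comes from the first cleanup in the list: the candidate-CPU mask moves into 'struct lb_env', so helpers that already receive the env no longer need a separate 'cpus' argument. A minimal userspace sketch of that pattern follows; the names and toy bitmask here are illustrative assumptions, not the kernel's types (the kernel uses struct cpumask and a per-CPU scratch mask).

    #include <stdio.h>

    /*
     * A pared-down "environment" struct in the spirit of lb_env:
     * shared load-balancing state travels in one object instead of
     * being threaded through every helper's parameter list.
     */
    struct toy_env {
            int dst_cpu;
            /* The set of CPUs under consideration for load-balancing. */
            unsigned long cpus;
    };

    /*
     * Before the cleanup the mask was an extra argument:
     *     static int pick_cpu(struct toy_env *env, unsigned long cpus);
     * After it, helpers reach the mask through the env.
     */
    static int pick_cpu(struct toy_env *env)
    {
            int cpu;

            for (cpu = 0; cpu < (int)(8 * sizeof(env->cpus)); cpu++)
                    if (env->cpus & (1UL << cpu))
                            return cpu;     /* first candidate in the mask */
            return -1;
    }

    int main(void)
    {
            struct toy_env env = {
                    .dst_cpu = 0,
                    .cpus    = 0x5aUL,      /* candidate CPUs 1, 3, 4 and 6 */
            };

            printf("first candidate: %d\n", pick_cpu(&env));  /* prints 1 */
            return 0;
    }

The payoff is visible in the hunks below: four function signatures shrink, and the call sites stop forwarding a parameter that the env already carries.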
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d325c4b2dcbb..82ad284f823b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4340,9 +4340,7 @@ recheck:
 	 */
 	if (unlikely(policy == p->policy && (!rt_policy(policy) ||
 			param->sched_priority == p->rt_priority))) {
-
-		__task_rq_unlock(rq);
-		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+		task_rq_unlock(rq, p, &flags);
 		return 0;
 	}
 
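The three-line unlock sequence collapses because the scheduler already provides a helper that releases both locks in the right order. At the time of this merge it looked roughly like the following (paraphrased from kernel/sched/core.c; treat the sparse annotations as approximate):

    static inline void
    task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
            __releases(rq->lock)
            __releases(p->pi_lock)
    {
            __task_rq_unlock(rq);           /* drop rq->lock */
            raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
    }

Note the helper takes flags by pointer, which is why the new call site passes &flags. Folding both releases into one call keeps the unlock order (rq->lock first, then p->pi_lock) in a single place instead of open-coding it at every early return.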
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index d72586fdf660..23aa789c53ee 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -65,8 +65,8 @@ static int convert_prio(int prio)
 int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		struct cpumask *lowest_mask)
 {
-	int             idx      = 0;
-	int             task_pri = convert_prio(p->prio);
+	int idx = 0;
+	int task_pri = convert_prio(p->prio);
 
 	if (task_pri >= MAX_RT_PRIO)
 		return 0;
@@ -137,9 +137,9 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
  */
 void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 {
-	int            *currpri = &cp->cpu_to_pri[cpu];
-	int             oldpri  = *currpri;
-	int             do_mb = 0;
+	int *currpri = &cp->cpu_to_pri[cpu];
+	int oldpri = *currpri;
+	int do_mb = 0;
 
 	newpri = convert_prio(newpri);
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 22321db64952..d0cc03b3e70b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3069,6 +3069,9 @@ struct lb_env {
 	int			new_dst_cpu;
 	enum cpu_idle_type	idle;
 	long			imbalance;
+	/* The set of CPUs under consideration for load-balancing */
+	struct cpumask		*cpus;
+
 	unsigned int		flags;
 
 	unsigned int		loop;
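With the pointer stashed in lb_env, every helper below that already receives the env can reach the mask as env->cpus. That is why the signatures of update_sg_lb_stats(), update_sd_lb_stats(), find_busiest_group() and find_busiest_queue() all shrink in the following hunks, with load_balance() wiring the mask in once at env setup.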
@@ -3653,8 +3656,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
 			struct sched_group *group, int load_idx,
-			int local_group, const struct cpumask *cpus,
-			int *balance, struct sg_lb_stats *sgs)
+			int local_group, int *balance, struct sg_lb_stats *sgs)
 {
 	unsigned long nr_running, max_nr_running, min_nr_running;
 	unsigned long load, max_cpu_load, min_cpu_load;
@@ -3671,7 +3673,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	max_nr_running = 0;
 	min_nr_running = ~0UL;
 
-	for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
 		nr_running = rq->nr_running;
@@ -3800,8 +3802,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
  * @sds: variable to hold the statistics for this sched_domain.
  */
 static inline void update_sd_lb_stats(struct lb_env *env,
-					const struct cpumask *cpus,
-					int *balance, struct sd_lb_stats *sds)
+					int *balance, struct sd_lb_stats *sds)
 {
 	struct sched_domain *child = env->sd->child;
 	struct sched_group *sg = env->sd->groups;
@@ -3818,8 +3819,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
 
 		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
 		memset(&sgs, 0, sizeof(sgs));
-		update_sg_lb_stats(env, sg, load_idx, local_group,
-					cpus, balance, &sgs);
+		update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
 
 		if (local_group && !(*balance))
 			return;
@@ -4055,7 +4055,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  * to restore balance.
  *
  * @env: The load balancing environment.
- * @cpus: The set of CPUs under consideration for load-balancing.
  * @balance: Pointer to a variable indicating if this_cpu
  *	is the appropriate cpu to perform load balancing at this_level.
  *
@@ -4065,7 +4064,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  *		put to idle by rebalancing its tasks onto our group.
  */
 static struct sched_group *
-find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
+find_busiest_group(struct lb_env *env, int *balance)
 {
 	struct sd_lb_stats sds;
 
@@ -4075,7 +4074,7 @@ find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
 	 * Compute the various statistics relavent for load balancing at
 	 * this level.
 	 */
-	update_sd_lb_stats(env, cpus, balance, &sds);
+	update_sd_lb_stats(env, balance, &sds);
 
 	/*
 	 * this_cpu is not the appropriate cpu to perform load balancing at
@@ -4155,8 +4154,7 @@ ret:
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
 static struct rq *find_busiest_queue(struct lb_env *env,
-				     struct sched_group *group,
-				     const struct cpumask *cpus)
+				     struct sched_group *group)
 {
 	struct rq *busiest = NULL, *rq;
 	unsigned long max_load = 0;
@@ -4171,7 +4169,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 		if (!capacity)
 			capacity = fix_small_capacity(env->sd, group);
 
-		if (!cpumask_test_cpu(i, cpus))
+		if (!cpumask_test_cpu(i, env->cpus))
 			continue;
 
 		rq = cpu_rq(i);
@@ -4252,6 +4250,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		.dst_grpmask	= sched_group_cpus(sd->groups),
 		.idle		= idle,
 		.loop_break	= sched_nr_migrate_break,
+		.cpus		= cpus,
 	};
 
 	cpumask_copy(cpus, cpu_active_mask);
@@ -4260,7 +4259,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	schedstat_inc(sd, lb_count[idle]);
 
 redo:
-	group = find_busiest_group(&env, cpus, balance);
+	group = find_busiest_group(&env, balance);
 
 	if (*balance == 0)
 		goto out_balanced;
@@ -4270,7 +4269,7 @@ redo:
 		goto out_balanced;
 	}
 
-	busiest = find_busiest_queue(&env, group, cpus);
+	busiest = find_busiest_queue(&env, group);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[idle]);
 		goto out_balanced;