Diffstat (limited to 'kernel'):

 kernel/sched.c | 28 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 23e353568d8e..ad37776cc39b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2354,17 +2354,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
         if (rq != orig_rq)
                 update_rq_clock(rq);
 
-        if (rq->idle_stamp) {
-                u64 delta = rq->clock - rq->idle_stamp;
-                u64 max = 2*sysctl_sched_migration_cost;
-
-                if (delta > max)
-                        rq->avg_idle = max;
-                else
-                        update_avg(&rq->avg_idle, delta);
-                rq->idle_stamp = 0;
-        }
-
         WARN_ON(p->state != TASK_WAKING);
         cpu = task_cpu(p);
 
@@ -2421,6 +2410,17 @@ out_running:
 #ifdef CONFIG_SMP
         if (p->sched_class->task_wake_up)
                 p->sched_class->task_wake_up(rq, p);
+
+        if (unlikely(rq->idle_stamp)) {
+                u64 delta = rq->clock - rq->idle_stamp;
+                u64 max = 2*sysctl_sched_migration_cost;
+
+                if (delta > max)
+                        rq->avg_idle = max;
+                else
+                        update_avg(&rq->avg_idle, delta);
+                rq->idle_stamp = 0;
+        }
 #endif
 out:
         task_rq_unlock(rq, &flags);
@@ -4098,7 +4098,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
         unsigned long flags;
         struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-        cpumask_setall(cpus);
+        cpumask_copy(cpus, cpu_online_mask);
 
         /*
          * When power savings policy is enabled for the parent domain, idle
@@ -4261,7 +4261,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
         int all_pinned = 0;
         struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-        cpumask_setall(cpus);
+        cpumask_copy(cpus, cpu_online_mask);
 
         /*
          * When power savings policy is enabled for the parent domain, idle
@@ -9522,6 +9522,8 @@ void __init sched_init(void)
                 rq->cpu = i;
                 rq->online = 0;
                 rq->migration_thread = NULL;
+                rq->idle_stamp = 0;
+                rq->avg_idle = 2*sysctl_sched_migration_cost;
                 INIT_LIST_HEAD(&rq->migration_queue);
                 rq_attach_root(rq, &def_root_domain);
 #endif
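
Note on the relocated block: it samples how long the runqueue sat idle (rq->clock - rq->idle_stamp), clamps the sample at twice sysctl_sched_migration_cost, and folds it into rq->avg_idle. The stand-alone sketch below mirrors that arithmetic only; it assumes update_avg() behaves like a 1/8-weight exponential moving average and that sysctl_sched_migration_cost defaults to 500000 ns, neither of which is shown in this diff.

#include <stdio.h>
#include <stdint.h>

/* Assumed default of sysctl_sched_migration_cost (ns); not shown in the diff. */
static const uint64_t sysctl_sched_migration_cost = 500000;

/* Assumed 1/8-weight exponential moving average, standing in for update_avg(). */
static void update_avg(uint64_t *avg, uint64_t sample)
{
        int64_t diff = (int64_t)sample - (int64_t)*avg;
        *avg += diff / 8;
}

/* Mirrors the block that now runs under CONFIG_SMP in try_to_wake_up(). */
static void note_wakeup(uint64_t *avg_idle, uint64_t *idle_stamp, uint64_t clock)
{
        if (*idle_stamp) {
                uint64_t delta = clock - *idle_stamp;
                uint64_t max = 2 * sysctl_sched_migration_cost;

                if (delta > max)
                        *avg_idle = max;                /* clamp long idle periods */
                else
                        update_avg(avg_idle, delta);    /* fold short ones into the average */
                *idle_stamp = 0;
        }
}

int main(void)
{
        uint64_t avg_idle = 2 * sysctl_sched_migration_cost;  /* initialised as in sched_init() */
        uint64_t idle_stamp = 1000000;                        /* CPU went idle at t = 1 ms */

        note_wakeup(&avg_idle, &idle_stamp, 1200000);         /* woken 200 us later */
        printf("avg_idle = %llu ns\n", (unsigned long long)avg_idle);
        return 0;
}

With these example numbers the program prints "avg_idle = 900000 ns": the 200 us idle period pulls the average down from its 1 ms ceiling.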