path: root/kernel/sched.c
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	18
1 files changed, 11 insertions, 7 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 327f91c63c99..4de56108c86f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1462,6 +1462,8 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 	if (rq->nr_running)
 		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+	else
+		rq->avg_load_per_task = 0;
 
 	return rq->avg_load_per_task;
 }
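
Note on the first hunk: previously, when the runqueue was empty, cpu_avg_load_per_task() returned whatever stale value was still cached in rq->avg_load_per_task; the new else branch resets it to 0. For context, the full helper after this hunk would read roughly as follows; the function prologue (the cpu_rq() lookup) is not shown in the diff and is assumed from the surrounding source of that era:

static unsigned long cpu_avg_load_per_task(int cpu)
{
	struct rq *rq = cpu_rq(cpu);	/* assumed prologue, not part of the hunk */

	if (rq->nr_running)
		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
	else
		rq->avg_load_per_task = 0;	/* reset instead of returning a stale average */

	return rq->avg_load_per_task;
}
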
@@ -5874,6 +5876,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
+	spin_lock_irqsave(&rq->lock, flags);
+
 	__sched_fork(idle);
 	idle->se.exec_start = sched_clock();
 
@@ -5881,7 +5885,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	idle->cpus_allowed = cpumask_of_cpu(cpu);
 	__set_task_cpu(idle, cpu);
 
-	spin_lock_irqsave(&rq->lock, flags);
 	rq->curr = rq->idle = idle;
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	idle->oncpu = 1;
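
Note on the two init_idle() hunks: taken together they move the spin_lock_irqsave() on rq->lock from just before the rq->curr assignment up to before __sched_fork(), so the idle task's setup (including the sched_clock() read into se.exec_start) now runs under the runqueue lock. A sketch of the resulting function, reconstructed from the post-image of the two hunks with the unshown lines elided:

void __cpuinit init_idle(struct task_struct *idle, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	spin_lock_irqsave(&rq->lock, flags);	/* now taken before __sched_fork() */

	__sched_fork(idle);
	idle->se.exec_start = sched_clock();

	/* ... lines between the two hunks are not part of this diff ... */

	idle->cpus_allowed = cpumask_of_cpu(cpu);
	__set_task_cpu(idle, cpu);

	rq->curr = rq->idle = idle;	/* reached with rq->lock already held */
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
	idle->oncpu = 1;
#endif
	/* ... remainder of the function, including the matching unlock, is below the shown context ... */
}
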
@@ -7792,13 +7795,14 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  *
  * The passed in 'doms_new' should be kmalloc'd. This routine takes
  * ownership of it and will kfree it when done with it. If the caller
- * failed the kmalloc call, then it can pass in doms_new == NULL,
- * and partition_sched_domains() will fallback to the single partition
- * 'fallback_doms', it also forces the domains to be rebuilt.
+ * failed the kmalloc call, then it can pass in doms_new == NULL &&
+ * ndoms_new == 1, and partition_sched_domains() will fallback to
+ * the single partition 'fallback_doms', it also forces the domains
+ * to be rebuilt.
  *
- * If doms_new==NULL it will be replaced with cpu_online_map.
- * ndoms_new==0 is a special case for destroying existing domains.
- * It will not create the default domain.
+ * If doms_new == NULL it will be replaced with cpu_online_map.
+ * ndoms_new == 0 is a special case for destroying existing domains,
+ * and it will not create the default domain.
  *
  * Call with hotplug lock held
  */
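
Note on the last hunk: the reworded comment spells out the fallback semantics of partition_sched_domains(): on allocation failure a caller passes doms_new == NULL together with ndoms_new == 1 to fall back to the single 'fallback_doms' partition, while ndoms_new == 0 destroys the existing domains without building a default one. A minimal, hypothetical caller sketch, assuming the prototype of that era, void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, struct sched_domain_attr *dattr_new):

	/* build a single partition covering the online CPUs */
	cpumask_t *doms_new = kmalloc(sizeof(cpumask_t), GFP_KERNEL);

	if (doms_new) {
		*doms_new = cpu_online_map;
		partition_sched_domains(1, doms_new, NULL);	/* takes ownership, will kfree() it */
	} else {
		/* kmalloc failed: doms_new == NULL && ndoms_new == 1 selects 'fallback_doms' */
		partition_sched_domains(1, NULL, NULL);
	}

	/* ndoms_new == 0: tear down existing domains, no default domain is created */
	partition_sched_domains(0, NULL, NULL);

Per the comment above, both calls must be made with the hotplug lock held.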