Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  28
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ebaf432365f6..a4c156d9a4a5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -399,7 +399,7 @@ struct cfs_rq {
 	 */
 	struct sched_entity *curr, *next, *last;
 
-	unsigned long nr_spread_over;
+	unsigned int nr_spread_over;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
@@ -949,6 +949,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
+void task_rq_unlock_wait(struct task_struct *p)
+{
+	struct rq *rq = task_rq(p);
+
+	smp_mb(); /* spin-unlock-wait is not a full memory barrier */
+	spin_unlock_wait(&rq->lock);
+}
+
 static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
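
The new task_rq_unlock_wait() helper above lets a caller wait until any current holder of the task's runqueue lock has dropped it, without taking the lock itself; the explicit smp_mb() is needed because spin_unlock_wait() on its own is not a full memory barrier. A minimal sketch of a hypothetical caller follows (example_put_task() and the teardown step it guards are illustrative only, not part of this patch):

/* Hypothetical teardown path: make sure no CPU is still inside a
 * critical section under this task's rq->lock before per-task state
 * is released. */
static void example_put_task(struct task_struct *p)
{
	task_rq_unlock_wait(p);	/* drain current rq->lock holders */
	/* ... now safe to free or reuse the state those holders touched ... */
}
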
@@ -1428,6 +1436,8 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 	if (rq->nr_running)
 		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+	else
+		rq->avg_load_per_task = 0;
 
 	return rq->avg_load_per_task;
 }
@@ -5840,6 +5850,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
+	spin_lock_irqsave(&rq->lock, flags);
+
 	__sched_fork(idle);
 	idle->se.exec_start = sched_clock();
 
@@ -5847,7 +5859,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	idle->cpus_allowed = cpumask_of_cpu(cpu);
 	__set_task_cpu(idle, cpu);
 
-	spin_lock_irqsave(&rq->lock, flags);
 	rq->curr = rq->idle = idle;
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	idle->oncpu = 1;
@@ -7740,13 +7751,14 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  *
  * The passed in 'doms_new' should be kmalloc'd. This routine takes
  * ownership of it and will kfree it when done with it. If the caller
- * failed the kmalloc call, then it can pass in doms_new == NULL,
- * and partition_sched_domains() will fallback to the single partition
- * 'fallback_doms', it also forces the domains to be rebuilt.
+ * failed the kmalloc call, then it can pass in doms_new == NULL &&
+ * ndoms_new == 1, and partition_sched_domains() will fallback to
+ * the single partition 'fallback_doms', it also forces the domains
+ * to be rebuilt.
  *
- * If doms_new==NULL it will be replaced with cpu_online_map.
- * ndoms_new==0 is a special case for destroying existing domains.
- * It will not create the default domain.
+ * If doms_new == NULL it will be replaced with cpu_online_map.
+ * ndoms_new == 0 is a special case for destroying existing domains,
+ * and it will not create the default domain.
  *
  * Call with hotplug lock held
  */
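
Going by the updated comment, the fallback and teardown cases can be exercised roughly as sketched below. This assumes the prototype of the era, partition_sched_domains(int ndoms_new, cpumask_t *doms_new, struct sched_domain_attr *dattr_new), and reads "hotplug lock held" as the get_online_cpus()/put_online_cpus() pair; both are assumptions rather than something this patch establishes.

/* Caller whose kmalloc of a fresh domain set failed: doms_new == NULL &&
 * ndoms_new == 1 makes partition_sched_domains() fall back to
 * 'fallback_doms' and forces the domains to be rebuilt. */
get_online_cpus();
partition_sched_domains(1, NULL, NULL);
put_online_cpus();

/* ndoms_new == 0: destroy the existing domains without creating the
 * default domain. */
get_online_cpus();
partition_sched_domains(0, NULL, NULL);
put_online_cpus();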