Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	35
1 file changed, 25 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 57c933ffbee1..e4bb1dd7b308 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -399,7 +399,7 @@ struct cfs_rq {
 	 */
 	struct sched_entity *curr, *next, *last;
 
-	unsigned long nr_spread_over;
+	unsigned int nr_spread_over;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
@@ -969,6 +969,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
+void task_rq_unlock_wait(struct task_struct *p)
+{
+	struct rq *rq = task_rq(p);
+
+	smp_mb(); /* spin-unlock-wait is not a full memory barrier */
+	spin_unlock_wait(&rq->lock);
+}
+
 static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
@@ -1445,9 +1453,12 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 static unsigned long cpu_avg_load_per_task(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
+	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
 
-	if (rq->nr_running)
-		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+	if (nr_running)
+		rq->avg_load_per_task = rq->load.weight / nr_running;
+	else
+		rq->avg_load_per_task = 0;
 
 	return rq->avg_load_per_task;
 }
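
The hunk above snapshots rq->nr_running exactly once so the zero check and the division see the same value; with two separate reads, a concurrent update on another CPU could zero the divisor between the check and the division. A stand-alone sketch of the same read-once idiom (the struct and macro below are stand-ins, not the kernel's own definitions):

	/* Stand-in for the kernel's ACCESS_ONCE(): force exactly one read. */
	#define ACCESS_ONCE_UL(x) (*(volatile unsigned long *)&(x))

	struct fake_rq {			/* hypothetical stand-in for struct rq */
		unsigned long nr_running;	/* may be updated by another CPU */
		unsigned long load_weight;
		unsigned long avg_load_per_task;
	};

	static unsigned long avg_load_per_task(struct fake_rq *rq)
	{
		/* One read feeds both the test and the division, so the
		 * divisor cannot become zero in between. */
		unsigned long nr_running = ACCESS_ONCE_UL(rq->nr_running);

		if (nr_running)
			rq->avg_load_per_task = rq->load_weight / nr_running;
		else
			rq->avg_load_per_task = 0;

		return rq->avg_load_per_task;
	}
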
@@ -5860,6 +5871,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
+	spin_lock_irqsave(&rq->lock, flags);
+
 	__sched_fork(idle);
 	idle->se.exec_start = sched_clock();
 
@@ -5867,7 +5880,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	idle->cpus_allowed = cpumask_of_cpu(cpu);
 	__set_task_cpu(idle, cpu);
 
-	spin_lock_irqsave(&rq->lock, flags);
 	rq->curr = rq->idle = idle;
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	idle->oncpu = 1;
@@ -6575,7 +6587,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 			req = list_entry(rq->migration_queue.next,
 					 struct migration_req, list);
 			list_del_init(&req->list);
+			spin_unlock_irq(&rq->lock);
 			complete(&req->done);
+			spin_lock_irq(&rq->lock);
 		}
 		spin_unlock_irq(&rq->lock);
 		break;
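
This hunk drops rq->lock across complete() so the woken waiter is free to take the runqueue lock (or free the request) without deadlocking against the notifier. A hedged, generic sketch of the same drop-the-lock-around-wakeup pattern; the request type and function below are hypothetical, not part of sched.c:

	#include <linux/completion.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct work_req {
		struct list_head list;
		struct completion done;
	};

	/* Never call complete() while holding a lock the woken side may need:
	 * drop it around the wakeup, re-take it before touching the list. */
	static void drain_queue(struct list_head *queue, spinlock_t *lock)
	{
		spin_lock_irq(lock);
		while (!list_empty(queue)) {
			struct work_req *req = list_entry(queue->next,
							  struct work_req, list);

			list_del_init(&req->list);
			spin_unlock_irq(lock);	/* waiter may take 'lock' or free req */
			complete(&req->done);
			spin_lock_irq(lock);	/* re-take before the next list check */
		}
		spin_unlock_irq(lock);
	}
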
@@ -7778,13 +7792,14 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  *
  * The passed in 'doms_new' should be kmalloc'd. This routine takes
  * ownership of it and will kfree it when done with it. If the caller
- * failed the kmalloc call, then it can pass in doms_new == NULL,
- * and partition_sched_domains() will fallback to the single partition
- * 'fallback_doms', it also forces the domains to be rebuilt.
+ * failed the kmalloc call, then it can pass in doms_new == NULL &&
+ * ndoms_new == 1, and partition_sched_domains() will fallback to
+ * the single partition 'fallback_doms', it also forces the domains
+ * to be rebuilt.
  *
- * If doms_new==NULL it will be replaced with cpu_online_map.
- * ndoms_new==0 is a special case for destroying existing domains.
- * It will not create the default domain.
+ * If doms_new == NULL it will be replaced with cpu_online_map.
+ * ndoms_new == 0 is a special case for destroying existing domains,
+ * and it will not create the default domain.
  *
  * Call with hotplug lock held
  */
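
For context, a hedged sketch of a caller relying on the fallback documented above. The prototype shown, and the use of get_online_cpus() as the hotplug lock, are assumptions about this era of the scheduler code, not part of this diff:

	#include <linux/cpu.h>		/* get_online_cpus() / put_online_cpus() */
	#include <linux/cpumask.h>
	#include <linux/sched.h>	/* partition_sched_domains(), assumed here */
	#include <linux/slab.h>		/* kmalloc() */

	/* Assumed prototype of this era:
	 *   void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
	 *                                struct sched_domain_attr *dattr_new);
	 * Per the comment above, it takes ownership of 'doms' and kfrees it. */
	static void example_single_partition(void)
	{
		cpumask_t *doms = kmalloc(sizeof(*doms), GFP_KERNEL);

		if (doms)
			*doms = cpu_online_map;	/* caller-built single partition */

		get_online_cpus();	/* "Call with hotplug lock held" */
		/* If the kmalloc failed, doms == NULL && ndoms_new == 1 makes
		 * partition_sched_domains() fall back to 'fallback_doms' and
		 * forces the domains to be rebuilt, as described above. */
		partition_sched_domains(1, doms, NULL);
		put_online_cpus();
	}
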