diff options
Diffstat (limited to 'kernel/sched.c')
| -rw-r--r-- | kernel/sched.c | 23 |
1 file changed, 14 insertions, 9 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index 2a106b6b78b0..1b8f8c3aecc4 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -1453,9 +1453,12 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); | |||
| 1453 | static unsigned long cpu_avg_load_per_task(int cpu) | 1453 | static unsigned long cpu_avg_load_per_task(int cpu) |
| 1454 | { | 1454 | { |
| 1455 | struct rq *rq = cpu_rq(cpu); | 1455 | struct rq *rq = cpu_rq(cpu); |
| 1456 | unsigned long nr_running = ACCESS_ONCE(rq->nr_running); | ||
| 1456 | 1457 | ||
| 1457 | if (rq->nr_running) | 1458 | if (nr_running) |
| 1458 | rq->avg_load_per_task = rq->load.weight / rq->nr_running; | 1459 | rq->avg_load_per_task = rq->load.weight / nr_running; |
| 1460 | else | ||
| 1461 | rq->avg_load_per_task = 0; | ||
| 1459 | 1462 | ||
| 1460 | return rq->avg_load_per_task; | 1463 | return rq->avg_load_per_task; |
| 1461 | } | 1464 | } |
| @@ -5868,6 +5871,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
| 5868 | struct rq *rq = cpu_rq(cpu); | 5871 | struct rq *rq = cpu_rq(cpu); |
| 5869 | unsigned long flags; | 5872 | unsigned long flags; |
| 5870 | 5873 | ||
| 5874 | spin_lock_irqsave(&rq->lock, flags); | ||
| 5875 | |||
| 5871 | __sched_fork(idle); | 5876 | __sched_fork(idle); |
| 5872 | idle->se.exec_start = sched_clock(); | 5877 | idle->se.exec_start = sched_clock(); |
| 5873 | 5878 | ||
| @@ -5875,7 +5880,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
| 5875 | idle->cpus_allowed = cpumask_of_cpu(cpu); | 5880 | idle->cpus_allowed = cpumask_of_cpu(cpu); |
| 5876 | __set_task_cpu(idle, cpu); | 5881 | __set_task_cpu(idle, cpu); |
| 5877 | 5882 | ||
| 5878 | spin_lock_irqsave(&rq->lock, flags); | ||
| 5879 | rq->curr = rq->idle = idle; | 5883 | rq->curr = rq->idle = idle; |
| 5880 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) | 5884 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) |
| 5881 | idle->oncpu = 1; | 5885 | idle->oncpu = 1; |
| @@ -7786,13 +7790,14 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
| 7786 | * | 7790 | * |
| 7787 | * The passed in 'doms_new' should be kmalloc'd. This routine takes | 7791 | * The passed in 'doms_new' should be kmalloc'd. This routine takes |
| 7788 | * ownership of it and will kfree it when done with it. If the caller | 7792 | * ownership of it and will kfree it when done with it. If the caller |
| 7789 | * failed the kmalloc call, then it can pass in doms_new == NULL, | 7793 | * failed the kmalloc call, then it can pass in doms_new == NULL && |
| 7790 | * and partition_sched_domains() will fallback to the single partition | 7794 | * ndoms_new == 1, and partition_sched_domains() will fallback to |
| 7791 | * 'fallback_doms', it also forces the domains to be rebuilt. | 7795 | * the single partition 'fallback_doms', it also forces the domains |
| 7796 | * to be rebuilt. | ||
| 7792 | * | 7797 | * |
| 7793 | * If doms_new==NULL it will be replaced with cpu_online_map. | 7798 | * If doms_new == NULL it will be replaced with cpu_online_map. |
| 7794 | * ndoms_new==0 is a special case for destroying existing domains. | 7799 | * ndoms_new == 0 is a special case for destroying existing domains, |
| 7795 | * It will not create the default domain. | 7800 | * and it will not create the default domain. |
| 7796 | * | 7801 | * |
| 7797 | * Call with hotplug lock held | 7802 | * Call with hotplug lock held |
| 7798 | */ | 7803 | */ |
