Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 9b1e79371c20..3e70963120a0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1453,9 +1453,10 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 static unsigned long cpu_avg_load_per_task(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
+	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
 
-	if (rq->nr_running)
-		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+	if (nr_running)
+		rq->avg_load_per_task = rq->load.weight / nr_running;
 	else
 		rq->avg_load_per_task = 0;
 
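
The hunk above fixes a divide-by-zero race: cpu_avg_load_per_task() can run without rq->lock held, so rq->nr_running may drop to zero between the test and the division, and the compiler is likewise free to reload the field between the two uses. Reading it once through ACCESS_ONCE() into a local makes the test and the divisor see the same value. A minimal user-space sketch of the read-once pattern (ACCESS_ONCE spelled out by hand; shared_nr_running is a stand-in for rq->nr_running):

	#include <stdio.h>

	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

	static unsigned long shared_nr_running;	/* updated by other threads */

	static unsigned long avg_load(unsigned long total_weight)
	{
		/* One read: the divisor cannot differ from the tested value. */
		unsigned long nr = ACCESS_ONCE(shared_nr_running);

		return nr ? total_weight / nr : 0;
	}

	int main(void)
	{
		shared_nr_running = 4;
		printf("%lu\n", avg_load(4096));	/* prints 1024 */
		return 0;
	}
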
@@ -4202,7 +4203,6 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
 
 	if (p == rq->idle) {
 		p->stime = cputime_add(p->stime, steal);
-		account_group_system_time(p, steal);
 		if (atomic_read(&rq->nr_iowait) > 0)
 			cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
 		else
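
The second hunk stops pushing idle-task steal time into the thread-group totals: the iowait/idle bucketing below it is kept, but the extra account_group_system_time() call is dropped, presumably because time charged to the idle task should not be folded into any group's system time. A reduced sketch of the bucketing that remains (plain integers stand in for the kernel's cputime types; charge_idle_steal() is a hypothetical name):

	#include <stdint.h>

	typedef uint64_t cputime64_t;

	struct cpu_usage_stat {
		cputime64_t iowait;
		cputime64_t idle;
	};

	/* Steal time observed while idle goes to the iowait bucket when
	 * tasks are blocked on I/O, and to the idle bucket otherwise. */
	static void charge_idle_steal(struct cpu_usage_stat *cpustat,
				      cputime64_t tmp, int nr_iowait)
	{
		if (nr_iowait > 0)
			cpustat->iowait += tmp;
		else
			cpustat->idle += tmp;
	}

	int main(void)
	{
		struct cpu_usage_stat stat = { 0, 0 };

		charge_idle_steal(&stat, 100, 1);	/* 100 units -> iowait */
		return 0;
	}
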
@@ -4338,7 +4338,7 @@ void __kprobes sub_preempt_count(int val)
 	/*
 	 * Underflow?
 	 */
-	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
+	if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
 		return;
 	/*
 	 * Is the spinlock portion underflowing?
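
The third hunk sharpens the underflow check in sub_preempt_count(): kernel_locked() is non-zero while the current task holds the big kernel lock, and the BKL contributes one to the preempt count, so a subtraction that would eat into that contribution is already an underflow even though the raw count has not yet reached zero. A toy sketch of the sharpened test (preempt_count_val and bkl_held are stand-ins for the real accessors):

	#include <stdio.h>

	static int preempt_count_val = 2;	/* one preempt_disable() + BKL */
	static int bkl_held = 1;		/* kernel_locked() stand-in */

	static int would_underflow(int val)
	{
		/* While the BKL is held, its share of the count must survive. */
		return val > preempt_count_val - (!!bkl_held);
	}

	int main(void)
	{
		printf("%d\n", would_underflow(1));	/* 0: BKL share kept */
		printf("%d\n", would_underflow(2));	/* 1: would consume it */
		return 0;
	}
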
@@ -6586,7 +6586,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 			req = list_entry(rq->migration_queue.next,
 					 struct migration_req, list);
 			list_del_init(&req->list);
+			spin_unlock_irq(&rq->lock);
 			complete(&req->done);
+			spin_lock_irq(&rq->lock);
 		}
 		spin_unlock_irq(&rq->lock);
 		break;
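
The last hunk drops rq->lock around complete(): complete() wakes the waiter through the scheduler, and if that task last ran on this runqueue the wakeup path may try to take rq->lock again, deadlocking. Each request is unlinked from the queue first, so releasing the lock just around the notification is safe. A pthreads sketch of this unlock-around-wakeup shape (names are illustrative, not the kernel API):

	#include <pthread.h>

	struct req {
		struct req *next;
		pthread_mutex_t done_lock;
		pthread_cond_t done;		/* struct completion stand-in */
		int completed;
	};

	static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct req *queue_head;

	static void drain_queue(void)
	{
		pthread_mutex_lock(&queue_lock);
		while (queue_head) {
			struct req *r = queue_head;

			queue_head = r->next;	/* list_del_init() analogue */

			/* Drop the queue lock before signalling: the woken
			 * side may need it immediately, as rq->lock here. */
			pthread_mutex_unlock(&queue_lock);
			pthread_mutex_lock(&r->done_lock);
			r->completed = 1;
			pthread_cond_signal(&r->done);	/* complete() analogue */
			pthread_mutex_unlock(&r->done_lock);
			pthread_mutex_lock(&queue_lock);
		}
		pthread_mutex_unlock(&queue_lock);
	}

	int main(void)
	{
		drain_queue();		/* empty queue: returns immediately */
		return 0;
	}
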