diff options
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 9 |
1 file changed, 5 insertions, 4 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index 9b1e79371c20..22c532a6f82c 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -203,7 +203,6 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) | |||
203 | hrtimer_init(&rt_b->rt_period_timer, | 203 | hrtimer_init(&rt_b->rt_period_timer, |
204 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 204 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
205 | rt_b->rt_period_timer.function = sched_rt_period_timer; | 205 | rt_b->rt_period_timer.function = sched_rt_period_timer; |
206 | rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED; | ||
207 | } | 206 | } |
208 | 207 | ||
209 | static inline int rt_bandwidth_enabled(void) | 208 | static inline int rt_bandwidth_enabled(void) |
@@ -1139,7 +1138,6 @@ static void init_rq_hrtick(struct rq *rq) | |||
1139 | 1138 | ||
1140 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 1139 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
1141 | rq->hrtick_timer.function = hrtick; | 1140 | rq->hrtick_timer.function = hrtick; |
1142 | rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; | ||
1143 | } | 1141 | } |
1144 | #else /* CONFIG_SCHED_HRTICK */ | 1142 | #else /* CONFIG_SCHED_HRTICK */ |
1145 | static inline void hrtick_clear(struct rq *rq) | 1143 | static inline void hrtick_clear(struct rq *rq) |
@@ -1453,9 +1451,10 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); | |||
1453 | static unsigned long cpu_avg_load_per_task(int cpu) | 1451 | static unsigned long cpu_avg_load_per_task(int cpu) |
1454 | { | 1452 | { |
1455 | struct rq *rq = cpu_rq(cpu); | 1453 | struct rq *rq = cpu_rq(cpu); |
1454 | unsigned long nr_running = ACCESS_ONCE(rq->nr_running); | ||
1456 | 1455 | ||
1457 | if (rq->nr_running) | 1456 | if (nr_running) |
1458 | rq->avg_load_per_task = rq->load.weight / rq->nr_running; | 1457 | rq->avg_load_per_task = rq->load.weight / nr_running; |
1459 | else | 1458 | else |
1460 | rq->avg_load_per_task = 0; | 1459 | rq->avg_load_per_task = 0; |
1461 | 1460 | ||
@@ -6586,7 +6585,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6586 | req = list_entry(rq->migration_queue.next, | 6585 | req = list_entry(rq->migration_queue.next, |
6587 | struct migration_req, list); | 6586 | struct migration_req, list); |
6588 | list_del_init(&req->list); | 6587 | list_del_init(&req->list); |
6588 | spin_unlock_irq(&rq->lock); | ||
6589 | complete(&req->done); | 6589 | complete(&req->done); |
6590 | spin_lock_irq(&rq->lock); | ||
6590 | } | 6591 | } |
6591 | spin_unlock_irq(&rq->lock); | 6592 | spin_unlock_irq(&rq->lock); |
6592 | break; | 6593 | break; |