Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	43
1 file changed, 27 insertions, 16 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index ccacdbdecf45..ec5f472bc5b9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3065,7 +3065,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_disable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-	perf_event_task_sched_in(current);
+	perf_event_task_sched_in(prev, current);
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
@@ -4279,9 +4279,9 @@ pick_next_task(struct rq *rq)
 }
 
 /*
- * schedule() is the main scheduler function.
+ * __schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+static void __sched __schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
@@ -4322,16 +4322,6 @@ need_resched:
 				if (to_wakeup)
 					try_to_wake_up_local(to_wakeup);
 			}
-
-			/*
-			 * If we are going to sleep and we have plugged IO
-			 * queued, make sure to submit it to avoid deadlocks.
-			 */
-			if (blk_needs_flush_plug(prev)) {
-				raw_spin_unlock(&rq->lock);
-				blk_schedule_flush_plug(prev);
-				raw_spin_lock(&rq->lock);
-			}
 		}
 		switch_count = &prev->nvcsw;
 	}
@@ -4369,6 +4359,26 @@ need_resched:
 	if (need_resched())
 		goto need_resched;
 }
+
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+	if (!tsk->state)
+		return;
+	/*
+	 * If we are going to sleep and we have plugged IO queued,
+	 * make sure to submit it to avoid deadlocks.
+	 */
+	if (blk_needs_flush_plug(tsk))
+		blk_schedule_flush_plug(tsk);
+}
+
+asmlinkage void schedule(void)
+{
+	struct task_struct *tsk = current;
+
+	sched_submit_work(tsk);
+	__schedule();
+}
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -4435,7 +4445,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
 
 	do {
 		add_preempt_count_notrace(PREEMPT_ACTIVE);
-		schedule();
+		__schedule();
 		sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
 		/*
@@ -4463,7 +4473,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
 		local_irq_enable();
-		schedule();
+		__schedule();
 		local_irq_disable();
 		sub_preempt_count(PREEMPT_ACTIVE);
 
@@ -5588,7 +5598,7 @@ static inline int should_resched(void)
 static void __cond_resched(void)
 {
 	add_preempt_count(PREEMPT_ACTIVE);
-	schedule();
+	__schedule();
 	sub_preempt_count(PREEMPT_ACTIVE);
 }
 
@@ -7443,6 +7453,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
 		struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
 		if (sd && (sd->flags & SD_OVERLAP))
 			free_sched_groups(sd->groups, 0);
+		kfree(*per_cpu_ptr(sdd->sd, j));
 		kfree(*per_cpu_ptr(sdd->sg, j));
 		kfree(*per_cpu_ptr(sdd->sgp, j));
 	}
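
Read together, the scheduler hunks above split the entry point: the core logic moves into the static __schedule(), and the exported schedule() becomes a thin wrapper that submits any plugged block I/O before the task sleeps. Previously that flush lived inside schedule() and had to drop and retake rq->lock; now it runs before __schedule() is entered. The preemption paths (preempt_schedule(), preempt_schedule_irq(), __cond_resched()) call __schedule() directly, so they skip the plug flush. Below is a consolidated sketch of the new entry path, assembled from the added lines above; it is not a standalone excerpt and relies on the rest of kernel/sched.c for __schedule() and the block-plug helpers.

/* Flush plugged block I/O before the caller blocks; no-op for runnable tasks. */
static inline void sched_submit_work(struct task_struct *tsk)
{
	if (!tsk->state)	/* still TASK_RUNNING: not about to sleep */
		return;
	/*
	 * If we are going to sleep and we have plugged IO queued,
	 * make sure to submit it to avoid deadlocks.
	 */
	if (blk_needs_flush_plug(tsk))
		blk_schedule_flush_plug(tsk);
}

asmlinkage void schedule(void)
{
	struct task_struct *tsk = current;

	sched_submit_work(tsk);	/* runs before __schedule() takes rq->lock */
	__schedule();		/* the former schedule() body */
}
EXPORT_SYMBOL(schedule);

The remaining hunks are independent of this split: perf_event_task_sched_in() gains the previous task as an argument, and __sdt_free() now also frees the per-CPU sched_domain allocation alongside the sched_group and sched_group_power entries.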
