commit     bfa322c48dc69bfdaee10faf3bd8dbc23b39a21c
tree       95360c5d253115003080264d878f3c0f907f2978
parent     88ebc08ea9f721d1345d5414288a308ea42ac458
parent     003f6c9df54970d8b19578d195b3e2b398cdbde2
author     Ingo Molnar <mingo@elte.hu>    2011-09-18 08:01:26 -0400
committer  Ingo Molnar <mingo@elte.hu>    2011-09-18 08:01:39 -0400

Merge branch 'linus' into sched/core

Merge reason: We are queueing up a dependent patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 43 +++++++++++++++++++++++++++----------------
 1 file changed, 27 insertions(+), 16 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6baade0d7649..6b0ae522f927 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3206,7 +3206,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_disable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-	perf_event_task_sched_in(current);
+	perf_event_task_sched_in(prev, current);
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
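
The hunk above hands the outgoing task to the perf hook as well as the incoming one, so the perf core can compare the two contexts on a switch and, for instance, skip needless PMU/cgroup switching when both tasks share a context. For reference only, the callee side in include/linux/perf_event.h around this kernel version looks roughly like this (a sketch, not part of this diff; details may differ):

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_branch(&perf_sched_events))
		__perf_event_task_sched_in(prev, task);
}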
@@ -4420,9 +4420,9 @@ pick_next_task(struct rq *rq)
 }
 
 /*
- * schedule() is the main scheduler function.
+ * __schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+static void __sched __schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
@@ -4463,16 +4463,6 @@ need_resched:
 				if (to_wakeup)
 					try_to_wake_up_local(to_wakeup);
 			}
-
-			/*
-			 * If we are going to sleep and we have plugged IO
-			 * queued, make sure to submit it to avoid deadlocks.
-			 */
-			if (blk_needs_flush_plug(prev)) {
-				raw_spin_unlock(&rq->lock);
-				blk_schedule_flush_plug(prev);
-				raw_spin_lock(&rq->lock);
-			}
 		}
 		switch_count = &prev->nvcsw;
 	}
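
This block goes away because flushing plugged IO from inside the scheduler core forces an unlock/relock of rq->lock, during which the runqueue state can change under __schedule(). The flush moves to the new schedule() wrapper in the next hunk, where no runqueue lock is held yet. In outline (annotation, not part of the diff):

	/* old: flush from inside the rq->lock section */
	raw_spin_unlock(&rq->lock);
	blk_schedule_flush_plug(prev);	/* calls into the block layer */
	raw_spin_lock(&rq->lock);	/* rq state may have changed here */

	/* new: flush before __schedule() takes rq->lock at all */
	sched_submit_work(tsk);
	__schedule();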
@@ -4510,6 +4500,26 @@ need_resched:
 	if (need_resched())
 		goto need_resched;
 }
+
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+	if (!tsk->state)
+		return;
+	/*
+	 * If we are going to sleep and we have plugged IO queued,
+	 * make sure to submit it to avoid deadlocks.
+	 */
+	if (blk_needs_flush_plug(tsk))
+		blk_schedule_flush_plug(tsk);
+}
+
+asmlinkage void __sched schedule(void)
+{
+	struct task_struct *tsk = current;
+
+	sched_submit_work(tsk);
+	__schedule();
+}
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
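
The split above gives two entry points: tasks that block voluntarily enter through schedule(), which first pushes out any plugged IO, while scheduler-internal and preemption paths call __schedule() directly. A toy userspace model of the pattern, all names hypothetical and for illustration only:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins modelling the schedule()/__schedule() split. */
struct task {
	int state;           /* 0 == runnable, non-zero == about to sleep */
	bool has_plugged_io; /* IO batched up but not yet submitted */
};

static struct task *cur;

/* Models __schedule(): the core context switch, no IO side effects. */
static void core_schedule(void)
{
	printf("context switch (state=%d)\n", cur->state);
}

/* Models sched_submit_work(): flush pending IO only if going to sleep. */
static void submit_work(struct task *t)
{
	if (!t->state)
		return;			/* still runnable: nothing to do */
	if (t->has_plugged_io) {
		printf("flushing plugged IO before sleeping\n");
		t->has_plugged_io = false;
	}
}

/* Models schedule(): the only entry point that flushes IO. */
static void schedule_entry(void)
{
	submit_work(cur);
	core_schedule();
}

int main(void)
{
	struct task t = { .state = 1, .has_plugged_io = true };

	cur = &t;
	schedule_entry();	/* prints the flush, then the switch */
	return 0;
}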
@@ -4576,7 +4586,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
 
 	do {
 		add_preempt_count_notrace(PREEMPT_ACTIVE);
-		schedule();
+		__schedule();
 		sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
 		/*
@@ -4604,7 +4614,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
 		local_irq_enable();
-		schedule();
+		__schedule();
 		local_irq_disable();
 		sub_preempt_count(PREEMPT_ACTIVE);
 
@@ -5729,7 +5739,7 @@ static inline int should_resched(void)
 static void __cond_resched(void)
 {
 	add_preempt_count(PREEMPT_ACTIVE);
-	schedule();
+	__schedule();
 	sub_preempt_count(PREEMPT_ACTIVE);
 }
 
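
These three hunks, preempt_schedule(), preempt_schedule_irq() and __cond_resched(), are the involuntary entry points: the task is being preempted rather than putting itself to sleep, so there is no IO plug to flush and they call __schedule() directly, bypassing sched_submit_work(). A preempted task normally still has tsk->state == TASK_RUNNING, for which sched_submit_work() would return immediately anyway.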
@@ -7611,6 +7621,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
 			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
 			if (sd && (sd->flags & SD_OVERLAP))
 				free_sched_groups(sd->groups, 0);
+			kfree(*per_cpu_ptr(sdd->sd, j));
 			kfree(*per_cpu_ptr(sdd->sg, j));
 			kfree(*per_cpu_ptr(sdd->sgp, j));
 		}
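
The final hunk plugs a memory leak: __sdt_alloc() allocates a sched_domain, a sched_group and a sched_group_power per CPU and topology level, but the free path only released the latter two, so the sched_domain itself leaked on every domain rebuild (for example across CPU hotplug). Paraphrasing the matching allocations in __sdt_alloc() in this same file (abridged sketch, details may differ; not part of this diff):

	sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
			  GFP_KERNEL, cpu_to_node(j));	/* freed by the new line */
	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
			  GFP_KERNEL, cpu_to_node(j));	/* was already freed */
	sgp = kzalloc_node(sizeof(struct sched_group_power),
			   GFP_KERNEL, cpu_to_node(j));	/* was already freed */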