Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	28
1 file changed, 9 insertions(+), 19 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index a1f10984adb3..e29a97235f26 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2601,8 +2601,12 @@ void scheduler_ipi(void)
 	struct rq *rq = this_rq();
 	struct task_struct *list = xchg(&rq->wake_list, NULL);
 
-	if (!list)
+	if (!list) {
+		/* If we don't call irq_enter(), we need to trigger the IRQ
+		 * tracing manually. */
+		ft_irq_fired();
 		return;
+	}
 
 	/*
 	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
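Note on the hunk above: the early-return path in scheduler_ipi() never goes through irq_enter()/irq_exit(), so the patch invokes the tracing hook ft_irq_fired() by hand to keep interrupt accounting consistent. Below is a rough user-space sketch of that idea only; fake_scheduler_ipi, fake_ft_irq_fired and fake_irq_count are illustrative stand-ins, not kernel or LITMUS^RT API.

/* Sketch only: mark "an IRQ handler ran" on a path that bypasses the
 * usual irq_enter()/irq_exit() bookkeeping, as the hunk above does with
 * ft_irq_fired(). All names here are hypothetical. */
#include <stdio.h>

static unsigned long fake_irq_count;

static void fake_ft_irq_fired(void)
{
	fake_irq_count++;	/* what the tracing hook conceptually records */
}

static void fake_scheduler_ipi(int have_work)
{
	if (!have_work) {
		/* no irq_enter()/irq_exit() on this path, so account manually */
		fake_ft_irq_fired();
		return;
	}
	/* ... otherwise process the wake list with full IRQ accounting ... */
}

int main(void)
{
	fake_scheduler_ipi(0);	/* empty wake list: early return */
	fake_scheduler_ipi(1);	/* non-empty wake list */
	printf("manually accounted IRQs: %lu\n", fake_irq_count);
	return 0;
}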
@@ -4420,23 +4424,20 @@ litmus_need_resched_nonpreemptible:
 		raw_spin_unlock_irq(&rq->lock);
 	}
 
+	TS_SCHED2_START(prev);
 	sched_trace_task_switch_to(current);
 
 	post_schedule(rq);
 
 	if (sched_state_validate_switch()) {
-		TRACE_CUR("cpu %d: have to redo scheduling decision!\n", cpu);
+		TS_SCHED2_END(prev);
 		goto litmus_need_resched_nonpreemptible;
 	}
-	else if (current->policy == SCHED_LITMUS) {
-		TRACE_CUR("cpu %d: valid switch to rt task %s/%d.\n", cpu, current->comm, current->pid);
-	}
-	else {
-		// TRACE_CUR("cpu %d: switch: %s/%d\n", cpu, current->comm, current->pid);
-	}
 
 	preempt_enable_no_resched();
 
+	TS_SCHED2_END(prev);
+
 	if (need_resched())
 		goto need_resched;
 
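Note on the hunk above: TS_SCHED2_START(prev) and TS_SCHED2_END(prev) bracket the tail of the scheduling path around sched_trace_task_switch_to() and post_schedule(), and the redo branch now closes the interval before jumping back, so every pass gets its own start/end pair. Below is a simplified user-space sketch of that start/end discipline, using clock_gettime() in place of the real timestamp macros, whose implementation is not part of this patch.

/* Sketch only: a start/end timestamp pair that is closed on every exit
 * path, including the retry path, mirroring how the hunk above pairs
 * TS_SCHED2_START()/TS_SCHED2_END() around the post-schedule work. */
#include <stdio.h>
#include <time.h>

static struct timespec ts_start;

static void ts_region_start(void)
{
	clock_gettime(CLOCK_MONOTONIC, &ts_start);
}

static void ts_region_end(void)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	printf("region took %ld ns\n",
	       (long)(now.tv_sec - ts_start.tv_sec) * 1000000000L +
	       (now.tv_nsec - ts_start.tv_nsec));
}

int main(void)
{
	int redo = 1;

again:
	ts_region_start();
	/* ... work corresponding to the post-schedule phase ... */
	if (redo--) {
		ts_region_end();	/* close the interval before retrying */
		goto again;
	}
	ts_region_end();		/* close the interval on the normal path */
	return 0;
}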
@@ -4717,17 +4718,6 @@ void complete_all(struct completion *x)
 }
 EXPORT_SYMBOL(complete_all);
 
-void complete_n(struct completion *x, int n)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&x->wait.lock, flags);
-	x->done += n;
-	__wake_up_common(&x->wait, TASK_NORMAL, n, 0, NULL);
-	spin_unlock_irqrestore(&x->wait.lock, flags);
-}
-EXPORT_SYMBOL(complete_n);
-
 static inline long __sched
 do_wait_for_common(struct completion *x, long timeout, int state)
 {
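Note on the hunk above: the removed complete_n() took the completion's wait-queue lock once, credited x->done with n events, and woke up to n waiters in a single __wake_up_common() call. Functionally similar behaviour is available from the stock API by calling complete() n times, since each call credits one event and wakes at most one waiter, although the lock is then taken once per iteration rather than once for the whole batch. A hedged sketch of such a helper follows; complete_n_times is an illustrative name, not an existing kernel function.

/* Sketch only: loop over the standard complete() to approximate the
 * removed complete_n(). Each complete() increments the done count and
 * wakes at most one waiter; unlike complete_n(), the wait-queue lock is
 * acquired once per call instead of once for the whole batch. */
#include <linux/completion.h>

static inline void complete_n_times(struct completion *x, int n)
{
	while (n-- > 0)
		complete(x);
}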