Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 3a8fb30a91b1..3e71ebb101c2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2794,7 +2794,13 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
-	perf_event_task_sched_in(current, cpu_of(rq));
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	local_irq_disable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+	perf_event_task_sched_in(current);
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	local_irq_enable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
 
 	fire_sched_in_preempt_notifiers(current);
@@ -5309,7 +5315,7 @@ void scheduler_tick(void)
 	curr->sched_class->task_tick(rq, curr, 0);
 	raw_spin_unlock(&rq->lock);
 
-	perf_event_task_tick(curr, cpu);
+	perf_event_task_tick(curr);
 
 #ifdef CONFIG_SMP
 	rq->idle_at_tick = idle_cpu(cpu);
@@ -5523,7 +5529,7 @@ need_resched_nonpreemptible:
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
-		perf_event_task_sched_out(prev, next, cpu);
+		perf_event_task_sched_out(prev, next);
 
 		rq->nr_switches++;
 		rq->curr = next;
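
All three call sites now pass only task pointers, so the hook declarations presumably drop their cpu argument as well. Below is a minimal sketch of the prototypes implied by the updated call sites; it is inferred from this diff only, not copied from include/linux/perf_event.h, and the inline rationale for the irq bracketing is likewise an assumption.

/*
 * Sketch only: prototypes inferred from the new call shapes in this diff.
 * If the perf core now looks up the current CPU internally (e.g. via
 * smp_processor_id()), that would explain why the
 * __ARCH_WANT_INTERRUPTS_ON_CTXSW path above brackets the sched-in hook
 * with local_irq_disable()/local_irq_enable().
 */
struct task_struct;

extern void perf_event_task_sched_in(struct task_struct *task);
extern void perf_event_task_sched_out(struct task_struct *task,
				      struct task_struct *next);
extern void perf_event_task_tick(struct task_struct *task);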