about | summary | refs | log | tree | commit | diff | stats
path: root/kernel/sched.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 12
1 files changed, 9 insertions, 3 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 3218f5213717..9d163f83e5c3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2799,7 +2799,13 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
-	perf_event_task_sched_in(current, cpu_of(rq));
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	local_irq_disable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+	perf_event_task_sched_in(current);
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	local_irq_enable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
 
 	fire_sched_in_preempt_notifiers(current);
@@ -5314,7 +5320,7 @@ void scheduler_tick(void)
 	curr->sched_class->task_tick(rq, curr, 0);
 	raw_spin_unlock(&rq->lock);
 
-	perf_event_task_tick(curr, cpu);
+	perf_event_task_tick(curr);
 
 #ifdef CONFIG_SMP
 	rq->idle_at_tick = idle_cpu(cpu);
@@ -5528,7 +5534,7 @@ need_resched_nonpreemptible:
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
-		perf_event_task_sched_out(prev, next, cpu);
+		perf_event_task_sched_out(prev, next);
 
 		rq->nr_switches++;
 		rq->curr = next;