Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 21 ++++++++++++++++-----
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 0b914fc90a55..6a212c97f523 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -602,6 +602,11 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
+#define rcu_dereference_check_sched_domain(p) \
+	rcu_dereference_check((p), \
+			      rcu_read_lock_sched_held() || \
+			      lockdep_is_held(&sched_domains_mutex))
+
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.
@@ -610,7 +615,7 @@ static inline int cpu_of(struct rq *rq)
  * preempt-disabled sections.
  */
 #define for_each_domain(cpu, __sd) \
-	for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
+	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
 
 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
 #define this_rq()		(&__get_cpu_var(runqueues))
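
The two hunks above introduce lockdep-verified RCU dereferencing for the sched-domain tree. As a minimal sketch (not part of the patch) of the pattern the new macro encodes: a reader is legitimate either inside an RCU-sched read-side critical section (preemption disabled) or while holding sched_domains_mutex, the mutex writers take when rebuilding the domains. The cpu variable below is assumed to be a valid CPU id.

/*
 * Sketch only, not from this diff: the reader-side pattern that
 * rcu_dereference_check_sched_domain() legitimizes.  Either of the
 * two conditions in the check satisfies lockdep's PROVE_RCU.
 */
struct sched_domain *sd;

rcu_read_lock_sched();		/* preemption off => RCU-sched reader */
sd = rcu_dereference_check(cpu_rq(cpu)->sd,
			   rcu_read_lock_sched_held() ||
			   lockdep_is_held(&sched_domains_mutex));
for (; sd; sd = sd->parent)
	;	/* walk the domain tree, as for_each_domain() does */
rcu_read_unlock_sched();
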
@@ -1481,7 +1486,7 @@ static unsigned long target_load(int cpu, int type)
 
 static struct sched_group *group_of(int cpu)
 {
-	struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+	struct sched_domain *sd = rcu_dereference_sched(cpu_rq(cpu)->sd);
 
 	if (!sd)
 		return NULL;
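
group_of() takes the tighter variant: rcu_dereference_sched() only accepts an RCU-sched read side, which is always the case in the preempt-disabled load-balancing paths that call it. Presumably (an assumption about rcupdate.h, not shown in this diff) it is simply the single-condition form of the checked dereference:

/* Presumed definition, shown for comparison with the new
 * rcu_dereference_check_sched_domain() macro; not part of this diff. */
#define rcu_dereference_sched(p) \
	rcu_dereference_check((p), rcu_read_lock_sched_held())
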
@@ -2798,7 +2803,13 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
-	perf_event_task_sched_in(current, cpu_of(rq));
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	local_irq_disable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+	perf_event_task_sched_in(current);
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	local_irq_enable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
 
 	fire_sched_in_preempt_notifiers(current);
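
perf_event_task_sched_in() loses its cpu argument here, and the call is bracketed with an explicit IRQ disable/enable pair on architectures that define __ARCH_WANT_INTERRUPTS_ON_CTXSW, i.e. those that run the context switch with interrupts enabled; on everything else this point is typically reached with interrupts still off, so no bracketing is needed. A sketch of what the hunk reduces to after preprocessing (an inference from the #ifdefs above, not additional code from the patch):

/* Common case: __ARCH_WANT_INTERRUPTS_ON_CTXSW not defined,
 * interrupts are presumably already disabled at this point. */
perf_event_task_sched_in(current);

/* __ARCH_WANT_INTERRUPTS_ON_CTXSW defined: the arch switches with
 * interrupts on, so disable them just around the perf hook. */
local_irq_disable();
perf_event_task_sched_in(current);
local_irq_enable();
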
@@ -3504,7 +3515,7 @@ void scheduler_tick(void)
 	curr->sched_class->task_tick(rq, curr, 0);
 	raw_spin_unlock(&rq->lock);
 
-	perf_event_task_tick(curr, cpu);
+	perf_event_task_tick(curr);
 
 #ifdef CONFIG_SMP
 	rq->idle_at_tick = idle_cpu(cpu);
@@ -3718,7 +3729,7 @@ need_resched_nonpreemptible:
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
-		perf_event_task_sched_out(prev, next, cpu);
 
 		rq->nr_switches++;
 		rq->curr = next;
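
The last three hunks follow the same theme: perf_event_task_sched_in(), perf_event_task_tick() and perf_event_task_sched_out() are only ever invoked for the CPU the scheduler is currently running on, so the explicit cpu argument (and, for sched_in, the redundant cpu_of(rq)) is dropped and the perf core can determine the CPU itself. The prototypes below are what the call sites in this file now presumably expect; the actual declarations live in include/linux/perf_event.h, which is outside this diffstat:

/* Presumed post-patch prototypes, listed only for orientation. */
extern void perf_event_task_sched_in(struct task_struct *task);
extern void perf_event_task_sched_out(struct task_struct *task,
				      struct task_struct *next);
extern void perf_event_task_tick(struct task_struct *task);
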