Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 404e2017c0cf..caf54e1eef6e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -645,6 +645,11 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
+#define rcu_dereference_check_sched_domain(p) \
+	rcu_dereference_check((p), \
+			      rcu_read_lock_sched_held() || \
+			      lockdep_is_held(&sched_domains_mutex))
+
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.
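The helper added above wraps rcu_dereference_check(): lockdep now accepts a dereference of rq->sd either from an RCU-sched read side or with sched_domains_mutex held, so update-side code holding the mutex no longer trips the check. A minimal sketch of the two legal calling contexts, assuming code inside kernel/sched.c (where sched_domains_mutex is visible); neither snippet is part of this patch:

	struct sched_domain *sd;

	/* (a) Reader: disabling preemption marks an RCU-sched read side,
	 * making rcu_read_lock_sched_held() true. */
	preempt_disable();
	sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd);
	/* ... walk sd and sd->parent ... */
	preempt_enable();

	/* (b) Updater: holding sched_domains_mutex satisfies
	 * lockdep_is_held(); no RCU read section is required. */
	mutex_lock(&sched_domains_mutex);
	sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd);
	mutex_unlock(&sched_domains_mutex);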
@@ -653,7 +658,7 @@ static inline int cpu_of(struct rq *rq)
  * preempt-disabled sections.
  */
 #define for_each_domain(cpu, __sd) \
-	for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
+	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
 
 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
 #define this_rq()		(&__get_cpu_var(runqueues))
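for_each_domain() callers keep their existing discipline (preemption disabled across the walk, per the comment above) and now document it to lockdep through the checked dereference. A hypothetical caller, shown only to illustrate the expected usage pattern:

	/* Hypothetical helper (not in this patch): count the domain
	 * levels above a CPU. Preempt-off = RCU-sched read side. */
	static int domain_levels(int cpu)
	{
		struct sched_domain *sd;
		int levels = 0;

		preempt_disable();
		for_each_domain(cpu, sd)
			levels++;
		preempt_enable();

		return levels;
	}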
@@ -1550,7 +1555,7 @@ static unsigned long target_load(int cpu, int type)
 
 static struct sched_group *group_of(int cpu)
 {
-	struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+	struct sched_domain *sd = rcu_dereference_sched(cpu_rq(cpu)->sd);
 
 	if (!sd)
 		return NULL;
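group_of() instead moves to rcu_dereference_sched(), the RCU-sched flavor whose lockdep condition is rcu_read_lock_sched_held() alone; unlike the macro above, holding sched_domains_mutex is not accepted here. on_null_domain() further down receives the identical conversion. A sketch of a legal caller, on the assumption that it runs in a preempt-off context, as the load-balancing paths that reach group_of() do:

	/* Illustrative only: any preempt-off context (rq->lock held,
	 * hardirq/softirq) counts as an RCU-sched read side. */
	struct sched_group *grp;
	unsigned int power = 0;

	preempt_disable();
	grp = group_of(cpu);
	if (grp)
		power = grp->cpu_power;	/* field name as of this kernel */
	preempt_enable();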
@@ -2832,7 +2837,13 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
-	perf_event_task_sched_in(current, cpu_of(rq));
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	local_irq_disable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+	perf_event_task_sched_in(current);
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	local_irq_enable();
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
 
 	fire_sched_in_preempt_notifiers(current);
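perf_event_task_sched_in() loses its cpu argument, presumably because the hook now determines the local CPU itself; that is only safe against migration and concurrent events with interrupts off. On architectures that define __ARCH_WANT_INTERRUPTS_ON_CTXSW, finish_task_switch() runs with interrupts enabled, hence the bracketing above. The same guard pattern in isolation, with a hypothetical hook standing in for the perf callback:

	#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
		local_irq_disable();	/* this config leaves irqs on here */
	#endif
		hook_requiring_irqs_off();	/* hypothetical stand-in */
	#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
		local_irq_enable();
	#endif

local_irq_save()/local_irq_restore() would also be correct, but the interrupt state at this point is known per configuration, so the cheaper unconditional disable/enable pair suffices.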
@@ -4937,7 +4948,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 
 static inline int on_null_domain(int cpu)
 {
-	return !rcu_dereference(cpu_rq(cpu)->sd);
+	return !rcu_dereference_sched(cpu_rq(cpu)->sd);
 }
 
 /*
@@ -5358,7 +5369,7 @@ void scheduler_tick(void)
 	curr->sched_class->task_tick(rq, curr, 0);
 	raw_spin_unlock(&rq->lock);
 
-	perf_event_task_tick(curr, cpu);
+	perf_event_task_tick(curr);
 
 #ifdef CONFIG_SMP
 	rq->idle_at_tick = idle_cpu(cpu);
@@ -5572,7 +5583,7 @@ need_resched_nonpreemptible:
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
-		perf_event_task_sched_out(prev, next, cpu);
+		perf_event_task_sched_out(prev, next);
 
 		rq->nr_switches++;
 		rq->curr = next;
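The scheduler_tick() and schedule() hunks apply the same signature change to the remaining perf hooks: each is only ever invoked for the local CPU, so the explicit cpu argument is dropped. The call sites reduce to the forms below; the matching declarations in include/linux/perf_event.h are assumed to be updated by the same patch, outside this kernel/sched.c-limited view:

	perf_event_task_sched_in(current);	/* finish_task_switch() */
	perf_event_task_tick(curr);		/* scheduler_tick() */
	perf_event_task_sched_out(prev, next);	/* schedule() */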