author		Linus Torvalds <torvalds@linux-foundation.org>	2013-06-20 14:18:35 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-06-20 14:18:35 -0400
commit		a3d5c3460a86f52ea435b3fb98be112bd18faabc (patch)
tree		48a32968b569af0e0f0af1def3effa0770710fea /kernel
parent		86c76676cfdbf283f6131d5a2783bed3f3d490ea (diff)
parent		29bb9e5a75684106a37593ad75ec75ff8312731b (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
"Two smaller fixes - plus a context tracking tracing fix that is a bit
bigger"
* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
tracing/context-tracking: Add preempt_schedule_context() for tracing
sched: Fix clear NOHZ_BALANCE_KICK
sched/x86: Construct all sibling maps if smt
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/context_tracking.c	40
-rw-r--r--	kernel/sched/core.c	21
2 files changed, 57 insertions, 4 deletions
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 85bdde1137eb..383f8231e436 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -70,6 +70,46 @@ void user_enter(void)
 	local_irq_restore(flags);
 }
 
+#ifdef CONFIG_PREEMPT
+/**
+ * preempt_schedule_context - preempt_schedule called by tracing
+ *
+ * The tracing infrastructure uses preempt_enable_notrace to prevent
+ * recursion and tracing preempt enabling caused by the tracing
+ * infrastructure itself. But as tracing can happen in areas coming
+ * from userspace or just about to enter userspace, a preempt enable
+ * can occur before user_exit() is called. This will cause the scheduler
+ * to be called when the system is still in usermode.
+ *
+ * To prevent this, the preempt_enable_notrace will use this function
+ * instead of preempt_schedule() to exit user context if needed before
+ * calling the scheduler.
+ */
+void __sched notrace preempt_schedule_context(void)
+{
+	struct thread_info *ti = current_thread_info();
+	enum ctx_state prev_ctx;
+
+	if (likely(ti->preempt_count || irqs_disabled()))
+		return;
+
+	/*
+	 * Need to disable preemption in case user_exit() is traced
+	 * and the tracer calls preempt_enable_notrace() causing
+	 * an infinite recursion.
+	 */
+	preempt_disable_notrace();
+	prev_ctx = exception_enter();
+	preempt_enable_no_resched_notrace();
+
+	preempt_schedule();
+
+	preempt_disable_notrace();
+	exception_exit(prev_ctx);
+	preempt_enable_notrace();
+}
+EXPORT_SYMBOL_GPL(preempt_schedule_context);
+#endif /* CONFIG_PREEMPT */
 
 /**
  * user_exit - Inform the context tracking that the CPU is
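Note that the diffstat is limited to kernel/, so the include/linux/preempt.h half of this series, which actually routes preempt_enable_notrace() to the new helper, is not shown above. Roughly, the dispatch works like the following sketch (abridged from the 3.10-era headers; treat the exact macro bodies as approximate):

/*
 * Sketch of the include/linux/preempt.h side of this series (not part
 * of the kernel/-limited diff above); details are approximate.
 */
#ifdef CONFIG_CONTEXT_TRACKING
void preempt_schedule_context(void);

#define preempt_check_resched_context() \
do { \
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
		preempt_schedule_context(); \
} while (0)
#else
#define preempt_check_resched_context() preempt_check_resched()
#endif

#define preempt_enable_notrace() \
do { \
	preempt_enable_no_resched_notrace(); \
	barrier(); \
	preempt_check_resched_context(); \
} while (0)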
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e1a27f918723..e8b335016c52 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -633,7 +633,19 @@ void wake_up_nohz_cpu(int cpu)
 static inline bool got_nohz_idle_kick(void)
 {
 	int cpu = smp_processor_id();
-	return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+
+	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
+		return false;
+
+	if (idle_cpu(cpu) && !need_resched())
+		return true;
+
+	/*
+	 * We can't run Idle Load Balance on this CPU for this time so we
+	 * cancel it and clear NOHZ_BALANCE_KICK
+	 */
+	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+	return false;
 }
 
 #else /* CONFIG_NO_HZ_COMMON */
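Why a stale NOHZ_BALANCE_KICK bit matters: the kick side in kernel/sched/fair.c uses test_and_set_bit() as an "IPI already pending" guard before sending the reschedule IPI, so a bit that is never cleared suppresses every future idle-balance kick to that CPU. A simplified sketch of nohz_balancer_kick(), abridged from the 3.10-era source (ilb selection and error handling elided):

/*
 * Abridged sketch of nohz_balancer_kick() from kernel/sched/fair.c
 * (3.10 era); ilb selection and error handling elided.
 */
static void nohz_balancer_kick(int cpu)
{
	int ilb_cpu = find_new_ilb(cpu);	/* pick an idle CPU to balance on our behalf */

	if (ilb_cpu >= nr_cpu_ids)
		return;

	/*
	 * A set bit means a kick is already pending, so no IPI is sent.
	 * This is why a bit that never gets cleared wedges nohz idle
	 * balancing on that CPU.
	 */
	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
		return;

	smp_send_reschedule(ilb_cpu);	/* arrives in scheduler_ipi() below */
}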
@@ -1393,8 +1405,9 @@ static void sched_ttwu_pending(void)
 
 void scheduler_ipi(void)
 {
-	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()
-			&& !tick_nohz_full_cpu(smp_processor_id()))
+	if (llist_empty(&this_rq()->wake_list)
+			&& !tick_nohz_full_cpu(smp_processor_id())
+			&& !got_nohz_idle_kick())
 		return;
 
 	/*
@@ -1417,7 +1430,7 @@ void scheduler_ipi(void)
 	/*
 	 * Check if someone kicked us for doing the nohz idle load balance.
 	 */
-	if (unlikely(got_nohz_idle_kick() && !need_resched())) {
+	if (unlikely(got_nohz_idle_kick())) {
 		this_rq()->idle_balance = 1;
 		raise_softirq_irqoff(SCHED_SOFTIRQ);
 	}
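The bit is normally cleared only once the SCHED_SOFTIRQ handler actually runs; roughly, nohz_idle_balance() in kernel/sched/fair.c has the shape sketched below (simplified from the 3.10-era code, balancing loop elided). Before this fix, bailing out of scheduler_ipi() on need_resched() meant the softirq was never raised, so nothing ever reached the clear_bit() at the end. That is exactly the leak that moving the need_resched() check and the clearing into got_nohz_idle_kick() closes.

/*
 * Simplified sketch of nohz_idle_balance() from kernel/sched/fair.c
 * (3.10 era); the per-CPU rebalancing loop is elided.
 */
static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
{
	if (idle != CPU_IDLE ||
	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
		goto end;

	/* ... walk nohz.idle_cpus_mask and rebalance on behalf of each CPU ... */

end:
	/* Before this fix, this was the only place the bit got cleared. */
	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
}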