aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched/core.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2014-06-18 12:47:03 -0400
committerIngo Molnar <mingo@kernel.org>2014-06-18 12:47:03 -0400
commitd490b3e2c23369c6adfa183d18d9a24ced247797 (patch)
tree3355c2173228ad1986299c84d22d40ce24764390 /kernel/sched/core.c
parentebe06187bf2aec10d537ce4595e416035367d703 (diff)
parent3882ec643997757824cd5f25180cd8a787b9dbe1 (diff)
Merge branch 'timers/nohz-irq-work-v7' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into timers/nohz
Pull nohz updates from Frederic Weisbecker: " This set moves the nohz kick, used to notify a full dynticks CPU when events require tick rescheduling, out of the scheduler tick to a dedicated IPI. This debloats a bit the scheduler IPI from off-topic work that was abusing that scheduler fast path for its convenient asynchronous properties. Now the nohz kick uses irq-work for its own needs. Of course this implied quite some background infrastructure rework, including: * Clean up some irq-work internals * Implement remote irq-work * Implement nohz kick on top of remote irq-work * Move full dynticks timer enqueue notification to new kick * Move multi-task notification to new kick * Remove unnecessary barriers on multi-task notification " Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--kernel/sched/core.c22
1 file changed, 13 insertions, 9 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3bdf01b494fe..7f3063c153d8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -684,10 +684,16 @@ static void wake_up_idle_cpu(int cpu)
684 684
685static bool wake_up_full_nohz_cpu(int cpu) 685static bool wake_up_full_nohz_cpu(int cpu)
686{ 686{
687 /*
688 * We just need the target to call irq_exit() and re-evaluate
689 * the next tick. The nohz full kick at least implies that.
690 * If needed we can still optimize that later with an
691 * empty IRQ.
692 */
687 if (tick_nohz_full_cpu(cpu)) { 693 if (tick_nohz_full_cpu(cpu)) {
688 if (cpu != smp_processor_id() || 694 if (cpu != smp_processor_id() ||
689 tick_nohz_tick_stopped()) 695 tick_nohz_tick_stopped())
690 smp_send_reschedule(cpu); 696 tick_nohz_full_kick_cpu(cpu);
691 return true; 697 return true;
692 } 698 }
693 699
@@ -734,10 +740,11 @@ bool sched_can_stop_tick(void)
734 740
735 rq = this_rq(); 741 rq = this_rq();
736 742
737 /* Make sure rq->nr_running update is visible after the IPI */ 743 /*
738 smp_rmb(); 744 * More than one running task need preemption.
739 745 * nr_running update is assumed to be visible
740 /* More than one running task need preemption */ 746 * after IPI is sent from wakers.
747 */
741 if (rq->nr_running > 1) 748 if (rq->nr_running > 1)
742 return false; 749 return false;
743 750
@@ -1568,9 +1575,7 @@ void scheduler_ipi(void)
1568 */ 1575 */
1569 preempt_fold_need_resched(); 1576 preempt_fold_need_resched();
1570 1577
1571 if (llist_empty(&this_rq()->wake_list) 1578 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
1572 && !tick_nohz_full_cpu(smp_processor_id())
1573 && !got_nohz_idle_kick())
1574 return; 1579 return;
1575 1580
1576 /* 1581 /*
@@ -1587,7 +1592,6 @@ void scheduler_ipi(void)
1587 * somewhat pessimize the simple resched case. 1592 * somewhat pessimize the simple resched case.
1588 */ 1593 */
1589 irq_enter(); 1594 irq_enter();
1590 tick_nohz_full_check();
1591 sched_ttwu_pending(); 1595 sched_ttwu_pending();
1592 1596
1593 /* 1597 /*