aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2014-07-05 05:06:10 -0400
committerIngo Molnar <mingo@kernel.org>2014-07-05 05:06:10 -0400
commit51da9830d7a58c8f77127c622ee57d453c88af09 (patch)
tree85bd2caae0344f77f0afd5f9617a45855000b821 /kernel/sched
parent5d5e2b1bcbdc996e72815c03fdc5ea82c4642397 (diff)
parentd490b3e2c23369c6adfa183d18d9a24ced247797 (diff)
Merge branch 'timers/nohz' into sched/core
Merge these two, because upcoming patches will touch both areas.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/core.c22
-rw-r--r--kernel/sched/sched.h12
2 files changed, 22 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3bdf01b494fe..7f3063c153d8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -684,10 +684,16 @@ static void wake_up_idle_cpu(int cpu)
684 684
685static bool wake_up_full_nohz_cpu(int cpu) 685static bool wake_up_full_nohz_cpu(int cpu)
686{ 686{
687 /*
688 * We just need the target to call irq_exit() and re-evaluate
689 * the next tick. The nohz full kick at least implies that.
690 * If needed we can still optimize that later with an
691 * empty IRQ.
692 */
687 if (tick_nohz_full_cpu(cpu)) { 693 if (tick_nohz_full_cpu(cpu)) {
688 if (cpu != smp_processor_id() || 694 if (cpu != smp_processor_id() ||
689 tick_nohz_tick_stopped()) 695 tick_nohz_tick_stopped())
690 smp_send_reschedule(cpu); 696 tick_nohz_full_kick_cpu(cpu);
691 return true; 697 return true;
692 } 698 }
693 699
@@ -734,10 +740,11 @@ bool sched_can_stop_tick(void)
734 740
735 rq = this_rq(); 741 rq = this_rq();
736 742
737 /* Make sure rq->nr_running update is visible after the IPI */ 743 /*
738 smp_rmb(); 744 * More than one running task need preemption.
739 745 * nr_running update is assumed to be visible
740 /* More than one running task need preemption */ 746 * after IPI is sent from wakers.
747 */
741 if (rq->nr_running > 1) 748 if (rq->nr_running > 1)
742 return false; 749 return false;
743 750
@@ -1568,9 +1575,7 @@ void scheduler_ipi(void)
1568 */ 1575 */
1569 preempt_fold_need_resched(); 1576 preempt_fold_need_resched();
1570 1577
1571 if (llist_empty(&this_rq()->wake_list) 1578 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
1572 && !tick_nohz_full_cpu(smp_processor_id())
1573 && !got_nohz_idle_kick())
1574 return; 1579 return;
1575 1580
1576 /* 1581 /*
@@ -1587,7 +1592,6 @@ void scheduler_ipi(void)
1587 * somewhat pessimize the simple resched case. 1592 * somewhat pessimize the simple resched case.
1588 */ 1593 */
1589 irq_enter(); 1594 irq_enter();
1590 tick_nohz_full_check();
1591 sched_ttwu_pending(); 1595 sched_ttwu_pending();
1592 1596
1593 /* 1597 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 31cc02ebc54e..eb8567610295 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1221,9 +1221,15 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
1221#ifdef CONFIG_NO_HZ_FULL 1221#ifdef CONFIG_NO_HZ_FULL
1222 if (prev_nr < 2 && rq->nr_running >= 2) { 1222 if (prev_nr < 2 && rq->nr_running >= 2) {
1223 if (tick_nohz_full_cpu(rq->cpu)) { 1223 if (tick_nohz_full_cpu(rq->cpu)) {
1224 /* Order rq->nr_running write against the IPI */ 1224 /*
1225 smp_wmb(); 1225 * Tick is needed if more than one task runs on a CPU.
1226 smp_send_reschedule(rq->cpu); 1226 * Send the target an IPI to kick it out of nohz mode.
1227 *
1228 * We assume that IPI implies full memory barrier and the
1229 * new value of rq->nr_running is visible on reception
1230 * from the target.
1231 */
1232 tick_nohz_full_kick_cpu(rq->cpu);
1227 } 1233 }
1228 } 1234 }
1229#endif 1235#endif