author    Frederic Weisbecker <fweisbec@gmail.com>  2013-05-02 21:39:05 -0400
committer Frederic Weisbecker <fweisbec@gmail.com>  2013-05-04 02:32:02 -0400
commit    265f22a975c1e4cc3a4d1f94a3ec53ffbb6f5b9f (patch)
tree      c5b7ec6b64fc31e879e730d2edf8e836cfaf7e9b /kernel/sched/core.c
parent    73c30828771acafb0a5e3a1c4cf75e6c5dc5f98a (diff)
sched: Keep at least 1 tick per second for active dynticks tasks
The scheduler doesn't yet fully support environments with a single task
running without a periodic tick. In order to ensure we still maintain
the duties of scheduler_tick(), keep at least 1 tick per second.

This makes sure that we keep the progression of various scheduler
accounting and background maintenance even with a very low granularity.
Examples include cpu load, sched average, CFS entity vruntime, avenrun
and events such as load balancing, amongst other details handled in
sched_class::task_tick().

This limitation will be removed in the future once we get these
individual items to work in full dynticks CPUs.

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Hakan Akkan <hakanakkan@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Kevin Hilman <khilman@linaro.org>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
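For context on how the new helper is consumed: the call site lives in
kernel/time/tick-sched.c, which this filtered view does not show (the
diffstat below covers kernel/sched/core.c only). The sketch below is
therefore an assumption about that file's tick-stop path;
tick_nohz_stop_sched_tick(), ts->inidle and time_delta are names
recalled from that code, not part of the hunks shown here.

	/*
	 * Hedged sketch of the consumer in tick_nohz_stop_sched_tick():
	 * before programming the next timer, clamp the deferment so a
	 * busy full-dynticks CPU still ticks at least once per second.
	 */
#ifdef CONFIG_NO_HZ_FULL
	if (!ts->inidle) {
		/* 0 means a tick is already overdue: don't stop it. */
		time_delta = min(time_delta, scheduler_tick_max_deferment());
	}
#endif

The net effect: a single busy task on a full dynticks CPU can still
defer the tick, but never past the 1 Hz deadline tracked via
rq->last_sched_tick.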
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e94842d4400c..3bdf986a091a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2736,8 +2736,35 @@ void scheduler_tick(void)
 	rq->idle_balance = idle_cpu(cpu);
 	trigger_load_balance(rq, cpu);
 #endif
+	rq_last_tick_reset(rq);
 }
 
+#ifdef CONFIG_NO_HZ_FULL
+/**
+ * scheduler_tick_max_deferment
+ *
+ * Keep at least one tick per second when a single
+ * active task is running because the scheduler doesn't
+ * yet completely support full dynticks environment.
+ *
+ * This makes sure that uptime, CFS vruntime, load
+ * balancing, etc... continue to move forward, even
+ * with a very low granularity.
+ */
+u64 scheduler_tick_max_deferment(void)
+{
+	struct rq *rq = this_rq();
+	unsigned long next, now = ACCESS_ONCE(jiffies);
+
+	next = rq->last_sched_tick + HZ;
+
+	if (time_before_eq(next, now))
+		return 0;
+
+	return jiffies_to_usecs(next - now) * NSEC_PER_USEC;
+}
+#endif
+
 notrace unsigned long get_parent_ip(unsigned long addr)
 {
 	if (in_lock_functions(addr)) {
@@ -6993,6 +7020,9 @@ void __init sched_init(void)
 #ifdef CONFIG_NO_HZ_COMMON
 		rq->nohz_flags = 0;
 #endif
+#ifdef CONFIG_NO_HZ_FULL
+		rq->last_sched_tick = 0;
+#endif
 #endif
 		init_rq_hrtick(rq);
 		atomic_set(&rq->nr_iowait, 0);
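To make the jiffies arithmetic in scheduler_tick_max_deferment()
concrete, here is a minimal standalone model that compiles as ordinary
userspace C. HZ, time_before_eq() and jiffies_to_usecs() are simplified
stand-ins for the kernel versions, and max_deferment()/last_sched_tick
are hypothetical names for the function and rq->last_sched_tick above;
treat it as a sketch of the arithmetic, not kernel code.

#include <stdio.h>

#define HZ		1000UL		/* assumed tick rate: 1 jiffy = 1 ms */
#define NSEC_PER_USEC	1000ULL

/* Wraparound-safe "a <= b" for jiffies, as in the kernel. */
#define time_before_eq(a, b)	((long)((a) - (b)) <= 0)

static unsigned long long jiffies_to_usecs(unsigned long j)
{
	return j * (1000000ULL / HZ);
}

/* Model of scheduler_tick_max_deferment(): ns until the 1 Hz deadline. */
static unsigned long long max_deferment(unsigned long last_sched_tick,
					unsigned long now)
{
	unsigned long next = last_sched_tick + HZ;

	if (time_before_eq(next, now))
		return 0;	/* deadline passed: tick right away */

	return jiffies_to_usecs(next - now) * NSEC_PER_USEC;
}

int main(void)
{
	/* Half a second after the last tick: may defer ~500 ms more. */
	printf("%llu ns\n", max_deferment(1000UL, 1500UL));

	/*
	 * Deadline passed 16 jiffies ago, but jiffies wrapped: a plain
	 * unsigned "next <= now" would defer almost forever, while
	 * time_before_eq() correctly answers "tick now".
	 */
	printf("%llu ns\n", max_deferment(-1011UL, 5UL));
	return 0;
}

The second call prints 0: next wraps to ULONG_MAX - 10 while now is 5,
and only the signed-difference comparison sees that the deadline lies
16 jiffies in the past.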