path: root/kernel/sched
author	Frederic Weisbecker <fweisbec@gmail.com>	2013-05-02 21:39:05 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2013-05-04 02:32:02 -0400
commit	265f22a975c1e4cc3a4d1f94a3ec53ffbb6f5b9f (patch)
tree	c5b7ec6b64fc31e879e730d2edf8e836cfaf7e9b /kernel/sched
parent	73c30828771acafb0a5e3a1c4cf75e6c5dc5f98a (diff)
sched: Keep at least 1 tick per second for active dynticks tasks
The scheduler doesn't yet fully support environments with a single task running without a periodic tick. In order to ensure we still maintain the duties of scheduler_tick(), keep at least 1 tick per second.

This makes sure that we keep the progression of various scheduler accounting and background maintenance even with a very low granularity. Examples include cpu load, sched average, CFS entity vruntime, avenrun and events such as load balancing, amongst other details handled in sched_class::task_tick().

This limitation will be removed in the future once we get these individual items to work in full dynticks CPUs.

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Hakan Akkan <hakanakkan@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Kevin Hilman <khilman@linaro.org>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
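The arithmetic in scheduler_tick_max_deferment() below is a plain jiffies deadline: the next forced tick is due HZ jiffies (one second) after the last recorded tick, and the remaining distance, converted to nanoseconds, bounds how long the tick may stay stopped. A minimal userspace sketch of that calculation, assuming HZ=1000 and inlining wrap-safe stand-ins for the kernel's time_before_eq() and jiffies_to_usecs() helpers (everything here is illustrative, not kernel API):

    #include <stdint.h>
    #include <stdio.h>

    #define HZ 1000                        /* assumed: 1 jiffy = 1 ms */
    #define NSEC_PER_USEC 1000ULL

    /* Illustrative stand-in for jiffies_to_usecs() at HZ=1000. */
    static uint64_t jiffies_to_usecs(unsigned long j)
    {
            return (uint64_t)j * 1000;
    }

    /* Mirrors the patch's logic: at most one second between ticks. */
    static uint64_t max_deferment_ns(unsigned long last_sched_tick,
                                     unsigned long now)
    {
            unsigned long next = last_sched_tick + HZ;

            if ((long)(next - now) <= 0)   /* wrap-safe time_before_eq() */
                    return 0;
            return jiffies_to_usecs(next - now) * NSEC_PER_USEC;
    }

    int main(void)
    {
            /* Last tick 600 jiffies ago: the tick may sleep 400 ms more. */
            printf("%llu\n", (unsigned long long)max_deferment_ns(1000, 1600));
            /* Last tick over a second ago: a tick is due right now. */
            printf("%llu\n", (unsigned long long)max_deferment_ns(1000, 2100));
            return 0;
    }

With HZ=1000 this prints 400000000 (400 ms expressed in nanoseconds) and 0.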
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	30
-rw-r--r--	kernel/sched/idle_task.c	1
-rw-r--r--	kernel/sched/sched.h	10
3 files changed, 41 insertions, 0 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e94842d4400c..3bdf986a091a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2736,8 +2736,35 @@ void scheduler_tick(void)
 	rq->idle_balance = idle_cpu(cpu);
 	trigger_load_balance(rq, cpu);
 #endif
+	rq_last_tick_reset(rq);
 }
 
+#ifdef CONFIG_NO_HZ_FULL
+/**
+ * scheduler_tick_max_deferment
+ *
+ * Keep at least one tick per second when a single
+ * active task is running because the scheduler doesn't
+ * yet completely support full dynticks environment.
+ *
+ * This makes sure that uptime, CFS vruntime, load
+ * balancing, etc... continue to move forward, even
+ * with a very low granularity.
+ */
+u64 scheduler_tick_max_deferment(void)
+{
+	struct rq *rq = this_rq();
+	unsigned long next, now = ACCESS_ONCE(jiffies);
+
+	next = rq->last_sched_tick + HZ;
+
+	if (time_before_eq(next, now))
+		return 0;
+
+	return jiffies_to_usecs(next - now) * NSEC_PER_USEC;
+}
+#endif
+
 notrace unsigned long get_parent_ip(unsigned long addr)
 {
 	if (in_lock_functions(addr)) {
@@ -6993,6 +7020,9 @@ void __init sched_init(void)
 #ifdef CONFIG_NO_HZ_COMMON
 		rq->nohz_flags = 0;
 #endif
+#ifdef CONFIG_NO_HZ_FULL
+		rq->last_sched_tick = 0;
+#endif
 #endif
 		init_rq_hrtick(rq);
 		atomic_set(&rq->nr_iowait, 0);
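This page is limited to kernel/sched, so the consumer of scheduler_tick_max_deferment() does not appear above; the tick-stop path is expected to clamp its planned sleep with the returned value. A hedged sketch of that clamp, with made-up names since the real integration lives outside this diff:

    #include <stdint.h>

    /* Hypothetical caller: before stopping the tick, cap the planned
     * sleep so the scheduler still gets at least one tick per second.
     * A return of 0 from scheduler_tick_max_deferment() means a tick
     * is already overdue, so the tick must not be stopped at all. */
    static uint64_t clamp_tick_sleep_ns(uint64_t wanted_ns,
                                        uint64_t sched_max_ns)
    {
            return wanted_ns < sched_max_ns ? wanted_ns : sched_max_ns;
    }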
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index b8ce77328341..d8da01008d39 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -17,6 +17,7 @@ select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
 static void pre_schedule_idle(struct rq *rq, struct task_struct *prev)
 {
 	idle_exit_fair(rq);
+	rq_last_tick_reset(rq);
 }
 
 static void post_schedule_idle(struct rq *rq)
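The reset in pre_schedule_idle() runs when the CPU switches away from the idle task, i.e. at the moment it becomes busy again. That restarts the one-second window from wakeup rather than from the last tick before the CPU went idle. A worked example, assuming HZ=1000: a CPU that ticked at jiffy 1000, idled until jiffy 50000 and then received a task would otherwise see scheduler_tick_max_deferment() return 0 and take an immediate, pointless tick; with the reset, the next forced tick is not due until jiffy 51000.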
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 24dc29897749..ce39224d6155 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -410,6 +410,9 @@ struct rq {
 	u64 nohz_stamp;
 	unsigned long nohz_flags;
 #endif
+#ifdef CONFIG_NO_HZ_FULL
+	unsigned long last_sched_tick;
+#endif
 	int skip_clock_update;
 
 	/* capture load from *all* tasks on this cpu: */
@@ -1090,6 +1093,13 @@ static inline void dec_nr_running(struct rq *rq)
 	rq->nr_running--;
 }
 
+static inline void rq_last_tick_reset(struct rq *rq)
+{
+#ifdef CONFIG_NO_HZ_FULL
+	rq->last_sched_tick = jiffies;
+#endif
+}
+
 extern void update_rq_clock(struct rq *rq);
 
 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
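Note that rq_last_tick_reset() compiles to an empty body when CONFIG_NO_HZ_FULL is off, which keeps the call sites in scheduler_tick() and pre_schedule_idle() free of #ifdefs. A tiny standalone demonstration of the same compile-out idiom (names simplified; not the kernel helper itself):

    #include <stdio.h>

    /* #define CONFIG_NO_HZ_FULL */  /* uncomment to compile the stamp in */

    static unsigned long last_sched_tick;

    static inline void last_tick_reset(unsigned long now_jiffies)
    {
    #ifdef CONFIG_NO_HZ_FULL
            last_sched_tick = now_jiffies;
    #endif
    }

    int main(void)
    {
            last_tick_reset(42);  /* a no-op unless CONFIG_NO_HZ_FULL is set */
            printf("%lu\n", last_sched_tick);
            return 0;
    }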