author    Frederic Weisbecker <fweisbec@gmail.com>    2014-03-05 11:05:57 -0500
committer Frederic Weisbecker <fweisbec@gmail.com>    2014-03-13 10:56:44 -0400
commit    300a9d887ea221f344962506f724e02101bacc08 (patch)
tree      ee4bbd30f5a02a957211351b358fc23f33c68681
parent    dee08a72deefac251267ed2717717596aa8b6818 (diff)
sched: Remove needless round trip nsecs <-> tick conversion of steal time
When update_rq_clock_task() accounts the pending steal time for a task, it converts the steal delta from nsecs to ticks and then from ticks back to nsecs. There is no apparent good reason for doing that, though, because both the task clock and the prev steal delta are u64 values storing nsecs. So let's remove the needless conversion.

Cc: Ingo Molnar <mingo@kernel.org>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
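[Editor's note] A minimal userspace sketch of the round trip the patch removes, assuming HZ=1000; TICK_NSEC here is a stand-in for the kernel constant. The old path rounded the accounted steal down to tick granularity, deferring the sub-tick remainder to a later update, while the new path accounts the full nsec delta directly:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel constant, assuming HZ=1000. */
#define TICK_NSEC 1000000ULL    /* nsecs per tick */

/* Mirrors the removed steal_ticks() helper: nsecs -> whole ticks. */
static uint64_t steal_ticks(uint64_t steal)
{
        return steal / TICK_NSEC;
}

int main(void)
{
        uint64_t steal = 1500000ULL;    /* 1.5 ticks of steal time, in nsecs */

        /* Old path: nsecs -> ticks -> nsecs, rounding down to a whole tick. */
        uint64_t old_accounted = steal_ticks(steal) * TICK_NSEC;

        /* New path: the u64 nsec value is used as-is. */
        uint64_t new_accounted = steal;

        printf("old: %llu ns (0.5 tick deferred), new: %llu ns\n",
               (unsigned long long)old_accounted,
               (unsigned long long)new_accounted);
        return 0;
}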
 kernel/sched/core.c  |  6 ------
 kernel/sched/sched.h | 10 ----------
 2 files changed, 0 insertions(+), 16 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b46131ef6aab..b14a188af898 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -823,19 +823,13 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 #endif
 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
        if (static_key_false((&paravirt_steal_rq_enabled))) {
-               u64 st;
-
                steal = paravirt_steal_clock(cpu_of(rq));
                steal -= rq->prev_steal_time_rq;
 
                if (unlikely(steal > delta))
                        steal = delta;
 
-               st = steal_ticks(steal);
-               steal = st * TICK_NSEC;
-
                rq->prev_steal_time_rq += steal;
-
                delta -= steal;
        }
 #endif
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c2119fd20f8b..5ec991010122 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1214,16 +1214,6 @@ extern void update_idle_cpu_load(struct rq *this_rq);
 
 extern void init_task_runnable_average(struct task_struct *p);
 
-#ifdef CONFIG_PARAVIRT
-static inline u64 steal_ticks(u64 steal)
-{
-       if (unlikely(steal > NSEC_PER_SEC))
-               return div_u64(steal, TICK_NSEC);
-
-       return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
-}
-#endif
-
 static inline void inc_nr_running(struct rq *rq)
 {
        rq->nr_running++;
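[Editor's note] For context on the helper being removed: steal_ticks() picked between a plain 64-bit divide (div_u64) for steal times over a second and __iter_div_u64_rem() for smaller values, where the quotient is at most HZ and division by repeated subtraction is cheaper than a full 64-bit divide on 32-bit machines. A rough userspace sketch of that iterative divide, assuming the kernel's semantics:

#include <stdint.h>

/* Userspace sketch of __iter_div_u64_rem(): divide by repeated
 * subtraction. Worthwhile when the quotient is known to be small,
 * since it avoids a full 64-bit division on 32-bit CPUs. */
static uint64_t iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
                                 uint64_t *remainder)
{
        uint64_t quot = 0;

        while (dividend >= divisor) {
                dividend -= divisor;
                quot++;
        }
        *remainder = dividend;
        return quot;
}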