author    Yuyang Du <yuyang.du@intel.com>    2016-04-05 00:12:26 -0400
committer Ingo Molnar <mingo@kernel.org>    2016-05-05 03:24:00 -0400
commit    6ecdd74962f246dfe8750b7bea481a1c0816315d (patch)
tree      3b07aefd3f08af08e983771f34eb1f9037cc7848 /kernel/sched/sched.h
parent    2159197d66770ec01f75c93fb11dc66df81fd45b (diff)
sched/fair: Generalize the load/util averages resolution definition
Integer metrics need fixed-point arithmetic. In sched/fair, a few metrics, e.g., weight, load, load_avg, util_avg, freq, and capacity, may have different fixed-point ranges, which makes their update and usage error-prone.

To avoid errors related to the fixed-point range, we define a basic fixed-point range and then express all metrics in terms of that basic range. The basic range is 1024, i.e., (1 << 10). Further, the basic range can be applied recursively to obtain larger ranges.

As pointed out by Ben Segall, weight (visible to users, e.g., NICE-0 has 1024) and load (e.g., NICE_0_LOAD) have independent ranges, but they must be well calibrated.

Signed-off-by: Yuyang Du <yuyang.du@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bsegall@google.com
Cc: dietmar.eggemann@arm.com
Cc: lizefan@huawei.com
Cc: morten.rasmussen@arm.com
Cc: pjt@google.com
Cc: umgwanakikbuti@gmail.com
Cc: vincent.guittot@linaro.org
Link: http://lkml.kernel.org/r/1459829551-21625-2-git-send-email-yuyang.du@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
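For illustration only (not part of this patch): a minimal userspace C sketch of the calibration requirement described above, assuming SCHED_FIXEDPOINT_SHIFT == 10 (the basic 1024 range introduced elsewhere in this series); the macro names mirror those in the hunk below.

/*
 * Illustrative sketch; not kernel code. Assumes the basic fixed-point
 * range SCHED_FIXEDPOINT_SHIFT == 10, i.e. a scale of 1024.
 */
#include <assert.h>

#define SCHED_FIXEDPOINT_SHIFT	10
#define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)

/* 64-bit case from the hunk below: load carries twice the basic resolution. */
#define SCHED_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
#define NICE_0_LOAD		(1L << SCHED_LOAD_SHIFT)
#define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)

int main(void)
{
	/* NICE-0 weight as visible to users: sched_prio_to_weight[20] == 1024. */
	long nice_0_weight = 1024;

	/* Calibration requirement from the comment added by this patch. */
	assert(scale_load(nice_0_weight) == NICE_0_LOAD);	/* 1024 << 10 == 1 << 20 */
	return 0;
}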
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--  kernel/sched/sched.h | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 066a4c2d2695..ad83361f9e67 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -56,18 +56,23 @@ static inline void cpu_load_update_active(struct rq *this_rq) { }
  * increase coverage and consistency always enable it on 64bit platforms.
  */
 #ifdef CONFIG_64BIT
-# define SCHED_LOAD_RESOLUTION	10
-# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
-# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
+# define SCHED_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
+# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
+# define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
 #else
-# define SCHED_LOAD_RESOLUTION	0
+# define SCHED_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
 # define scale_load(w)		(w)
 # define scale_load_down(w)	(w)
 #endif
 
-#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
 #define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
 
+/*
+ * NICE_0's weight (visible to users) and its load (invisible to users) have
+ * independent ranges, but they should be well calibrated. We use scale_load()
+ * and scale_load_down(w) to convert between them, and the following must be true:
+ * scale_load(sched_prio_to_weight[20]) == NICE_0_LOAD
+ */
 #define NICE_0_LOAD		SCHED_LOAD_SCALE
 #define NICE_0_SHIFT		SCHED_LOAD_SHIFT
 
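Worked numbers under the same assumption (SCHED_FIXEDPOINT_SHIFT == 10): on 64-bit, SCHED_LOAD_SHIFT == 20, so NICE_0_LOAD == 1 << 20 == 1048576 and scale_load(1024) == 1024 << 10 == 1048576; on 32-bit, SCHED_LOAD_SHIFT == 10, NICE_0_LOAD == 1024, and scale_load() is the identity, so scale_load(sched_prio_to_weight[20]) == NICE_0_LOAD holds in both configurations.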