author     Ingo Molnar <mingo@elte.hu>    2007-10-15 11:00:04 -0400
committer  Ingo Molnar <mingo@elte.hu>    2007-10-15 11:00:04 -0400
commit     bf5c91ba8c629b84413c761f529627195fd0a935 (patch)
tree       8a795d6861ba4e13b90d2597a7306e1adb30bee8 /kernel
parent     e9acbff6484df51fd880e0f5fe0224e8be34c17b (diff)
sched: move sched_feat() definitions
Move the sched_feat() definitions so that they can be used sooner by generic code too.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
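[Editor's illustration, not part of the patch: a minimal sketch of how the moved macro is meant to be consumed by code in kernel/sched.c. The sched_feat() macro, the SCHED_FEAT_* bits, const_debug and sysctl_sched_features are taken from the diff below; the caller example_check_feature() is a hypothetical name used only to show the pattern.]

/*
 * sched_feat(x) tests bit SCHED_FEAT_x in sysctl_sched_features.
 * With CONFIG_SCHED_DEBUG the mask is a mutable __read_mostly variable;
 * without it, const_debug makes it a compile-time constant, so the
 * branch below can be optimized away entirely.
 */
static void example_check_feature(void)	/* hypothetical caller */
{
	if (sched_feat(START_DEBIT)) {
		/* code path guarded by the START_DEBIT feature bit */
	}
}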
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c       31
-rw-r--r--  kernel/sched_fair.c  31
2 files changed, 31 insertions, 31 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 8f80ebafacc1..a5dd03522e32 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -382,6 +382,37 @@ static void update_rq_clock(struct rq *rq)
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
 /*
+ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
+ */
+#ifdef CONFIG_SCHED_DEBUG
+# define const_debug __read_mostly
+#else
+# define const_debug static const
+#endif
+
+/*
+ * Debugging: various feature bits
+ */
+enum {
+	SCHED_FEAT_FAIR_SLEEPERS	= 1,
+	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 2,
+	SCHED_FEAT_SLEEPER_AVG		= 4,
+	SCHED_FEAT_SLEEPER_LOAD_AVG	= 8,
+	SCHED_FEAT_START_DEBIT		= 16,
+	SCHED_FEAT_SKIP_INITIAL		= 32,
+};
+
+const_debug unsigned int sysctl_sched_features =
+		SCHED_FEAT_FAIR_SLEEPERS	*0 |
+		SCHED_FEAT_NEW_FAIR_SLEEPERS	*1 |
+		SCHED_FEAT_SLEEPER_AVG		*0 |
+		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
+		SCHED_FEAT_START_DEBIT		*1 |
+		SCHED_FEAT_SKIP_INITIAL		*0;
+
+#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
+
+/*
  * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
  * clock constructed from sched_clock():
  */
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a2af09cb6a70..a566a4558167 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -21,15 +21,6 @@
  */
 
 /*
- * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
- */
-#ifdef CONFIG_SCHED_DEBUG
-# define const_debug __read_mostly
-#else
-# define const_debug static const
-#endif
-
-/*
  * Targeted preemption latency for CPU-bound tasks:
  * (default: 20ms, units: nanoseconds)
  *
@@ -87,28 +78,6 @@ const_debug unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
 
 unsigned int sysctl_sched_runtime_limit __read_mostly;
 
-/*
- * Debugging: various feature bits
- */
-enum {
-	SCHED_FEAT_FAIR_SLEEPERS	= 1,
-	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 2,
-	SCHED_FEAT_SLEEPER_AVG		= 4,
-	SCHED_FEAT_SLEEPER_LOAD_AVG	= 8,
-	SCHED_FEAT_START_DEBIT		= 16,
-	SCHED_FEAT_SKIP_INITIAL		= 32,
-};
-
-const_debug unsigned int sysctl_sched_features =
-		SCHED_FEAT_FAIR_SLEEPERS	*0 |
-		SCHED_FEAT_NEW_FAIR_SLEEPERS	*1 |
-		SCHED_FEAT_SLEEPER_AVG		*0 |
-		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
-		SCHED_FEAT_START_DEBIT		*1 |
-		SCHED_FEAT_SKIP_INITIAL		*0;
-
-#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
-
 extern struct sched_class fair_sched_class;
 
 /**************************************************************