aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2007-11-09 16:39:37 -0500
committerIngo Molnar <mingo@elte.hu>2007-11-09 16:39:37 -0500
commitb2be5e96dc0b5a179cf4cb98e65cfb605752ca26 (patch)
treecf229cf83afc2c30369d1751338886cf8a067b5c /kernel
parent2cb8600e6be4281e381d39e44de4359e46333e23 (diff)
sched: reintroduce the sched_min_granularity tunable
We lost the sched_min_granularity tunable to a clever optimization that uses the sched_latency/min_granularity ratio — but the ratio is quite unintuitive to users and can also crash the kernel if it is set to 0. So reintroduce the min_granularity tunable, while keeping the ratio maintained internally. No functionality changed. [ mingo@elte.hu: some fixlets. ] Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched_debug.c2
-rw-r--r--kernel/sched_fair.c35
-rw-r--r--kernel/sysctl.c11
3 files changed, 36 insertions, 12 deletions
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 415e5c385542..ca198a797bfa 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -211,7 +211,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
 #define PN(x) \
 	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
 	PN(sysctl_sched_latency);
-	PN(sysctl_sched_nr_latency);
+	PN(sysctl_sched_min_granularity);
 	PN(sysctl_sched_wakeup_granularity);
 	PN(sysctl_sched_batch_wakeup_granularity);
 	PN(sysctl_sched_child_runs_first);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8763bee6b661..c495dcf7031b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -35,16 +35,21 @@
 const_debug unsigned int sysctl_sched_latency = 20000000ULL;
 
 /*
- * After fork, child runs first. (default) If set to 0 then
- * parent will (try to) run first.
+ * Minimal preemption granularity for CPU-bound tasks:
+ * (default: 1 msec, units: nanoseconds)
  */
-const_debug unsigned int sysctl_sched_child_runs_first = 1;
+const_debug unsigned int sysctl_sched_min_granularity = 1000000ULL;
 
 /*
- * Minimal preemption granularity for CPU-bound tasks:
- * (default: 2 msec, units: nanoseconds)
+ * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  */
-const_debug unsigned int sysctl_sched_nr_latency = 20;
+const_debug unsigned int sched_nr_latency = 20;
+
+/*
+ * After fork, child runs first. (default) If set to 0 then
+ * parent will (try to) run first.
+ */
+const_debug unsigned int sysctl_sched_child_runs_first = 1;
 
 /*
  * sys_sched_yield() compat mode
@@ -212,6 +217,22 @@ static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
  * Scheduling class statistics methods:
  */
 
+#ifdef CONFIG_SCHED_DEBUG
+int sched_nr_latency_handler(struct ctl_table *table, int write,
+		struct file *filp, void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+
+	if (ret || !write)
+		return ret;
+
+	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
+					sysctl_sched_min_granularity);
+
+	return 0;
+}
+#endif
 
 /*
  * The idea is to set a period in which each task runs once.
@@ -224,7 +245,7 @@ static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 static u64 __sched_period(unsigned long nr_running)
 {
 	u64 period = sysctl_sched_latency;
-	unsigned long nr_latency = sysctl_sched_nr_latency;
+	unsigned long nr_latency = sched_nr_latency;
 
 	if (unlikely(nr_running > nr_latency)) {
 		period *= nr_running;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 3b4efbe26445..6e3b63c06856 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -235,11 +235,14 @@ static struct ctl_table kern_table[] = {
 #ifdef CONFIG_SCHED_DEBUG
 	{
 		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "sched_nr_latency",
-		.data		= &sysctl_sched_nr_latency,
+		.procname	= "sched_min_granularity_ns",
+		.data		= &sysctl_sched_min_granularity,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
+		.proc_handler	= &sched_nr_latency_handler,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &min_sched_granularity_ns,
+		.extra2		= &max_sched_granularity_ns,
 	},
 	{
 		.ctl_name	= CTL_UNNUMBERED,
244 { 247 {
245 .ctl_name = CTL_UNNUMBERED, 248 .ctl_name = CTL_UNNUMBERED,
@@ -247,7 +250,7 @@ static struct ctl_table kern_table[] = {
 		.data		= &sysctl_sched_latency,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_minmax,
+		.proc_handler	= &sched_nr_latency_handler,
 		.strategy	= &sysctl_intvec,
 		.extra1		= &min_sched_granularity_ns,
 		.extra2		= &max_sched_granularity_ns,