Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  35
1 file changed, 28 insertions(+), 7 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8763bee6b661..c495dcf7031b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -35,16 +35,21 @@
 const_debug unsigned int sysctl_sched_latency = 20000000ULL;
 
 /*
- * After fork, child runs first. (default) If set to 0 then
- * parent will (try to) run first.
+ * Minimal preemption granularity for CPU-bound tasks:
+ * (default: 1 msec, units: nanoseconds)
  */
-const_debug unsigned int sysctl_sched_child_runs_first = 1;
+const_debug unsigned int sysctl_sched_min_granularity = 1000000ULL;
 
 /*
- * Minimal preemption granularity for CPU-bound tasks:
- * (default: 2 msec, units: nanoseconds)
+ * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  */
-const_debug unsigned int sysctl_sched_nr_latency = 20;
+const_debug unsigned int sched_nr_latency = 20;
+
+/*
+ * After fork, child runs first. (default) If set to 0 then
+ * parent will (try to) run first.
+ */
+const_debug unsigned int sysctl_sched_child_runs_first = 1;
 
 /*
  * sys_sched_yield() compat mode
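With the defaults in this hunk the derived ratio is 20: the 20 msec scheduling latency divided by the new 1 msec minimum granularity, which is exactly the value sched_nr_latency is initialised to. A minimal userspace sketch of that arithmetic follows; DIV_ROUND_UP is copied here only so the snippet builds outside the kernel, and it is the same rounding the handler in the next hunk relies on.

#include <stdio.h>

/* Userspace copy of the kernel's DIV_ROUND_UP macro, for illustration only. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long latency = 20000000ULL;	/* sysctl_sched_latency: 20 msec */
	unsigned long long min_gran = 1000000ULL;	/* sysctl_sched_min_granularity: 1 msec */

	/* Matches the sched_nr_latency = 20 initialiser above. */
	printf("%llu\n", DIV_ROUND_UP(latency, min_gran));	/* prints 20 */
	return 0;
}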
@@ -212,6 +217,22 @@ static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
  * Scheduling class statistics methods:
  */
 
+#ifdef CONFIG_SCHED_DEBUG
+int sched_nr_latency_handler(struct ctl_table *table, int write,
+		struct file *filp, void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+
+	if (ret || !write)
+		return ret;
+
+	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
+					sysctl_sched_min_granularity);
+
+	return 0;
+}
+#endif
 
 /*
  * The idea is to set a period in which each task runs once.
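The handler added above only takes effect once a sysctl entry routes writes through it; that hookup lives in kernel/sysctl.c and is outside this view, which is limited to kernel/sched_fair.c. The sketch below shows roughly what such a ctl_table entry could look like; the table name and field values are illustrative assumptions, not part of this patch.

#ifdef CONFIG_SCHED_DEBUG
/* Hypothetical table name; the real entries live in kernel/sysctl.c. */
static struct ctl_table sched_granularity_table[] = {
	{
		.procname	= "sched_min_granularity_ns",
		.data		= &sysctl_sched_min_granularity,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		/* Recomputes sched_nr_latency after each write. */
		.proc_handler	= &sched_nr_latency_handler,
	},
	{ }
};
#endif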
@@ -224,7 +245,7 @@ static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 static u64 __sched_period(unsigned long nr_running)
 {
 	u64 period = sysctl_sched_latency;
-	unsigned long nr_latency = sysctl_sched_nr_latency;
+	unsigned long nr_latency = sched_nr_latency;
 
 	if (unlikely(nr_running > nr_latency)) {
 		period *= nr_running;
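The hunk stops before __sched_period() ends. For context, the rest of the function at this point in the tree continues roughly as below; this is a reconstruction for readability, not part of the patch. When more than sched_nr_latency tasks are runnable, the period is stretched so each task still receives about sysctl_sched_min_granularity of CPU time.

static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		/*
		 * Stretch the period so each runnable task still gets
		 * roughly sysctl_sched_min_granularity of run time.
		 */
		period *= nr_running;
		do_div(period, nr_latency);
	}

	return period;
}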