author    Ingo Molnar <mingo@elte.hu>    2007-10-15 11:00:03 -0400
committer Ingo Molnar <mingo@elte.hu>    2007-10-15 11:00:03 -0400
commit    8ebc91d93669af39dbed50914d7daf457eeb43be (patch)
tree      c7ab6dfce3ea737db57599c8e42a41ddac51464c
parent    2bd8e6d422a4f44c0994f909317eba80b0fe08a1 (diff)
sched: remove stat_gran
remove the stat_gran code - it was disabled by default and it causes
unnecessary overhead.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
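For reference, a minimal user-space sketch of the change (not kernel code: the simplified struct entity and the hypothetical stats_update() stand in for sched_entity and __update_curr()). It contrasts the removed stat_gran batching, which accumulated delta_exec and only did the stats work once the accumulator crossed a threshold, with the direct path this patch switches to. With the default granularity of 0 the old path updated on every call anyway, so the accumulator and threshold check were pure overhead.

/*
 * Illustrative sketch only, not kernel code. stats_update() stands in
 * for the "expensive" per-update accounting (__update_curr() in CFS).
 */
#include <stdio.h>

struct entity {
	unsigned long long exec_start;       /* clock at last update */
	unsigned long delta_exec;            /* accumulator used by the old path */
	unsigned long long sum_exec_runtime; /* total accounted runtime */
};

static void stats_update(struct entity *e, unsigned long delta_exec)
{
	e->sum_exec_runtime += delta_exec;   /* the expensive per-update work */
}

/* Old scheme: batch updates until the accumulated delta crosses stat_gran. */
static void update_old(struct entity *e, unsigned long long clock,
		       unsigned long stat_gran)
{
	unsigned long delta_exec = (unsigned long)(clock - e->exec_start);

	e->delta_exec += delta_exec;
	if (e->delta_exec > stat_gran) {
		stats_update(e, e->delta_exec);
		e->delta_exec = 0;
	}
	e->exec_start = clock;
}

/* New scheme: pass the delta straight through, no accumulator at all. */
static void update_new(struct entity *e, unsigned long long clock)
{
	unsigned long delta_exec = (unsigned long)(clock - e->exec_start);

	stats_update(e, delta_exec);
	e->exec_start = clock;
}

int main(void)
{
	struct entity a = { 0 }, b = { 0 };
	unsigned long long clock;

	/* With the default granularity of 0, both paths account every delta,
	 * so the old path's extra bookkeeping changes nothing. */
	for (clock = 1000; clock <= 5000; clock += 1000) {
		update_old(&a, clock, 0);
		update_new(&b, clock);
	}
	printf("old: %llu ns, new: %llu ns\n",
	       a.sum_exec_runtime, b.sum_exec_runtime);
	return 0;
}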
-rw-r--r--  include/linux/sched.h |  3 ---
-rw-r--r--  kernel/sched.c        |  5 +----
-rw-r--r--  kernel/sched_fair.c   | 46 ++++++++++++--------------------------------
-rw-r--r--  kernel/sysctl.c       | 11 -----------
4 files changed, 15 insertions(+), 50 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index befca3f9364a..3c38a5040e8f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -895,9 +895,6 @@ struct load_weight {
  */
 struct sched_entity {
 	long wait_runtime;
-	unsigned long delta_fair_run;
-	unsigned long delta_fair_sleep;
-	unsigned long delta_exec;
 	s64 fair_key;
 	struct load_weight load;	/* for load-balancing */
 	struct rb_node run_node;
diff --git a/kernel/sched.c b/kernel/sched.c
index ae1544f0a20d..d4dabfcc776c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -829,7 +829,7 @@ static void update_curr_load(struct rq *rq)
 	 * Stagger updates to ls->delta_fair. Very frequent updates
 	 * can be expensive.
 	 */
-	if (ls->delta_stat >= sysctl_sched_stat_granularity)
+	if (ls->delta_stat)
 		__update_curr_load(rq, ls);
 }
 
@@ -1588,9 +1588,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.exec_start = 0;
 	p->se.sum_exec_runtime = 0;
 	p->se.prev_sum_exec_runtime = 0;
-	p->se.delta_exec = 0;
-	p->se.delta_fair_run = 0;
-	p->se.delta_fair_sleep = 0;
 	p->se.wait_runtime = 0;
 	p->se.sleep_start_fair = 0;
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 2e84aaffe425..2138c40f4836 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -85,8 +85,6 @@ const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;
  */
 const_debug unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
 
-const_debug unsigned int sysctl_sched_stat_granularity;
-
 unsigned int sysctl_sched_runtime_limit __read_mostly;
 
 /*
@@ -360,13 +358,13 @@ add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
  * are not in our scheduling class.
  */
 static inline void
-__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
+	      unsigned long delta_exec)
 {
-	unsigned long delta, delta_exec, delta_fair, delta_mine;
+	unsigned long delta, delta_fair, delta_mine;
 	struct load_weight *lw = &cfs_rq->load;
 	unsigned long load = lw->weight;
 
-	delta_exec = curr->delta_exec;
 	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
 
 	curr->sum_exec_runtime += delta_exec;
@@ -400,6 +398,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 static void update_curr(struct cfs_rq *cfs_rq)
 {
 	struct sched_entity *curr = cfs_rq_curr(cfs_rq);
+	u64 now = rq_of(cfs_rq)->clock;
 	unsigned long delta_exec;
 
 	if (unlikely(!curr))
@@ -410,15 +409,10 @@ static void update_curr(struct cfs_rq *cfs_rq)
 	 * since the last time we changed load (this cannot
 	 * overflow on 32 bits):
 	 */
-	delta_exec = (unsigned long)(rq_of(cfs_rq)->clock - curr->exec_start);
-
-	curr->delta_exec += delta_exec;
+	delta_exec = (unsigned long)(now - curr->exec_start);
 
-	if (unlikely(curr->delta_exec > sysctl_sched_stat_granularity)) {
-		__update_curr(cfs_rq, curr);
-		curr->delta_exec = 0;
-	}
-	curr->exec_start = rq_of(cfs_rq)->clock;
+	__update_curr(cfs_rq, curr, delta_exec);
+	curr->exec_start = now;
 }
 
 static inline void
@@ -494,10 +488,9 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
  * Note: must be called with a freshly updated rq->fair_clock.
  */
 static inline void
-__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se,
+			unsigned long delta_fair)
 {
-	unsigned long delta_fair = se->delta_fair_run;
-
 	schedstat_set(se->wait_max, max(se->wait_max,
 			rq_of(cfs_rq)->clock - se->wait_start));
 
@@ -519,12 +512,7 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
 			(u64)(cfs_rq->fair_clock - se->wait_start_fair));
 
-	se->delta_fair_run += delta_fair;
-	if (unlikely(abs(se->delta_fair_run) >=
-			sysctl_sched_stat_granularity)) {
-		__update_stats_wait_end(cfs_rq, se);
-		se->delta_fair_run = 0;
-	}
+	__update_stats_wait_end(cfs_rq, se, delta_fair);
 
 	se->wait_start_fair = 0;
 	schedstat_set(se->wait_start, 0);
@@ -567,9 +555,10 @@ update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
  * Scheduling class queueing methods:
  */
 
-static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se,
+			      unsigned long delta_fair)
 {
-	unsigned long load = cfs_rq->load.weight, delta_fair;
+	unsigned long load = cfs_rq->load.weight;
 	long prev_runtime;
 
 	/*
@@ -582,8 +571,6 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
 		load = rq_of(cfs_rq)->cpu_load[2];
 
-	delta_fair = se->delta_fair_sleep;
-
 	/*
 	 * Fix up delta_fair with the effect of us running
 	 * during the whole sleep period:
@@ -618,12 +605,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
 			(u64)(cfs_rq->fair_clock - se->sleep_start_fair));
 
-	se->delta_fair_sleep += delta_fair;
-	if (unlikely(abs(se->delta_fair_sleep) >=
-			sysctl_sched_stat_granularity)) {
-		__enqueue_sleeper(cfs_rq, se);
-		se->delta_fair_sleep = 0;
-	}
+	__enqueue_sleeper(cfs_rq, se, delta_fair);
 
 	se->sleep_start_fair = 0;
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 6c97259e863e..9b1b0d4ff966 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -266,17 +266,6 @@ static ctl_table kern_table[] = {
 	},
 	{
 		.ctl_name = CTL_UNNUMBERED,
-		.procname = "sched_stat_granularity_ns",
-		.data = &sysctl_sched_stat_granularity,
-		.maxlen = sizeof(unsigned int),
-		.mode = 0644,
-		.proc_handler = &proc_dointvec_minmax,
-		.strategy = &sysctl_intvec,
-		.extra1 = &min_wakeup_granularity_ns,
-		.extra2 = &max_wakeup_granularity_ns,
-	},
-	{
-		.ctl_name = CTL_UNNUMBERED,
 		.procname = "sched_runtime_limit_ns",
 		.data = &sysctl_sched_runtime_limit,
 		.maxlen = sizeof(unsigned int),