author		Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:03 -0400
committer	Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:03 -0400
commit		8ebc91d93669af39dbed50914d7daf457eeb43be
tree		c7ab6dfce3ea737db57599c8e42a41ddac51464c /kernel/sched_fair.c
parent		2bd8e6d422a4f44c0994f909317eba80b0fe08a1
sched: remove stat_gran
Remove the stat_gran code: it was disabled by default and caused
unnecessary overhead.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	46
1 file changed, 14 insertions(+), 32 deletions(-)
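
Before this patch, the stats paths batched their work: each update accumulated
a delta into a per-entity field and only called the real helper once the total
crossed sysctl_sched_stat_granularity. Since that sysctl defaulted to 0 (the
feature was effectively off), the threshold check passed on essentially every
update, so the batching saved nothing while still paying for the extra
accumulate/compare/reset bookkeeping. A condensed sketch of the
transformation, distilled from the update_curr() hunks below (illustrative,
not the complete kernel code):

	/* old, batched form (removed): accumulate, flush past the threshold */
	curr->delta_exec += delta_exec;
	if (unlikely(curr->delta_exec > sysctl_sched_stat_granularity)) {
		__update_curr(cfs_rq, curr);	/* reads curr->delta_exec */
		curr->delta_exec = 0;
	}

	/* new, direct form: pass the delta down unconditionally */
	__update_curr(cfs_rq, curr, delta_exec);

The same accumulate-then-flush pattern is removed from the wait-end and
sleeper-enqueue paths further down.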
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 2e84aaffe425..2138c40f4836 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -85,8 +85,6 @@ const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;
  */
 const_debug unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
 
-const_debug unsigned int sysctl_sched_stat_granularity;
-
 unsigned int sysctl_sched_runtime_limit __read_mostly;
 
 /*
@@ -360,13 +358,13 @@ add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
  * are not in our scheduling class.
  */
 static inline void
-__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
+	      unsigned long delta_exec)
 {
-	unsigned long delta, delta_exec, delta_fair, delta_mine;
+	unsigned long delta, delta_fair, delta_mine;
 	struct load_weight *lw = &cfs_rq->load;
 	unsigned long load = lw->weight;
 
-	delta_exec = curr->delta_exec;
 	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
 
 	curr->sum_exec_runtime += delta_exec;
@@ -400,6 +398,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 static void update_curr(struct cfs_rq *cfs_rq)
 {
 	struct sched_entity *curr = cfs_rq_curr(cfs_rq);
+	u64 now = rq_of(cfs_rq)->clock;
 	unsigned long delta_exec;
 
 	if (unlikely(!curr))
@@ -410,15 +409,10 @@ static void update_curr(struct cfs_rq *cfs_rq)
 	 * since the last time we changed load (this cannot
 	 * overflow on 32 bits):
 	 */
-	delta_exec = (unsigned long)(rq_of(cfs_rq)->clock - curr->exec_start);
-
-	curr->delta_exec += delta_exec;
+	delta_exec = (unsigned long)(now - curr->exec_start);
 
-	if (unlikely(curr->delta_exec > sysctl_sched_stat_granularity)) {
-		__update_curr(cfs_rq, curr);
-		curr->delta_exec = 0;
-	}
-	curr->exec_start = rq_of(cfs_rq)->clock;
+	__update_curr(cfs_rq, curr, delta_exec);
+	curr->exec_start = now;
 }
 
 static inline void
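
Pieced together from the two hunks above, the post-patch update_curr() reads
roughly as follows (the opening of the comment falls outside the hunk context
and is reconstructed, so it may differ slightly from the tree):

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq_curr(cfs_rq);
	u64 now = rq_of(cfs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;
}

Reading the runqueue clock into the local u64 'now' once also avoids
dereferencing rq_of(cfs_rq)->clock twice per update.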
@@ -494,10 +488,9 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
  * Note: must be called with a freshly updated rq->fair_clock.
  */
 static inline void
-__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se,
+			unsigned long delta_fair)
 {
-	unsigned long delta_fair = se->delta_fair_run;
-
 	schedstat_set(se->wait_max, max(se->wait_max,
 			rq_of(cfs_rq)->clock - se->wait_start));
 
@@ -519,12 +512,7 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
 			(u64)(cfs_rq->fair_clock - se->wait_start_fair));
 
-	se->delta_fair_run += delta_fair;
-	if (unlikely(abs(se->delta_fair_run) >=
-				sysctl_sched_stat_granularity)) {
-		__update_stats_wait_end(cfs_rq, se);
-		se->delta_fair_run = 0;
-	}
+	__update_stats_wait_end(cfs_rq, se, delta_fair);
 
 	se->wait_start_fair = 0;
 	schedstat_set(se->wait_start, 0);
@@ -567,9 +555,10 @@ update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
  * Scheduling class queueing methods:
  */
 
-static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se,
+			      unsigned long delta_fair)
 {
-	unsigned long load = cfs_rq->load.weight, delta_fair;
+	unsigned long load = cfs_rq->load.weight;
 	long prev_runtime;
 
 	/*
@@ -582,8 +571,6 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
 		load = rq_of(cfs_rq)->cpu_load[2];
 
-	delta_fair = se->delta_fair_sleep;
-
 	/*
 	 * Fix up delta_fair with the effect of us running
 	 * during the whole sleep period:
@@ -618,12 +605,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
 			(u64)(cfs_rq->fair_clock - se->sleep_start_fair));
 
-	se->delta_fair_sleep += delta_fair;
-	if (unlikely(abs(se->delta_fair_sleep) >=
-				sysctl_sched_stat_granularity)) {
-		__enqueue_sleeper(cfs_rq, se);
-		se->delta_fair_sleep = 0;
-	}
+	__enqueue_sleeper(cfs_rq, se, delta_fair);
 
 	se->sleep_start_fair = 0;
 