 include/linux/sched.h |  3 ---
 kernel/sched.c        |  5 +----
 kernel/sched_fair.c   | 46 ++++++++++++++--------------------------------
 kernel/sysctl.c       | 11 -----------
 4 files changed, 15 insertions(+), 50 deletions(-)
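This patch removes the sched_stat_granularity batching of scheduler statistics updates: the per-entity delta_exec, delta_fair_run and delta_fair_sleep accumulators and the sched_stat_granularity_ns sysctl go away, and __update_curr(), __update_stats_wait_end() and __enqueue_sleeper() now take the delta as an explicit parameter and are called unconditionally.

For illustration only, here is a minimal user-space sketch (not kernel code) of that accounting change; the struct and function names are simplified stand-ins, not the real scheduler API:

/*
 * Sketch of the accounting change: before this patch, exec-time deltas were
 * accumulated and only folded into the statistics once they crossed a
 * granularity threshold; after it, every delta is folded in immediately.
 */
#include <stdio.h>

struct entity {
	unsigned long sum_exec_runtime;	/* runtime folded into the stats */
	unsigned long delta_exec;	/* pre-patch batching buffer */
};

/* Post-patch style: the caller hands the delta in explicitly. */
static void fold_delta(struct entity *e, unsigned long delta_exec)
{
	e->sum_exec_runtime += delta_exec;
}

/* Pre-patch style: batch deltas until they cross the granularity threshold. */
static void fold_delta_batched(struct entity *e, unsigned long delta_exec,
			       unsigned long stat_granularity)
{
	e->delta_exec += delta_exec;
	if (e->delta_exec > stat_granularity) {
		fold_delta(e, e->delta_exec);
		e->delta_exec = 0;
	}
}

int main(void)
{
	struct entity batched = { 0, 0 }, direct = { 0, 0 };
	unsigned long deltas[] = { 300, 400, 200 };	/* nanoseconds */

	for (int i = 0; i < 3; i++) {
		fold_delta_batched(&batched, deltas[i], 1000);
		fold_delta(&direct, deltas[i]);
	}
	/* Below the threshold, the batched variant has accounted nothing yet. */
	printf("batched: %lu (%lu pending), direct: %lu\n",
	       batched.sum_exec_runtime, batched.delta_exec,
	       direct.sum_exec_runtime);
	return 0;
}

With small deltas the batched variant can leave work parked in the accumulator until the threshold is crossed; the direct variant folds every delta in as it arrives, which is what the scheduler does after this patch.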
diff --git a/include/linux/sched.h b/include/linux/sched.h
index befca3f9364a..3c38a5040e8f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -895,9 +895,6 @@ struct load_weight {
  */
 struct sched_entity {
 	long			wait_runtime;
-	unsigned long		delta_fair_run;
-	unsigned long		delta_fair_sleep;
-	unsigned long		delta_exec;
 	s64			fair_key;
 	struct load_weight	load;		/* for load-balancing */
 	struct rb_node		run_node;
diff --git a/kernel/sched.c b/kernel/sched.c
index ae1544f0a20d..d4dabfcc776c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -829,7 +829,7 @@ static void update_curr_load(struct rq *rq)
 	 * Stagger updates to ls->delta_fair. Very frequent updates
 	 * can be expensive.
 	 */
-	if (ls->delta_stat >= sysctl_sched_stat_granularity)
+	if (ls->delta_stat)
 		__update_curr_load(rq, ls);
 }
 
@@ -1588,9 +1588,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.exec_start		= 0;
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
-	p->se.delta_exec		= 0;
-	p->se.delta_fair_run		= 0;
-	p->se.delta_fair_sleep		= 0;
 	p->se.wait_runtime		= 0;
 	p->se.sleep_start_fair		= 0;
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 2e84aaffe425..2138c40f4836 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -85,8 +85,6 @@ const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;
  */
 const_debug unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
 
-const_debug unsigned int sysctl_sched_stat_granularity;
-
 unsigned int sysctl_sched_runtime_limit __read_mostly;
 
 /*
@@ -360,13 +358,13 @@ add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
  * are not in our scheduling class.
  */
 static inline void
-__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
+	      unsigned long delta_exec)
 {
-	unsigned long delta, delta_exec, delta_fair, delta_mine;
+	unsigned long delta, delta_fair, delta_mine;
 	struct load_weight *lw = &cfs_rq->load;
 	unsigned long load = lw->weight;
 
-	delta_exec = curr->delta_exec;
 	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
 
 	curr->sum_exec_runtime += delta_exec;
@@ -400,6 +398,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 static void update_curr(struct cfs_rq *cfs_rq)
 {
 	struct sched_entity *curr = cfs_rq_curr(cfs_rq);
+	u64 now = rq_of(cfs_rq)->clock;
 	unsigned long delta_exec;
 
 	if (unlikely(!curr))
@@ -410,15 +409,10 @@ static void update_curr(struct cfs_rq *cfs_rq)
 	 * since the last time we changed load (this cannot
 	 * overflow on 32 bits):
 	 */
-	delta_exec = (unsigned long)(rq_of(cfs_rq)->clock - curr->exec_start);
+	delta_exec = (unsigned long)(now - curr->exec_start);
 
-	curr->delta_exec += delta_exec;
-
-	if (unlikely(curr->delta_exec > sysctl_sched_stat_granularity)) {
-		__update_curr(cfs_rq, curr);
-		curr->delta_exec = 0;
-	}
-	curr->exec_start = rq_of(cfs_rq)->clock;
+	__update_curr(cfs_rq, curr, delta_exec);
+	curr->exec_start = now;
 }
 
 static inline void
@@ -494,10 +488,9 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
  * Note: must be called with a freshly updated rq->fair_clock.
  */
 static inline void
-__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se,
+			unsigned long delta_fair)
 {
-	unsigned long delta_fair = se->delta_fair_run;
-
 	schedstat_set(se->wait_max, max(se->wait_max,
 			rq_of(cfs_rq)->clock - se->wait_start));
 
@@ -519,12 +512,7 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
 			(u64)(cfs_rq->fair_clock - se->wait_start_fair));
 
-	se->delta_fair_run += delta_fair;
-	if (unlikely(abs(se->delta_fair_run) >=
-				sysctl_sched_stat_granularity)) {
-		__update_stats_wait_end(cfs_rq, se);
-		se->delta_fair_run = 0;
-	}
+	__update_stats_wait_end(cfs_rq, se, delta_fair);
 
 	se->wait_start_fair = 0;
 	schedstat_set(se->wait_start, 0);
@@ -567,9 +555,10 @@ update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
  * Scheduling class queueing methods:
  */
 
-static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se,
+			      unsigned long delta_fair)
 {
-	unsigned long load = cfs_rq->load.weight, delta_fair;
+	unsigned long load = cfs_rq->load.weight;
 	long prev_runtime;
 
 	/*
@@ -582,8 +571,6 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
 		load = rq_of(cfs_rq)->cpu_load[2];
 
-	delta_fair = se->delta_fair_sleep;
-
 	/*
 	 * Fix up delta_fair with the effect of us running
 	 * during the whole sleep period:
@@ -618,12 +605,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
 			(u64)(cfs_rq->fair_clock - se->sleep_start_fair));
 
-	se->delta_fair_sleep += delta_fair;
-	if (unlikely(abs(se->delta_fair_sleep) >=
-				sysctl_sched_stat_granularity)) {
-		__enqueue_sleeper(cfs_rq, se);
-		se->delta_fair_sleep = 0;
-	}
+	__enqueue_sleeper(cfs_rq, se, delta_fair);
 
 	se->sleep_start_fair = 0;
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 6c97259e863e..9b1b0d4ff966 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -266,17 +266,6 @@ static ctl_table kern_table[] = {
 	},
 	{
 		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "sched_stat_granularity_ns",
-		.data		= &sysctl_sched_stat_granularity,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_minmax,
-		.strategy	= &sysctl_intvec,
-		.extra1		= &min_wakeup_granularity_ns,
-		.extra2		= &max_wakeup_granularity_ns,
-	},
-	{
-		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "sched_runtime_limit_ns",
 		.data		= &sysctl_sched_runtime_limit,
 		.maxlen		= sizeof(unsigned int),
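A user-visible side effect of the sysctl.c hunk is that the sched_stat_granularity_ns entry disappears from /proc/sys/kernel, so tooling that used to poke it must tolerate the file being absent. A small illustrative check, plain user-space C assuming the standard procfs sysctl layout:

/* Probe for the tunable removed by this patch. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/proc/sys/kernel/sched_stat_granularity_ns";

	if (access(path, F_OK) == 0)
		printf("%s present: kernel without this patch\n", path);
	else
		printf("%s absent: kernel with this patch\n", path);
	return 0;
}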