diff options
Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/sched.c      |  9 ++++++++-
 -rw-r--r--  kernel/sched_fair.c | 12 ++++++++++++
 2 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index dadab4d13875..e914a716e1d4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -353,9 +353,16 @@ struct cfs_rq {
 	 */
 	unsigned long h_load;
 
+	/*
+	 * Maintaining per-cpu shares distribution for group scheduling
+	 *
+	 * load_stamp is the last time we updated the load average
+	 * load_last is the last time we updated the load average and saw load
+	 * load_unacc_exec_time is currently unaccounted execution time
+	 */
 	u64 load_avg;
 	u64 load_period;
-	u64 load_stamp, load_last;
+	u64 load_stamp, load_last, load_unacc_exec_time;
 
 	unsigned long load_contribution;
 #endif
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 33f941dcf88c..e7e2f08e6d01 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -539,6 +539,9 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	return calc_delta_fair(sched_slice(cfs_rq, se), se);
 }
 
+static void update_cfs_load(struct cfs_rq *cfs_rq);
+static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta);
+
 /*
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
@@ -558,6 +561,14 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 
 	curr->vruntime += delta_exec_weighted;
 	update_min_vruntime(cfs_rq);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	cfs_rq->load_unacc_exec_time += delta_exec;
+	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
+		update_cfs_load(cfs_rq);
+		update_cfs_shares(cfs_rq, 0);
+	}
+#endif
 }
 
 static void update_curr(struct cfs_rq *cfs_rq)
@@ -713,6 +724,7 @@ static void update_cfs_load(struct cfs_rq *cfs_rq)
 	}
 
 	cfs_rq->load_stamp = now;
+	cfs_rq->load_unacc_exec_time = 0;
 	cfs_rq->load_period += delta;
 	if (load) {
 		cfs_rq->load_last = now;