Diffstat (limited to 'kernel')

 kernel/sched.c      |  2 +-
 kernel/sched_fair.c | 30 ++++++++++++++----------------
 2 files changed, 15 insertions(+), 17 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 18d38e4ec7ba..e0fa3ff7f194 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8510,7 +8510,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 		/* Propagate contribution to hierarchy */
 		raw_spin_lock_irqsave(&rq->lock, flags);
 		for_each_sched_entity(se)
-			update_cfs_shares(group_cfs_rq(se), 0);
+			update_cfs_shares(group_cfs_rq(se));
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
 	}
 
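For context (not part of the patch): under CONFIG_FAIR_GROUP_SCHED, the for_each_sched_entity() walk used above ascends the task-group hierarchy, so the new shares value is re-evaluated at every level enclosing the changed group. The macro, as it appears in sched_fair.c of this era, is essentially:

	/* walk up from a sched_entity through its parent group entities */
	#define for_each_sched_entity(se) \
			for (; se; se = se->parent)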
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0c26e2df450e..0c550c841eee 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -540,7 +540,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
-static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta);
+static void update_cfs_shares(struct cfs_rq *cfs_rq);
 
 /*
  * Update the current task's runtime statistics. Skip current tasks that
@@ -763,16 +763,15 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 		list_del_leaf_cfs_rq(cfs_rq);
 }
 
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
-				long weight_delta)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
 	long load_weight, load, shares;
 
-	load = cfs_rq->load.weight + weight_delta;
+	load = cfs_rq->load.weight;
 
 	load_weight = atomic_read(&tg->load_weight);
-	load_weight -= cfs_rq->load_contribution;
 	load_weight += load;
+	load_weight -= cfs_rq->load_contribution;
 
 	shares = (tg->shares * load);
 	if (load_weight)
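Aside: with weight_delta gone, callers must ensure cfs_rq->load.weight is already up to date before shares are recomputed, and the swapped += / -= lines make the group-total update read as "add this runqueue's fresh load, then retire its stale load_contribution". A sketch of the resulting computation, reassembled from the new side of this hunk (the on-tree function also bounds the result before returning, which falls outside the hunk and is elided here):

	static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
	{
		long load_weight, load, shares;

		load = cfs_rq->load.weight;	/* local load only, no caller-supplied delta */

		load_weight = atomic_read(&tg->load_weight);	/* group-wide total */
		load_weight += load;				/* add our fresh load... */
		load_weight -= cfs_rq->load_contribution;	/* ...drop our stale contribution */

		shares = (tg->shares * load);
		if (load_weight)
			shares /= load_weight;	/* shares ~= tg->shares * local/total */

		/* (bounds checking of shares elided) */
		return shares;
	}

So each per-CPU group runqueue receives a slice of tg->shares proportional to its share of the group's total load.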
@@ -790,7 +789,7 @@ static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
 {
 	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
 		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq, 0);
+		update_cfs_shares(cfs_rq);
 	}
 }
 # else /* CONFIG_SMP */
@@ -798,8 +797,7 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
 }
 
-static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
-				long weight_delta)
+static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
 	return tg->shares;
 }
@@ -824,7 +822,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	account_entity_enqueue(cfs_rq, se);
 }
 
-static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
+static void update_cfs_shares(struct cfs_rq *cfs_rq)
 {
 	struct task_group *tg;
 	struct sched_entity *se;
@@ -838,7 +836,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 	if (likely(se->load.weight == tg->shares))
 		return;
 #endif
-	shares = calc_cfs_shares(cfs_rq, tg, weight_delta);
+	shares = calc_cfs_shares(cfs_rq, tg);
 
 	reweight_entity(cfs_rq_of(se), se, shares);
 }
@@ -847,7 +845,7 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
 }
 
-static inline void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
+static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
 {
 }
 
@@ -978,8 +976,8 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 */
 	update_curr(cfs_rq);
 	update_cfs_load(cfs_rq, 0);
-	update_cfs_shares(cfs_rq, se->load.weight);
 	account_entity_enqueue(cfs_rq, se);
+	update_cfs_shares(cfs_rq);
 
 	if (flags & ENQUEUE_WAKEUP) {
 		place_entity(cfs_rq, se, 0);
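Aside: the enqueue path was the one caller that passed a nonzero delta (se->load.weight). Moving update_cfs_shares() after account_entity_enqueue() means the new entity's weight is already folded into cfs_rq->load.weight when shares are recomputed, which is exactly what makes the weight_delta parameter redundant. Before and after, lifted straight from this hunk:

	/* old: shares computed from load.weight plus a pending delta */
	update_cfs_shares(cfs_rq, se->load.weight);
	account_entity_enqueue(cfs_rq, se);

	/* new: account first, then recompute from load.weight alone */
	account_entity_enqueue(cfs_rq, se);
	update_cfs_shares(cfs_rq);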
@@ -1041,7 +1039,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	update_cfs_load(cfs_rq, 0);
 	account_entity_dequeue(cfs_rq, se);
 	update_min_vruntime(cfs_rq);
-	update_cfs_shares(cfs_rq, 0);
+	update_cfs_shares(cfs_rq);
 
 	/*
 	 * Normalize the entity after updating the min_vruntime because the
@@ -1282,7 +1280,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq, 0);
+		update_cfs_shares(cfs_rq);
 	}
 
 	hrtick_update(rq);
@@ -1312,7 +1310,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq, 0);
+		update_cfs_shares(cfs_rq);
 	}
 
 	hrtick_update(rq);
@@ -2123,7 +2121,7 @@ static int update_shares_cpu(struct task_group *tg, int cpu)
 	 * We need to update shares after updating tg->load_weight in
 	 * order to adjust the weight of groups with long running tasks.
 	 */
-	update_cfs_shares(cfs_rq, 0);
+	update_cfs_shares(cfs_rq);
 
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 
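Net effect across the hunks above: update_cfs_shares() and calc_cfs_shares() lose their long weight_delta parameter. Every caller that passed 0 simply drops the argument, and the single caller that passed a real delta (enqueue_entity) is reordered so the weight is accounted before shares are recomputed. The API change in brief:

	/* before */
	static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta);

	/* after */
	static void update_cfs_shares(struct cfs_rq *cfs_rq);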
