author     Alex Shi <alex.shi@intel.com>     2013-06-19 22:18:54 -0400
committer  Ingo Molnar <mingo@kernel.org>    2013-06-27 04:07:40 -0400
commit     bf5b986ed4d20428eeec3df4a03dbfebb9b6538c (patch)
tree       cb3b21f66bb5e5e11a94db3bc26764497e814a26 /kernel/sched/fair.c
parent     72a4cf20cb71a327c636c7042fdacc25abffc87c (diff)
sched/tg: Use 'unsigned long' for load variable in task group
Since tg->load_avg is smaller than tg->load_weight, we don't need an
atomic64_t variable for load_avg on 32-bit machines. The same holds
for cfs_rq->tg_load_contrib. The atomic_long_t/unsigned long variable
types are more efficient and more convenient here.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1371694737-29336-11-git-send-email-alex.shi@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
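The win comes from how the kernel maps atomic_long_t: it is an
atomic64_t only on 64-bit builds and a plain (32-bit) atomic_t
otherwise, so 32-bit machines avoid the slower 64-bit atomic
operations. A simplified sketch of that mapping, loosely following
include/asm-generic/atomic-long.h (the real header uses inline
functions and covers many more operations):

/* Simplified sketch; not the actual header text. */
#if BITS_PER_LONG == 64
typedef atomic64_t atomic_long_t;
#define atomic_long_read(l)	atomic64_read(l)
#define atomic_long_add(i, l)	atomic64_add(i, l)
#else
typedef atomic_t atomic_long_t;
#define atomic_long_read(l)	atomic_read(l)
#define atomic_long_add(i, l)	atomic_add(i, l)
#endif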
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f19772de1b1c..30ccc37112d0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1075,7 +1075,7 @@ static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
 	 * to gain a more accurate current total weight. See
 	 * update_cfs_rq_load_contribution().
 	 */
-	tg_weight = atomic64_read(&tg->load_avg);
+	tg_weight = atomic_long_read(&tg->load_avg);
 	tg_weight -= cfs_rq->tg_load_contrib;
 	tg_weight += cfs_rq->load.weight;
 
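For intuition on the hunk above: calc_tg_weight() starts from the
group-wide average, drops this runqueue's stale contribution, and
substitutes its live weight. With hypothetical numbers, tg->load_avg
= 3072, cfs_rq->tg_load_contrib = 1024 and cfs_rq->load.weight = 2048
give tg_weight = 3072 - 1024 + 2048 = 4096, a closer estimate of the
group's current total weight than the raw average alone.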
@@ -1356,13 +1356,13 @@ static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
 						 int force_update)
 {
 	struct task_group *tg = cfs_rq->tg;
-	s64 tg_contrib;
+	long tg_contrib;
 
 	tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
 	tg_contrib -= cfs_rq->tg_load_contrib;
 
-	if (force_update || abs64(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
-		atomic64_add(tg_contrib, &tg->load_avg);
+	if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
+		atomic_long_add(tg_contrib, &tg->load_avg);
 		cfs_rq->tg_load_contrib += tg_contrib;
 	}
 }
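The threshold test in this hunk is worth a note: tg->load_avg is
shared by every CPU in the task group, so the code batches updates
and touches the shared variable only when the local contribution has
drifted by more than 1/8 of its published value (or when forced). A
hypothetical userspace sketch of the same pattern, using C11 atomics
and illustrative names:

#include <stdatomic.h>
#include <stdlib.h>

/* Shared accumulator, playing the role of tg->load_avg. */
static atomic_long shared_total;

/*
 * Publish a local value into the shared total only when it has
 * drifted by more than 1/8 of what was last published, mirroring
 * the force_update/delta test above. Illustration only.
 */
static void maybe_publish(long *published, long current_val, int force)
{
	long delta = current_val - *published;

	if (force || labs(delta) > *published / 8) {
		atomic_fetch_add(&shared_total, delta);
		*published += delta;	/* like cfs_rq->tg_load_contrib */
	}
}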
@@ -1397,8 +1397,8 @@ static inline void __update_group_entity_contrib(struct sched_entity *se)
 	u64 contrib;
 
 	contrib = cfs_rq->tg_load_contrib * tg->shares;
-	se->avg.load_avg_contrib = div64_u64(contrib,
-					     atomic64_read(&tg->load_avg) + 1);
+	se->avg.load_avg_contrib = div_u64(contrib,
+					   atomic_long_read(&tg->load_avg) + 1);
 
 	/*
 	 * For group entities we need to compute a correction term in the case
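The division in the last hunk distributes the group's shares across
its runqueues in proportion to each runqueue's contribution to the
total load; the "+ 1" guards against a zero divisor when the group
carries no load yet. A hypothetical standalone rendering of the same
arithmetic, with illustrative names:

#include <stdint.h>

/*
 * contrib = tg_load_contrib * shares / (tg_load_avg + 1), as in
 * __update_group_entity_contrib() above; "+ 1" avoids dividing
 * by zero. Sketch only, not kernel code.
 */
static uint64_t group_entity_contrib(unsigned long tg_load_contrib,
				     unsigned long shares,
				     unsigned long tg_load_avg)
{
	uint64_t contrib = (uint64_t)tg_load_contrib * shares;

	return contrib / (uint64_t)(tg_load_avg + 1);
}

For example, tg_load_contrib = 512, shares = 1024 and tg_load_avg =
2048 yield 512 * 1024 / 2049 = 255: a runqueue carrying a quarter of
the group's load receives roughly a quarter of its shares.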