diff options
author | Alex Shi <alex.shi@intel.com> | 2013-06-19 22:18:55 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2013-06-27 04:07:41 -0400 |
commit | 2509940fd71c2e2915a05052bbdbf2d478364184 (patch) | |
tree | e2a9466c8cca831158c18a4c4c750420b11fea30 | |
parent | bf5b986ed4d20428eeec3df4a03dbfebb9b6538c (diff) |
sched/cfs_rq: Change atomic64_t removed_load to atomic_long_t
Similar to the runnable_load_avg and blocked_load_avg variables, a long
type is sufficient for removed_load on both 64-bit and 32-bit machines.
This lets us avoid the expensive atomic64 operations on 32-bit machines.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Reviewed-by: Paul Turner <pjt@google.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1371694737-29336-12-git-send-email-alex.shi@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | kernel/sched/fair.c | 10 | ||||
-rw-r--r-- | kernel/sched/sched.h | 3 |
2 files changed, 8 insertions, 5 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 30ccc37112d0..b43474a964c2 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -1517,8 +1517,9 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update) | |||
1517 | if (!decays && !force_update) | 1517 | if (!decays && !force_update) |
1518 | return; | 1518 | return; |
1519 | 1519 | ||
1520 | if (atomic64_read(&cfs_rq->removed_load)) { | 1520 | if (atomic_long_read(&cfs_rq->removed_load)) { |
1521 | u64 removed_load = atomic64_xchg(&cfs_rq->removed_load, 0); | 1521 | unsigned long removed_load; |
1522 | removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0); | ||
1522 | subtract_blocked_load_contrib(cfs_rq, removed_load); | 1523 | subtract_blocked_load_contrib(cfs_rq, removed_load); |
1523 | } | 1524 | } |
1524 | 1525 | ||
@@ -3480,7 +3481,8 @@ migrate_task_rq_fair(struct task_struct *p, int next_cpu) | |||
3480 | */ | 3481 | */ |
3481 | if (se->avg.decay_count) { | 3482 | if (se->avg.decay_count) { |
3482 | se->avg.decay_count = -__synchronize_entity_decay(se); | 3483 | se->avg.decay_count = -__synchronize_entity_decay(se); |
3483 | atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load); | 3484 | atomic_long_add(se->avg.load_avg_contrib, |
3485 | &cfs_rq->removed_load); | ||
3484 | } | 3486 | } |
3485 | } | 3487 | } |
3486 | #endif /* CONFIG_SMP */ | 3488 | #endif /* CONFIG_SMP */ |
@@ -5942,7 +5944,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq) | |||
5942 | #endif | 5944 | #endif |
5943 | #ifdef CONFIG_SMP | 5945 | #ifdef CONFIG_SMP |
5944 | atomic64_set(&cfs_rq->decay_counter, 1); | 5946 | atomic64_set(&cfs_rq->decay_counter, 1); |
5945 | atomic64_set(&cfs_rq->removed_load, 0); | 5947 | atomic_long_set(&cfs_rq->removed_load, 0); |
5946 | #endif | 5948 | #endif |
5947 | } | 5949 | } |
5948 | 5950 | ||
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 5585eb25e9a3..705991906fbe 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
@@ -278,8 +278,9 @@ struct cfs_rq { | |||
278 | * the FAIR_GROUP_SCHED case). | 278 | * the FAIR_GROUP_SCHED case). |
279 | */ | 279 | */ |
280 | unsigned long runnable_load_avg, blocked_load_avg; | 280 | unsigned long runnable_load_avg, blocked_load_avg; |
281 | atomic64_t decay_counter, removed_load; | 281 | atomic64_t decay_counter; |
282 | u64 last_decay; | 282 | u64 last_decay; |
283 | atomic_long_t removed_load; | ||
283 | 284 | ||
284 | #ifdef CONFIG_FAIR_GROUP_SCHED | 285 | #ifdef CONFIG_FAIR_GROUP_SCHED |
285 | /* Required to track per-cpu representation of a task_group */ | 286 | /* Required to track per-cpu representation of a task_group */ |