diff options
author | Peter Zijlstra <peterz@infradead.org> | 2013-08-19 09:22:57 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2013-09-02 02:27:35 -0400 |
commit | 147c5fc2bad780d8093b547f2baa204e78107faf (patch) | |
tree | d3edc50c11c47dc03d405275adffd116d5d5c480 /kernel/sched | |
parent | 56cf515b4b1567c4e8fa9926175b40c66b9ec472 (diff) |
sched/fair: Shrink sg_lb_stats and play memset games
We can shrink sg_lb_stats because rq::nr_running is an unsigned int
and cpu numbers are 'int'.
Before:
sgs: /* size: 72, cachelines: 2, members: 10 */
sds: /* size: 184, cachelines: 3, members: 7 */
After:
sgs: /* size: 56, cachelines: 1, members: 10 */
sds: /* size: 152, cachelines: 3, members: 7 */
Further we can avoid clearing all of sds since we do a total
clear/assignment of sg_stats in update_sg_lb_stats() with exception of
busiest_stat.avg_load which is referenced in update_sd_pick_busiest().
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-0klzmz9okll8wc0nsudguc9p@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r-- | kernel/sched/fair.c | 33 |
1 file changed, 26 insertions, 7 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 2da80a55827b..4c6a8a5a789a 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -4237,12 +4237,12 @@ static unsigned long task_h_load(struct task_struct *p) | |||
4237 | struct sg_lb_stats { | 4237 | struct sg_lb_stats { |
4238 | unsigned long avg_load; /*Avg load across the CPUs of the group */ | 4238 | unsigned long avg_load; /*Avg load across the CPUs of the group */ |
4239 | unsigned long group_load; /* Total load over the CPUs of the group */ | 4239 | unsigned long group_load; /* Total load over the CPUs of the group */ |
4240 | unsigned long sum_nr_running; /* Nr tasks running in the group */ | ||
4241 | unsigned long sum_weighted_load; /* Weighted load of group's tasks */ | 4240 | unsigned long sum_weighted_load; /* Weighted load of group's tasks */ |
4242 | unsigned long load_per_task; | 4241 | unsigned long load_per_task; |
4243 | unsigned long group_capacity; | 4242 | unsigned int sum_nr_running; /* Nr tasks running in the group */ |
4244 | unsigned long idle_cpus; | 4243 | unsigned int group_capacity; |
4245 | unsigned long group_weight; | 4244 | unsigned int idle_cpus; |
4245 | unsigned int group_weight; | ||
4246 | int group_imb; /* Is there an imbalance in the group ? */ | 4246 | int group_imb; /* Is there an imbalance in the group ? */ |
4247 | int group_has_capacity; /* Is there extra capacity in the group? */ | 4247 | int group_has_capacity; /* Is there extra capacity in the group? */ |
4248 | }; | 4248 | }; |
@@ -4258,10 +4258,29 @@ struct sd_lb_stats { | |||
4258 | unsigned long total_pwr; /* Total power of all groups in sd */ | 4258 | unsigned long total_pwr; /* Total power of all groups in sd */ |
4259 | unsigned long avg_load; /* Average load across all groups in sd */ | 4259 | unsigned long avg_load; /* Average load across all groups in sd */ |
4260 | 4260 | ||
4261 | struct sg_lb_stats local_stat; /* Statistics of the local group */ | ||
4262 | struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */ | 4261 | struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */ |
4262 | struct sg_lb_stats local_stat; /* Statistics of the local group */ | ||
4263 | }; | 4263 | }; |
4264 | 4264 | ||
4265 | static inline void init_sd_lb_stats(struct sd_lb_stats *sds) | ||
4266 | { | ||
4267 | /* | ||
4268 | * Skimp on the clearing to avoid duplicate work. We can avoid clearing | ||
4269 | * local_stat because update_sg_lb_stats() does a full clear/assignment. | ||
4270 | * We must however clear busiest_stat::avg_load because | ||
4271 | * update_sd_pick_busiest() reads this before assignment. | ||
4272 | */ | ||
4273 | *sds = (struct sd_lb_stats){ | ||
4274 | .busiest = NULL, | ||
4275 | .local = NULL, | ||
4276 | .total_load = 0UL, | ||
4277 | .total_pwr = 0UL, | ||
4278 | .busiest_stat = { | ||
4279 | .avg_load = 0UL, | ||
4280 | }, | ||
4281 | }; | ||
4282 | } | ||
4283 | |||
4265 | /** | 4284 | /** |
4266 | * get_sd_load_idx - Obtain the load index for a given sched domain. | 4285 | * get_sd_load_idx - Obtain the load index for a given sched domain. |
4267 | * @sd: The sched_domain whose load_idx is to be obtained. | 4286 | * @sd: The sched_domain whose load_idx is to be obtained. |
@@ -4615,7 +4634,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, | |||
4615 | */ | 4634 | */ |
4616 | if (prefer_sibling && !local_group && | 4635 | if (prefer_sibling && !local_group && |
4617 | sds->local && sds->local_stat.group_has_capacity) | 4636 | sds->local && sds->local_stat.group_has_capacity) |
4618 | sgs->group_capacity = min(sgs->group_capacity, 1UL); | 4637 | sgs->group_capacity = min(sgs->group_capacity, 1U); |
4619 | 4638 | ||
4620 | /* Now, start updating sd_lb_stats */ | 4639 | /* Now, start updating sd_lb_stats */ |
4621 | sds->total_load += sgs->group_load; | 4640 | sds->total_load += sgs->group_load; |
@@ -4846,7 +4865,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env) | |||
4846 | struct sg_lb_stats *local, *busiest; | 4865 | struct sg_lb_stats *local, *busiest; |
4847 | struct sd_lb_stats sds; | 4866 | struct sd_lb_stats sds; |
4848 | 4867 | ||
4849 | memset(&sds, 0, sizeof(sds)); | 4868 | init_sd_lb_stats(&sds); |
4850 | 4869 | ||
4851 | /* | 4870 | /* |
4852 | * Compute the various statistics relavent for load balancing at | 4871 | * Compute the various statistics relavent for load balancing at |