author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2012-04-26 07:12:27 -0400
committer  Ingo Molnar <mingo@kernel.org>             2012-05-09 09:00:49 -0400
commit     c82513e513556a04f81aa511cd890acd23349c48 (patch)
tree       dbccd379fffd75cd87513e4dd0e91ac597628382 /kernel/sched
parent     ad7687dde8780a0d618a3e3b5a62bb383696fc22 (diff)
sched: Change rq->nr_running to unsigned int
Since there's a PID space limit of 30 bits (see
futex.h:FUTEX_TID_MASK) and allocating that many tasks (assuming a
lower bound of 2 pages per task) would still take 8T of memory, it
seems reasonable to say that unsigned int is sufficient for
rq->nr_running.

When we do get anywhere near that number of tasks I suspect other
things would go funny; load-balancer load computations would really
need to be hoisted to 128 bits, etc.

So save a few bytes and convert rq->nr_running and friends to
unsigned int.
Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-y3tvyszjdmbibade5bw8zl81@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
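
The 8T figure in the message above is easy to verify. A back-of-the-envelope
check in standalone C (illustrative only, not part of the patch; assumes
4 KiB pages):

    /* Check of the commit message's arithmetic: 2^30 tasks at a lower
     * bound of two 4 KiB pages each. Illustrative, not kernel code. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t max_tasks = 1ULL << 30;   /* FUTEX_TID_MASK leaves 30 bits of PID space */
        uint64_t per_task  = 2 * 4096;     /* assumed lower bound: 2 pages of 4 KiB each */
        uint64_t total     = max_tasks * per_task;

        /* 2^30 tasks * 8 KiB = 2^43 bytes = 8 TiB */
        printf("%llu TiB\n", (unsigned long long)(total >> 40));
        return 0;
    }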
Diffstat (limited to 'kernel/sched')
 kernel/sched/debug.c | 2 +-
 kernel/sched/fair.c  | 8 ++++----
 kernel/sched/sched.h | 6 +++---
 3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 09acaa15161d..31e4f61a1629 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -202,7 +202,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			SPLIT_NS(spread0));
 	SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
 			cfs_rq->nr_spread_over);
-	SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
+	SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_SMP
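
For context on the hunk above: with nr_running now an unsigned int, the
old "%ld" conversion would read the wrong width on 64-bit builds (gcc's
-Wformat flags the size mismatch), hence the switch to "%d". A minimal
userspace illustration, with plain printf standing in for the kernel's
SEQ_printf:

    /* Sketch of the format-specifier fix; not kernel code. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int nr_running = 3;   /* the field's new type */

        /* old: "%ld" expects a long -- a 64-bit read of a 32-bit
         * argument on LP64; new: "%d" matches the argument width */
        printf(" .%-30s: %d\n", "nr_running", nr_running);
        return 0;
    }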
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e9553640c1c3..678966ca393b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4447,10 +4447,10 @@ redo:
 		 * correctly treated as an imbalance.
 		 */
 		env.flags |= LBF_ALL_PINNED;
 		env.load_move = imbalance;
 		env.src_cpu = busiest->cpu;
 		env.src_rq = busiest;
-		env.loop_max = min_t(unsigned long, sysctl_sched_nr_migrate, busiest->nr_running);
+		env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
 
 more_balance:
 		local_irq_save(flags);
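
The hunk above also drops the min_t() cast. The kernel's min() insists on
matching argument types, so while busiest->nr_running was unsigned long it
had to be forced to a common type with the unsigned int
sysctl_sched_nr_migrate; once both sides are unsigned int, plain min()
works. A simplified userspace sketch of the idea (these stand-in macros
are not the kernel's actual implementations, which add strict type
checking):

    #include <stdio.h>

    /* simplified stand-ins for the kernel's min()/min_t() helpers */
    #define min(a, b)         ((a) < (b) ? (a) : (b))
    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        unsigned int sysctl_sched_nr_migrate = 32; /* unsigned int in the kernel */
        unsigned int nr_running = 100;             /* was unsigned long before the patch */

        /* before: mismatched types needed min_t(unsigned long, ...);
         * after: identical types make the widening cast unnecessary */
        printf("loop_max = %u\n", min(sysctl_sched_nr_migrate, nr_running));
        return 0;
    }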
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index fb3acba4d52e..7282e7b5f4c7 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -201,7 +201,7 @@ struct cfs_bandwidth { };
 /* CFS-related fields in a runqueue */
 struct cfs_rq {
 	struct load_weight load;
-	unsigned long nr_running, h_nr_running;
+	unsigned int nr_running, h_nr_running;
 
 	u64 exec_clock;
 	u64 min_vruntime;
@@ -279,7 +279,7 @@ static inline int rt_bandwidth_enabled(void)
 /* Real-Time classes' related field in a runqueue: */
 struct rt_rq {
 	struct rt_prio_array active;
-	unsigned long rt_nr_running;
+	unsigned int rt_nr_running;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 	struct {
 		int curr; /* highest queued rt task prio */
@@ -353,7 +353,7 @@ struct rq {
 	 * nr_running and cpu_load should be in the same cacheline because
 	 * remote CPUs use both these fields when doing load calculation.
 	 */
-	unsigned long nr_running;
+	unsigned int nr_running;
 #define CPU_LOAD_IDX_MAX 5
 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 	unsigned long last_load_update_tick;
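
Finally, a rough userspace check of the "save a few bytes" claim
(illustrative only; assumes an LP64 target, where unsigned long is 8 bytes
and unsigned int is 4, and mirrors just the two cfs_rq fields the patch
touches):

    #include <stdio.h>

    struct before { unsigned long nr_running, h_nr_running; };
    struct after  { unsigned int  nr_running, h_nr_running; };

    int main(void)
    {
        /* 16 bytes vs 8 bytes per cfs_rq on a 64-bit build */
        printf("before: %zu bytes, after: %zu bytes\n",
               sizeof(struct before), sizeof(struct after));
        return 0;
    }

Beyond the raw saving, shrinking rq->nr_running also helps the stated goal
of keeping it in the same cacheline as cpu_load for remote readers.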