author     Rusty Russell <rusty@rustcorp.com.au>    2008-11-24 11:05:13 -0500
committer  Ingo Molnar <mingo@elte.hu>              2008-11-24 11:52:35 -0500
commit     0e3900e6d3b04c44737ebc505604dcd8ed30e354 (patch)
tree       1c791c57282972db3181d4fb1e7c98c5948f96c2 /kernel
parent     24600ce89a819a8f2fb4fd69fd777218a82ade20 (diff)
sched: convert local_cpu_mask to cpumask_var_t.
Impact: (future) size reduction for large NR_CPUS.

Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK) saves space
for small nr_cpu_ids but big CONFIG_NR_CPUS.  cpumask_var_t is just a
struct cpumask for !CONFIG_CPUMASK_OFFSTACK.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
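For context, here is a minimal sketch of the two representations behind
cpumask_var_t that the message alludes to. It is paraphrased from
include/linux/cpumask.h of this era, so treat the exact spelling as
illustrative rather than authoritative:

#ifdef CONFIG_CPUMASK_OFFSTACK
/*
 * The mask is allocated at runtime and can be sized for nr_cpu_ids
 * rather than the compile-time CONFIG_NR_CPUS -- the space saving the
 * message refers to.
 */
typedef struct cpumask *cpumask_var_t;
#else
/*
 * No allocation at all: a one-element array, so that using the variable
 * in an expression still decays to a struct cpumask *.
 */
typedef struct cpumask cpumask_var_t[1];
#endif

Either way, the value obtained from __get_cpu_var() is already a pointer,
which is why the '&' disappears in the find_lowest_rq() hunk below.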
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c     |  1
-rw-r--r--  kernel/sched_rt.c  | 13
2 files changed, 12 insertions, 2 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 94fa333c1e7c..f2be61870030 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8018,6 +8018,7 @@ void __init sched_init_smp(void)
         free_cpumask_var(non_isolated_cpus);
 
         alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
+        init_sched_rt_class();
 }
 #else
 void __init sched_init_smp(void)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 1fa13624293e..1f0e99d1a8ce 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -962,7 +962,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
         return next;
 }
 
-static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
+static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
 {
@@ -982,7 +982,7 @@ static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
 static int find_lowest_rq(struct task_struct *task)
 {
         struct sched_domain *sd;
-        cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
+        cpumask_t *lowest_mask = __get_cpu_var(local_cpu_mask);
         int this_cpu = smp_processor_id();
         int cpu = task_cpu(task);
 
@@ -1551,3 +1551,12 @@ static void print_rt_stats(struct seq_file *m, int cpu)
         rcu_read_unlock();
 }
 #endif /* CONFIG_SCHED_DEBUG */
+
+/* Note that this is never called for !SMP, but that's OK. */
+static inline void init_sched_rt_class(void)
+{
+        unsigned int i;
+
+        for_each_possible_cpu(i)
+                alloc_cpumask_var(&per_cpu(local_cpu_mask, i), GFP_KERNEL);
+}
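The new init_sched_rt_class() only does real work when
CONFIG_CPUMASK_OFFSTACK is set: in that configuration the per-CPU
local_cpu_mask is a bare pointer that must be backed by memory before
find_lowest_rq() dereferences it, which is why sched_init_smp() now calls
it in the sched.c hunk above. Without CONFIG_CPUMASK_OFFSTACK the loop is
effectively a no-op. A rough sketch of alloc_cpumask_var()'s two
behaviours, simplified from lib/cpumask.c and include/linux/cpumask.h of
this era (an approximation, not the verbatim kernel code):

#ifdef CONFIG_CPUMASK_OFFSTACK
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
        /* Real allocation, sized for the runtime cpumask width. */
        *mask = kmalloc(cpumask_size(), flags);
        return *mask != NULL;
}
#else
static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
        /* The storage is the struct cpumask itself; nothing to allocate. */
        return true;
}
#endif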