author    Rusty Russell <rusty@rustcorp.com.au>    2008-11-24 11:05:05 -0500
committer Ingo Molnar <mingo@elte.hu>              2008-11-24 11:51:18 -0500
commit    c6c4927b22a3514c6660f0e72c78716226bd3cc8
tree      ee7fc9031eb14ac1c9d2f0732950b4faae2b6245 /kernel/sched_rt.c
parent    6a7b3dc3440f7b5a9b67594af01ed562cdeb41e4
sched: convert struct root_domain to cpumask_var_t.
Impact: (future) size reduction for large NR_CPUS.
Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK is set) saves
space when nr_cpu_ids is small but CONFIG_NR_CPUS is large. For
!CONFIG_CPUMASK_OFFSTACK, cpumask_var_t is simply a struct cpumask, so
nothing is dynamically allocated.
def_root_domain is static, so its masks are initialized with
alloc_bootmem_cpumask_var; root domains created after boot use
alloc_cpumask_var.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
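For readers new to the API this message references, here is a minimal sketch of
the allocation pattern, assuming only the cpumask interfaces named above
(alloc_cpumask_var, alloc_bootmem_cpumask_var, free_cpumask_var). The struct
and function names below (root_domain_sketch, alloc_rd_masks) are illustrative
stand-ins, not the actual sched.c code changed by the companion hunks of this
commit:

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/slab.h>

	/*
	 * With CONFIG_CPUMASK_OFFSTACK, cpumask_var_t is a struct cpumask *
	 * and must be allocated before use; without it, cpumask_var_t is a
	 * one-element struct cpumask array and alloc_cpumask_var() reduces
	 * to "always succeeds".
	 */
	struct root_domain_sketch {	/* stand-in for struct root_domain */
		cpumask_var_t span;
		cpumask_var_t online;
		cpumask_var_t rto_mask;
	};

	static int alloc_rd_masks(struct root_domain_sketch *rd)
	{
		if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
			goto fail;
		if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
			goto free_span;
		if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
			goto free_online;
		return 0;

	free_online:
		free_cpumask_var(rd->online);
	free_span:
		free_cpumask_var(rd->span);
	fail:
		return -ENOMEM;
	}

	/*
	 * def_root_domain is static and set up before the slab allocator
	 * exists, so its masks come from the bootmem variant instead:
	 *	alloc_bootmem_cpumask_var(&def_root_domain.span);
	 */

The payoff is that the containing structure no longer embeds NR_CPUS-bit masks;
with CONFIG_CPUMASK_OFFSTACK=y each mask is sized to nr_cpu_ids at runtime.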
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 4cd813abc23a..820fc422c6df 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -15,7 +15,7 @@ static inline void rt_set_overload(struct rq *rq)
 	if (!rq->online)
 		return;
 
-	cpu_set(rq->cpu, rq->rd->rto_mask);
+	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
 	/*
 	 * Make sure the mask is visible before we set
 	 * the overload count. That is checked to determine
@@ -34,7 +34,7 @@ static inline void rt_clear_overload(struct rq *rq)
 
 	/* the order here really doesn't matter */
 	atomic_dec(&rq->rd->rto_count);
-	cpu_clear(rq->cpu, rq->rd->rto_mask);
+	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 }
 
 static void update_rt_migration(struct rq *rq)
@@ -139,14 +139,14 @@ static int rt_se_boosted(struct sched_rt_entity *rt_se)
 }
 
 #ifdef CONFIG_SMP
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
 {
 	return cpu_rq(smp_processor_id())->rd->span;
 }
 #else
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
 {
-	return cpu_online_map;
+	return cpu_online_mask;
 }
 #endif
 
@@ -212,9 +212,9 @@ static inline int rt_rq_throttled(struct rt_rq *rt_rq)
 	return rt_rq->rt_throttled;
 }
 
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
 {
-	return cpu_online_map;
+	return cpu_online_mask;
 }
 
 static inline
@@ -241,11 +241,11 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 	int i, weight, more = 0;
 	u64 rt_period;
 
-	weight = cpus_weight(rd->span);
+	weight = cpumask_weight(rd->span);
 
 	spin_lock(&rt_b->rt_runtime_lock);
 	rt_period = ktime_to_ns(rt_b->rt_period);
-	for_each_cpu_mask_nr(i, rd->span) {
+	for_each_cpu(i, rd->span) {
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 		s64 diff;
 
@@ -324,7 +324,7 @@ static void __disable_runtime(struct rq *rq)
 	/*
 	 * Greedy reclaim, take back as much as we can.
 	 */
-	for_each_cpu_mask(i, rd->span) {
+	for_each_cpu(i, rd->span) {
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 		s64 diff;
 
@@ -429,13 +429,13 @@ static inline int balance_runtime(struct rt_rq *rt_rq)
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {
 	int i, idle = 1;
-	cpumask_t span;
+	const struct cpumask *span;
 
 	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
 		return 1;
 
 	span = sched_rt_period_mask();
-	for_each_cpu_mask(i, span) {
+	for_each_cpu(i, span) {
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 		struct rq *rq = rq_of_rt_rq(rt_rq);
@@ -1181,7 +1181,7 @@ static int pull_rt_task(struct rq *this_rq)
 
 	next = pick_next_task_rt(this_rq);
 
-	for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
+	for_each_cpu(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;
 
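The pattern repeated across these hunks: cpu_*/cpus_* operations on a
cpumask_t value become cpumask_* operations on a struct cpumask pointer, and
for_each_cpu_mask{,_nr}() collapses into for_each_cpu(). A minimal sketch of
the new calling convention; the helper below is hypothetical, while
for_each_cpu() and cpumask_weight() are the real interfaces used in the diff:

	#include <linux/cpumask.h>

	/* Hypothetical helper: count the CPUs in a span, post-conversion. */
	static int count_span_cpus(const struct cpumask *span)
	{
		int i, n = 0;

		/*
		 * for_each_cpu() takes a const struct cpumask *, so a
		 * pointer like rd->span is passed straight through instead
		 * of copying an NR_CPUS-sized cpumask by value.
		 */
		for_each_cpu(i, span)
			n++;

		return n;	/* same result as cpumask_weight(span) */
	}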