| author | Rusty Russell <rusty@rustcorp.com.au> | 2008-11-24 11:05:14 -0500 |
| --- | --- | --- |
| committer | Ingo Molnar <mingo@elte.hu> | 2008-11-24 11:52:42 -0500 |
| commit | 96f874e26428ab5d2db681c100210c254775e154 | |
| tree | e18a6f0629ef17f2344f3691c8df4692ccb875fa /kernel/sched_rt.c | |
| parent | 0e3900e6d3b04c44737ebc505604dcd8ed30e354 | |
sched: convert remaining old-style cpumask operators
Impact: Trivial API conversion
NR_CPUS -> nr_cpu_ids
cpumask_t -> struct cpumask
sizeof(cpumask_t) -> cpumask_size()
cpumask_a = cpumask_b -> cpumask_copy(&cpumask_a, &cpumask_b)
cpu_set() -> cpumask_set_cpu()
first_cpu() -> cpumask_first()
cpumask_of_cpu() -> cpumask_of()
cpus_* -> cpumask_*
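For illustration only (not part of the original commit message): a minimal sketch of what these conversions look like at a call site. The helper name sketch_convert() and its arguments are hypothetical; the old value-based calls appear in the comments, the pointer-based replacements in the code.

#include <linux/kernel.h>
#include <linux/cpumask.h>

/* Hypothetical helper, not from this patch: old-style calls shown in
 * comments, new-style equivalents in the code below them. */
static void sketch_convert(struct cpumask *dst, const struct cpumask *src, int cpu)
{
	/* old: *dst = *src; (whole-struct assignment) */
	cpumask_copy(dst, src);

	/* old: cpu_set(cpu, *dst); */
	cpumask_set_cpu(cpu, dst);

	/* old: cpus_and(*dst, *dst, cpu_online_map); */
	cpumask_and(dst, dst, cpu_online_mask);

	/* old: first_cpu(*dst) compared against NR_CPUS */
	if (cpumask_first(dst) >= nr_cpu_ids)
		pr_debug("mask is empty\n");
}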
There are some FIXMEs where we wait for all archs to complete the infrastructure
(patches have been sent):
cpu_coregroup_map -> cpu_coregroup_mask
node_to_cpumask* -> cpumask_of_node
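Purely illustrative, assuming the post-conversion topology helpers the pending arch patches provide (cpumask_of_node() returning a const struct cpumask *): the old helpers hand back a full cpumask_t by value, so callers needed an on-stack copy.

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Hypothetical caller, not from this patch. */
static int count_node_cpus(int node)
{
	/* old: cpumask_t mask = node_to_cpumask(node);
	 *      return cpus_weight(mask);
	 */
	return cpumask_weight(cpumask_of_node(node));
}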
There is also one FIXME where we pass an array of cpumasks to
partition_sched_domains(): this implies knowing the definition of
'struct cpumask' and the size of a cpumask. This will be fixed in a
future patch.
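A minimal sketch of why that is awkward, with an arbitrary domain count of 2 and a hypothetical caller name: both the array declaration and the doms_new[i] indexing bake sizeof(struct cpumask) into the caller, which is what the conversion is trying to stop exposing.

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Hypothetical caller, not from this patch: declaring and indexing the
 * array requires the complete definition and size of struct cpumask. */
static struct cpumask doms_new[2];

static void sketch_repartition(void)
{
	cpumask_copy(&doms_new[0], cpu_online_mask);
	cpumask_clear(&doms_new[1]);
	partition_sched_domains(2, doms_new, NULL);
}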
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')
 kernel/sched_rt.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 1f0e99d1a8ce..fb3964579a8a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -923,7 +923,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
+	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
 	    (p->rt.nr_cpus_allowed > 1))
 		return 1;
 	return 0;
@@ -982,7 +982,7 @@ static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
 static int find_lowest_rq(struct task_struct *task)
 {
 	struct sched_domain *sd;
-	cpumask_t *lowest_mask = __get_cpu_var(local_cpu_mask);
+	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu      = task_cpu(task);
 
@@ -997,7 +997,7 @@ static int find_lowest_rq(struct task_struct *task)
 	 * I guess we might want to change cpupri_find() to ignore those
 	 * in the first place.
 	 */
-	cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);
+	cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
 
 	/*
 	 * At this point we have built a mask of cpus representing the
@@ -1007,7 +1007,7 @@ static int find_lowest_rq(struct task_struct *task)
 	 * We prioritize the last cpu that the task executed on since
 	 * it is most likely cache-hot in that location.
 	 */
-	if (cpu_isset(cpu, *lowest_mask))
+	if (cpumask_test_cpu(cpu, lowest_mask))
 		return cpu;
 
 	/*
@@ -1064,8 +1064,8 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			 * Also make sure that it wasn't scheduled on its rq.
 			 */
 			if (unlikely(task_rq(task) != rq ||
-				     !cpu_isset(lowest_rq->cpu,
-						task->cpus_allowed) ||
+				     !cpumask_test_cpu(lowest_rq->cpu,
+						       &task->cpus_allowed) ||
 				     task_running(rq, task) ||
 				     !task->se.on_rq)) {
 
@@ -1315,9 +1315,9 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 }
 
 static void set_cpus_allowed_rt(struct task_struct *p,
-				const cpumask_t *new_mask)
+				const struct cpumask *new_mask)
 {
-	int weight = cpus_weight(*new_mask);
+	int weight = cpumask_weight(new_mask);
 
 	BUG_ON(!rt_task(p));
 
@@ -1338,7 +1338,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 		update_rt_migration(rq);
 	}
 
-	p->cpus_allowed = *new_mask;
+	cpumask_copy(&p->cpus_allowed, new_mask);
 	p->rt.nr_cpus_allowed = weight;
 }
 