Diffstat (limited to 'kernel/sched_rt.c')
 kernel/sched_rt.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 1f0e99d1a8ce..fb3964579a8a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -923,7 +923,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
+	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
 	    (p->rt.nr_cpus_allowed > 1))
 		return 1;
 	return 0;
@@ -982,7 +982,7 @@ static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
 static int find_lowest_rq(struct task_struct *task)
 {
 	struct sched_domain *sd;
-	cpumask_t *lowest_mask = __get_cpu_var(local_cpu_mask);
+	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu      = task_cpu(task);
 
@@ -997,7 +997,7 @@ static int find_lowest_rq(struct task_struct *task)
 	 * I guess we might want to change cpupri_find() to ignore those
 	 * in the first place.
 	 */
-	cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);
+	cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
 
 	/*
 	 * At this point we have built a mask of cpus representing the
@@ -1007,7 +1007,7 @@ static int find_lowest_rq(struct task_struct *task)
 	 * We prioritize the last cpu that the task executed on since
 	 * it is most likely cache-hot in that location.
 	 */
-	if (cpu_isset(cpu, *lowest_mask))
+	if (cpumask_test_cpu(cpu, lowest_mask))
 		return cpu;
 
 	/*
@@ -1064,8 +1064,8 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 		 * Also make sure that it wasn't scheduled on its rq.
 		 */
 		if (unlikely(task_rq(task) != rq ||
-			     !cpu_isset(lowest_rq->cpu,
-					task->cpus_allowed) ||
+			     !cpumask_test_cpu(lowest_rq->cpu,
+					       &task->cpus_allowed) ||
 			     task_running(rq, task) ||
 			     !task->se.on_rq)) {
 
@@ -1315,9 +1315,9 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 }
 
 static void set_cpus_allowed_rt(struct task_struct *p,
-				const cpumask_t *new_mask)
+				const struct cpumask *new_mask)
 {
-	int weight = cpus_weight(*new_mask);
+	int weight = cpumask_weight(new_mask);
 
 	BUG_ON(!rt_task(p));
 
@@ -1338,7 +1338,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 		update_rt_migration(rq);
 	}
 
-	p->cpus_allowed = *new_mask;
+	cpumask_copy(&p->cpus_allowed, new_mask);
 	p->rt.nr_cpus_allowed = weight;
 }
 
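For reference, every hunk above applies the same conversion: the old operators act on cpumask_t values, while their replacements act on struct cpumask pointers. A minimal sketch of the mapping follows; the helper function and its dst/src/cpu parameters are illustrative only, not part of this patch:

#include <linux/cpumask.h>

/*
 * Old-style -> new-style cpumask operators, one example per conversion
 * made in this diff.
 */
static void cpumask_api_sketch(struct cpumask *dst,
			       const struct cpumask *src, int cpu)
{
	int set = cpumask_test_cpu(cpu, src);	/* was: cpu_isset(cpu, *src) */
	int w   = cpumask_weight(src);		/* was: cpus_weight(*src) */

	cpumask_and(dst, dst, src);		/* was: cpus_and(*dst, *dst, *src) */
	cpumask_copy(dst, src);			/* was: *dst = *src */

	(void)set;
	(void)w;
}

The pointer-based calls avoid copying whole cpumask_t structures by value, which paves the way for off-stack cpumask_var_t allocations on configurations with large NR_CPUS.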