diff options
-rw-r--r-- kernel/sched_cpupri.c | 5
-rw-r--r-- kernel/sched_rt.c | 15
2 files changed, 7 insertions, 13 deletions
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 1e00bfacf9b8..cdd3c89574cd 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -55,7 +55,7 @@ static int convert_prio(int prio)
55 | * cpupri_find - find the best (lowest-pri) CPU in the system | 55 | * cpupri_find - find the best (lowest-pri) CPU in the system |
56 | * @cp: The cpupri context | 56 | * @cp: The cpupri context |
57 | * @p: The task | 57 | * @p: The task |
58 | * @lowest_mask: A mask to fill in with selected CPUs | 58 | * @lowest_mask: A mask to fill in with selected CPUs (or NULL) |
59 | * | 59 | * |
60 | * Note: This function returns the recommended CPUs as calculated during the | 60 | * Note: This function returns the recommended CPUs as calculated during the |
61 | * current invokation. By the time the call returns, the CPUs may have in | 61 | * current invokation. By the time the call returns, the CPUs may have in |
@@ -81,7 +81,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
81 | if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) | 81 | if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) |
82 | continue; | 82 | continue; |
83 | 83 | ||
84 | cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); | 84 | if (lowest_mask) |
85 | cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); | ||
85 | return 1; | 86 | return 1; |
86 | } | 87 | } |
87 | 88 | ||
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index bac1061cea2f..fbec5a58ff10 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -805,20 +805,15 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
805 | 805 | ||
806 | static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) | 806 | static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) |
807 | { | 807 | { |
808 | cpumask_var_t mask; | ||
809 | |||
810 | if (rq->curr->rt.nr_cpus_allowed == 1) | 808 | if (rq->curr->rt.nr_cpus_allowed == 1) |
811 | return; | 809 | return; |
812 | 810 | ||
813 | if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) | ||
814 | return; | ||
815 | |||
816 | if (p->rt.nr_cpus_allowed != 1 | 811 | if (p->rt.nr_cpus_allowed != 1 |
817 | && cpupri_find(&rq->rd->cpupri, p, mask)) | 812 | && cpupri_find(&rq->rd->cpupri, p, NULL)) |
818 | goto free; | 813 | return; |
819 | 814 | ||
820 | if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask)) | 815 | if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) |
821 | goto free; | 816 | return; |
822 | 817 | ||
823 | /* | 818 | /* |
824 | * There appears to be other cpus that can accept | 819 | * There appears to be other cpus that can accept |
@@ -827,8 +822,6 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
827 | */ | 822 | */ |
828 | requeue_task_rt(rq, p, 1); | 823 | requeue_task_rt(rq, p, 1); |
829 | resched_task(rq->curr); | 824 | resched_task(rq->curr); |
830 | free: | ||
831 | free_cpumask_var(mask); | ||
832 | } | 825 | } |
833 | 826 | ||
834 | #endif /* CONFIG_SMP */ | 827 | #endif /* CONFIG_SMP */ |