diff options
Diffstat (limited to 'kernel/sched_cpupri.c')
-rw-r--r-- | kernel/sched_cpupri.c | 58 |
1 file changed, 38 insertions, 20 deletions
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c index 1e00bfacf9b8..0f052fc674d5 100644 --- a/kernel/sched_cpupri.c +++ b/kernel/sched_cpupri.c | |||
@@ -55,7 +55,7 @@ static int convert_prio(int prio) | |||
55 | * cpupri_find - find the best (lowest-pri) CPU in the system | 55 | * cpupri_find - find the best (lowest-pri) CPU in the system |
56 | * @cp: The cpupri context | 56 | * @cp: The cpupri context |
57 | * @p: The task | 57 | * @p: The task |
58 | * @lowest_mask: A mask to fill in with selected CPUs | 58 | * @lowest_mask: A mask to fill in with selected CPUs (or NULL) |
59 | * | 59 | * |
60 | * Note: This function returns the recommended CPUs as calculated during the | 60 | * Note: This function returns the recommended CPUs as calculated during the |
61 | * current invocation. By the time the call returns, the CPUs may have in | 61 | * current invocation. By the time the call returns, the CPUs may have in |
@@ -81,7 +81,21 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p, | |||
81 | if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) | 81 | if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) |
82 | continue; | 82 | continue; |
83 | 83 | ||
84 | cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); | 84 | if (lowest_mask) { |
85 | cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); | ||
86 | |||
87 | /* | ||
88 | * We have to ensure that we have at least one bit | ||
89 | * still set in the array, since the map could have | ||
90 | * been concurrently emptied between the first and | ||
91 | * second reads of vec->mask. If we hit this | ||
92 | * condition, simply act as though we never hit this | ||
93 | * priority level and continue on. | ||
94 | */ | ||
95 | if (cpumask_any(lowest_mask) >= nr_cpu_ids) | ||
96 | continue; | ||
97 | } | ||
98 | |||
85 | return 1; | 99 | return 1; |
86 | } | 100 | } |
87 | 101 | ||
@@ -113,21 +127,11 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) | |||
113 | 127 | ||
114 | /* | 128 | /* |
115 | * If the cpu was currently mapped to a different value, we | 129 | * If the cpu was currently mapped to a different value, we |
116 | * first need to unmap the old value | 130 | * need to map it to the new value then remove the old value. |
131 | * Note, we must add the new value first, otherwise we risk the | ||
132 | * cpu being cleared from pri_active, and this cpu could be | ||
133 | * missed for a push or pull. | ||
117 | */ | 134 | */ |
118 | if (likely(oldpri != CPUPRI_INVALID)) { | ||
119 | struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri]; | ||
120 | |||
121 | spin_lock_irqsave(&vec->lock, flags); | ||
122 | |||
123 | vec->count--; | ||
124 | if (!vec->count) | ||
125 | clear_bit(oldpri, cp->pri_active); | ||
126 | cpumask_clear_cpu(cpu, vec->mask); | ||
127 | |||
128 | spin_unlock_irqrestore(&vec->lock, flags); | ||
129 | } | ||
130 | |||
131 | if (likely(newpri != CPUPRI_INVALID)) { | 135 | if (likely(newpri != CPUPRI_INVALID)) { |
132 | struct cpupri_vec *vec = &cp->pri_to_cpu[newpri]; | 136 | struct cpupri_vec *vec = &cp->pri_to_cpu[newpri]; |
133 | 137 | ||
@@ -140,6 +144,18 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) | |||
140 | 144 | ||
141 | spin_unlock_irqrestore(&vec->lock, flags); | 145 | spin_unlock_irqrestore(&vec->lock, flags); |
142 | } | 146 | } |
147 | if (likely(oldpri != CPUPRI_INVALID)) { | ||
148 | struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri]; | ||
149 | |||
150 | spin_lock_irqsave(&vec->lock, flags); | ||
151 | |||
152 | vec->count--; | ||
153 | if (!vec->count) | ||
154 | clear_bit(oldpri, cp->pri_active); | ||
155 | cpumask_clear_cpu(cpu, vec->mask); | ||
156 | |||
157 | spin_unlock_irqrestore(&vec->lock, flags); | ||
158 | } | ||
143 | 159 | ||
144 | *currpri = newpri; | 160 | *currpri = newpri; |
145 | } | 161 | } |
@@ -151,10 +167,14 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) | |||
151 | * | 167 | * |
152 | * Returns: -ENOMEM if memory fails. | 168 | * Returns: -ENOMEM if memory fails. |
153 | */ | 169 | */ |
154 | int __init_refok cpupri_init(struct cpupri *cp, bool bootmem) | 170 | int cpupri_init(struct cpupri *cp, bool bootmem) |
155 | { | 171 | { |
172 | gfp_t gfp = GFP_KERNEL; | ||
156 | int i; | 173 | int i; |
157 | 174 | ||
175 | if (bootmem) | ||
176 | gfp = GFP_NOWAIT; | ||
177 | |||
158 | memset(cp, 0, sizeof(*cp)); | 178 | memset(cp, 0, sizeof(*cp)); |
159 | 179 | ||
160 | for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) { | 180 | for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) { |
@@ -162,9 +182,7 @@ int __init_refok cpupri_init(struct cpupri *cp, bool bootmem) | |||
162 | 182 | ||
163 | spin_lock_init(&vec->lock); | 183 | spin_lock_init(&vec->lock); |
164 | vec->count = 0; | 184 | vec->count = 0; |
165 | if (bootmem) | 185 | if (!zalloc_cpumask_var(&vec->mask, gfp)) |
166 | alloc_bootmem_cpumask_var(&vec->mask); | ||
167 | else if (!alloc_cpumask_var(&vec->mask, GFP_KERNEL)) | ||
168 | goto cleanup; | 186 | goto cleanup; |
169 | } | 187 | } |
170 | 188 | ||