author     Peter Zijlstra <peterz@infradead.org>    2014-05-14 10:13:56 -0400
committer  Ingo Molnar <mingo@kernel.org>           2014-05-22 04:21:28 -0400
commit     944770ab54babaef29d9d1dc8189898b3ee8afcf (patch)
tree       7bc0c6c9424b46601c4fc6c5d693b3601fbf06ef /kernel/sched/cpudeadline.c
parent     b0827819b0da4acfbc1df1e05edcf50efd07cbd1 (diff)
sched/deadline: Replace NR_CPUS arrays
Tejun reported that his resume was failing due to order-3 allocations
from sched_domain building.
Replace the NR_CPUS arrays in there with a dynamically allocated
array.
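As a rough sketch of the shape of this change: the cpudl_item/cpudl definitions
live in cpudeadline.h, which is outside this diffstat, so the layout below is
inferred from the uses in the diff rather than quoted from the header.

	/* before: two fixed-size per-CPU arrays embedded in struct cpudl */
	struct cpudl {
		...
		int cpu_to_idx[NR_CPUS];		/* heap index of each CPU */
		struct cpudl_item elements[NR_CPUS];	/* heap of per-CPU deadlines */
	};

	/* after: one array sized by nr_cpu_ids, allocated in cpudl_init() */
	struct cpudl_item {
		u64 dl;
		int cpu;
		int idx;	/* per-CPU heap index, replaces cpu_to_idx[] */
	};

	struct cpudl {
		...
		struct cpudl_item *elements;	/* kcalloc(nr_cpu_ids, ...) */
	};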
Reported-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Juri Lelli <juri.lelli@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-kat4gl1m5a6dwy6nzuqox45e@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/cpudeadline.c')
-rw-r--r--  kernel/sched/cpudeadline.c | 33 ++++++++++++++++++++++++---------
1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index ab001b5d5048..bd95963dae80 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -13,6 +13,7 @@
 
 #include <linux/gfp.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include "cpudeadline.h"
 
 static inline int parent(int i)
@@ -39,8 +40,10 @@ static void cpudl_exchange(struct cpudl *cp, int a, int b)
 {
 	int cpu_a = cp->elements[a].cpu, cpu_b = cp->elements[b].cpu;
 
-	swap(cp->elements[a], cp->elements[b]);
-	swap(cp->cpu_to_idx[cpu_a], cp->cpu_to_idx[cpu_b]);
+	swap(cp->elements[a].cpu, cp->elements[b].cpu);
+	swap(cp->elements[a].dl , cp->elements[b].dl );
+
+	swap(cp->elements[cpu_a].idx, cp->elements[cpu_b].idx);
 }
 
 static void cpudl_heapify(struct cpudl *cp, int idx)
@@ -140,7 +143,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 	WARN_ON(!cpu_present(cpu));
 
 	raw_spin_lock_irqsave(&cp->lock, flags);
-	old_idx = cp->cpu_to_idx[cpu];
+	old_idx = cp->elements[cpu].idx;
 	if (!is_valid) {
 		/* remove item */
 		if (old_idx == IDX_INVALID) {
@@ -155,8 +158,8 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 		cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
 		cp->elements[old_idx].cpu = new_cpu;
 		cp->size--;
-		cp->cpu_to_idx[new_cpu] = old_idx;
-		cp->cpu_to_idx[cpu] = IDX_INVALID;
+		cp->elements[new_cpu].idx = old_idx;
+		cp->elements[cpu].idx = IDX_INVALID;
 		while (old_idx > 0 && dl_time_before(
 				cp->elements[parent(old_idx)].dl,
 				cp->elements[old_idx].dl)) {
@@ -173,7 +176,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 		cp->size++;
 		cp->elements[cp->size - 1].dl = 0;
 		cp->elements[cp->size - 1].cpu = cpu;
-		cp->cpu_to_idx[cpu] = cp->size - 1;
+		cp->elements[cpu].idx = cp->size - 1;
 		cpudl_change_key(cp, cp->size - 1, dl);
 		cpumask_clear_cpu(cpu, cp->free_cpus);
 	} else {
@@ -195,10 +198,21 @@ int cpudl_init(struct cpudl *cp)
 	memset(cp, 0, sizeof(*cp));
 	raw_spin_lock_init(&cp->lock);
 	cp->size = 0;
-	for (i = 0; i < NR_CPUS; i++)
-		cp->cpu_to_idx[i] = IDX_INVALID;
-	if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL))
+
+	cp->elements = kcalloc(nr_cpu_ids,
+			       sizeof(struct cpudl_item),
+			       GFP_KERNEL);
+	if (!cp->elements)
+		return -ENOMEM;
+
+	if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
+		kfree(cp->elements);
 		return -ENOMEM;
+	}
+
+	for_each_possible_cpu(i)
+		cp->elements[i].idx = IDX_INVALID;
+
 	cpumask_setall(cp->free_cpus);
 
 	return 0;
@@ -211,4 +225,5 @@ int cpudl_init(struct cpudl *cp)
 void cpudl_cleanup(struct cpudl *cp)
 {
 	free_cpumask_var(cp->free_cpus);
+	kfree(cp->elements);
 }
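Since cp->elements is now allocated at runtime, teardown has to free it:
cpudl_cleanup() gains the kfree() above, and the init error path frees the
array when the free_cpus cpumask allocation fails. A minimal usage sketch
under those assumptions (the caller names are made up for illustration; in
the kernel these calls are paired in the root-domain setup and teardown
paths):

	/* hypothetical caller pairing cpudl_init() with cpudl_cleanup() */
	static int example_dl_setup(struct cpudl *cp)
	{
		int err = cpudl_init(cp);	/* kcalloc()s cp->elements */

		if (err)
			return err;		/* init already freed its own allocations */

		/* ... use cpudl_set() / cpudl_find() as before ... */
		return 0;
	}

	static void example_dl_teardown(struct cpudl *cp)
	{
		cpudl_cleanup(cp);	/* frees free_cpus and cp->elements */
	}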