author    Thomas Gleixner <tglx@linutronix.de>  2010-09-27 14:02:56 -0400
committer Thomas Gleixner <tglx@linutronix.de>  2010-10-12 10:39:07 -0400
commit    aa99ec0f3f26bf2bcd0fa5176de93598427f1e5e (patch)
tree      bbba6c0fc1293f0bc854644aa177ea67306f8814 /kernel
parent    25ade601a0f97453c6f511ebfae9339e06a28d75 (diff)
genirq: Use sane sparse allocator
Make irq_to_desc_alloc_node() a wrapper around the new allocator.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
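The caller-visible contract of the wrapper, as a hedged sketch (illustrative only, not code from this patch; 'irq' and 'node' stand in for a legacy call site's values):

	/*
	 * Legacy call sites keep working unchanged: the wrapper requests
	 * exactly one descriptor at the fixed number 'irq' from the new
	 * allocator. Both a fresh allocation (res == irq) and an already
	 * populated slot (-EEXIST) resolve to the existing descriptor.
	 */
	struct irq_desc *desc = irq_to_desc_alloc_node(irq, node);
	if (!desc)
		return -ENOMEM;	/* allocation failed or irq out of range */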
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq/irqdesc.c  129
1 file changed, 20 insertions(+), 109 deletions(-)
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 35d9052901b9..7cbe4f93e2fb 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -51,7 +51,7 @@ static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
 
 static void desc_smp_init(struct irq_desc *desc, int node)
 {
-	desc->node = node;
+	desc->irq_data.node = node;
 	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
 }
 
@@ -84,13 +84,6 @@ static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
 
 #ifdef CONFIG_SPARSE_IRQ
 
-static struct irq_desc irq_desc_init = {
-	.status		= IRQ_DEFAULT_INIT_FLAGS,
-	.handle_irq	= handle_bad_irq,
-	.depth		= 1,
-	.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-};
-
 void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
 {
 	void *ptr;
@@ -108,29 +101,6 @@ void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
 	}
 }
 
-static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
-{
-	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
-
-	raw_spin_lock_init(&desc->lock);
-	desc->irq_data.irq = irq;
-#ifdef CONFIG_SMP
-	desc->irq_data.node = node;
-#endif
-	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-	init_kstat_irqs(desc, node, nr_cpu_ids);
-	if (!desc->kstat_irqs) {
-		printk(KERN_ERR "can not alloc kstat_irqs\n");
-		BUG_ON(1);
-	}
-	if (!alloc_desc_masks(desc, node, false)) {
-		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
-		BUG_ON(1);
-	}
-	init_desc_masks(desc);
-	arch_init_chip_data(desc, node);
-}
-
 static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
 
 static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
@@ -171,8 +141,9 @@ static inline void free_masks(struct irq_desc *desc) { }
 
 static struct irq_desc *alloc_desc(int irq, int node)
 {
+	/* Temporary hack until we can switch to GFP_KERNEL */
+	gfp_t gfp = gfp_allowed_mask == GFP_BOOT_MASK ? GFP_NOWAIT : GFP_ATOMIC;
 	struct irq_desc *desc;
-	gfp_t gfp = GFP_KERNEL;
 
 	desc = kzalloc_node(sizeof(*desc), gfp, node);
 	if (!desc)
@@ -226,6 +197,8 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node)
 		desc = alloc_desc(start + i, node);
 		if (!desc)
 			goto err;
+		/* temporary until I fixed x86 madness */
+		arch_init_chip_data(desc, node);
 		raw_spin_lock_irqsave(&sparse_irq_lock, flags);
 		irq_insert_desc(start + i, desc);
 		raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
@@ -242,23 +215,19 @@ err:
 	return -ENOMEM;
 }
 
-static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
-	[0 ... NR_IRQS_LEGACY-1] = {
-		.status		= IRQ_DEFAULT_INIT_FLAGS,
-		.handle_irq	= handle_bad_irq,
-		.depth		= 1,
-		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-	}
-};
+struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
+{
+	int res = irq_alloc_descs(irq, irq, 1, node);
 
-static unsigned int *kstat_irqs_legacy;
+	if (res == -EEXIST || res == irq)
+		return irq_to_desc(irq);
+	return NULL;
+}
 
 int __init early_irq_init(void)
 {
+	int i, node = first_online_node;
 	struct irq_desc *desc;
-	int legacy_count;
-	int node;
-	int i;
 
 	init_irq_default_affinity();
 
@@ -266,71 +235,14 @@ int __init early_irq_init(void)
 	arch_probe_nr_irqs();
 	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
 
-	desc = irq_desc_legacy;
-	legacy_count = ARRAY_SIZE(irq_desc_legacy);
-	node = first_online_node;
-
-	/* allocate based on nr_cpu_ids */
-	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
-					  sizeof(int), GFP_NOWAIT, node);
-
-	irq_desc_init.irq_data.chip = &no_irq_chip;
-
-	for (i = 0; i < legacy_count; i++) {
-		desc[i].irq_data.irq = i;
-		desc[i].irq_data.chip = &no_irq_chip;
-#ifdef CONFIG_SMP
-		desc[i].irq_data.node = node;
-#endif
-		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
-		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-		alloc_desc_masks(&desc[i], node, true);
-		init_desc_masks(&desc[i]);
-		irq_insert_desc(i, &desc[i]);
+	for (i = 0; i < NR_IRQS_LEGACY; i++) {
+		desc = alloc_desc(i, node);
+		set_bit(i, allocated_irqs);
+		irq_insert_desc(i, desc);
 	}
-
 	return arch_early_irq_init();
 }
 
-struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
-{
-	struct irq_desc *desc;
-	unsigned long flags;
-
-	if (irq >= nr_irqs) {
-		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
-			irq, nr_irqs);
-		return NULL;
-	}
-
-	desc = irq_to_desc(irq);
-	if (desc)
-		return desc;
-
-	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
-
-	/* We have to check it to avoid races with another CPU */
-	desc = irq_to_desc(irq);
-	if (desc)
-		goto out_unlock;
-
-	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
-
-	printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
-	if (!desc) {
-		printk(KERN_ERR "can not alloc irq_desc\n");
-		BUG_ON(1);
-	}
-	init_one_irq_desc(irq, desc, node);
-
-	irq_insert_desc(irq, desc);
-
-out_unlock:
-	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
-
-	return desc;
-}
-
 #else /* !CONFIG_SPARSE_IRQ */
 
 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
@@ -345,9 +257,8 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
 int __init early_irq_init(void)
 {
+	int count, i, node = first_online_node;
 	struct irq_desc *desc;
-	int count;
-	int i;
 
 	init_irq_default_affinity();
 
@@ -359,9 +270,9 @@ int __init early_irq_init(void)
 	for (i = 0; i < count; i++) {
 		desc[i].irq_data.irq = i;
 		desc[i].irq_data.chip = &no_irq_chip;
-		alloc_desc_masks(&desc[i], 0, true);
-		init_desc_masks(&desc[i]);
 		desc[i].kstat_irqs = kstat_irqs_all[i];
+		alloc_masks(desc + i, GFP_KERNEL, node);
+		desc_smp_init(desc + i, node);
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
 	}
 	return arch_early_irq_init();
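For completeness, a minimal sketch of using the new low-level allocator directly, which the wrapper above is built on (assumed usage, not part of this patch; irq_alloc_descs() and irq_free_descs() come from the earlier patches in this series):

	/*
	 * Request 4 consecutive descriptors; passing -1 as the first
	 * argument lets the core pick any free range starting at or
	 * above 0. A negative return value is an errno.
	 */
	int irq = irq_alloc_descs(-1, 0, 4, node);
	if (irq < 0)
		return irq;
	/* ... use irq .. irq + 3 ... */
	irq_free_descs(irq, 4);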