author     Thomas Gleixner <tglx@linutronix.de>      2008-10-15 08:16:55 -0400
committer  Ingo Molnar <mingo@elte.hu>               2008-10-16 10:53:15 -0400
commit     2cc21ef843d4fb7da122239b644a1f6f0aca60a6 (patch)
tree       5d08e110164176c4011e42d4700ecd0050ad0ce9 /kernel
parent     c6b7674f323622d86316bf7951ad9cae1ce24642 (diff)
genirq: remove sparse irq code
This code is not ready, but we need to rip it out instead of rebasing as we would lose the APIC/IO_APIC unification otherwise.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq/handle.c  114
1 file changed, 0 insertions(+), 114 deletions(-)
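For orientation before the diff: the removed CONFIG_HAVE_SPARSE_IRQ path kept the allocated descriptors on a singly linked list and resolved an IRQ number by walking that list (see the deleted irq_to_desc() and irq_to_desc_alloc() below). The following is a minimal, stand-alone sketch of that lookup pattern only; the struct layout and the test in main() are illustrative stand-ins, not the kernel's actual definitions.

#include <assert.h>
#include <stddef.h>

/*
 * Reduced stand-in for struct irq_desc: only the two fields the removed
 * lookup needs (hypothetical layout, not the kernel's real structure).
 */
struct irq_desc {
        unsigned int irq;       /* IRQ number this descriptor serves      */
        struct irq_desc *next;  /* singly linked list of live descriptors */
};

/* List head of allocated descriptors ("sparse_irqs" in the removed code). */
static struct irq_desc *sparse_irqs;

/*
 * Linear O(n) scan over the allocated descriptors: the price the sparse
 * scheme paid for not needing an NR_IRQS-sized array.
 */
static struct irq_desc *irq_to_desc(unsigned int irq)
{
        struct irq_desc *desc;

        for (desc = sparse_irqs; desc; desc = desc->next)
                if (desc->irq == irq)
                        return desc;

        return NULL;
}

int main(void)
{
        static struct irq_desc d9 = { .irq = 9 }, d16 = { .irq = 16 };

        d9.next = &d16;
        sparse_irqs = &d9;

        assert(irq_to_desc(16) == &d16);   /* found by walking the list   */
        assert(irq_to_desc(42) == NULL);   /* unknown IRQ: no descriptor  */
        return 0;
}

With this code gone, the remaining paths index a flat irq_desc array again (the dyn_array-based one or the static NR_IRQS-sized one kept below), so descriptor lookup is a constant-time array access rather than a list walk.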
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index c19896f895f9..f837133cdfbe 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -111,15 +111,6 @@ static void init_kstat_irqs(struct irq_desc *desc, int nr_desc, int nr)
         }
 }
 
-#ifdef CONFIG_HAVE_SPARSE_IRQ
-/*
- * Protect the sparse_irqs_free freelist:
- */
-static DEFINE_SPINLOCK(sparse_irq_lock);
-static struct irq_desc *sparse_irqs_free;
-struct irq_desc *sparse_irqs;
-#endif
-
 static void __init init_work(void *data)
 {
         struct dyn_array *da = data;
@@ -130,121 +121,16 @@ static void __init init_work(void *data)
 
         for (i = 0; i < *da->nr; i++) {
                 init_one_irq_desc(&desc[i]);
-#ifndef CONFIG_HAVE_SPARSE_IRQ
                 desc[i].irq = i;
-#endif
         }
 
         /* init kstat_irqs, nr_cpu_ids is ready already */
         init_kstat_irqs(desc, *da->nr, nr_cpu_ids);
-
-#ifdef CONFIG_HAVE_SPARSE_IRQ
-        for (i = 1; i < *da->nr; i++)
-                desc[i-1].next = &desc[i];
-
-        sparse_irqs_free = sparse_irqs;
-        sparse_irqs = NULL;
-#endif
-}
-
-#ifdef CONFIG_HAVE_SPARSE_IRQ
-static int nr_irq_desc = 32;
-
-static int __init parse_nr_irq_desc(char *arg)
-{
-        if (arg)
-                nr_irq_desc = simple_strtoul(arg, NULL, 0);
-        return 0;
-}
-
-early_param("nr_irq_desc", parse_nr_irq_desc);
-
-DEFINE_DYN_ARRAY(sparse_irqs, sizeof(struct irq_desc), nr_irq_desc, PAGE_SIZE, init_work);
-
-struct irq_desc *irq_to_desc(unsigned int irq)
-{
-        struct irq_desc *desc;
-
-        desc = sparse_irqs;
-        while (desc) {
-                if (desc->irq == irq)
-                        return desc;
-
-                desc = desc->next;
-        }
-        return NULL;
 }
 
-struct irq_desc *irq_to_desc_alloc(unsigned int irq)
-{
-        struct irq_desc *desc, *desc_pri;
-        unsigned long flags;
-        int count = 0;
-        int i;
-
-        desc_pri = desc = sparse_irqs;
-        while (desc) {
-                if (desc->irq == irq)
-                        return desc;
-
-                desc_pri = desc;
-                desc = desc->next;
-                count++;
-        }
-
-        spin_lock_irqsave(&sparse_irq_lock, flags);
-        /*
-         * we run out of pre-allocate ones, allocate more
-         */
-        if (!sparse_irqs_free) {
-                unsigned long phys;
-                unsigned long total_bytes;
-
-                printk(KERN_DEBUG "try to get more irq_desc %d\n", nr_irq_desc);
-
-                total_bytes = sizeof(struct irq_desc) * nr_irq_desc;
-                if (after_bootmem)
-                        desc = kzalloc(total_bytes, GFP_ATOMIC);
-                else
-                        desc = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
-
-                if (!desc)
-                        panic("please boot with nr_irq_desc= %d\n", count * 2);
-
-                phys = __pa(desc);
-                printk(KERN_DEBUG "irq_desc ==> [%#lx - %#lx]\n", phys, phys + total_bytes);
-
-                for (i = 0; i < nr_irq_desc; i++)
-                        init_one_irq_desc(&desc[i]);
-
-                for (i = 1; i < nr_irq_desc; i++)
-                        desc[i-1].next = &desc[i];
-
-                /* init kstat_irqs, nr_cpu_ids is ready already */
-                init_kstat_irqs(desc, nr_irq_desc, nr_cpu_ids);
-
-                sparse_irqs_free = desc;
-        }
-
-        desc = sparse_irqs_free;
-        sparse_irqs_free = sparse_irqs_free->next;
-        desc->next = NULL;
-        if (desc_pri)
-                desc_pri->next = desc;
-        else
-                sparse_irqs = desc;
-        desc->irq = irq;
-
-        spin_unlock_irqrestore(&sparse_irq_lock, flags);
-
-        return desc;
-}
-#else
 struct irq_desc *irq_desc;
 DEFINE_DYN_ARRAY(irq_desc, sizeof(struct irq_desc), nr_irqs, PAGE_SIZE, init_work);
 
-#endif
-
 #else
 
 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {