Diffstat (limited to 'arch/sparc/kernel/irq_32.c')
-rw-r--r--	arch/sparc/kernel/irq_32.c	513
1 file changed, 124 insertions(+), 389 deletions(-)
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index 7c93df4099cb..9b89d842913c 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -15,6 +15,7 @@
 #include <linux/seq_file.h>
 
 #include <asm/cacheflush.h>
+#include <asm/cpudata.h>
 #include <asm/pcic.h>
 #include <asm/leon.h>
 
@@ -101,284 +102,173 @@ EXPORT_SYMBOL(arch_local_irq_restore);
  * directed CPU interrupts using the existing enable/disable irq code
  * with tweaks.
  *
+ * Sun4d complicates things even further. IRQ numbers are arbitrary
+ * 32-bit values in that case. Since this is similar to sparc64,
+ * we adopt a virtual IRQ numbering scheme as is done there.
+ * Virtual interrupt numbers are allocated by build_irq(). So NR_IRQS
+ * just becomes a limit of how many interrupt sources we can handle in
+ * a single system. Even fully loaded SS2000 machines top off at
+ * about 32 interrupt sources or so, therefore a NR_IRQS value of 64
+ * is more than enough.
+ *
+ * We keep a map of per-PIL enable interrupts. These get wired
+ * up via the irq_chip->startup() method which gets invoked by
+ * the generic IRQ layer during request_irq().
  */
 
 
+/* Table of allocated irqs. Unused entries have irq == 0 */
+static struct irq_bucket irq_table[NR_IRQS];
+/* Protect access to irq_table */
+static DEFINE_SPINLOCK(irq_table_lock);
 
-/*
- * Dave Redman (djhr@tadpole.co.uk)
- *
- * There used to be extern calls and hard coded values here.. very sucky!
- * instead, because some of the devices attach very early, I do something
- * equally sucky but at least we'll never try to free statically allocated
- * space or call kmalloc before kmalloc_init :(.
- *
- * In fact it's the timer10 that attaches first.. then timer14
- * then kmalloc_init is called.. then the tty interrupts attach.
- * hmmm....
- *
- */
-#define MAX_STATIC_ALLOC        4
-struct irqaction static_irqaction[MAX_STATIC_ALLOC];
-int static_irq_count;
-
-static struct {
-        struct irqaction *action;
-        int flags;
-} sparc_irq[NR_IRQS];
-#define SPARC_IRQ_INPROGRESS    1
-
-/* Used to protect the IRQ action lists */
-DEFINE_SPINLOCK(irq_action_lock);
+/* Map between the irq identifier used in hw to the irq_bucket. */
+struct irq_bucket *irq_map[SUN4D_MAX_IRQ];
+/* Protect access to irq_map */
+static DEFINE_SPINLOCK(irq_map_lock);
 
-int show_interrupts(struct seq_file *p, void *v)
+/* Allocate a new irq from the irq_table */
+unsigned int irq_alloc(unsigned int real_irq, unsigned int pil)
 {
-        int i = *(loff_t *)v;
-        struct irqaction *action;
         unsigned long flags;
-#ifdef CONFIG_SMP
-        int j;
-#endif
+        unsigned int i;
+
+        spin_lock_irqsave(&irq_table_lock, flags);
+        for (i = 1; i < NR_IRQS; i++) {
+                if (irq_table[i].real_irq == real_irq && irq_table[i].pil == pil)
+                        goto found;
+        }
 
-        if (sparc_cpu_model == sun4d)
-                return show_sun4d_interrupts(p, v);
+        for (i = 1; i < NR_IRQS; i++) {
+                if (!irq_table[i].irq)
+                        break;
+        }
 
-        spin_lock_irqsave(&irq_action_lock, flags);
         if (i < NR_IRQS) {
-                action = sparc_irq[i].action;
-                if (!action)
-                        goto out_unlock;
-                seq_printf(p, "%3d: ", i);
-#ifndef CONFIG_SMP
-                seq_printf(p, "%10u ", kstat_irqs(i));
-#else
-                for_each_online_cpu(j) {
-                        seq_printf(p, "%10u ",
-                                    kstat_cpu(j).irqs[i]);
-                }
-#endif
-                seq_printf(p, " %c %s",
-                        (action->flags & IRQF_DISABLED) ? '+' : ' ',
-                        action->name);
-                for (action = action->next; action; action = action->next) {
-                        seq_printf(p, ",%s %s",
-                                (action->flags & IRQF_DISABLED) ? " +" : "",
-                                action->name);
-                }
-                seq_putc(p, '\n');
+                irq_table[i].real_irq = real_irq;
+                irq_table[i].irq = i;
+                irq_table[i].pil = pil;
+        } else {
+                printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
+                i = 0;
         }
-out_unlock:
-        spin_unlock_irqrestore(&irq_action_lock, flags);
-        return 0;
+found:
+        spin_unlock_irqrestore(&irq_table_lock, flags);
+
+        return i;
 }
 
-void free_irq(unsigned int irq, void *dev_id)
+/* Based on a single pil handler_irq may need to call several
+ * interrupt handlers. Use irq_map as entry to irq_table,
+ * and let each entry in irq_table point to the next entry.
+ */
+void irq_link(unsigned int irq)
 {
-        struct irqaction *action;
-        struct irqaction **actionp;
+        struct irq_bucket *p;
         unsigned long flags;
-        unsigned int cpu_irq;
-
-        if (sparc_cpu_model == sun4d) {
-                sun4d_free_irq(irq, dev_id);
-                return;
-        }
-        cpu_irq = irq & (NR_IRQS - 1);
-        if (cpu_irq > 14) {  /* 14 irq levels on the sparc */
-                printk(KERN_ERR "Trying to free bogus IRQ %d\n", irq);
-                return;
-        }
+        unsigned int pil;
 
-        spin_lock_irqsave(&irq_action_lock, flags);
+        BUG_ON(irq >= NR_IRQS);
 
-        actionp = &sparc_irq[cpu_irq].action;
-        action = *actionp;
+        spin_lock_irqsave(&irq_map_lock, flags);
 
-        if (!action->handler) {
-                printk(KERN_ERR "Trying to free free IRQ%d\n", irq);
-                goto out_unlock;
-        }
-        if (dev_id) {
-                for (; action; action = action->next) {
-                        if (action->dev_id == dev_id)
-                                break;
-                        actionp = &action->next;
-                }
-                if (!action) {
-                        printk(KERN_ERR "Trying to free free shared IRQ%d\n",
-                               irq);
-                        goto out_unlock;
-                }
-        } else if (action->flags & IRQF_SHARED) {
-                printk(KERN_ERR "Trying to free shared IRQ%d with NULL device ID\n",
-                       irq);
-                goto out_unlock;
-        }
-        if (action->flags & SA_STATIC_ALLOC) {
-                /*
-                 * This interrupt is marked as specially allocated
-                 * so it is a bad idea to free it.
-                 */
-                printk(KERN_ERR "Attempt to free statically allocated IRQ%d (%s)\n",
-                       irq, action->name);
-                goto out_unlock;
-        }
-
-        *actionp = action->next;
+        p = &irq_table[irq];
+        pil = p->pil;
+        BUG_ON(pil > SUN4D_MAX_IRQ);
+        p->next = irq_map[pil];
+        irq_map[pil] = p;
 
-        spin_unlock_irqrestore(&irq_action_lock, flags);
+        spin_unlock_irqrestore(&irq_map_lock, flags);
+}
 
-        synchronize_irq(irq);
+void irq_unlink(unsigned int irq)
+{
+        struct irq_bucket *p, **pnext;
+        unsigned long flags;
 
-        spin_lock_irqsave(&irq_action_lock, flags);
+        BUG_ON(irq >= NR_IRQS);
 
-        kfree(action);
+        spin_lock_irqsave(&irq_map_lock, flags);
 
-        if (!sparc_irq[cpu_irq].action)
-                __disable_irq(irq);
+        p = &irq_table[irq];
+        BUG_ON(p->pil > SUN4D_MAX_IRQ);
+        pnext = &irq_map[p->pil];
+        while (*pnext != p)
+                pnext = &(*pnext)->next;
+        *pnext = p->next;
 
-out_unlock:
-        spin_unlock_irqrestore(&irq_action_lock, flags);
+        spin_unlock_irqrestore(&irq_map_lock, flags);
 }
-EXPORT_SYMBOL(free_irq);
-
-/*
- * This is called when we want to synchronize with
- * interrupts. We may for example tell a device to
- * stop sending interrupts: but to make sure there
- * are no interrupts that are executing on another
- * CPU we need to call this function.
- */
-#ifdef CONFIG_SMP
-void synchronize_irq(unsigned int irq)
-{
-        unsigned int cpu_irq;
 
-        cpu_irq = irq & (NR_IRQS - 1);
-        while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS)
-                cpu_relax();
-}
-EXPORT_SYMBOL(synchronize_irq);
-#endif /* SMP */
 
-void unexpected_irq(int irq, void *dev_id, struct pt_regs *regs)
+/* /proc/interrupts printing */
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
-        int i;
-        struct irqaction *action;
-        unsigned int cpu_irq;
+        int j;
 
-        cpu_irq = irq & (NR_IRQS - 1);
-        action = sparc_irq[cpu_irq].action;
-
-        printk(KERN_ERR "IO device interrupt, irq = %d\n", irq);
-        printk(KERN_ERR "PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
-                    regs->npc, regs->u_regs[14]);
-        if (action) {
-                printk(KERN_ERR "Expecting: ");
-                for (i = 0; i < 16; i++)
-                        if (action->handler)
-                                printk(KERN_CONT "[%s:%d:0x%x] ", action->name,
-                                       i, (unsigned int)action->handler);
-        }
-        printk(KERN_ERR "AIEEE\n");
-        panic("bogus interrupt received");
+#ifdef CONFIG_SMP
+        seq_printf(p, "RES: ");
+        for_each_online_cpu(j)
+                seq_printf(p, "%10u ", cpu_data(j).irq_resched_count);
+        seq_printf(p, " IPI rescheduling interrupts\n");
+        seq_printf(p, "CAL: ");
+        for_each_online_cpu(j)
+                seq_printf(p, "%10u ", cpu_data(j).irq_call_count);
+        seq_printf(p, " IPI function call interrupts\n");
+#endif
+        seq_printf(p, "NMI: ");
+        for_each_online_cpu(j)
+                seq_printf(p, "%10u ", cpu_data(j).counter);
+        seq_printf(p, " Non-maskable interrupts\n");
+        return 0;
 }
 
-void handler_irq(int pil, struct pt_regs *regs)
+void handler_irq(unsigned int pil, struct pt_regs *regs)
 {
         struct pt_regs *old_regs;
-        struct irqaction *action;
-        int cpu = smp_processor_id();
+        struct irq_bucket *p;
 
+        BUG_ON(pil > 15);
         old_regs = set_irq_regs(regs);
         irq_enter();
-        disable_pil_irq(pil);
-#ifdef CONFIG_SMP
-        /* Only rotate on lower priority IRQs (scsi, ethernet, etc.). */
-        if ((sparc_cpu_model==sun4m) && (pil < 10))
-                smp4m_irq_rotate(cpu);
-#endif
-        action = sparc_irq[pil].action;
-        sparc_irq[pil].flags |= SPARC_IRQ_INPROGRESS;
-        kstat_cpu(cpu).irqs[pil]++;
-        do {
-                if (!action || !action->handler)
-                        unexpected_irq(pil, NULL, regs);
-                action->handler(pil, action->dev_id);
-                action = action->next;
-        } while (action);
-        sparc_irq[pil].flags &= ~SPARC_IRQ_INPROGRESS;
-        enable_pil_irq(pil);
+
+        p = irq_map[pil];
+        while (p) {
+                struct irq_bucket *next = p->next;
+
+                generic_handle_irq(p->irq);
+                p = next;
+        }
         irq_exit();
         set_irq_regs(old_regs);
 }
 
 #if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
+static unsigned int floppy_irq;
 
-/*
- * Fast IRQs on the Sparc can only have one routine attached to them,
- * thus no sharing possible.
- */
-static int request_fast_irq(unsigned int irq,
-                            void (*handler)(void),
-                            unsigned long irqflags, const char *devname)
+int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler)
 {
-        struct irqaction *action;
-        unsigned long flags;
         unsigned int cpu_irq;
-        int ret;
+        int err;
+
 #if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
         struct tt_entry *trap_table;
 #endif
-        cpu_irq = irq & (NR_IRQS - 1);
-        if (cpu_irq > 14) {
-                ret = -EINVAL;
-                goto out;
-        }
-        if (!handler) {
-                ret = -EINVAL;
-                goto out;
-        }
 
-        spin_lock_irqsave(&irq_action_lock, flags);
+        err = request_irq(irq, irq_handler, 0, "floppy", NULL);
+        if (err)
+                return -1;
 
-        action = sparc_irq[cpu_irq].action;
-        if (action) {
-                if (action->flags & IRQF_SHARED)
-                        panic("Trying to register fast irq when already shared.\n");
-                if (irqflags & IRQF_SHARED)
-                        panic("Trying to register fast irq as shared.\n");
+        /* Save for later use in floppy interrupt handler */
+        floppy_irq = irq;
 
-                /* Anyway, someone already owns it so cannot be made fast. */
-                printk(KERN_ERR "request_fast_irq: Trying to register yet already owned.\n");
-                ret = -EBUSY;
-                goto out_unlock;
-        }
-
-        /*
-         * If this is flagged as statically allocated then we use our
-         * private struct which is never freed.
-         */
-        if (irqflags & SA_STATIC_ALLOC) {
-                if (static_irq_count < MAX_STATIC_ALLOC)
-                        action = &static_irqaction[static_irq_count++];
-                else
-                        printk(KERN_ERR "Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
-                               irq, devname);
-        }
-
-        if (action == NULL)
-                action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
-        if (!action) {
-                ret = -ENOMEM;
-                goto out_unlock;
-        }
+        cpu_irq = (irq & (NR_IRQS - 1));
 
         /* Dork with trap table if we get this far. */
 #define INSTANTIATE(table) \
         table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \
         table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \
-                SPARC_BRANCH((unsigned long) handler, \
+                SPARC_BRANCH((unsigned long) floppy_hardint, \
                              (unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two);\
         table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \
         table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
@@ -399,22 +289,9 @@ static int request_fast_irq(unsigned int irq,
  * writing we have no CPU-neutral interface to fine-grained flushes.
  */
         flush_cache_all();
-
-        action->flags = irqflags;
-        action->name = devname;
-        action->dev_id = NULL;
-        action->next = NULL;
-
-        sparc_irq[cpu_irq].action = action;
-
-        __enable_irq(irq);
-
-        ret = 0;
-out_unlock:
-        spin_unlock_irqrestore(&irq_action_lock, flags);
-out:
-        return ret;
+        return 0;
 }
+EXPORT_SYMBOL(sparc_floppy_request_irq);
 
 /*
  * These variables are used to access state from the assembler
@@ -440,154 +317,23 @@ EXPORT_SYMBOL(pdma_base);
 unsigned long pdma_areasize;
 EXPORT_SYMBOL(pdma_areasize);
 
-static irq_handler_t floppy_irq_handler;
-
+/* Use the generic irq support to call floppy_interrupt
+ * which was setup using request_irq() in sparc_floppy_request_irq().
+ * We only have one floppy interrupt so we do not need to check
+ * for additional handlers being wired up by irq_link()
+ */
 void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
 {
         struct pt_regs *old_regs;
-        int cpu = smp_processor_id();
 
         old_regs = set_irq_regs(regs);
-        disable_pil_irq(irq);
         irq_enter();
-        kstat_cpu(cpu).irqs[irq]++;
-        floppy_irq_handler(irq, dev_id);
+        generic_handle_irq(floppy_irq);
         irq_exit();
-        enable_pil_irq(irq);
         set_irq_regs(old_regs);
-        /*
-         * XXX Eek, it's totally changed with preempt_count() and such
-         * if (softirq_pending(cpu))
-         *      do_softirq();
-         */
-}
-
-int sparc_floppy_request_irq(int irq, unsigned long flags,
-                             irq_handler_t irq_handler)
-{
-        floppy_irq_handler = irq_handler;
-        return request_fast_irq(irq, floppy_hardint, flags, "floppy");
 }
-EXPORT_SYMBOL(sparc_floppy_request_irq);
-
 #endif
 
-int request_irq(unsigned int irq,
-                irq_handler_t handler,
-                unsigned long irqflags, const char *devname, void *dev_id)
-{
-        struct irqaction *action, **actionp;
-        unsigned long flags;
-        unsigned int cpu_irq;
-        int ret;
-
-        if (sparc_cpu_model == sun4d)
-                return sun4d_request_irq(irq, handler, irqflags, devname, dev_id);
-
-        cpu_irq = irq & (NR_IRQS - 1);
-        if (cpu_irq > 14) {
-                ret = -EINVAL;
-                goto out;
-        }
-        if (!handler) {
-                ret = -EINVAL;
-                goto out;
-        }
-
-        spin_lock_irqsave(&irq_action_lock, flags);
-
-        actionp = &sparc_irq[cpu_irq].action;
-        action = *actionp;
-        if (action) {
-                if (!(action->flags & IRQF_SHARED) || !(irqflags & IRQF_SHARED)) {
-                        ret = -EBUSY;
-                        goto out_unlock;
-                }
-                if ((action->flags & IRQF_DISABLED) != (irqflags & IRQF_DISABLED)) {
-                        printk(KERN_ERR "Attempt to mix fast and slow interrupts on IRQ%d denied\n",
-                               irq);
-                        ret = -EBUSY;
-                        goto out_unlock;
-                }
-                for ( ; action; action = *actionp)
-                        actionp = &action->next;
-        }
-
-        /* If this is flagged as statically allocated then we use our
-         * private struct which is never freed.
-         */
-        if (irqflags & SA_STATIC_ALLOC) {
-                if (static_irq_count < MAX_STATIC_ALLOC)
-                        action = &static_irqaction[static_irq_count++];
-                else
-                        printk(KERN_ERR "Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
-                               irq, devname);
-        }
-        if (action == NULL)
-                action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
-        if (!action) {
-                ret = -ENOMEM;
-                goto out_unlock;
-        }
-
-        action->handler = handler;
-        action->flags = irqflags;
-        action->name = devname;
-        action->next = NULL;
-        action->dev_id = dev_id;
-
-        *actionp = action;
-
-        __enable_irq(irq);
-
-        ret = 0;
-out_unlock:
-        spin_unlock_irqrestore(&irq_action_lock, flags);
-out:
-        return ret;
-}
-EXPORT_SYMBOL(request_irq);
-
-void disable_irq_nosync(unsigned int irq)
-{
-        __disable_irq(irq);
-}
-EXPORT_SYMBOL(disable_irq_nosync);
-
-void disable_irq(unsigned int irq)
-{
-        __disable_irq(irq);
-}
-EXPORT_SYMBOL(disable_irq);
-
-void enable_irq(unsigned int irq)
-{
-        __enable_irq(irq);
-}
-EXPORT_SYMBOL(enable_irq);
-
-/*
- * We really don't need these at all on the Sparc. We only have
- * stubs here because they are exported to modules.
- */
-unsigned long probe_irq_on(void)
-{
-        return 0;
-}
-EXPORT_SYMBOL(probe_irq_on);
-
-int probe_irq_off(unsigned long mask)
-{
-        return 0;
-}
-EXPORT_SYMBOL(probe_irq_off);
-
-static unsigned int build_device_irq(struct platform_device *op,
-                                     unsigned int real_irq)
-{
-        return real_irq;
-}
-
 /* djhr
  * This could probably be made indirect too and assigned in the CPU
  * bits of the code. That would be much nicer I think and would also
@@ -598,8 +344,6 @@ static unsigned int build_device_irq(struct platform_device *op,
 
 void __init init_IRQ(void)
 {
-        sparc_irq_config.build_device_irq = build_device_irq;
-
         switch (sparc_cpu_model) {
         case sun4c:
         case sun4:
@@ -607,14 +351,11 @@ void __init init_IRQ(void)
                 break;
 
         case sun4m:
-#ifdef CONFIG_PCI
                 pcic_probe();
-                if (pcic_present()) {
+                if (pcic_present())
                         sun4m_pci_init_IRQ();
-                        break;
-                }
-#endif
-                sun4m_init_IRQ();
+                else
+                        sun4m_init_IRQ();
                 break;
 
         case sun4d:
@@ -632,9 +373,3 @@ void __init init_IRQ(void)
         btfixup();
 }
 
-#ifdef CONFIG_PROC_FS
-void init_irq_proc(void)
-{
-        /* For now, nothing... */
-}
-#endif /* CONFIG_PROC_FS */
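
The heart of this patch is the interplay of irq_alloc(), irq_link() and handler_irq(). The stand-alone user-space sketch below models that bucket scheme under stated assumptions: the SUN4D_MAX_IRQ value, the example hardware identifiers (0x2000, 0x3800) and the handle_pil() printout are made-up stand-ins, and the spinlocks, BUG_ON() checks and the generic IRQ layer of the real code are deliberately left out.

#include <assert.h>
#include <stdio.h>

#define NR_IRQS       64
#define SUN4D_MAX_IRQ 16   /* illustrative value, not taken from the patch */

struct irq_bucket {
        struct irq_bucket *next;
        unsigned int real_irq;
        unsigned int irq;   /* virtual irq; 0 means the slot is unused */
        unsigned int pil;
};

static struct irq_bucket irq_table[NR_IRQS];
static struct irq_bucket *irq_map[SUN4D_MAX_IRQ];

/* Same search-then-allocate logic as irq_alloc() in the patch, minus the
 * locking. Returns the existing virtual irq for (real_irq, pil) if one was
 * already allocated, a fresh slot otherwise, or 0 when the table is full. */
static unsigned int irq_alloc(unsigned int real_irq, unsigned int pil)
{
        unsigned int i;

        for (i = 1; i < NR_IRQS; i++)
                if (irq_table[i].real_irq == real_irq && irq_table[i].pil == pil)
                        return i;

        for (i = 1; i < NR_IRQS; i++)
                if (!irq_table[i].irq)
                        break;

        if (i == NR_IRQS)
                return 0;       /* out of virtual irqs */

        irq_table[i].real_irq = real_irq;
        irq_table[i].irq = i;
        irq_table[i].pil = pil;
        return i;
}

/* Push the bucket onto the head of its per-PIL chain, as irq_link() does. */
static void irq_link(unsigned int irq)
{
        struct irq_bucket *p = &irq_table[irq];

        assert(irq && irq < NR_IRQS && p->pil < SUN4D_MAX_IRQ);
        p->next = irq_map[p->pil];
        irq_map[p->pil] = p;
}

/* Walk one PIL's chain the way handler_irq() does; printing stands in
 * for generic_handle_irq(). */
static void handle_pil(unsigned int pil)
{
        struct irq_bucket *p;

        for (p = irq_map[pil]; p; p = p->next)
                printf("pil %u -> virtual irq %u (real irq 0x%x)\n",
                       pil, p->irq, p->real_irq);
}

int main(void)
{
        /* Two hardware sources sharing PIL 7 get distinct virtual irqs. */
        irq_link(irq_alloc(0x2000, 7));
        irq_link(irq_alloc(0x3800, 7));
        handle_pil(7);
        return 0;
}

Because irq_link() pushes each bucket onto the head of its PIL chain, the most recently linked source is visited first; note also that the patch's handler_irq() saves p->next before dispatching, so a handler that unlinks its own bucket cannot break the walk.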