author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/frv/kernel/irq.c
tag        Linux-2.6.12-rc2 (v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/frv/kernel/irq.c')
-rw-r--r--  arch/frv/kernel/irq.c  764
1 file changed, 764 insertions(+), 0 deletions(-)
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c
new file mode 100644
index 000000000000..8c524cdd2717
--- /dev/null
+++ b/arch/frv/kernel/irq.c
@@ -0,0 +1,764 @@
/* irq.c: FRV IRQ handling
 *
 * Copyright (C) 2003, 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */

#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/irc-regs.h>
#include <asm/irq-routing.h>
#include <asm/gdb-stub.h>

extern void __init fpga_init(void);
extern void __init route_mb93493_irqs(void);

static void register_irq_proc (unsigned int irq);

/*
 * Special irq handlers.
 */

irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{
        return IRQ_HANDLED;
}

atomic_t irq_err_count;

/*
 * Generic, controller-independent functions:
 */
int show_interrupts(struct seq_file *p, void *v)
{
        struct irqaction *action;
        struct irq_group *group;
        unsigned long flags;
        int level, grp, ix, i, j;

        i = *(loff_t *) v;

        switch (i) {
        case 0:
                seq_printf(p, " ");
                for (j = 0; j < NR_CPUS; j++)
                        if (cpu_online(j))
                                seq_printf(p, "CPU%d ", j);

                seq_putc(p, '\n');
                break;

        case 1 ... NR_IRQ_GROUPS * NR_IRQ_ACTIONS_PER_GROUP:
                local_irq_save(flags);

                grp = (i - 1) / NR_IRQ_ACTIONS_PER_GROUP;
                group = irq_groups[grp];
                if (!group)
                        goto skip;

                ix = (i - 1) % NR_IRQ_ACTIONS_PER_GROUP;
                action = group->actions[ix];
                if (!action)
                        goto skip;

                seq_printf(p, "%3d: ", i - 1);

#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i - 1));
#else
                for (j = 0; j < NR_CPUS; j++)
                        if (cpu_online(j))
                                seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]);
#endif

                level = group->sources[ix]->level - frv_irq_levels;

                seq_printf(p, " %12s@%x", group->sources[ix]->muxname, level);
                seq_printf(p, " %s", action->name);

                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);

                seq_putc(p, '\n');
        skip:
                local_irq_restore(flags);
                break;

        case NR_IRQ_GROUPS * NR_IRQ_ACTIONS_PER_GROUP + 1:
                seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
                break;

        default:
                break;
        }

        return 0;
}
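
/*
 * Editor's illustration (not part of the original file): with the format
 * strings above, a populated row of /proc/interrupts would look roughly
 * like this on a uniprocessor build - the values here are invented:
 *
 *        1:      10243     serial.0@2 ttyS0
 *
 * i.e. "%3d: " for the IRQ number, "%10u " for the count, " %12s@%x" for
 * the source's muxname and level, and " %s" for each chained action name.
 */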


/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line.  Disables and enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */

void disable_irq_nosync(unsigned int irq)
{
        struct irq_source *source;
        struct irq_group *group;
        struct irq_level *level;
        unsigned long flags;
        int idx = irq & (NR_IRQ_ACTIONS_PER_GROUP - 1);

        group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
        if (!group)
                BUG();

        source = group->sources[idx];
        if (!source)
                BUG();

        level = source->level;

        spin_lock_irqsave(&level->lock, flags);

        if (group->control) {
                if (!group->disable_cnt[idx]++)
                        group->control(group, idx, 0);
        } else if (!level->disable_count++) {
                __set_MASK(level - frv_irq_levels);
        }

        spin_unlock_irqrestore(&level->lock, flags);
}

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line.  Disables and enables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning.  If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */

void disable_irq(unsigned int irq)
{
        disable_irq_nosync(irq);

#ifdef CONFIG_SMP
        if (!local_irq_count(smp_processor_id())) {
                do {
                        barrier();
                } while (irq_desc[irq].status & IRQ_INPROGRESS);
        }
#endif
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq().  If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context.
 */

void enable_irq(unsigned int irq)
{
        struct irq_source *source;
        struct irq_group *group;
        struct irq_level *level;
        unsigned long flags;
        int idx = irq & (NR_IRQ_ACTIONS_PER_GROUP - 1);
        int count;

        group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
        if (!group)
                BUG();

        source = group->sources[idx];
        if (!source)
                BUG();

        level = source->level;

        spin_lock_irqsave(&level->lock, flags);

        if (group->control)
                count = group->disable_cnt[idx];
        else
                count = level->disable_count;

        switch (count) {
        case 1:
                if (group->control) {
                        if (group->actions[idx])
                                group->control(group, idx, 1);
                } else {
                        if (level->usage)
                                __clr_MASK(level - frv_irq_levels);
                }
                /* fall-through */

        default:
                count--;
                break;

        case 0:
                printk("enable_irq(%u) unbalanced from %p\n",
                       irq, __builtin_return_address(0));
        }

        if (group->control)
                group->disable_cnt[idx] = count;
        else
                level->disable_count = count;

        spin_unlock_irqrestore(&level->lock, flags);
}
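
/*
 * Editor's sketch (not in the original commit): disable_irq()/enable_irq()
 * nest by design, so every disable must be balanced by exactly one enable
 * before the line is unmasked again:
 */
#if 0
static void example_fence_irq(unsigned int irq)
{
        disable_irq(irq);       /* masks the line, waits for running handlers */
        disable_irq(irq);       /* nests: disable count goes 1 -> 2 */
        enable_irq(irq);        /* count 2 -> 1, line still masked */
        enable_irq(irq);        /* count 1 -> 0, line unmasked again */
}
#endif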

/*****************************************************************************/
/*
 * handles all normal device IRQs
 * - registers are referred to by the __frame variable (GR28)
 * - IRQ distribution is complicated in this arch because of the many PICs, the
 *   way they work and the way they cascade
 */
asmlinkage void do_IRQ(void)
{
        struct irq_source *source;
        int level, cpu;

        level = (__frame->tbr >> 4) & 0xf;
        cpu = smp_processor_id();

#if 0
        {
                static u32 irqcount;
                *(volatile u32 *) 0xe1200004 = ~((irqcount++ << 8) | level);
                *(volatile u16 *) 0xffc00100 = (u16) ~0x9999;
                mb();
        }
#endif

        if ((unsigned long) __frame - (unsigned long) (current + 1) < 512)
                BUG();

        __set_MASK(level);
        __clr_RC(level);
        __clr_IRL();

        kstat_this_cpu.irqs[level]++;

        irq_enter();

        for (source = frv_irq_levels[level].sources; source; source = source->next)
                source->doirq(source);

        irq_exit();

        __clr_MASK(level);

        /* only process softirqs if we didn't interrupt another interrupt handler */
        if ((__frame->psr & PSR_PIL) == PSR_PIL_0)
                if (local_softirq_pending())
                        do_softirq();

#ifdef CONFIG_PREEMPT
        local_irq_disable();
        while (--current->preempt_count == 0) {
                if (!(__frame->psr & PSR_S) ||
                    current->need_resched == 0 ||
                    in_interrupt())
                        break;
                current->preempt_count++;
                local_irq_enable();
                preempt_schedule();
                local_irq_disable();
        }
#endif

#if 0
        {
                *(volatile u16 *) 0xffc00100 = (u16) ~0x6666;
                mb();
        }
#endif

} /* end do_IRQ() */

/*****************************************************************************/
/*
 * handles all NMIs when not co-opted by the debugger
 * - registers are referred to by the __frame variable (GR28)
 */
asmlinkage void do_NMI(void)
{
} /* end do_NMI() */

/*****************************************************************************/
/**
 * request_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs
 * @irqflags: Interrupt type flags
 * @devname: An ASCII name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling.  From the point this
 * call is made your handler function may be invoked.  Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * Dev_id must be globally unique.  Normally the address of the
 * device data structure is used as the cookie.  Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non-NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 * SA_SHIRQ             Interrupt is shared
 *
 * SA_INTERRUPT         Disable local interrupts while processing
 *
 * SA_SAMPLE_RANDOM     The interrupt can be used for entropy
 *
 */

int request_irq(unsigned int irq,
                irqreturn_t (*handler)(int, void *, struct pt_regs *),
                unsigned long irqflags,
                const char *devname,
                void *dev_id)
{
        int retval;
        struct irqaction *action;

#if 1
        /*
         * Sanity-check: shared interrupts should REALLY pass in
         * a real dev-ID, otherwise we'll have trouble later trying
         * to figure out which interrupt is which (messes up the
         * interrupt freeing logic etc).
         */
        if (irqflags & SA_SHIRQ) {
                if (!dev_id)
                        printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n",
                               devname, (&irq)[-1]);
        }
#endif

        if ((irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP) >= NR_IRQ_GROUPS)
                return -EINVAL;
        if (!handler)
                return -EINVAL;

        action = (struct irqaction *) kmalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->flags = irqflags;
        action->mask = CPU_MASK_NONE;
        action->name = devname;
        action->next = NULL;
        action->dev_id = dev_id;

        retval = setup_irq(irq, action);
        if (retval)
                kfree(action);
        return retval;
}
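
/*
 * Editor's usage sketch (hypothetical driver: struct my_dev, my_interrupt()
 * and my_attach() are not part of this file): a shared line needs SA_SHIRQ
 * plus a unique dev_id cookie, which comes back as the handler's second
 * argument.
 */
#if 0
static irqreturn_t my_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct my_dev *dev = dev_id;    /* the cookie given to request_irq() */

        /* check whether our device asserted the line, acknowledge it, and... */
        return IRQ_HANDLED;             /* ...or IRQ_NONE if it wasn't ours */
}

static int my_attach(struct my_dev *dev)
{
        /* the device struct doubles as the dev_id required for sharing */
        return request_irq(dev->irq, my_interrupt, SA_SHIRQ, "my_dev", dev);
}
#endif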

/**
 * free_irq - free an interrupt
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler.  The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function.  The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function may be called from interrupt context.
 *
 * Bugs: Attempting to free an irq in a handler for the same irq hangs
 *       the machine.
 */

void free_irq(unsigned int irq, void *dev_id)
{
        struct irq_source *source;
        struct irq_group *group;
        struct irq_level *level;
        struct irqaction **p, **pp;
        unsigned long flags;

        if ((irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP) >= NR_IRQ_GROUPS)
                return;

        group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
        if (!group)
                BUG();

        source = group->sources[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];
        if (!source)
                BUG();

        level = source->level;
        p = &group->actions[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];

        spin_lock_irqsave(&level->lock, flags);

        for (pp = p; *pp; pp = &(*pp)->next) {
                struct irqaction *action = *pp;

                if (action->dev_id != dev_id)
                        continue;

                /* found it - remove from the list of entries */
                *pp = action->next;

                level->usage--;

                if (p == pp && group->control)
                        group->control(group, irq & (NR_IRQ_ACTIONS_PER_GROUP - 1), 0);

                if (level->usage == 0)
                        __set_MASK(level - frv_irq_levels);

                spin_unlock_irqrestore(&level->lock, flags);

#ifdef CONFIG_SMP
                /* Wait to make sure it's not being used on another CPU */
                while (irq_desc[irq].status & IRQ_INPROGRESS)
                        barrier();
#endif
                kfree(action);
                return;
        }
}
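
/*
 * Editor's teardown sketch (hypothetical my_detach(), matching the attach
 * example after request_irq() above): free with the same dev_id cookie,
 * after quiescing the device so a shared line stays usable by others.
 */
#if 0
static void my_detach(struct my_dev *dev)
{
        /* stop the device raising interrupts first, then: */
        free_irq(dev->irq, dev);        /* dev_id picks our action off the chain */
}
#endif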

/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that comes in on an
 * unassigned IRQ will cause GxICR_DETECT to be set
 */

static DECLARE_MUTEX(probe_sem);

/**
 * probe_irq_on - begin an interrupt autodetect
 *
 * Commence probing for an interrupt.  The interrupts are scanned
 * and a mask of potential interrupt lines is returned.
 *
 */

unsigned long probe_irq_on(void)
{
        down(&probe_sem);
        return 0;
}

/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */

/**
 * probe_irq_mask - scan a bitmap of interrupt lines
 * @xmask: mask of interrupts to consider
 *
 * Scan the ISA bus interrupt lines and return a bitmap of
 * active interrupts.  The interrupt probe logic state is then
 * returned to its previous value.
 *
 * Note: we need to scan all the irqs even though we will
 * only return ISA irq numbers - just so that we reset them
 * all to a known state.
 */
unsigned int probe_irq_mask(unsigned long xmask)
{
        up(&probe_sem);
        return 0;
}

/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */

/**
 * probe_irq_off - end an interrupt autodetect
 * @xmask: mask of potential interrupts (unused)
 *
 * Scans the unused interrupt lines and returns the line which
 * appears to have triggered the interrupt.  If no interrupt was
 * found then zero is returned.  If more than one interrupt is
 * found then minus the first candidate is returned to indicate
 * there is doubt.
 *
 * The interrupt probe logic state is returned to its previous
 * value.
 *
 * BUGS: When used in a module (which arguably shouldn't happen)
 * nothing prevents two IRQ probe callers from overlapping.  The
 * results of this are non-optimal.
 */

int probe_irq_off(unsigned long xmask)
{
        up(&probe_sem);
        return -1;
}
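
/*
 * For reference, the classic autoprobe sequence a driver would run
 * (editor's sketch; since the three functions above are stubs on this
 * arch, the probe always reports "no interrupt found"):
 */
#if 0
static int example_autoprobe(void)
{
        unsigned long mask;
        int irq;

        mask = probe_irq_on();
        /* ...provoke the device into raising its interrupt here... */
        irq = probe_irq_off(mask);      /* >0 found, 0 none, <0 ambiguous */
        return irq;
}
#endif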

/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction *new)
{
        struct irq_source *source;
        struct irq_group *group;
        struct irq_level *level;
        struct irqaction **p, **pp;
        unsigned long flags;

        group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
        if (!group)
                BUG();

        source = group->sources[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];
        if (!source)
                BUG();

        level = source->level;

        p = &group->actions[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];

        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & SA_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, so we want to call it first,
                 * outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is attempted to be loaded without actually
                 * installing a new handler, but is that really a problem?
                 * Only the sysadmin is able to do this.
                 */
                rand_initialize_irq(irq);
        }

        /* must juggle the interrupt processing stuff with interrupts disabled */
        spin_lock_irqsave(&level->lock, flags);

        /* can't share interrupts unless all parties agree to */
        if (level->usage != 0 && !(level->flags & new->flags & SA_SHIRQ)) {
                spin_unlock_irqrestore(&level->lock, flags);
                return -EBUSY;
        }

        /* add new interrupt at end of irq queue */
        pp = p;
        while (*pp)
                pp = &(*pp)->next;

        *pp = new;

        level->usage++;
        level->flags = new->flags;

        /* turn the interrupts on */
        if (level->usage == 1)
                __clr_MASK(level - frv_irq_levels);

        if (p == pp && group->control)
                group->control(group, irq & (NR_IRQ_ACTIONS_PER_GROUP - 1), 1);

        spin_unlock_irqrestore(&level->lock, flags);
        register_irq_proc(irq);
        return 0;
}
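
/*
 * Editor's note on the -EBUSY path above: a live level can only be shared
 * if every party passes SA_SHIRQ. Sketch, with hypothetical handlers and
 * device cookies:
 */
#if 0
static void example_sharing(unsigned int irq)
{
        request_irq(irq, handler_a, 0, "a", &dev_a);            /* claims the level exclusively */
        request_irq(irq, handler_b, SA_SHIRQ, "b", &dev_b);     /* -EBUSY: "a" didn't agree to share */
}
#endif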

static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];

#define HEX_DIGITS 8

static int parse_hex_value (const char *buffer,
                            unsigned long count, unsigned long *ret)
{
        unsigned char hexnum [HEX_DIGITS];
        unsigned long value;
        int i;

        if (!count)
                return -EINVAL;
        if (count > HEX_DIGITS)
                count = HEX_DIGITS;
        if (copy_from_user(hexnum, buffer, count))
                return -EFAULT;

        /*
         * Parse the first 8 characters as a hex string, any non-hex char
         * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
         */
        value = 0;

        for (i = 0; i < count; i++) {
                unsigned int c = hexnum[i];

                switch (c) {
                case '0' ... '9': c -= '0'; break;
                case 'a' ... 'f': c -= 'a' - 10; break;
                case 'A' ... 'F': c -= 'A' - 10; break;
                default:
                        goto out;
                }
                value = (value << 4) | c;
        }
out:
        *ret = value;
        return 0;
}
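
/*
 * Editor's examples of what the parser above accepts: conversion stops at
 * the first non-hex character, and at most HEX_DIGITS (8) characters are
 * consumed:
 *
 *      parse_hex_value("e1\n", 3, &v)      -> 0, v == 0xe1
 *      parse_hex_value("00E1", 4, &v)      -> 0, v == 0xe1
 *      parse_hex_value("123456789", 9, &v) -> 0, v == 0x12345678
 */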


static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
                                    int count, int *eof, void *data)
{
        unsigned long *mask = (unsigned long *) data;

        if (count < HEX_DIGITS + 1)
                return -EINVAL;
        return sprintf(page, "%08lx\n", *mask);
}
static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
                                     unsigned long count, void *data)
{
        unsigned long *mask = (unsigned long *) data, full_count = count;
        unsigned long new_value;
        int err;

        err = parse_hex_value(buffer, count, &new_value);
        if (err)
                return err;

        *mask = new_value;
        return full_count;
}

#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
        char name [MAX_NAMELEN];

        if (!root_irq_dir || irq_dir[irq])
                return;

        memset(name, 0, MAX_NAMELEN);
        sprintf(name, "%d", irq);

        /* create /proc/irq/1234 */
        irq_dir[irq] = proc_mkdir(name, root_irq_dir);
}
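
/*
 * Editor's illustration of the resulting proc layout: one (empty, on this
 * arch) directory per registered IRQ next to the profiling mask file:
 *
 *      /proc/irq/
 *      |-- prof_cpu_mask
 *      |-- 0/
 *      |-- 1/
 *      `-- ...
 */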

unsigned long prof_cpu_mask = -1;

void init_irq_proc (void)
{
        struct proc_dir_entry *entry;
        int i;

        /* create /proc/irq */
        root_irq_dir = proc_mkdir("irq", 0);

        /* create /proc/irq/prof_cpu_mask */
        entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
        if (!entry)
                return;

        entry->nlink = 1;
        entry->data = (void *) &prof_cpu_mask;
        entry->read_proc = prof_cpu_mask_read_proc;
        entry->write_proc = prof_cpu_mask_write_proc;

        /*
         * Create entries for all existing IRQs.
         */
        for (i = 0; i < NR_IRQS; i++)
                register_irq_proc(i);
}
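
/*
 * Editor's shell-level sketch: the file wired up above round-trips through
 * parse_hex_value() and prof_cpu_mask_read_proc(), so:
 *
 *      # echo 1 > /proc/irq/prof_cpu_mask     (profile CPU 0 only)
 *      # cat /proc/irq/prof_cpu_mask
 *      00000001
 */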

/*****************************************************************************/
/*
 * initialise the interrupt system
 */
void __init init_IRQ(void)
{
        route_cpu_irqs();
        fpga_init();
#ifdef CONFIG_FUJITSU_MB93493
        route_mb93493_irqs();
#endif
} /* end init_IRQ() */