author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/ia64/kernel/irq.c
tags       Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/ia64/kernel/irq.c')
-rw-r--r--  arch/ia64/kernel/irq.c  238
1 file changed, 238 insertions, 0 deletions
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
new file mode 100644
index 000000000000..28f2aadc38d0
--- /dev/null
+++ b/arch/ia64/kernel/irq.c
@@ -0,0 +1,238 @@
/*
 * linux/arch/ia64/kernel/irq.c
 *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>, Intel Corporation 2004
 *
 * 4/14/2004: Added code to handle CPU migration and do safe IRQ
 *            migration without losing interrupts for the IOSAPIC
 *            architecture.
 */
18 | |||
19 | #include <asm/delay.h> | ||
20 | #include <asm/uaccess.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/seq_file.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/kernel_stat.h> | ||
25 | |||
/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
}

#ifdef CONFIG_IA64_GENERIC
unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
{
	return (unsigned int) vec;
}
#endif

/*
 * Interrupt statistics:
 */

atomic_t irq_err_count;

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, " ");
		for (j=0; j<NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].handler->typename);
		seq_printf(p, " %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS)
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
	return 0;
}
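
/*
 * [Editor's illustrative sketch, not part of the original file.]  The
 * rows built above are what a user sees when reading /proc/interrupts;
 * a minimal standalone userspace reader, assuming nothing beyond the
 * standard C library, would be:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/proc/interrupts", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);	// one row per IRQ, "ERR:" last
 *		fclose(f);
 *		return 0;
 *	}
 */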
92 | |||
93 | #ifdef CONFIG_SMP | ||
94 | /* | ||
95 | * This is updated when the user sets irq affinity via /proc | ||
96 | */ | ||
97 | static cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS]; | ||
98 | static unsigned long pending_irq_redir[BITS_TO_LONGS(NR_IRQS)]; | ||
99 | |||
100 | static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 }; | ||
101 | |||
/*
 * Arch-specific routine: record the requested destination so the write
 * to the IOSAPIC RTE that actually reprograms the interrupt can be
 * deferred (see move_irq() below).
 */
void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
{
	pending_irq_cpumask[irq] = mask_val;
}
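
/*
 * [Editor's illustrative sketch, not part of the original file.]  The
 * pending mask recorded above is driven from userspace by writing a hex
 * CPU mask to /proc/irq/<n>/smp_affinity; the IRQ number and mask below
 * are made up for the example:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/irq/31/smp_affinity", "w");
 *
 *		if (!f)
 *			return 1;
 *		fprintf(f, "2\n");	// mask 0x2: steer IRQ 31 to CPU 1
 *		fclose(f);
 *		return 0;
 *	}
 */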
110 | |||
111 | void set_irq_affinity_info (unsigned int irq, int hwid, int redir) | ||
112 | { | ||
113 | cpumask_t mask = CPU_MASK_NONE; | ||
114 | |||
115 | cpu_set(cpu_logical_id(hwid), mask); | ||
116 | |||
117 | if (irq < NR_IRQS) { | ||
118 | irq_affinity[irq] = mask; | ||
119 | irq_redir[irq] = (char) (redir & 0xff); | ||
120 | } | ||
121 | } | ||
122 | |||
123 | |||
124 | void move_irq(int irq) | ||
125 | { | ||
126 | /* note - we hold desc->lock */ | ||
127 | cpumask_t tmp; | ||
128 | irq_desc_t *desc = irq_descp(irq); | ||
129 | int redir = test_bit(irq, pending_irq_redir); | ||
130 | |||
131 | if (unlikely(!desc->handler->set_affinity)) | ||
132 | return; | ||
133 | |||
134 | if (!cpus_empty(pending_irq_cpumask[irq])) { | ||
135 | cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map); | ||
136 | if (unlikely(!cpus_empty(tmp))) { | ||
137 | desc->handler->set_affinity(irq | (redir ? IA64_IRQ_REDIRECTED : 0), | ||
138 | pending_irq_cpumask[irq]); | ||
139 | } | ||
140 | cpus_clear(pending_irq_cpumask[irq]); | ||
141 | } | ||
142 | } | ||
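
/*
 * [Editor's illustrative sketch, not part of the original file.]
 * move_irq() relies on the caller already holding desc->lock, so a
 * hypothetical call site in the dispatch path would bracket it like
 * this:
 *
 *	irq_desc_t *desc = irq_descp(irq);
 *
 *	spin_lock(&desc->lock);
 *	move_irq(irq);			// safe: desc->lock is held
 *	spin_unlock(&desc->lock);
 */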
143 | |||
144 | |||
145 | #endif /* CONFIG_SMP */ | ||
146 | |||
147 | #ifdef CONFIG_HOTPLUG_CPU | ||
148 | unsigned int vectors_in_migration[NR_IRQS]; | ||
149 | |||
/*
 * Since cpu_online_map has already been updated, it is enough to check
 * each irq for an affinity mask that no longer contains any online CPU.
 */
static void migrate_irqs(void)
{
	cpumask_t mask;
	irq_desc_t *desc;
	int irq, new_cpu;

	for (irq=0; irq < NR_IRQS; irq++) {
		desc = irq_descp(irq);

		/*
		 * No handling for now.
		 * TBD: Implement a disable function so we can
		 * tell the CPU not to respond to these local interrupt
		 * sources, such as the ITV, CPEI, and MCA.
		 */
		if (desc->status == IRQ_PER_CPU)
			continue;
171 | |||
172 | cpus_and(mask, irq_affinity[irq], cpu_online_map); | ||
173 | if (any_online_cpu(mask) == NR_CPUS) { | ||
174 | /* | ||
175 | * Save it for phase 2 processing | ||
176 | */ | ||
177 | vectors_in_migration[irq] = irq; | ||
178 | |||
179 | new_cpu = any_online_cpu(cpu_online_map); | ||
180 | mask = cpumask_of_cpu(new_cpu); | ||
181 | |||
			/*
			 * All three are essential; currently WARN_ON.. maybe panic?
			 */
			if (desc->handler && desc->handler->disable &&
				desc->handler->enable && desc->handler->set_affinity) {
				desc->handler->disable(irq);
				desc->handler->set_affinity(irq, mask);
				desc->handler->enable(irq);
			} else {
				WARN_ON((!(desc->handler) || !(desc->handler->disable) ||
					!(desc->handler->enable) ||
					!(desc->handler->set_affinity)));
			}
		}
	}
}
198 | |||
199 | void fixup_irqs(void) | ||
200 | { | ||
201 | unsigned int irq; | ||
202 | extern void ia64_process_pending_intr(void); | ||
203 | |||
204 | ia64_set_itv(1<<16); | ||
205 | /* | ||
206 | * Phase 1: Locate irq's bound to this cpu and | ||
207 | * relocate them for cpu removal. | ||
208 | */ | ||
209 | migrate_irqs(); | ||
210 | |||
211 | /* | ||
212 | * Phase 2: Perform interrupt processing for all entries reported in | ||
213 | * local APIC. | ||
214 | */ | ||
215 | ia64_process_pending_intr(); | ||
216 | |||
	/*
	 * Phase 3: Now handle any interrupts not captured in the local APIC.
	 * This accounts for cases where a device interrupted while its RTE
	 * was being disabled and reprogrammed.
	 */
	for (irq=0; irq < NR_IRQS; irq++) {
		if (vectors_in_migration[irq]) {
			vectors_in_migration[irq]=0;
			__do_IRQ(irq, NULL);
		}
	}

	/*
	 * Now let the processor die. We do irq disable and max_xtp() to
	 * ensure no more interrupts are routed to this processor.
	 * However, the local timer interrupt can still have one pending,
	 * which is taken care of in timer_interrupt().
	 */
	max_xtp();
	local_irq_disable();
}
#endif
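
/*
 * [Editor's illustrative sketch, not part of the original file.]
 * fixup_irqs() is intended to run on the CPU going offline, after that
 * CPU has been cleared from cpu_online_map (which is why migrate_irqs()
 * can trust the map).  An arch hotplug path would use it roughly as
 * follows; this outline is hypothetical, not the actual ia64
 * __cpu_disable() implementation:
 *
 *	int __cpu_disable(void)
 *	{
 *		cpu_clear(smp_processor_id(), cpu_online_map);
 *		fixup_irqs();		// migrate, drain, then quiesce
 *		return 0;
 *	}
 */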