author     Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d   /arch/ia64/sn/kernel/irq.c
Linux-2.6.12-rc2 (v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/ia64/sn/kernel/irq.c')
-rw-r--r--  arch/ia64/sn/kernel/irq.c  431
1 files changed, 431 insertions, 0 deletions
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
new file mode 100644
index 000000000000..3be44724f6c8
--- /dev/null
+++ b/arch/ia64/sn/kernel/irq.c
@@ -0,0 +1,431 @@
/*
 * Platform dependent support for SGI SN
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
 */

#include <linux/irq.h>
#include <asm/sn/intr.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include "xtalk/xwidgetdev.h"
#include "pci/pcibus_provider_defs.h"
#include "pci/pcidev.h"
#include "pci/pcibr_provider.h"
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>

static void force_interrupt(int irq);
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);

extern int sn_force_interrupt_flag;
extern int sn_ioif_inited;
struct sn_irq_info **sn_irq;

static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget,
                                     u64 sn_irq_info,
                                     int req_irq, nasid_t req_nasid,
                                     int req_slice)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
                        (u64) SAL_INTR_ALLOC, (u64) local_nasid,
                        (u64) local_widget, (u64) sn_irq_info, (u64) req_irq,
                        (u64) req_nasid, (u64) req_slice);
        return ret_stuff.status;
}

static inline void sn_intr_free(nasid_t local_nasid, int local_widget,
                                struct sn_irq_info *sn_irq_info)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
                        (u64) SAL_INTR_FREE, (u64) local_nasid,
                        (u64) local_widget, (u64) sn_irq_info->irq_irq,
                        (u64) sn_irq_info->irq_cookie, 0, 0);
}

static unsigned int sn_startup_irq(unsigned int irq)
{
        return 0;
}

static void sn_shutdown_irq(unsigned int irq)
{
}

static void sn_disable_irq(unsigned int irq)
{
}

static void sn_enable_irq(unsigned int irq)
{
}

static void sn_ack_irq(unsigned int irq)
{
        uint64_t event_occurred, mask = 0;
        int nasid;

        irq = irq & 0xff;
        nasid = get_nasid();
        event_occurred =
            HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
        if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
                mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT);
        }
        if (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) {
                mask |= (1 << SH_EVENT_OCCURRED_IPI_INT_SHFT);
        }
        if (event_occurred & SH_EVENT_OCCURRED_II_INT0_MASK) {
                mask |= (1 << SH_EVENT_OCCURRED_II_INT0_SHFT);
        }
        if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) {
                mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT);
        }
        HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS),
              mask);
        __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);

        move_irq(irq);
}

static void sn_end_irq(unsigned int irq)
{
        int nasid;
        int ivec;
        uint64_t event_occurred;

        ivec = irq & 0xff;
        if (ivec == SGI_UART_VECTOR) {
                nasid = get_nasid();
                event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR
                                       (nasid, SH_EVENT_OCCURRED));
                /* If the UART bit is set here, we may have received an
                 * interrupt from the UART that the driver missed. To
                 * make sure, we IPI ourselves to force us to look again.
                 */
                if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
                        platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
                                          IA64_IPI_DM_INT, 0);
                }
        }
        __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
        if (sn_force_interrupt_flag)
                force_interrupt(irq);
}

static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
{
        struct sn_irq_info *sn_irq_info = sn_irq[irq];
        struct sn_irq_info *tmp_sn_irq_info;
        int cpuid, cpuphys;
        nasid_t t_nasid;        /* nasid to target */
        int t_slice;            /* slice to target */

        /* allocate a temp sn_irq_info struct to get new target info */
        tmp_sn_irq_info = kmalloc(sizeof(*tmp_sn_irq_info), GFP_KERNEL);
        if (!tmp_sn_irq_info)
                return;

        cpuid = first_cpu(mask);
        cpuphys = cpu_physical_id(cpuid);
        t_nasid = cpuid_to_nasid(cpuid);
        t_slice = cpuid_to_slice(cpuid);

        while (sn_irq_info) {
                int status;
                int local_widget;
                uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
                nasid_t local_nasid = NASID_GET(bridge);

                if (!bridge)
                        break;  /* irq is not a device interrupt */

                if (local_nasid & 1)
                        local_widget = TIO_SWIN_WIDGETNUM(bridge);
                else
                        local_widget = SWIN_WIDGETNUM(bridge);

                /* Free the old PROM sn_irq_info structure */
                sn_intr_free(local_nasid, local_widget, sn_irq_info);

                /* allocate a new PROM sn_irq_info struct */
                status = sn_intr_alloc(local_nasid, local_widget,
                                       __pa(tmp_sn_irq_info), irq, t_nasid,
                                       t_slice);

                if (status == 0) {
                        /* Update kernels sn_irq_info with new target info */
                        unregister_intr_pda(sn_irq_info);
                        sn_irq_info->irq_cpuid = cpuid;
                        sn_irq_info->irq_nasid = t_nasid;
                        sn_irq_info->irq_slice = t_slice;
                        sn_irq_info->irq_xtalkaddr =
                            tmp_sn_irq_info->irq_xtalkaddr;
                        sn_irq_info->irq_cookie = tmp_sn_irq_info->irq_cookie;
                        register_intr_pda(sn_irq_info);

                        if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type)) {
                                pcibr_change_devices_irq(sn_irq_info);
                        }

                        sn_irq_info = sn_irq_info->irq_next;

#ifdef CONFIG_SMP
                        set_irq_affinity_info((irq & 0xff), cpuphys, 0);
#endif
                } else {
                        break;  /* snp_affinity failed the intr_alloc */
                }
        }
        kfree(tmp_sn_irq_info);
}

struct hw_interrupt_type irq_type_sn = {
        "SN hub",
        sn_startup_irq,
        sn_shutdown_irq,
        sn_enable_irq,
        sn_disable_irq,
        sn_ack_irq,
        sn_end_irq,
        sn_set_affinity_irq
};

unsigned int sn_local_vector_to_irq(u8 vector)
{
        return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
}

void sn_irq_init(void)
{
        int i;
        irq_desc_t *base_desc = irq_desc;

        for (i = 0; i < NR_IRQS; i++) {
                if (base_desc[i].handler == &no_irq_type) {
                        base_desc[i].handler = &irq_type_sn;
                }
        }
}

static void register_intr_pda(struct sn_irq_info *sn_irq_info)
{
        int irq = sn_irq_info->irq_irq;
        int cpu = sn_irq_info->irq_cpuid;

        if (pdacpu(cpu)->sn_last_irq < irq) {
                pdacpu(cpu)->sn_last_irq = irq;
        }

        if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) {
                pdacpu(cpu)->sn_first_irq = irq;
        }
}

static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
{
        int irq = sn_irq_info->irq_irq;
        int cpu = sn_irq_info->irq_cpuid;
        struct sn_irq_info *tmp_irq_info;
        int i, foundmatch;

        if (pdacpu(cpu)->sn_last_irq == irq) {
                foundmatch = 0;
                for (i = pdacpu(cpu)->sn_last_irq - 1; i; i--) {
                        tmp_irq_info = sn_irq[i];
                        while (tmp_irq_info) {
                                if (tmp_irq_info->irq_cpuid == cpu) {
                                        foundmatch++;
                                        break;
                                }
                                tmp_irq_info = tmp_irq_info->irq_next;
                        }
                        if (foundmatch) {
                                break;
                        }
                }
                pdacpu(cpu)->sn_last_irq = i;
        }

        if (pdacpu(cpu)->sn_first_irq == irq) {
                foundmatch = 0;
                for (i = pdacpu(cpu)->sn_first_irq + 1; i < NR_IRQS; i++) {
                        tmp_irq_info = sn_irq[i];
                        while (tmp_irq_info) {
                                if (tmp_irq_info->irq_cpuid == cpu) {
                                        foundmatch++;
                                        break;
                                }
                                tmp_irq_info = tmp_irq_info->irq_next;
                        }
                        if (foundmatch) {
                                break;
                        }
                }
                pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
        }
}

struct sn_irq_info *sn_irq_alloc(nasid_t local_nasid, int local_widget, int irq,
                                 nasid_t nasid, int slice)
{
        struct sn_irq_info *sn_irq_info;
        int status;

        sn_irq_info = kmalloc(sizeof(*sn_irq_info), GFP_KERNEL);
        if (sn_irq_info == NULL)
                return NULL;

        memset(sn_irq_info, 0x0, sizeof(*sn_irq_info));

        status =
            sn_intr_alloc(local_nasid, local_widget, __pa(sn_irq_info), irq,
                          nasid, slice);

        if (status) {
                kfree(sn_irq_info);
                return NULL;
        } else {
                return sn_irq_info;
        }
}

void sn_irq_free(struct sn_irq_info *sn_irq_info)
{
        uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
        nasid_t local_nasid = NASID_GET(bridge);
        int local_widget;

        if (local_nasid & 1)    /* tio check */
                local_widget = TIO_SWIN_WIDGETNUM(bridge);
        else
                local_widget = SWIN_WIDGETNUM(bridge);

        sn_intr_free(local_nasid, local_widget, sn_irq_info);

        kfree(sn_irq_info);
}

void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
{
        nasid_t nasid = sn_irq_info->irq_nasid;
        int slice = sn_irq_info->irq_slice;
        int cpu = nasid_slice_to_cpuid(nasid, slice);

        sn_irq_info->irq_cpuid = cpu;
        sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);

        /* link it into the sn_irq[irq] list */
        sn_irq_info->irq_next = sn_irq[sn_irq_info->irq_irq];
        sn_irq[sn_irq_info->irq_irq] = sn_irq_info;

        (void)register_intr_pda(sn_irq_info);
}

static void force_interrupt(int irq)
{
        struct sn_irq_info *sn_irq_info;

        if (!sn_ioif_inited)
                return;
        sn_irq_info = sn_irq[irq];
        while (sn_irq_info) {
                if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
                    (sn_irq_info->irq_bridge != NULL)) {
                        pcibr_force_interrupt(sn_irq_info);
                }
                sn_irq_info = sn_irq_info->irq_next;
        }
}

/*
 * Check for lost interrupts. If the PIC int_status reg. says that
 * an interrupt has been sent, but not handled, and the interrupt
 * is not pending in either the cpu irr regs or in the soft irr regs,
 * and the interrupt is not in service, then the interrupt may have
 * been lost. Force an interrupt on that pin. It is possible that
 * the interrupt is in flight, so we may generate a spurious interrupt,
 * but we should never miss a real lost interrupt.
 */
static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
{
        uint64_t regval;
        int irr_reg_num;
        int irr_bit;
        uint64_t irr_reg;
        struct pcidev_info *pcidev_info;
        struct pcibus_info *pcibus_info;

        pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
        if (!pcidev_info)
                return;

        pcibus_info =
            (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
            pdi_pcibus_info;
        regval = pcireg_intr_status_get(pcibus_info);

        irr_reg_num = irq_to_vector(irq) / 64;
        irr_bit = irq_to_vector(irq) % 64;
        switch (irr_reg_num) {
        case 0:
                irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
                break;
        case 1:
                irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
                break;
        case 2:
                irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
                break;
        case 3:
                irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
                break;
        }
        if (!test_bit(irr_bit, &irr_reg)) {
                if (!test_bit(irq, pda->sn_soft_irr)) {
                        if (!test_bit(irq, pda->sn_in_service_ivecs)) {
                                regval &= 0xff;
                                if (sn_irq_info->irq_int_bit & regval &
                                    sn_irq_info->irq_last_intr) {
                                        regval &=
                                            ~(sn_irq_info->
                                              irq_int_bit & regval);
                                        pcibr_force_interrupt(sn_irq_info);
                                }
                        }
                }
        }
        sn_irq_info->irq_last_intr = regval;
}

void sn_lb_int_war_check(void)
{
        int i;

        if (!sn_ioif_inited || pda->sn_first_irq == 0)
                return;
        for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
                struct sn_irq_info *sn_irq_info = sn_irq[i];
                while (sn_irq_info) {
                        /* Only call for PCI bridges that are fully initialized. */
                        if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
                            (sn_irq_info->irq_bridge != NULL)) {
                                sn_check_intr(i, sn_irq_info);
                        }
                        sn_irq_info = sn_irq_info->irq_next;
                }
        }
}