Diffstat (limited to 'arch/ia64/kernel/irq_ia64.c')

-rw-r--r--	arch/ia64/kernel/irq_ia64.c	278

1 file changed, 278 insertions, 0 deletions
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
new file mode 100644
index 000000000000..5ba06ebe355b
--- /dev/null
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -0,0 +1,278 @@
/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *			PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *			Added CPU Hotplug handling for IPF.
 */

#include <linux/config.h>
#include <linux/module.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/random.h>	/* for rand_initialize_irq() */
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/threads.h>
#include <linux/bitops.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/system.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

#define IRQ_DEBUG	0

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);
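/*
 * Descriptive note (added, not in the original source): the table above is
 * indexed by legacy 8259 IRQ number and yields the IA-64 vector assigned to
 * it, e.g. ISA IRQ 0 maps to vector 0x2f and ISA IRQ 1 to vector 0x20.
 * Callers are expected to go through a helper such as the
 * isa_irq_to_vector() macro in <asm/hw_irq.h> (assumed name) rather than
 * indexing the array directly.
 */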

static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];

int
assign_irq_vector (int irq)
{
	int pos, vector;
 again:
	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
	vector = IA64_FIRST_DEVICE_VECTOR + pos;
	if (vector > IA64_LAST_DEVICE_VECTOR)
		/* XXX could look for sharable vectors instead of panic'ing... */
		panic("assign_irq_vector: out of interrupt vectors!");
	if (test_and_set_bit(pos, ia64_vector_mask))
		/* someone else claimed this bit between the scan and the set; rescan */
		goto again;
	return vector;
}

void
free_irq_vector (int vector)
{
	int pos;

	if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
		return;

	pos = vector - IA64_FIRST_DEVICE_VECTOR;
	if (!test_and_clear_bit(pos, ia64_vector_mask))
		printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
}
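
/*
 * Illustrative usage sketch (added note, not from the original source):
 * a caller that needs a dynamically allocated device vector would pair
 * the two helpers above roughly as follows; note that the 'irq' argument
 * is currently ignored by assign_irq_vector() and allocation is a simple
 * first-fit scan of ia64_vector_mask:
 *
 *	int vector;
 *
 *	vector = assign_irq_vector(irq);
 *	... program the device/IOSAPIC to raise this vector ...
 *	free_irq_vector(vector);
 */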

#ifdef CONFIG_SMP
# define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#else
# define IS_RESCHEDULE(vec)	(0)
#endif
/*
 * This is where the IVT branches when we get an external interrupt.
 * It dispatches to the correct hardware IRQ handler via a function
 * pointer.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here. This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static unsigned char count;
			static long last_time;

			if (jiffies - last_time > 5*HZ)
				count = 0;
			if (++count < 5) {
				last_time = jiffies;
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=0x%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).  The 256 vectors are grouped into
	 * 16 priority classes of 16 vectors each, and raising TPR to the
	 * current vector keeps its own class and every lower one masked.
	 */
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		if (!IS_RESCHEDULE(vector)) {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			__do_IRQ(local_vector_to_irq(vector), regs);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the keyboard
	 * softirq handler needs to be able to wait for further keyboard
	 * interrupts, which can't come through until ia64_eoi() has been done.
	 */
	irq_exit();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates normal interrupt processing for interrupts that
 * are still pending when a CPU is about to be brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt-style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		if (!IS_RESCHEDULE(vector)) {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Dispatch the handler as a real interrupt would have,
			 * but pass NULL for pt_regs since there is no interrupted
			 * context here.  This probably could share code with
			 * ia64_handle_irq().
			 */
			vectors_in_migration[local_vector_to_irq(vector)] = 0;
			__do_IRQ(local_vector_to_irq(vector), NULL);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif


#ifdef CONFIG_SMP
extern irqreturn_t handle_IPI (int irq, void *dev_id, struct pt_regs *regs);

static struct irqaction ipi_irqaction = {
	.handler =	handle_IPI,
	.flags =	SA_INTERRUPT,
	.name =		"IPI"
};
#endif

void
register_percpu_irq (ia64_vector vec, struct irqaction *action)
{
	irq_desc_t *desc;
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; ++irq)
		if (irq_to_vector(irq) == vec) {
			desc = irq_descp(irq);
			desc->status |= IRQ_PER_CPU;
			desc->handler = &irq_type_ia64_lsapic;
			if (action)
				setup_irq(irq, action);
		}
}

void __init
init_IRQ (void)
{
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
#endif
#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif
	platform_irq_init();
}

void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

#ifdef CONFIG_SMP
	phys_cpu_id = cpu_physical_id(cpu);
#else
	phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
#endif

	/*
	 * The physical CPU number is encoded as an 8-bit ID and an 8-bit EID.
	 */

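	/*
	 * Descriptive note (added; layout as implied by the computation
	 * below): the data word encodes the vector in bits 0-7 and the
	 * delivery mode starting at bit 8, while the target processor is
	 * selected by the address written to -- the 16-bit physical CPU id
	 * (EID:ID) occupies bits 4-19 of the offset from ipi_base_addr and
	 * bit 3 selects redirection.
	 */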
	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

	writeq(ipi_data, ipi_addr);
}