author:    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2006-07-03 07:36:01 -0400
committer: Paul Mackerras <paulus@samba.org>                  2006-07-03 07:36:01 -0400
commit:    0ebfff1491ef85d41ddf9c633834838be144f69f
tree:      5b469a6d61a9fcfbf94e7b6d411e544dbdec8dec  /arch/powerpc/platforms
parent:    f63e115fb50db39706b955b81e3375ef6bab2268
[POWERPC] Add new interrupt mapping core and change platforms to use it
This adds the new irq remapper core and removes the old one. Because
there are some fundamental conflicts with the old code, such as the value
of NO_IRQ, which I'm now setting to 0 (as per discussions with Linus),
this commit also changes the relevant platform and driver code over to
use the new remapper, so as not to cause difficulties later in bisecting.
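
One consequence worth spelling out for driver code: with NO_IRQ now 0, the
usual pattern with the new core is to create a mapping and test the returned
virtual number against NO_IRQ before requesting it. A minimal sketch, not
code from this patch ("host", "hw_irq", my_handler, my_dev and the name
string are placeholders):

	/* Sketch only: host and hw_irq come from platform code elsewhere,
	 * my_handler/my_dev are hypothetical.
	 */
	unsigned int virq = irq_create_mapping(host, hw_irq, 0);

	if (virq == NO_IRQ)	/* NO_IRQ is now 0 */
		return -ENODEV;
	if (request_irq(virq, my_handler, IRQF_DISABLED, "my-device", my_dev))
		return -EIO;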
This patch removes the old pre-parsing of the Open Firmware interrupt
tree along with all the bogus assumptions it made to try to renumber
interrupts according to the platform. This is all to be handled by the
new code now.
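
In practice that means a platform or driver that used to rely on the
pre-parsed numbers now asks the new core to translate the device-tree
specifier itself, typically via irq_of_parse_and_map() as used by the chrp
changes below. A hedged sketch, assuming a device node np obtained elsewhere:

	unsigned int virq;

	/* Translate entry 0 of np's "interrupts" property into a linux irq */
	virq = irq_of_parse_and_map(np, 0);
	if (virq == NO_IRQ)
		printk(KERN_ERR "%s: failed to map interrupt\n",
		       np->full_name);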
For the pSeries XICS interrupt controller, a single remapper host is
created for the whole machine regardless of how many interrupt
presentation and source controllers are found, and it's set to match
any device node that isn't an 8259. That works fine on pSeries and
avoids having to deal with some of the complexities of split source
controllers vs. presentation controllers in the pSeries device trees.
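
The XICS source itself is not part of the excerpt below, but the match
callback this describes amounts to something like the following; treat it as
an illustration rather than the exact pSeries code, and note that "chrp,iic"
(the 8259 compatible string also used by the chrp changes below) is an
assumption here:

	static int xics_host_match(struct irq_host *h, struct device_node *node)
	{
		/* Match every interrupt parent except the legacy 8259, so
		 * XICS picks up all sources regardless of which presentation
		 * or source controller node they hang off.
		 */
		return !device_is_compatible(node, "chrp,iic");
	}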
The powerpc i8259 PIC driver now always requests the legacy interrupt
range. It also has the feature of being able to match any device node
(including NULL) when it is initialised without a device node. That will
help with porting platforms that have broken device-trees, like Pegasos,
which lack a proper interrupt tree.
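
The i8259 driver lives in arch/powerpc/sysdev and is outside this diffstat,
but the "match anything when no node was given" behaviour boils down to a
sketch like the one below (illustrative only, names assumed):

	static struct device_node *i8259_node;	/* NULL on broken trees */

	static int i8259_host_match(struct irq_host *h, struct device_node *node)
	{
		/* With no device node recorded at init time, match anything,
		 * including a NULL node.
		 */
		return i8259_node == NULL || i8259_node == node;
	}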
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/platforms')
26 files changed, 1396 insertions, 1098 deletions
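
Each converted PIC below follows the same skeleton: allocate an irq_host,
hook up match/map/xlate callbacks, and resolve hardware numbers to virtual
irqs at interrupt time. A condensed sketch of that shape, built from the
calls visible in the Cell IIC and spider-pic hunks (the my_pic_* names and
counts are placeholders):

	static int my_pic_map(struct irq_host *h, unsigned int virq,
			      irq_hw_number_t hw, unsigned int flags)
	{
		/* Attach the chip and flow handler when a mapping is created */
		set_irq_chip_and_handler(virq, &my_pic_chip, handle_level_irq);
		return 0;
	}

	static struct irq_host_ops my_pic_host_ops = {
		.match	= my_pic_match,	/* which device node(s) this host serves */
		.map	= my_pic_map,	/* called for each new virq <-> hw mapping */
		.xlate	= my_pic_xlate,	/* decode "interrupts" specifiers */
	};

	/* Init time: one linear-revmap host covering MY_PIC_SRC_COUNT sources */
	host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, MY_PIC_SRC_COUNT,
			      &my_pic_host_ops, MY_PIC_IRQ_INVALID);

	/* Interrupt time: hardware number back to the linux virq */
	virq = irq_linear_revmap(host, hw);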
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 97936f547f19..9d5da7896892 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c | |||
@@ -1,6 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * Cell Internal Interrupt Controller | 2 | * Cell Internal Interrupt Controller |
3 | * | 3 | * |
4 | * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org) | ||
5 | * IBM, Corp. | ||
6 | * | ||
4 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 | 7 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 |
5 | * | 8 | * |
6 | * Author: Arnd Bergmann <arndb@de.ibm.com> | 9 | * Author: Arnd Bergmann <arndb@de.ibm.com> |
@@ -25,11 +28,13 @@ | |||
25 | #include <linux/module.h> | 28 | #include <linux/module.h> |
26 | #include <linux/percpu.h> | 29 | #include <linux/percpu.h> |
27 | #include <linux/types.h> | 30 | #include <linux/types.h> |
31 | #include <linux/ioport.h> | ||
28 | 32 | ||
29 | #include <asm/io.h> | 33 | #include <asm/io.h> |
30 | #include <asm/pgtable.h> | 34 | #include <asm/pgtable.h> |
31 | #include <asm/prom.h> | 35 | #include <asm/prom.h> |
32 | #include <asm/ptrace.h> | 36 | #include <asm/ptrace.h> |
37 | #include <asm/machdep.h> | ||
33 | 38 | ||
34 | #include "interrupt.h" | 39 | #include "interrupt.h" |
35 | #include "cbe_regs.h" | 40 | #include "cbe_regs.h" |
@@ -39,9 +44,25 @@ struct iic { | |||
39 | u8 target_id; | 44 | u8 target_id; |
40 | u8 eoi_stack[16]; | 45 | u8 eoi_stack[16]; |
41 | int eoi_ptr; | 46 | int eoi_ptr; |
47 | struct irq_host *host; | ||
42 | }; | 48 | }; |
43 | 49 | ||
44 | static DEFINE_PER_CPU(struct iic, iic); | 50 | static DEFINE_PER_CPU(struct iic, iic); |
51 | #define IIC_NODE_COUNT 2 | ||
52 | static struct irq_host *iic_hosts[IIC_NODE_COUNT]; | ||
53 | |||
54 | /* Convert between "pending" bits and hw irq number */ | ||
55 | static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits) | ||
56 | { | ||
57 | unsigned char unit = bits.source & 0xf; | ||
58 | |||
59 | if (bits.flags & CBE_IIC_IRQ_IPI) | ||
60 | return IIC_IRQ_IPI0 | (bits.prio >> 4); | ||
61 | else if (bits.class <= 3) | ||
62 | return (bits.class << 4) | unit; | ||
63 | else | ||
64 | return IIC_IRQ_INVALID; | ||
65 | } | ||
45 | 66 | ||
46 | static void iic_mask(unsigned int irq) | 67 | static void iic_mask(unsigned int irq) |
47 | { | 68 | { |
@@ -65,197 +86,21 @@ static struct irq_chip iic_chip = { | |||
65 | .eoi = iic_eoi, | 86 | .eoi = iic_eoi, |
66 | }; | 87 | }; |
67 | 88 | ||
68 | /* XXX All of this has to be reworked completely. We need to assign a real | ||
69 | * interrupt numbers to the external interrupts and remove all the hard coded | ||
70 | * interrupt maps (rely on the device-tree whenever possible). | ||
71 | * | ||
72 | * Basically, my scheme is to define the "pendings" bits to be the HW interrupt | ||
73 | * number (ignoring the data and flags here). That means we can sort-of split | ||
74 | * external sources based on priority, and we can use request_irq() on pretty | ||
75 | * much anything. | ||
76 | * | ||
77 | * For spider or axon, they have their own interrupt space. spider will just have | ||
78 | * local "hardward" interrupts 0...xx * node stride. The node stride is not | ||
79 | * necessary (separate interrupt chips will have separate HW number space), but | ||
80 | * will allow to be compatible with existing device-trees. | ||
81 | * | ||
82 | * All of thise little world will get a standard remapping scheme to map those HW | ||
83 | * numbers into the linux flat irq number space. | ||
84 | */ | ||
85 | static int iic_external_get_irq(struct cbe_iic_pending_bits pending) | ||
86 | { | ||
87 | int irq; | ||
88 | unsigned char node, unit; | ||
89 | |||
90 | node = pending.source >> 4; | ||
91 | unit = pending.source & 0xf; | ||
92 | irq = -1; | ||
93 | |||
94 | /* | ||
95 | * This mapping is specific to the Cell Broadband | ||
96 | * Engine. We might need to get the numbers | ||
97 | * from the device tree to support future CPUs. | ||
98 | */ | ||
99 | switch (unit) { | ||
100 | case 0x00: | ||
101 | case 0x0b: | ||
102 | /* | ||
103 | * One of these units can be connected | ||
104 | * to an external interrupt controller. | ||
105 | */ | ||
106 | if (pending.class != 2) | ||
107 | break; | ||
108 | /* TODO: We might want to silently ignore cascade interrupts | ||
109 | * when no cascade handler exist yet | ||
110 | */ | ||
111 | irq = IIC_EXT_CASCADE + node * IIC_NODE_STRIDE; | ||
112 | break; | ||
113 | case 0x01 ... 0x04: | ||
114 | case 0x07 ... 0x0a: | ||
115 | /* | ||
116 | * These units are connected to the SPEs | ||
117 | */ | ||
118 | if (pending.class > 2) | ||
119 | break; | ||
120 | irq = IIC_SPE_OFFSET | ||
121 | + pending.class * IIC_CLASS_STRIDE | ||
122 | + node * IIC_NODE_STRIDE | ||
123 | + unit; | ||
124 | break; | ||
125 | } | ||
126 | if (irq == -1) | ||
127 | printk(KERN_WARNING "Unexpected interrupt class %02x, " | ||
128 | "source %02x, prio %02x, cpu %02x\n", pending.class, | ||
129 | pending.source, pending.prio, smp_processor_id()); | ||
130 | return irq; | ||
131 | } | ||
132 | |||
133 | /* Get an IRQ number from the pending state register of the IIC */ | 89 | /* Get an IRQ number from the pending state register of the IIC */ |
134 | int iic_get_irq(struct pt_regs *regs) | 90 | static unsigned int iic_get_irq(struct pt_regs *regs) |
135 | { | ||
136 | struct iic *iic; | ||
137 | int irq; | ||
138 | struct cbe_iic_pending_bits pending; | ||
139 | |||
140 | iic = &__get_cpu_var(iic); | ||
141 | *(unsigned long *) &pending = | ||
142 | in_be64((unsigned long __iomem *) &iic->regs->pending_destr); | ||
143 | iic->eoi_stack[++iic->eoi_ptr] = pending.prio; | ||
144 | BUG_ON(iic->eoi_ptr > 15); | ||
145 | |||
146 | irq = -1; | ||
147 | if (pending.flags & CBE_IIC_IRQ_VALID) { | ||
148 | if (pending.flags & CBE_IIC_IRQ_IPI) { | ||
149 | irq = IIC_IPI_OFFSET + (pending.prio >> 4); | ||
150 | /* | ||
151 | if (irq > 0x80) | ||
152 | printk(KERN_WARNING "Unexpected IPI prio %02x" | ||
153 | "on CPU %02x\n", pending.prio, | ||
154 | smp_processor_id()); | ||
155 | */ | ||
156 | } else { | ||
157 | irq = iic_external_get_irq(pending); | ||
158 | } | ||
159 | } | ||
160 | return irq; | ||
161 | } | ||
162 | |||
163 | /* hardcoded part to be compatible with older firmware */ | ||
164 | |||
165 | static int __init setup_iic_hardcoded(void) | ||
166 | { | ||
167 | struct device_node *np; | ||
168 | int nodeid, cpu; | ||
169 | unsigned long regs; | ||
170 | struct iic *iic; | ||
171 | |||
172 | for_each_possible_cpu(cpu) { | ||
173 | iic = &per_cpu(iic, cpu); | ||
174 | nodeid = cpu/2; | ||
175 | |||
176 | for (np = of_find_node_by_type(NULL, "cpu"); | ||
177 | np; | ||
178 | np = of_find_node_by_type(np, "cpu")) { | ||
179 | if (nodeid == *(int *)get_property(np, "node-id", NULL)) | ||
180 | break; | ||
181 | } | ||
182 | |||
183 | if (!np) { | ||
184 | printk(KERN_WARNING "IIC: CPU %d not found\n", cpu); | ||
185 | iic->regs = NULL; | ||
186 | iic->target_id = 0xff; | ||
187 | return -ENODEV; | ||
188 | } | ||
189 | |||
190 | regs = *(long *)get_property(np, "iic", NULL); | ||
191 | |||
192 | /* hack until we have decided on the devtree info */ | ||
193 | regs += 0x400; | ||
194 | if (cpu & 1) | ||
195 | regs += 0x20; | ||
196 | |||
197 | printk(KERN_INFO "IIC for CPU %d at %lx\n", cpu, regs); | ||
198 | iic->regs = ioremap(regs, sizeof(struct cbe_iic_thread_regs)); | ||
199 | iic->target_id = (nodeid << 4) + ((cpu & 1) ? 0xf : 0xe); | ||
200 | iic->eoi_stack[0] = 0xff; | ||
201 | } | ||
202 | |||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | static int __init setup_iic(void) | ||
207 | { | 91 | { |
208 | struct device_node *dn; | 92 | struct cbe_iic_pending_bits pending; |
209 | unsigned long *regs; | 93 | struct iic *iic; |
210 | char *compatible; | 94 | |
211 | unsigned *np, found = 0; | 95 | iic = &__get_cpu_var(iic); |
212 | struct iic *iic = NULL; | 96 | *(unsigned long *) &pending = |
213 | 97 | in_be64((unsigned long __iomem *) &iic->regs->pending_destr); | |
214 | for (dn = NULL; (dn = of_find_node_by_name(dn, "interrupt-controller"));) { | 98 | iic->eoi_stack[++iic->eoi_ptr] = pending.prio; |
215 | compatible = (char *)get_property(dn, "compatible", NULL); | 99 | BUG_ON(iic->eoi_ptr > 15); |
216 | 100 | if (pending.flags & CBE_IIC_IRQ_VALID) | |
217 | if (!compatible) { | 101 | return irq_linear_revmap(iic->host, |
218 | printk(KERN_WARNING "no compatible property found !\n"); | 102 | iic_pending_to_hwnum(pending)); |
219 | continue; | 103 | return NO_IRQ; |
220 | } | ||
221 | |||
222 | if (strstr(compatible, "IBM,CBEA-Internal-Interrupt-Controller")) | ||
223 | regs = (unsigned long *)get_property(dn,"reg", NULL); | ||
224 | else | ||
225 | continue; | ||
226 | |||
227 | if (!regs) | ||
228 | printk(KERN_WARNING "IIC: no reg property\n"); | ||
229 | |||
230 | np = (unsigned int *)get_property(dn, "ibm,interrupt-server-ranges", NULL); | ||
231 | |||
232 | if (!np) { | ||
233 | printk(KERN_WARNING "IIC: CPU association not found\n"); | ||
234 | iic->regs = NULL; | ||
235 | iic->target_id = 0xff; | ||
236 | return -ENODEV; | ||
237 | } | ||
238 | |||
239 | iic = &per_cpu(iic, np[0]); | ||
240 | iic->regs = ioremap(regs[0], sizeof(struct cbe_iic_thread_regs)); | ||
241 | iic->target_id = ((np[0] & 2) << 3) + ((np[0] & 1) ? 0xf : 0xe); | ||
242 | iic->eoi_stack[0] = 0xff; | ||
243 | printk("IIC for CPU %d at %lx mapped to %p\n", np[0], regs[0], iic->regs); | ||
244 | |||
245 | iic = &per_cpu(iic, np[1]); | ||
246 | iic->regs = ioremap(regs[2], sizeof(struct cbe_iic_thread_regs)); | ||
247 | iic->target_id = ((np[1] & 2) << 3) + ((np[1] & 1) ? 0xf : 0xe); | ||
248 | iic->eoi_stack[0] = 0xff; | ||
249 | |||
250 | printk("IIC for CPU %d at %lx mapped to %p\n", np[1], regs[2], iic->regs); | ||
251 | |||
252 | found++; | ||
253 | } | ||
254 | |||
255 | if (found) | ||
256 | return 0; | ||
257 | else | ||
258 | return -ENODEV; | ||
259 | } | 104 | } |
260 | 105 | ||
261 | #ifdef CONFIG_SMP | 106 | #ifdef CONFIG_SMP |
@@ -263,12 +108,12 @@ static int __init setup_iic(void) | |||
263 | /* Use the highest interrupt priorities for IPI */ | 108 | /* Use the highest interrupt priorities for IPI */ |
264 | static inline int iic_ipi_to_irq(int ipi) | 109 | static inline int iic_ipi_to_irq(int ipi) |
265 | { | 110 | { |
266 | return IIC_IPI_OFFSET + IIC_NUM_IPIS - 1 - ipi; | 111 | return IIC_IRQ_IPI0 + IIC_NUM_IPIS - 1 - ipi; |
267 | } | 112 | } |
268 | 113 | ||
269 | static inline int iic_irq_to_ipi(int irq) | 114 | static inline int iic_irq_to_ipi(int irq) |
270 | { | 115 | { |
271 | return IIC_NUM_IPIS - 1 - (irq - IIC_IPI_OFFSET); | 116 | return IIC_NUM_IPIS - 1 - (irq - IIC_IRQ_IPI0); |
272 | } | 117 | } |
273 | 118 | ||
274 | void iic_setup_cpu(void) | 119 | void iic_setup_cpu(void) |
@@ -287,22 +132,51 @@ u8 iic_get_target_id(int cpu) | |||
287 | } | 132 | } |
288 | EXPORT_SYMBOL_GPL(iic_get_target_id); | 133 | EXPORT_SYMBOL_GPL(iic_get_target_id); |
289 | 134 | ||
135 | struct irq_host *iic_get_irq_host(int node) | ||
136 | { | ||
137 | if (node < 0 || node >= IIC_NODE_COUNT) | ||
138 | return NULL; | ||
139 | return iic_hosts[node]; | ||
140 | } | ||
141 | EXPORT_SYMBOL_GPL(iic_get_irq_host); | ||
142 | |||
143 | |||
290 | static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) | 144 | static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) |
291 | { | 145 | { |
292 | smp_message_recv(iic_irq_to_ipi(irq), regs); | 146 | int ipi = (int)(long)dev_id; |
147 | |||
148 | smp_message_recv(ipi, regs); | ||
149 | |||
293 | return IRQ_HANDLED; | 150 | return IRQ_HANDLED; |
294 | } | 151 | } |
295 | 152 | ||
296 | static void iic_request_ipi(int ipi, const char *name) | 153 | static void iic_request_ipi(int ipi, const char *name) |
297 | { | 154 | { |
298 | int irq; | 155 | int node, virq; |
299 | 156 | ||
300 | irq = iic_ipi_to_irq(ipi); | 157 | for (node = 0; node < IIC_NODE_COUNT; node++) { |
301 | 158 | char *rname; | |
302 | /* IPIs are marked IRQF_DISABLED as they must run with irqs | 159 | if (iic_hosts[node] == NULL) |
303 | * disabled */ | 160 | continue; |
304 | set_irq_chip_and_handler(irq, &iic_chip, handle_percpu_irq); | 161 | virq = irq_create_mapping(iic_hosts[node], |
305 | request_irq(irq, iic_ipi_action, IRQF_DISABLED, name, NULL); | 162 | iic_ipi_to_irq(ipi), 0); |
163 | if (virq == NO_IRQ) { | ||
164 | printk(KERN_ERR | ||
165 | "iic: failed to map IPI %s on node %d\n", | ||
166 | name, node); | ||
167 | continue; | ||
168 | } | ||
169 | rname = kzalloc(strlen(name) + 16, GFP_KERNEL); | ||
170 | if (rname) | ||
171 | sprintf(rname, "%s node %d", name, node); | ||
172 | else | ||
173 | rname = (char *)name; | ||
174 | if (request_irq(virq, iic_ipi_action, IRQF_DISABLED, | ||
175 | rname, (void *)(long)ipi)) | ||
176 | printk(KERN_ERR | ||
177 | "iic: failed to request IPI %s on node %d\n", | ||
178 | name, node); | ||
179 | } | ||
306 | } | 180 | } |
307 | 181 | ||
308 | void iic_request_IPIs(void) | 182 | void iic_request_IPIs(void) |
@@ -313,41 +187,119 @@ void iic_request_IPIs(void) | |||
313 | iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); | 187 | iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); |
314 | #endif /* CONFIG_DEBUGGER */ | 188 | #endif /* CONFIG_DEBUGGER */ |
315 | } | 189 | } |
190 | |||
316 | #endif /* CONFIG_SMP */ | 191 | #endif /* CONFIG_SMP */ |
317 | 192 | ||
318 | static void __init iic_setup_builtin_handlers(void) | 193 | |
194 | static int iic_host_match(struct irq_host *h, struct device_node *node) | ||
195 | { | ||
196 | return h->host_data != NULL && node == h->host_data; | ||
197 | } | ||
198 | |||
199 | static int iic_host_map(struct irq_host *h, unsigned int virq, | ||
200 | irq_hw_number_t hw, unsigned int flags) | ||
201 | { | ||
202 | if (hw < IIC_IRQ_IPI0) | ||
203 | set_irq_chip_and_handler(virq, &iic_chip, handle_fasteoi_irq); | ||
204 | else | ||
205 | set_irq_chip_and_handler(virq, &iic_chip, handle_percpu_irq); | ||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | static int iic_host_xlate(struct irq_host *h, struct device_node *ct, | ||
210 | u32 *intspec, unsigned int intsize, | ||
211 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | ||
212 | |||
319 | { | 213 | { |
320 | int be, isrc; | 214 | /* Currently, we don't translate anything. That needs to be fixed as |
215 | * we get better defined device-trees. iic interrupts have to be | ||
216 | * explicitely mapped by whoever needs them | ||
217 | */ | ||
218 | return -ENODEV; | ||
219 | } | ||
220 | |||
221 | static struct irq_host_ops iic_host_ops = { | ||
222 | .match = iic_host_match, | ||
223 | .map = iic_host_map, | ||
224 | .xlate = iic_host_xlate, | ||
225 | }; | ||
226 | |||
227 | static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr, | ||
228 | struct irq_host *host) | ||
229 | { | ||
230 | /* XXX FIXME: should locate the linux CPU number from the HW cpu | ||
231 | * number properly. We are lucky for now | ||
232 | */ | ||
233 | struct iic *iic = &per_cpu(iic, hw_cpu); | ||
321 | 234 | ||
322 | /* XXX FIXME: Assume two threads per BE are present */ | 235 | iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs)); |
323 | for (be=0; be < num_present_cpus() / 2; be++) { | 236 | BUG_ON(iic->regs == NULL); |
324 | int irq; | ||
325 | 237 | ||
326 | /* setup SPE chip and handlers */ | 238 | iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe); |
327 | for (isrc = 0; isrc < IIC_CLASS_STRIDE * 3; isrc++) { | 239 | iic->eoi_stack[0] = 0xff; |
328 | irq = IIC_NODE_STRIDE * be + IIC_SPE_OFFSET + isrc; | 240 | iic->host = host; |
329 | set_irq_chip_and_handler(irq, &iic_chip, handle_fasteoi_irq); | 241 | out_be64(&iic->regs->prio, 0); |
242 | |||
243 | printk(KERN_INFO "IIC for CPU %d at %lx mapped to %p, target id 0x%x\n", | ||
244 | hw_cpu, addr, iic->regs, iic->target_id); | ||
245 | } | ||
246 | |||
247 | static int __init setup_iic(void) | ||
248 | { | ||
249 | struct device_node *dn; | ||
250 | struct resource r0, r1; | ||
251 | struct irq_host *host; | ||
252 | int found = 0; | ||
253 | u32 *np; | ||
254 | |||
255 | for (dn = NULL; | ||
256 | (dn = of_find_node_by_name(dn,"interrupt-controller")) != NULL;) { | ||
257 | if (!device_is_compatible(dn, | ||
258 | "IBM,CBEA-Internal-Interrupt-Controller")) | ||
259 | continue; | ||
260 | np = (u32 *)get_property(dn, "ibm,interrupt-server-ranges", | ||
261 | NULL); | ||
262 | if (np == NULL) { | ||
263 | printk(KERN_WARNING "IIC: CPU association not found\n"); | ||
264 | of_node_put(dn); | ||
265 | return -ENODEV; | ||
330 | } | 266 | } |
331 | /* setup cascade chip */ | 267 | if (of_address_to_resource(dn, 0, &r0) || |
332 | irq = IIC_EXT_CASCADE + be * IIC_NODE_STRIDE; | 268 | of_address_to_resource(dn, 1, &r1)) { |
333 | set_irq_chip_and_handler(irq, &iic_chip, handle_fasteoi_irq); | 269 | printk(KERN_WARNING "IIC: Can't resolve addresses\n"); |
270 | of_node_put(dn); | ||
271 | return -ENODEV; | ||
272 | } | ||
273 | host = NULL; | ||
274 | if (found < IIC_NODE_COUNT) { | ||
275 | host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, | ||
276 | IIC_SOURCE_COUNT, | ||
277 | &iic_host_ops, | ||
278 | IIC_IRQ_INVALID); | ||
279 | iic_hosts[found] = host; | ||
280 | BUG_ON(iic_hosts[found] == NULL); | ||
281 | iic_hosts[found]->host_data = of_node_get(dn); | ||
282 | found++; | ||
283 | } | ||
284 | init_one_iic(np[0], r0.start, host); | ||
285 | init_one_iic(np[1], r1.start, host); | ||
334 | } | 286 | } |
287 | |||
288 | if (found) | ||
289 | return 0; | ||
290 | else | ||
291 | return -ENODEV; | ||
335 | } | 292 | } |
336 | 293 | ||
337 | void __init iic_init_IRQ(void) | 294 | void __init iic_init_IRQ(void) |
338 | { | 295 | { |
339 | int cpu, irq_offset; | 296 | /* Discover and initialize iics */ |
340 | struct iic *iic; | ||
341 | |||
342 | if (setup_iic() < 0) | 297 | if (setup_iic() < 0) |
343 | setup_iic_hardcoded(); | 298 | panic("IIC: Failed to initialize !\n"); |
344 | 299 | ||
345 | irq_offset = 0; | 300 | /* Set master interrupt handling function */ |
346 | for_each_possible_cpu(cpu) { | 301 | ppc_md.get_irq = iic_get_irq; |
347 | iic = &per_cpu(iic, cpu); | ||
348 | if (iic->regs) | ||
349 | out_be64(&iic->regs->prio, 0xff); | ||
350 | } | ||
351 | iic_setup_builtin_handlers(); | ||
352 | 302 | ||
303 | /* Enable on current CPU */ | ||
304 | iic_setup_cpu(); | ||
353 | } | 305 | } |
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h index c74515aeb630..5560a92ec3ab 100644 --- a/arch/powerpc/platforms/cell/interrupt.h +++ b/arch/powerpc/platforms/cell/interrupt.h | |||
@@ -37,23 +37,22 @@ | |||
37 | */ | 37 | */ |
38 | 38 | ||
39 | enum { | 39 | enum { |
40 | IIC_EXT_OFFSET = 0x00, /* Start of south bridge IRQs */ | 40 | IIC_IRQ_INVALID = 0xff, |
41 | IIC_EXT_CASCADE = 0x20, /* There is no interrupt 32 on spider */ | 41 | IIC_IRQ_MAX = 0x3f, |
42 | IIC_NUM_EXT = 0x40, /* Number of south bridge IRQs */ | 42 | IIC_IRQ_EXT_IOIF0 = 0x20, |
43 | IIC_SPE_OFFSET = 0x40, /* Start of SPE interrupts */ | 43 | IIC_IRQ_EXT_IOIF1 = 0x2b, |
44 | IIC_CLASS_STRIDE = 0x10, /* SPE IRQs per class */ | 44 | IIC_IRQ_IPI0 = 0x40, |
45 | IIC_IPI_OFFSET = 0x70, /* Start of IPI IRQs */ | 45 | IIC_NUM_IPIS = 0x10, /* IRQs reserved for IPI */ |
46 | IIC_NUM_IPIS = 0x10, /* IRQs reserved for IPI */ | 46 | IIC_SOURCE_COUNT = 0x50, |
47 | IIC_NODE_STRIDE = 0x80, /* Total IRQs per node */ | ||
48 | }; | 47 | }; |
49 | 48 | ||
50 | extern void iic_init_IRQ(void); | 49 | extern void iic_init_IRQ(void); |
51 | extern int iic_get_irq(struct pt_regs *regs); | ||
52 | extern void iic_cause_IPI(int cpu, int mesg); | 50 | extern void iic_cause_IPI(int cpu, int mesg); |
53 | extern void iic_request_IPIs(void); | 51 | extern void iic_request_IPIs(void); |
54 | extern void iic_setup_cpu(void); | 52 | extern void iic_setup_cpu(void); |
55 | 53 | ||
56 | extern u8 iic_get_target_id(int cpu); | 54 | extern u8 iic_get_target_id(int cpu); |
55 | extern struct irq_host *iic_get_irq_host(int node); | ||
57 | 56 | ||
58 | extern void spider_init_IRQ(void); | 57 | extern void spider_init_IRQ(void); |
59 | 58 | ||
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c index 70a4e903f7e2..282987d6d4a2 100644 --- a/arch/powerpc/platforms/cell/setup.c +++ b/arch/powerpc/platforms/cell/setup.c | |||
@@ -80,6 +80,14 @@ static void cell_progress(char *s, unsigned short hex) | |||
80 | printk("*** %04x : %s\n", hex, s ? s : ""); | 80 | printk("*** %04x : %s\n", hex, s ? s : ""); |
81 | } | 81 | } |
82 | 82 | ||
83 | static void __init cell_pcibios_fixup(void) | ||
84 | { | ||
85 | struct pci_dev *dev = NULL; | ||
86 | |||
87 | for_each_pci_dev(dev) | ||
88 | pci_read_irq_line(dev); | ||
89 | } | ||
90 | |||
83 | static void __init cell_init_irq(void) | 91 | static void __init cell_init_irq(void) |
84 | { | 92 | { |
85 | iic_init_IRQ(); | 93 | iic_init_IRQ(); |
@@ -130,8 +138,6 @@ static void __init cell_init_early(void) | |||
130 | 138 | ||
131 | cell_init_iommu(); | 139 | cell_init_iommu(); |
132 | 140 | ||
133 | ppc64_interrupt_controller = IC_CELL_PIC; | ||
134 | |||
135 | DBG(" <- cell_init_early()\n"); | 141 | DBG(" <- cell_init_early()\n"); |
136 | } | 142 | } |
137 | 143 | ||
@@ -178,8 +184,7 @@ define_machine(cell) { | |||
178 | .check_legacy_ioport = cell_check_legacy_ioport, | 184 | .check_legacy_ioport = cell_check_legacy_ioport, |
179 | .progress = cell_progress, | 185 | .progress = cell_progress, |
180 | .init_IRQ = cell_init_irq, | 186 | .init_IRQ = cell_init_irq, |
181 | .get_irq = iic_get_irq, | 187 | .pcibios_fixup = cell_pcibios_fixup, |
182 | |||
183 | #ifdef CONFIG_KEXEC | 188 | #ifdef CONFIG_KEXEC |
184 | .machine_kexec = default_machine_kexec, | 189 | .machine_kexec = default_machine_kexec, |
185 | .machine_kexec_prepare = default_machine_kexec_prepare, | 190 | .machine_kexec_prepare = default_machine_kexec_prepare, |
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c index 98425acb6cda..ae7ef88f1a37 100644 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ b/arch/powerpc/platforms/cell/spider-pic.c | |||
@@ -22,6 +22,7 @@ | |||
22 | 22 | ||
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/irq.h> | 24 | #include <linux/irq.h> |
25 | #include <linux/ioport.h> | ||
25 | 26 | ||
26 | #include <asm/pgtable.h> | 27 | #include <asm/pgtable.h> |
27 | #include <asm/prom.h> | 28 | #include <asm/prom.h> |
@@ -56,58 +57,67 @@ enum { | |||
56 | REISWAITEN = 0x508, /* Reissue Wait Control*/ | 57 | REISWAITEN = 0x508, /* Reissue Wait Control*/ |
57 | }; | 58 | }; |
58 | 59 | ||
59 | static void __iomem *spider_pics[4]; | 60 | #define SPIDER_CHIP_COUNT 4 |
61 | #define SPIDER_SRC_COUNT 64 | ||
62 | #define SPIDER_IRQ_INVALID 63 | ||
60 | 63 | ||
61 | static void __iomem *spider_get_pic(int irq) | 64 | struct spider_pic { |
62 | { | 65 | struct irq_host *host; |
63 | int node = irq / IIC_NODE_STRIDE; | 66 | struct device_node *of_node; |
64 | irq %= IIC_NODE_STRIDE; | 67 | void __iomem *regs; |
65 | 68 | unsigned int node_id; | |
66 | if (irq >= IIC_EXT_OFFSET && | 69 | }; |
67 | irq < IIC_EXT_OFFSET + IIC_NUM_EXT && | 70 | static struct spider_pic spider_pics[SPIDER_CHIP_COUNT]; |
68 | spider_pics) | ||
69 | return spider_pics[node]; | ||
70 | return NULL; | ||
71 | } | ||
72 | 71 | ||
73 | static int spider_get_nr(unsigned int irq) | 72 | static struct spider_pic *spider_virq_to_pic(unsigned int virq) |
74 | { | 73 | { |
75 | return (irq % IIC_NODE_STRIDE) - IIC_EXT_OFFSET; | 74 | return irq_map[virq].host->host_data; |
76 | } | 75 | } |
77 | 76 | ||
78 | static void __iomem *spider_get_irq_config(int irq) | 77 | static void __iomem *spider_get_irq_config(struct spider_pic *pic, |
78 | unsigned int src) | ||
79 | { | 79 | { |
80 | void __iomem *pic; | 80 | return pic->regs + TIR_CFGA + 8 * src; |
81 | pic = spider_get_pic(irq); | ||
82 | return pic + TIR_CFGA + 8 * spider_get_nr(irq); | ||
83 | } | 81 | } |
84 | 82 | ||
85 | static void spider_unmask_irq(unsigned int irq) | 83 | static void spider_unmask_irq(unsigned int virq) |
86 | { | 84 | { |
87 | int nodeid = (irq / IIC_NODE_STRIDE) * 0x10; | 85 | struct spider_pic *pic = spider_virq_to_pic(virq); |
88 | void __iomem *cfg = spider_get_irq_config(irq); | 86 | void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq); |
89 | irq = spider_get_nr(irq); | ||
90 | 87 | ||
91 | /* FIXME: Most of that is configuration and has nothing to do with enabling/disable, | 88 | /* We use no locking as we should be covered by the descriptor lock |
92 | * besides, it's also partially bogus. | 89 | * for access to invidual source configuration registers |
93 | */ | 90 | */ |
94 | out_be32(cfg, (in_be32(cfg) & ~0xf0)| 0x3107000eu | nodeid); | 91 | out_be32(cfg, in_be32(cfg) | 0x30000000u); |
95 | out_be32(cfg + 4, in_be32(cfg + 4) | 0x00020000u | irq); | ||
96 | } | 92 | } |
97 | 93 | ||
98 | static void spider_mask_irq(unsigned int irq) | 94 | static void spider_mask_irq(unsigned int virq) |
99 | { | 95 | { |
100 | void __iomem *cfg = spider_get_irq_config(irq); | 96 | struct spider_pic *pic = spider_virq_to_pic(virq); |
101 | irq = spider_get_nr(irq); | 97 | void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq); |
102 | 98 | ||
99 | /* We use no locking as we should be covered by the descriptor lock | ||
100 | * for access to invidual source configuration registers | ||
101 | */ | ||
103 | out_be32(cfg, in_be32(cfg) & ~0x30000000u); | 102 | out_be32(cfg, in_be32(cfg) & ~0x30000000u); |
104 | } | 103 | } |
105 | 104 | ||
106 | static void spider_ack_irq(unsigned int irq) | 105 | static void spider_ack_irq(unsigned int virq) |
107 | { | 106 | { |
108 | /* Should reset edge detection logic but we don't configure any edge interrupt | 107 | struct spider_pic *pic = spider_virq_to_pic(virq); |
109 | * at the moment. | 108 | unsigned int src = irq_map[virq].hwirq; |
109 | |||
110 | /* Reset edge detection logic if necessary | ||
110 | */ | 111 | */ |
112 | if (get_irq_desc(virq)->status & IRQ_LEVEL) | ||
113 | return; | ||
114 | |||
115 | /* Only interrupts 47 to 50 can be set to edge */ | ||
116 | if (src < 47 || src > 50) | ||
117 | return; | ||
118 | |||
119 | /* Perform the clear of the edge logic */ | ||
120 | out_be32(pic->regs + TIR_EDC, 0x100 | (src & 0xf)); | ||
111 | } | 121 | } |
112 | 122 | ||
113 | static struct irq_chip spider_pic = { | 123 | static struct irq_chip spider_pic = { |
@@ -117,102 +127,243 @@ static struct irq_chip spider_pic = { | |||
117 | .ack = spider_ack_irq, | 127 | .ack = spider_ack_irq, |
118 | }; | 128 | }; |
119 | 129 | ||
120 | static int spider_get_irq(int node) | 130 | static int spider_host_match(struct irq_host *h, struct device_node *node) |
131 | { | ||
132 | struct spider_pic *pic = h->host_data; | ||
133 | return node == pic->of_node; | ||
134 | } | ||
135 | |||
136 | static int spider_host_map(struct irq_host *h, unsigned int virq, | ||
137 | irq_hw_number_t hw, unsigned int flags) | ||
121 | { | 138 | { |
122 | unsigned long cs; | 139 | unsigned int sense = flags & IRQ_TYPE_SENSE_MASK; |
123 | void __iomem *regs = spider_pics[node]; | 140 | struct spider_pic *pic = h->host_data; |
141 | void __iomem *cfg = spider_get_irq_config(pic, hw); | ||
142 | int level = 0; | ||
143 | u32 ic; | ||
144 | |||
145 | /* Note that only level high is supported for most interrupts */ | ||
146 | if (sense != IRQ_TYPE_NONE && sense != IRQ_TYPE_LEVEL_HIGH && | ||
147 | (hw < 47 || hw > 50)) | ||
148 | return -EINVAL; | ||
149 | |||
150 | /* Decode sense type */ | ||
151 | switch(sense) { | ||
152 | case IRQ_TYPE_EDGE_RISING: | ||
153 | ic = 0x3; | ||
154 | break; | ||
155 | case IRQ_TYPE_EDGE_FALLING: | ||
156 | ic = 0x2; | ||
157 | break; | ||
158 | case IRQ_TYPE_LEVEL_LOW: | ||
159 | ic = 0x0; | ||
160 | level = 1; | ||
161 | break; | ||
162 | case IRQ_TYPE_LEVEL_HIGH: | ||
163 | case IRQ_TYPE_NONE: | ||
164 | ic = 0x1; | ||
165 | level = 1; | ||
166 | break; | ||
167 | default: | ||
168 | return -EINVAL; | ||
169 | } | ||
124 | 170 | ||
125 | cs = in_be32(regs + TIR_CS) >> 24; | 171 | /* Configure the source. One gross hack that was there before and |
172 | * that I've kept around is the priority to the BE which I set to | ||
173 | * be the same as the interrupt source number. I don't know wether | ||
174 | * that's supposed to make any kind of sense however, we'll have to | ||
175 | * decide that, but for now, I'm not changing the behaviour. | ||
176 | */ | ||
177 | out_be32(cfg, (ic << 24) | (0x7 << 16) | (pic->node_id << 4) | 0xe); | ||
178 | out_be32(cfg + 4, (0x2 << 16) | (hw & 0xff)); | ||
126 | 179 | ||
127 | if (cs == 63) | 180 | if (level) |
128 | return -1; | 181 | get_irq_desc(virq)->status |= IRQ_LEVEL; |
129 | else | 182 | set_irq_chip_and_handler(virq, &spider_pic, handle_level_irq); |
130 | return cs; | 183 | return 0; |
184 | } | ||
185 | |||
186 | static int spider_host_xlate(struct irq_host *h, struct device_node *ct, | ||
187 | u32 *intspec, unsigned int intsize, | ||
188 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | ||
189 | |||
190 | { | ||
191 | /* Spider interrupts have 2 cells, first is the interrupt source, | ||
192 | * second, well, I don't know for sure yet ... We mask the top bits | ||
193 | * because old device-trees encode a node number in there | ||
194 | */ | ||
195 | *out_hwirq = intspec[0] & 0x3f; | ||
196 | *out_flags = IRQ_TYPE_LEVEL_HIGH; | ||
197 | return 0; | ||
131 | } | 198 | } |
132 | 199 | ||
200 | static struct irq_host_ops spider_host_ops = { | ||
201 | .match = spider_host_match, | ||
202 | .map = spider_host_map, | ||
203 | .xlate = spider_host_xlate, | ||
204 | }; | ||
205 | |||
133 | static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc, | 206 | static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc, |
134 | struct pt_regs *regs) | 207 | struct pt_regs *regs) |
135 | { | 208 | { |
136 | int node = (int)(long)desc->handler_data; | 209 | struct spider_pic *pic = desc->handler_data; |
137 | int cascade_irq; | 210 | unsigned int cs, virq; |
138 | 211 | ||
139 | cascade_irq = spider_get_irq(node); | 212 | cs = in_be32(pic->regs + TIR_CS) >> 24; |
140 | generic_handle_irq(cascade_irq, regs); | 213 | if (cs == SPIDER_IRQ_INVALID) |
214 | virq = NO_IRQ; | ||
215 | else | ||
216 | virq = irq_linear_revmap(pic->host, cs); | ||
217 | if (virq != NO_IRQ) | ||
218 | generic_handle_irq(virq, regs); | ||
141 | desc->chip->eoi(irq); | 219 | desc->chip->eoi(irq); |
142 | } | 220 | } |
143 | 221 | ||
144 | /* hardcoded part to be compatible with older firmware */ | 222 | /* For hooking up the cascace we have a problem. Our device-tree is |
223 | * crap and we don't know on which BE iic interrupt we are hooked on at | ||
224 | * least not the "standard" way. We can reconstitute it based on two | ||
225 | * informations though: which BE node we are connected to and wether | ||
226 | * we are connected to IOIF0 or IOIF1. Right now, we really only care | ||
227 | * about the IBM cell blade and we know that its firmware gives us an | ||
228 | * interrupt-map property which is pretty strange. | ||
229 | */ | ||
230 | static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic) | ||
231 | { | ||
232 | unsigned int virq; | ||
233 | u32 *imap, *tmp; | ||
234 | int imaplen, intsize, unit; | ||
235 | struct device_node *iic; | ||
236 | struct irq_host *iic_host; | ||
237 | |||
238 | #if 0 /* Enable that when we have a way to retreive the node as well */ | ||
239 | /* First, we check wether we have a real "interrupts" in the device | ||
240 | * tree in case the device-tree is ever fixed | ||
241 | */ | ||
242 | struct of_irq oirq; | ||
243 | if (of_irq_map_one(pic->of_node, 0, &oirq) == 0) { | ||
244 | virq = irq_create_of_mapping(oirq.controller, oirq.specifier, | ||
245 | oirq.size); | ||
246 | goto bail; | ||
247 | } | ||
248 | #endif | ||
249 | |||
250 | /* Now do the horrible hacks */ | ||
251 | tmp = (u32 *)get_property(pic->of_node, "#interrupt-cells", NULL); | ||
252 | if (tmp == NULL) | ||
253 | return NO_IRQ; | ||
254 | intsize = *tmp; | ||
255 | imap = (u32 *)get_property(pic->of_node, "interrupt-map", &imaplen); | ||
256 | if (imap == NULL || imaplen < (intsize + 1)) | ||
257 | return NO_IRQ; | ||
258 | iic = of_find_node_by_phandle(imap[intsize]); | ||
259 | if (iic == NULL) | ||
260 | return NO_IRQ; | ||
261 | imap += intsize + 1; | ||
262 | tmp = (u32 *)get_property(iic, "#interrupt-cells", NULL); | ||
263 | if (tmp == NULL) | ||
264 | return NO_IRQ; | ||
265 | intsize = *tmp; | ||
266 | /* Assume unit is last entry of interrupt specifier */ | ||
267 | unit = imap[intsize - 1]; | ||
268 | /* Ok, we have a unit, now let's try to get the node */ | ||
269 | tmp = (u32 *)get_property(iic, "ibm,interrupt-server-ranges", NULL); | ||
270 | if (tmp == NULL) { | ||
271 | of_node_put(iic); | ||
272 | return NO_IRQ; | ||
273 | } | ||
274 | /* ugly as hell but works for now */ | ||
275 | pic->node_id = (*tmp) >> 1; | ||
276 | of_node_put(iic); | ||
277 | |||
278 | /* Ok, now let's get cracking. You may ask me why I just didn't match | ||
279 | * the iic host from the iic OF node, but that way I'm still compatible | ||
280 | * with really really old old firmwares for which we don't have a node | ||
281 | */ | ||
282 | iic_host = iic_get_irq_host(pic->node_id); | ||
283 | if (iic_host == NULL) | ||
284 | return NO_IRQ; | ||
285 | /* Manufacture an IIC interrupt number of class 2 */ | ||
286 | virq = irq_create_mapping(iic_host, 0x20 | unit, 0); | ||
287 | if (virq == NO_IRQ) | ||
288 | printk(KERN_ERR "spider_pic: failed to map cascade !"); | ||
289 | return virq; | ||
290 | } | ||
291 | |||
145 | 292 | ||
146 | static void __init spider_init_one(int node, unsigned long addr) | 293 | static void __init spider_init_one(struct device_node *of_node, int chip, |
294 | unsigned long addr) | ||
147 | { | 295 | { |
148 | int n, irq; | 296 | struct spider_pic *pic = &spider_pics[chip]; |
297 | int i, virq; | ||
149 | 298 | ||
150 | spider_pics[node] = ioremap(addr, 0x800); | 299 | /* Map registers */ |
151 | if (spider_pics[node] == NULL) | 300 | pic->regs = ioremap(addr, 0x1000); |
301 | if (pic->regs == NULL) | ||
152 | panic("spider_pic: can't map registers !"); | 302 | panic("spider_pic: can't map registers !"); |
153 | 303 | ||
154 | printk(KERN_INFO "spider_pic: mapped for node %d, addr: 0x%lx mapped to %p\n", | 304 | /* Allocate a host */ |
155 | node, addr, spider_pics[node]); | 305 | pic->host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, SPIDER_SRC_COUNT, |
306 | &spider_host_ops, SPIDER_IRQ_INVALID); | ||
307 | if (pic->host == NULL) | ||
308 | panic("spider_pic: can't allocate irq host !"); | ||
309 | pic->host->host_data = pic; | ||
156 | 310 | ||
157 | for (n = 0; n < IIC_NUM_EXT; n++) { | 311 | /* Fill out other bits */ |
158 | if (n == IIC_EXT_CASCADE) | 312 | pic->of_node = of_node_get(of_node); |
159 | continue; | 313 | |
160 | irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE; | 314 | /* Go through all sources and disable them */ |
161 | set_irq_chip_and_handler(irq, &spider_pic, handle_level_irq); | 315 | for (i = 0; i < SPIDER_SRC_COUNT; i++) { |
162 | get_irq_desc(irq)->status |= IRQ_LEVEL; | 316 | void __iomem *cfg = pic->regs + TIR_CFGA + 8 * i; |
317 | out_be32(cfg, in_be32(cfg) & ~0x30000000u); | ||
163 | } | 318 | } |
164 | 319 | ||
165 | /* do not mask any interrupts because of level */ | 320 | /* do not mask any interrupts because of level */ |
166 | out_be32(spider_pics[node] + TIR_MSK, 0x0); | 321 | out_be32(pic->regs + TIR_MSK, 0x0); |
167 | |||
168 | /* disable edge detection clear */ | ||
169 | /* out_be32(spider_pics[node] + TIR_EDC, 0x0); */ | ||
170 | 322 | ||
171 | /* enable interrupt packets to be output */ | 323 | /* enable interrupt packets to be output */ |
172 | out_be32(spider_pics[node] + TIR_PIEN, | 324 | out_be32(pic->regs + TIR_PIEN, in_be32(pic->regs + TIR_PIEN) | 0x1); |
173 | in_be32(spider_pics[node] + TIR_PIEN) | 0x1); | ||
174 | 325 | ||
175 | /* Hook up cascade */ | 326 | /* Hook up the cascade interrupt to the iic and nodeid */ |
176 | irq = IIC_EXT_CASCADE + node * IIC_NODE_STRIDE; | 327 | virq = spider_find_cascade_and_node(pic); |
177 | set_irq_data(irq, (void *)(long)node); | 328 | if (virq == NO_IRQ) |
178 | set_irq_chained_handler(irq, spider_irq_cascade); | 329 | return; |
330 | set_irq_data(virq, pic); | ||
331 | set_irq_chained_handler(virq, spider_irq_cascade); | ||
332 | |||
333 | printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n", | ||
334 | pic->node_id, addr, of_node->full_name); | ||
179 | 335 | ||
180 | /* Enable the interrupt detection enable bit. Do this last! */ | 336 | /* Enable the interrupt detection enable bit. Do this last! */ |
181 | out_be32(spider_pics[node] + TIR_DEN, | 337 | out_be32(pic->regs + TIR_DEN, in_be32(pic->regs + TIR_DEN) | 0x1); |
182 | in_be32(spider_pics[node] + TIR_DEN) | 0x1); | ||
183 | } | 338 | } |
184 | 339 | ||
185 | void __init spider_init_IRQ(void) | 340 | void __init spider_init_IRQ(void) |
186 | { | 341 | { |
187 | unsigned long *spider_reg; | 342 | struct resource r; |
188 | struct device_node *dn; | 343 | struct device_node *dn; |
189 | char *compatible; | 344 | int chip = 0; |
190 | int node = 0; | 345 | |
191 | 346 | /* XXX node numbers are totally bogus. We _hope_ we get the device | |
192 | /* XXX node numbers are totally bogus. We _hope_ we get the device nodes in the right | 347 | * nodes in the right order here but that's definitely not guaranteed, |
193 | * order here but that's definitely not guaranteed, we need to get the node from the | 348 | * we need to get the node from the device tree instead. |
194 | * device tree instead. There is currently no proper property for it (but our whole | 349 | * There is currently no proper property for it (but our whole |
195 | * device-tree is bogus anyway) so all we can do is pray or maybe test the address | 350 | * device-tree is bogus anyway) so all we can do is pray or maybe test |
196 | * and deduce the node-id | 351 | * the address and deduce the node-id |
197 | */ | 352 | */ |
198 | for (dn = NULL; (dn = of_find_node_by_name(dn, "interrupt-controller"));) { | 353 | for (dn = NULL; |
199 | compatible = (char *)get_property(dn, "compatible", NULL); | 354 | (dn = of_find_node_by_name(dn, "interrupt-controller"));) { |
200 | 355 | if (device_is_compatible(dn, "CBEA,platform-spider-pic")) { | |
201 | if (!compatible) | 356 | if (of_address_to_resource(dn, 0, &r)) { |
202 | continue; | 357 | printk(KERN_WARNING "spider-pic: Failed\n"); |
203 | 358 | continue; | |
204 | if (strstr(compatible, "CBEA,platform-spider-pic")) | 359 | } |
205 | spider_reg = (unsigned long *)get_property(dn, "reg", NULL); | 360 | } else if (device_is_compatible(dn, "sti,platform-spider-pic") |
206 | else if (strstr(compatible, "sti,platform-spider-pic") && (node < 2)) { | 361 | && (chip < 2)) { |
207 | static long hard_coded_pics[] = { 0x24000008000, 0x34000008000 }; | 362 | static long hard_coded_pics[] = |
208 | spider_reg = &hard_coded_pics[node]; | 363 | { 0x24000008000, 0x34000008000 }; |
364 | r.start = hard_coded_pics[chip]; | ||
209 | } else | 365 | } else |
210 | continue; | 366 | continue; |
211 | 367 | spider_init_one(dn, chip++, r.start); | |
212 | if (spider_reg == NULL) | ||
213 | printk(KERN_ERR "spider_pic: No address for node %d\n", node); | ||
214 | |||
215 | spider_init_one(node, *spider_reg); | ||
216 | node++; | ||
217 | } | 368 | } |
218 | } | 369 | } |
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index 656c1ef5f4ad..5d2313a6c82b 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c | |||
@@ -264,51 +264,57 @@ spu_irq_class_2(int irq, void *data, struct pt_regs *regs) | |||
264 | return stat ? IRQ_HANDLED : IRQ_NONE; | 264 | return stat ? IRQ_HANDLED : IRQ_NONE; |
265 | } | 265 | } |
266 | 266 | ||
267 | static int | 267 | static int spu_request_irqs(struct spu *spu) |
268 | spu_request_irqs(struct spu *spu) | ||
269 | { | 268 | { |
270 | int ret; | 269 | int ret = 0; |
271 | int irq_base; | ||
272 | |||
273 | irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET; | ||
274 | |||
275 | snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number); | ||
276 | ret = request_irq(irq_base + spu->isrc, | ||
277 | spu_irq_class_0, IRQF_DISABLED, spu->irq_c0, spu); | ||
278 | if (ret) | ||
279 | goto out; | ||
280 | |||
281 | snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number); | ||
282 | ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, | ||
283 | spu_irq_class_1, IRQF_DISABLED, spu->irq_c1, spu); | ||
284 | if (ret) | ||
285 | goto out1; | ||
286 | 270 | ||
287 | snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number); | 271 | if (spu->irqs[0] != NO_IRQ) { |
288 | ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, | 272 | snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", |
289 | spu_irq_class_2, IRQF_DISABLED, spu->irq_c2, spu); | 273 | spu->number); |
290 | if (ret) | 274 | ret = request_irq(spu->irqs[0], spu_irq_class_0, |
291 | goto out2; | 275 | IRQF_DISABLED, |
292 | goto out; | 276 | spu->irq_c0, spu); |
277 | if (ret) | ||
278 | goto bail0; | ||
279 | } | ||
280 | if (spu->irqs[1] != NO_IRQ) { | ||
281 | snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", | ||
282 | spu->number); | ||
283 | ret = request_irq(spu->irqs[1], spu_irq_class_1, | ||
284 | IRQF_DISABLED, | ||
285 | spu->irq_c1, spu); | ||
286 | if (ret) | ||
287 | goto bail1; | ||
288 | } | ||
289 | if (spu->irqs[2] != NO_IRQ) { | ||
290 | snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", | ||
291 | spu->number); | ||
292 | ret = request_irq(spu->irqs[2], spu_irq_class_2, | ||
293 | IRQF_DISABLED, | ||
294 | spu->irq_c2, spu); | ||
295 | if (ret) | ||
296 | goto bail2; | ||
297 | } | ||
298 | return 0; | ||
293 | 299 | ||
294 | out2: | 300 | bail2: |
295 | free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu); | 301 | if (spu->irqs[1] != NO_IRQ) |
296 | out1: | 302 | free_irq(spu->irqs[1], spu); |
297 | free_irq(irq_base + spu->isrc, spu); | 303 | bail1: |
298 | out: | 304 | if (spu->irqs[0] != NO_IRQ) |
305 | free_irq(spu->irqs[0], spu); | ||
306 | bail0: | ||
299 | return ret; | 307 | return ret; |
300 | } | 308 | } |
301 | 309 | ||
302 | static void | 310 | static void spu_free_irqs(struct spu *spu) |
303 | spu_free_irqs(struct spu *spu) | ||
304 | { | 311 | { |
305 | int irq_base; | 312 | if (spu->irqs[0] != NO_IRQ) |
306 | 313 | free_irq(spu->irqs[0], spu); | |
307 | irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET; | 314 | if (spu->irqs[1] != NO_IRQ) |
308 | 315 | free_irq(spu->irqs[1], spu); | |
309 | free_irq(irq_base + spu->isrc, spu); | 316 | if (spu->irqs[2] != NO_IRQ) |
310 | free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu); | 317 | free_irq(spu->irqs[2], spu); |
311 | free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu); | ||
312 | } | 318 | } |
313 | 319 | ||
314 | static LIST_HEAD(spu_list); | 320 | static LIST_HEAD(spu_list); |
@@ -559,17 +565,38 @@ static void spu_unmap(struct spu *spu) | |||
559 | iounmap((u8 __iomem *)spu->local_store); | 565 | iounmap((u8 __iomem *)spu->local_store); |
560 | } | 566 | } |
561 | 567 | ||
568 | /* This function shall be abstracted for HV platforms */ | ||
569 | static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) | ||
570 | { | ||
571 | struct irq_host *host; | ||
572 | unsigned int isrc; | ||
573 | u32 *tmp; | ||
574 | |||
575 | host = iic_get_irq_host(spu->node); | ||
576 | if (host == NULL) | ||
577 | return -ENODEV; | ||
578 | |||
579 | /* Get the interrupt source from the device-tree */ | ||
580 | tmp = (u32 *)get_property(np, "isrc", NULL); | ||
581 | if (!tmp) | ||
582 | return -ENODEV; | ||
583 | spu->isrc = isrc = tmp[0]; | ||
584 | |||
585 | /* Now map interrupts of all 3 classes */ | ||
586 | spu->irqs[0] = irq_create_mapping(host, 0x00 | isrc, 0); | ||
587 | spu->irqs[1] = irq_create_mapping(host, 0x10 | isrc, 0); | ||
588 | spu->irqs[2] = irq_create_mapping(host, 0x20 | isrc, 0); | ||
589 | |||
590 | /* Right now, we only fail if class 2 failed */ | ||
591 | return spu->irqs[2] == NO_IRQ ? -EINVAL : 0; | ||
592 | } | ||
593 | |||
562 | static int __init spu_map_device(struct spu *spu, struct device_node *node) | 594 | static int __init spu_map_device(struct spu *spu, struct device_node *node) |
563 | { | 595 | { |
564 | char *prop; | 596 | char *prop; |
565 | int ret; | 597 | int ret; |
566 | 598 | ||
567 | ret = -ENODEV; | 599 | ret = -ENODEV; |
568 | prop = get_property(node, "isrc", NULL); | ||
569 | if (!prop) | ||
570 | goto out; | ||
571 | spu->isrc = *(unsigned int *)prop; | ||
572 | |||
573 | spu->name = get_property(node, "name", NULL); | 600 | spu->name = get_property(node, "name", NULL); |
574 | if (!spu->name) | 601 | if (!spu->name) |
575 | goto out; | 602 | goto out; |
@@ -636,7 +663,8 @@ static int spu_create_sysdev(struct spu *spu) | |||
636 | return ret; | 663 | return ret; |
637 | } | 664 | } |
638 | 665 | ||
639 | sysdev_create_file(&spu->sysdev, &attr_isrc); | 666 | if (spu->isrc != 0) |
667 | sysdev_create_file(&spu->sysdev, &attr_isrc); | ||
640 | sysfs_add_device_to_node(&spu->sysdev, spu->nid); | 668 | sysfs_add_device_to_node(&spu->sysdev, spu->nid); |
641 | 669 | ||
642 | return 0; | 670 | return 0; |
@@ -668,6 +696,9 @@ static int __init create_spu(struct device_node *spe) | |||
668 | spu->nid = of_node_to_nid(spe); | 696 | spu->nid = of_node_to_nid(spe); |
669 | if (spu->nid == -1) | 697 | if (spu->nid == -1) |
670 | spu->nid = 0; | 698 | spu->nid = 0; |
699 | ret = spu_map_interrupts(spu, spe); | ||
700 | if (ret) | ||
701 | goto out_unmap; | ||
671 | spin_lock_init(&spu->register_lock); | 702 | spin_lock_init(&spu->register_lock); |
672 | spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1)); | 703 | spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1)); |
673 | spu_mfc_sr1_set(spu, 0x33); | 704 | spu_mfc_sr1_set(spu, 0x33); |
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c index 66c253498803..6802cdc3168a 100644 --- a/arch/powerpc/platforms/chrp/pci.c +++ b/arch/powerpc/platforms/chrp/pci.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <asm/machdep.h> | 18 | #include <asm/machdep.h> |
19 | #include <asm/sections.h> | 19 | #include <asm/sections.h> |
20 | #include <asm/pci-bridge.h> | 20 | #include <asm/pci-bridge.h> |
21 | #include <asm/open_pic.h> | ||
22 | #include <asm/grackle.h> | 21 | #include <asm/grackle.h> |
23 | #include <asm/rtas.h> | 22 | #include <asm/rtas.h> |
24 | 23 | ||
@@ -161,15 +160,9 @@ void __init | |||
161 | chrp_pcibios_fixup(void) | 160 | chrp_pcibios_fixup(void) |
162 | { | 161 | { |
163 | struct pci_dev *dev = NULL; | 162 | struct pci_dev *dev = NULL; |
164 | struct device_node *np; | ||
165 | 163 | ||
166 | /* PCI interrupts are controlled by the OpenPIC */ | 164 | for_each_pci_dev(dev) |
167 | for_each_pci_dev(dev) { | 165 | pci_read_irq_line(dev); |
168 | np = pci_device_to_OF_node(dev); | ||
169 | if ((np != 0) && (np->n_intrs > 0) && (np->intrs[0].line != 0)) | ||
170 | dev->irq = np->intrs[0].line; | ||
171 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); | ||
172 | } | ||
173 | } | 166 | } |
174 | 167 | ||
175 | #define PRG_CL_RESET_VALID 0x00010000 | 168 | #define PRG_CL_RESET_VALID 0x00010000 |
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index a5dffc8d93a9..bb10171132fa 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c | |||
@@ -59,7 +59,7 @@ void rtas_indicator_progress(char *, unsigned short); | |||
59 | int _chrp_type; | 59 | int _chrp_type; |
60 | EXPORT_SYMBOL(_chrp_type); | 60 | EXPORT_SYMBOL(_chrp_type); |
61 | 61 | ||
62 | struct mpic *chrp_mpic; | 62 | static struct mpic *chrp_mpic; |
63 | 63 | ||
64 | /* Used for doing CHRP event-scans */ | 64 | /* Used for doing CHRP event-scans */ |
65 | DEFINE_PER_CPU(struct timer_list, heartbeat_timer); | 65 | DEFINE_PER_CPU(struct timer_list, heartbeat_timer); |
@@ -315,19 +315,13 @@ chrp_event_scan(unsigned long unused) | |||
315 | jiffies + event_scan_interval); | 315 | jiffies + event_scan_interval); |
316 | } | 316 | } |
317 | 317 | ||
318 | void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc, | 318 | static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc, |
319 | struct pt_regs *regs) | 319 | struct pt_regs *regs) |
320 | { | 320 | { |
321 | unsigned int max = 100; | 321 | unsigned int cascade_irq = i8259_irq(regs); |
322 | 322 | if (cascade_irq != NO_IRQ) | |
323 | while(max--) { | 323 | generic_handle_irq(cascade_irq, regs); |
324 | int irq = i8259_irq(regs); | 324 | desc->chip->eoi(irq); |
325 | if (max == 99) | ||
326 | desc->chip->eoi(irq); | ||
327 | if (irq < 0) | ||
328 | break; | ||
329 | generic_handle_irq(irq, regs); | ||
330 | }; | ||
331 | } | 325 | } |
332 | 326 | ||
333 | /* | 327 | /* |
@@ -336,18 +330,17 @@ void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc, | |||
336 | static void __init chrp_find_openpic(void) | 330 | static void __init chrp_find_openpic(void) |
337 | { | 331 | { |
338 | struct device_node *np, *root; | 332 | struct device_node *np, *root; |
339 | int len, i, j, irq_count; | 333 | int len, i, j; |
340 | int isu_size, idu_size; | 334 | int isu_size, idu_size; |
341 | unsigned int *iranges, *opprop = NULL; | 335 | unsigned int *iranges, *opprop = NULL; |
342 | int oplen = 0; | 336 | int oplen = 0; |
343 | unsigned long opaddr; | 337 | unsigned long opaddr; |
344 | int na = 1; | 338 | int na = 1; |
345 | unsigned char init_senses[NR_IRQS - NUM_8259_INTERRUPTS]; | ||
346 | 339 | ||
347 | np = find_type_devices("open-pic"); | 340 | np = of_find_node_by_type(NULL, "open-pic"); |
348 | if (np == NULL) | 341 | if (np == NULL) |
349 | return; | 342 | return; |
350 | root = find_path_device("/"); | 343 | root = of_find_node_by_path("/"); |
351 | if (root) { | 344 | if (root) { |
352 | opprop = (unsigned int *) get_property | 345 | opprop = (unsigned int *) get_property |
353 | (root, "platform-open-pic", &oplen); | 346 | (root, "platform-open-pic", &oplen); |
@@ -358,19 +351,15 @@ static void __init chrp_find_openpic(void) | |||
358 | oplen /= na * sizeof(unsigned int); | 351 | oplen /= na * sizeof(unsigned int); |
359 | } else { | 352 | } else { |
360 | struct resource r; | 353 | struct resource r; |
361 | if (of_address_to_resource(np, 0, &r)) | 354 | if (of_address_to_resource(np, 0, &r)) { |
362 | return; | 355 | goto bail; |
356 | } | ||
363 | opaddr = r.start; | 357 | opaddr = r.start; |
364 | oplen = 0; | 358 | oplen = 0; |
365 | } | 359 | } |
366 | 360 | ||
367 | printk(KERN_INFO "OpenPIC at %lx\n", opaddr); | 361 | printk(KERN_INFO "OpenPIC at %lx\n", opaddr); |
368 | 362 | ||
369 | irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */ | ||
370 | prom_get_irq_senses(init_senses, NUM_ISA_INTERRUPTS, NR_IRQS - 4); | ||
371 | /* i8259 cascade is always positive level */ | ||
372 | init_senses[0] = IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE; | ||
373 | |||
374 | iranges = (unsigned int *) get_property(np, "interrupt-ranges", &len); | 363 | iranges = (unsigned int *) get_property(np, "interrupt-ranges", &len); |
375 | if (iranges == NULL) | 364 | if (iranges == NULL) |
376 | len = 0; /* non-distributed mpic */ | 365 | len = 0; /* non-distributed mpic */ |
@@ -397,15 +386,12 @@ static void __init chrp_find_openpic(void) | |||
397 | if (len > 1) | 386 | if (len > 1) |
398 | isu_size = iranges[3]; | 387 | isu_size = iranges[3]; |
399 | 388 | ||
400 | chrp_mpic = mpic_alloc(opaddr, MPIC_PRIMARY, | 389 | chrp_mpic = mpic_alloc(np, opaddr, MPIC_PRIMARY, |
401 | isu_size, NUM_ISA_INTERRUPTS, irq_count, | 390 | isu_size, 0, " MPIC "); |
402 | NR_IRQS - 4, init_senses, irq_count, | ||
403 | " MPIC "); | ||
404 | if (chrp_mpic == NULL) { | 391 | if (chrp_mpic == NULL) { |
405 | printk(KERN_ERR "Failed to allocate MPIC structure\n"); | 392 | printk(KERN_ERR "Failed to allocate MPIC structure\n"); |
406 | return; | 393 | goto bail; |
407 | } | 394 | } |
408 | |||
409 | j = na - 1; | 395 | j = na - 1; |
410 | for (i = 1; i < len; ++i) { | 396 | for (i = 1; i < len; ++i) { |
411 | iranges += 2; | 397 | iranges += 2; |
@@ -417,7 +403,10 @@ static void __init chrp_find_openpic(void) | |||
417 | } | 403 | } |
418 | 404 | ||
419 | mpic_init(chrp_mpic); | 405 | mpic_init(chrp_mpic); |
420 | set_irq_chained_handler(NUM_ISA_INTERRUPTS, chrp_8259_cascade); | 406 | ppc_md.get_irq = mpic_get_irq; |
407 | bail: | ||
408 | of_node_put(root); | ||
409 | of_node_put(np); | ||
421 | } | 410 | } |
422 | 411 | ||
423 | #if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON) | 412 | #if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON) |
@@ -428,14 +417,34 @@ static struct irqaction xmon_irqaction = { | |||
428 | }; | 417 | }; |
429 | #endif | 418 | #endif |
430 | 419 | ||
431 | void __init chrp_init_IRQ(void) | 420 | static void __init chrp_find_8259(void) |
432 | { | 421 | { |
433 | struct device_node *np; | 422 | struct device_node *np, *pic = NULL; |
434 | unsigned long chrp_int_ack = 0; | 423 | unsigned long chrp_int_ack = 0; |
435 | #if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON) | 424 | unsigned int cascade_irq; |
436 | struct device_node *kbd; | ||
437 | #endif | ||
438 | 425 | ||
426 | /* Look for cascade */ | ||
427 | for_each_node_by_type(np, "interrupt-controller") | ||
428 | if (device_is_compatible(np, "chrp,iic")) { | ||
429 | pic = np; | ||
430 | break; | ||
431 | } | ||
432 | /* Ok, 8259 wasn't found. We need to handle the case where | ||
433 | * we have a pegasos that claims to be chrp but doesn't have | ||
434 | * a proper interrupt tree | ||
435 | */ | ||
436 | if (pic == NULL && chrp_mpic != NULL) { | ||
437 | printk(KERN_ERR "i8259: Not found in device-tree" | ||
438 | " assuming no legacy interrupts\n"); | ||
439 | return; | ||
440 | } | ||
441 | |||
442 | /* Look for intack. In a perfect world, we would look for it on | ||
443 | * the ISA bus that holds the 8259 but heh... Works that way. If | ||
444 | * we ever see a problem, we can try to re-use the pSeries code here. | ||
445 | * Also, Pegasos-type platforms don't have a proper node to start | ||
446 | * from anyway | ||
447 | */ | ||
439 | for (np = find_devices("pci"); np != NULL; np = np->next) { | 448 | for (np = find_devices("pci"); np != NULL; np = np->next) { |
440 | unsigned int *addrp = (unsigned int *) | 449 | unsigned int *addrp = (unsigned int *) |
441 | get_property(np, "8259-interrupt-acknowledge", NULL); | 450 | get_property(np, "8259-interrupt-acknowledge", NULL); |
@@ -446,11 +455,29 @@ void __init chrp_init_IRQ(void) | |||
446 | break; | 455 | break; |
447 | } | 456 | } |
448 | if (np == NULL) | 457 | if (np == NULL) |
449 | printk(KERN_ERR "Cannot find PCI interrupt acknowledge address\n"); | 458 | printk(KERN_WARNING "Cannot find PCI interrupt acknowledge" |
459 | " address, polling\n"); | ||
460 | |||
461 | i8259_init(pic, chrp_int_ack); | ||
462 | if (ppc_md.get_irq == NULL) | ||
463 | ppc_md.get_irq = i8259_irq; | ||
464 | if (chrp_mpic != NULL) { | ||
465 | cascade_irq = irq_of_parse_and_map(pic, 0); | ||
466 | if (cascade_irq == NO_IRQ) | ||
467 | printk(KERN_ERR "i8259: failed to map cascade irq\n"); | ||
468 | else | ||
469 | set_irq_chained_handler(cascade_irq, | ||
470 | chrp_8259_cascade); | ||
471 | } | ||
472 | } | ||
450 | 473 | ||
474 | void __init chrp_init_IRQ(void) | ||
475 | { | ||
476 | #if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON) | ||
477 | struct device_node *kbd; | ||
478 | #endif | ||
451 | chrp_find_openpic(); | 479 | chrp_find_openpic(); |
452 | 480 | chrp_find_8259(); | |
453 | i8259_init(chrp_int_ack, 0); | ||
454 | 481 | ||
455 | if (_chrp_type == _CHRP_Pegasos) | 482 | if (_chrp_type == _CHRP_Pegasos) |
456 | ppc_md.get_irq = i8259_irq; | 483 | ppc_md.get_irq = i8259_irq; |
@@ -535,10 +562,6 @@ static int __init chrp_probe(void) | |||
535 | DMA_MODE_READ = 0x44; | 562 | DMA_MODE_READ = 0x44; |
536 | DMA_MODE_WRITE = 0x48; | 563 | DMA_MODE_WRITE = 0x48; |
537 | isa_io_base = CHRP_ISA_IO_BASE; /* default value */ | 564 | isa_io_base = CHRP_ISA_IO_BASE; /* default value */ |
538 | ppc_do_canonicalize_irqs = 1; | ||
539 | |||
540 | /* Assume we have an 8259... */ | ||
541 | __irq_offset_value = NUM_ISA_INTERRUPTS; | ||
542 | 565 | ||
543 | return 1; | 566 | return 1; |
544 | } | 567 | } |
@@ -550,7 +573,6 @@ define_machine(chrp) { | |||
550 | .init = chrp_init2, | 573 | .init = chrp_init2, |
551 | .show_cpuinfo = chrp_show_cpuinfo, | 574 | .show_cpuinfo = chrp_show_cpuinfo, |
552 | .init_IRQ = chrp_init_IRQ, | 575 | .init_IRQ = chrp_init_IRQ, |
553 | .get_irq = mpic_get_irq, | ||
554 | .pcibios_fixup = chrp_pcibios_fixup, | 576 | .pcibios_fixup = chrp_pcibios_fixup, |
555 | .restart = rtas_restart, | 577 | .restart = rtas_restart, |
556 | .power_off = rtas_power_off, | 578 | .power_off = rtas_power_off, |
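Under the remapper, the legacy 8259 no longer owns a fixed range starting at NUM_ISA_INTERRUPTS; its cascade is mapped from the device-tree like any other interrupt. A hedged sketch of that pattern, with example_* placeholders standing in for the real CHRP handlers:

#include <linux/init.h>
#include <linux/irq.h>
#include <asm/i8259.h>
#include <asm/prom.h>
#include <asm/ptrace.h>

static void example_8259_cascade(unsigned int irq, struct irq_desc *desc,
				 struct pt_regs *regs)
{
	unsigned int cascade_irq = i8259_irq(regs);

	if (cascade_irq != NO_IRQ)
		generic_handle_irq(cascade_irq, regs);
	desc->chip->eoi(irq);
}

static void __init example_find_8259(struct device_node *pic,
				     unsigned long intack)
{
	unsigned int cascade_irq;

	/* i8259_init() now takes the device node; it requests the legacy
	 * interrupt range itself and accepts a NULL node for broken trees.
	 */
	i8259_init(pic, intack);

	/* The cascade is a normal device-tree interrupt of the 8259 node */
	cascade_irq = irq_of_parse_and_map(pic, 0);
	if (cascade_irq != NO_IRQ)
		set_irq_chained_handler(cascade_irq, example_8259_cascade);
}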
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c index c298ca1ea680..1d2307e87c30 100644 --- a/arch/powerpc/platforms/chrp/smp.c +++ b/arch/powerpc/platforms/chrp/smp.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <asm/smp.h> | 29 | #include <asm/smp.h> |
30 | #include <asm/residual.h> | 30 | #include <asm/residual.h> |
31 | #include <asm/time.h> | 31 | #include <asm/time.h> |
32 | #include <asm/open_pic.h> | ||
33 | #include <asm/machdep.h> | 32 | #include <asm/machdep.h> |
34 | #include <asm/smp.h> | 33 | #include <asm/smp.h> |
35 | #include <asm/mpic.h> | 34 | #include <asm/mpic.h> |
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c index 7fb6a08786c0..2275e64f3152 100644 --- a/arch/powerpc/platforms/iseries/irq.c +++ b/arch/powerpc/platforms/iseries/irq.c | |||
@@ -162,27 +162,6 @@ static void pci_event_handler(struct HvLpEvent *event, struct pt_regs *regs) | |||
162 | printk(KERN_ERR "pci_event_handler: NULL event received\n"); | 162 | printk(KERN_ERR "pci_event_handler: NULL event received\n"); |
163 | } | 163 | } |
164 | 164 | ||
165 | /* | ||
166 | * This is called by init_IRQ. set in ppc_md.init_IRQ by iSeries_setup.c | ||
167 | * It must be called before the bus walk. | ||
168 | */ | ||
169 | void __init iSeries_init_IRQ(void) | ||
170 | { | ||
171 | /* Register PCI event handler and open an event path */ | ||
172 | int ret; | ||
173 | |||
174 | ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo, | ||
175 | &pci_event_handler); | ||
176 | if (ret == 0) { | ||
177 | ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0); | ||
178 | if (ret != 0) | ||
179 | printk(KERN_ERR "iseries_init_IRQ: open event path " | ||
180 | "failed with rc 0x%x\n", ret); | ||
181 | } else | ||
182 | printk(KERN_ERR "iseries_init_IRQ: register handler " | ||
183 | "failed with rc 0x%x\n", ret); | ||
184 | } | ||
185 | |||
186 | #define REAL_IRQ_TO_SUBBUS(irq) (((irq) >> 14) & 0xff) | 165 | #define REAL_IRQ_TO_SUBBUS(irq) (((irq) >> 14) & 0xff) |
187 | #define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1) | 166 | #define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1) |
188 | #define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1) | 167 | #define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1) |
@@ -196,7 +175,7 @@ static void iseries_enable_IRQ(unsigned int irq) | |||
196 | { | 175 | { |
197 | u32 bus, dev_id, function, mask; | 176 | u32 bus, dev_id, function, mask; |
198 | const u32 sub_bus = 0; | 177 | const u32 sub_bus = 0; |
199 | unsigned int rirq = virt_irq_to_real_map[irq]; | 178 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; |
200 | 179 | ||
201 | /* The IRQ has already been locked by the caller */ | 180 | /* The IRQ has already been locked by the caller */ |
202 | bus = REAL_IRQ_TO_BUS(rirq); | 181 | bus = REAL_IRQ_TO_BUS(rirq); |
@@ -213,7 +192,7 @@ static unsigned int iseries_startup_IRQ(unsigned int irq) | |||
213 | { | 192 | { |
214 | u32 bus, dev_id, function, mask; | 193 | u32 bus, dev_id, function, mask; |
215 | const u32 sub_bus = 0; | 194 | const u32 sub_bus = 0; |
216 | unsigned int rirq = virt_irq_to_real_map[irq]; | 195 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; |
217 | 196 | ||
218 | bus = REAL_IRQ_TO_BUS(rirq); | 197 | bus = REAL_IRQ_TO_BUS(rirq); |
219 | function = REAL_IRQ_TO_FUNC(rirq); | 198 | function = REAL_IRQ_TO_FUNC(rirq); |
@@ -254,7 +233,7 @@ static void iseries_shutdown_IRQ(unsigned int irq) | |||
254 | { | 233 | { |
255 | u32 bus, dev_id, function, mask; | 234 | u32 bus, dev_id, function, mask; |
256 | const u32 sub_bus = 0; | 235 | const u32 sub_bus = 0; |
257 | unsigned int rirq = virt_irq_to_real_map[irq]; | 236 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; |
258 | 237 | ||
259 | /* irq should be locked by the caller */ | 238 | /* irq should be locked by the caller */ |
260 | bus = REAL_IRQ_TO_BUS(rirq); | 239 | bus = REAL_IRQ_TO_BUS(rirq); |
@@ -277,7 +256,7 @@ static void iseries_disable_IRQ(unsigned int irq) | |||
277 | { | 256 | { |
278 | u32 bus, dev_id, function, mask; | 257 | u32 bus, dev_id, function, mask; |
279 | const u32 sub_bus = 0; | 258 | const u32 sub_bus = 0; |
280 | unsigned int rirq = virt_irq_to_real_map[irq]; | 259 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; |
281 | 260 | ||
282 | /* The IRQ has already been locked by the caller */ | 261 | /* The IRQ has already been locked by the caller */ |
283 | bus = REAL_IRQ_TO_BUS(rirq); | 262 | bus = REAL_IRQ_TO_BUS(rirq); |
@@ -291,7 +270,7 @@ static void iseries_disable_IRQ(unsigned int irq) | |||
291 | 270 | ||
292 | static void iseries_end_IRQ(unsigned int irq) | 271 | static void iseries_end_IRQ(unsigned int irq) |
293 | { | 272 | { |
294 | unsigned int rirq = virt_irq_to_real_map[irq]; | 273 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; |
295 | 274 | ||
296 | HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq), | 275 | HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq), |
297 | (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq)); | 276 | (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq)); |
@@ -314,16 +293,14 @@ static struct irq_chip iseries_pic = { | |||
314 | int __init iSeries_allocate_IRQ(HvBusNumber bus, | 293 | int __init iSeries_allocate_IRQ(HvBusNumber bus, |
315 | HvSubBusNumber sub_bus, u32 bsubbus) | 294 | HvSubBusNumber sub_bus, u32 bsubbus) |
316 | { | 295 | { |
317 | int virtirq; | ||
318 | unsigned int realirq; | 296 | unsigned int realirq; |
319 | u8 idsel = ISERIES_GET_DEVICE_FROM_SUBBUS(bsubbus); | 297 | u8 idsel = ISERIES_GET_DEVICE_FROM_SUBBUS(bsubbus); |
320 | u8 function = ISERIES_GET_FUNCTION_FROM_SUBBUS(bsubbus); | 298 | u8 function = ISERIES_GET_FUNCTION_FROM_SUBBUS(bsubbus); |
321 | 299 | ||
322 | realirq = (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3) | 300 | realirq = (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3) |
323 | + function; | 301 | + function; |
324 | virtirq = virt_irq_create_mapping(realirq); | 302 | |
325 | set_irq_chip_and_handler(virtirq, &iseries_pic, handle_fasteoi_irq); | 303 | return irq_create_mapping(NULL, realirq, IRQ_TYPE_NONE); |
326 | return virtirq; | ||
327 | } | 304 | } |
328 | 305 | ||
329 | #endif /* CONFIG_PCI */ | 306 | #endif /* CONFIG_PCI */ |
@@ -331,10 +308,9 @@ int __init iSeries_allocate_IRQ(HvBusNumber bus, | |||
331 | /* | 308 | /* |
332 | * Get the next pending IRQ. | 309 | * Get the next pending IRQ. |
333 | */ | 310 | */ |
334 | int iSeries_get_irq(struct pt_regs *regs) | 311 | unsigned int iSeries_get_irq(struct pt_regs *regs) |
335 | { | 312 | { |
336 | /* -2 means ignore this interrupt */ | 313 | int irq = NO_IRQ_IGNORE; |
337 | int irq = -2; | ||
338 | 314 | ||
339 | #ifdef CONFIG_SMP | 315 | #ifdef CONFIG_SMP |
340 | if (get_lppaca()->int_dword.fields.ipi_cnt) { | 316 | if (get_lppaca()->int_dword.fields.ipi_cnt) { |
@@ -357,9 +333,57 @@ int iSeries_get_irq(struct pt_regs *regs) | |||
357 | } | 333 | } |
358 | spin_unlock(&pending_irqs_lock); | 334 | spin_unlock(&pending_irqs_lock); |
359 | if (irq >= NR_IRQS) | 335 | if (irq >= NR_IRQS) |
360 | irq = -2; | 336 | irq = NO_IRQ_IGNORE; |
361 | } | 337 | } |
362 | #endif | 338 | #endif |
363 | 339 | ||
364 | return irq; | 340 | return irq; |
365 | } | 341 | } |
342 | |||
343 | static int iseries_irq_host_map(struct irq_host *h, unsigned int virq, | ||
344 | irq_hw_number_t hw, unsigned int flags) | ||
345 | { | ||
346 | set_irq_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq); | ||
347 | |||
348 | return 0; | ||
349 | } | ||
350 | |||
351 | static struct irq_host_ops iseries_irq_host_ops = { | ||
352 | .map = iseries_irq_host_map, | ||
353 | }; | ||
354 | |||
355 | /* | ||
356 | * This is called by init_IRQ; it is set in ppc_md.init_IRQ by iSeries_setup.c | ||
357 | * It must be called before the bus walk. | ||
358 | */ | ||
359 | void __init iSeries_init_IRQ(void) | ||
360 | { | ||
361 | /* Register PCI event handler and open an event path */ | ||
362 | struct irq_host *host; | ||
363 | int ret; | ||
364 | |||
365 | /* | ||
366 | * The Hypervisor only allows us up to 256 interrupt | ||
367 | * sources (the irq number is passed in a u8). | ||
368 | */ | ||
369 | irq_set_virq_count(256); | ||
370 | |||
371 | /* Create irq host. No need for a revmap since HV will give us | ||
372 | * back our virtual irq number | ||
373 | */ | ||
374 | host = irq_alloc_host(IRQ_HOST_MAP_NOMAP, 0, &iseries_irq_host_ops, 0); | ||
375 | BUG_ON(host == NULL); | ||
376 | irq_set_default_host(host); | ||
377 | |||
378 | ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo, | ||
379 | &pci_event_handler); | ||
380 | if (ret == 0) { | ||
381 | ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0); | ||
382 | if (ret != 0) | ||
383 | printk(KERN_ERR "iseries_init_IRQ: open event path " | ||
384 | "failed with rc 0x%x\n", ret); | ||
385 | } else | ||
386 | printk(KERN_ERR "iseries_init_IRQ: register handler " | ||
387 | "failed with rc 0x%x\n", ret); | ||
388 | } | ||
389 | |||
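The iSeries conversion uses a "no map" host: interrupt numbers fit in a u8 and the hypervisor hands back the virtual number it was given, so no reverse map is kept. A condensed sketch of the same pattern (the example_* names and the bare irq_chip are placeholders):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/irq.h>

/* Placeholder chip: the real one implements enable/disable/eoi etc. */
static struct irq_chip example_pic = {
	.typename = " EXAMPLE-PIC ",
};

static int example_host_map(struct irq_host *h, unsigned int virq,
			    irq_hw_number_t hw, unsigned int flags)
{
	/* Called once per new mapping: attach chip and flow handler */
	set_irq_chip_and_handler(virq, &example_pic, handle_fasteoi_irq);
	return 0;
}

static struct irq_host_ops example_host_ops = {
	.map = example_host_map,
};

static void __init example_init_irq_host(void)
{
	struct irq_host *host;

	/* Firmware passes interrupt numbers in a u8, so cap the virq space */
	irq_set_virq_count(256);

	/* NOMAP host: no reverse map; the firmware keeps track of the virq */
	host = irq_alloc_host(IRQ_HOST_MAP_NOMAP, 0, &example_host_ops, 0);
	BUG_ON(host == NULL);
	irq_set_default_host(host);
}

static unsigned int example_allocate_irq(unsigned int realirq)
{
	/* NULL selects the default host; the result goes to request_irq() */
	return irq_create_mapping(NULL, realirq, IRQ_TYPE_NONE);
}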
diff --git a/arch/powerpc/platforms/iseries/irq.h b/arch/powerpc/platforms/iseries/irq.h index 188aa808abd7..1ee8985140e5 100644 --- a/arch/powerpc/platforms/iseries/irq.h +++ b/arch/powerpc/platforms/iseries/irq.h | |||
@@ -4,6 +4,6 @@ | |||
4 | extern void iSeries_init_IRQ(void); | 4 | extern void iSeries_init_IRQ(void); |
5 | extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, u32); | 5 | extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, u32); |
6 | extern void iSeries_activate_IRQs(void); | 6 | extern void iSeries_activate_IRQs(void); |
7 | extern int iSeries_get_irq(struct pt_regs *); | 7 | extern unsigned int iSeries_get_irq(struct pt_regs *); |
8 | 8 | ||
9 | #endif /* _ISERIES_IRQ_H */ | 9 | #endif /* _ISERIES_IRQ_H */ |
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index c877074745b2..c9605d773a77 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ b/arch/powerpc/platforms/iseries/setup.c | |||
@@ -294,8 +294,6 @@ static void __init iSeries_init_early(void) | |||
294 | { | 294 | { |
295 | DBG(" -> iSeries_init_early()\n"); | 295 | DBG(" -> iSeries_init_early()\n"); |
296 | 296 | ||
297 | ppc64_interrupt_controller = IC_ISERIES; | ||
298 | |||
299 | #if defined(CONFIG_BLK_DEV_INITRD) | 297 | #if defined(CONFIG_BLK_DEV_INITRD) |
300 | /* | 298 | /* |
301 | * If the init RAM disk has been configured and there is | 299 | * If the init RAM disk has been configured and there is |
@@ -659,12 +657,6 @@ static int __init iseries_probe(void) | |||
659 | powerpc_firmware_features |= FW_FEATURE_ISERIES; | 657 | powerpc_firmware_features |= FW_FEATURE_ISERIES; |
660 | powerpc_firmware_features |= FW_FEATURE_LPAR; | 658 | powerpc_firmware_features |= FW_FEATURE_LPAR; |
661 | 659 | ||
662 | /* | ||
663 | * The Hypervisor only allows us up to 256 interrupt | ||
664 | * sources (the irq number is passed in a u8). | ||
665 | */ | ||
666 | virt_irq_max = 255; | ||
667 | |||
668 | hpte_init_iSeries(); | 660 | hpte_init_iSeries(); |
669 | 661 | ||
670 | return 1; | 662 | return 1; |
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c index f7170ff86dab..63a1670d3bfd 100644 --- a/arch/powerpc/platforms/maple/pci.c +++ b/arch/powerpc/platforms/maple/pci.c | |||
@@ -443,18 +443,23 @@ void __init maple_pci_init(void) | |||
443 | int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel) | 443 | int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel) |
444 | { | 444 | { |
445 | struct device_node *np; | 445 | struct device_node *np; |
446 | int irq = channel ? 15 : 14; | 446 | unsigned int defirq = channel ? 15 : 14; |
447 | unsigned int irq; | ||
447 | 448 | ||
448 | if (pdev->vendor != PCI_VENDOR_ID_AMD || | 449 | if (pdev->vendor != PCI_VENDOR_ID_AMD || |
449 | pdev->device != PCI_DEVICE_ID_AMD_8111_IDE) | 450 | pdev->device != PCI_DEVICE_ID_AMD_8111_IDE) |
450 | return irq; | 451 | return defirq; |
451 | 452 | ||
452 | np = pci_device_to_OF_node(pdev); | 453 | np = pci_device_to_OF_node(pdev); |
453 | if (np == NULL) | 454 | if (np == NULL) |
454 | return irq; | 455 | return defirq; |
455 | if (np->n_intrs < 2) | 456 | irq = irq_of_parse_and_map(np, channel & 0x1); |
456 | return irq; | 457 | if (irq == NO_IRQ) { |
457 | return np->intrs[channel & 0x1].line; | 458 | printk("Failed to map onboard IDE interrupt for channel %d\n", |
459 | channel); | ||
460 | return defirq; | ||
461 | } | ||
462 | return irq; | ||
458 | } | 463 | } |
459 | 464 | ||
460 | /* XXX: To remove once all firmwares are ok */ | 465 | /* XXX: To remove once all firmwares are ok */ |
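The legacy IDE change above is the recurring conversion in this patch: map the interrupt from the device-tree and only fall back to a hard-coded default when the mapping fails. Roughly, under the same assumptions (names illustrative):

#include <asm/prom.h>
#include <asm/irq.h>

static unsigned int example_legacy_ide_irq(struct device_node *np, int channel)
{
	unsigned int defirq = channel ? 15 : 14;	/* classic ISA defaults */
	unsigned int irq;

	if (np == NULL)
		return defirq;
	irq = irq_of_parse_and_map(np, channel & 0x1);

	/* Only fall back to the hard-coded number if the tree has nothing */
	return (irq == NO_IRQ) ? defirq : irq;
}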
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c index 611ca8e979e5..cb528c9de4c3 100644 --- a/arch/powerpc/platforms/maple/setup.c +++ b/arch/powerpc/platforms/maple/setup.c | |||
@@ -198,50 +198,81 @@ static void __init maple_init_early(void) | |||
198 | { | 198 | { |
199 | DBG(" -> maple_init_early\n"); | 199 | DBG(" -> maple_init_early\n"); |
200 | 200 | ||
201 | /* Setup interrupt mapping options */ | ||
202 | ppc64_interrupt_controller = IC_OPEN_PIC; | ||
203 | |||
204 | iommu_init_early_dart(); | 201 | iommu_init_early_dart(); |
205 | 202 | ||
206 | DBG(" <- maple_init_early\n"); | 203 | DBG(" <- maple_init_early\n"); |
207 | } | 204 | } |
208 | 205 | ||
209 | 206 | /* | |
210 | static __init void maple_init_IRQ(void) | 207 | * This is almost identical to pSeries and CHRP. We need to make that |
208 | * code generic at one point, with appropriate bits in the device-tree to | ||
209 | * identify the presence of an HT APIC | ||
210 | */ | ||
211 | static void __init maple_init_IRQ(void) | ||
211 | { | 212 | { |
212 | struct device_node *root; | 213 | struct device_node *root, *np, *mpic_node = NULL; |
213 | unsigned int *opprop; | 214 | unsigned int *opprop; |
214 | unsigned long opic_addr; | 215 | unsigned long openpic_addr = 0; |
216 | int naddr, n, i, opplen, has_isus = 0; | ||
215 | struct mpic *mpic; | 217 | struct mpic *mpic; |
216 | unsigned char senses[128]; | 218 | unsigned int flags = MPIC_PRIMARY; |
217 | int n; | ||
218 | 219 | ||
219 | DBG(" -> maple_init_IRQ\n"); | 220 | /* Locate MPIC in the device-tree. Note that there is a bug |
221 | * in Maple device-tree where the type of the controller is | ||
222 | * open-pic and not interrupt-controller | ||
223 | */ | ||
224 | for_each_node_by_type(np, "open-pic") { | ||
225 | mpic_node = np; | ||
226 | break; | ||
227 | } | ||
228 | if (mpic_node == NULL) { | ||
229 | printk(KERN_ERR | ||
230 | "Failed to locate the MPIC interrupt controller\n"); | ||
231 | return; | ||
232 | } | ||
220 | 233 | ||
221 | /* XXX: Non standard, replace that with a proper openpic/mpic node | 234 | /* Find address list in /platform-open-pic */ |
222 | * in the device-tree. Find the Open PIC if present */ | ||
223 | root = of_find_node_by_path("/"); | 235 | root = of_find_node_by_path("/"); |
224 | opprop = (unsigned int *) get_property(root, | 236 | naddr = prom_n_addr_cells(root); |
225 | "platform-open-pic", NULL); | 237 | opprop = (unsigned int *) get_property(root, "platform-open-pic", |
226 | if (opprop == 0) | 238 | &opplen); |
227 | panic("OpenPIC not found !\n"); | 239 | if (opprop != 0) { |
228 | 240 | openpic_addr = of_read_number(opprop, naddr); | |
229 | n = prom_n_addr_cells(root); | 241 | has_isus = (opplen > naddr); |
230 | for (opic_addr = 0; n > 0; --n) | 242 | printk(KERN_DEBUG "OpenPIC addr: %lx, has ISUs: %d\n", |
231 | opic_addr = (opic_addr << 32) + *opprop++; | 243 | openpic_addr, has_isus); |
244 | } | ||
232 | of_node_put(root); | 245 | of_node_put(root); |
233 | 246 | ||
234 | /* Obtain sense values from device-tree */ | 247 | BUG_ON(openpic_addr == 0); |
235 | prom_get_irq_senses(senses, 0, 128); | ||
236 | 248 | ||
237 | mpic = mpic_alloc(opic_addr, | 249 | /* Check for a big endian MPIC */ |
238 | MPIC_PRIMARY | MPIC_BIG_ENDIAN | | 250 | if (get_property(np, "big-endian", NULL) != NULL) |
239 | MPIC_BROKEN_U3 | MPIC_WANTS_RESET, | 251 | flags |= MPIC_BIG_ENDIAN; |
240 | 0, 0, 128, 128, senses, 128, "U3-MPIC"); | 252 | |
253 | /* XXX Maple specific bits */ | ||
254 | flags |= MPIC_BROKEN_U3 | MPIC_WANTS_RESET; | ||
255 | |||
256 | /* Setup the openpic driver. More device-tree junk; we hard code no | ||
257 | * ISUs for now. I'll have to revisit some of this with the folks doing | ||
258 | * the firmware for those | ||
259 | */ | ||
260 | mpic = mpic_alloc(mpic_node, openpic_addr, flags, | ||
261 | /*has_isus ? 16 :*/ 0, 0, " MPIC "); | ||
241 | BUG_ON(mpic == NULL); | 262 | BUG_ON(mpic == NULL); |
242 | mpic_init(mpic); | ||
243 | 263 | ||
244 | DBG(" <- maple_init_IRQ\n"); | 264 | /* Add ISUs */ |
265 | opplen /= sizeof(u32); | ||
266 | for (n = 0, i = naddr; i < opplen; i += naddr, n++) { | ||
267 | unsigned long isuaddr = of_read_number(opprop + i, naddr); | ||
268 | mpic_assign_isu(mpic, n, isuaddr); | ||
269 | } | ||
270 | |||
271 | /* All ISUs are setup, complete initialization */ | ||
272 | mpic_init(mpic); | ||
273 | ppc_md.get_irq = mpic_get_irq; | ||
274 | of_node_put(mpic_node); | ||
275 | of_node_put(root); | ||
245 | } | 276 | } |
246 | 277 | ||
247 | static void __init maple_progress(char *s, unsigned short hex) | 278 | static void __init maple_progress(char *s, unsigned short hex) |
@@ -279,7 +310,6 @@ define_machine(maple_md) { | |||
279 | .setup_arch = maple_setup_arch, | 310 | .setup_arch = maple_setup_arch, |
280 | .init_early = maple_init_early, | 311 | .init_early = maple_init_early, |
281 | .init_IRQ = maple_init_IRQ, | 312 | .init_IRQ = maple_init_IRQ, |
282 | .get_irq = mpic_get_irq, | ||
283 | .pcibios_fixup = maple_pcibios_fixup, | 313 | .pcibios_fixup = maple_pcibios_fixup, |
284 | .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, | 314 | .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, |
285 | .restart = maple_restart, | 315 | .restart = maple_restart, |
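maple_init_IRQ() now reads the OpenPIC address list from /platform-open-pic and hands any additional addresses to the MPIC as ISUs before mpic_init(). A sketch of that parsing, assuming the same property layout the code above assumes (one address per ISU after the primary one); isu_size stays 0 to match the hard-coded value above:

#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/prom.h>
#include <asm/mpic.h>

static void __init example_setup_mpic_isus(struct device_node *mpic_node)
{
	struct device_node *root = of_find_node_by_path("/");
	int naddr = prom_n_addr_cells(root);
	unsigned int *opprop;
	unsigned long openpic_addr;
	int opplen, i, n;
	struct mpic *mpic;

	opprop = (unsigned int *)get_property(root, "platform-open-pic",
					      &opplen);
	if (opprop == NULL) {
		of_node_put(root);
		return;
	}

	/* First naddr cells: address of the main OpenPIC registers */
	openpic_addr = of_read_number(opprop, naddr);

	/* isu_size stays 0 here, as in the Maple code above */
	mpic = mpic_alloc(mpic_node, openpic_addr,
			  MPIC_PRIMARY | MPIC_WANTS_RESET, 0, 0, " MPIC ");
	BUG_ON(mpic == NULL);

	/* Any remaining cells are one address per interrupt source unit */
	opplen /= sizeof(u32);
	for (n = 0, i = naddr; i < opplen; i += naddr, n++) {
		unsigned long isuaddr = of_read_number(opprop + i, naddr);
		mpic_assign_isu(mpic, n, isuaddr);
	}

	/* Complete initialization only after all ISUs are known */
	mpic_init(mpic);
	of_node_put(root);
}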
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c index 5685ad9e88e8..e63d52f227ee 100644 --- a/arch/powerpc/platforms/powermac/bootx_init.c +++ b/arch/powerpc/platforms/powermac/bootx_init.c | |||
@@ -162,6 +162,8 @@ static void __init bootx_add_chosen_props(unsigned long base, | |||
162 | { | 162 | { |
163 | u32 val; | 163 | u32 val; |
164 | 164 | ||
165 | bootx_dt_add_prop("linux,bootx", NULL, 0, mem_end); | ||
166 | |||
165 | if (bootx_info->kernelParamsOffset) { | 167 | if (bootx_info->kernelParamsOffset) { |
166 | char *args = (char *)((unsigned long)bootx_info) + | 168 | char *args = (char *)((unsigned long)bootx_info) + |
167 | bootx_info->kernelParamsOffset; | 169 | bootx_info->kernelParamsOffset; |
@@ -228,7 +230,7 @@ static void __init bootx_scan_dt_build_strings(unsigned long base, | |||
228 | 230 | ||
229 | if (!strcmp(namep, "/chosen")) { | 231 | if (!strcmp(namep, "/chosen")) { |
230 | DBG(" detected /chosen ! adding properties names !\n"); | 232 | DBG(" detected /chosen ! adding properties names !\n"); |
231 | bootx_dt_add_string("linux,platform", mem_end); | 233 | bootx_dt_add_string("linux,bootx", mem_end); |
232 | bootx_dt_add_string("linux,stdout-path", mem_end); | 234 | bootx_dt_add_string("linux,stdout-path", mem_end); |
233 | bootx_dt_add_string("linux,initrd-start", mem_end); | 235 | bootx_dt_add_string("linux,initrd-start", mem_end); |
234 | bootx_dt_add_string("linux,initrd-end", mem_end); | 236 | bootx_dt_add_string("linux,initrd-end", mem_end); |
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index ceafaf52a668..8677f50c2586 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c | |||
@@ -522,10 +522,11 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np) | |||
522 | host->speed = KW_I2C_MODE_25KHZ; | 522 | host->speed = KW_I2C_MODE_25KHZ; |
523 | break; | 523 | break; |
524 | } | 524 | } |
525 | if (np->n_intrs > 0) | 525 | host->irq = irq_of_parse_and_map(np, 0); |
526 | host->irq = np->intrs[0].line; | 526 | if (host->irq == NO_IRQ) |
527 | else | 527 | printk(KERN_WARNING |
528 | host->irq = NO_IRQ; | 528 | "low_i2c: Failed to map interrupt for %s\n", |
529 | np->full_name); | ||
529 | 530 | ||
530 | host->base = ioremap((*addrp), 0x1000); | 531 | host->base = ioremap((*addrp), 0x1000); |
531 | if (host->base == NULL) { | 532 | if (host->base == NULL) { |
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c index 41fa2409482a..6a36ea9bf673 100644 --- a/arch/powerpc/platforms/powermac/nvram.c +++ b/arch/powerpc/platforms/powermac/nvram.c | |||
@@ -29,6 +29,8 @@ | |||
29 | #include <asm/machdep.h> | 29 | #include <asm/machdep.h> |
30 | #include <asm/nvram.h> | 30 | #include <asm/nvram.h> |
31 | 31 | ||
32 | #include "pmac.h" | ||
33 | |||
32 | #define DEBUG | 34 | #define DEBUG |
33 | 35 | ||
34 | #ifdef DEBUG | 36 | #ifdef DEBUG |
@@ -80,9 +82,6 @@ static int nvram_partitions[3]; | |||
80 | // XXX Turn that into a sem | 82 | // XXX Turn that into a sem |
81 | static DEFINE_SPINLOCK(nv_lock); | 83 | static DEFINE_SPINLOCK(nv_lock); |
82 | 84 | ||
83 | extern int pmac_newworld; | ||
84 | extern int system_running; | ||
85 | |||
86 | static int (*core99_write_bank)(int bank, u8* datas); | 85 | static int (*core99_write_bank)(int bank, u8* datas); |
87 | static int (*core99_erase_bank)(int bank); | 86 | static int (*core99_erase_bank)(int bank); |
88 | 87 | ||
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index d524a915aa86..556b349797e8 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c | |||
@@ -46,6 +46,9 @@ static int has_uninorth; | |||
46 | static struct pci_controller *u3_agp; | 46 | static struct pci_controller *u3_agp; |
47 | static struct pci_controller *u4_pcie; | 47 | static struct pci_controller *u4_pcie; |
48 | static struct pci_controller *u3_ht; | 48 | static struct pci_controller *u3_ht; |
49 | #define has_second_ohare 0 | ||
50 | #else | ||
51 | static int has_second_ohare; | ||
49 | #endif /* CONFIG_PPC64 */ | 52 | #endif /* CONFIG_PPC64 */ |
50 | 53 | ||
51 | extern u8 pci_cache_line_size; | 54 | extern u8 pci_cache_line_size; |
@@ -647,6 +650,33 @@ static void __init init_p2pbridge(void) | |||
647 | early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val); | 650 | early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val); |
648 | } | 651 | } |
649 | 652 | ||
653 | static void __init init_second_ohare(void) | ||
654 | { | ||
655 | struct device_node *np = of_find_node_by_name(NULL, "pci106b,7"); | ||
656 | unsigned char bus, devfn; | ||
657 | unsigned short cmd; | ||
658 | |||
659 | if (np == NULL) | ||
660 | return; | ||
661 | |||
662 | /* This must run before we initialize the PICs since the second | ||
663 | * ohare hosts a PIC that will be accessed there. | ||
664 | */ | ||
665 | if (pci_device_from_OF_node(np, &bus, &devfn) == 0) { | ||
666 | struct pci_controller* hose = | ||
667 | pci_find_hose_for_OF_device(np); | ||
668 | if (!hose) { | ||
669 | printk(KERN_ERR "Can't find PCI hose for OHare2 !\n"); | ||
670 | return; | ||
671 | } | ||
672 | early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd); | ||
673 | cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; | ||
674 | cmd &= ~PCI_COMMAND_IO; | ||
675 | early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd); | ||
676 | } | ||
677 | has_second_ohare = 1; | ||
678 | } | ||
679 | |||
650 | /* | 680 | /* |
651 | * Some Apple desktop machines have a NEC PD720100A USB2 controller | 681 | * Some Apple desktop machines have a NEC PD720100A USB2 controller |
652 | * on the motherboard. Open Firmware, on these, will disable the | 682 | * on the motherboard. Open Firmware, on these, will disable the |
@@ -688,9 +718,6 @@ static void __init fixup_nec_usb2(void) | |||
688 | " EHCI, fixing up...\n"); | 718 | " EHCI, fixing up...\n"); |
689 | data &= ~1UL; | 719 | data &= ~1UL; |
690 | early_write_config_dword(hose, bus, devfn, 0xe4, data); | 720 | early_write_config_dword(hose, bus, devfn, 0xe4, data); |
691 | early_write_config_byte(hose, bus, | ||
692 | devfn | 2, PCI_INTERRUPT_LINE, | ||
693 | nec->intrs[0].line); | ||
694 | } | 721 | } |
695 | } | 722 | } |
696 | } | 723 | } |
@@ -958,32 +985,28 @@ static int __init add_bridge(struct device_node *dev) | |||
958 | return 0; | 985 | return 0; |
959 | } | 986 | } |
960 | 987 | ||
961 | static void __init pcibios_fixup_OF_interrupts(void) | 988 | void __init pmac_pcibios_fixup(void) |
962 | { | 989 | { |
963 | struct pci_dev* dev = NULL; | 990 | struct pci_dev* dev = NULL; |
964 | 991 | ||
965 | /* | ||
966 | * Open Firmware often doesn't initialize the | ||
967 | * PCI_INTERRUPT_LINE config register properly, so we | ||
968 | * should find the device node and apply the interrupt | ||
969 | * obtained from the OF device-tree | ||
970 | */ | ||
971 | for_each_pci_dev(dev) { | 992 | for_each_pci_dev(dev) { |
972 | struct device_node *node; | 993 | /* Read interrupt from the device-tree */ |
973 | node = pci_device_to_OF_node(dev); | 994 | pci_read_irq_line(dev); |
974 | /* this is the node, see if it has interrupts */ | 995 | |
975 | if (node && node->n_intrs > 0) | 996 | /* Fixup interrupt for the modem/ethernet combo controller |
976 | dev->irq = node->intrs[0].line; | 997 | * on machines with a second ohare chip. |
977 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); | 998 | * The number in the device tree (27) is bogus (correct for |
999 | * the ethernet-only board but not the combo ethernet/modem | ||
1000 | * board). The real interrupt is 28 on the second controller | ||
1001 | * -> 28+32 = 60. | ||
1002 | */ | ||
1003 | if (has_second_ohare && | ||
1004 | dev->vendor == PCI_VENDOR_ID_DEC && | ||
1005 | dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) | ||
1006 | dev->irq = irq_create_mapping(NULL, 60, 0); | ||
978 | } | 1007 | } |
979 | } | 1008 | } |
980 | 1009 | ||
981 | void __init pmac_pcibios_fixup(void) | ||
982 | { | ||
983 | /* Fixup interrupts according to OF tree */ | ||
984 | pcibios_fixup_OF_interrupts(); | ||
985 | } | ||
986 | |||
987 | #ifdef CONFIG_PPC64 | 1010 | #ifdef CONFIG_PPC64 |
988 | static void __init pmac_fixup_phb_resources(void) | 1011 | static void __init pmac_fixup_phb_resources(void) |
989 | { | 1012 | { |
@@ -1071,6 +1094,7 @@ void __init pmac_pci_init(void) | |||
1071 | 1094 | ||
1072 | #else /* CONFIG_PPC64 */ | 1095 | #else /* CONFIG_PPC64 */ |
1073 | init_p2pbridge(); | 1096 | init_p2pbridge(); |
1097 | init_second_ohare(); | ||
1074 | fixup_nec_usb2(); | 1098 | fixup_nec_usb2(); |
1075 | 1099 | ||
1076 | /* We are still having some issues with the Xserve G4, enabling | 1100 | /* We are still having some issues with the Xserve G4, enabling |
diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c index d6eab8b3f7de..6d66359ec8c8 100644 --- a/arch/powerpc/platforms/powermac/pfunc_base.c +++ b/arch/powerpc/platforms/powermac/pfunc_base.c | |||
@@ -24,19 +24,18 @@ static irqreturn_t macio_gpio_irq(int irq, void *data, struct pt_regs *regs) | |||
24 | 24 | ||
25 | static int macio_do_gpio_irq_enable(struct pmf_function *func) | 25 | static int macio_do_gpio_irq_enable(struct pmf_function *func) |
26 | { | 26 | { |
27 | if (func->node->n_intrs < 1) | 27 | unsigned int irq = irq_of_parse_and_map(func->node, 0); |
28 | if (irq == NO_IRQ) | ||
28 | return -EINVAL; | 29 | return -EINVAL; |
29 | 30 | return request_irq(irq, macio_gpio_irq, 0, func->node->name, func); | |
30 | return request_irq(func->node->intrs[0].line, macio_gpio_irq, 0, | ||
31 | func->node->name, func); | ||
32 | } | 31 | } |
33 | 32 | ||
34 | static int macio_do_gpio_irq_disable(struct pmf_function *func) | 33 | static int macio_do_gpio_irq_disable(struct pmf_function *func) |
35 | { | 34 | { |
36 | if (func->node->n_intrs < 1) | 35 | unsigned int irq = irq_of_parse_and_map(func->node, 0); |
36 | if (irq == NO_IRQ) | ||
37 | return -EINVAL; | 37 | return -EINVAL; |
38 | 38 | free_irq(irq, func); | |
39 | free_irq(func->node->intrs[0].line, func); | ||
40 | return 0; | 39 | return 0; |
41 | } | 40 | } |
42 | 41 | ||
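The pfunc change is the driver-side half of the conversion: interrupts are mapped on demand with irq_of_parse_and_map() instead of being read from node->intrs[], and parsing the same specifier twice returns the same virq, which is what lets the disable path find the interrupt it has to free. A generic sketch (handler and names are placeholders):

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/ptrace.h>

static irqreturn_t example_gpio_irq(int irq, void *data, struct pt_regs *regs)
{
	return IRQ_HANDLED;
}

static int example_irq_enable(struct device_node *np, void *data)
{
	unsigned int irq = irq_of_parse_and_map(np, 0);

	if (irq == NO_IRQ)
		return -EINVAL;
	return request_irq(irq, example_gpio_irq, 0, np->name, data);
}

static int example_irq_disable(struct device_node *np, void *data)
{
	/* Parsing the same specifier again yields the same virq */
	unsigned int irq = irq_of_parse_and_map(np, 0);

	if (irq == NO_IRQ)
		return -EINVAL;
	free_irq(irq, data);
	return 0;
}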
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 58a4c7b90b8b..3d328bc1f7e0 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c | |||
@@ -65,13 +65,11 @@ static u32 level_mask[4]; | |||
65 | 65 | ||
66 | static DEFINE_SPINLOCK(pmac_pic_lock); | 66 | static DEFINE_SPINLOCK(pmac_pic_lock); |
67 | 67 | ||
68 | #define GATWICK_IRQ_POOL_SIZE 10 | ||
69 | static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE]; | ||
70 | |||
71 | #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) | 68 | #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) |
72 | static unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; | 69 | static unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; |
73 | static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; | 70 | static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; |
74 | static int pmac_irq_cascade = -1; | 71 | static int pmac_irq_cascade = -1; |
72 | static struct irq_host *pmac_pic_host; | ||
75 | 73 | ||
76 | static void __pmac_retrigger(unsigned int irq_nr) | 74 | static void __pmac_retrigger(unsigned int irq_nr) |
77 | { | 75 | { |
@@ -86,18 +84,16 @@ static void __pmac_retrigger(unsigned int irq_nr) | |||
86 | } | 84 | } |
87 | } | 85 | } |
88 | 86 | ||
89 | static void pmac_mask_and_ack_irq(unsigned int irq_nr) | 87 | static void pmac_mask_and_ack_irq(unsigned int virq) |
90 | { | 88 | { |
91 | unsigned long bit = 1UL << (irq_nr & 0x1f); | 89 | unsigned int src = irq_map[virq].hwirq; |
92 | int i = irq_nr >> 5; | 90 | unsigned long bit = 1UL << (virq & 0x1f); |
91 | int i = virq >> 5; | ||
93 | unsigned long flags; | 92 | unsigned long flags; |
94 | 93 | ||
95 | if ((unsigned)irq_nr >= max_irqs) | ||
96 | return; | ||
97 | |||
98 | spin_lock_irqsave(&pmac_pic_lock, flags); | 94 | spin_lock_irqsave(&pmac_pic_lock, flags); |
99 | __clear_bit(irq_nr, ppc_cached_irq_mask); | 95 | __clear_bit(src, ppc_cached_irq_mask); |
100 | if (__test_and_clear_bit(irq_nr, ppc_lost_interrupts)) | 96 | if (__test_and_clear_bit(src, ppc_lost_interrupts)) |
101 | atomic_dec(&ppc_n_lost_interrupts); | 97 | atomic_dec(&ppc_n_lost_interrupts); |
102 | out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]); | 98 | out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]); |
103 | out_le32(&pmac_irq_hw[i]->ack, bit); | 99 | out_le32(&pmac_irq_hw[i]->ack, bit); |
@@ -110,17 +106,15 @@ static void pmac_mask_and_ack_irq(unsigned int irq_nr) | |||
110 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | 106 | spin_unlock_irqrestore(&pmac_pic_lock, flags); |
111 | } | 107 | } |
112 | 108 | ||
113 | static void pmac_ack_irq(unsigned int irq_nr) | 109 | static void pmac_ack_irq(unsigned int virq) |
114 | { | 110 | { |
115 | unsigned long bit = 1UL << (irq_nr & 0x1f); | 111 | unsigned int src = irq_map[virq].hwirq; |
116 | int i = irq_nr >> 5; | 112 | unsigned long bit = 1UL << (src & 0x1f); |
113 | int i = src >> 5; | ||
117 | unsigned long flags; | 114 | unsigned long flags; |
118 | 115 | ||
119 | if ((unsigned)irq_nr >= max_irqs) | ||
120 | return; | ||
121 | |||
122 | spin_lock_irqsave(&pmac_pic_lock, flags); | 116 | spin_lock_irqsave(&pmac_pic_lock, flags); |
123 | if (__test_and_clear_bit(irq_nr, ppc_lost_interrupts)) | 117 | if (__test_and_clear_bit(src, ppc_lost_interrupts)) |
124 | atomic_dec(&ppc_n_lost_interrupts); | 118 | atomic_dec(&ppc_n_lost_interrupts); |
125 | out_le32(&pmac_irq_hw[i]->ack, bit); | 119 | out_le32(&pmac_irq_hw[i]->ack, bit); |
126 | (void)in_le32(&pmac_irq_hw[i]->ack); | 120 | (void)in_le32(&pmac_irq_hw[i]->ack); |
@@ -157,48 +151,51 @@ static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost) | |||
157 | /* When an irq gets requested for the first client, if it's an | 151 | /* When an irq gets requested for the first client, if it's an |
158 | * edge interrupt, we clear any previous one on the controller | 152 | * edge interrupt, we clear any previous one on the controller |
159 | */ | 153 | */ |
160 | static unsigned int pmac_startup_irq(unsigned int irq_nr) | 154 | static unsigned int pmac_startup_irq(unsigned int virq) |
161 | { | 155 | { |
162 | unsigned long flags; | 156 | unsigned long flags; |
163 | unsigned long bit = 1UL << (irq_nr & 0x1f); | 157 | unsigned int src = irq_map[virq].hwirq; |
164 | int i = irq_nr >> 5; | 158 | unsigned long bit = 1UL << (src & 0x1f); |
159 | int i = src >> 5; | ||
165 | 160 | ||
166 | spin_lock_irqsave(&pmac_pic_lock, flags); | 161 | spin_lock_irqsave(&pmac_pic_lock, flags); |
167 | if ((irq_desc[irq_nr].status & IRQ_LEVEL) == 0) | 162 | if ((irq_desc[virq].status & IRQ_LEVEL) == 0) |
168 | out_le32(&pmac_irq_hw[i]->ack, bit); | 163 | out_le32(&pmac_irq_hw[i]->ack, bit); |
169 | __set_bit(irq_nr, ppc_cached_irq_mask); | 164 | __set_bit(src, ppc_cached_irq_mask); |
170 | __pmac_set_irq_mask(irq_nr, 0); | 165 | __pmac_set_irq_mask(src, 0); |
171 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | 166 | spin_unlock_irqrestore(&pmac_pic_lock, flags); |
172 | 167 | ||
173 | return 0; | 168 | return 0; |
174 | } | 169 | } |
175 | 170 | ||
176 | static void pmac_mask_irq(unsigned int irq_nr) | 171 | static void pmac_mask_irq(unsigned int virq) |
177 | { | 172 | { |
178 | unsigned long flags; | 173 | unsigned long flags; |
174 | unsigned int src = irq_map[virq].hwirq; | ||
179 | 175 | ||
180 | spin_lock_irqsave(&pmac_pic_lock, flags); | 176 | spin_lock_irqsave(&pmac_pic_lock, flags); |
181 | __clear_bit(irq_nr, ppc_cached_irq_mask); | 177 | __clear_bit(src, ppc_cached_irq_mask); |
182 | __pmac_set_irq_mask(irq_nr, 0); | 178 | __pmac_set_irq_mask(src, 0); |
183 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | 179 | spin_unlock_irqrestore(&pmac_pic_lock, flags); |
184 | } | 180 | } |
185 | 181 | ||
186 | static void pmac_unmask_irq(unsigned int irq_nr) | 182 | static void pmac_unmask_irq(unsigned int virq) |
187 | { | 183 | { |
188 | unsigned long flags; | 184 | unsigned long flags; |
185 | unsigned int src = irq_map[virq].hwirq; | ||
189 | 186 | ||
190 | spin_lock_irqsave(&pmac_pic_lock, flags); | 187 | spin_lock_irqsave(&pmac_pic_lock, flags); |
191 | __set_bit(irq_nr, ppc_cached_irq_mask); | 188 | __set_bit(src, ppc_cached_irq_mask); |
192 | __pmac_set_irq_mask(irq_nr, 0); | 189 | __pmac_set_irq_mask(src, 0); |
193 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | 190 | spin_unlock_irqrestore(&pmac_pic_lock, flags); |
194 | } | 191 | } |
195 | 192 | ||
196 | static int pmac_retrigger(unsigned int irq_nr) | 193 | static int pmac_retrigger(unsigned int virq) |
197 | { | 194 | { |
198 | unsigned long flags; | 195 | unsigned long flags; |
199 | 196 | ||
200 | spin_lock_irqsave(&pmac_pic_lock, flags); | 197 | spin_lock_irqsave(&pmac_pic_lock, flags); |
201 | __pmac_retrigger(irq_nr); | 198 | __pmac_retrigger(irq_map[virq].hwirq); |
202 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | 199 | spin_unlock_irqrestore(&pmac_pic_lock, flags); |
203 | return 1; | 200 | return 1; |
204 | } | 201 | } |
@@ -238,7 +235,7 @@ static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs) | |||
238 | return rc; | 235 | return rc; |
239 | } | 236 | } |
240 | 237 | ||
241 | static int pmac_get_irq(struct pt_regs *regs) | 238 | static unsigned int pmac_pic_get_irq(struct pt_regs *regs) |
242 | { | 239 | { |
243 | int irq; | 240 | int irq; |
244 | unsigned long bits = 0; | 241 | unsigned long bits = 0; |
@@ -250,7 +247,7 @@ static int pmac_get_irq(struct pt_regs *regs) | |||
250 | /* IPI's are a hack on the powersurge -- Cort */ | 247 | /* IPI's are a hack on the powersurge -- Cort */ |
251 | if ( smp_processor_id() != 0 ) { | 248 | if ( smp_processor_id() != 0 ) { |
252 | psurge_smp_message_recv(regs); | 249 | psurge_smp_message_recv(regs); |
253 | return -2; /* ignore, already handled */ | 250 | return NO_IRQ_IGNORE; /* ignore, already handled */ |
254 | } | 251 | } |
255 | #endif /* CONFIG_SMP */ | 252 | #endif /* CONFIG_SMP */ |
256 | spin_lock_irqsave(&pmac_pic_lock, flags); | 253 | spin_lock_irqsave(&pmac_pic_lock, flags); |
@@ -266,133 +263,9 @@ static int pmac_get_irq(struct pt_regs *regs) | |||
266 | break; | 263 | break; |
267 | } | 264 | } |
268 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | 265 | spin_unlock_irqrestore(&pmac_pic_lock, flags); |
269 | 266 | if (unlikely(irq < 0)) | |
270 | return irq; | 267 | return NO_IRQ; |
271 | } | 268 | return irq_linear_revmap(pmac_pic_host, irq); |
272 | |||
273 | /* This routine will fix some missing interrupt values in the device tree | ||
274 | * on the gatwick mac-io controller used by some PowerBooks | ||
275 | * | ||
276 | * Walking of OF nodes could use a bit more fixing up here, but it's not | ||
277 | * very important as this is all boot time code on static portions of the | ||
278 | * device-tree. | ||
279 | * | ||
280 | * However, the modifications done to "intrs" will have to be removed and | ||
281 | * replaced with proper updates of the "interrupts" properties or | ||
282 | * AAPL,interrupts, yet to be decided, once the dynamic parsing is there. | ||
283 | */ | ||
284 | static void __init pmac_fix_gatwick_interrupts(struct device_node *gw, | ||
285 | int irq_base) | ||
286 | { | ||
287 | struct device_node *node; | ||
288 | int count; | ||
289 | |||
290 | memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool)); | ||
291 | count = 0; | ||
292 | for (node = NULL; (node = of_get_next_child(gw, node)) != NULL;) { | ||
293 | /* Fix SCC */ | ||
294 | if ((strcasecmp(node->name, "escc") == 0) && node->child) { | ||
295 | if (node->child->n_intrs < 3) { | ||
296 | node->child->intrs = &gatwick_int_pool[count]; | ||
297 | count += 3; | ||
298 | } | ||
299 | node->child->n_intrs = 3; | ||
300 | node->child->intrs[0].line = 15+irq_base; | ||
301 | node->child->intrs[1].line = 4+irq_base; | ||
302 | node->child->intrs[2].line = 5+irq_base; | ||
303 | printk(KERN_INFO "irq: fixed SCC on gatwick" | ||
304 | " (%d,%d,%d)\n", | ||
305 | node->child->intrs[0].line, | ||
306 | node->child->intrs[1].line, | ||
307 | node->child->intrs[2].line); | ||
308 | } | ||
309 | /* Fix media-bay & left SWIM */ | ||
310 | if (strcasecmp(node->name, "media-bay") == 0) { | ||
311 | struct device_node* ya_node; | ||
312 | |||
313 | if (node->n_intrs == 0) | ||
314 | node->intrs = &gatwick_int_pool[count++]; | ||
315 | node->n_intrs = 1; | ||
316 | node->intrs[0].line = 29+irq_base; | ||
317 | printk(KERN_INFO "irq: fixed media-bay on gatwick" | ||
318 | " (%d)\n", node->intrs[0].line); | ||
319 | |||
320 | ya_node = node->child; | ||
321 | while(ya_node) { | ||
322 | if (strcasecmp(ya_node->name, "floppy") == 0) { | ||
323 | if (ya_node->n_intrs < 2) { | ||
324 | ya_node->intrs = &gatwick_int_pool[count]; | ||
325 | count += 2; | ||
326 | } | ||
327 | ya_node->n_intrs = 2; | ||
328 | ya_node->intrs[0].line = 19+irq_base; | ||
329 | ya_node->intrs[1].line = 1+irq_base; | ||
330 | printk(KERN_INFO "irq: fixed floppy on second controller (%d,%d)\n", | ||
331 | ya_node->intrs[0].line, ya_node->intrs[1].line); | ||
332 | } | ||
333 | if (strcasecmp(ya_node->name, "ata4") == 0) { | ||
334 | if (ya_node->n_intrs < 2) { | ||
335 | ya_node->intrs = &gatwick_int_pool[count]; | ||
336 | count += 2; | ||
337 | } | ||
338 | ya_node->n_intrs = 2; | ||
339 | ya_node->intrs[0].line = 14+irq_base; | ||
340 | ya_node->intrs[1].line = 3+irq_base; | ||
341 | printk(KERN_INFO "irq: fixed ide on second controller (%d,%d)\n", | ||
342 | ya_node->intrs[0].line, ya_node->intrs[1].line); | ||
343 | } | ||
344 | ya_node = ya_node->sibling; | ||
345 | } | ||
346 | } | ||
347 | } | ||
348 | if (count > 10) { | ||
349 | printk("WARNING !! Gatwick interrupt pool overflow\n"); | ||
350 | printk(" GATWICK_IRQ_POOL_SIZE = %d\n", GATWICK_IRQ_POOL_SIZE); | ||
351 | printk(" requested = %d\n", count); | ||
352 | } | ||
353 | } | ||
354 | |||
355 | /* | ||
356 | * The PowerBook 3400/2400/3500 can have a combo ethernet/modem | ||
357 | * card which includes an ohare chip that acts as a second interrupt | ||
358 | * controller. If we find this second ohare, set it up and fix the | ||
359 | * interrupt value in the device tree for the ethernet chip. | ||
360 | */ | ||
361 | static void __init enable_second_ohare(struct device_node *np) | ||
362 | { | ||
363 | unsigned char bus, devfn; | ||
364 | unsigned short cmd; | ||
365 | struct device_node *ether; | ||
366 | |||
367 | /* This code doesn't strictly belong here, it could be part of | ||
368 | * either the PCI initialisation or the feature code. It's kept | ||
369 | * here for historical reasons. | ||
370 | */ | ||
371 | if (pci_device_from_OF_node(np, &bus, &devfn) == 0) { | ||
372 | struct pci_controller* hose = | ||
373 | pci_find_hose_for_OF_device(np); | ||
374 | if (!hose) { | ||
375 | printk(KERN_ERR "Can't find PCI hose for OHare2 !\n"); | ||
376 | return; | ||
377 | } | ||
378 | early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd); | ||
379 | cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; | ||
380 | cmd &= ~PCI_COMMAND_IO; | ||
381 | early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd); | ||
382 | } | ||
383 | |||
384 | /* Fix interrupt for the modem/ethernet combo controller. The number | ||
385 | * in the device tree (27) is bogus (correct for the ethernet-only | ||
386 | * board but not the combo ethernet/modem board). | ||
387 | * The real interrupt is 28 on the second controller -> 28+32 = 60. | ||
388 | */ | ||
389 | ether = of_find_node_by_name(NULL, "pci1011,14"); | ||
390 | if (ether && ether->n_intrs > 0) { | ||
391 | ether->intrs[0].line = 60; | ||
392 | printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n", | ||
393 | ether->intrs[0].line); | ||
394 | } | ||
395 | of_node_put(ether); | ||
396 | } | 269 | } |
397 | 270 | ||
398 | #ifdef CONFIG_XMON | 271 | #ifdef CONFIG_XMON |
@@ -411,6 +284,50 @@ static struct irqaction gatwick_cascade_action = { | |||
411 | .name = "cascade", | 284 | .name = "cascade", |
412 | }; | 285 | }; |
413 | 286 | ||
287 | static int pmac_pic_host_match(struct irq_host *h, struct device_node *node) | ||
288 | { | ||
289 | /* We match all; we don't always have a node anyway */ | ||
290 | return 1; | ||
291 | } | ||
292 | |||
293 | static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, | ||
294 | irq_hw_number_t hw, unsigned int flags) | ||
295 | { | ||
296 | struct irq_desc *desc = get_irq_desc(virq); | ||
297 | int level; | ||
298 | |||
299 | if (hw >= max_irqs) | ||
300 | return -EINVAL; | ||
301 | |||
302 | /* Mark level interrupts, set delayed disable for edge ones and set | ||
303 | * handlers | ||
304 | */ | ||
305 | level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f))); | ||
306 | if (level) | ||
307 | desc->status |= IRQ_LEVEL; | ||
308 | else | ||
309 | desc->status |= IRQ_DELAYED_DISABLE; | ||
310 | set_irq_chip_and_handler(virq, &pmac_pic, level ? | ||
311 | handle_level_irq : handle_edge_irq); | ||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | static int pmac_pic_host_xlate(struct irq_host *h, struct device_node *ct, | ||
316 | u32 *intspec, unsigned int intsize, | ||
317 | irq_hw_number_t *out_hwirq, | ||
318 | unsigned int *out_flags) | ||
319 | |||
320 | { | ||
321 | *out_hwirq = *intspec; | ||
322 | return 0; | ||
323 | } | ||
324 | |||
325 | static struct irq_host_ops pmac_pic_host_ops = { | ||
326 | .match = pmac_pic_host_match, | ||
327 | .map = pmac_pic_host_map, | ||
328 | .xlate = pmac_pic_host_xlate, | ||
329 | }; | ||
330 | |||
414 | static void __init pmac_pic_probe_oldstyle(void) | 331 | static void __init pmac_pic_probe_oldstyle(void) |
415 | { | 332 | { |
416 | int i; | 333 | int i; |
@@ -420,7 +337,7 @@ static void __init pmac_pic_probe_oldstyle(void) | |||
420 | struct resource r; | 337 | struct resource r; |
421 | 338 | ||
422 | /* Set our get_irq function */ | 339 | /* Set our get_irq function */ |
423 | ppc_md.get_irq = pmac_get_irq; | 340 | ppc_md.get_irq = pmac_pic_get_irq; |
424 | 341 | ||
425 | /* | 342 | /* |
426 | * Find the interrupt controller type & node | 343 | * Find the interrupt controller type & node |
@@ -438,7 +355,6 @@ static void __init pmac_pic_probe_oldstyle(void) | |||
438 | if (slave) { | 355 | if (slave) { |
439 | max_irqs = 64; | 356 | max_irqs = 64; |
440 | level_mask[1] = OHARE_LEVEL_MASK; | 357 | level_mask[1] = OHARE_LEVEL_MASK; |
441 | enable_second_ohare(slave); | ||
442 | } | 358 | } |
443 | } else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) { | 359 | } else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) { |
444 | max_irqs = max_real_irqs = 64; | 360 | max_irqs = max_real_irqs = 64; |
@@ -462,21 +378,18 @@ static void __init pmac_pic_probe_oldstyle(void) | |||
462 | max_irqs = 128; | 378 | max_irqs = 128; |
463 | level_mask[2] = HEATHROW_LEVEL_MASK; | 379 | level_mask[2] = HEATHROW_LEVEL_MASK; |
464 | level_mask[3] = 0; | 380 | level_mask[3] = 0; |
465 | pmac_fix_gatwick_interrupts(slave, max_real_irqs); | ||
466 | } | 381 | } |
467 | } | 382 | } |
468 | BUG_ON(master == NULL); | 383 | BUG_ON(master == NULL); |
469 | 384 | ||
470 | /* Mark level interrupts and set handlers */ | 385 | /* |
471 | for (i = 0; i < max_irqs; i++) { | 386 | * Allocate an irq host |
472 | int level = !!(level_mask[i >> 5] & (1UL << (i & 0x1f))); | 387 | */ |
473 | if (level) | 388 | pmac_pic_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, max_irqs, |
474 | irq_desc[i].status |= IRQ_LEVEL; | 389 | &pmac_pic_host_ops, |
475 | else | 390 | max_irqs); |
476 | irq_desc[i].status |= IRQ_DELAYED_DISABLE; | 391 | BUG_ON(pmac_pic_host == NULL); |
477 | set_irq_chip_and_handler(i, &pmac_pic, level ? | 392 | irq_set_default_host(pmac_pic_host); |
478 | handle_level_irq : handle_edge_irq); | ||
479 | } | ||
480 | 393 | ||
481 | /* Get addresses of first controller if we have a node for it */ | 394 | /* Get addresses of first controller if we have a node for it */ |
482 | BUG_ON(of_address_to_resource(master, 0, &r)); | 395 | BUG_ON(of_address_to_resource(master, 0, &r)); |
@@ -503,7 +416,7 @@ static void __init pmac_pic_probe_oldstyle(void) | |||
503 | pmac_irq_hw[i++] = | 416 | pmac_irq_hw[i++] = |
504 | (volatile struct pmac_irq_hw __iomem *) | 417 | (volatile struct pmac_irq_hw __iomem *) |
505 | (addr + 0x10); | 418 | (addr + 0x10); |
506 | pmac_irq_cascade = slave->intrs[0].line; | 419 | pmac_irq_cascade = irq_of_parse_and_map(slave, 0); |
507 | 420 | ||
508 | printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs" | 421 | printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs" |
509 | " cascade: %d\n", slave->full_name, | 422 | " cascade: %d\n", slave->full_name, |
@@ -516,12 +429,12 @@ static void __init pmac_pic_probe_oldstyle(void) | |||
516 | out_le32(&pmac_irq_hw[i]->enable, 0); | 429 | out_le32(&pmac_irq_hw[i]->enable, 0); |
517 | 430 | ||
518 | /* Hookup cascade irq */ | 431 | /* Hookup cascade irq */ |
519 | if (slave) | 432 | if (slave && pmac_irq_cascade != NO_IRQ) |
520 | setup_irq(pmac_irq_cascade, &gatwick_cascade_action); | 433 | setup_irq(pmac_irq_cascade, &gatwick_cascade_action); |
521 | 434 | ||
522 | printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs); | 435 | printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs); |
523 | #ifdef CONFIG_XMON | 436 | #ifdef CONFIG_XMON |
524 | setup_irq(20, &xmon_action); | 437 | setup_irq(irq_create_mapping(NULL, 20, 0), &xmon_action); |
525 | #endif | 438 | #endif |
526 | } | 439 | } |
527 | #endif /* CONFIG_PPC32 */ | 440 | #endif /* CONFIG_PPC32 */ |
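The PowerMac PIC now allocates a linear irq_host: the map callback installs the chip and flow handler per source, and the interrupt entry path turns the hardware source number into a virq with irq_linear_revmap(). A reduced sketch of the same structure (EXAMPLE_MAX_IRQS, the placeholder chip and the register read are assumptions):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <asm/irq.h>
#include <asm/ptrace.h>

#define EXAMPLE_MAX_IRQS	64

static struct irq_host *example_host;
static struct irq_chip example_pic = {		/* placeholder chip */
	.typename = " EXAMPLE-PIC ",
};

static int example_host_map(struct irq_host *h, unsigned int virq,
			    irq_hw_number_t hw, unsigned int flags)
{
	if (hw >= EXAMPLE_MAX_IRQS)
		return -EINVAL;
	/* A real map callback would pick level vs. edge per source */
	set_irq_chip_and_handler(virq, &example_pic, handle_level_irq);
	return 0;
}

static struct irq_host_ops example_host_ops = {
	.map = example_host_map,
};

static void __init example_pic_init(void)
{
	example_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, EXAMPLE_MAX_IRQS,
				      &example_host_ops, EXAMPLE_MAX_IRQS);
	BUG_ON(example_host == NULL);
	irq_set_default_host(example_host);
}

static unsigned int example_get_irq(struct pt_regs *regs)
{
	int src = -1;	/* stand-in: read the pending source from the PIC */

	if (src < 0)
		return NO_IRQ;
	/* O(1) translation of the hardware source number into a virq */
	return irq_linear_revmap(example_host, src);
}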
@@ -530,16 +443,11 @@ static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc, | |||
530 | struct pt_regs *regs) | 443 | struct pt_regs *regs) |
531 | { | 444 | { |
532 | struct mpic *mpic = desc->handler_data; | 445 | struct mpic *mpic = desc->handler_data; |
533 | unsigned int max = 100; | ||
534 | 446 | ||
535 | while(max--) { | 447 | unsigned int cascade_irq = mpic_get_one_irq(mpic, regs); |
536 | int cascade_irq = mpic_get_one_irq(mpic, regs); | 448 | if (cascade_irq != NO_IRQ) |
537 | if (max == 99) | ||
538 | desc->chip->eoi(irq); | ||
539 | if (irq < 0) | ||
540 | break; | ||
541 | generic_handle_irq(cascade_irq, regs); | 449 | generic_handle_irq(cascade_irq, regs); |
542 | }; | 450 | desc->chip->eoi(irq); |
543 | } | 451 | } |
544 | 452 | ||
545 | static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic) | 453 | static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic) |
@@ -549,21 +457,20 @@ static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic) | |||
549 | int nmi_irq; | 457 | int nmi_irq; |
550 | 458 | ||
551 | pswitch = of_find_node_by_name(NULL, "programmer-switch"); | 459 | pswitch = of_find_node_by_name(NULL, "programmer-switch"); |
552 | if (pswitch && pswitch->n_intrs) { | 460 | if (pswitch) { |
553 | nmi_irq = pswitch->intrs[0].line; | 461 | nmi_irq = irq_of_parse_and_map(pswitch, 0); |
554 | mpic_irq_set_priority(nmi_irq, 9); | 462 | if (nmi_irq != NO_IRQ) { |
555 | setup_irq(nmi_irq, &xmon_action); | 463 | mpic_irq_set_priority(nmi_irq, 9); |
464 | setup_irq(nmi_irq, &xmon_action); | ||
465 | } | ||
466 | of_node_put(pswitch); | ||
556 | } | 467 | } |
557 | of_node_put(pswitch); | ||
558 | #endif /* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */ | 468 | #endif /* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */ |
559 | } | 469 | } |
560 | 470 | ||
561 | static struct mpic * __init pmac_setup_one_mpic(struct device_node *np, | 471 | static struct mpic * __init pmac_setup_one_mpic(struct device_node *np, |
562 | int master) | 472 | int master) |
563 | { | 473 | { |
564 | unsigned char senses[128]; | ||
565 | int offset = master ? 0 : 128; | ||
566 | int count = master ? 128 : 124; | ||
567 | const char *name = master ? " MPIC 1 " : " MPIC 2 "; | 474 | const char *name = master ? " MPIC 1 " : " MPIC 2 "; |
568 | struct resource r; | 475 | struct resource r; |
569 | struct mpic *mpic; | 476 | struct mpic *mpic; |
@@ -576,8 +483,6 @@ static struct mpic * __init pmac_setup_one_mpic(struct device_node *np, | |||
576 | 483 | ||
577 | pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0); | 484 | pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0); |
578 | 485 | ||
579 | prom_get_irq_senses(senses, offset, offset + count); | ||
580 | |||
581 | flags |= MPIC_WANTS_RESET; | 486 | flags |= MPIC_WANTS_RESET; |
582 | if (get_property(np, "big-endian", NULL)) | 487 | if (get_property(np, "big-endian", NULL)) |
583 | flags |= MPIC_BIG_ENDIAN; | 488 | flags |= MPIC_BIG_ENDIAN; |
@@ -588,8 +493,7 @@ static struct mpic * __init pmac_setup_one_mpic(struct device_node *np, | |||
588 | if (master && (flags & MPIC_BIG_ENDIAN)) | 493 | if (master && (flags & MPIC_BIG_ENDIAN)) |
589 | flags |= MPIC_BROKEN_U3; | 494 | flags |= MPIC_BROKEN_U3; |
590 | 495 | ||
591 | mpic = mpic_alloc(r.start, flags, 0, offset, count, master ? 252 : 0, | 496 | mpic = mpic_alloc(np, r.start, flags, 0, 0, name); |
592 | senses, count, name); | ||
593 | if (mpic == NULL) | 497 | if (mpic == NULL) |
594 | return NULL; | 498 | return NULL; |
595 | 499 | ||
@@ -602,6 +506,7 @@ static int __init pmac_pic_probe_mpic(void) | |||
602 | { | 506 | { |
603 | struct mpic *mpic1, *mpic2; | 507 | struct mpic *mpic1, *mpic2; |
604 | struct device_node *np, *master = NULL, *slave = NULL; | 508 | struct device_node *np, *master = NULL, *slave = NULL; |
509 | unsigned int cascade; | ||
605 | 510 | ||
606 | /* We can have up to 2 MPICs cascaded */ | 511 | /* We can have up to 2 MPICs cascaded */ |
607 | for (np = NULL; (np = of_find_node_by_type(np, "open-pic")) | 512 | for (np = NULL; (np = of_find_node_by_type(np, "open-pic")) |
@@ -638,8 +543,15 @@ static int __init pmac_pic_probe_mpic(void) | |||
638 | of_node_put(master); | 543 | of_node_put(master); |
639 | 544 | ||
640 | /* No slave, let's go out */ | 545 | /* No slave, let's go out */ |
641 | if (slave == NULL || slave->n_intrs < 1) | 546 | if (slave == NULL) |
547 | return 0; | ||
548 | |||
549 | /* Get/Map slave interrupt */ | ||
550 | cascade = irq_of_parse_and_map(slave, 0); | ||
551 | if (cascade == NO_IRQ) { | ||
552 | printk(KERN_ERR "Failed to map cascade IRQ\n"); | ||
642 | return 0; | 553 | return 0; |
554 | } | ||
643 | 555 | ||
644 | mpic2 = pmac_setup_one_mpic(slave, 0); | 556 | mpic2 = pmac_setup_one_mpic(slave, 0); |
645 | if (mpic2 == NULL) { | 557 | if (mpic2 == NULL) { |
@@ -647,8 +559,8 @@ static int __init pmac_pic_probe_mpic(void) | |||
647 | of_node_put(slave); | 559 | of_node_put(slave); |
648 | return 0; | 560 | return 0; |
649 | } | 561 | } |
650 | set_irq_data(slave->intrs[0].line, mpic2); | 562 | set_irq_data(cascade, mpic2); |
651 | set_irq_chained_handler(slave->intrs[0].line, pmac_u3_cascade); | 563 | set_irq_chained_handler(cascade, pmac_u3_cascade); |
652 | 564 | ||
653 | of_node_put(slave); | 565 | of_node_put(slave); |
654 | return 0; | 566 | return 0; |
@@ -657,6 +569,19 @@ static int __init pmac_pic_probe_mpic(void) | |||
657 | 569 | ||
658 | void __init pmac_pic_init(void) | 570 | void __init pmac_pic_init(void) |
659 | { | 571 | { |
572 | unsigned int flags = 0; | ||
573 | |||
574 | /* We configure the OF parsing based on our oldworld vs. newworld | ||
575 | * platform type and whether we were booted by BootX. | ||
576 | */ | ||
577 | #ifdef CONFIG_PPC32 | ||
578 | if (!pmac_newworld) | ||
579 | flags |= OF_IMAP_OLDWORLD_MAC; | ||
580 | if (get_property(of_chosen, "linux,bootx", NULL) != NULL) | ||
581 | flags |= OF_IMAP_NO_PHANDLE; | ||
582 | of_irq_map_init(flags); | ||
583 | #endif /* CONFIG_PPC32 */ | ||
584 | |||
660 | /* We first try to detect Apple's new Core99 chipset, since mac-io | 585 | /* We first try to detect Apple's new Core99 chipset, since mac-io |
661 | * is quite different on those machines and contains an IBM MPIC2. | 586 | * is quite different on those machines and contains an IBM MPIC2. |
662 | */ | 587 | */ |
@@ -679,6 +604,7 @@ unsigned long sleep_save_mask[2]; | |||
679 | 604 | ||
680 | /* This used to be passed by the PMU driver but that link got | 605 | /* This used to be passed by the PMU driver but that link got |
681 | * broken with the new driver model. We use this tweak for now... | 606 | * broken with the new driver model. We use this tweak for now... |
607 | * We really want to do things differently though... | ||
682 | */ | 608 | */ |
683 | static int pmacpic_find_viaint(void) | 609 | static int pmacpic_find_viaint(void) |
684 | { | 610 | { |
@@ -692,7 +618,7 @@ static int pmacpic_find_viaint(void) | |||
692 | np = of_find_node_by_name(NULL, "via-pmu"); | 618 | np = of_find_node_by_name(NULL, "via-pmu"); |
693 | if (np == NULL) | 619 | if (np == NULL) |
694 | goto not_found; | 620 | goto not_found; |
695 | viaint = np->intrs[0].line; | 621 | viaint = irq_of_parse_and_map(np, 0);; |
696 | #endif /* CONFIG_ADB_PMU */ | 622 | #endif /* CONFIG_ADB_PMU */ |
697 | 623 | ||
698 | not_found: | 624 | not_found: |
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h index 21c7b0f8f329..94e7b24b840b 100644 --- a/arch/powerpc/platforms/powermac/pmac.h +++ b/arch/powerpc/platforms/powermac/pmac.h | |||
@@ -12,6 +12,8 @@ | |||
12 | 12 | ||
13 | struct rtc_time; | 13 | struct rtc_time; |
14 | 14 | ||
15 | extern int pmac_newworld; | ||
16 | |||
15 | extern long pmac_time_init(void); | 17 | extern long pmac_time_init(void); |
16 | extern unsigned long pmac_get_boot_time(void); | 18 | extern unsigned long pmac_get_boot_time(void); |
17 | extern void pmac_get_rtc_time(struct rtc_time *); | 19 | extern void pmac_get_rtc_time(struct rtc_time *); |
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 8654b5f07836..31a9da769fa2 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c | |||
@@ -613,9 +613,6 @@ static void __init pmac_init_early(void) | |||
613 | udbg_adb_init(!!strstr(cmd_line, "btextdbg")); | 613 | udbg_adb_init(!!strstr(cmd_line, "btextdbg")); |
614 | 614 | ||
615 | #ifdef CONFIG_PPC64 | 615 | #ifdef CONFIG_PPC64 |
616 | /* Setup interrupt mapping options */ | ||
617 | ppc64_interrupt_controller = IC_OPEN_PIC; | ||
618 | |||
619 | iommu_init_early_dart(); | 616 | iommu_init_early_dart(); |
620 | #endif | 617 | #endif |
621 | } | 618 | } |
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index 9639c66b453d..9df783088b61 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c | |||
@@ -72,32 +72,62 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id, | |||
72 | 72 | ||
73 | /* #define DEBUG */ | 73 | /* #define DEBUG */ |
74 | 74 | ||
75 | static void request_ras_irqs(struct device_node *np, char *propname, | 75 | |
76 | static void request_ras_irqs(struct device_node *np, | ||
76 | irqreturn_t (*handler)(int, void *, struct pt_regs *), | 77 | irqreturn_t (*handler)(int, void *, struct pt_regs *), |
77 | const char *name) | 78 | const char *name) |
78 | { | 79 | { |
79 | unsigned int *ireg, len, i; | 80 | int i, index, count = 0; |
80 | int virq, n_intr; | 81 | struct of_irq oirq; |
81 | 82 | u32 *opicprop; | |
82 | ireg = (unsigned int *)get_property(np, propname, &len); | 83 | unsigned int opicplen; |
83 | if (ireg == NULL) | 84 | unsigned int virqs[16]; |
84 | return; | 85 | |
85 | n_intr = prom_n_intr_cells(np); | 86 | /* Check for obsolete "open-pic-interrupt" property. If present, then |
86 | len /= n_intr * sizeof(*ireg); | 87 | * map those interrupts using the default interrupt host and default |
87 | 88 | * trigger | |
88 | for (i = 0; i < len; i++) { | 89 | */ |
89 | virq = virt_irq_create_mapping(*ireg); | 90 | opicprop = (u32 *)get_property(np, "open-pic-interrupt", &opicplen); |
90 | if (virq == NO_IRQ) { | 91 | if (opicprop) { |
91 | printk(KERN_ERR "Unable to allocate interrupt " | 92 | opicplen /= sizeof(u32); |
92 | "number for %s\n", np->full_name); | 93 | for (i = 0; i < opicplen; i++) { |
93 | return; | 94 | if (count > 15) |
95 | break; | ||
96 | virqs[count] = irq_create_mapping(NULL, *(opicprop++), | ||
97 | IRQ_TYPE_NONE); | ||
98 | if (virqs[count] == NO_IRQ) | ||
99 | printk(KERN_ERR "Unable to allocate interrupt " | ||
100 | "number for %s\n", np->full_name); | ||
101 | else | ||
102 | count++; | ||
103 | |||
94 | } | 104 | } |
95 | if (request_irq(irq_offset_up(virq), handler, 0, name, NULL)) { | 105 | } |
106 | /* Else use normal interrupt tree parsing */ | ||
107 | else { | ||
108 | /* First try to do a proper OF tree parsing */ | ||
109 | for (index = 0; of_irq_map_one(np, index, &oirq) == 0; | ||
110 | index++) { | ||
111 | if (count > 15) | ||
112 | break; | ||
113 | virqs[count] = irq_create_of_mapping(oirq.controller, | ||
114 | oirq.specifier, | ||
115 | oirq.size); | ||
116 | if (virqs[count] == NO_IRQ) | ||
117 | printk(KERN_ERR "Unable to allocate interrupt " | ||
118 | "number for %s\n", np->full_name); | ||
119 | else | ||
120 | count++; | ||
121 | } | ||
122 | } | ||
123 | |||
124 | /* Now request them */ | ||
125 | for (i = 0; i < count; i++) { | ||
126 | if (request_irq(virqs[i], handler, 0, name, NULL)) { | ||
96 | printk(KERN_ERR "Unable to request interrupt %d for " | 127 | printk(KERN_ERR "Unable to request interrupt %d for " |
97 | "%s\n", irq_offset_up(virq), np->full_name); | 128 | "%s\n", virqs[i], np->full_name); |
98 | return; | 129 | return; |
99 | } | 130 | } |
100 | ireg += n_intr; | ||
101 | } | 131 | } |
102 | } | 132 | } |
103 | 133 | ||
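
request_ras_irqs() now collects virqs two ways: the obsolete "open-pic-interrupt" property is mapped raw on the default host with irq_create_mapping(NULL, hwirq, IRQ_TYPE_NONE), while everything else goes through a proper interrupt-tree walk. The second path is the interesting one; a condensed sketch (map_event_irqs is a hypothetical helper, capped at `max` entries like the 16-slot array in the patch):

    static int map_event_irqs(struct device_node *np,
                              unsigned int *virqs, int max)
    {
            struct of_irq oirq;
            int index, count = 0;

            for (index = 0; of_irq_map_one(np, index, &oirq) == 0; index++) {
                    if (count >= max)
                            break;
                    /* translate the parsed specifier into a Linux virq */
                    virqs[count] = irq_create_of_mapping(oirq.controller,
                                                         oirq.specifier,
                                                         oirq.size);
                    if (virqs[count] != NO_IRQ)
                            count++;
            }
            return count;
    }
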
@@ -115,20 +145,14 @@ static int __init init_ras_IRQ(void) | |||
115 | /* Internal Errors */ | 145 | /* Internal Errors */ |
116 | np = of_find_node_by_path("/event-sources/internal-errors"); | 146 | np = of_find_node_by_path("/event-sources/internal-errors"); |
117 | if (np != NULL) { | 147 | if (np != NULL) { |
118 | request_ras_irqs(np, "open-pic-interrupt", ras_error_interrupt, | 148 | request_ras_irqs(np, ras_error_interrupt, "RAS_ERROR"); |
119 | "RAS_ERROR"); | ||
120 | request_ras_irqs(np, "interrupts", ras_error_interrupt, | ||
121 | "RAS_ERROR"); | ||
122 | of_node_put(np); | 149 | of_node_put(np); |
123 | } | 150 | } |
124 | 151 | ||
125 | /* EPOW Events */ | 152 | /* EPOW Events */ |
126 | np = of_find_node_by_path("/event-sources/epow-events"); | 153 | np = of_find_node_by_path("/event-sources/epow-events"); |
127 | if (np != NULL) { | 154 | if (np != NULL) { |
128 | request_ras_irqs(np, "open-pic-interrupt", ras_epow_interrupt, | 155 | request_ras_irqs(np, ras_epow_interrupt, "RAS_EPOW"); |
129 | "RAS_EPOW"); | ||
130 | request_ras_irqs(np, "interrupts", ras_epow_interrupt, | ||
131 | "RAS_EPOW"); | ||
132 | of_node_put(np); | 156 | of_node_put(np); |
133 | } | 157 | } |
134 | 158 | ||
@@ -162,7 +186,7 @@ ras_epow_interrupt(int irq, void *dev_id, struct pt_regs * regs) | |||
162 | 186 | ||
163 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, | 187 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, |
164 | RAS_VECTOR_OFFSET, | 188 | RAS_VECTOR_OFFSET, |
165 | virt_irq_to_real(irq_offset_down(irq)), | 189 | irq_map[irq].hwirq, |
166 | RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS, | 190 | RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS, |
167 | critical, __pa(&ras_log_buf), | 191 | critical, __pa(&ras_log_buf), |
168 | rtas_get_error_log_max()); | 192 | rtas_get_error_log_max()); |
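
RTAS wants the hardware interrupt number, not the Linux virq, so both RAS handlers now look it up through the global irq_map[] reverse table instead of the old virt_irq_to_real()/irq_offset_down() pair. In sketch form, with `irq` being the virq the handler was invoked with (arguments mirror the EPOW call above):

    unsigned int hwirq = (unsigned int)irq_map[irq].hwirq;  /* virq -> hw source */

    status = rtas_call(ras_check_exception_token, 6, 1, NULL,
                       RAS_VECTOR_OFFSET, hwirq,
                       RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS,
                       critical, __pa(&ras_log_buf),
                       rtas_get_error_log_max());
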
@@ -198,7 +222,7 @@ ras_error_interrupt(int irq, void *dev_id, struct pt_regs * regs) | |||
198 | 222 | ||
199 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, | 223 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, |
200 | RAS_VECTOR_OFFSET, | 224 | RAS_VECTOR_OFFSET, |
201 | virt_irq_to_real(irq_offset_down(irq)), | 225 | irq_map[irq].hwirq, |
202 | RTAS_INTERNAL_ERROR, 1 /*Time Critical */, | 226 | RTAS_INTERNAL_ERROR, 1 /*Time Critical */, |
203 | __pa(&ras_log_buf), | 227 | __pa(&ras_log_buf), |
204 | rtas_get_error_log_max()); | 228 | rtas_get_error_log_max()); |
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 476b564a208b..54a52437265c 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -76,6 +76,9 @@ | |||
76 | #define DBG(fmt...) | 76 | #define DBG(fmt...) |
77 | #endif | 77 | #endif |
78 | 78 | ||
79 | /* move those away to a .h */ | ||
80 | extern void smp_init_pseries_mpic(void); | ||
81 | extern void smp_init_pseries_xics(void); | ||
79 | extern void find_udbg_vterm(void); | 82 | extern void find_udbg_vterm(void); |
80 | 83 | ||
81 | int fwnmi_active; /* TRUE if an FWNMI handler is present */ | 84 | int fwnmi_active; /* TRUE if an FWNMI handler is present */ |
@@ -83,7 +86,7 @@ int fwnmi_active; /* TRUE if an FWNMI handler is present */ | |||
83 | static void pseries_shared_idle_sleep(void); | 86 | static void pseries_shared_idle_sleep(void); |
84 | static void pseries_dedicated_idle_sleep(void); | 87 | static void pseries_dedicated_idle_sleep(void); |
85 | 88 | ||
86 | struct mpic *pSeries_mpic; | 89 | static struct device_node *pSeries_mpic_node; |
87 | 90 | ||
88 | static void pSeries_show_cpuinfo(struct seq_file *m) | 91 | static void pSeries_show_cpuinfo(struct seq_file *m) |
89 | { | 92 | { |
@@ -118,78 +121,92 @@ static void __init fwnmi_init(void) | |||
118 | fwnmi_active = 1; | 121 | fwnmi_active = 1; |
119 | } | 122 | } |
120 | 123 | ||
121 | void pSeries_8259_cascade(unsigned int irq, struct irq_desc *desc, | 124 | void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc, |
122 | struct pt_regs *regs) | 125 | struct pt_regs *regs) |
123 | { | 126 | { |
124 | unsigned int max = 100; | 127 | unsigned int cascade_irq = i8259_irq(regs); |
125 | 128 | if (cascade_irq != NO_IRQ) | |
126 | while(max--) { | ||
127 | int cascade_irq = i8259_irq(regs); | ||
128 | if (max == 99) | ||
129 | desc->chip->eoi(irq); | ||
130 | if (cascade_irq < 0) | ||
131 | break; | ||
132 | generic_handle_irq(cascade_irq, regs); | 129 | generic_handle_irq(cascade_irq, regs); |
133 | }; | 130 | desc->chip->eoi(irq); |
134 | } | 131 | } |
135 | 132 | ||
136 | static void __init pSeries_init_mpic(void) | 133 | static void __init pseries_mpic_init_IRQ(void) |
137 | { | 134 | { |
135 | struct device_node *np, *old, *cascade = NULL; | ||
138 | unsigned int *addrp; | 136 | unsigned int *addrp; |
139 | struct device_node *np; | ||
140 | unsigned long intack = 0; | 137 | unsigned long intack = 0; |
141 | |||
142 | /* All ISUs are setup, complete initialization */ | ||
143 | mpic_init(pSeries_mpic); | ||
144 | |||
145 | /* Check what kind of cascade ACK we have */ | ||
146 | if (!(np = of_find_node_by_name(NULL, "pci")) | ||
147 | || !(addrp = (unsigned int *) | ||
148 | get_property(np, "8259-interrupt-acknowledge", NULL))) | ||
149 | printk(KERN_ERR "Cannot find pci to get ack address\n"); | ||
150 | else | ||
151 | intack = addrp[prom_n_addr_cells(np)-1]; | ||
152 | of_node_put(np); | ||
153 | |||
154 | /* Setup the legacy interrupts & controller */ | ||
155 | i8259_init(intack, 0); | ||
156 | |||
157 | /* Hook cascade to mpic */ | ||
158 | set_irq_chained_handler(NUM_ISA_INTERRUPTS, pSeries_8259_cascade); | ||
159 | } | ||
160 | |||
161 | static void __init pSeries_setup_mpic(void) | ||
162 | { | ||
163 | unsigned int *opprop; | 138 | unsigned int *opprop; |
164 | unsigned long openpic_addr = 0; | 139 | unsigned long openpic_addr = 0; |
165 | unsigned char senses[NR_IRQS - NUM_ISA_INTERRUPTS]; | 140 | unsigned int cascade_irq; |
166 | struct device_node *root; | 141 | int naddr, n, i, opplen; |
167 | int irq_count; | 142 | struct mpic *mpic; |
168 | 143 | ||
169 | /* Find the Open PIC if present */ | 144 | np = of_find_node_by_path("/"); |
170 | root = of_find_node_by_path("/"); | 145 | naddr = prom_n_addr_cells(np); |
171 | opprop = (unsigned int *) get_property(root, "platform-open-pic", NULL); | 146 | opprop = (unsigned int *) get_property(np, "platform-open-pic", &opplen); |
172 | if (opprop != 0) { | 147 | if (opprop != 0) { |
173 | int n = prom_n_addr_cells(root); | 148 | openpic_addr = of_read_number(opprop, naddr); |
174 | |||
175 | for (openpic_addr = 0; n > 0; --n) | ||
176 | openpic_addr = (openpic_addr << 32) + *opprop++; | ||
177 | printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr); | 149 | printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr); |
178 | } | 150 | } |
179 | of_node_put(root); | 151 | of_node_put(np); |
180 | 152 | ||
181 | BUG_ON(openpic_addr == 0); | 153 | BUG_ON(openpic_addr == 0); |
182 | 154 | ||
183 | /* Get the sense values from OF */ | ||
184 | prom_get_irq_senses(senses, NUM_ISA_INTERRUPTS, NR_IRQS); | ||
185 | |||
186 | /* Setup the openpic driver */ | 155 | /* Setup the openpic driver */ |
187 | irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */ | 156 | mpic = mpic_alloc(pSeries_mpic_node, openpic_addr, |
188 | pSeries_mpic = mpic_alloc(openpic_addr, MPIC_PRIMARY, | 157 | MPIC_PRIMARY, |
189 | 16, 16, irq_count, /* isu size, irq offset, irq count */ | 158 | 16, 250, /* isu size, irq count */ |
190 | NR_IRQS - 4, /* ipi offset */ | 159 | " MPIC "); |
191 | senses, irq_count, /* sense & sense size */ | 160 | BUG_ON(mpic == NULL); |
192 | " MPIC "); | 161 | |
162 | /* Add ISUs */ | ||
163 | opplen /= sizeof(u32); | ||
164 | for (n = 0, i = naddr; i < opplen; i += naddr, n++) { | ||
165 | unsigned long isuaddr = of_read_number(opprop + i, naddr); | ||
166 | mpic_assign_isu(mpic, n, isuaddr); | ||
167 | } | ||
168 | |||
169 | /* All ISUs are setup, complete initialization */ | ||
170 | mpic_init(mpic); | ||
171 | |||
172 | /* Look for cascade */ | ||
173 | for_each_node_by_type(np, "interrupt-controller") | ||
174 | if (device_is_compatible(np, "chrp,iic")) { | ||
175 | cascade = np; | ||
176 | break; | ||
177 | } | ||
178 | if (cascade == NULL) | ||
179 | return; | ||
180 | |||
181 | cascade_irq = irq_of_parse_and_map(cascade, 0); | ||
182 | if (cascade == NO_IRQ) { | ||
183 | printk(KERN_ERR "xics: failed to map cascade interrupt"); | ||
184 | return; | ||
185 | } | ||
186 | |||
187 | /* Check ACK type */ | ||
188 | for (old = of_node_get(cascade); old != NULL ; old = np) { | ||
189 | np = of_get_parent(old); | ||
190 | of_node_put(old); | ||
191 | if (np == NULL) | ||
192 | break; | ||
193 | if (strcmp(np->name, "pci") != 0) | ||
194 | continue; | ||
195 | addrp = (u32 *)get_property(np, "8259-interrupt-acknowledge", | ||
196 | NULL); | ||
197 | if (addrp == NULL) | ||
198 | continue; | ||
199 | naddr = prom_n_addr_cells(np); | ||
200 | intack = addrp[naddr-1]; | ||
201 | if (naddr > 1) | ||
202 | intack |= ((unsigned long)addrp[naddr-2]) << 32; | ||
203 | } | ||
204 | if (intack) | ||
205 | printk(KERN_DEBUG "mpic: PCI 8259 intack at 0x%016lx\n", | ||
206 | intack); | ||
207 | i8259_init(cascade, intack); | ||
208 | of_node_put(cascade); | ||
209 | set_irq_chained_handler(cascade_irq, pseries_8259_cascade); | ||
193 | } | 210 | } |
194 | 211 | ||
195 | static void pseries_lpar_enable_pmcs(void) | 212 | static void pseries_lpar_enable_pmcs(void) |
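
To find the 8259 interrupt-acknowledge address, the new init code walks up from the cascade node through its parents looking for a "pci" node carrying an "8259-interrupt-acknowledge" property, then reassembles the address from its last one or two cells. The same walk appears again in xics.c below; a compact sketch, assuming `found` is the cascade device node:

    unsigned long intack = 0;
    struct device_node *np, *old;
    u32 *addrp;
    int naddr;

    for (old = of_node_get(found); old != NULL; old = np) {
            np = of_get_parent(old);
            of_node_put(old);
            if (np == NULL)
                    break;
            if (strcmp(np->name, "pci") != 0)
                    continue;
            addrp = (u32 *)get_property(np, "8259-interrupt-acknowledge", NULL);
            if (addrp == NULL)
                    continue;
            naddr = prom_n_addr_cells(np);
            intack = addrp[naddr - 1];              /* low word */
            if (naddr > 1)                          /* high word, if 2 cells */
                    intack |= ((unsigned long)addrp[naddr - 2]) << 32;
    }
    i8259_init(found, intack);
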
@@ -207,21 +224,67 @@ static void pseries_lpar_enable_pmcs(void) | |||
207 | get_lppaca()->pmcregs_in_use = 1; | 224 | get_lppaca()->pmcregs_in_use = 1; |
208 | } | 225 | } |
209 | 226 | ||
210 | static void __init pSeries_setup_arch(void) | 227 | #ifdef CONFIG_KEXEC |
228 | static void pseries_kexec_cpu_down_mpic(int crash_shutdown, int secondary) | ||
211 | { | 229 | { |
212 | /* Fixup ppc_md depending on the type of interrupt controller */ | 230 | mpic_teardown_this_cpu(secondary); |
213 | if (ppc64_interrupt_controller == IC_OPEN_PIC) { | 231 | } |
214 | ppc_md.init_IRQ = pSeries_init_mpic; | ||
215 | ppc_md.get_irq = mpic_get_irq; | ||
216 | /* Allocate the mpic now, so that find_and_init_phbs() can | ||
217 | * fill the ISUs */ | ||
218 | pSeries_setup_mpic(); | ||
219 | } else | ||
220 | ppc_md.init_IRQ = xics_init_IRQ; | ||
221 | 232 | ||
233 | static void pseries_kexec_cpu_down_xics(int crash_shutdown, int secondary) | ||
234 | { | ||
235 | /* Don't risk a hypervisor call if we're crashing */ | ||
236 | if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) { | ||
237 | unsigned long vpa = __pa(get_lppaca()); | ||
238 | |||
239 | if (unregister_vpa(hard_smp_processor_id(), vpa)) { | ||
240 | printk("VPA deregistration of cpu %u (hw_cpu_id %d) " | ||
241 | "failed\n", smp_processor_id(), | ||
242 | hard_smp_processor_id()); | ||
243 | } | ||
244 | } | ||
245 | xics_teardown_cpu(secondary); | ||
246 | } | ||
247 | #endif /* CONFIG_KEXEC */ | ||
248 | |||
249 | static void __init pseries_discover_pic(void) | ||
250 | { | ||
251 | struct device_node *np; | ||
252 | char *typep; | ||
253 | |||
254 | for (np = NULL; (np = of_find_node_by_name(np, | ||
255 | "interrupt-controller"));) { | ||
256 | typep = (char *)get_property(np, "compatible", NULL); | ||
257 | if (strstr(typep, "open-pic")) { | ||
258 | pSeries_mpic_node = of_node_get(np); | ||
259 | ppc_md.init_IRQ = pseries_mpic_init_IRQ; | ||
260 | ppc_md.get_irq = mpic_get_irq; | ||
261 | #ifdef CONFIG_KEXEC | ||
262 | ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_mpic; | ||
263 | #endif | ||
222 | #ifdef CONFIG_SMP | 264 | #ifdef CONFIG_SMP |
223 | smp_init_pSeries(); | 265 | smp_init_pseries_mpic(); |
224 | #endif | 266 | #endif |
267 | return; | ||
268 | } else if (strstr(typep, "ppc-xicp")) { | ||
269 | ppc_md.init_IRQ = xics_init_IRQ; | ||
270 | #ifdef CONFIG_KEXEC | ||
271 | ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_xics; | ||
272 | #endif | ||
273 | #ifdef CONFIG_SMP | ||
274 | smp_init_pseries_xics(); | ||
275 | #endif | ||
276 | return; | ||
277 | } | ||
278 | } | ||
279 | printk(KERN_ERR "pSeries_discover_pic: failed to recognize" | ||
280 | " interrupt-controller\n"); | ||
281 | } | ||
282 | |||
283 | static void __init pSeries_setup_arch(void) | ||
284 | { | ||
285 | /* Discover PIC type and setup ppc_md accordingly */ | ||
286 | pseries_discover_pic(); | ||
287 | |||
225 | /* openpic global configuration register (64-bit format). */ | 288 | /* openpic global configuration register (64-bit format). */ |
226 | /* openpic Interrupt Source Unit pointer (64-bit format). */ | 289 | /* openpic Interrupt Source Unit pointer (64-bit format). */ |
227 | /* python0 facility area (mmio) (64-bit format) REAL address. */ | 290 | /* python0 facility area (mmio) (64-bit format) REAL address. */ |
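
pseries_discover_pic() is now the single place that recognizes the controller and directly installs the matching ppc_md callbacks (plus the kexec cpu-down hook and SMP init variant), instead of stashing a ppc64_interrupt_controller value for later. Stripped to its core (kexec/SMP hooks omitted; the NULL check on the compatible property is my addition, not in the patch):

    struct device_node *np;
    char *compat;

    for (np = NULL;
         (np = of_find_node_by_name(np, "interrupt-controller")); ) {
            compat = (char *)get_property(np, "compatible", NULL);
            if (compat && strstr(compat, "open-pic")) {
                    pSeries_mpic_node = of_node_get(np);
                    ppc_md.init_IRQ = pseries_mpic_init_IRQ;
                    ppc_md.get_irq  = mpic_get_irq;
                    break;
            }
            if (compat && strstr(compat, "ppc-xicp")) {
                    ppc_md.init_IRQ = xics_init_IRQ;
                    break;
            }
    }
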
@@ -273,33 +336,6 @@ static int __init pSeries_init_panel(void) | |||
273 | } | 336 | } |
274 | arch_initcall(pSeries_init_panel); | 337 | arch_initcall(pSeries_init_panel); |
275 | 338 | ||
276 | static void __init pSeries_discover_pic(void) | ||
277 | { | ||
278 | struct device_node *np; | ||
279 | char *typep; | ||
280 | |||
281 | /* | ||
282 | * Setup interrupt mapping options that are needed for finish_device_tree | ||
283 | * to properly parse the OF interrupt tree & do the virtual irq mapping | ||
284 | */ | ||
285 | __irq_offset_value = NUM_ISA_INTERRUPTS; | ||
286 | ppc64_interrupt_controller = IC_INVALID; | ||
287 | for (np = NULL; (np = of_find_node_by_name(np, "interrupt-controller"));) { | ||
288 | typep = (char *)get_property(np, "compatible", NULL); | ||
289 | if (strstr(typep, "open-pic")) { | ||
290 | ppc64_interrupt_controller = IC_OPEN_PIC; | ||
291 | break; | ||
292 | } else if (strstr(typep, "ppc-xicp")) { | ||
293 | ppc64_interrupt_controller = IC_PPC_XIC; | ||
294 | break; | ||
295 | } | ||
296 | } | ||
297 | if (ppc64_interrupt_controller == IC_INVALID) | ||
298 | printk("pSeries_discover_pic: failed to recognize" | ||
299 | " interrupt-controller\n"); | ||
300 | |||
301 | } | ||
302 | |||
303 | static void pSeries_mach_cpu_die(void) | 339 | static void pSeries_mach_cpu_die(void) |
304 | { | 340 | { |
305 | local_irq_disable(); | 341 | local_irq_disable(); |
@@ -342,8 +378,6 @@ static void __init pSeries_init_early(void) | |||
342 | 378 | ||
343 | iommu_init_early_pSeries(); | 379 | iommu_init_early_pSeries(); |
344 | 380 | ||
345 | pSeries_discover_pic(); | ||
346 | |||
347 | DBG(" <- pSeries_init_early()\n"); | 381 | DBG(" <- pSeries_init_early()\n"); |
348 | } | 382 | } |
349 | 383 | ||
@@ -515,27 +549,6 @@ static int pSeries_pci_probe_mode(struct pci_bus *bus) | |||
515 | return PCI_PROBE_NORMAL; | 549 | return PCI_PROBE_NORMAL; |
516 | } | 550 | } |
517 | 551 | ||
518 | #ifdef CONFIG_KEXEC | ||
519 | static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) | ||
520 | { | ||
521 | /* Don't risk a hypervisor call if we're crashing */ | ||
522 | if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) { | ||
523 | unsigned long vpa = __pa(get_lppaca()); | ||
524 | |||
525 | if (unregister_vpa(hard_smp_processor_id(), vpa)) { | ||
526 | printk("VPA deregistration of cpu %u (hw_cpu_id %d) " | ||
527 | "failed\n", smp_processor_id(), | ||
528 | hard_smp_processor_id()); | ||
529 | } | ||
530 | } | ||
531 | |||
532 | if (ppc64_interrupt_controller == IC_OPEN_PIC) | ||
533 | mpic_teardown_this_cpu(secondary); | ||
534 | else | ||
535 | xics_teardown_cpu(secondary); | ||
536 | } | ||
537 | #endif | ||
538 | |||
539 | define_machine(pseries) { | 552 | define_machine(pseries) { |
540 | .name = "pSeries", | 553 | .name = "pSeries", |
541 | .probe = pSeries_probe, | 554 | .probe = pSeries_probe, |
@@ -560,7 +573,6 @@ define_machine(pseries) { | |||
560 | .system_reset_exception = pSeries_system_reset_exception, | 573 | .system_reset_exception = pSeries_system_reset_exception, |
561 | .machine_check_exception = pSeries_machine_check_exception, | 574 | .machine_check_exception = pSeries_machine_check_exception, |
562 | #ifdef CONFIG_KEXEC | 575 | #ifdef CONFIG_KEXEC |
563 | .kexec_cpu_down = pseries_kexec_cpu_down, | ||
564 | .machine_kexec = default_machine_kexec, | 576 | .machine_kexec = default_machine_kexec, |
565 | .machine_kexec_prepare = default_machine_kexec_prepare, | 577 | .machine_kexec_prepare = default_machine_kexec_prepare, |
566 | .machine_crash_shutdown = default_machine_crash_shutdown, | 578 | .machine_crash_shutdown = default_machine_crash_shutdown, |
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 4ad144df49c2..ac61098ff401 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c | |||
@@ -416,27 +416,12 @@ static struct smp_ops_t pSeries_xics_smp_ops = { | |||
416 | #endif | 416 | #endif |
417 | 417 | ||
418 | /* This is called very early */ | 418 | /* This is called very early */ |
419 | void __init smp_init_pSeries(void) | 419 | static void __init smp_init_pseries(void) |
420 | { | 420 | { |
421 | int i; | 421 | int i; |
422 | 422 | ||
423 | DBG(" -> smp_init_pSeries()\n"); | 423 | DBG(" -> smp_init_pSeries()\n"); |
424 | 424 | ||
425 | switch (ppc64_interrupt_controller) { | ||
426 | #ifdef CONFIG_MPIC | ||
427 | case IC_OPEN_PIC: | ||
428 | smp_ops = &pSeries_mpic_smp_ops; | ||
429 | break; | ||
430 | #endif | ||
431 | #ifdef CONFIG_XICS | ||
432 | case IC_PPC_XIC: | ||
433 | smp_ops = &pSeries_xics_smp_ops; | ||
434 | break; | ||
435 | #endif | ||
436 | default: | ||
437 | panic("Invalid interrupt controller"); | ||
438 | } | ||
439 | |||
440 | #ifdef CONFIG_HOTPLUG_CPU | 425 | #ifdef CONFIG_HOTPLUG_CPU |
441 | smp_ops->cpu_disable = pSeries_cpu_disable; | 426 | smp_ops->cpu_disable = pSeries_cpu_disable; |
442 | smp_ops->cpu_die = pSeries_cpu_die; | 427 | smp_ops->cpu_die = pSeries_cpu_die; |
@@ -471,3 +456,18 @@ void __init smp_init_pSeries(void) | |||
471 | DBG(" <- smp_init_pSeries()\n"); | 456 | DBG(" <- smp_init_pSeries()\n"); |
472 | } | 457 | } |
473 | 458 | ||
459 | #ifdef CONFIG_MPIC | ||
460 | void __init smp_init_pseries_mpic(void) | ||
461 | { | ||
462 | smp_ops = &pSeries_mpic_smp_ops; | ||
463 | |||
464 | smp_init_pseries(); | ||
465 | } | ||
466 | #endif | ||
467 | |||
468 | void __init smp_init_pseries_xics(void) | ||
469 | { | ||
470 | smp_ops = &pSeries_xics_smp_ops; | ||
471 | |||
472 | smp_init_pseries(); | ||
473 | } | ||
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index c7f04420066d..716972aa9777 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | |||
12 | #undef DEBUG | ||
13 | |||
11 | #include <linux/types.h> | 14 | #include <linux/types.h> |
12 | #include <linux/threads.h> | 15 | #include <linux/threads.h> |
13 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
@@ -19,6 +22,7 @@ | |||
19 | #include <linux/gfp.h> | 22 | #include <linux/gfp.h> |
20 | #include <linux/radix-tree.h> | 23 | #include <linux/radix-tree.h> |
21 | #include <linux/cpu.h> | 24 | #include <linux/cpu.h> |
25 | |||
22 | #include <asm/firmware.h> | 26 | #include <asm/firmware.h> |
23 | #include <asm/prom.h> | 27 | #include <asm/prom.h> |
24 | #include <asm/io.h> | 28 | #include <asm/io.h> |
@@ -31,9 +35,6 @@ | |||
31 | 35 | ||
32 | #include "xics.h" | 36 | #include "xics.h" |
33 | 37 | ||
34 | /* This is used to map real irq numbers to virtual */ | ||
35 | static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC); | ||
36 | |||
37 | #define XICS_IPI 2 | 38 | #define XICS_IPI 2 |
38 | #define XICS_IRQ_SPURIOUS 0 | 39 | #define XICS_IRQ_SPURIOUS 0 |
39 | 40 | ||
@@ -64,12 +65,12 @@ struct xics_ipl { | |||
64 | 65 | ||
65 | static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS]; | 66 | static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS]; |
66 | 67 | ||
67 | static int xics_irq_8259_cascade = 0; | ||
68 | static int xics_irq_8259_cascade_real = 0; | ||
69 | static unsigned int default_server = 0xFF; | 68 | static unsigned int default_server = 0xFF; |
70 | static unsigned int default_distrib_server = 0; | 69 | static unsigned int default_distrib_server = 0; |
71 | static unsigned int interrupt_server_size = 8; | 70 | static unsigned int interrupt_server_size = 8; |
72 | 71 | ||
72 | static struct irq_host *xics_host; | ||
73 | |||
73 | /* | 74 | /* |
74 | * XICS only has a single IPI, so encode the messages per CPU | 75 | * XICS only has a single IPI, so encode the messages per CPU |
75 | */ | 76 | */ |
@@ -85,7 +86,7 @@ static int ibm_int_off; | |||
85 | /* Direct HW low level accessors */ | 86 | /* Direct HW low level accessors */ |
86 | 87 | ||
87 | 88 | ||
88 | static inline int direct_xirr_info_get(int n_cpu) | 89 | static inline unsigned int direct_xirr_info_get(int n_cpu) |
89 | { | 90 | { |
90 | return in_be32(&xics_per_cpu[n_cpu]->xirr.word); | 91 | return in_be32(&xics_per_cpu[n_cpu]->xirr.word); |
91 | } | 92 | } |
@@ -130,7 +131,7 @@ static inline long plpar_xirr(unsigned long *xirr_ret) | |||
130 | return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy); | 131 | return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy); |
131 | } | 132 | } |
132 | 133 | ||
133 | static inline int lpar_xirr_info_get(int n_cpu) | 134 | static inline unsigned int lpar_xirr_info_get(int n_cpu) |
134 | { | 135 | { |
135 | unsigned long lpar_rc; | 136 | unsigned long lpar_rc; |
136 | unsigned long return_value; | 137 | unsigned long return_value; |
@@ -138,7 +139,7 @@ static inline int lpar_xirr_info_get(int n_cpu) | |||
138 | lpar_rc = plpar_xirr(&return_value); | 139 | lpar_rc = plpar_xirr(&return_value); |
139 | if (lpar_rc != H_SUCCESS) | 140 | if (lpar_rc != H_SUCCESS) |
140 | panic(" bad return code xirr - rc = %lx \n", lpar_rc); | 141 | panic(" bad return code xirr - rc = %lx \n", lpar_rc); |
141 | return (int)return_value; | 142 | return (unsigned int)return_value; |
142 | } | 143 | } |
143 | 144 | ||
144 | static inline void lpar_xirr_info_set(int n_cpu, int value) | 145 | static inline void lpar_xirr_info_set(int n_cpu, int value) |
@@ -175,11 +176,11 @@ static inline void lpar_qirr_info(int n_cpu , u8 value) | |||
175 | 176 | ||
176 | 177 | ||
177 | #ifdef CONFIG_SMP | 178 | #ifdef CONFIG_SMP |
178 | static int get_irq_server(unsigned int irq) | 179 | static int get_irq_server(unsigned int virq) |
179 | { | 180 | { |
180 | unsigned int server; | 181 | unsigned int server; |
181 | /* For the moment only implement delivery to all cpus or one cpu */ | 182 | /* For the moment only implement delivery to all cpus or one cpu */ |
182 | cpumask_t cpumask = irq_desc[irq].affinity; | 183 | cpumask_t cpumask = irq_desc[virq].affinity; |
183 | cpumask_t tmp = CPU_MASK_NONE; | 184 | cpumask_t tmp = CPU_MASK_NONE; |
184 | 185 | ||
185 | if (!distribute_irqs) | 186 | if (!distribute_irqs) |
@@ -200,7 +201,7 @@ static int get_irq_server(unsigned int irq) | |||
200 | 201 | ||
201 | } | 202 | } |
202 | #else | 203 | #else |
203 | static int get_irq_server(unsigned int irq) | 204 | static int get_irq_server(unsigned int virq) |
204 | { | 205 | { |
205 | return default_server; | 206 | return default_server; |
206 | } | 207 | } |
@@ -213,9 +214,11 @@ static void xics_unmask_irq(unsigned int virq) | |||
213 | int call_status; | 214 | int call_status; |
214 | unsigned int server; | 215 | unsigned int server; |
215 | 216 | ||
216 | irq = virt_irq_to_real(irq_offset_down(virq)); | 217 | pr_debug("xics: unmask virq %d\n", virq); |
217 | WARN_ON(irq == NO_IRQ); | 218 | |
218 | if (irq == XICS_IPI || irq == NO_IRQ) | 219 | irq = (unsigned int)irq_map[virq].hwirq; |
220 | pr_debug(" -> map to hwirq 0x%x\n", irq); | ||
221 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) | ||
219 | return; | 222 | return; |
220 | 223 | ||
221 | server = get_irq_server(virq); | 224 | server = get_irq_server(virq); |
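
Every xics irq_chip operation now starts by translating the Linux virq back to the hardware source number through irq_map[], bailing out for the two special values (the IPI and the spurious vector, which doubles as the host's invalid-hwirq marker). The common prologue, in isolation:

    unsigned int hwirq = (unsigned int)irq_map[virq].hwirq;

    if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
            return;
    /* then program the source via rtas_call(ibm_set_xive / ibm_int_on / ...) */
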
@@ -267,75 +270,57 @@ static void xics_mask_irq(unsigned int virq) | |||
267 | { | 270 | { |
268 | unsigned int irq; | 271 | unsigned int irq; |
269 | 272 | ||
270 | irq = virt_irq_to_real(irq_offset_down(virq)); | 273 | pr_debug("xics: mask virq %d\n", virq); |
271 | WARN_ON(irq == NO_IRQ); | 274 | |
272 | if (irq != NO_IRQ) | 275 | irq = (unsigned int)irq_map[virq].hwirq; |
273 | xics_mask_real_irq(irq); | 276 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) |
277 | return; | ||
278 | xics_mask_real_irq(irq); | ||
274 | } | 279 | } |
275 | 280 | ||
276 | static void xics_set_irq_revmap(unsigned int virq) | 281 | static unsigned int xics_startup(unsigned int virq) |
277 | { | 282 | { |
278 | unsigned int irq; | 283 | unsigned int irq; |
279 | 284 | ||
280 | irq = irq_offset_down(virq); | 285 | /* force a reverse mapping of the interrupt so it gets in the cache */ |
281 | if (radix_tree_insert(&irq_map, virt_irq_to_real(irq), | 286 | irq = (unsigned int)irq_map[virq].hwirq; |
282 | &virt_irq_to_real_map[irq]) == -ENOMEM) | 287 | irq_radix_revmap(xics_host, irq); |
283 | printk(KERN_CRIT "Out of memory creating real -> virtual" | ||
284 | " IRQ mapping for irq %u (real 0x%x)\n", | ||
285 | virq, virt_irq_to_real(irq)); | ||
286 | } | ||
287 | 288 | ||
288 | static unsigned int xics_startup(unsigned int virq) | 289 | /* unmask it */ |
289 | { | ||
290 | xics_set_irq_revmap(virq); | ||
291 | xics_unmask_irq(virq); | 290 | xics_unmask_irq(virq); |
292 | return 0; | 291 | return 0; |
293 | } | 292 | } |
294 | 293 | ||
295 | static unsigned int real_irq_to_virt(unsigned int real_irq) | 294 | static void xics_eoi_direct(unsigned int virq) |
296 | { | ||
297 | unsigned int *ptr; | ||
298 | |||
299 | ptr = radix_tree_lookup(&irq_map, real_irq); | ||
300 | if (ptr == NULL) | ||
301 | return NO_IRQ; | ||
302 | return ptr - virt_irq_to_real_map; | ||
303 | } | ||
304 | |||
305 | static void xics_eoi_direct(unsigned int irq) | ||
306 | { | 295 | { |
307 | int cpu = smp_processor_id(); | 296 | int cpu = smp_processor_id(); |
297 | unsigned int irq = (unsigned int)irq_map[virq].hwirq; | ||
308 | 298 | ||
309 | iosync(); | 299 | iosync(); |
310 | direct_xirr_info_set(cpu, ((0xff << 24) | | 300 | direct_xirr_info_set(cpu, (0xff << 24) | irq); |
311 | (virt_irq_to_real(irq_offset_down(irq))))); | ||
312 | } | 301 | } |
313 | 302 | ||
314 | 303 | ||
315 | static void xics_eoi_lpar(unsigned int irq) | 304 | static void xics_eoi_lpar(unsigned int virq) |
316 | { | 305 | { |
317 | int cpu = smp_processor_id(); | 306 | int cpu = smp_processor_id(); |
307 | unsigned int irq = (unsigned int)irq_map[virq].hwirq; | ||
318 | 308 | ||
319 | iosync(); | 309 | iosync(); |
320 | lpar_xirr_info_set(cpu, ((0xff << 24) | | 310 | lpar_xirr_info_set(cpu, (0xff << 24) | irq); |
321 | (virt_irq_to_real(irq_offset_down(irq))))); | ||
322 | |||
323 | } | 311 | } |
324 | 312 | ||
325 | static inline int xics_remap_irq(int vec) | 313 | static inline unsigned int xics_remap_irq(unsigned int vec) |
326 | { | 314 | { |
327 | int irq; | 315 | unsigned int irq; |
328 | 316 | ||
329 | vec &= 0x00ffffff; | 317 | vec &= 0x00ffffff; |
330 | 318 | ||
331 | if (vec == XICS_IRQ_SPURIOUS) | 319 | if (vec == XICS_IRQ_SPURIOUS) |
332 | return NO_IRQ; | 320 | return NO_IRQ; |
333 | 321 | irq = irq_radix_revmap(xics_host, vec); | |
334 | irq = real_irq_to_virt(vec); | ||
335 | if (irq == NO_IRQ) | ||
336 | irq = real_irq_to_virt_slowpath(vec); | ||
337 | if (likely(irq != NO_IRQ)) | 322 | if (likely(irq != NO_IRQ)) |
338 | return irq_offset_up(irq); | 323 | return irq; |
339 | 324 | ||
340 | printk(KERN_ERR "Interrupt %u (real) is invalid," | 325 | printk(KERN_ERR "Interrupt %u (real) is invalid," |
341 | " disabling it.\n", vec); | 326 | " disabling it.\n", vec); |
@@ -343,14 +328,14 @@ static inline int xics_remap_irq(int vec) | |||
343 | return NO_IRQ; | 328 | return NO_IRQ; |
344 | } | 329 | } |
345 | 330 | ||
346 | static int xics_get_irq_direct(struct pt_regs *regs) | 331 | static unsigned int xics_get_irq_direct(struct pt_regs *regs) |
347 | { | 332 | { |
348 | unsigned int cpu = smp_processor_id(); | 333 | unsigned int cpu = smp_processor_id(); |
349 | 334 | ||
350 | return xics_remap_irq(direct_xirr_info_get(cpu)); | 335 | return xics_remap_irq(direct_xirr_info_get(cpu)); |
351 | } | 336 | } |
352 | 337 | ||
353 | static int xics_get_irq_lpar(struct pt_regs *regs) | 338 | static unsigned int xics_get_irq_lpar(struct pt_regs *regs) |
354 | { | 339 | { |
355 | unsigned int cpu = smp_processor_id(); | 340 | unsigned int cpu = smp_processor_id(); |
356 | 341 | ||
@@ -437,8 +422,8 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) | |||
437 | unsigned long newmask; | 422 | unsigned long newmask; |
438 | cpumask_t tmp = CPU_MASK_NONE; | 423 | cpumask_t tmp = CPU_MASK_NONE; |
439 | 424 | ||
440 | irq = virt_irq_to_real(irq_offset_down(virq)); | 425 | irq = (unsigned int)irq_map[virq].hwirq; |
441 | if (irq == XICS_IPI || irq == NO_IRQ) | 426 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) |
442 | return; | 427 | return; |
443 | 428 | ||
444 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); | 429 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); |
@@ -469,6 +454,24 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) | |||
469 | } | 454 | } |
470 | } | 455 | } |
471 | 456 | ||
457 | void xics_setup_cpu(void) | ||
458 | { | ||
459 | int cpu = smp_processor_id(); | ||
460 | |||
461 | xics_set_cpu_priority(cpu, 0xff); | ||
462 | |||
463 | /* | ||
464 | * Put the calling processor into the GIQ. This is really only | ||
465 | * necessary from a secondary thread as the OF start-cpu interface | ||
466 | * performs this function for us on primary threads. | ||
467 | * | ||
468 | * XXX: undo of teardown on kexec needs this too, as may hotplug | ||
469 | */ | ||
470 | rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, | ||
471 | (1UL << interrupt_server_size) - 1 - default_distrib_server, 1); | ||
472 | } | ||
473 | |||
474 | |||
472 | static struct irq_chip xics_pic_direct = { | 475 | static struct irq_chip xics_pic_direct = { |
473 | .typename = " XICS ", | 476 | .typename = " XICS ", |
474 | .startup = xics_startup, | 477 | .startup = xics_startup, |
@@ -489,90 +492,245 @@ static struct irq_chip xics_pic_lpar = { | |||
489 | }; | 492 | }; |
490 | 493 | ||
491 | 494 | ||
492 | void xics_setup_cpu(void) | 495 | static int xics_host_match(struct irq_host *h, struct device_node *node) |
493 | { | 496 | { |
494 | int cpu = smp_processor_id(); | 497 | /* IBM machines have interrupt parents of various funky types for things |
498 | * like vdevices, events, etc... The trick we use here is to match | ||
499 | * everything here except the legacy 8259 which is compatible "chrp,iic" | ||
500 | */ | ||
501 | return !device_is_compatible(node, "chrp,iic"); | ||
502 | } | ||
495 | 503 | ||
496 | xics_set_cpu_priority(cpu, 0xff); | 504 | static int xics_host_map_direct(struct irq_host *h, unsigned int virq, |
505 | irq_hw_number_t hw, unsigned int flags) | ||
506 | { | ||
507 | unsigned int sense = flags & IRQ_TYPE_SENSE_MASK; | ||
497 | 508 | ||
498 | /* | 509 | pr_debug("xics: map_direct virq %d, hwirq 0x%lx, flags: 0x%x\n", |
499 | * Put the calling processor into the GIQ. This is really only | 510 | virq, hw, flags); |
500 | * necessary from a secondary thread as the OF start-cpu interface | 511 | |
501 | * performs this function for us on primary threads. | 512 | if (sense && sense != IRQ_TYPE_LEVEL_LOW) |
502 | * | 513 | printk(KERN_WARNING "xics: using unsupported sense 0x%x" |
503 | * XXX: undo of teardown on kexec needs this too, as may hotplug | 514 | " for irq %d (h: 0x%lx)\n", flags, virq, hw); |
515 | |||
516 | get_irq_desc(virq)->status |= IRQ_LEVEL; | ||
517 | set_irq_chip_and_handler(virq, &xics_pic_direct, handle_fasteoi_irq); | ||
518 | return 0; | ||
519 | } | ||
520 | |||
521 | static int xics_host_map_lpar(struct irq_host *h, unsigned int virq, | ||
522 | irq_hw_number_t hw, unsigned int flags) | ||
523 | { | ||
524 | unsigned int sense = flags & IRQ_TYPE_SENSE_MASK; | ||
525 | |||
526 | pr_debug("xics: map_lpar virq %d, hwirq 0x%lx, flags: 0x%x\n", | ||
527 | virq, hw, flags); | ||
528 | |||
529 | if (sense && sense != IRQ_TYPE_LEVEL_LOW) | ||
530 | printk(KERN_WARNING "xics: using unsupported sense 0x%x" | ||
531 | " for irq %d (h: 0x%lx)\n", flags, virq, hw); | ||
532 | |||
533 | get_irq_desc(virq)->status |= IRQ_LEVEL; | ||
534 | set_irq_chip_and_handler(virq, &xics_pic_lpar, handle_fasteoi_irq); | ||
535 | return 0; | ||
536 | } | ||
537 | |||
538 | static int xics_host_xlate(struct irq_host *h, struct device_node *ct, | ||
539 | u32 *intspec, unsigned int intsize, | ||
540 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | ||
541 | |||
542 | { | ||
543 | /* Current xics implementation translates everything | ||
544 | * to level. It is not technically right for MSIs but this | ||
545 | * is irrelevant at this point. We might get smarter in the future | ||
504 | */ | 546 | */ |
505 | rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, | 547 | *out_hwirq = intspec[0]; |
506 | (1UL << interrupt_server_size) - 1 - default_distrib_server, 1); | 548 | *out_flags = IRQ_TYPE_LEVEL_LOW; |
549 | |||
550 | return 0; | ||
551 | } | ||
552 | |||
553 | static struct irq_host_ops xics_host_direct_ops = { | ||
554 | .match = xics_host_match, | ||
555 | .map = xics_host_map_direct, | ||
556 | .xlate = xics_host_xlate, | ||
557 | }; | ||
558 | |||
559 | static struct irq_host_ops xics_host_lpar_ops = { | ||
560 | .match = xics_host_match, | ||
561 | .map = xics_host_map_lpar, | ||
562 | .xlate = xics_host_xlate, | ||
563 | }; | ||
564 | |||
565 | static void __init xics_init_host(void) | ||
566 | { | ||
567 | struct irq_host_ops *ops; | ||
568 | |||
569 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
570 | ops = &xics_host_lpar_ops; | ||
571 | else | ||
572 | ops = &xics_host_direct_ops; | ||
573 | xics_host = irq_alloc_host(IRQ_HOST_MAP_TREE, 0, ops, | ||
574 | XICS_IRQ_SPURIOUS); | ||
575 | BUG_ON(xics_host == NULL); | ||
576 | irq_set_default_host(xics_host); | ||
507 | } | 577 | } |
508 | 578 | ||
509 | void xics_init_IRQ(void) | 579 | static void __init xics_map_one_cpu(int hw_id, unsigned long addr, |
580 | unsigned long size) | ||
510 | { | 581 | { |
582 | #ifdef CONFIG_SMP | ||
511 | int i; | 583 | int i; |
512 | unsigned long intr_size = 0; | ||
513 | struct device_node *np; | ||
514 | uint *ireg, ilen, indx = 0; | ||
515 | unsigned long intr_base = 0; | ||
516 | struct xics_interrupt_node { | ||
517 | unsigned long addr; | ||
518 | unsigned long size; | ||
519 | } intnodes[NR_CPUS]; | ||
520 | struct irq_chip *chip; | ||
521 | 584 | ||
522 | ppc64_boot_msg(0x20, "XICS Init"); | 585 | /* This may look gross but it's good enough for now, we don't quite |
586 | * have a hard -> linux processor id matching. | ||
587 | */ | ||
588 | for_each_possible_cpu(i) { | ||
589 | if (!cpu_present(i)) | ||
590 | continue; | ||
591 | if (hw_id == get_hard_smp_processor_id(i)) { | ||
592 | xics_per_cpu[i] = ioremap(addr, size); | ||
593 | return; | ||
594 | } | ||
595 | } | ||
596 | #else | ||
597 | if (hw_id != 0) | ||
598 | return; | ||
599 | xics_per_cpu[0] = ioremap(addr, size); | ||
600 | #endif /* CONFIG_SMP */ | ||
601 | } | ||
523 | 602 | ||
524 | ibm_get_xive = rtas_token("ibm,get-xive"); | 603 | static void __init xics_init_one_node(struct device_node *np, |
525 | ibm_set_xive = rtas_token("ibm,set-xive"); | 604 | unsigned int *indx) |
526 | ibm_int_on = rtas_token("ibm,int-on"); | 605 | { |
527 | ibm_int_off = rtas_token("ibm,int-off"); | 606 | unsigned int ilen; |
607 | u32 *ireg; | ||
528 | 608 | ||
529 | np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation"); | 609 | /* This code makes the theoretically broken assumption that the interrupt |
530 | if (!np) | 610 | * server numbers are the same as the hard CPU numbers. |
531 | panic("xics_init_IRQ: can't find interrupt presentation"); | 611 | * This happens to be the case so far but we are playing with fire... |
612 | * should be fixed one of these days. -BenH. | ||
613 | */ | ||
614 | ireg = (u32 *)get_property(np, "ibm,interrupt-server-ranges", NULL); | ||
532 | 615 | ||
533 | nextnode: | 616 | /* Does that ever happen? We'll know soon enough... but even good old |
534 | ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL); | 617 | * f80 does have that property .. |
618 | */ | ||
619 | WARN_ON(ireg == NULL); | ||
535 | if (ireg) { | 620 | if (ireg) { |
536 | /* | 621 | /* |
537 | * set node starting index for this node | 622 | * set node starting index for this node |
538 | */ | 623 | */ |
539 | indx = *ireg; | 624 | *indx = *ireg; |
540 | } | 625 | } |
541 | 626 | ireg = (u32 *)get_property(np, "reg", &ilen); | |
542 | ireg = (uint *)get_property(np, "reg", &ilen); | ||
543 | if (!ireg) | 627 | if (!ireg) |
544 | panic("xics_init_IRQ: can't find interrupt reg property"); | 628 | panic("xics_init_IRQ: can't find interrupt reg property"); |
545 | 629 | ||
546 | while (ilen) { | 630 | while (ilen >= (4 * sizeof(u32))) { |
547 | intnodes[indx].addr = (unsigned long)*ireg++ << 32; | 631 | unsigned long addr, size; |
548 | ilen -= sizeof(uint); | 632 | |
549 | intnodes[indx].addr |= *ireg++; | 633 | /* XXX Use proper OF parsing code here !!! */ |
550 | ilen -= sizeof(uint); | 634 | addr = (unsigned long)*ireg++ << 32; |
551 | intnodes[indx].size = (unsigned long)*ireg++ << 32; | 635 | ilen -= sizeof(u32); |
552 | ilen -= sizeof(uint); | 636 | addr |= *ireg++; |
553 | intnodes[indx].size |= *ireg++; | 637 | ilen -= sizeof(u32); |
554 | ilen -= sizeof(uint); | 638 | size = (unsigned long)*ireg++ << 32; |
555 | indx++; | 639 | ilen -= sizeof(u32); |
556 | if (indx >= NR_CPUS) break; | 640 | size |= *ireg++; |
641 | ilen -= sizeof(u32); | ||
642 | xics_map_one_cpu(*indx, addr, size); | ||
643 | (*indx)++; | ||
644 | } | ||
645 | } | ||
646 | |||
647 | |||
648 | static void __init xics_setup_8259_cascade(void) | ||
649 | { | ||
650 | struct device_node *np, *old, *found = NULL; | ||
651 | int cascade, naddr; | ||
652 | u32 *addrp; | ||
653 | unsigned long intack = 0; | ||
654 | |||
655 | for_each_node_by_type(np, "interrupt-controller") | ||
656 | if (device_is_compatible(np, "chrp,iic")) { | ||
657 | found = np; | ||
658 | break; | ||
659 | } | ||
660 | if (found == NULL) { | ||
661 | printk(KERN_DEBUG "xics: no ISA interrupt controller\n"); | ||
662 | return; | ||
557 | } | 663 | } |
664 | cascade = irq_of_parse_and_map(found, 0); | ||
665 | if (cascade == NO_IRQ) { | ||
666 | printk(KERN_ERR "xics: failed to map cascade interrupt"); | ||
667 | return; | ||
668 | } | ||
669 | pr_debug("xics: cascade mapped to irq %d\n", cascade); | ||
670 | |||
671 | for (old = of_node_get(found); old != NULL ; old = np) { | ||
672 | np = of_get_parent(old); | ||
673 | of_node_put(old); | ||
674 | if (np == NULL) | ||
675 | break; | ||
676 | if (strcmp(np->name, "pci") != 0) | ||
677 | continue; | ||
678 | addrp = (u32 *)get_property(np, "8259-interrupt-acknowledge", NULL); | ||
679 | if (addrp == NULL) | ||
680 | continue; | ||
681 | naddr = prom_n_addr_cells(np); | ||
682 | intack = addrp[naddr-1]; | ||
683 | if (naddr > 1) | ||
684 | intack |= ((unsigned long)addrp[naddr-2]) << 32; | ||
685 | } | ||
686 | if (intack) | ||
687 | printk(KERN_DEBUG "xics: PCI 8259 intack at 0x%016lx\n", intack); | ||
688 | i8259_init(found, intack); | ||
689 | of_node_put(found); | ||
690 | set_irq_chained_handler(cascade, pseries_8259_cascade); | ||
691 | } | ||
558 | 692 | ||
559 | np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation"); | 693 | void __init xics_init_IRQ(void) |
560 | if ((indx < NR_CPUS) && np) goto nextnode; | 694 | { |
695 | int i; | ||
696 | struct device_node *np; | ||
697 | u32 *ireg, ilen, indx = 0; | ||
698 | int found = 0; | ||
699 | |||
700 | ppc64_boot_msg(0x20, "XICS Init"); | ||
701 | |||
702 | ibm_get_xive = rtas_token("ibm,get-xive"); | ||
703 | ibm_set_xive = rtas_token("ibm,set-xive"); | ||
704 | ibm_int_on = rtas_token("ibm,int-on"); | ||
705 | ibm_int_off = rtas_token("ibm,int-off"); | ||
706 | |||
707 | for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") { | ||
708 | found = 1; | ||
709 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
710 | break; | ||
711 | xics_init_one_node(np, &indx); | ||
712 | } | ||
713 | if (found == 0) | ||
714 | return; | ||
715 | |||
716 | xics_init_host(); | ||
561 | 717 | ||
562 | /* Find the server numbers for the boot cpu. */ | 718 | /* Find the server numbers for the boot cpu. */ |
563 | for (np = of_find_node_by_type(NULL, "cpu"); | 719 | for (np = of_find_node_by_type(NULL, "cpu"); |
564 | np; | 720 | np; |
565 | np = of_find_node_by_type(np, "cpu")) { | 721 | np = of_find_node_by_type(np, "cpu")) { |
566 | ireg = (uint *)get_property(np, "reg", &ilen); | 722 | ireg = (u32 *)get_property(np, "reg", &ilen); |
567 | if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) { | 723 | if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) { |
568 | ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s", | 724 | ireg = (u32 *)get_property(np, |
569 | &ilen); | 725 | "ibm,ppc-interrupt-gserver#s", |
726 | &ilen); | ||
570 | i = ilen / sizeof(int); | 727 | i = ilen / sizeof(int); |
571 | if (ireg && i > 0) { | 728 | if (ireg && i > 0) { |
572 | default_server = ireg[0]; | 729 | default_server = ireg[0]; |
573 | default_distrib_server = ireg[i-1]; /* take last element */ | 730 | /* take last element */ |
731 | default_distrib_server = ireg[i-1]; | ||
574 | } | 732 | } |
575 | ireg = (uint *)get_property(np, | 733 | ireg = (u32 *)get_property(np, |
576 | "ibm,interrupt-server#-size", NULL); | 734 | "ibm,interrupt-server#-size", NULL); |
577 | if (ireg) | 735 | if (ireg) |
578 | interrupt_server_size = *ireg; | 736 | interrupt_server_size = *ireg; |
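
From a driver's point of view the machinery above collapses into one call: irq_of_parse_and_map() walks the OF tree, the default host's xlate/map ops (xics_host_xlate and xics_host_map_direct/_lpar) turn the specifier into a virq with the fasteoi flow installed, and the driver just requests it. A hypothetical consumer in this kernel's style (foo_* names are made up):

    static irqreturn_t foo_interrupt(int irq, void *dev_id, struct pt_regs *regs)
    {
            /* device-specific handling would go here */
            return IRQ_HANDLED;
    }

    static int __init foo_probe(struct device_node *np)
    {
            unsigned int virq = irq_of_parse_and_map(np, 0);

            if (virq == NO_IRQ)
                    return -ENODEV;
            return request_irq(virq, foo_interrupt, 0, "foo", NULL);
    }
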
@@ -581,102 +739,48 @@ nextnode: | |||
581 | } | 739 | } |
582 | of_node_put(np); | 740 | of_node_put(np); |
583 | 741 | ||
584 | intr_base = intnodes[0].addr; | 742 | if (firmware_has_feature(FW_FEATURE_LPAR)) |
585 | intr_size = intnodes[0].size; | 743 | ppc_md.get_irq = xics_get_irq_lpar; |
586 | 744 | else | |
587 | if (firmware_has_feature(FW_FEATURE_LPAR)) { | ||
588 | ppc_md.get_irq = xics_get_irq_lpar; | ||
589 | chip = &xics_pic_lpar; | ||
590 | } else { | ||
591 | #ifdef CONFIG_SMP | ||
592 | for_each_possible_cpu(i) { | ||
593 | int hard_id; | ||
594 | |||
595 | /* FIXME: Do this dynamically! --RR */ | ||
596 | if (!cpu_present(i)) | ||
597 | continue; | ||
598 | |||
599 | hard_id = get_hard_smp_processor_id(i); | ||
600 | xics_per_cpu[i] = ioremap(intnodes[hard_id].addr, | ||
601 | intnodes[hard_id].size); | ||
602 | } | ||
603 | #else | ||
604 | xics_per_cpu[0] = ioremap(intr_base, intr_size); | ||
605 | #endif /* CONFIG_SMP */ | ||
606 | ppc_md.get_irq = xics_get_irq_direct; | 745 | ppc_md.get_irq = xics_get_irq_direct; |
607 | chip = &xics_pic_direct; | ||
608 | |||
609 | } | ||
610 | |||
611 | for (i = irq_offset_value(); i < NR_IRQS; ++i) { | ||
612 | /* All IRQs on XICS are level for now. MSI code may want to modify | ||
613 | * that for reporting purposes | ||
614 | */ | ||
615 | get_irq_desc(i)->status |= IRQ_LEVEL; | ||
616 | set_irq_chip_and_handler(i, chip, handle_fasteoi_irq); | ||
617 | } | ||
618 | 746 | ||
619 | xics_setup_cpu(); | 747 | xics_setup_cpu(); |
620 | 748 | ||
621 | ppc64_boot_msg(0x21, "XICS Done"); | 749 | xics_setup_8259_cascade(); |
622 | } | ||
623 | 750 | ||
624 | static int xics_setup_8259_cascade(void) | 751 | ppc64_boot_msg(0x21, "XICS Done"); |
625 | { | ||
626 | struct device_node *np; | ||
627 | uint *ireg; | ||
628 | |||
629 | np = of_find_node_by_type(NULL, "interrupt-controller"); | ||
630 | if (np == NULL) { | ||
631 | printk(KERN_WARNING "xics: no ISA interrupt controller\n"); | ||
632 | xics_irq_8259_cascade_real = -1; | ||
633 | xics_irq_8259_cascade = -1; | ||
634 | return 0; | ||
635 | } | ||
636 | |||
637 | ireg = (uint *) get_property(np, "interrupts", NULL); | ||
638 | if (!ireg) | ||
639 | panic("xics_init_IRQ: can't find ISA interrupts property"); | ||
640 | |||
641 | xics_irq_8259_cascade_real = *ireg; | ||
642 | xics_irq_8259_cascade = irq_offset_up | ||
643 | (virt_irq_create_mapping(xics_irq_8259_cascade_real)); | ||
644 | i8259_init(0, 0); | ||
645 | of_node_put(np); | ||
646 | |||
647 | xics_set_irq_revmap(xics_irq_8259_cascade); | ||
648 | set_irq_chained_handler(xics_irq_8259_cascade, pSeries_8259_cascade); | ||
649 | |||
650 | return 0; | ||
651 | } | 752 | } |
652 | arch_initcall(xics_setup_8259_cascade); | ||
653 | 753 | ||
654 | 754 | ||
655 | #ifdef CONFIG_SMP | 755 | #ifdef CONFIG_SMP |
656 | void xics_request_IPIs(void) | 756 | void xics_request_IPIs(void) |
657 | { | 757 | { |
658 | virt_irq_to_real_map[XICS_IPI] = XICS_IPI; | 758 | unsigned int ipi; |
759 | |||
760 | ipi = irq_create_mapping(xics_host, XICS_IPI, 0); | ||
761 | BUG_ON(ipi == NO_IRQ); | ||
659 | 762 | ||
660 | /* | 763 | /* |
661 | * IPIs are marked IRQF_DISABLED as they must run with irqs | 764 | * IPIs are marked IRQF_DISABLED as they must run with irqs |
662 | * disabled | 765 | * disabled |
663 | */ | 766 | */ |
664 | set_irq_handler(irq_offset_up(XICS_IPI), handle_percpu_irq); | 767 | set_irq_handler(ipi, handle_percpu_irq); |
665 | if (firmware_has_feature(FW_FEATURE_LPAR)) | 768 | if (firmware_has_feature(FW_FEATURE_LPAR)) |
666 | request_irq(irq_offset_up(XICS_IPI), xics_ipi_action_lpar, | 769 | request_irq(ipi, xics_ipi_action_lpar, IRQF_DISABLED, |
667 | SA_INTERRUPT, "IPI", NULL); | 770 | "IPI", NULL); |
668 | else | 771 | else |
669 | request_irq(irq_offset_up(XICS_IPI), xics_ipi_action_direct, | 772 | request_irq(ipi, xics_ipi_action_direct, IRQF_DISABLED, |
670 | SA_INTERRUPT, "IPI", NULL); | 773 | "IPI", NULL); |
671 | } | 774 | } |
672 | #endif /* CONFIG_SMP */ | 775 | #endif /* CONFIG_SMP */ |
673 | 776 | ||
674 | void xics_teardown_cpu(int secondary) | 777 | void xics_teardown_cpu(int secondary) |
675 | { | 778 | { |
676 | struct irq_desc *desc = get_irq_desc(irq_offset_up(XICS_IPI)); | ||
677 | int cpu = smp_processor_id(); | 779 | int cpu = smp_processor_id(); |
780 | unsigned int ipi; | ||
781 | struct irq_desc *desc; | ||
678 | 782 | ||
679 | xics_set_cpu_priority(cpu, 0); | 783 | xics_set_cpu_priority(cpu, 0); |
680 | 784 | ||
681 | /* | 785 | /* |
682 | * we need to EOI the IPI if we got here from kexec down IPI | 786 | * we need to EOI the IPI if we got here from kexec down IPI |
@@ -685,6 +789,11 @@ void xics_teardown_cpu(int secondary) | |||
685 | * should we be flagging idle loop instead? | 789 | * should we be flagging idle loop instead? |
686 | * or creating some task to be scheduled? | 790 | * or creating some task to be scheduled? |
687 | */ | 791 | */ |
792 | |||
793 | ipi = irq_find_mapping(xics_host, XICS_IPI); | ||
794 | if (ipi == XICS_IRQ_SPURIOUS) | ||
795 | return; | ||
796 | desc = get_irq_desc(ipi); | ||
688 | if (desc->chip && desc->chip->eoi) | 797 | if (desc->chip && desc->chip->eoi) |
689 | desc->chip->eoi(XICS_IPI); | 798 | desc->chip->eoi(XICS_IPI); |
690 | 799 | ||
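
IPIs now live on the XICS host like any other source: xics_request_IPIs() creates an explicit mapping for hardware number XICS_IPI, and the teardown path looks the virq back up with irq_find_mapping() before EOIing. Condensed from the two hunks above (direct, non-LPAR variant shown):

    unsigned int ipi = irq_create_mapping(xics_host, XICS_IPI, 0);

    BUG_ON(ipi == NO_IRQ);
    set_irq_handler(ipi, handle_percpu_irq);                 /* per-CPU flow */
    request_irq(ipi, xics_ipi_action_direct, IRQF_DISABLED, "IPI", NULL);

    /* ... later, on cpu teardown ... */
    ipi = irq_find_mapping(xics_host, XICS_IPI);
    if (ipi != XICS_IRQ_SPURIOUS) {
            struct irq_desc *desc = get_irq_desc(ipi);

            if (desc->chip && desc->chip->eoi)
                    desc->chip->eoi(XICS_IPI);               /* as in the patch */
    }
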
@@ -694,8 +803,8 @@ void xics_teardown_cpu(int secondary) | |||
694 | */ | 803 | */ |
695 | if (secondary) | 804 | if (secondary) |
696 | rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, | 805 | rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, |
697 | (1UL << interrupt_server_size) - 1 - | 806 | (1UL << interrupt_server_size) - 1 - |
698 | default_distrib_server, 0); | 807 | default_distrib_server, 0); |
699 | } | 808 | } |
700 | 809 | ||
701 | #ifdef CONFIG_HOTPLUG_CPU | 810 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -723,15 +832,15 @@ void xics_migrate_irqs_away(void) | |||
723 | unsigned long flags; | 832 | unsigned long flags; |
724 | 833 | ||
725 | /* We cant set affinity on ISA interrupts */ | 834 | /* We cant set affinity on ISA interrupts */ |
726 | if (virq < irq_offset_value()) | 835 | if (virq < NUM_ISA_INTERRUPTS) |
727 | continue; | 836 | continue; |
728 | 837 | if (irq_map[virq].host != xics_host) | |
729 | desc = get_irq_desc(virq); | 838 | continue; |
730 | irq = virt_irq_to_real(irq_offset_down(virq)); | 839 | irq = (unsigned int)irq_map[virq].hwirq; |
731 | |||
732 | /* We need to get IPIs still. */ | 840 | /* We need to get IPIs still. */ |
733 | if (irq == XICS_IPI || irq == NO_IRQ) | 841 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) |
734 | continue; | 842 | continue; |
843 | desc = get_irq_desc(virq); | ||
735 | 844 | ||
736 | /* We only need to migrate enabled IRQS */ | 845 | /* We only need to migrate enabled IRQS */ |
737 | if (desc == NULL || desc->chip == NULL | 846 | if (desc == NULL || desc->chip == NULL |
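
When a CPU goes offline, only XICS-owned, non-ISA virqs are candidates for re-targeting; the loop now filters on the host pointer recorded in irq_map[] instead of numeric offsets. The filter in isolation, with `virq` as the loop index over NR_IRQS:

    if (virq < NUM_ISA_INTERRUPTS)
            continue;                       /* can't set affinity on ISA irqs */
    if (irq_map[virq].host != xics_host)
            continue;                       /* not one of ours */
    irq = (unsigned int)irq_map[virq].hwirq;
    if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
            continue;                       /* IPIs must stay */
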
diff --git a/arch/powerpc/platforms/pseries/xics.h b/arch/powerpc/platforms/pseries/xics.h index 67dedf3514ec..6ee1055b0ffb 100644 --- a/arch/powerpc/platforms/pseries/xics.h +++ b/arch/powerpc/platforms/pseries/xics.h | |||
@@ -31,7 +31,7 @@ struct xics_ipi_struct { | |||
31 | extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned; | 31 | extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned; |
32 | 32 | ||
33 | struct irq_desc; | 33 | struct irq_desc; |
34 | extern void pSeries_8259_cascade(unsigned int irq, struct irq_desc *desc, | 34 | extern void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc, |
35 | struct pt_regs *regs); | 35 | struct pt_regs *regs); |
36 | 36 | ||
37 | #endif /* _POWERPC_KERNEL_XICS_H */ | 37 | #endif /* _POWERPC_KERNEL_XICS_H */ |