author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2006-07-03 05:32:51 -0400
committer	Paul Mackerras <paulus@samba.org>	2006-07-03 05:55:12 -0400
commit	b9e5b4e6a991a5a6d521f2e20a65835404b4169f (patch)
tree	a0ac972faae4bf9133f576d842667bb134190341 /arch/powerpc/platforms/pseries/xics.c
parent	5a43a066b11ac2fe84cf67307f20b83bea390f83 (diff)
[POWERPC] Use the genirq framework
This adapts the generic powerpc interrupt handling code, and all of
the platforms except for the embedded 6xx machines, to use the new
genirq framework.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/platforms/pseries/xics.c')
-rw-r--r--	arch/powerpc/platforms/pseries/xics.c	443
1 file changed, 237 insertions(+), 206 deletions(-)
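
The conversion below follows the standard genirq pattern: the flow-control callbacks of the old struct hw_interrupt_type (enable/disable/ack/end) become mask/unmask/eoi methods on a struct irq_chip, and each interrupt source is bound to a generic flow handler that sequences those methods. A minimal sketch of the pattern for a level-triggered controller, for orientation only — my_pic, my_mask and NR_MY_IRQS are placeholder names, not identifiers from this patch:

	#include <linux/irq.h>	/* struct irq_chip, handle_fasteoi_irq */

	static void my_mask(unsigned int irq)
	{
		/* mask the source at the interrupt controller */
	}

	static void my_unmask(unsigned int irq)
	{
		/* unmask the source at the interrupt controller */
	}

	static void my_eoi(unsigned int irq)
	{
		/* signal end-of-interrupt to the controller */
	}

	static struct irq_chip my_pic = {
		.typename	= " MYPIC ",
		.mask		= my_mask,
		.unmask		= my_unmask,
		.eoi		= my_eoi,
	};

	static void __init my_pic_init(void)
	{
		unsigned int i;

		/* Bind each source to the chip and to the fasteoi flow;
		 * the genirq core now sequences mask/unmask/eoi. */
		for (i = 0; i < NR_MY_IRQS; i++)
			set_irq_chip_and_handler(i, &my_pic, handle_fasteoi_irq);
	}

With handle_fasteoi_irq, the core issues a single ->eoi after the handler completes and masks the source itself when no handler can run, which is why the hand-rolled ack/end pairing in the old XICS code can be dropped.
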
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 2ffebe31cb2d..c7f04420066d 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -31,23 +31,6 @@
 
 #include "xics.h"
 
-static unsigned int xics_startup(unsigned int irq);
-static void xics_enable_irq(unsigned int irq);
-static void xics_disable_irq(unsigned int irq);
-static void xics_mask_and_ack_irq(unsigned int irq);
-static void xics_end_irq(unsigned int irq);
-static void xics_set_affinity(unsigned int irq_nr, cpumask_t cpumask);
-
-static struct hw_interrupt_type xics_pic = {
-	.typename = " XICS ",
-	.startup = xics_startup,
-	.enable = xics_enable_irq,
-	.disable = xics_disable_irq,
-	.ack = xics_mask_and_ack_irq,
-	.end = xics_end_irq,
-	.set_affinity = xics_set_affinity
-};
-
 /* This is used to map real irq numbers to virtual */
 static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC);
 
@@ -98,47 +81,33 @@ static int ibm_set_xive;
 static int ibm_int_on;
 static int ibm_int_off;
 
-typedef struct {
-	int (*xirr_info_get)(int cpu);
-	void (*xirr_info_set)(int cpu, int val);
-	void (*cppr_info)(int cpu, u8 val);
-	void (*qirr_info)(int cpu, u8 val);
-} xics_ops;
 
+/* Direct HW low level accessors */
 
-/* SMP */
 
-static int pSeries_xirr_info_get(int n_cpu)
+static inline int direct_xirr_info_get(int n_cpu)
 {
 	return in_be32(&xics_per_cpu[n_cpu]->xirr.word);
 }
 
-static void pSeries_xirr_info_set(int n_cpu, int value)
+static inline void direct_xirr_info_set(int n_cpu, int value)
 {
 	out_be32(&xics_per_cpu[n_cpu]->xirr.word, value);
 }
 
-static void pSeries_cppr_info(int n_cpu, u8 value)
+static inline void direct_cppr_info(int n_cpu, u8 value)
 {
 	out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value);
 }
 
-static void pSeries_qirr_info(int n_cpu, u8 value)
+static inline void direct_qirr_info(int n_cpu, u8 value)
 {
 	out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
 }
 
-static xics_ops pSeries_ops = {
-	pSeries_xirr_info_get,
-	pSeries_xirr_info_set,
-	pSeries_cppr_info,
-	pSeries_qirr_info
-};
-
-static xics_ops *ops = &pSeries_ops;
 
+/* LPAR low level accessors */
 
-/* LPAR */
 
 static inline long plpar_eoi(unsigned long xirr)
 {
@@ -161,7 +130,7 @@ static inline long plpar_xirr(unsigned long *xirr_ret)
 	return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy);
 }
 
-static int pSeriesLP_xirr_info_get(int n_cpu)
+static inline int lpar_xirr_info_get(int n_cpu)
 {
 	unsigned long lpar_rc;
 	unsigned long return_value;
@@ -172,7 +141,7 @@ static int pSeriesLP_xirr_info_get(int n_cpu)
 	return (int)return_value;
 }
 
-static void pSeriesLP_xirr_info_set(int n_cpu, int value)
+static inline void lpar_xirr_info_set(int n_cpu, int value)
 {
 	unsigned long lpar_rc;
 	unsigned long val64 = value & 0xffffffff;
@@ -183,7 +152,7 @@ static void pSeriesLP_xirr_info_set(int n_cpu, int value)
 		val64);
 }
 
-void pSeriesLP_cppr_info(int n_cpu, u8 value)
+static inline void lpar_cppr_info(int n_cpu, u8 value)
 {
 	unsigned long lpar_rc;
 
@@ -192,7 +161,7 @@ void pSeriesLP_cppr_info(int n_cpu, u8 value)
 		panic("bad return code cppr - rc = %lx\n", lpar_rc);
 }
 
-static void pSeriesLP_qirr_info(int n_cpu , u8 value)
+static inline void lpar_qirr_info(int n_cpu , u8 value)
 {
 	unsigned long lpar_rc;
 
@@ -201,36 +170,9 @@ static void pSeriesLP_qirr_info(int n_cpu , u8 value)
 		panic("bad return code qirr - rc = %lx\n", lpar_rc);
 }
 
-xics_ops pSeriesLP_ops = {
-	pSeriesLP_xirr_info_get,
-	pSeriesLP_xirr_info_set,
-	pSeriesLP_cppr_info,
-	pSeriesLP_qirr_info
-};
 
-static unsigned int xics_startup(unsigned int virq)
-{
-	unsigned int irq;
+/* High level handlers and init code */
 
-	irq = irq_offset_down(virq);
-	if (radix_tree_insert(&irq_map, virt_irq_to_real(irq),
-			      &virt_irq_to_real_map[irq]) == -ENOMEM)
-		printk(KERN_CRIT "Out of memory creating real -> virtual"
-		       " IRQ mapping for irq %u (real 0x%x)\n",
-		       virq, virt_irq_to_real(irq));
-	xics_enable_irq(virq);
-	return 0;	/* return value is ignored */
-}
-
-static unsigned int real_irq_to_virt(unsigned int real_irq)
-{
-	unsigned int *ptr;
-
-	ptr = radix_tree_lookup(&irq_map, real_irq);
-	if (ptr == NULL)
-		return NO_IRQ;
-	return ptr - virt_irq_to_real_map;
-}
 
 #ifdef CONFIG_SMP
 static int get_irq_server(unsigned int irq)
@@ -264,17 +206,20 @@ static int get_irq_server(unsigned int irq)
 }
 #endif
 
-static void xics_enable_irq(unsigned int virq)
+
+static void xics_unmask_irq(unsigned int virq)
 {
 	unsigned int irq;
 	int call_status;
 	unsigned int server;
 
 	irq = virt_irq_to_real(irq_offset_down(virq));
-	if (irq == XICS_IPI)
+	WARN_ON(irq == NO_IRQ);
+	if (irq == XICS_IPI || irq == NO_IRQ)
 		return;
 
 	server = get_irq_server(virq);
+
 	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
 				DEFAULT_PRIORITY);
 	if (call_status != 0) {
@@ -293,7 +238,7 @@ static void xics_enable_irq(unsigned int virq)
 	}
 }
 
-static void xics_disable_real_irq(unsigned int irq)
+static void xics_mask_real_irq(unsigned int irq)
 {
 	int call_status;
 	unsigned int server;
@@ -318,75 +263,104 @@ static void xics_disable_real_irq(unsigned int irq)
 	}
 }
 
-static void xics_disable_irq(unsigned int virq)
+static void xics_mask_irq(unsigned int virq)
 {
 	unsigned int irq;
 
 	irq = virt_irq_to_real(irq_offset_down(virq));
-	xics_disable_real_irq(irq);
+	WARN_ON(irq == NO_IRQ);
+	if (irq != NO_IRQ)
+		xics_mask_real_irq(irq);
+}
+
+static void xics_set_irq_revmap(unsigned int virq)
+{
+	unsigned int irq;
+
+	irq = irq_offset_down(virq);
+	if (radix_tree_insert(&irq_map, virt_irq_to_real(irq),
+			      &virt_irq_to_real_map[irq]) == -ENOMEM)
+		printk(KERN_CRIT "Out of memory creating real -> virtual"
+		       " IRQ mapping for irq %u (real 0x%x)\n",
+		       virq, virt_irq_to_real(irq));
 }
 
-static void xics_end_irq(unsigned int irq)
+static unsigned int xics_startup(unsigned int virq)
+{
+	xics_set_irq_revmap(virq);
+	xics_unmask_irq(virq);
+	return 0;
+}
+
+static unsigned int real_irq_to_virt(unsigned int real_irq)
+{
+	unsigned int *ptr;
+
+	ptr = radix_tree_lookup(&irq_map, real_irq);
+	if (ptr == NULL)
+		return NO_IRQ;
+	return ptr - virt_irq_to_real_map;
+}
+
+static void xics_eoi_direct(unsigned int irq)
 {
 	int cpu = smp_processor_id();
 
 	iosync();
-	ops->xirr_info_set(cpu, ((0xff << 24) |
-			       (virt_irq_to_real(irq_offset_down(irq)))));
-
+	direct_xirr_info_set(cpu, ((0xff << 24) |
+				   (virt_irq_to_real(irq_offset_down(irq)))));
 }
 
-static void xics_mask_and_ack_irq(unsigned int irq)
+
+static void xics_eoi_lpar(unsigned int irq)
 {
 	int cpu = smp_processor_id();
 
-	if (irq < irq_offset_value()) {
-		i8259_pic.ack(irq);
-		iosync();
-		ops->xirr_info_set(cpu, ((0xff<<24) |
-					 xics_irq_8259_cascade_real));
-		iosync();
-	}
+	iosync();
+	lpar_xirr_info_set(cpu, ((0xff << 24) |
+				 (virt_irq_to_real(irq_offset_down(irq)))));
+
 }
 
-int xics_get_irq(struct pt_regs *regs)
+static inline int xics_remap_irq(int vec)
 {
-	unsigned int cpu = smp_processor_id();
-	unsigned int vec;
 	int irq;
 
-	vec = ops->xirr_info_get(cpu);
-	/* (vec >> 24) == old priority */
 	vec &= 0x00ffffff;
 
-	/* for sanity, this had better be < NR_IRQS - 16 */
-	if (vec == xics_irq_8259_cascade_real) {
-		irq = i8259_irq(regs);
-		xics_end_irq(irq_offset_up(xics_irq_8259_cascade));
-	} else if (vec == XICS_IRQ_SPURIOUS) {
-		irq = -1;
-	} else {
-		irq = real_irq_to_virt(vec);
-		if (irq == NO_IRQ)
-			irq = real_irq_to_virt_slowpath(vec);
-		if (irq == NO_IRQ) {
-			printk(KERN_ERR "Interrupt %u (real) is invalid,"
-			       " disabling it.\n", vec);
-			xics_disable_real_irq(vec);
-		} else
-			irq = irq_offset_up(irq);
-	}
-	return irq;
+	if (vec == XICS_IRQ_SPURIOUS)
+		return NO_IRQ;
+
+	irq = real_irq_to_virt(vec);
+	if (irq == NO_IRQ)
+		irq = real_irq_to_virt_slowpath(vec);
+	if (likely(irq != NO_IRQ))
+		return irq_offset_up(irq);
+
+	printk(KERN_ERR "Interrupt %u (real) is invalid,"
+	       " disabling it.\n", vec);
+	xics_mask_real_irq(vec);
+	return NO_IRQ;
 }
 
-#ifdef CONFIG_SMP
+static int xics_get_irq_direct(struct pt_regs *regs)
+{
+	unsigned int cpu = smp_processor_id();
 
-static irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
+	return xics_remap_irq(direct_xirr_info_get(cpu));
+}
+
+static int xics_get_irq_lpar(struct pt_regs *regs)
 {
-	int cpu = smp_processor_id();
+	unsigned int cpu = smp_processor_id();
 
-	ops->qirr_info(cpu, 0xff);
+	return xics_remap_irq(lpar_xirr_info_get(cpu));
+}
+
+#ifdef CONFIG_SMP
 
+static irqreturn_t xics_ipi_dispatch(int cpu, struct pt_regs *regs)
+{
 	WARN_ON(cpu_is_offline(cpu));
 
 	while (xics_ipi_message[cpu].value) {
@@ -418,18 +392,108 @@ static irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id, struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+
+	direct_qirr_info(cpu, 0xff);
+
+	return xics_ipi_dispatch(cpu, regs);
+}
+
+static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id, struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+
+	lpar_qirr_info(cpu, 0xff);
+
+	return xics_ipi_dispatch(cpu, regs);
+}
+
 void xics_cause_IPI(int cpu)
 {
-	ops->qirr_info(cpu, IPI_PRIORITY);
+	if (firmware_has_feature(FW_FEATURE_LPAR))
+		lpar_qirr_info(cpu, IPI_PRIORITY);
+	else
+		direct_qirr_info(cpu, IPI_PRIORITY);
 }
+
 #endif /* CONFIG_SMP */
 
+static void xics_set_cpu_priority(int cpu, unsigned char cppr)
+{
+	if (firmware_has_feature(FW_FEATURE_LPAR))
+		lpar_cppr_info(cpu, cppr);
+	else
+		direct_cppr_info(cpu, cppr);
+	iosync();
+}
+
+static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
+{
+	unsigned int irq;
+	int status;
+	int xics_status[2];
+	unsigned long newmask;
+	cpumask_t tmp = CPU_MASK_NONE;
+
+	irq = virt_irq_to_real(irq_offset_down(virq));
+	if (irq == XICS_IPI || irq == NO_IRQ)
+		return;
+
+	status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
+
+	if (status) {
+		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
+		       "returns %d\n", irq, status);
+		return;
+	}
+
+	/* For the moment only implement delivery to all cpus or one cpu */
+	if (cpus_equal(cpumask, CPU_MASK_ALL)) {
+		newmask = default_distrib_server;
+	} else {
+		cpus_and(tmp, cpu_online_map, cpumask);
+		if (cpus_empty(tmp))
+			return;
+		newmask = get_hard_smp_processor_id(first_cpu(tmp));
+	}
+
+	status = rtas_call(ibm_set_xive, 3, 1, NULL,
+			   irq, newmask, xics_status[1]);
+
+	if (status) {
+		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
+		       "returns %d\n", irq, status);
+		return;
+	}
+}
+
+static struct irq_chip xics_pic_direct = {
+	.typename = " XICS ",
+	.startup = xics_startup,
+	.mask = xics_mask_irq,
+	.unmask = xics_unmask_irq,
+	.eoi = xics_eoi_direct,
+	.set_affinity = xics_set_affinity
+};
+
+
+static struct irq_chip xics_pic_lpar = {
+	.typename = " XICS ",
+	.startup = xics_startup,
+	.mask = xics_mask_irq,
+	.unmask = xics_unmask_irq,
+	.eoi = xics_eoi_lpar,
+	.set_affinity = xics_set_affinity
+};
+
+
 void xics_setup_cpu(void)
 {
 	int cpu = smp_processor_id();
 
-	ops->cppr_info(cpu, 0xff);
-	iosync();
+	xics_set_cpu_priority(cpu, 0xff);
 
 	/*
 	 * Put the calling processor into the GIQ. This is really only
@@ -453,6 +517,7 @@ void xics_init_IRQ(void)
 		unsigned long addr;
 		unsigned long size;
 	} intnodes[NR_CPUS];
+	struct irq_chip *chip;
 
 	ppc64_boot_msg(0x20, "XICS Init");
 
@@ -519,26 +584,10 @@ nextnode:
 	intr_base = intnodes[0].addr;
 	intr_size = intnodes[0].size;
 
-	np = of_find_node_by_type(NULL, "interrupt-controller");
-	if (!np) {
-		printk(KERN_DEBUG "xics: no ISA interrupt controller\n");
-		xics_irq_8259_cascade_real = -1;
-		xics_irq_8259_cascade = -1;
-	} else {
-		ireg = (uint *) get_property(np, "interrupts", NULL);
-		if (!ireg)
-			panic("xics_init_IRQ: can't find ISA interrupts property");
-
-		xics_irq_8259_cascade_real = *ireg;
-		xics_irq_8259_cascade
-			= virt_irq_create_mapping(xics_irq_8259_cascade_real);
-		i8259_init(0, 0);
-		of_node_put(np);
-	}
-
-	if (firmware_has_feature(FW_FEATURE_LPAR))
-		ops = &pSeriesLP_ops;
-	else {
+	if (firmware_has_feature(FW_FEATURE_LPAR)) {
+		ppc_md.get_irq = xics_get_irq_lpar;
+		chip = &xics_pic_lpar;
+	} else {
 #ifdef CONFIG_SMP
 	for_each_possible_cpu(i) {
 		int hard_id;
@@ -554,32 +603,54 @@ nextnode:
 #else
 	xics_per_cpu[0] = ioremap(intr_base, intr_size);
 #endif /* CONFIG_SMP */
+	ppc_md.get_irq = xics_get_irq_direct;
+	chip = &xics_pic_direct;
+
 	}
 
-	for (i = irq_offset_value(); i < NR_IRQS; ++i)
-		get_irq_desc(i)->chip = &xics_pic;
+	for (i = irq_offset_value(); i < NR_IRQS; ++i) {
+		/* All IRQs on XICS are level for now. MSI code may want to modify
+		 * that for reporting purposes
+		 */
+		get_irq_desc(i)->status |= IRQ_LEVEL;
+		set_irq_chip_and_handler(i, chip, handle_fasteoi_irq);
+	}
 
 	xics_setup_cpu();
 
 	ppc64_boot_msg(0x21, "XICS Done");
 }
 
-/*
- * We cant do this in init_IRQ because we need the memory subsystem up for
- * request_irq()
- */
-static int __init xics_setup_i8259(void)
-{
-	if (ppc64_interrupt_controller == IC_PPC_XIC &&
-	    xics_irq_8259_cascade != -1) {
-		if (request_irq(irq_offset_up(xics_irq_8259_cascade),
-				no_action, 0, "8259 cascade", NULL))
-			printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 "
-			       "cascade\n");
-	}
+static int xics_setup_8259_cascade(void)
+{
+	struct device_node *np;
+	uint *ireg;
+
+	np = of_find_node_by_type(NULL, "interrupt-controller");
+	if (np == NULL) {
+		printk(KERN_WARNING "xics: no ISA interrupt controller\n");
+		xics_irq_8259_cascade_real = -1;
+		xics_irq_8259_cascade = -1;
+		return 0;
+	}
+
+	ireg = (uint *) get_property(np, "interrupts", NULL);
+	if (!ireg)
+		panic("xics_init_IRQ: can't find ISA interrupts property");
+
+	xics_irq_8259_cascade_real = *ireg;
+	xics_irq_8259_cascade = irq_offset_up
+		(virt_irq_create_mapping(xics_irq_8259_cascade_real));
+	i8259_init(0, 0);
+	of_node_put(np);
+
+	xics_set_irq_revmap(xics_irq_8259_cascade);
+	set_irq_chained_handler(xics_irq_8259_cascade, pSeries_8259_cascade);
+
 	return 0;
 }
-arch_initcall(xics_setup_i8259);
+arch_initcall(xics_setup_8259_cascade);
+
 
 #ifdef CONFIG_SMP
 void xics_request_IPIs(void)
@@ -590,61 +661,22 @@ void xics_request_IPIs(void)
 	 * IPIs are marked IRQF_DISABLED as they must run with irqs
 	 * disabled
 	 */
-	request_irq(irq_offset_up(XICS_IPI), xics_ipi_action,
-		    IRQF_DISABLED, "IPI", NULL);
-	get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU;
-}
-#endif
-
-static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
-{
-	unsigned int irq;
-	int status;
-	int xics_status[2];
-	unsigned long newmask;
-	cpumask_t tmp = CPU_MASK_NONE;
-
-	irq = virt_irq_to_real(irq_offset_down(virq));
-	if (irq == XICS_IPI || irq == NO_IRQ)
-		return;
-
-	status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
-
-	if (status) {
-		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
-		       "returns %d\n", irq, status);
-		return;
-	}
-
-	/* For the moment only implement delivery to all cpus or one cpu */
-	if (cpus_equal(cpumask, CPU_MASK_ALL)) {
-		newmask = default_distrib_server;
-	} else {
-		cpus_and(tmp, cpu_online_map, cpumask);
-		if (cpus_empty(tmp))
-			return;
-		newmask = get_hard_smp_processor_id(first_cpu(tmp));
-	}
-
-	status = rtas_call(ibm_set_xive, 3, 1, NULL,
-			   irq, newmask, xics_status[1]);
-
-	if (status) {
-		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
-		       "returns %d\n", irq, status);
-		return;
-	}
+	set_irq_handler(irq_offset_up(XICS_IPI), handle_percpu_irq);
+	if (firmware_has_feature(FW_FEATURE_LPAR))
+		request_irq(irq_offset_up(XICS_IPI), xics_ipi_action_lpar,
+			    SA_INTERRUPT, "IPI", NULL);
+	else
+		request_irq(irq_offset_up(XICS_IPI), xics_ipi_action_direct,
+			    SA_INTERRUPT, "IPI", NULL);
 }
+#endif /* CONFIG_SMP */
 
 void xics_teardown_cpu(int secondary)
 {
+	struct irq_desc *desc = get_irq_desc(irq_offset_up(XICS_IPI));
 	int cpu = smp_processor_id();
 
-	ops->cppr_info(cpu, 0x00);
-	iosync();
-
-	/* Clear IPI */
-	ops->qirr_info(cpu, 0xff);
+	xics_set_cpu_priority(cpu, 0);
 
 	/*
 	 * we need to EOI the IPI if we got here from kexec down IPI
@@ -653,7 +685,8 @@ void xics_teardown_cpu(int secondary)
 	 * should we be flagging idle loop instead?
 	 * or creating some task to be scheduled?
 	 */
-	ops->xirr_info_set(cpu, XICS_IPI);
+	if (desc->chip && desc->chip->eoi)
+		desc->chip->eoi(XICS_IPI);
 
 	/*
 	 * Some machines need to have at least one cpu in the GIQ,
@@ -674,8 +707,7 @@ void xics_migrate_irqs_away(void)
 	unsigned int irq, virq, cpu = smp_processor_id();
 
 	/* Reject any interrupt that was queued to us... */
-	ops->cppr_info(cpu, 0);
-	iosync();
+	xics_set_cpu_priority(cpu, 0);
 
 	/* remove ourselves from the global interrupt queue */
 	status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
@@ -683,11 +715,10 @@ void xics_migrate_irqs_away(void)
 	WARN_ON(status < 0);
 
 	/* Allow IPIs again... */
-	ops->cppr_info(cpu, DEFAULT_PRIORITY);
-	iosync();
+	xics_set_cpu_priority(cpu, DEFAULT_PRIORITY);
 
 	for_each_irq(virq) {
-		irq_desc_t *desc;
+		struct irq_desc *desc;
 		int xics_status[2];
 		unsigned long flags;
 