Diffstat (limited to 'arch/powerpc/platforms')
30 files changed, 1874 insertions, 1277 deletions
diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig
index 7675e675dce1..5fe7b7faf45f 100644
--- a/arch/powerpc/platforms/83xx/Kconfig
+++ b/arch/powerpc/platforms/83xx/Kconfig
@@ -16,12 +16,21 @@ config MPC834x_SYS
16 | 3 PCI slots. The PIBs PCI initialization is the bootloader's | 16 | 3 PCI slots. The PIBs PCI initialization is the bootloader's |
17 | responsiblilty. | 17 | responsiblilty. |
18 | 18 | ||
19 | config MPC834x_ITX | ||
20 | bool "Freescale MPC834x ITX" | ||
21 | select DEFAULT_UIMAGE | ||
22 | help | ||
23 | This option enables support for the MPC 834x ITX evaluation board. | ||
24 | |||
25 | Be aware that PCI initialization is the bootloader's | ||
26 | responsibility. | ||
27 | |||
19 | endchoice | 28 | endchoice |
20 | 29 | ||
21 | config MPC834x | 30 | config MPC834x |
22 | bool | 31 | bool |
23 | select PPC_UDBG_16550 | 32 | select PPC_UDBG_16550 |
24 | select PPC_INDIRECT_PCI | 33 | select PPC_INDIRECT_PCI |
25 | default y if MPC834x_SYS | 34 | default y if MPC834x_SYS || MPC834x_ITX |
26 | 35 | ||
27 | endmenu | 36 | endmenu |
diff --git a/arch/powerpc/platforms/83xx/Makefile b/arch/powerpc/platforms/83xx/Makefile
index 5c72367441a8..9387a110d28a 100644
--- a/arch/powerpc/platforms/83xx/Makefile
+++ b/arch/powerpc/platforms/83xx/Makefile
@@ -4,3 +4,4 @@
4 | obj-y := misc.o | 4 | obj-y := misc.o |
5 | obj-$(CONFIG_PCI) += pci.o | 5 | obj-$(CONFIG_PCI) += pci.o |
6 | obj-$(CONFIG_MPC834x_SYS) += mpc834x_sys.o | 6 | obj-$(CONFIG_MPC834x_SYS) += mpc834x_sys.o |
7 | obj-$(CONFIG_MPC834x_ITX) += mpc834x_itx.o | ||
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
new file mode 100644
index 000000000000..b46305645d38
--- /dev/null
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -0,0 +1,156 @@
1 | /* | ||
2 | * arch/powerpc/platforms/83xx/mpc834x_itx.c | ||
3 | * | ||
4 | * MPC834x ITX board specific routines | ||
5 | * | ||
6 | * Maintainer: Kumar Gala <galak@kernel.crashing.org> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/config.h> | ||
15 | #include <linux/stddef.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/reboot.h> | ||
20 | #include <linux/pci.h> | ||
21 | #include <linux/kdev_t.h> | ||
22 | #include <linux/major.h> | ||
23 | #include <linux/console.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/seq_file.h> | ||
26 | #include <linux/root_dev.h> | ||
27 | |||
28 | #include <asm/system.h> | ||
29 | #include <asm/atomic.h> | ||
30 | #include <asm/time.h> | ||
31 | #include <asm/io.h> | ||
32 | #include <asm/machdep.h> | ||
33 | #include <asm/ipic.h> | ||
34 | #include <asm/bootinfo.h> | ||
35 | #include <asm/irq.h> | ||
36 | #include <asm/prom.h> | ||
37 | #include <asm/udbg.h> | ||
38 | #include <sysdev/fsl_soc.h> | ||
39 | |||
40 | #include "mpc83xx.h" | ||
41 | |||
42 | #include <platforms/83xx/mpc834x_sys.h> | ||
43 | |||
44 | #ifndef CONFIG_PCI | ||
45 | unsigned long isa_io_base = 0; | ||
46 | unsigned long isa_mem_base = 0; | ||
47 | #endif | ||
48 | |||
49 | #ifdef CONFIG_PCI | ||
50 | static int | ||
51 | mpc83xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin) | ||
52 | { | ||
53 | static char pci_irq_table[][4] = | ||
54 | /* | ||
55 | * PCI IDSEL/INTPIN->INTLINE | ||
56 | * A B C D | ||
57 | */ | ||
58 | { | ||
59 | {PIRQB, PIRQC, PIRQD, PIRQA}, /* idsel 0x0e */ | ||
60 | {PIRQA, PIRQB, PIRQC, PIRQD}, /* idsel 0x0f */ | ||
61 | {PIRQC, PIRQD, PIRQA, PIRQB}, /* idsel 0x10 */ | ||
62 | }; | ||
63 | |||
64 | const long min_idsel = 0x0e, max_idsel = 0x10, irqs_per_slot = 4; | ||
65 | return PCI_IRQ_TABLE_LOOKUP; | ||
66 | } | ||
67 | #endif /* CONFIG_PCI */ | ||
68 | |||
69 | /* ************************************************************************ | ||
70 | * | ||
71 | * Setup the architecture | ||
72 | * | ||
73 | */ | ||
74 | static void __init mpc834x_itx_setup_arch(void) | ||
75 | { | ||
76 | struct device_node *np; | ||
77 | |||
78 | if (ppc_md.progress) | ||
79 | ppc_md.progress("mpc834x_itx_setup_arch()", 0); | ||
80 | |||
81 | np = of_find_node_by_type(NULL, "cpu"); | ||
82 | if (np != 0) { | ||
83 | unsigned int *fp = | ||
84 | (int *)get_property(np, "clock-frequency", NULL); | ||
85 | if (fp != 0) | ||
86 | loops_per_jiffy = *fp / HZ; | ||
87 | else | ||
88 | loops_per_jiffy = 50000000 / HZ; | ||
89 | of_node_put(np); | ||
90 | } | ||
91 | #ifdef CONFIG_PCI | ||
92 | for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;) | ||
93 | add_bridge(np); | ||
94 | |||
95 | ppc_md.pci_swizzle = common_swizzle; | ||
96 | ppc_md.pci_map_irq = mpc83xx_map_irq; | ||
97 | ppc_md.pci_exclude_device = mpc83xx_exclude_device; | ||
98 | #endif | ||
99 | |||
100 | #ifdef CONFIG_ROOT_NFS | ||
101 | ROOT_DEV = Root_NFS; | ||
102 | #else | ||
103 | ROOT_DEV = Root_HDA1; | ||
104 | #endif | ||
105 | } | ||
106 | |||
107 | void __init mpc834x_itx_init_IRQ(void) | ||
108 | { | ||
109 | u8 senses[8] = { | ||
110 | 0, /* EXT 0 */ | ||
111 | IRQ_SENSE_LEVEL, /* EXT 1 */ | ||
112 | IRQ_SENSE_LEVEL, /* EXT 2 */ | ||
113 | 0, /* EXT 3 */ | ||
114 | #ifdef CONFIG_PCI | ||
115 | IRQ_SENSE_LEVEL, /* EXT 4 */ | ||
116 | IRQ_SENSE_LEVEL, /* EXT 5 */ | ||
117 | IRQ_SENSE_LEVEL, /* EXT 6 */ | ||
118 | IRQ_SENSE_LEVEL, /* EXT 7 */ | ||
119 | #else | ||
120 | 0, /* EXT 4 */ | ||
121 | 0, /* EXT 5 */ | ||
122 | 0, /* EXT 6 */ | ||
123 | 0, /* EXT 7 */ | ||
124 | #endif | ||
125 | }; | ||
126 | |||
127 | ipic_init(get_immrbase() + 0x00700, 0, 0, senses, 8); | ||
128 | |||
129 | /* Initialize the default interrupt mapping priorities, | ||
130 | * in case the boot rom changed something on us. | ||
131 | */ | ||
132 | ipic_set_default_priority(); | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * Called very early, MMU is off, device-tree isn't unflattened | ||
137 | */ | ||
138 | static int __init mpc834x_itx_probe(void) | ||
139 | { | ||
140 | /* We always match for now, eventually we should look at the flat | ||
141 | dev tree to ensure this is the board we are supposed to run on | ||
142 | */ | ||
143 | return 1; | ||
144 | } | ||
145 | |||
146 | define_machine(mpc834x_itx) { | ||
147 | .name = "MPC834x ITX", | ||
148 | .probe = mpc834x_itx_probe, | ||
149 | .setup_arch = mpc834x_itx_setup_arch, | ||
150 | .init_IRQ = mpc834x_itx_init_IRQ, | ||
151 | .get_irq = ipic_get_irq, | ||
152 | .restart = mpc83xx_restart, | ||
153 | .time_init = mpc83xx_time_init, | ||
154 | .calibrate_decr = generic_calibrate_decr, | ||
155 | .progress = udbg_progress, | ||
156 | }; | ||
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.h b/arch/powerpc/platforms/83xx/mpc834x_itx.h
new file mode 100644
index 000000000000..174ca4ef55f3
--- /dev/null
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.h
@@ -0,0 +1,23 @@
1 | /* | ||
2 | * arch/powerpc/platforms/83xx/mpc834x_itx.h | ||
3 | * | ||
4 | * MPC834X ITX common board definitions | ||
5 | * | ||
6 | * Maintainer: Kumar Gala <galak@kernel.crashing.org> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #ifndef __MACH_MPC83XX_ITX_H__ | ||
16 | #define __MACH_MPC83XX_ITX_H__ | ||
17 | |||
18 | #define PIRQA MPC83xx_IRQ_EXT4 | ||
19 | #define PIRQB MPC83xx_IRQ_EXT5 | ||
20 | #define PIRQC MPC83xx_IRQ_EXT6 | ||
21 | #define PIRQD MPC83xx_IRQ_EXT7 | ||
22 | |||
23 | #endif /* __MACH_MPC83XX_ITX_H__ */ | ||
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 22da1335445a..9d5da7896892 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -1,6 +1,9 @@
1 | /* | 1 | /* |
2 | * Cell Internal Interrupt Controller | 2 | * Cell Internal Interrupt Controller |
3 | * | 3 | * |
4 | * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org) | ||
5 | * IBM, Corp. | ||
6 | * | ||
4 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 | 7 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 |
5 | * | 8 | * |
6 | * Author: Arnd Bergmann <arndb@de.ibm.com> | 9 | * Author: Arnd Bergmann <arndb@de.ibm.com> |
@@ -25,11 +28,13 @@
25 | #include <linux/module.h> | 28 | #include <linux/module.h> |
26 | #include <linux/percpu.h> | 29 | #include <linux/percpu.h> |
27 | #include <linux/types.h> | 30 | #include <linux/types.h> |
31 | #include <linux/ioport.h> | ||
28 | 32 | ||
29 | #include <asm/io.h> | 33 | #include <asm/io.h> |
30 | #include <asm/pgtable.h> | 34 | #include <asm/pgtable.h> |
31 | #include <asm/prom.h> | 35 | #include <asm/prom.h> |
32 | #include <asm/ptrace.h> | 36 | #include <asm/ptrace.h> |
37 | #include <asm/machdep.h> | ||
33 | 38 | ||
34 | #include "interrupt.h" | 39 | #include "interrupt.h" |
35 | #include "cbe_regs.h" | 40 | #include "cbe_regs.h" |
@@ -37,231 +42,65 @@
37 | struct iic { | 42 | struct iic { |
38 | struct cbe_iic_thread_regs __iomem *regs; | 43 | struct cbe_iic_thread_regs __iomem *regs; |
39 | u8 target_id; | 44 | u8 target_id; |
45 | u8 eoi_stack[16]; | ||
46 | int eoi_ptr; | ||
47 | struct irq_host *host; | ||
40 | }; | 48 | }; |
41 | 49 | ||
42 | static DEFINE_PER_CPU(struct iic, iic); | 50 | static DEFINE_PER_CPU(struct iic, iic); |
51 | #define IIC_NODE_COUNT 2 | ||
52 | static struct irq_host *iic_hosts[IIC_NODE_COUNT]; | ||
43 | 53 | ||
44 | void iic_local_enable(void) | 54 | /* Convert between "pending" bits and hw irq number */ |
55 | static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits) | ||
45 | { | 56 | { |
46 | struct iic *iic = &__get_cpu_var(iic); | 57 | unsigned char unit = bits.source & 0xf; |
47 | u64 tmp; | ||
48 | |||
49 | /* | ||
50 | * There seems to be a bug that is present in DD2.x CPUs | ||
51 | * and still only partially fixed in DD3.1. | ||
52 | * This bug causes a value written to the priority register | ||
53 | * not to make it there, resulting in a system hang unless we | ||
54 | * write it again. | ||
55 | * Masking with 0xf0 is done because the Cell BE does not | ||
56 | * implement the lower four bits of the interrupt priority, | ||
57 | * they always read back as zeroes, although future CPUs | ||
58 | * might implement different bits. | ||
59 | */ | ||
60 | do { | ||
61 | out_be64(&iic->regs->prio, 0xff); | ||
62 | tmp = in_be64(&iic->regs->prio); | ||
63 | } while ((tmp & 0xf0) != 0xf0); | ||
64 | } | ||
65 | |||
66 | void iic_local_disable(void) | ||
67 | { | ||
68 | out_be64(&__get_cpu_var(iic).regs->prio, 0x0); | ||
69 | } | ||
70 | 58 | ||
71 | static unsigned int iic_startup(unsigned int irq) | 59 | if (bits.flags & CBE_IIC_IRQ_IPI) |
72 | { | 60 | return IIC_IRQ_IPI0 | (bits.prio >> 4); |
73 | return 0; | 61 | else if (bits.class <= 3) |
62 | return (bits.class << 4) | unit; | ||
63 | else | ||
64 | return IIC_IRQ_INVALID; | ||
74 | } | 65 | } |
75 | 66 | ||
76 | static void iic_enable(unsigned int irq) | 67 | static void iic_mask(unsigned int irq) |
77 | { | 68 | { |
78 | iic_local_enable(); | ||
79 | } | 69 | } |
80 | 70 | ||
81 | static void iic_disable(unsigned int irq) | 71 | static void iic_unmask(unsigned int irq) |
82 | { | 72 | { |
83 | } | 73 | } |
84 | 74 | ||
85 | static void iic_end(unsigned int irq) | 75 | static void iic_eoi(unsigned int irq) |
86 | { | 76 | { |
87 | iic_local_enable(); | 77 | struct iic *iic = &__get_cpu_var(iic); |
78 | out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]); | ||
79 | BUG_ON(iic->eoi_ptr < 0); | ||
88 | } | 80 | } |
89 | 81 | ||
90 | static struct hw_interrupt_type iic_pic = { | 82 | static struct irq_chip iic_chip = { |
91 | .typename = " CELL-IIC ", | 83 | .typename = " CELL-IIC ", |
92 | .startup = iic_startup, | 84 | .mask = iic_mask, |
93 | .enable = iic_enable, | 85 | .unmask = iic_unmask, |
94 | .disable = iic_disable, | 86 | .eoi = iic_eoi, |
95 | .end = iic_end, | ||
96 | }; | 87 | }; |
97 | 88 | ||
98 | static int iic_external_get_irq(struct cbe_iic_pending_bits pending) | ||
99 | { | ||
100 | int irq; | ||
101 | unsigned char node, unit; | ||
102 | |||
103 | node = pending.source >> 4; | ||
104 | unit = pending.source & 0xf; | ||
105 | irq = -1; | ||
106 | |||
107 | /* | ||
108 | * This mapping is specific to the Cell Broadband | ||
109 | * Engine. We might need to get the numbers | ||
110 | * from the device tree to support future CPUs. | ||
111 | */ | ||
112 | switch (unit) { | ||
113 | case 0x00: | ||
114 | case 0x0b: | ||
115 | /* | ||
116 | * One of these units can be connected | ||
117 | * to an external interrupt controller. | ||
118 | */ | ||
119 | if (pending.class != 2) | ||
120 | break; | ||
121 | irq = IIC_EXT_OFFSET | ||
122 | + spider_get_irq(node) | ||
123 | + node * IIC_NODE_STRIDE; | ||
124 | break; | ||
125 | case 0x01 ... 0x04: | ||
126 | case 0x07 ... 0x0a: | ||
127 | /* | ||
128 | * These units are connected to the SPEs | ||
129 | */ | ||
130 | if (pending.class > 2) | ||
131 | break; | ||
132 | irq = IIC_SPE_OFFSET | ||
133 | + pending.class * IIC_CLASS_STRIDE | ||
134 | + node * IIC_NODE_STRIDE | ||
135 | + unit; | ||
136 | break; | ||
137 | } | ||
138 | if (irq == -1) | ||
139 | printk(KERN_WARNING "Unexpected interrupt class %02x, " | ||
140 | "source %02x, prio %02x, cpu %02x\n", pending.class, | ||
141 | pending.source, pending.prio, smp_processor_id()); | ||
142 | return irq; | ||
143 | } | ||
144 | |||
145 | /* Get an IRQ number from the pending state register of the IIC */ | 89 | /* Get an IRQ number from the pending state register of the IIC */ |
146 | int iic_get_irq(struct pt_regs *regs) | 90 | static unsigned int iic_get_irq(struct pt_regs *regs) |
147 | { | 91 | { |
148 | struct iic *iic; | 92 | struct cbe_iic_pending_bits pending; |
149 | int irq; | 93 | struct iic *iic; |
150 | struct cbe_iic_pending_bits pending; | 94 | |
151 | 95 | iic = &__get_cpu_var(iic); | |
152 | iic = &__get_cpu_var(iic); | 96 | *(unsigned long *) &pending = |
153 | *(unsigned long *) &pending = | 97 | in_be64((unsigned long __iomem *) &iic->regs->pending_destr); |
154 | in_be64((unsigned long __iomem *) &iic->regs->pending_destr); | 98 | iic->eoi_stack[++iic->eoi_ptr] = pending.prio; |
155 | 99 | BUG_ON(iic->eoi_ptr > 15); | |
156 | irq = -1; | 100 | if (pending.flags & CBE_IIC_IRQ_VALID) |
157 | if (pending.flags & CBE_IIC_IRQ_VALID) { | 101 | return irq_linear_revmap(iic->host, |
158 | if (pending.flags & CBE_IIC_IRQ_IPI) { | 102 | iic_pending_to_hwnum(pending)); |
159 | irq = IIC_IPI_OFFSET + (pending.prio >> 4); | 103 | return NO_IRQ; |
160 | /* | ||
161 | if (irq > 0x80) | ||
162 | printk(KERN_WARNING "Unexpected IPI prio %02x" | ||
163 | "on CPU %02x\n", pending.prio, | ||
164 | smp_processor_id()); | ||
165 | */ | ||
166 | } else { | ||
167 | irq = iic_external_get_irq(pending); | ||
168 | } | ||
169 | } | ||
170 | return irq; | ||
171 | } | ||
172 | |||
173 | /* hardcoded part to be compatible with older firmware */ | ||
174 | |||
175 | static int setup_iic_hardcoded(void) | ||
176 | { | ||
177 | struct device_node *np; | ||
178 | int nodeid, cpu; | ||
179 | unsigned long regs; | ||
180 | struct iic *iic; | ||
181 | |||
182 | for_each_possible_cpu(cpu) { | ||
183 | iic = &per_cpu(iic, cpu); | ||
184 | nodeid = cpu/2; | ||
185 | |||
186 | for (np = of_find_node_by_type(NULL, "cpu"); | ||
187 | np; | ||
188 | np = of_find_node_by_type(np, "cpu")) { | ||
189 | if (nodeid == *(int *)get_property(np, "node-id", NULL)) | ||
190 | break; | ||
191 | } | ||
192 | |||
193 | if (!np) { | ||
194 | printk(KERN_WARNING "IIC: CPU %d not found\n", cpu); | ||
195 | iic->regs = NULL; | ||
196 | iic->target_id = 0xff; | ||
197 | return -ENODEV; | ||
198 | } | ||
199 | |||
200 | regs = *(long *)get_property(np, "iic", NULL); | ||
201 | |||
202 | /* hack until we have decided on the devtree info */ | ||
203 | regs += 0x400; | ||
204 | if (cpu & 1) | ||
205 | regs += 0x20; | ||
206 | |||
207 | printk(KERN_INFO "IIC for CPU %d at %lx\n", cpu, regs); | ||
208 | iic->regs = ioremap(regs, sizeof(struct cbe_iic_thread_regs)); | ||
209 | iic->target_id = (nodeid << 4) + ((cpu & 1) ? 0xf : 0xe); | ||
210 | } | ||
211 | |||
212 | return 0; | ||
213 | } | ||
214 | |||
215 | static int setup_iic(void) | ||
216 | { | ||
217 | struct device_node *dn; | ||
218 | unsigned long *regs; | ||
219 | char *compatible; | ||
220 | unsigned *np, found = 0; | ||
221 | struct iic *iic = NULL; | ||
222 | |||
223 | for (dn = NULL; (dn = of_find_node_by_name(dn, "interrupt-controller"));) { | ||
224 | compatible = (char *)get_property(dn, "compatible", NULL); | ||
225 | |||
226 | if (!compatible) { | ||
227 | printk(KERN_WARNING "no compatible property found !\n"); | ||
228 | continue; | ||
229 | } | ||
230 | |||
231 | if (strstr(compatible, "IBM,CBEA-Internal-Interrupt-Controller")) | ||
232 | regs = (unsigned long *)get_property(dn,"reg", NULL); | ||
233 | else | ||
234 | continue; | ||
235 | |||
236 | if (!regs) | ||
237 | printk(KERN_WARNING "IIC: no reg property\n"); | ||
238 | |||
239 | np = (unsigned int *)get_property(dn, "ibm,interrupt-server-ranges", NULL); | ||
240 | |||
241 | if (!np) { | ||
242 | printk(KERN_WARNING "IIC: CPU association not found\n"); | ||
243 | iic->regs = NULL; | ||
244 | iic->target_id = 0xff; | ||
245 | return -ENODEV; | ||
246 | } | ||
247 | |||
248 | iic = &per_cpu(iic, np[0]); | ||
249 | iic->regs = ioremap(regs[0], sizeof(struct cbe_iic_thread_regs)); | ||
250 | iic->target_id = ((np[0] & 2) << 3) + ((np[0] & 1) ? 0xf : 0xe); | ||
251 | printk("IIC for CPU %d at %lx mapped to %p\n", np[0], regs[0], iic->regs); | ||
252 | |||
253 | iic = &per_cpu(iic, np[1]); | ||
254 | iic->regs = ioremap(regs[2], sizeof(struct cbe_iic_thread_regs)); | ||
255 | iic->target_id = ((np[1] & 2) << 3) + ((np[1] & 1) ? 0xf : 0xe); | ||
256 | printk("IIC for CPU %d at %lx mapped to %p\n", np[1], regs[2], iic->regs); | ||
257 | |||
258 | found++; | ||
259 | } | ||
260 | |||
261 | if (found) | ||
262 | return 0; | ||
263 | else | ||
264 | return -ENODEV; | ||
265 | } | 104 | } |
266 | 105 | ||
267 | #ifdef CONFIG_SMP | 106 | #ifdef CONFIG_SMP |
@@ -269,12 +108,12 @@ static int setup_iic(void)
269 | /* Use the highest interrupt priorities for IPI */ | 108 | /* Use the highest interrupt priorities for IPI */ |
270 | static inline int iic_ipi_to_irq(int ipi) | 109 | static inline int iic_ipi_to_irq(int ipi) |
271 | { | 110 | { |
272 | return IIC_IPI_OFFSET + IIC_NUM_IPIS - 1 - ipi; | 111 | return IIC_IRQ_IPI0 + IIC_NUM_IPIS - 1 - ipi; |
273 | } | 112 | } |
274 | 113 | ||
275 | static inline int iic_irq_to_ipi(int irq) | 114 | static inline int iic_irq_to_ipi(int irq) |
276 | { | 115 | { |
277 | return IIC_NUM_IPIS - 1 - (irq - IIC_IPI_OFFSET); | 116 | return IIC_NUM_IPIS - 1 - (irq - IIC_IRQ_IPI0); |
278 | } | 117 | } |
279 | 118 | ||
280 | void iic_setup_cpu(void) | 119 | void iic_setup_cpu(void) |
@@ -293,22 +132,51 @@ u8 iic_get_target_id(int cpu)
293 | } | 132 | } |
294 | EXPORT_SYMBOL_GPL(iic_get_target_id); | 133 | EXPORT_SYMBOL_GPL(iic_get_target_id); |
295 | 134 | ||
135 | struct irq_host *iic_get_irq_host(int node) | ||
136 | { | ||
137 | if (node < 0 || node >= IIC_NODE_COUNT) | ||
138 | return NULL; | ||
139 | return iic_hosts[node]; | ||
140 | } | ||
141 | EXPORT_SYMBOL_GPL(iic_get_irq_host); | ||
142 | |||
143 | |||
296 | static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) | 144 | static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) |
297 | { | 145 | { |
298 | smp_message_recv(iic_irq_to_ipi(irq), regs); | 146 | int ipi = (int)(long)dev_id; |
147 | |||
148 | smp_message_recv(ipi, regs); | ||
149 | |||
299 | return IRQ_HANDLED; | 150 | return IRQ_HANDLED; |
300 | } | 151 | } |
301 | 152 | ||
302 | static void iic_request_ipi(int ipi, const char *name) | 153 | static void iic_request_ipi(int ipi, const char *name) |
303 | { | 154 | { |
304 | int irq; | 155 | int node, virq; |
305 | 156 | ||
306 | irq = iic_ipi_to_irq(ipi); | 157 | for (node = 0; node < IIC_NODE_COUNT; node++) { |
307 | /* IPIs are marked IRQF_DISABLED as they must run with irqs | 158 | char *rname; |
308 | * disabled */ | 159 | if (iic_hosts[node] == NULL) |
309 | get_irq_desc(irq)->chip = &iic_pic; | 160 | continue; |
310 | get_irq_desc(irq)->status |= IRQ_PER_CPU; | 161 | virq = irq_create_mapping(iic_hosts[node], |
311 | request_irq(irq, iic_ipi_action, IRQF_DISABLED, name, NULL); | 162 | iic_ipi_to_irq(ipi), 0); |
163 | if (virq == NO_IRQ) { | ||
164 | printk(KERN_ERR | ||
165 | "iic: failed to map IPI %s on node %d\n", | ||
166 | name, node); | ||
167 | continue; | ||
168 | } | ||
169 | rname = kzalloc(strlen(name) + 16, GFP_KERNEL); | ||
170 | if (rname) | ||
171 | sprintf(rname, "%s node %d", name, node); | ||
172 | else | ||
173 | rname = (char *)name; | ||
174 | if (request_irq(virq, iic_ipi_action, IRQF_DISABLED, | ||
175 | rname, (void *)(long)ipi)) | ||
176 | printk(KERN_ERR | ||
177 | "iic: failed to request IPI %s on node %d\n", | ||
178 | name, node); | ||
179 | } | ||
312 | } | 180 | } |
313 | 181 | ||
314 | void iic_request_IPIs(void) | 182 | void iic_request_IPIs(void) |
@@ -319,34 +187,119 @@ void iic_request_IPIs(void)
319 | iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); | 187 | iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); |
320 | #endif /* CONFIG_DEBUGGER */ | 188 | #endif /* CONFIG_DEBUGGER */ |
321 | } | 189 | } |
190 | |||
322 | #endif /* CONFIG_SMP */ | 191 | #endif /* CONFIG_SMP */ |
323 | 192 | ||
324 | static void iic_setup_spe_handlers(void) | 193 | |
194 | static int iic_host_match(struct irq_host *h, struct device_node *node) | ||
195 | { | ||
196 | return h->host_data != NULL && node == h->host_data; | ||
197 | } | ||
198 | |||
199 | static int iic_host_map(struct irq_host *h, unsigned int virq, | ||
200 | irq_hw_number_t hw, unsigned int flags) | ||
201 | { | ||
202 | if (hw < IIC_IRQ_IPI0) | ||
203 | set_irq_chip_and_handler(virq, &iic_chip, handle_fasteoi_irq); | ||
204 | else | ||
205 | set_irq_chip_and_handler(virq, &iic_chip, handle_percpu_irq); | ||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | static int iic_host_xlate(struct irq_host *h, struct device_node *ct, | ||
210 | u32 *intspec, unsigned int intsize, | ||
211 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | ||
212 | |||
213 | { | ||
214 | /* Currently, we don't translate anything. That needs to be fixed as | ||
215 | * we get better defined device-trees. iic interrupts have to be | ||
216 | * explicitly mapped by whoever needs them | ||
217 | */ | ||
218 | return -ENODEV; | ||
219 | } | ||
220 | |||
221 | static struct irq_host_ops iic_host_ops = { | ||
222 | .match = iic_host_match, | ||
223 | .map = iic_host_map, | ||
224 | .xlate = iic_host_xlate, | ||
225 | }; | ||
226 | |||
227 | static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr, | ||
228 | struct irq_host *host) | ||
325 | { | 229 | { |
326 | int be, isrc; | 230 | /* XXX FIXME: should locate the linux CPU number from the HW cpu |
231 | * number properly. We are lucky for now | ||
232 | */ | ||
233 | struct iic *iic = &per_cpu(iic, hw_cpu); | ||
234 | |||
235 | iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs)); | ||
236 | BUG_ON(iic->regs == NULL); | ||
327 | 237 | ||
328 | /* Assume two threads per BE are present */ | 238 | iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe); |
329 | for (be=0; be < num_present_cpus() / 2; be++) { | 239 | iic->eoi_stack[0] = 0xff; |
330 | for (isrc = 0; isrc < IIC_CLASS_STRIDE * 3; isrc++) { | 240 | iic->host = host; |
331 | int irq = IIC_NODE_STRIDE * be + IIC_SPE_OFFSET + isrc; | 241 | out_be64(&iic->regs->prio, 0); |
332 | get_irq_desc(irq)->chip = &iic_pic; | 242 | |
243 | printk(KERN_INFO "IIC for CPU %d at %lx mapped to %p, target id 0x%x\n", | ||
244 | hw_cpu, addr, iic->regs, iic->target_id); | ||
245 | } | ||
246 | |||
247 | static int __init setup_iic(void) | ||
248 | { | ||
249 | struct device_node *dn; | ||
250 | struct resource r0, r1; | ||
251 | struct irq_host *host; | ||
252 | int found = 0; | ||
253 | u32 *np; | ||
254 | |||
255 | for (dn = NULL; | ||
256 | (dn = of_find_node_by_name(dn,"interrupt-controller")) != NULL;) { | ||
257 | if (!device_is_compatible(dn, | ||
258 | "IBM,CBEA-Internal-Interrupt-Controller")) | ||
259 | continue; | ||
260 | np = (u32 *)get_property(dn, "ibm,interrupt-server-ranges", | ||
261 | NULL); | ||
262 | if (np == NULL) { | ||
263 | printk(KERN_WARNING "IIC: CPU association not found\n"); | ||
264 | of_node_put(dn); | ||
265 | return -ENODEV; | ||
266 | } | ||
267 | if (of_address_to_resource(dn, 0, &r0) || | ||
268 | of_address_to_resource(dn, 1, &r1)) { | ||
269 | printk(KERN_WARNING "IIC: Can't resolve addresses\n"); | ||
270 | of_node_put(dn); | ||
271 | return -ENODEV; | ||
333 | } | 272 | } |
273 | host = NULL; | ||
274 | if (found < IIC_NODE_COUNT) { | ||
275 | host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, | ||
276 | IIC_SOURCE_COUNT, | ||
277 | &iic_host_ops, | ||
278 | IIC_IRQ_INVALID); | ||
279 | iic_hosts[found] = host; | ||
280 | BUG_ON(iic_hosts[found] == NULL); | ||
281 | iic_hosts[found]->host_data = of_node_get(dn); | ||
282 | found++; | ||
283 | } | ||
284 | init_one_iic(np[0], r0.start, host); | ||
285 | init_one_iic(np[1], r1.start, host); | ||
334 | } | 286 | } |
287 | |||
288 | if (found) | ||
289 | return 0; | ||
290 | else | ||
291 | return -ENODEV; | ||
335 | } | 292 | } |
336 | 293 | ||
337 | void iic_init_IRQ(void) | 294 | void __init iic_init_IRQ(void) |
338 | { | 295 | { |
339 | int cpu, irq_offset; | 296 | /* Discover and initialize iics */ |
340 | struct iic *iic; | ||
341 | |||
342 | if (setup_iic() < 0) | 297 | if (setup_iic() < 0) |
343 | setup_iic_hardcoded(); | 298 | panic("IIC: Failed to initialize !\n"); |
344 | 299 | ||
345 | irq_offset = 0; | 300 | /* Set master interrupt handling function */ |
346 | for_each_possible_cpu(cpu) { | 301 | ppc_md.get_irq = iic_get_irq; |
347 | iic = &per_cpu(iic, cpu); | 302 | |
348 | if (iic->regs) | 303 | /* Enable on current CPU */ |
349 | out_be64(&iic->regs->prio, 0xff); | 304 | iic_setup_cpu(); |
350 | } | ||
351 | iic_setup_spe_handlers(); | ||
352 | } | 305 | } |
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h
index 799f77d98f96..5560a92ec3ab 100644
--- a/arch/powerpc/platforms/cell/interrupt.h
+++ b/arch/powerpc/platforms/cell/interrupt.h
@@ -37,27 +37,24 @@
37 | */ | 37 | */ |
38 | 38 | ||
39 | enum { | 39 | enum { |
40 | IIC_EXT_OFFSET = 0x00, /* Start of south bridge IRQs */ | 40 | IIC_IRQ_INVALID = 0xff, |
41 | IIC_NUM_EXT = 0x40, /* Number of south bridge IRQs */ | 41 | IIC_IRQ_MAX = 0x3f, |
42 | IIC_SPE_OFFSET = 0x40, /* Start of SPE interrupts */ | 42 | IIC_IRQ_EXT_IOIF0 = 0x20, |
43 | IIC_CLASS_STRIDE = 0x10, /* SPE IRQs per class */ | 43 | IIC_IRQ_EXT_IOIF1 = 0x2b, |
44 | IIC_IPI_OFFSET = 0x70, /* Start of IPI IRQs */ | 44 | IIC_IRQ_IPI0 = 0x40, |
45 | IIC_NUM_IPIS = 0x10, /* IRQs reserved for IPI */ | 45 | IIC_NUM_IPIS = 0x10, /* IRQs reserved for IPI */ |
46 | IIC_NODE_STRIDE = 0x80, /* Total IRQs per node */ | 46 | IIC_SOURCE_COUNT = 0x50, |
47 | }; | 47 | }; |
48 | 48 | ||
49 | extern void iic_init_IRQ(void); | 49 | extern void iic_init_IRQ(void); |
50 | extern int iic_get_irq(struct pt_regs *regs); | ||
51 | extern void iic_cause_IPI(int cpu, int mesg); | 50 | extern void iic_cause_IPI(int cpu, int mesg); |
52 | extern void iic_request_IPIs(void); | 51 | extern void iic_request_IPIs(void); |
53 | extern void iic_setup_cpu(void); | 52 | extern void iic_setup_cpu(void); |
54 | extern void iic_local_enable(void); | ||
55 | extern void iic_local_disable(void); | ||
56 | 53 | ||
57 | extern u8 iic_get_target_id(int cpu); | 54 | extern u8 iic_get_target_id(int cpu); |
55 | extern struct irq_host *iic_get_irq_host(int node); | ||
58 | 56 | ||
59 | extern void spider_init_IRQ(void); | 57 | extern void spider_init_IRQ(void); |
60 | extern int spider_get_irq(int node); | ||
61 | 58 | ||
62 | #endif | 59 | #endif |
63 | #endif /* ASM_CELL_PIC_H */ | 60 | #endif /* ASM_CELL_PIC_H */ |
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index d8c2a29b3c15..282987d6d4a2 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -49,6 +49,7 @@
49 | #include <asm/irq.h> | 49 | #include <asm/irq.h> |
50 | #include <asm/spu.h> | 50 | #include <asm/spu.h> |
51 | #include <asm/spu_priv1.h> | 51 | #include <asm/spu_priv1.h> |
52 | #include <asm/udbg.h> | ||
52 | 53 | ||
53 | #include "interrupt.h" | 54 | #include "interrupt.h" |
54 | #include "iommu.h" | 55 | #include "iommu.h" |
@@ -79,10 +80,22 @@ static void cell_progress(char *s, unsigned short hex)
79 | printk("*** %04x : %s\n", hex, s ? s : ""); | 80 | printk("*** %04x : %s\n", hex, s ? s : ""); |
80 | } | 81 | } |
81 | 82 | ||
83 | static void __init cell_pcibios_fixup(void) | ||
84 | { | ||
85 | struct pci_dev *dev = NULL; | ||
86 | |||
87 | for_each_pci_dev(dev) | ||
88 | pci_read_irq_line(dev); | ||
89 | } | ||
90 | |||
91 | static void __init cell_init_irq(void) | ||
92 | { | ||
93 | iic_init_IRQ(); | ||
94 | spider_init_IRQ(); | ||
95 | } | ||
96 | |||
82 | static void __init cell_setup_arch(void) | 97 | static void __init cell_setup_arch(void) |
83 | { | 98 | { |
84 | ppc_md.init_IRQ = iic_init_IRQ; | ||
85 | ppc_md.get_irq = iic_get_irq; | ||
86 | #ifdef CONFIG_SPU_BASE | 99 | #ifdef CONFIG_SPU_BASE |
87 | spu_priv1_ops = &spu_priv1_mmio_ops; | 100 | spu_priv1_ops = &spu_priv1_mmio_ops; |
88 | #endif | 101 | #endif |
@@ -108,7 +121,6 @@ static void __init cell_setup_arch(void)
108 | /* Find and initialize PCI host bridges */ | 121 | /* Find and initialize PCI host bridges */ |
109 | init_pci_config_tokens(); | 122 | init_pci_config_tokens(); |
110 | find_and_init_phbs(); | 123 | find_and_init_phbs(); |
111 | spider_init_IRQ(); | ||
112 | cbe_pervasive_init(); | 124 | cbe_pervasive_init(); |
113 | #ifdef CONFIG_DUMMY_CONSOLE | 125 | #ifdef CONFIG_DUMMY_CONSOLE |
114 | conswitchp = &dummy_con; | 126 | conswitchp = &dummy_con; |
@@ -126,8 +138,6 @@ static void __init cell_init_early(void)
126 | 138 | ||
127 | cell_init_iommu(); | 139 | cell_init_iommu(); |
128 | 140 | ||
129 | ppc64_interrupt_controller = IC_CELL_PIC; | ||
130 | |||
131 | DBG(" <- cell_init_early()\n"); | 141 | DBG(" <- cell_init_early()\n"); |
132 | } | 142 | } |
133 | 143 | ||
@@ -173,6 +183,8 @@ define_machine(cell) {
173 | .calibrate_decr = generic_calibrate_decr, | 183 | .calibrate_decr = generic_calibrate_decr, |
174 | .check_legacy_ioport = cell_check_legacy_ioport, | 184 | .check_legacy_ioport = cell_check_legacy_ioport, |
175 | .progress = cell_progress, | 185 | .progress = cell_progress, |
186 | .init_IRQ = cell_init_irq, | ||
187 | .pcibios_fixup = cell_pcibios_fixup, | ||
176 | #ifdef CONFIG_KEXEC | 188 | #ifdef CONFIG_KEXEC |
177 | .machine_kexec = default_machine_kexec, | 189 | .machine_kexec = default_machine_kexec, |
178 | .machine_kexec_prepare = default_machine_kexec_prepare, | 190 | .machine_kexec_prepare = default_machine_kexec_prepare, |
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 7c3a0b6d34fd..ae7ef88f1a37 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -22,6 +22,7 @@
22 | 22 | ||
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/irq.h> | 24 | #include <linux/irq.h> |
25 | #include <linux/ioport.h> | ||
25 | 26 | ||
26 | #include <asm/pgtable.h> | 27 | #include <asm/pgtable.h> |
27 | #include <asm/prom.h> | 28 | #include <asm/prom.h> |
@@ -56,184 +57,313 @@ enum {
56 | REISWAITEN = 0x508, /* Reissue Wait Control*/ | 57 | REISWAITEN = 0x508, /* Reissue Wait Control*/ |
57 | }; | 58 | }; |
58 | 59 | ||
59 | static void __iomem *spider_pics[4]; | 60 | #define SPIDER_CHIP_COUNT 4 |
61 | #define SPIDER_SRC_COUNT 64 | ||
62 | #define SPIDER_IRQ_INVALID 63 | ||
60 | 63 | ||
61 | static void __iomem *spider_get_pic(int irq) | 64 | struct spider_pic { |
62 | { | 65 | struct irq_host *host; |
63 | int node = irq / IIC_NODE_STRIDE; | 66 | struct device_node *of_node; |
64 | irq %= IIC_NODE_STRIDE; | 67 | void __iomem *regs; |
65 | 68 | unsigned int node_id; | |
66 | if (irq >= IIC_EXT_OFFSET && | 69 | }; |
67 | irq < IIC_EXT_OFFSET + IIC_NUM_EXT && | 70 | static struct spider_pic spider_pics[SPIDER_CHIP_COUNT]; |
68 | spider_pics) | ||
69 | return spider_pics[node]; | ||
70 | return NULL; | ||
71 | } | ||
72 | 71 | ||
73 | static int spider_get_nr(unsigned int irq) | 72 | static struct spider_pic *spider_virq_to_pic(unsigned int virq) |
74 | { | 73 | { |
75 | return (irq % IIC_NODE_STRIDE) - IIC_EXT_OFFSET; | 74 | return irq_map[virq].host->host_data; |
76 | } | 75 | } |
77 | 76 | ||
78 | static void __iomem *spider_get_irq_config(int irq) | 77 | static void __iomem *spider_get_irq_config(struct spider_pic *pic, |
78 | unsigned int src) | ||
79 | { | 79 | { |
80 | void __iomem *pic; | 80 | return pic->regs + TIR_CFGA + 8 * src; |
81 | pic = spider_get_pic(irq); | ||
82 | return pic + TIR_CFGA + 8 * spider_get_nr(irq); | ||
83 | } | 81 | } |
84 | 82 | ||
85 | static void spider_enable_irq(unsigned int irq) | 83 | static void spider_unmask_irq(unsigned int virq) |
86 | { | 84 | { |
87 | int nodeid = (irq / IIC_NODE_STRIDE) * 0x10; | 85 | struct spider_pic *pic = spider_virq_to_pic(virq); |
88 | void __iomem *cfg = spider_get_irq_config(irq); | 86 | void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq); |
89 | irq = spider_get_nr(irq); | ||
90 | 87 | ||
91 | out_be32(cfg, (in_be32(cfg) & ~0xf0)| 0x3107000eu | nodeid); | 88 | /* We use no locking as we should be covered by the descriptor lock |
92 | out_be32(cfg + 4, in_be32(cfg + 4) | 0x00020000u | irq); | 89 | * for access to individual source configuration registers |
90 | */ | ||
91 | out_be32(cfg, in_be32(cfg) | 0x30000000u); | ||
93 | } | 92 | } |
94 | 93 | ||
95 | static void spider_disable_irq(unsigned int irq) | 94 | static void spider_mask_irq(unsigned int virq) |
96 | { | 95 | { |
97 | void __iomem *cfg = spider_get_irq_config(irq); | 96 | struct spider_pic *pic = spider_virq_to_pic(virq); |
98 | irq = spider_get_nr(irq); | 97 | void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq); |
99 | 98 | ||
99 | /* We use no locking as we should be covered by the descriptor lock | ||
100 | * for access to individual source configuration registers | ||
101 | */ | ||
100 | out_be32(cfg, in_be32(cfg) & ~0x30000000u); | 102 | out_be32(cfg, in_be32(cfg) & ~0x30000000u); |
101 | } | 103 | } |
102 | 104 | ||
103 | static unsigned int spider_startup_irq(unsigned int irq) | 105 | static void spider_ack_irq(unsigned int virq) |
104 | { | 106 | { |
105 | spider_enable_irq(irq); | 107 | struct spider_pic *pic = spider_virq_to_pic(virq); |
106 | return 0; | 108 | unsigned int src = irq_map[virq].hwirq; |
107 | } | ||
108 | 109 | ||
109 | static void spider_shutdown_irq(unsigned int irq) | 110 | /* Reset edge detection logic if necessary |
110 | { | 111 | */ |
111 | spider_disable_irq(irq); | 112 | if (get_irq_desc(virq)->status & IRQ_LEVEL) |
112 | } | 113 | return; |
113 | 114 | ||
114 | static void spider_end_irq(unsigned int irq) | 115 | /* Only interrupts 47 to 50 can be set to edge */ |
115 | { | 116 | if (src < 47 || src > 50) |
116 | spider_enable_irq(irq); | 117 | return; |
117 | } | ||
118 | 118 | ||
119 | static void spider_ack_irq(unsigned int irq) | 119 | /* Perform the clear of the edge logic */ |
120 | { | 120 | out_be32(pic->regs + TIR_EDC, 0x100 | (src & 0xf)); |
121 | spider_disable_irq(irq); | ||
122 | iic_local_enable(); | ||
123 | } | 121 | } |
124 | 122 | ||
125 | static struct hw_interrupt_type spider_pic = { | 123 | static struct irq_chip spider_pic = { |
126 | .typename = " SPIDER ", | 124 | .typename = " SPIDER ", |
127 | .startup = spider_startup_irq, | 125 | .unmask = spider_unmask_irq, |
128 | .shutdown = spider_shutdown_irq, | 126 | .mask = spider_mask_irq, |
129 | .enable = spider_enable_irq, | ||
130 | .disable = spider_disable_irq, | ||
131 | .ack = spider_ack_irq, | 127 | .ack = spider_ack_irq, |
132 | .end = spider_end_irq, | ||
133 | }; | 128 | }; |
134 | 129 | ||
135 | int spider_get_irq(int node) | 130 | static int spider_host_match(struct irq_host *h, struct device_node *node) |
136 | { | 131 | { |
137 | unsigned long cs; | 132 | struct spider_pic *pic = h->host_data; |
138 | void __iomem *regs = spider_pics[node]; | 133 | return node == pic->of_node; |
139 | |||
140 | cs = in_be32(regs + TIR_CS) >> 24; | ||
141 | |||
142 | if (cs == 63) | ||
143 | return -1; | ||
144 | else | ||
145 | return cs; | ||
146 | } | 134 | } |
147 | 135 | ||
148 | /* hardcoded part to be compatible with older firmware */ | 136 | static int spider_host_map(struct irq_host *h, unsigned int virq, |
149 | 137 | irq_hw_number_t hw, unsigned int flags) | |
150 | void spider_init_IRQ_hardcoded(void) | ||
151 | { | 138 | { |
152 | int node; | 139 | unsigned int sense = flags & IRQ_TYPE_SENSE_MASK; |
153 | long spiderpic; | 140 | struct spider_pic *pic = h->host_data; |
154 | long pics[] = { 0x24000008000, 0x34000008000 }; | 141 | void __iomem *cfg = spider_get_irq_config(pic, hw); |
155 | int n; | 142 | int level = 0; |
156 | 143 | u32 ic; | |
157 | pr_debug("%s(%d): Using hardcoded defaults\n", __FUNCTION__, __LINE__); | 144 | |
158 | 145 | /* Note that only level high is supported for most interrupts */ | |
159 | for (node = 0; node < num_present_cpus()/2; node++) { | 146 | if (sense != IRQ_TYPE_NONE && sense != IRQ_TYPE_LEVEL_HIGH && |
160 | spiderpic = pics[node]; | 147 | (hw < 47 || hw > 50)) |
161 | printk(KERN_DEBUG "SPIDER addr: %lx\n", spiderpic); | 148 | return -EINVAL; |
162 | spider_pics[node] = ioremap(spiderpic, 0x800); | 149 | |
163 | for (n = 0; n < IIC_NUM_EXT; n++) { | 150 | /* Decode sense type */ |
164 | int irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE; | 151 | switch(sense) { |
165 | get_irq_desc(irq)->chip = &spider_pic; | 152 | case IRQ_TYPE_EDGE_RISING: |
166 | } | 153 | ic = 0x3; |
167 | 154 | break; | |
168 | /* do not mask any interrupts because of level */ | 155 | case IRQ_TYPE_EDGE_FALLING: |
169 | out_be32(spider_pics[node] + TIR_MSK, 0x0); | 156 | ic = 0x2; |
170 | 157 | break; | |
171 | /* disable edge detection clear */ | 158 | case IRQ_TYPE_LEVEL_LOW: |
172 | /* out_be32(spider_pics[node] + TIR_EDC, 0x0); */ | 159 | ic = 0x0; |
173 | 160 | level = 1; | |
174 | /* enable interrupt packets to be output */ | 161 | break; |
175 | out_be32(spider_pics[node] + TIR_PIEN, | 162 | case IRQ_TYPE_LEVEL_HIGH: |
176 | in_be32(spider_pics[node] + TIR_PIEN) | 0x1); | 163 | case IRQ_TYPE_NONE: |
177 | 164 | ic = 0x1; | |
178 | /* Enable the interrupt detection enable bit. Do this last! */ | 165 | level = 1; |
179 | out_be32(spider_pics[node] + TIR_DEN, | 166 | break; |
180 | in_be32(spider_pics[node] + TIR_DEN) | 0x1); | 167 | default: |
168 | return -EINVAL; | ||
181 | } | 169 | } |
182 | } | ||
183 | 170 | ||
184 | void spider_init_IRQ(void) | 171 | /* Configure the source. One gross hack that was there before and |
185 | { | 172 | * that I've kept around is the priority to the BE which I set to |
186 | long spider_reg; | 173 | * be the same as the interrupt source number. I don't know whether |
187 | struct device_node *dn; | 174 | * that's supposed to make any kind of sense however, we'll have to |
188 | char *compatible; | 175 | * decide that, but for now, I'm not changing the behaviour. |
189 | int n, node = 0; | 176 | */ |
177 | out_be32(cfg, (ic << 24) | (0x7 << 16) | (pic->node_id << 4) | 0xe); | ||
178 | out_be32(cfg + 4, (0x2 << 16) | (hw & 0xff)); | ||
179 | |||
180 | if (level) | ||
181 | get_irq_desc(virq)->status |= IRQ_LEVEL; | ||
182 | set_irq_chip_and_handler(virq, &spider_pic, handle_level_irq); | ||
183 | return 0; | ||
184 | } | ||
190 | 185 | ||
191 | for (dn = NULL; (dn = of_find_node_by_name(dn, "interrupt-controller"));) { | 186 | static int spider_host_xlate(struct irq_host *h, struct device_node *ct, |
192 | compatible = (char *)get_property(dn, "compatible", NULL); | 187 | u32 *intspec, unsigned int intsize, |
188 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | ||
193 | 189 | ||
194 | if (!compatible) | 190 | { |
195 | continue; | 191 | /* Spider interrupts have 2 cells, first is the interrupt source, |
192 | * second, well, I don't know for sure yet ... We mask the top bits | ||
193 | * because old device-trees encode a node number in there | ||
194 | */ | ||
195 | *out_hwirq = intspec[0] & 0x3f; | ||
196 | *out_flags = IRQ_TYPE_LEVEL_HIGH; | ||
197 | return 0; | ||
198 | } | ||
196 | 199 | ||
197 | if (strstr(compatible, "CBEA,platform-spider-pic")) | 200 | static struct irq_host_ops spider_host_ops = { |
198 | spider_reg = *(long *)get_property(dn,"reg", NULL); | 201 | .match = spider_host_match, |
199 | else if (strstr(compatible, "sti,platform-spider-pic")) { | 202 | .map = spider_host_map, |
200 | spider_init_IRQ_hardcoded(); | 203 | .xlate = spider_host_xlate, |
201 | return; | 204 | }; |
202 | } else | ||
203 | continue; | ||
204 | 205 | ||
205 | if (!spider_reg) | 206 | static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc, |
206 | printk("interrupt controller does not have reg property !\n"); | 207 | struct pt_regs *regs) |
208 | { | ||
209 | struct spider_pic *pic = desc->handler_data; | ||
210 | unsigned int cs, virq; | ||
207 | 211 | ||
208 | n = prom_n_addr_cells(dn); | 212 | cs = in_be32(pic->regs + TIR_CS) >> 24; |
213 | if (cs == SPIDER_IRQ_INVALID) | ||
214 | virq = NO_IRQ; | ||
215 | else | ||
216 | virq = irq_linear_revmap(pic->host, cs); | ||
217 | if (virq != NO_IRQ) | ||
218 | generic_handle_irq(virq, regs); | ||
219 | desc->chip->eoi(irq); | ||
220 | } | ||
209 | 221 | ||
210 | if ( n != 2) | 222 | /* For hooking up the cascace we have a problem. Our device-tree is |
211 | printk("reg property with invalid number of elements \n"); | 223 | * crap and we don't know on which BE iic interrupt we are hooked on at |
224 | * least not the "standard" way. We can reconstitute it based on two | ||
225 | * pieces of information though: which BE node we are connected to and whether | ||
226 | * we are connected to IOIF0 or IOIF1. Right now, we really only care | ||
227 | * about the IBM cell blade and we know that its firmware gives us an | ||
228 | * interrupt-map property which is pretty strange. | ||
229 | */ | ||
230 | static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic) | ||
231 | { | ||
232 | unsigned int virq; | ||
233 | u32 *imap, *tmp; | ||
234 | int imaplen, intsize, unit; | ||
235 | struct device_node *iic; | ||
236 | struct irq_host *iic_host; | ||
237 | |||
238 | #if 0 /* Enable that when we have a way to retrieve the node as well */ | ||
239 | /* First, we check whether we have a real "interrupts" in the device | ||
240 | * tree in case the device-tree is ever fixed | ||
241 | */ | ||
242 | struct of_irq oirq; | ||
243 | if (of_irq_map_one(pic->of_node, 0, &oirq) == 0) { | ||
244 | virq = irq_create_of_mapping(oirq.controller, oirq.specifier, | ||
245 | oirq.size); | ||
246 | goto bail; | ||
247 | } | ||
248 | #endif | ||
249 | |||
250 | /* Now do the horrible hacks */ | ||
251 | tmp = (u32 *)get_property(pic->of_node, "#interrupt-cells", NULL); | ||
252 | if (tmp == NULL) | ||
253 | return NO_IRQ; | ||
254 | intsize = *tmp; | ||
255 | imap = (u32 *)get_property(pic->of_node, "interrupt-map", &imaplen); | ||
256 | if (imap == NULL || imaplen < (intsize + 1)) | ||
257 | return NO_IRQ; | ||
258 | iic = of_find_node_by_phandle(imap[intsize]); | ||
259 | if (iic == NULL) | ||
260 | return NO_IRQ; | ||
261 | imap += intsize + 1; | ||
262 | tmp = (u32 *)get_property(iic, "#interrupt-cells", NULL); | ||
263 | if (tmp == NULL) | ||
264 | return NO_IRQ; | ||
265 | intsize = *tmp; | ||
266 | /* Assume unit is last entry of interrupt specifier */ | ||
267 | unit = imap[intsize - 1]; | ||
268 | /* Ok, we have a unit, now let's try to get the node */ | ||
269 | tmp = (u32 *)get_property(iic, "ibm,interrupt-server-ranges", NULL); | ||
270 | if (tmp == NULL) { | ||
271 | of_node_put(iic); | ||
272 | return NO_IRQ; | ||
273 | } | ||
274 | /* ugly as hell but works for now */ | ||
275 | pic->node_id = (*tmp) >> 1; | ||
276 | of_node_put(iic); | ||
277 | |||
278 | /* Ok, now let's get cracking. You may ask me why I just didn't match | ||
279 | * the iic host from the iic OF node, but that way I'm still compatible | ||
280 | * with really really old firmwares for which we don't have a node | ||
281 | */ | ||
282 | iic_host = iic_get_irq_host(pic->node_id); | ||
283 | if (iic_host == NULL) | ||
284 | return NO_IRQ; | ||
285 | /* Manufacture an IIC interrupt number of class 2 */ | ||
286 | virq = irq_create_mapping(iic_host, 0x20 | unit, 0); | ||
287 | if (virq == NO_IRQ) | ||
288 | printk(KERN_ERR "spider_pic: failed to map cascade !"); | ||
289 | return virq; | ||
290 | } | ||
212 | 291 | ||
213 | spider_pics[node] = ioremap(spider_reg, 0x800); | ||
214 | 292 | ||
215 | printk("SPIDER addr: %lx with %i addr_cells mapped to %p\n", | 293 | static void __init spider_init_one(struct device_node *of_node, int chip, |
216 | spider_reg, n, spider_pics[node]); | 294 | unsigned long addr) |
295 | { | ||
296 | struct spider_pic *pic = &spider_pics[chip]; | ||
297 | int i, virq; | ||
298 | |||
299 | /* Map registers */ | ||
300 | pic->regs = ioremap(addr, 0x1000); | ||
301 | if (pic->regs == NULL) | ||
302 | panic("spider_pic: can't map registers !"); | ||
303 | |||
304 | /* Allocate a host */ | ||
305 | pic->host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, SPIDER_SRC_COUNT, | ||
306 | &spider_host_ops, SPIDER_IRQ_INVALID); | ||
307 | if (pic->host == NULL) | ||
308 | panic("spider_pic: can't allocate irq host !"); | ||
309 | pic->host->host_data = pic; | ||
310 | |||
311 | /* Fill out other bits */ | ||
312 | pic->of_node = of_node_get(of_node); | ||
313 | |||
314 | /* Go through all sources and disable them */ | ||
315 | for (i = 0; i < SPIDER_SRC_COUNT; i++) { | ||
316 | void __iomem *cfg = pic->regs + TIR_CFGA + 8 * i; | ||
317 | out_be32(cfg, in_be32(cfg) & ~0x30000000u); | ||
318 | } | ||
217 | 319 | ||
218 | for (n = 0; n < IIC_NUM_EXT; n++) { | 320 | /* do not mask any interrupts because of level */ |
219 | int irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE; | 321 | out_be32(pic->regs + TIR_MSK, 0x0); |
220 | get_irq_desc(irq)->chip = &spider_pic; | ||
221 | } | ||
222 | 322 | ||
223 | /* do not mask any interrupts because of level */ | 323 | /* enable interrupt packets to be output */ |
224 | out_be32(spider_pics[node] + TIR_MSK, 0x0); | 324 | out_be32(pic->regs + TIR_PIEN, in_be32(pic->regs + TIR_PIEN) | 0x1); |
225 | 325 | ||
226 | /* disable edge detection clear */ | 326 | /* Hook up the cascade interrupt to the iic and nodeid */ |
227 | /* out_be32(spider_pics[node] + TIR_EDC, 0x0); */ | 327 | virq = spider_find_cascade_and_node(pic); |
328 | if (virq == NO_IRQ) | ||
329 | return; | ||
330 | set_irq_data(virq, pic); | ||
331 | set_irq_chained_handler(virq, spider_irq_cascade); | ||
228 | 332 | ||
229 | /* enable interrupt packets to be output */ | 333 | printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n", |
230 | out_be32(spider_pics[node] + TIR_PIEN, | 334 | pic->node_id, addr, of_node->full_name); |
231 | in_be32(spider_pics[node] + TIR_PIEN) | 0x1); | ||
232 | 335 | ||
233 | /* Enable the interrupt detection enable bit. Do this last! */ | 336 | /* Enable the interrupt detection enable bit. Do this last! */ |
234 | out_be32(spider_pics[node] + TIR_DEN, | 337 | out_be32(pic->regs + TIR_DEN, in_be32(pic->regs + TIR_DEN) | 0x1); |
235 | in_be32(spider_pics[node] + TIR_DEN) | 0x1); | 338 | } |
236 | 339 | ||
237 | node++; | 340 | void __init spider_init_IRQ(void) |
341 | { | ||
342 | struct resource r; | ||
343 | struct device_node *dn; | ||
344 | int chip = 0; | ||
345 | |||
346 | /* XXX node numbers are totally bogus. We _hope_ we get the device | ||
347 | * nodes in the right order here but that's definitely not guaranteed, | ||
348 | * we need to get the node from the device tree instead. | ||
349 | * There is currently no proper property for it (but our whole | ||
350 | * device-tree is bogus anyway) so all we can do is pray or maybe test | ||
351 | * the address and deduce the node-id | ||
352 | */ | ||
353 | for (dn = NULL; | ||
354 | (dn = of_find_node_by_name(dn, "interrupt-controller"));) { | ||
355 | if (device_is_compatible(dn, "CBEA,platform-spider-pic")) { | ||
356 | if (of_address_to_resource(dn, 0, &r)) { | ||
357 | printk(KERN_WARNING "spider-pic: Failed\n"); | ||
358 | continue; | ||
359 | } | ||
360 | } else if (device_is_compatible(dn, "sti,platform-spider-pic") | ||
361 | && (chip < 2)) { | ||
362 | static long hard_coded_pics[] = | ||
363 | { 0x24000008000, 0x34000008000 }; | ||
364 | r.start = hard_coded_pics[chip]; | ||
365 | } else | ||
366 | continue; | ||
367 | spider_init_one(dn, chip++, r.start); | ||
238 | } | 368 | } |
239 | } | 369 | } |
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 656c1ef5f4ad..5d2313a6c82b 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -264,51 +264,57 @@ spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
264 | return stat ? IRQ_HANDLED : IRQ_NONE; | 264 | return stat ? IRQ_HANDLED : IRQ_NONE; |
265 | } | 265 | } |
266 | 266 | ||
267 | static int | 267 | static int spu_request_irqs(struct spu *spu) |
268 | spu_request_irqs(struct spu *spu) | ||
269 | { | 268 | { |
270 | int ret; | 269 | int ret = 0; |
271 | int irq_base; | ||
272 | |||
273 | irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET; | ||
274 | |||
275 | snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number); | ||
276 | ret = request_irq(irq_base + spu->isrc, | ||
277 | spu_irq_class_0, IRQF_DISABLED, spu->irq_c0, spu); | ||
278 | if (ret) | ||
279 | goto out; | ||
280 | |||
281 | snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number); | ||
282 | ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, | ||
283 | spu_irq_class_1, IRQF_DISABLED, spu->irq_c1, spu); | ||
284 | if (ret) | ||
285 | goto out1; | ||
286 | 270 | ||
287 | snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number); | 271 | if (spu->irqs[0] != NO_IRQ) { |
288 | ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, | 272 | snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", |
289 | spu_irq_class_2, IRQF_DISABLED, spu->irq_c2, spu); | 273 | spu->number); |
290 | if (ret) | 274 | ret = request_irq(spu->irqs[0], spu_irq_class_0, |
291 | goto out2; | 275 | IRQF_DISABLED, |
292 | goto out; | 276 | spu->irq_c0, spu); |
277 | if (ret) | ||
278 | goto bail0; | ||
279 | } | ||
280 | if (spu->irqs[1] != NO_IRQ) { | ||
281 | snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", | ||
282 | spu->number); | ||
283 | ret = request_irq(spu->irqs[1], spu_irq_class_1, | ||
284 | IRQF_DISABLED, | ||
285 | spu->irq_c1, spu); | ||
286 | if (ret) | ||
287 | goto bail1; | ||
288 | } | ||
289 | if (spu->irqs[2] != NO_IRQ) { | ||
290 | snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", | ||
291 | spu->number); | ||
292 | ret = request_irq(spu->irqs[2], spu_irq_class_2, | ||
293 | IRQF_DISABLED, | ||
294 | spu->irq_c2, spu); | ||
295 | if (ret) | ||
296 | goto bail2; | ||
297 | } | ||
298 | return 0; | ||
293 | 299 | ||
294 | out2: | 300 | bail2: |
295 | free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu); | 301 | if (spu->irqs[1] != NO_IRQ) |
296 | out1: | 302 | free_irq(spu->irqs[1], spu); |
297 | free_irq(irq_base + spu->isrc, spu); | 303 | bail1: |
298 | out: | 304 | if (spu->irqs[0] != NO_IRQ) |
305 | free_irq(spu->irqs[0], spu); | ||
306 | bail0: | ||
299 | return ret; | 307 | return ret; |
300 | } | 308 | } |
301 | 309 | ||
302 | static void | 310 | static void spu_free_irqs(struct spu *spu) |
303 | spu_free_irqs(struct spu *spu) | ||
304 | { | 311 | { |
305 | int irq_base; | 312 | if (spu->irqs[0] != NO_IRQ) |
306 | 313 | free_irq(spu->irqs[0], spu); | |
307 | irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET; | 314 | if (spu->irqs[1] != NO_IRQ) |
308 | 315 | free_irq(spu->irqs[1], spu); | |
309 | free_irq(irq_base + spu->isrc, spu); | 316 | if (spu->irqs[2] != NO_IRQ) |
310 | free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu); | 317 | free_irq(spu->irqs[2], spu); |
311 | free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu); | ||
312 | } | 318 | } |
313 | 319 | ||
314 | static LIST_HEAD(spu_list); | 320 | static LIST_HEAD(spu_list); |
@@ -559,17 +565,38 @@ static void spu_unmap(struct spu *spu)
559 | iounmap((u8 __iomem *)spu->local_store); | 565 | iounmap((u8 __iomem *)spu->local_store); |
560 | } | 566 | } |
561 | 567 | ||
568 | /* This function shall be abstracted for HV platforms */ | ||
569 | static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) | ||
570 | { | ||
571 | struct irq_host *host; | ||
572 | unsigned int isrc; | ||
573 | u32 *tmp; | ||
574 | |||
575 | host = iic_get_irq_host(spu->node); | ||
576 | if (host == NULL) | ||
577 | return -ENODEV; | ||
578 | |||
579 | /* Get the interrupt source from the device-tree */ | ||
580 | tmp = (u32 *)get_property(np, "isrc", NULL); | ||
581 | if (!tmp) | ||
582 | return -ENODEV; | ||
583 | spu->isrc = isrc = tmp[0]; | ||
584 | |||
585 | /* Now map interrupts of all 3 classes */ | ||
586 | spu->irqs[0] = irq_create_mapping(host, 0x00 | isrc, 0); | ||
587 | spu->irqs[1] = irq_create_mapping(host, 0x10 | isrc, 0); | ||
588 | spu->irqs[2] = irq_create_mapping(host, 0x20 | isrc, 0); | ||
589 | |||
590 | /* Right now, we only fail if class 2 failed */ | ||
591 | return spu->irqs[2] == NO_IRQ ? -EINVAL : 0; | ||
592 | } | ||
593 | |||
562 | static int __init spu_map_device(struct spu *spu, struct device_node *node) | 594 | static int __init spu_map_device(struct spu *spu, struct device_node *node) |
563 | { | 595 | { |
564 | char *prop; | 596 | char *prop; |
565 | int ret; | 597 | int ret; |
566 | 598 | ||
567 | ret = -ENODEV; | 599 | ret = -ENODEV; |
568 | prop = get_property(node, "isrc", NULL); | ||
569 | if (!prop) | ||
570 | goto out; | ||
571 | spu->isrc = *(unsigned int *)prop; | ||
572 | |||
573 | spu->name = get_property(node, "name", NULL); | 600 | spu->name = get_property(node, "name", NULL); |
574 | if (!spu->name) | 601 | if (!spu->name) |
575 | goto out; | 602 | goto out; |
@@ -636,7 +663,8 @@ static int spu_create_sysdev(struct spu *spu)
636 | return ret; | 663 | return ret; |
637 | } | 664 | } |
638 | 665 | ||
639 | sysdev_create_file(&spu->sysdev, &attr_isrc); | 666 | if (spu->isrc != 0) |
667 | sysdev_create_file(&spu->sysdev, &attr_isrc); | ||
640 | sysfs_add_device_to_node(&spu->sysdev, spu->nid); | 668 | sysfs_add_device_to_node(&spu->sysdev, spu->nid); |
641 | 669 | ||
642 | return 0; | 670 | return 0; |
@@ -668,6 +696,9 @@ static int __init create_spu(struct device_node *spe)
668 | spu->nid = of_node_to_nid(spe); | 696 | spu->nid = of_node_to_nid(spe); |
669 | if (spu->nid == -1) | 697 | if (spu->nid == -1) |
670 | spu->nid = 0; | 698 | spu->nid = 0; |
699 | ret = spu_map_interrupts(spu, spe); | ||
700 | if (ret) | ||
701 | goto out_unmap; | ||
671 | spin_lock_init(&spu->register_lock); | 702 | spin_lock_init(&spu->register_lock); |
672 | spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1)); | 703 | spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1)); |
673 | spu_mfc_sr1_set(spu, 0x33); | 704 | spu_mfc_sr1_set(spu, 0x33); |
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index 66c253498803..6802cdc3168a 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -18,7 +18,6 @@
18 | #include <asm/machdep.h> | 18 | #include <asm/machdep.h> |
19 | #include <asm/sections.h> | 19 | #include <asm/sections.h> |
20 | #include <asm/pci-bridge.h> | 20 | #include <asm/pci-bridge.h> |
21 | #include <asm/open_pic.h> | ||
22 | #include <asm/grackle.h> | 21 | #include <asm/grackle.h> |
23 | #include <asm/rtas.h> | 22 | #include <asm/rtas.h> |
24 | 23 | ||
@@ -161,15 +160,9 @@ void __init | |||
161 | chrp_pcibios_fixup(void) | 160 | chrp_pcibios_fixup(void) |
162 | { | 161 | { |
163 | struct pci_dev *dev = NULL; | 162 | struct pci_dev *dev = NULL; |
164 | struct device_node *np; | ||
165 | 163 | ||
166 | /* PCI interrupts are controlled by the OpenPIC */ | 164 | for_each_pci_dev(dev) |
167 | for_each_pci_dev(dev) { | 165 | pci_read_irq_line(dev); |
168 | np = pci_device_to_OF_node(dev); | ||
169 | if ((np != 0) && (np->n_intrs > 0) && (np->intrs[0].line != 0)) | ||
170 | dev->irq = np->intrs[0].line; | ||
171 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); | ||
172 | } | ||
173 | } | 166 | } |
174 | 167 | ||
175 | #define PRG_CL_RESET_VALID 0x00010000 | 168 | #define PRG_CL_RESET_VALID 0x00010000 |
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index 1f1771b212b4..bb10171132fa 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c | |||
@@ -59,7 +59,7 @@ void rtas_indicator_progress(char *, unsigned short); | |||
59 | int _chrp_type; | 59 | int _chrp_type; |
60 | EXPORT_SYMBOL(_chrp_type); | 60 | EXPORT_SYMBOL(_chrp_type); |
61 | 61 | ||
62 | struct mpic *chrp_mpic; | 62 | static struct mpic *chrp_mpic; |
63 | 63 | ||
64 | /* Used for doing CHRP event-scans */ | 64 | /* Used for doing CHRP event-scans */ |
65 | DEFINE_PER_CPU(struct timer_list, heartbeat_timer); | 65 | DEFINE_PER_CPU(struct timer_list, heartbeat_timer); |
@@ -315,24 +315,32 @@ chrp_event_scan(unsigned long unused) | |||
315 | jiffies + event_scan_interval); | 315 | jiffies + event_scan_interval); |
316 | } | 316 | } |
317 | 317 | ||
318 | static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc, | ||
319 | struct pt_regs *regs) | ||
320 | { | ||
321 | unsigned int cascade_irq = i8259_irq(regs); | ||
322 | if (cascade_irq != NO_IRQ) | ||
323 | generic_handle_irq(cascade_irq, regs); | ||
324 | desc->chip->eoi(irq); | ||
325 | } | ||
326 | |||
318 | /* | 327 | /* |
319 | * Finds the open-pic node and sets up the mpic driver. | 328 | * Finds the open-pic node and sets up the mpic driver. |
320 | */ | 329 | */ |
321 | static void __init chrp_find_openpic(void) | 330 | static void __init chrp_find_openpic(void) |
322 | { | 331 | { |
323 | struct device_node *np, *root; | 332 | struct device_node *np, *root; |
324 | int len, i, j, irq_count; | 333 | int len, i, j; |
325 | int isu_size, idu_size; | 334 | int isu_size, idu_size; |
326 | unsigned int *iranges, *opprop = NULL; | 335 | unsigned int *iranges, *opprop = NULL; |
327 | int oplen = 0; | 336 | int oplen = 0; |
328 | unsigned long opaddr; | 337 | unsigned long opaddr; |
329 | int na = 1; | 338 | int na = 1; |
330 | unsigned char init_senses[NR_IRQS - NUM_8259_INTERRUPTS]; | ||
331 | 339 | ||
332 | np = find_type_devices("open-pic"); | 340 | np = of_find_node_by_type(NULL, "open-pic"); |
333 | if (np == NULL) | 341 | if (np == NULL) |
334 | return; | 342 | return; |
335 | root = find_path_device("/"); | 343 | root = of_find_node_by_path("/"); |
336 | if (root) { | 344 | if (root) { |
337 | opprop = (unsigned int *) get_property | 345 | opprop = (unsigned int *) get_property |
338 | (root, "platform-open-pic", &oplen); | 346 | (root, "platform-open-pic", &oplen); |
@@ -343,19 +351,15 @@ static void __init chrp_find_openpic(void) | |||
343 | oplen /= na * sizeof(unsigned int); | 351 | oplen /= na * sizeof(unsigned int); |
344 | } else { | 352 | } else { |
345 | struct resource r; | 353 | struct resource r; |
346 | if (of_address_to_resource(np, 0, &r)) | 354 | if (of_address_to_resource(np, 0, &r)) { |
347 | return; | 355 | goto bail; |
356 | } | ||
348 | opaddr = r.start; | 357 | opaddr = r.start; |
349 | oplen = 0; | 358 | oplen = 0; |
350 | } | 359 | } |
351 | 360 | ||
352 | printk(KERN_INFO "OpenPIC at %lx\n", opaddr); | 361 | printk(KERN_INFO "OpenPIC at %lx\n", opaddr); |
353 | 362 | ||
354 | irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */ | ||
355 | prom_get_irq_senses(init_senses, NUM_ISA_INTERRUPTS, NR_IRQS - 4); | ||
356 | /* i8259 cascade is always positive level */ | ||
357 | init_senses[0] = IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE; | ||
358 | |||
359 | iranges = (unsigned int *) get_property(np, "interrupt-ranges", &len); | 363 | iranges = (unsigned int *) get_property(np, "interrupt-ranges", &len); |
360 | if (iranges == NULL) | 364 | if (iranges == NULL) |
361 | len = 0; /* non-distributed mpic */ | 365 | len = 0; /* non-distributed mpic */ |
@@ -382,15 +386,12 @@ static void __init chrp_find_openpic(void) | |||
382 | if (len > 1) | 386 | if (len > 1) |
383 | isu_size = iranges[3]; | 387 | isu_size = iranges[3]; |
384 | 388 | ||
385 | chrp_mpic = mpic_alloc(opaddr, MPIC_PRIMARY, | 389 | chrp_mpic = mpic_alloc(np, opaddr, MPIC_PRIMARY, |
386 | isu_size, NUM_ISA_INTERRUPTS, irq_count, | 390 | isu_size, 0, " MPIC "); |
387 | NR_IRQS - 4, init_senses, irq_count, | ||
388 | " MPIC "); | ||
389 | if (chrp_mpic == NULL) { | 391 | if (chrp_mpic == NULL) { |
390 | printk(KERN_ERR "Failed to allocate MPIC structure\n"); | 392 | printk(KERN_ERR "Failed to allocate MPIC structure\n"); |
391 | return; | 393 | goto bail; |
392 | } | 394 | } |
393 | |||
394 | j = na - 1; | 395 | j = na - 1; |
395 | for (i = 1; i < len; ++i) { | 396 | for (i = 1; i < len; ++i) { |
396 | iranges += 2; | 397 | iranges += 2; |
@@ -402,7 +403,10 @@ static void __init chrp_find_openpic(void) | |||
402 | } | 403 | } |
403 | 404 | ||
404 | mpic_init(chrp_mpic); | 405 | mpic_init(chrp_mpic); |
405 | mpic_setup_cascade(NUM_ISA_INTERRUPTS, i8259_irq_cascade, NULL); | 406 | ppc_md.get_irq = mpic_get_irq; |
407 | bail: | ||
408 | of_node_put(root); | ||
409 | of_node_put(np); | ||
406 | } | 410 | } |
407 | 411 | ||
408 | #if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON) | 412 | #if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON) |
@@ -413,14 +417,34 @@ static struct irqaction xmon_irqaction = { | |||
413 | }; | 417 | }; |
414 | #endif | 418 | #endif |
415 | 419 | ||
416 | void __init chrp_init_IRQ(void) | 420 | static void __init chrp_find_8259(void) |
417 | { | 421 | { |
418 | struct device_node *np; | 422 | struct device_node *np, *pic = NULL; |
419 | unsigned long chrp_int_ack = 0; | 423 | unsigned long chrp_int_ack = 0; |
420 | #if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON) | 424 | unsigned int cascade_irq; |
421 | struct device_node *kbd; | 425 | |
422 | #endif | 426 | /* Look for cascade */ |
427 | for_each_node_by_type(np, "interrupt-controller") | ||
428 | if (device_is_compatible(np, "chrp,iic")) { | ||
429 | pic = np; | ||
430 | break; | ||
431 | } | ||
432 | /* If the 8259 wasn't found, we may be on a Pegasos, which claims | ||
433 | * to be CHRP but doesn't have a proper interrupt tree. Only bail | ||
434 | * out here if an MPIC is present to fall back on. | ||
435 | */ | ||
436 | if (pic == NULL && chrp_mpic != NULL) { | ||
437 | printk(KERN_ERR "i8259: Not found in device-tree," | ||
438 | " assuming no legacy interrupts\n"); | ||
439 | return; | ||
440 | } | ||
423 | 441 | ||
442 | /* Look for intack. In a perfect world, we would look for it on | ||
443 | * the ISA bus that holds the 8259, but this works in practice. If | ||
444 | * we ever see a problem, we can try to re-use the pSeries code here. | ||
445 | * Also, Pegasos-type platforms don't have a proper node to start | ||
446 | * from anyway | ||
447 | */ | ||
424 | for (np = find_devices("pci"); np != NULL; np = np->next) { | 448 | for (np = find_devices("pci"); np != NULL; np = np->next) { |
425 | unsigned int *addrp = (unsigned int *) | 449 | unsigned int *addrp = (unsigned int *) |
426 | get_property(np, "8259-interrupt-acknowledge", NULL); | 450 | get_property(np, "8259-interrupt-acknowledge", NULL); |
@@ -431,11 +455,29 @@ void __init chrp_init_IRQ(void) | |||
431 | break; | 455 | break; |
432 | } | 456 | } |
433 | if (np == NULL) | 457 | if (np == NULL) |
434 | printk(KERN_ERR "Cannot find PCI interrupt acknowledge address\n"); | 458 | printk(KERN_WARNING "Cannot find PCI interrupt acknowledge" |
459 | " address, polling\n"); | ||
460 | |||
461 | i8259_init(pic, chrp_int_ack); | ||
462 | if (ppc_md.get_irq == NULL) | ||
463 | ppc_md.get_irq = i8259_irq; | ||
464 | if (chrp_mpic != NULL) { | ||
465 | cascade_irq = irq_of_parse_and_map(pic, 0); | ||
466 | if (cascade_irq == NO_IRQ) | ||
467 | printk(KERN_ERR "i8259: failed to map cascade irq\n"); | ||
468 | else | ||
469 | set_irq_chained_handler(cascade_irq, | ||
470 | chrp_8259_cascade); | ||
471 | } | ||
472 | } | ||
435 | 473 | ||
474 | void __init chrp_init_IRQ(void) | ||
475 | { | ||
476 | #if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON) | ||
477 | struct device_node *kbd; | ||
478 | #endif | ||
436 | chrp_find_openpic(); | 479 | chrp_find_openpic(); |
437 | 480 | chrp_find_8259(); | |
438 | i8259_init(chrp_int_ack, 0); | ||
439 | 481 | ||
440 | if (_chrp_type == _CHRP_Pegasos) | 482 | if (_chrp_type == _CHRP_Pegasos) |
441 | ppc_md.get_irq = i8259_irq; | 483 | ppc_md.get_irq = i8259_irq; |
@@ -520,10 +562,6 @@ static int __init chrp_probe(void) | |||
520 | DMA_MODE_READ = 0x44; | 562 | DMA_MODE_READ = 0x44; |
521 | DMA_MODE_WRITE = 0x48; | 563 | DMA_MODE_WRITE = 0x48; |
522 | isa_io_base = CHRP_ISA_IO_BASE; /* default value */ | 564 | isa_io_base = CHRP_ISA_IO_BASE; /* default value */ |
523 | ppc_do_canonicalize_irqs = 1; | ||
524 | |||
525 | /* Assume we have an 8259... */ | ||
526 | __irq_offset_value = NUM_ISA_INTERRUPTS; | ||
527 | 565 | ||
528 | return 1; | 566 | return 1; |
529 | } | 567 | } |
@@ -535,7 +573,6 @@ define_machine(chrp) { | |||
535 | .init = chrp_init2, | 573 | .init = chrp_init2, |
536 | .show_cpuinfo = chrp_show_cpuinfo, | 574 | .show_cpuinfo = chrp_show_cpuinfo, |
537 | .init_IRQ = chrp_init_IRQ, | 575 | .init_IRQ = chrp_init_IRQ, |
538 | .get_irq = mpic_get_irq, | ||
539 | .pcibios_fixup = chrp_pcibios_fixup, | 576 | .pcibios_fixup = chrp_pcibios_fixup, |
540 | .restart = rtas_restart, | 577 | .restart = rtas_restart, |
541 | .power_off = rtas_power_off, | 578 | .power_off = rtas_power_off, |
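
The chrp/setup.c changes split the old chrp_init_IRQ() into chrp_find_openpic() and chrp_find_8259(), and replace mpic_setup_cascade() with an explicit chained handler. Pulling the scattered hunks together, the cascade wiring amounts to the sketch below; `pic`, `chrp_int_ack` and `chrp_mpic` are found as in the hunks above, and all calls mirror the patch:

/* Sketch: hook the i8259 underneath the MPIC once both are set up. */
i8259_init(pic, chrp_int_ack);          /* program the 8259, remember intack */
if (ppc_md.get_irq == NULL)
	ppc_md.get_irq = i8259_irq;     /* 8259-only machines (e.g. Pegasos) */

if (chrp_mpic != NULL) {
	unsigned int cascade_irq = irq_of_parse_and_map(pic, 0);

	if (cascade_irq == NO_IRQ)
		printk(KERN_ERR "i8259: failed to map cascade irq\n");
	else
		/* chrp_8259_cascade() asks the 8259 which source fired,
		 * handles it, then EOIs the cascade on the MPIC */
		set_irq_chained_handler(cascade_irq, chrp_8259_cascade);
}
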
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c index c298ca1ea680..1d2307e87c30 100644 --- a/arch/powerpc/platforms/chrp/smp.c +++ b/arch/powerpc/platforms/chrp/smp.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <asm/smp.h> | 29 | #include <asm/smp.h> |
30 | #include <asm/residual.h> | 30 | #include <asm/residual.h> |
31 | #include <asm/time.h> | 31 | #include <asm/time.h> |
32 | #include <asm/open_pic.h> | ||
33 | #include <asm/machdep.h> | 32 | #include <asm/machdep.h> |
34 | #include <asm/smp.h> | 33 | #include <asm/smp.h> |
35 | #include <asm/mpic.h> | 34 | #include <asm/mpic.h> |
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c index f70e820e7304..2275e64f3152 100644 --- a/arch/powerpc/platforms/iseries/irq.c +++ b/arch/powerpc/platforms/iseries/irq.c | |||
@@ -162,27 +162,6 @@ static void pci_event_handler(struct HvLpEvent *event, struct pt_regs *regs) | |||
162 | printk(KERN_ERR "pci_event_handler: NULL event received\n"); | 162 | printk(KERN_ERR "pci_event_handler: NULL event received\n"); |
163 | } | 163 | } |
164 | 164 | ||
165 | /* | ||
166 | * This is called by init_IRQ. set in ppc_md.init_IRQ by iSeries_setup.c | ||
167 | * It must be called before the bus walk. | ||
168 | */ | ||
169 | void __init iSeries_init_IRQ(void) | ||
170 | { | ||
171 | /* Register PCI event handler and open an event path */ | ||
172 | int ret; | ||
173 | |||
174 | ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo, | ||
175 | &pci_event_handler); | ||
176 | if (ret == 0) { | ||
177 | ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0); | ||
178 | if (ret != 0) | ||
179 | printk(KERN_ERR "iseries_init_IRQ: open event path " | ||
180 | "failed with rc 0x%x\n", ret); | ||
181 | } else | ||
182 | printk(KERN_ERR "iseries_init_IRQ: register handler " | ||
183 | "failed with rc 0x%x\n", ret); | ||
184 | } | ||
185 | |||
186 | #define REAL_IRQ_TO_SUBBUS(irq) (((irq) >> 14) & 0xff) | 165 | #define REAL_IRQ_TO_SUBBUS(irq) (((irq) >> 14) & 0xff) |
187 | #define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1) | 166 | #define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1) |
188 | #define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1) | 167 | #define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1) |
@@ -196,7 +175,7 @@ static void iseries_enable_IRQ(unsigned int irq) | |||
196 | { | 175 | { |
197 | u32 bus, dev_id, function, mask; | 176 | u32 bus, dev_id, function, mask; |
198 | const u32 sub_bus = 0; | 177 | const u32 sub_bus = 0; |
199 | unsigned int rirq = virt_irq_to_real_map[irq]; | 178 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; |
200 | 179 | ||
201 | /* The IRQ has already been locked by the caller */ | 180 | /* The IRQ has already been locked by the caller */ |
202 | bus = REAL_IRQ_TO_BUS(rirq); | 181 | bus = REAL_IRQ_TO_BUS(rirq); |
@@ -213,7 +192,7 @@ static unsigned int iseries_startup_IRQ(unsigned int irq) | |||
213 | { | 192 | { |
214 | u32 bus, dev_id, function, mask; | 193 | u32 bus, dev_id, function, mask; |
215 | const u32 sub_bus = 0; | 194 | const u32 sub_bus = 0; |
216 | unsigned int rirq = virt_irq_to_real_map[irq]; | 195 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; |
217 | 196 | ||
218 | bus = REAL_IRQ_TO_BUS(rirq); | 197 | bus = REAL_IRQ_TO_BUS(rirq); |
219 | function = REAL_IRQ_TO_FUNC(rirq); | 198 | function = REAL_IRQ_TO_FUNC(rirq); |
@@ -254,7 +233,7 @@ static void iseries_shutdown_IRQ(unsigned int irq) | |||
254 | { | 233 | { |
255 | u32 bus, dev_id, function, mask; | 234 | u32 bus, dev_id, function, mask; |
256 | const u32 sub_bus = 0; | 235 | const u32 sub_bus = 0; |
257 | unsigned int rirq = virt_irq_to_real_map[irq]; | 236 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; |
258 | 237 | ||
259 | /* irq should be locked by the caller */ | 238 | /* irq should be locked by the caller */ |
260 | bus = REAL_IRQ_TO_BUS(rirq); | 239 | bus = REAL_IRQ_TO_BUS(rirq); |
@@ -277,7 +256,7 @@ static void iseries_disable_IRQ(unsigned int irq) | |||
277 | { | 256 | { |
278 | u32 bus, dev_id, function, mask; | 257 | u32 bus, dev_id, function, mask; |
279 | const u32 sub_bus = 0; | 258 | const u32 sub_bus = 0; |
280 | unsigned int rirq = virt_irq_to_real_map[irq]; | 259 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; |
281 | 260 | ||
282 | /* The IRQ has already been locked by the caller */ | 261 | /* The IRQ has already been locked by the caller */ |
283 | bus = REAL_IRQ_TO_BUS(rirq); | 262 | bus = REAL_IRQ_TO_BUS(rirq); |
@@ -291,19 +270,19 @@ static void iseries_disable_IRQ(unsigned int irq) | |||
291 | 270 | ||
292 | static void iseries_end_IRQ(unsigned int irq) | 271 | static void iseries_end_IRQ(unsigned int irq) |
293 | { | 272 | { |
294 | unsigned int rirq = virt_irq_to_real_map[irq]; | 273 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; |
295 | 274 | ||
296 | HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq), | 275 | HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq), |
297 | (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq)); | 276 | (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq)); |
298 | } | 277 | } |
299 | 278 | ||
300 | static hw_irq_controller iSeries_IRQ_handler = { | 279 | static struct irq_chip iseries_pic = { |
301 | .typename = "iSeries irq controller", | 280 | .typename = "iSeries irq controller", |
302 | .startup = iseries_startup_IRQ, | 281 | .startup = iseries_startup_IRQ, |
303 | .shutdown = iseries_shutdown_IRQ, | 282 | .shutdown = iseries_shutdown_IRQ, |
304 | .enable = iseries_enable_IRQ, | 283 | .unmask = iseries_enable_IRQ, |
305 | .disable = iseries_disable_IRQ, | 284 | .mask = iseries_disable_IRQ, |
306 | .end = iseries_end_IRQ | 285 | .eoi = iseries_end_IRQ |
307 | }; | 286 | }; |
308 | 287 | ||
309 | /* | 288 | /* |
@@ -314,17 +293,14 @@ static hw_irq_controller iSeries_IRQ_handler = { | |||
314 | int __init iSeries_allocate_IRQ(HvBusNumber bus, | 293 | int __init iSeries_allocate_IRQ(HvBusNumber bus, |
315 | HvSubBusNumber sub_bus, u32 bsubbus) | 294 | HvSubBusNumber sub_bus, u32 bsubbus) |
316 | { | 295 | { |
317 | int virtirq; | ||
318 | unsigned int realirq; | 296 | unsigned int realirq; |
319 | u8 idsel = ISERIES_GET_DEVICE_FROM_SUBBUS(bsubbus); | 297 | u8 idsel = ISERIES_GET_DEVICE_FROM_SUBBUS(bsubbus); |
320 | u8 function = ISERIES_GET_FUNCTION_FROM_SUBBUS(bsubbus); | 298 | u8 function = ISERIES_GET_FUNCTION_FROM_SUBBUS(bsubbus); |
321 | 299 | ||
322 | realirq = (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3) | 300 | realirq = (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3) |
323 | + function; | 301 | + function; |
324 | virtirq = virt_irq_create_mapping(realirq); | ||
325 | 302 | ||
326 | irq_desc[virtirq].chip = &iSeries_IRQ_handler; | 303 | return irq_create_mapping(NULL, realirq, IRQ_TYPE_NONE); |
327 | return virtirq; | ||
328 | } | 304 | } |
329 | 305 | ||
330 | #endif /* CONFIG_PCI */ | 306 | #endif /* CONFIG_PCI */ |
@@ -332,10 +308,9 @@ int __init iSeries_allocate_IRQ(HvBusNumber bus, | |||
332 | /* | 308 | /* |
333 | * Get the next pending IRQ. | 309 | * Get the next pending IRQ. |
334 | */ | 310 | */ |
335 | int iSeries_get_irq(struct pt_regs *regs) | 311 | unsigned int iSeries_get_irq(struct pt_regs *regs) |
336 | { | 312 | { |
337 | /* -2 means ignore this interrupt */ | 313 | int irq = NO_IRQ_IGNORE; |
338 | int irq = -2; | ||
339 | 314 | ||
340 | #ifdef CONFIG_SMP | 315 | #ifdef CONFIG_SMP |
341 | if (get_lppaca()->int_dword.fields.ipi_cnt) { | 316 | if (get_lppaca()->int_dword.fields.ipi_cnt) { |
@@ -358,9 +333,57 @@ int iSeries_get_irq(struct pt_regs *regs) | |||
358 | } | 333 | } |
359 | spin_unlock(&pending_irqs_lock); | 334 | spin_unlock(&pending_irqs_lock); |
360 | if (irq >= NR_IRQS) | 335 | if (irq >= NR_IRQS) |
361 | irq = -2; | 336 | irq = NO_IRQ_IGNORE; |
362 | } | 337 | } |
363 | #endif | 338 | #endif |
364 | 339 | ||
365 | return irq; | 340 | return irq; |
366 | } | 341 | } |
342 | |||
343 | static int iseries_irq_host_map(struct irq_host *h, unsigned int virq, | ||
344 | irq_hw_number_t hw, unsigned int flags) | ||
345 | { | ||
346 | set_irq_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq); | ||
347 | |||
348 | return 0; | ||
349 | } | ||
350 | |||
351 | static struct irq_host_ops iseries_irq_host_ops = { | ||
352 | .map = iseries_irq_host_map, | ||
353 | }; | ||
354 | |||
355 | /* | ||
356 | * This is called by init_IRQ, set in ppc_md.init_IRQ by iSeries_setup.c. | ||
357 | * It must be called before the bus walk. | ||
358 | */ | ||
359 | void __init iSeries_init_IRQ(void) | ||
360 | { | ||
361 | /* Register PCI event handler and open an event path */ | ||
362 | struct irq_host *host; | ||
363 | int ret; | ||
364 | |||
365 | /* | ||
366 | * The Hypervisor only allows us up to 256 interrupt | ||
367 | * sources (the irq number is passed in a u8). | ||
368 | */ | ||
369 | irq_set_virq_count(256); | ||
370 | |||
371 | /* Create irq host. No need for a revmap since HV will give us | ||
372 | * back our virtual irq number | ||
373 | */ | ||
374 | host = irq_alloc_host(IRQ_HOST_MAP_NOMAP, 0, &iseries_irq_host_ops, 0); | ||
375 | BUG_ON(host == NULL); | ||
376 | irq_set_default_host(host); | ||
377 | |||
378 | ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo, | ||
379 | &pci_event_handler); | ||
380 | if (ret == 0) { | ||
381 | ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0); | ||
382 | if (ret != 0) | ||
383 | printk(KERN_ERR "iseries_init_IRQ: open event path " | ||
384 | "failed with rc 0x%x\n", ret); | ||
385 | } else | ||
386 | printk(KERN_ERR "iseries_init_IRQ: register handler " | ||
387 | "failed with rc 0x%x\n", ret); | ||
388 | } | ||
389 | |||
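
The iseries/irq.c rework replaces the old virt_irq_to_real_map[] scheme with a NOMAP irq_host: the hypervisor hands back the virtual number directly, so no reverse map is needed and irq_create_mapping() is called with a NULL (default) host. A condensed sketch of the allocation path, using only calls that appear in the hunks (the wrapper name is hypothetical):

/* Sketch: how a PCI slot's hardware interrupt becomes a Linux virq
 * on iSeries after this patch. */
static unsigned int example_iseries_alloc_irq(u32 sub_bus, u32 bus,
					      u32 idsel, u32 function)
{
	/* Pack bus/idsel/function into the "real" irq number exactly as
	 * iSeries_allocate_IRQ() does above */
	unsigned int realirq = (((((sub_bus << 8) + (bus - 1)) << 3)
				 + (idsel - 1)) << 3) + function;

	/* NULL host means the default host; with IRQ_HOST_MAP_NOMAP the
	 * returned virq is handed straight to the HV, which only accepts
	 * a u8, hence irq_set_virq_count(256) in iSeries_init_IRQ() */
	return irq_create_mapping(NULL, realirq, IRQ_TYPE_NONE);
}
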
diff --git a/arch/powerpc/platforms/iseries/irq.h b/arch/powerpc/platforms/iseries/irq.h index 188aa808abd7..1ee8985140e5 100644 --- a/arch/powerpc/platforms/iseries/irq.h +++ b/arch/powerpc/platforms/iseries/irq.h | |||
@@ -4,6 +4,6 @@ | |||
4 | extern void iSeries_init_IRQ(void); | 4 | extern void iSeries_init_IRQ(void); |
5 | extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, u32); | 5 | extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, u32); |
6 | extern void iSeries_activate_IRQs(void); | 6 | extern void iSeries_activate_IRQs(void); |
7 | extern int iSeries_get_irq(struct pt_regs *); | 7 | extern unsigned int iSeries_get_irq(struct pt_regs *); |
8 | 8 | ||
9 | #endif /* _ISERIES_IRQ_H */ | 9 | #endif /* _ISERIES_IRQ_H */ |
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index c877074745b2..c9605d773a77 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ b/arch/powerpc/platforms/iseries/setup.c | |||
@@ -294,8 +294,6 @@ static void __init iSeries_init_early(void) | |||
294 | { | 294 | { |
295 | DBG(" -> iSeries_init_early()\n"); | 295 | DBG(" -> iSeries_init_early()\n"); |
296 | 296 | ||
297 | ppc64_interrupt_controller = IC_ISERIES; | ||
298 | |||
299 | #if defined(CONFIG_BLK_DEV_INITRD) | 297 | #if defined(CONFIG_BLK_DEV_INITRD) |
300 | /* | 298 | /* |
301 | * If the init RAM disk has been configured and there is | 299 | * If the init RAM disk has been configured and there is |
@@ -659,12 +657,6 @@ static int __init iseries_probe(void) | |||
659 | powerpc_firmware_features |= FW_FEATURE_ISERIES; | 657 | powerpc_firmware_features |= FW_FEATURE_ISERIES; |
660 | powerpc_firmware_features |= FW_FEATURE_LPAR; | 658 | powerpc_firmware_features |= FW_FEATURE_LPAR; |
661 | 659 | ||
662 | /* | ||
663 | * The Hypervisor only allows us up to 256 interrupt | ||
664 | * sources (the irq number is passed in a u8). | ||
665 | */ | ||
666 | virt_irq_max = 255; | ||
667 | |||
668 | hpte_init_iSeries(); | 660 | hpte_init_iSeries(); |
669 | 661 | ||
670 | return 1; | 662 | return 1; |
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c index f7170ff86dab..63a1670d3bfd 100644 --- a/arch/powerpc/platforms/maple/pci.c +++ b/arch/powerpc/platforms/maple/pci.c | |||
@@ -443,18 +443,23 @@ void __init maple_pci_init(void) | |||
443 | int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel) | 443 | int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel) |
444 | { | 444 | { |
445 | struct device_node *np; | 445 | struct device_node *np; |
446 | int irq = channel ? 15 : 14; | 446 | unsigned int defirq = channel ? 15 : 14; |
447 | unsigned int irq; | ||
447 | 448 | ||
448 | if (pdev->vendor != PCI_VENDOR_ID_AMD || | 449 | if (pdev->vendor != PCI_VENDOR_ID_AMD || |
449 | pdev->device != PCI_DEVICE_ID_AMD_8111_IDE) | 450 | pdev->device != PCI_DEVICE_ID_AMD_8111_IDE) |
450 | return irq; | 451 | return defirq; |
451 | 452 | ||
452 | np = pci_device_to_OF_node(pdev); | 453 | np = pci_device_to_OF_node(pdev); |
453 | if (np == NULL) | 454 | if (np == NULL) |
454 | return irq; | 455 | return defirq; |
455 | if (np->n_intrs < 2) | 456 | irq = irq_of_parse_and_map(np, channel & 0x1); |
456 | return irq; | 457 | if (irq == NO_IRQ) { |
457 | return np->intrs[channel & 0x1].line; | 458 | printk("Failed to map onboard IDE interrupt for channel %d\n", |
459 | channel); | ||
460 | return defirq; | ||
461 | } | ||
462 | return irq; | ||
458 | } | 463 | } |
459 | 464 | ||
460 | /* XXX: To remove once all firmwares are ok */ | 465 | /* XXX: To remove once all firmwares are ok */ |
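
maple_pci_get_legacy_ide_irq() now resolves the onboard IDE interrupt through irq_of_parse_and_map() and only falls back to the legacy 14/15 numbers when the mapping fails. The same parse-and-map-with-fallback pattern as a stand-alone sketch (helper name hypothetical; calls as used in the hunk):

/* Sketch: map a device-tree interrupt, falling back to a fixed legacy number. */
static unsigned int map_irq_with_fallback(struct device_node *np, int index,
					  unsigned int defirq)
{
	unsigned int irq;

	if (np == NULL)
		return defirq;
	irq = irq_of_parse_and_map(np, index);
	if (irq == NO_IRQ) {
		printk(KERN_WARNING "%s: no interrupt %d in device-tree, "
		       "using %u\n", np->full_name, index, defirq);
		return defirq;
	}
	return irq;
}
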
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c index 5cf90c28b141..cb528c9de4c3 100644 --- a/arch/powerpc/platforms/maple/setup.c +++ b/arch/powerpc/platforms/maple/setup.c | |||
@@ -11,7 +11,7 @@ | |||
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #define DEBUG | 14 | #undef DEBUG |
15 | 15 | ||
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
@@ -198,50 +198,81 @@ static void __init maple_init_early(void) | |||
198 | { | 198 | { |
199 | DBG(" -> maple_init_early\n"); | 199 | DBG(" -> maple_init_early\n"); |
200 | 200 | ||
201 | /* Setup interrupt mapping options */ | ||
202 | ppc64_interrupt_controller = IC_OPEN_PIC; | ||
203 | |||
204 | iommu_init_early_dart(); | 201 | iommu_init_early_dart(); |
205 | 202 | ||
206 | DBG(" <- maple_init_early\n"); | 203 | DBG(" <- maple_init_early\n"); |
207 | } | 204 | } |
208 | 205 | ||
209 | 206 | /* | |
210 | static __init void maple_init_IRQ(void) | 207 | * This is almost identical to pSeries and CHRP. We need to make that |
208 | * code generic at one point, with appropriate bits in the device-tree to | ||
209 | * identify the presence of an HT APIC | ||
210 | */ | ||
211 | static void __init maple_init_IRQ(void) | ||
211 | { | 212 | { |
212 | struct device_node *root; | 213 | struct device_node *root, *np, *mpic_node = NULL; |
213 | unsigned int *opprop; | 214 | unsigned int *opprop; |
214 | unsigned long opic_addr; | 215 | unsigned long openpic_addr = 0; |
216 | int naddr, n, i, opplen, has_isus = 0; | ||
215 | struct mpic *mpic; | 217 | struct mpic *mpic; |
216 | unsigned char senses[128]; | 218 | unsigned int flags = MPIC_PRIMARY; |
217 | int n; | ||
218 | 219 | ||
219 | DBG(" -> maple_init_IRQ\n"); | 220 | /* Locate the MPIC in the device-tree. Note that there is a bug |
221 | * in the Maple device-tree where the type of the controller is |
222 | * open-pic and not interrupt-controller | ||
223 | */ | ||
224 | for_each_node_by_type(np, "open-pic") { | ||
225 | mpic_node = np; | ||
226 | break; | ||
227 | } | ||
228 | if (mpic_node == NULL) { | ||
229 | printk(KERN_ERR | ||
230 | "Failed to locate the MPIC interrupt controller\n"); | ||
231 | return; | ||
232 | } | ||
220 | 233 | ||
221 | /* XXX: Non standard, replace that with a proper openpic/mpic node | 234 | /* Find address list in /platform-open-pic */ |
222 | * in the device-tree. Find the Open PIC if present */ | ||
223 | root = of_find_node_by_path("/"); | 235 | root = of_find_node_by_path("/"); |
224 | opprop = (unsigned int *) get_property(root, | 236 | naddr = prom_n_addr_cells(root); |
225 | "platform-open-pic", NULL); | 237 | opprop = (unsigned int *) get_property(root, "platform-open-pic", |
226 | if (opprop == 0) | 238 | &opplen); |
227 | panic("OpenPIC not found !\n"); | 239 | if (opprop != 0) { |
228 | 240 | openpic_addr = of_read_number(opprop, naddr); | |
229 | n = prom_n_addr_cells(root); | 241 | has_isus = (opplen > naddr); |
230 | for (opic_addr = 0; n > 0; --n) | 242 | printk(KERN_DEBUG "OpenPIC addr: %lx, has ISUs: %d\n", |
231 | opic_addr = (opic_addr << 32) + *opprop++; | 243 | openpic_addr, has_isus); |
244 | } | ||
232 | of_node_put(root); | 245 | of_node_put(root); |
233 | 246 | ||
234 | /* Obtain sense values from device-tree */ | 247 | BUG_ON(openpic_addr == 0); |
235 | prom_get_irq_senses(senses, 0, 128); | 248 | |
249 | /* Check for a big endian MPIC */ | ||
250 | if (get_property(np, "big-endian", NULL) != NULL) | ||
251 | flags |= MPIC_BIG_ENDIAN; | ||
236 | 252 | ||
237 | mpic = mpic_alloc(opic_addr, | 253 | /* XXX Maple specific bits */ |
238 | MPIC_PRIMARY | MPIC_BIG_ENDIAN | | 254 | flags |= MPIC_BROKEN_U3 | MPIC_WANTS_RESET; |
239 | MPIC_BROKEN_U3 | MPIC_WANTS_RESET, | 255 | |
240 | 0, 0, 128, 128, senses, 128, "U3-MPIC"); | 256 | /* Set up the openpic driver. More device-tree junk; we hard-code no |
257 | * ISUs for now. I'll have to revisit some of this with the folks doing |
258 | * the firmware for those | ||
259 | */ | ||
260 | mpic = mpic_alloc(mpic_node, openpic_addr, flags, | ||
261 | /*has_isus ? 16 :*/ 0, 0, " MPIC "); | ||
241 | BUG_ON(mpic == NULL); | 262 | BUG_ON(mpic == NULL); |
242 | mpic_init(mpic); | ||
243 | 263 | ||
244 | DBG(" <- maple_init_IRQ\n"); | 264 | /* Add ISUs */ |
265 | opplen /= sizeof(u32); | ||
266 | for (n = 0, i = naddr; i < opplen; i += naddr, n++) { | ||
267 | unsigned long isuaddr = of_read_number(opprop + i, naddr); | ||
268 | mpic_assign_isu(mpic, n, isuaddr); | ||
269 | } | ||
270 | |||
271 | /* All ISUs are setup, complete initialization */ | ||
272 | mpic_init(mpic); | ||
273 | ppc_md.get_irq = mpic_get_irq; | ||
274 | of_node_put(mpic_node); | ||
275 | of_node_put(root); | ||
245 | } | 276 | } |
246 | 277 | ||
247 | static void __init maple_progress(char *s, unsigned short hex) | 278 | static void __init maple_progress(char *s, unsigned short hex) |
@@ -256,7 +287,9 @@ static void __init maple_progress(char *s, unsigned short hex) | |||
256 | static int __init maple_probe(void) | 287 | static int __init maple_probe(void) |
257 | { | 288 | { |
258 | unsigned long root = of_get_flat_dt_root(); | 289 | unsigned long root = of_get_flat_dt_root(); |
259 | if (!of_flat_dt_is_compatible(root, "Momentum,Maple")) | 290 | |
291 | if (!of_flat_dt_is_compatible(root, "Momentum,Maple") && | ||
292 | !of_flat_dt_is_compatible(root, "Momentum,Apache")) | ||
260 | return 0; | 293 | return 0; |
261 | /* | 294 | /* |
262 | * On U3, the DART (iommu) must be allocated now since it | 295 | * On U3, the DART (iommu) must be allocated now since it |
@@ -277,7 +310,6 @@ define_machine(maple_md) { | |||
277 | .setup_arch = maple_setup_arch, | 310 | .setup_arch = maple_setup_arch, |
278 | .init_early = maple_init_early, | 311 | .init_early = maple_init_early, |
279 | .init_IRQ = maple_init_IRQ, | 312 | .init_IRQ = maple_init_IRQ, |
280 | .get_irq = mpic_get_irq, | ||
281 | .pcibios_fixup = maple_pcibios_fixup, | 313 | .pcibios_fixup = maple_pcibios_fixup, |
282 | .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, | 314 | .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, |
283 | .restart = maple_restart, | 315 | .restart = maple_restart, |
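
maple_init_IRQ() now locates the open-pic node itself, reads the controller address list from the root node's /platform-open-pic property with of_read_number(), and registers any extra ISUs before mpic_init(). A condensed sketch of that sequence (mpic_node is the open-pic node located with for_each_node_by_type() as in the hunk; error paths trimmed, and MPIC_BIG_ENDIAN would be OR'ed in when the node carries a "big-endian" property):

/* Sketch: bring up an MPIC whose base and ISU addresses are published
 * in the root node's "platform-open-pic" property. */
static void __init example_setup_mpic(struct device_node *mpic_node)
{
	struct device_node *root = of_find_node_by_path("/");
	int naddr = prom_n_addr_cells(root);
	int opplen, i, n;
	unsigned int *opprop = (unsigned int *)get_property(root,
					"platform-open-pic", &opplen);
	unsigned long openpic_addr = of_read_number(opprop, naddr);
	struct mpic *mpic;

	mpic = mpic_alloc(mpic_node, openpic_addr,
			  MPIC_PRIMARY | MPIC_BROKEN_U3 | MPIC_WANTS_RESET,
			  0, 0, " MPIC ");

	/* Every additional address in the property is an interrupt
	 * source unit living outside the main MPIC register block */
	opplen /= sizeof(u32);
	for (n = 0, i = naddr; i < opplen; i += naddr, n++)
		mpic_assign_isu(mpic, n, of_read_number(opprop + i, naddr));

	mpic_init(mpic);
	ppc_md.get_irq = mpic_get_irq;
	of_node_put(root);
}
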
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c index cb257aeb91f6..e63d52f227ee 100644 --- a/arch/powerpc/platforms/powermac/bootx_init.c +++ b/arch/powerpc/platforms/powermac/bootx_init.c | |||
@@ -162,6 +162,8 @@ static void __init bootx_add_chosen_props(unsigned long base, | |||
162 | { | 162 | { |
163 | u32 val; | 163 | u32 val; |
164 | 164 | ||
165 | bootx_dt_add_prop("linux,bootx", NULL, 0, mem_end); | ||
166 | |||
165 | if (bootx_info->kernelParamsOffset) { | 167 | if (bootx_info->kernelParamsOffset) { |
166 | char *args = (char *)((unsigned long)bootx_info) + | 168 | char *args = (char *)((unsigned long)bootx_info) + |
167 | bootx_info->kernelParamsOffset; | 169 | bootx_info->kernelParamsOffset; |
@@ -181,8 +183,25 @@ static void __init bootx_add_chosen_props(unsigned long base, | |||
181 | static void __init bootx_add_display_props(unsigned long base, | 183 | static void __init bootx_add_display_props(unsigned long base, |
182 | unsigned long *mem_end) | 184 | unsigned long *mem_end) |
183 | { | 185 | { |
186 | boot_infos_t *bi = bootx_info; | ||
187 | u32 tmp; | ||
188 | |||
184 | bootx_dt_add_prop("linux,boot-display", NULL, 0, mem_end); | 189 | bootx_dt_add_prop("linux,boot-display", NULL, 0, mem_end); |
185 | bootx_dt_add_prop("linux,opened", NULL, 0, mem_end); | 190 | bootx_dt_add_prop("linux,opened", NULL, 0, mem_end); |
191 | tmp = bi->dispDeviceDepth; | ||
192 | bootx_dt_add_prop("linux,bootx-depth", &tmp, 4, mem_end); | ||
193 | tmp = bi->dispDeviceRect[2] - bi->dispDeviceRect[0]; | ||
194 | bootx_dt_add_prop("linux,bootx-width", &tmp, 4, mem_end); | ||
195 | tmp = bi->dispDeviceRect[3] - bi->dispDeviceRect[1]; | ||
196 | bootx_dt_add_prop("linux,bootx-height", &tmp, 4, mem_end); | ||
197 | tmp = bi->dispDeviceRowBytes; | ||
198 | bootx_dt_add_prop("linux,bootx-linebytes", &tmp, 4, mem_end); | ||
199 | tmp = (u32)bi->dispDeviceBase; | ||
200 | if (tmp == 0) | ||
201 | tmp = (u32)bi->logicalDisplayBase; | ||
202 | tmp += bi->dispDeviceRect[1] * bi->dispDeviceRowBytes; | ||
203 | tmp += bi->dispDeviceRect[0] * ((bi->dispDeviceDepth + 7) / 8); | ||
204 | bootx_dt_add_prop("linux,bootx-addr", &tmp, 4, mem_end); | ||
186 | } | 205 | } |
187 | 206 | ||
188 | static void __init bootx_dt_add_string(char *s, unsigned long *mem_end) | 207 | static void __init bootx_dt_add_string(char *s, unsigned long *mem_end) |
@@ -211,7 +230,7 @@ static void __init bootx_scan_dt_build_strings(unsigned long base, | |||
211 | 230 | ||
212 | if (!strcmp(namep, "/chosen")) { | 231 | if (!strcmp(namep, "/chosen")) { |
213 | DBG(" detected /chosen ! adding properties names !\n"); | 232 | DBG(" detected /chosen ! adding properties names !\n"); |
214 | bootx_dt_add_string("linux,platform", mem_end); | 233 | bootx_dt_add_string("linux,bootx", mem_end); |
215 | bootx_dt_add_string("linux,stdout-path", mem_end); | 234 | bootx_dt_add_string("linux,stdout-path", mem_end); |
216 | bootx_dt_add_string("linux,initrd-start", mem_end); | 235 | bootx_dt_add_string("linux,initrd-start", mem_end); |
217 | bootx_dt_add_string("linux,initrd-end", mem_end); | 236 | bootx_dt_add_string("linux,initrd-end", mem_end); |
@@ -222,6 +241,11 @@ static void __init bootx_scan_dt_build_strings(unsigned long base, | |||
222 | DBG(" detected display ! adding properties names !\n"); | 241 | DBG(" detected display ! adding properties names !\n"); |
223 | bootx_dt_add_string("linux,boot-display", mem_end); | 242 | bootx_dt_add_string("linux,boot-display", mem_end); |
224 | bootx_dt_add_string("linux,opened", mem_end); | 243 | bootx_dt_add_string("linux,opened", mem_end); |
244 | bootx_dt_add_string("linux,bootx-depth", mem_end); | ||
245 | bootx_dt_add_string("linux,bootx-width", mem_end); | ||
246 | bootx_dt_add_string("linux,bootx-height", mem_end); | ||
247 | bootx_dt_add_string("linux,bootx-linebytes", mem_end); | ||
248 | bootx_dt_add_string("linux,bootx-addr", mem_end); | ||
225 | strncpy(bootx_disp_path, namep, 255); | 249 | strncpy(bootx_disp_path, namep, 255); |
226 | } | 250 | } |
227 | 251 | ||
@@ -443,7 +467,14 @@ void __init bootx_init(unsigned long r3, unsigned long r4) | |||
443 | if (!BOOT_INFO_IS_V2_COMPATIBLE(bi)) | 467 | if (!BOOT_INFO_IS_V2_COMPATIBLE(bi)) |
444 | bi->logicalDisplayBase = bi->dispDeviceBase; | 468 | bi->logicalDisplayBase = bi->dispDeviceBase; |
445 | 469 | ||
470 | /* Fixup depth 16 -> 15 as that's what MacOS calls 16bpp */ | ||
471 | if (bi->dispDeviceDepth == 16) | ||
472 | bi->dispDeviceDepth = 15; | ||
473 | |||
446 | #ifdef CONFIG_BOOTX_TEXT | 474 | #ifdef CONFIG_BOOTX_TEXT |
475 | ptr = (unsigned long)bi->logicalDisplayBase; | ||
476 | ptr += bi->dispDeviceRect[1] * bi->dispDeviceRowBytes; | ||
477 | ptr += bi->dispDeviceRect[0] * ((bi->dispDeviceDepth + 7) / 8); | ||
447 | btext_setup_display(bi->dispDeviceRect[2] - bi->dispDeviceRect[0], | 478 | btext_setup_display(bi->dispDeviceRect[2] - bi->dispDeviceRect[0], |
448 | bi->dispDeviceRect[3] - bi->dispDeviceRect[1], | 479 | bi->dispDeviceRect[3] - bi->dispDeviceRect[1], |
449 | bi->dispDeviceDepth, bi->dispDeviceRowBytes, | 480 | bi->dispDeviceDepth, bi->dispDeviceRowBytes, |
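
The bootx_init.c additions export the BootX display parameters as linux,bootx-* properties and compute the visible framebuffer address from the device base, the display rectangle, and the bytes per row. The arithmetic used twice in the hunks (once for the property, once for btext) is base + y0 * rowbytes + x0 * bytes_per_pixel; as a worked sketch (function name hypothetical, fields as in the patch):

/* Sketch: the framebuffer address computed in the hunks above.
 * dispDeviceRect[] is {x0, y0, x1, y1}; depth is in bits per pixel. */
static u32 example_bootx_fb_addr(boot_infos_t *bi)
{
	u32 addr = (u32)bi->dispDeviceBase;

	if (addr == 0)				/* fall back to the logical address */
		addr = (u32)bi->logicalDisplayBase;

	addr += bi->dispDeviceRect[1] * bi->dispDeviceRowBytes;          /* skip y0 rows */
	addr += bi->dispDeviceRect[0] * ((bi->dispDeviceDepth + 7) / 8); /* skip x0 pixels */
	return addr;
}
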
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index ceafaf52a668..8677f50c2586 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c | |||
@@ -522,10 +522,11 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np) | |||
522 | host->speed = KW_I2C_MODE_25KHZ; | 522 | host->speed = KW_I2C_MODE_25KHZ; |
523 | break; | 523 | break; |
524 | } | 524 | } |
525 | if (np->n_intrs > 0) | 525 | host->irq = irq_of_parse_and_map(np, 0); |
526 | host->irq = np->intrs[0].line; | 526 | if (host->irq == NO_IRQ) |
527 | else | 527 | printk(KERN_WARNING |
528 | host->irq = NO_IRQ; | 528 | "low_i2c: Failed to map interrupt for %s\n", |
529 | np->full_name); | ||
529 | 530 | ||
530 | host->base = ioremap((*addrp), 0x1000); | 531 | host->base = ioremap((*addrp), 0x1000); |
531 | if (host->base == NULL) { | 532 | if (host->base == NULL) { |
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c index 41fa2409482a..6a36ea9bf673 100644 --- a/arch/powerpc/platforms/powermac/nvram.c +++ b/arch/powerpc/platforms/powermac/nvram.c | |||
@@ -29,6 +29,8 @@ | |||
29 | #include <asm/machdep.h> | 29 | #include <asm/machdep.h> |
30 | #include <asm/nvram.h> | 30 | #include <asm/nvram.h> |
31 | 31 | ||
32 | #include "pmac.h" | ||
33 | |||
32 | #define DEBUG | 34 | #define DEBUG |
33 | 35 | ||
34 | #ifdef DEBUG | 36 | #ifdef DEBUG |
@@ -80,9 +82,6 @@ static int nvram_partitions[3]; | |||
80 | // XXX Turn that into a sem | 82 | // XXX Turn that into a sem |
81 | static DEFINE_SPINLOCK(nv_lock); | 83 | static DEFINE_SPINLOCK(nv_lock); |
82 | 84 | ||
83 | extern int pmac_newworld; | ||
84 | extern int system_running; | ||
85 | |||
86 | static int (*core99_write_bank)(int bank, u8* datas); | 85 | static int (*core99_write_bank)(int bank, u8* datas); |
87 | static int (*core99_erase_bank)(int bank); | 86 | static int (*core99_erase_bank)(int bank); |
88 | 87 | ||
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index d524a915aa86..556b349797e8 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c | |||
@@ -46,6 +46,9 @@ static int has_uninorth; | |||
46 | static struct pci_controller *u3_agp; | 46 | static struct pci_controller *u3_agp; |
47 | static struct pci_controller *u4_pcie; | 47 | static struct pci_controller *u4_pcie; |
48 | static struct pci_controller *u3_ht; | 48 | static struct pci_controller *u3_ht; |
49 | #define has_second_ohare 0 | ||
50 | #else | ||
51 | static int has_second_ohare; | ||
49 | #endif /* CONFIG_PPC64 */ | 52 | #endif /* CONFIG_PPC64 */ |
50 | 53 | ||
51 | extern u8 pci_cache_line_size; | 54 | extern u8 pci_cache_line_size; |
@@ -647,6 +650,33 @@ static void __init init_p2pbridge(void) | |||
647 | early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val); | 650 | early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val); |
648 | } | 651 | } |
649 | 652 | ||
653 | static void __init init_second_ohare(void) | ||
654 | { | ||
655 | struct device_node *np = of_find_node_by_name(NULL, "pci106b,7"); | ||
656 | unsigned char bus, devfn; | ||
657 | unsigned short cmd; | ||
658 | |||
659 | if (np == NULL) | ||
660 | return; | ||
661 | |||
662 | /* This must run before we initialize the PICs since the second | ||
663 | * ohare hosts a PIC that will be accessed there. | ||
664 | */ | ||
665 | if (pci_device_from_OF_node(np, &bus, &devfn) == 0) { | ||
666 | struct pci_controller* hose = | ||
667 | pci_find_hose_for_OF_device(np); | ||
668 | if (!hose) { | ||
669 | printk(KERN_ERR "Can't find PCI hose for OHare2 !\n"); | ||
670 | return; | ||
671 | } | ||
672 | early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd); | ||
673 | cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; | ||
674 | cmd &= ~PCI_COMMAND_IO; | ||
675 | early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd); | ||
676 | } | ||
677 | has_second_ohare = 1; | ||
678 | } | ||
679 | |||
650 | /* | 680 | /* |
651 | * Some Apple desktop machines have a NEC PD720100A USB2 controller | 681 | * Some Apple desktop machines have a NEC PD720100A USB2 controller |
652 | * on the motherboard. Open Firmware, on these, will disable the | 682 | * on the motherboard. Open Firmware, on these, will disable the |
@@ -688,9 +718,6 @@ static void __init fixup_nec_usb2(void) | |||
688 | " EHCI, fixing up...\n"); | 718 | " EHCI, fixing up...\n"); |
689 | data &= ~1UL; | 719 | data &= ~1UL; |
690 | early_write_config_dword(hose, bus, devfn, 0xe4, data); | 720 | early_write_config_dword(hose, bus, devfn, 0xe4, data); |
691 | early_write_config_byte(hose, bus, | ||
692 | devfn | 2, PCI_INTERRUPT_LINE, | ||
693 | nec->intrs[0].line); | ||
694 | } | 721 | } |
695 | } | 722 | } |
696 | } | 723 | } |
@@ -958,32 +985,28 @@ static int __init add_bridge(struct device_node *dev) | |||
958 | return 0; | 985 | return 0; |
959 | } | 986 | } |
960 | 987 | ||
961 | static void __init pcibios_fixup_OF_interrupts(void) | 988 | void __init pmac_pcibios_fixup(void) |
962 | { | 989 | { |
963 | struct pci_dev* dev = NULL; | 990 | struct pci_dev* dev = NULL; |
964 | 991 | ||
965 | /* | ||
966 | * Open Firmware often doesn't initialize the | ||
967 | * PCI_INTERRUPT_LINE config register properly, so we | ||
968 | * should find the device node and apply the interrupt | ||
969 | * obtained from the OF device-tree | ||
970 | */ | ||
971 | for_each_pci_dev(dev) { | 992 | for_each_pci_dev(dev) { |
972 | struct device_node *node; | 993 | /* Read interrupt from the device-tree */ |
973 | node = pci_device_to_OF_node(dev); | 994 | pci_read_irq_line(dev); |
974 | /* this is the node, see if it has interrupts */ | 995 | |
975 | if (node && node->n_intrs > 0) | 996 | /* Fixup interrupt for the modem/ethernet combo controller |
976 | dev->irq = node->intrs[0].line; | 997 | * on machines with a second ohare chip. |
977 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); | 998 | * The number in the device tree (27) is bogus (correct for |
999 | * the ethernet-only board but not the combo ethernet/modem | ||
1000 | * board). The real interrupt is 28 on the second controller | ||
1001 | * -> 28+32 = 60. | ||
1002 | */ | ||
1003 | if (has_second_ohare && | ||
1004 | dev->vendor == PCI_VENDOR_ID_DEC && | ||
1005 | dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) | ||
1006 | dev->irq = irq_create_mapping(NULL, 60, 0); | ||
978 | } | 1007 | } |
979 | } | 1008 | } |
980 | 1009 | ||
981 | void __init pmac_pcibios_fixup(void) | ||
982 | { | ||
983 | /* Fixup interrupts according to OF tree */ | ||
984 | pcibios_fixup_OF_interrupts(); | ||
985 | } | ||
986 | |||
987 | #ifdef CONFIG_PPC64 | 1010 | #ifdef CONFIG_PPC64 |
988 | static void __init pmac_fixup_phb_resources(void) | 1011 | static void __init pmac_fixup_phb_resources(void) |
989 | { | 1012 | { |
@@ -1071,6 +1094,7 @@ void __init pmac_pci_init(void) | |||
1071 | 1094 | ||
1072 | #else /* CONFIG_PPC64 */ | 1095 | #else /* CONFIG_PPC64 */ |
1073 | init_p2pbridge(); | 1096 | init_p2pbridge(); |
1097 | init_second_ohare(); | ||
1074 | fixup_nec_usb2(); | 1098 | fixup_nec_usb2(); |
1075 | 1099 | ||
1076 | /* We are still having some issues with the Xserve G4, enabling | 1100 | /* We are still having some issues with the Xserve G4, enabling |
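
pmac_pcibios_fixup() now lets the platform PCI code resolve every device's interrupt from the device-tree, keeping one quirk for the combo modem/ethernet card behind the second ohare: the tree says 27, but the real source is 28 on the second controller, whose sources sit above the first controller's 32, hence hwirq 28 + 32 = 60. Consolidated as a sketch (constants and calls as in the hunk):

/* Sketch: the per-device interrupt fixup after this patch. */
struct pci_dev *dev = NULL;

for_each_pci_dev(dev) {
	/* Let the platform PCI code parse the device-tree interrupt
	 * for this device and install the resulting virq in dev->irq */
	pci_read_irq_line(dev);

	/* Quirk: combo modem/ethernet card behind the second ohare.
	 * hwirq 60 = source 28 on the second PIC + the first PIC's 32
	 * sources; NULL selects the default irq host. */
	if (has_second_ohare &&
	    dev->vendor == PCI_VENDOR_ID_DEC &&
	    dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS)
		dev->irq = irq_create_mapping(NULL, 60, 0);
}
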
diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c index d6eab8b3f7de..6d66359ec8c8 100644 --- a/arch/powerpc/platforms/powermac/pfunc_base.c +++ b/arch/powerpc/platforms/powermac/pfunc_base.c | |||
@@ -24,19 +24,18 @@ static irqreturn_t macio_gpio_irq(int irq, void *data, struct pt_regs *regs) | |||
24 | 24 | ||
25 | static int macio_do_gpio_irq_enable(struct pmf_function *func) | 25 | static int macio_do_gpio_irq_enable(struct pmf_function *func) |
26 | { | 26 | { |
27 | if (func->node->n_intrs < 1) | 27 | unsigned int irq = irq_of_parse_and_map(func->node, 0); |
28 | if (irq == NO_IRQ) | ||
28 | return -EINVAL; | 29 | return -EINVAL; |
29 | 30 | return request_irq(irq, macio_gpio_irq, 0, func->node->name, func); | |
30 | return request_irq(func->node->intrs[0].line, macio_gpio_irq, 0, | ||
31 | func->node->name, func); | ||
32 | } | 31 | } |
33 | 32 | ||
34 | static int macio_do_gpio_irq_disable(struct pmf_function *func) | 33 | static int macio_do_gpio_irq_disable(struct pmf_function *func) |
35 | { | 34 | { |
36 | if (func->node->n_intrs < 1) | 35 | unsigned int irq = irq_of_parse_and_map(func->node, 0); |
36 | if (irq == NO_IRQ) | ||
37 | return -EINVAL; | 37 | return -EINVAL; |
38 | 38 | free_irq(irq, func); | |
39 | free_irq(func->node->intrs[0].line, func); | ||
40 | return 0; | 39 | return 0; |
41 | } | 40 | } |
42 | 41 | ||
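
pfunc_base.c now looks the GPIO interrupt up with irq_of_parse_and_map() each time instead of peeking at node->intrs[]. The request/free pairing under the new API, as a small stand-alone sketch (the handler and helper names are hypothetical; the calls are the ones used in the hunk):

/* Sketch: claim and release a device-tree interrupt under the new API. */
static irqreturn_t example_gpio_irq(int irq, void *data, struct pt_regs *regs)
{
	/* ... acknowledge the GPIO event for 'data' ... */
	return IRQ_HANDLED;
}

static int example_gpio_irq_enable(struct device_node *np, void *data)
{
	unsigned int irq = irq_of_parse_and_map(np, 0);

	if (irq == NO_IRQ)
		return -EINVAL;
	return request_irq(irq, example_gpio_irq, 0, np->name, data);
}

static void example_gpio_irq_disable(struct device_node *np, void *data)
{
	/* Parsing again returns the already-established mapping, so the
	 * virq does not need to be stored between enable and disable */
	free_irq(irq_of_parse_and_map(np, 0), data);
}
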
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index c9b09a9e6050..3d328bc1f7e0 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c | |||
@@ -65,39 +65,36 @@ static u32 level_mask[4]; | |||
65 | 65 | ||
66 | static DEFINE_SPINLOCK(pmac_pic_lock); | 66 | static DEFINE_SPINLOCK(pmac_pic_lock); |
67 | 67 | ||
68 | #define GATWICK_IRQ_POOL_SIZE 10 | ||
69 | static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE]; | ||
70 | |||
71 | #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) | 68 | #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) |
72 | static unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; | 69 | static unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; |
70 | static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; | ||
71 | static int pmac_irq_cascade = -1; | ||
72 | static struct irq_host *pmac_pic_host; | ||
73 | 73 | ||
74 | /* | 74 | static void __pmac_retrigger(unsigned int irq_nr) |
75 | * Mark an irq as "lost". This is only used on the pmac | ||
76 | * since it can lose interrupts (see pmac_set_irq_mask). | ||
77 | * -- Cort | ||
78 | */ | ||
79 | void __set_lost(unsigned long irq_nr, int nokick) | ||
80 | { | 75 | { |
81 | if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) { | 76 | if (irq_nr >= max_real_irqs && pmac_irq_cascade > 0) { |
77 | __set_bit(irq_nr, ppc_lost_interrupts); | ||
78 | irq_nr = pmac_irq_cascade; | ||
79 | mb(); | ||
80 | } | ||
81 | if (!__test_and_set_bit(irq_nr, ppc_lost_interrupts)) { | ||
82 | atomic_inc(&ppc_n_lost_interrupts); | 82 | atomic_inc(&ppc_n_lost_interrupts); |
83 | if (!nokick) | 83 | set_dec(1); |
84 | set_dec(1); | ||
85 | } | 84 | } |
86 | } | 85 | } |
87 | 86 | ||
88 | static void pmac_mask_and_ack_irq(unsigned int irq_nr) | 87 | static void pmac_mask_and_ack_irq(unsigned int virq) |
89 | { | 88 | { |
90 | unsigned long bit = 1UL << (irq_nr & 0x1f); | 89 | unsigned int src = irq_map[virq].hwirq; |
91 | int i = irq_nr >> 5; | 90 | unsigned long bit = 1UL << (virq & 0x1f); |
91 | int i = virq >> 5; | ||
92 | unsigned long flags; | 92 | unsigned long flags; |
93 | 93 | ||
94 | if ((unsigned)irq_nr >= max_irqs) | ||
95 | return; | ||
96 | |||
97 | clear_bit(irq_nr, ppc_cached_irq_mask); | ||
98 | if (test_and_clear_bit(irq_nr, ppc_lost_interrupts)) | ||
99 | atomic_dec(&ppc_n_lost_interrupts); | ||
100 | spin_lock_irqsave(&pmac_pic_lock, flags); | 94 | spin_lock_irqsave(&pmac_pic_lock, flags); |
95 | __clear_bit(src, ppc_cached_irq_mask); | ||
96 | if (__test_and_clear_bit(src, ppc_lost_interrupts)) | ||
97 | atomic_dec(&ppc_n_lost_interrupts); | ||
101 | out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]); | 98 | out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]); |
102 | out_le32(&pmac_irq_hw[i]->ack, bit); | 99 | out_le32(&pmac_irq_hw[i]->ack, bit); |
103 | do { | 100 | do { |
@@ -109,16 +106,29 @@ static void pmac_mask_and_ack_irq(unsigned int irq_nr) | |||
109 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | 106 | spin_unlock_irqrestore(&pmac_pic_lock, flags); |
110 | } | 107 | } |
111 | 108 | ||
112 | static void pmac_set_irq_mask(unsigned int irq_nr, int nokicklost) | 109 | static void pmac_ack_irq(unsigned int virq) |
110 | { | ||
111 | unsigned int src = irq_map[virq].hwirq; | ||
112 | unsigned long bit = 1UL << (src & 0x1f); | ||
113 | int i = src >> 5; | ||
114 | unsigned long flags; | ||
115 | |||
116 | spin_lock_irqsave(&pmac_pic_lock, flags); | ||
117 | if (__test_and_clear_bit(src, ppc_lost_interrupts)) | ||
118 | atomic_dec(&ppc_n_lost_interrupts); | ||
119 | out_le32(&pmac_irq_hw[i]->ack, bit); | ||
120 | (void)in_le32(&pmac_irq_hw[i]->ack); | ||
121 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | ||
122 | } | ||
123 | |||
124 | static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost) | ||
113 | { | 125 | { |
114 | unsigned long bit = 1UL << (irq_nr & 0x1f); | 126 | unsigned long bit = 1UL << (irq_nr & 0x1f); |
115 | int i = irq_nr >> 5; | 127 | int i = irq_nr >> 5; |
116 | unsigned long flags; | ||
117 | 128 | ||
118 | if ((unsigned)irq_nr >= max_irqs) | 129 | if ((unsigned)irq_nr >= max_irqs) |
119 | return; | 130 | return; |
120 | 131 | ||
121 | spin_lock_irqsave(&pmac_pic_lock, flags); | ||
122 | /* enable unmasked interrupts */ | 132 | /* enable unmasked interrupts */ |
123 | out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]); | 133 | out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]); |
124 | 134 | ||
@@ -135,71 +145,78 @@ static void pmac_set_irq_mask(unsigned int irq_nr, int nokicklost) | |||
135 | * the bit in the flag register or request another interrupt. | 145 | * the bit in the flag register or request another interrupt. |
136 | */ | 146 | */ |
137 | if (bit & ppc_cached_irq_mask[i] & in_le32(&pmac_irq_hw[i]->level)) | 147 | if (bit & ppc_cached_irq_mask[i] & in_le32(&pmac_irq_hw[i]->level)) |
138 | __set_lost((ulong)irq_nr, nokicklost); | 148 | __pmac_retrigger(irq_nr); |
139 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | ||
140 | } | 149 | } |
141 | 150 | ||
142 | /* When an irq gets requested for the first client, if it's an | 151 | /* When an irq gets requested for the first client, if it's an |
143 | * edge interrupt, we clear any previous one on the controller | 152 | * edge interrupt, we clear any previous one on the controller |
144 | */ | 153 | */ |
145 | static unsigned int pmac_startup_irq(unsigned int irq_nr) | 154 | static unsigned int pmac_startup_irq(unsigned int virq) |
146 | { | 155 | { |
147 | unsigned long bit = 1UL << (irq_nr & 0x1f); | 156 | unsigned long flags; |
148 | int i = irq_nr >> 5; | 157 | unsigned int src = irq_map[virq].hwirq; |
158 | unsigned long bit = 1UL << (src & 0x1f); | ||
159 | int i = src >> 5; | ||
149 | 160 | ||
150 | if ((irq_desc[irq_nr].status & IRQ_LEVEL) == 0) | 161 | spin_lock_irqsave(&pmac_pic_lock, flags); |
162 | if ((irq_desc[virq].status & IRQ_LEVEL) == 0) | ||
151 | out_le32(&pmac_irq_hw[i]->ack, bit); | 163 | out_le32(&pmac_irq_hw[i]->ack, bit); |
152 | set_bit(irq_nr, ppc_cached_irq_mask); | 164 | __set_bit(src, ppc_cached_irq_mask); |
153 | pmac_set_irq_mask(irq_nr, 0); | 165 | __pmac_set_irq_mask(src, 0); |
166 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | ||
154 | 167 | ||
155 | return 0; | 168 | return 0; |
156 | } | 169 | } |
157 | 170 | ||
158 | static void pmac_mask_irq(unsigned int irq_nr) | 171 | static void pmac_mask_irq(unsigned int virq) |
159 | { | 172 | { |
160 | clear_bit(irq_nr, ppc_cached_irq_mask); | 173 | unsigned long flags; |
161 | pmac_set_irq_mask(irq_nr, 0); | 174 | unsigned int src = irq_map[virq].hwirq; |
162 | mb(); | 175 | |
176 | spin_lock_irqsave(&pmac_pic_lock, flags); | ||
177 | __clear_bit(src, ppc_cached_irq_mask); | ||
178 | __pmac_set_irq_mask(src, 0); | ||
179 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | ||
163 | } | 180 | } |
164 | 181 | ||
165 | static void pmac_unmask_irq(unsigned int irq_nr) | 182 | static void pmac_unmask_irq(unsigned int virq) |
166 | { | 183 | { |
167 | set_bit(irq_nr, ppc_cached_irq_mask); | 184 | unsigned long flags; |
168 | pmac_set_irq_mask(irq_nr, 0); | 185 | unsigned int src = irq_map[virq].hwirq; |
186 | |||
187 | spin_lock_irqsave(&pmac_pic_lock, flags); | ||
188 | __set_bit(src, ppc_cached_irq_mask); | ||
189 | __pmac_set_irq_mask(src, 0); | ||
190 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | ||
169 | } | 191 | } |
170 | 192 | ||
171 | static void pmac_end_irq(unsigned int irq_nr) | 193 | static int pmac_retrigger(unsigned int virq) |
172 | { | 194 | { |
173 | if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS)) | 195 | unsigned long flags; |
174 | && irq_desc[irq_nr].action) { | ||
175 | set_bit(irq_nr, ppc_cached_irq_mask); | ||
176 | pmac_set_irq_mask(irq_nr, 1); | ||
177 | } | ||
178 | } | ||
179 | 196 | ||
197 | spin_lock_irqsave(&pmac_pic_lock, flags); | ||
198 | __pmac_retrigger(irq_map[virq].hwirq); | ||
199 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | ||
200 | return 1; | ||
201 | } | ||
180 | 202 | ||
181 | struct hw_interrupt_type pmac_pic = { | 203 | static struct irq_chip pmac_pic = { |
182 | .typename = " PMAC-PIC ", | 204 | .typename = " PMAC-PIC ", |
183 | .startup = pmac_startup_irq, | 205 | .startup = pmac_startup_irq, |
184 | .enable = pmac_unmask_irq, | 206 | .mask = pmac_mask_irq, |
185 | .disable = pmac_mask_irq, | 207 | .ack = pmac_ack_irq, |
186 | .ack = pmac_mask_and_ack_irq, | 208 | .mask_ack = pmac_mask_and_ack_irq, |
187 | .end = pmac_end_irq, | 209 | .unmask = pmac_unmask_irq, |
188 | }; | 210 | .retrigger = pmac_retrigger, |
189 | |||
190 | struct hw_interrupt_type gatwick_pic = { | ||
191 | .typename = " GATWICK ", | ||
192 | .startup = pmac_startup_irq, | ||
193 | .enable = pmac_unmask_irq, | ||
194 | .disable = pmac_mask_irq, | ||
195 | .ack = pmac_mask_and_ack_irq, | ||
196 | .end = pmac_end_irq, | ||
197 | }; | 211 | }; |
198 | 212 | ||
199 | static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs) | 213 | static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs) |
200 | { | 214 | { |
215 | unsigned long flags; | ||
201 | int irq, bits; | 216 | int irq, bits; |
217 | int rc = IRQ_NONE; | ||
202 | 218 | ||
219 | spin_lock_irqsave(&pmac_pic_lock, flags); | ||
203 | for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) { | 220 | for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) { |
204 | int i = irq >> 5; | 221 | int i = irq >> 5; |
205 | bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i]; | 222 | bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i]; |
@@ -209,17 +226,20 @@ static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs) | |||
209 | if (bits == 0) | 226 | if (bits == 0) |
210 | continue; | 227 | continue; |
211 | irq += __ilog2(bits); | 228 | irq += __ilog2(bits); |
229 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | ||
212 | __do_IRQ(irq, regs); | 230 | __do_IRQ(irq, regs); |
213 | return IRQ_HANDLED; | 231 | spin_lock_irqsave(&pmac_pic_lock, flags); |
232 | rc = IRQ_HANDLED; | ||
214 | } | 233 | } |
215 | printk("gatwick irq not from gatwick pic\n"); | 234 | spin_unlock_irqrestore(&pmac_pic_lock, flags); |
216 | return IRQ_NONE; | 235 | return rc; |
217 | } | 236 | } |
218 | 237 | ||
219 | static int pmac_get_irq(struct pt_regs *regs) | 238 | static unsigned int pmac_pic_get_irq(struct pt_regs *regs) |
220 | { | 239 | { |
221 | int irq; | 240 | int irq; |
222 | unsigned long bits = 0; | 241 | unsigned long bits = 0; |
242 | unsigned long flags; | ||
223 | 243 | ||
224 | #ifdef CONFIG_SMP | 244 | #ifdef CONFIG_SMP |
225 | void psurge_smp_message_recv(struct pt_regs *); | 245 | void psurge_smp_message_recv(struct pt_regs *); |
@@ -227,9 +247,10 @@ static int pmac_get_irq(struct pt_regs *regs) | |||
227 | /* IPI's are a hack on the powersurge -- Cort */ | 247 | /* IPI's are a hack on the powersurge -- Cort */ |
228 | if ( smp_processor_id() != 0 ) { | 248 | if ( smp_processor_id() != 0 ) { |
229 | psurge_smp_message_recv(regs); | 249 | psurge_smp_message_recv(regs); |
230 | return -2; /* ignore, already handled */ | 250 | return NO_IRQ_IGNORE; /* ignore, already handled */ |
231 | } | 251 | } |
232 | #endif /* CONFIG_SMP */ | 252 | #endif /* CONFIG_SMP */ |
253 | spin_lock_irqsave(&pmac_pic_lock, flags); | ||
233 | for (irq = max_real_irqs; (irq -= 32) >= 0; ) { | 254 | for (irq = max_real_irqs; (irq -= 32) >= 0; ) { |
234 | int i = irq >> 5; | 255 | int i = irq >> 5; |
235 | bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i]; | 256 | bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i]; |
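
The rework of pmac_get_irq() into pmac_pic_get_irq() keeps the classic scan of the event registers but returns a Linux virq by running the hardware source number through the PIC's linear reverse map (see the following hunk). Roughly, and with the bank masking abbreviated, the scan does this (names as in the patch, helper name hypothetical):

/* Sketch: find the highest pending source and translate it to a virq. */
static unsigned int example_pmac_get_irq(void)
{
	int hwirq = -1;
	int irq;

	for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
		int i = irq >> 5;
		unsigned long bits;

		/* pending events, plus anything we marked as "lost" */
		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
		bits &= ppc_cached_irq_mask[i];	/* only unmasked sources */
		if (bits == 0)
			continue;
		hwirq = irq + __ilog2(bits);	/* highest set bit in this bank */
		break;
	}
	if (hwirq < 0)
		return NO_IRQ;
	/* hardware source number -> virq via the PIC's linear revmap */
	return irq_linear_revmap(pmac_pic_host, hwirq);
}
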
@@ -241,133 +262,10 @@ static int pmac_get_irq(struct pt_regs *regs) | |||
241 | irq += __ilog2(bits); | 262 | irq += __ilog2(bits); |
242 | break; | 263 | break; |
243 | } | 264 | } |
244 | 265 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | |
245 | return irq; | 266 | if (unlikely(irq < 0)) |
246 | } | 267 | return NO_IRQ; |
247 | 268 | return irq_linear_revmap(pmac_pic_host, irq); | |
248 | /* This routine will fix some missing interrupt values in the device tree | ||
249 | * on the gatwick mac-io controller used by some PowerBooks | ||
250 | * | ||
251 | * Walking of OF nodes could use a bit more fixing up here, but it's not | ||
252 | * very important as this is all boot time code on static portions of the | ||
253 | * device-tree. | ||
254 | * | ||
255 | * However, the modifications done to "intrs" will have to be removed and | ||
256 | * replaced with proper updates of the "interrupts" properties or | ||
257 | * AAPL,interrupts, yet to be decided, once the dynamic parsing is there. | ||
258 | */ | ||
259 | static void __init pmac_fix_gatwick_interrupts(struct device_node *gw, | ||
260 | int irq_base) | ||
261 | { | ||
262 | struct device_node *node; | ||
263 | int count; | ||
264 | |||
265 | memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool)); | ||
266 | count = 0; | ||
267 | for (node = NULL; (node = of_get_next_child(gw, node)) != NULL;) { | ||
268 | /* Fix SCC */ | ||
269 | if ((strcasecmp(node->name, "escc") == 0) && node->child) { | ||
270 | if (node->child->n_intrs < 3) { | ||
271 | node->child->intrs = &gatwick_int_pool[count]; | ||
272 | count += 3; | ||
273 | } | ||
274 | node->child->n_intrs = 3; | ||
275 | node->child->intrs[0].line = 15+irq_base; | ||
276 | node->child->intrs[1].line = 4+irq_base; | ||
277 | node->child->intrs[2].line = 5+irq_base; | ||
278 | printk(KERN_INFO "irq: fixed SCC on gatwick" | ||
279 | " (%d,%d,%d)\n", | ||
280 | node->child->intrs[0].line, | ||
281 | node->child->intrs[1].line, | ||
282 | node->child->intrs[2].line); | ||
283 | } | ||
284 | /* Fix media-bay & left SWIM */ | ||
285 | if (strcasecmp(node->name, "media-bay") == 0) { | ||
286 | struct device_node* ya_node; | ||
287 | |||
288 | if (node->n_intrs == 0) | ||
289 | node->intrs = &gatwick_int_pool[count++]; | ||
290 | node->n_intrs = 1; | ||
291 | node->intrs[0].line = 29+irq_base; | ||
292 | printk(KERN_INFO "irq: fixed media-bay on gatwick" | ||
293 | " (%d)\n", node->intrs[0].line); | ||
294 | |||
295 | ya_node = node->child; | ||
296 | while(ya_node) { | ||
297 | if (strcasecmp(ya_node->name, "floppy") == 0) { | ||
298 | if (ya_node->n_intrs < 2) { | ||
299 | ya_node->intrs = &gatwick_int_pool[count]; | ||
300 | count += 2; | ||
301 | } | ||
302 | ya_node->n_intrs = 2; | ||
303 | ya_node->intrs[0].line = 19+irq_base; | ||
304 | ya_node->intrs[1].line = 1+irq_base; | ||
305 | printk(KERN_INFO "irq: fixed floppy on second controller (%d,%d)\n", | ||
306 | ya_node->intrs[0].line, ya_node->intrs[1].line); | ||
307 | } | ||
308 | if (strcasecmp(ya_node->name, "ata4") == 0) { | ||
309 | if (ya_node->n_intrs < 2) { | ||
310 | ya_node->intrs = &gatwick_int_pool[count]; | ||
311 | count += 2; | ||
312 | } | ||
313 | ya_node->n_intrs = 2; | ||
314 | ya_node->intrs[0].line = 14+irq_base; | ||
315 | ya_node->intrs[1].line = 3+irq_base; | ||
316 | printk(KERN_INFO "irq: fixed ide on second controller (%d,%d)\n", | ||
317 | ya_node->intrs[0].line, ya_node->intrs[1].line); | ||
318 | } | ||
319 | ya_node = ya_node->sibling; | ||
320 | } | ||
321 | } | ||
322 | } | ||
323 | if (count > 10) { | ||
324 | printk("WARNING !! Gatwick interrupt pool overflow\n"); | ||
325 | printk(" GATWICK_IRQ_POOL_SIZE = %d\n", GATWICK_IRQ_POOL_SIZE); | ||
326 | printk(" requested = %d\n", count); | ||
327 | } | ||
328 | } | ||
329 | |||
330 | /* | ||
331 | * The PowerBook 3400/2400/3500 can have a combo ethernet/modem | ||
332 | * card which includes an ohare chip that acts as a second interrupt | ||
333 | * controller. If we find this second ohare, set it up and fix the | ||
334 | * interrupt value in the device tree for the ethernet chip. | ||
335 | */ | ||
336 | static void __init enable_second_ohare(struct device_node *np) | ||
337 | { | ||
338 | unsigned char bus, devfn; | ||
339 | unsigned short cmd; | ||
340 | struct device_node *ether; | ||
341 | |||
342 | /* This code doesn't strictly belong here, it could be part of | ||
343 | * either the PCI initialisation or the feature code. It's kept | ||
344 | * here for historical reasons. | ||
345 | */ | ||
346 | if (pci_device_from_OF_node(np, &bus, &devfn) == 0) { | ||
347 | struct pci_controller* hose = | ||
348 | pci_find_hose_for_OF_device(np); | ||
349 | if (!hose) { | ||
350 | printk(KERN_ERR "Can't find PCI hose for OHare2 !\n"); | ||
351 | return; | ||
352 | } | ||
353 | early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd); | ||
354 | cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; | ||
355 | cmd &= ~PCI_COMMAND_IO; | ||
356 | early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd); | ||
357 | } | ||
358 | |||
359 | /* Fix interrupt for the modem/ethernet combo controller. The number | ||
360 | * in the device tree (27) is bogus (correct for the ethernet-only | ||
361 | * board but not the combo ethernet/modem board). | ||
362 | * The real interrupt is 28 on the second controller -> 28+32 = 60. | ||
363 | */ | ||
364 | ether = of_find_node_by_name(NULL, "pci1011,14"); | ||
365 | if (ether && ether->n_intrs > 0) { | ||
366 | ether->intrs[0].line = 60; | ||
367 | printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n", | ||
368 | ether->intrs[0].line); | ||
369 | } | ||
370 | of_node_put(ether); | ||
371 | } | 269 | } |
372 | 270 | ||
373 | #ifdef CONFIG_XMON | 271 | #ifdef CONFIG_XMON |
@@ -386,17 +284,60 @@ static struct irqaction gatwick_cascade_action = { | |||
386 | .name = "cascade", | 284 | .name = "cascade", |
387 | }; | 285 | }; |
388 | 286 | ||
287 | static int pmac_pic_host_match(struct irq_host *h, struct device_node *node) | ||
288 | { | ||
289 | /* We match all, we don't always have a node anyway */ | ||
290 | return 1; | ||
291 | } | ||
292 | |||
293 | static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, | ||
294 | irq_hw_number_t hw, unsigned int flags) | ||
295 | { | ||
296 | struct irq_desc *desc = get_irq_desc(virq); | ||
297 | int level; | ||
298 | |||
299 | if (hw >= max_irqs) | ||
300 | return -EINVAL; | ||
301 | |||
302 | /* Mark level interrupts, set delayed disable for edge ones and set | ||
303 | * handlers | ||
304 | */ | ||
305 | level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f))); | ||
306 | if (level) | ||
307 | desc->status |= IRQ_LEVEL; | ||
308 | else | ||
309 | desc->status |= IRQ_DELAYED_DISABLE; | ||
310 | set_irq_chip_and_handler(virq, &pmac_pic, level ? | ||
311 | handle_level_irq : handle_edge_irq); | ||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | static int pmac_pic_host_xlate(struct irq_host *h, struct device_node *ct, | ||
316 | u32 *intspec, unsigned int intsize, | ||
317 | irq_hw_number_t *out_hwirq, | ||
318 | unsigned int *out_flags) | ||
319 | |||
320 | { | ||
321 | *out_hwirq = *intspec; | ||
322 | return 0; | ||
323 | } | ||
324 | |||
325 | static struct irq_host_ops pmac_pic_host_ops = { | ||
326 | .match = pmac_pic_host_match, | ||
327 | .map = pmac_pic_host_map, | ||
328 | .xlate = pmac_pic_host_xlate, | ||
329 | }; | ||
330 | |||
389 | static void __init pmac_pic_probe_oldstyle(void) | 331 | static void __init pmac_pic_probe_oldstyle(void) |
390 | { | 332 | { |
391 | int i; | 333 | int i; |
392 | int irq_cascade = -1; | ||
393 | struct device_node *master = NULL; | 334 | struct device_node *master = NULL; |
394 | struct device_node *slave = NULL; | 335 | struct device_node *slave = NULL; |
395 | u8 __iomem *addr; | 336 | u8 __iomem *addr; |
396 | struct resource r; | 337 | struct resource r; |
397 | 338 | ||
398 | /* Set our get_irq function */ | 339 | /* Set our get_irq function */ |
399 | ppc_md.get_irq = pmac_get_irq; | 340 | ppc_md.get_irq = pmac_pic_get_irq; |
400 | 341 | ||
401 | /* | 342 | /* |
402 | * Find the interrupt controller type & node | 343 | * Find the interrupt controller type & node |
@@ -414,7 +355,6 @@ static void __init pmac_pic_probe_oldstyle(void) | |||
414 | if (slave) { | 355 | if (slave) { |
415 | max_irqs = 64; | 356 | max_irqs = 64; |
416 | level_mask[1] = OHARE_LEVEL_MASK; | 357 | level_mask[1] = OHARE_LEVEL_MASK; |
417 | enable_second_ohare(slave); | ||
418 | } | 358 | } |
419 | } else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) { | 359 | } else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) { |
420 | max_irqs = max_real_irqs = 64; | 360 | max_irqs = max_real_irqs = 64; |
@@ -438,14 +378,18 @@ static void __init pmac_pic_probe_oldstyle(void) | |||
438 | max_irqs = 128; | 378 | max_irqs = 128; |
439 | level_mask[2] = HEATHROW_LEVEL_MASK; | 379 | level_mask[2] = HEATHROW_LEVEL_MASK; |
440 | level_mask[3] = 0; | 380 | level_mask[3] = 0; |
441 | pmac_fix_gatwick_interrupts(slave, max_real_irqs); | ||
442 | } | 381 | } |
443 | } | 382 | } |
444 | BUG_ON(master == NULL); | 383 | BUG_ON(master == NULL); |
445 | 384 | ||
446 | /* Set the handler for the main PIC */ | 385 | /* |
447 | for ( i = 0; i < max_real_irqs ; i++ ) | 386 | * Allocate an irq host |
448 | irq_desc[i].chip = &pmac_pic; | 387 | */ |
388 | pmac_pic_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, max_irqs, | ||
389 | &pmac_pic_host_ops, | ||
390 | max_irqs); | ||
391 | BUG_ON(pmac_pic_host == NULL); | ||
392 | irq_set_default_host(pmac_pic_host); | ||
449 | 393 | ||
450 | /* Get addresses of first controller if we have a node for it */ | 394 | /* Get addresses of first controller if we have a node for it */ |
451 | BUG_ON(of_address_to_resource(master, 0, &r)); | 395 | BUG_ON(of_address_to_resource(master, 0, &r)); |
@@ -472,39 +416,38 @@ static void __init pmac_pic_probe_oldstyle(void) | |||
472 | pmac_irq_hw[i++] = | 416 | pmac_irq_hw[i++] = |
473 | (volatile struct pmac_irq_hw __iomem *) | 417 | (volatile struct pmac_irq_hw __iomem *) |
474 | (addr + 0x10); | 418 | (addr + 0x10); |
475 | irq_cascade = slave->intrs[0].line; | 419 | pmac_irq_cascade = irq_of_parse_and_map(slave, 0); |
476 | 420 | ||
477 | printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs" | 421 | printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs" |
478 | " cascade: %d\n", slave->full_name, | 422 | " cascade: %d\n", slave->full_name, |
479 | max_irqs - max_real_irqs, irq_cascade); | 423 | max_irqs - max_real_irqs, pmac_irq_cascade); |
480 | } | 424 | } |
481 | of_node_put(slave); | 425 | of_node_put(slave); |
482 | 426 | ||
483 | /* disable all interrupts in all controllers */ | 427 | /* Disable all interrupts in all controllers */ |
484 | for (i = 0; i * 32 < max_irqs; ++i) | 428 | for (i = 0; i * 32 < max_irqs; ++i) |
485 | out_le32(&pmac_irq_hw[i]->enable, 0); | 429 | out_le32(&pmac_irq_hw[i]->enable, 0); |
486 | 430 | ||
487 | /* mark level interrupts */ | 431 | /* Hookup cascade irq */ |
488 | for (i = 0; i < max_irqs; i++) | 432 | if (slave && pmac_irq_cascade != NO_IRQ) |
489 | if (level_mask[i >> 5] & (1UL << (i & 0x1f))) | 433 | setup_irq(pmac_irq_cascade, &gatwick_cascade_action); |
490 | irq_desc[i].status = IRQ_LEVEL; | ||
491 | 434 | ||
492 | /* Setup handlers for secondary controller and hook cascade irq*/ | ||
493 | if (slave) { | ||
494 | for ( i = max_real_irqs ; i < max_irqs ; i++ ) | ||
495 | irq_desc[i].chip = &gatwick_pic; | ||
496 | setup_irq(irq_cascade, &gatwick_cascade_action); | ||
497 | } | ||
498 | printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs); | 435 | printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs); |
499 | #ifdef CONFIG_XMON | 436 | #ifdef CONFIG_XMON |
500 | setup_irq(20, &xmon_action); | 437 | setup_irq(irq_create_mapping(NULL, 20, 0), &xmon_action); |
501 | #endif | 438 | #endif |
502 | } | 439 | } |
503 | #endif /* CONFIG_PPC32 */ | 440 | #endif /* CONFIG_PPC32 */ |
504 | 441 | ||
505 | static int pmac_u3_cascade(struct pt_regs *regs, void *data) | 442 | static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc, |
443 | struct pt_regs *regs) | ||
506 | { | 444 | { |
507 | return mpic_get_one_irq((struct mpic *)data, regs); | 445 | struct mpic *mpic = desc->handler_data; |
446 | |||
447 | unsigned int cascade_irq = mpic_get_one_irq(mpic, regs); | ||
448 | if (cascade_irq != NO_IRQ) | ||
449 | generic_handle_irq(cascade_irq, regs); | ||
450 | desc->chip->eoi(irq); | ||
508 | } | 451 | } |
509 | 452 | ||
510 | static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic) | 453 | static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic) |
@@ -514,21 +457,20 @@ static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic) | |||
514 | int nmi_irq; | 457 | int nmi_irq; |
515 | 458 | ||
516 | pswitch = of_find_node_by_name(NULL, "programmer-switch"); | 459 | pswitch = of_find_node_by_name(NULL, "programmer-switch"); |
517 | if (pswitch && pswitch->n_intrs) { | 460 | if (pswitch) { |
518 | nmi_irq = pswitch->intrs[0].line; | 461 | nmi_irq = irq_of_parse_and_map(pswitch, 0); |
519 | mpic_irq_set_priority(nmi_irq, 9); | 462 | if (nmi_irq != NO_IRQ) { |
520 | setup_irq(nmi_irq, &xmon_action); | 463 | mpic_irq_set_priority(nmi_irq, 9); |
464 | setup_irq(nmi_irq, &xmon_action); | ||
465 | } | ||
466 | of_node_put(pswitch); | ||
521 | } | 467 | } |
522 | of_node_put(pswitch); | ||
523 | #endif /* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */ | 468 | #endif /* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */ |
524 | } | 469 | } |
525 | 470 | ||
526 | static struct mpic * __init pmac_setup_one_mpic(struct device_node *np, | 471 | static struct mpic * __init pmac_setup_one_mpic(struct device_node *np, |
527 | int master) | 472 | int master) |
528 | { | 473 | { |
529 | unsigned char senses[128]; | ||
530 | int offset = master ? 0 : 128; | ||
531 | int count = master ? 128 : 124; | ||
532 | const char *name = master ? " MPIC 1 " : " MPIC 2 "; | 474 | const char *name = master ? " MPIC 1 " : " MPIC 2 "; |
533 | struct resource r; | 475 | struct resource r; |
534 | struct mpic *mpic; | 476 | struct mpic *mpic; |
@@ -541,8 +483,6 @@ static struct mpic * __init pmac_setup_one_mpic(struct device_node *np, | |||
541 | 483 | ||
542 | pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0); | 484 | pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0); |
543 | 485 | ||
544 | prom_get_irq_senses(senses, offset, offset + count); | ||
545 | |||
546 | flags |= MPIC_WANTS_RESET; | 486 | flags |= MPIC_WANTS_RESET; |
547 | if (get_property(np, "big-endian", NULL)) | 487 | if (get_property(np, "big-endian", NULL)) |
548 | flags |= MPIC_BIG_ENDIAN; | 488 | flags |= MPIC_BIG_ENDIAN; |
@@ -553,8 +493,7 @@ static struct mpic * __init pmac_setup_one_mpic(struct device_node *np, | |||
553 | if (master && (flags & MPIC_BIG_ENDIAN)) | 493 | if (master && (flags & MPIC_BIG_ENDIAN)) |
554 | flags |= MPIC_BROKEN_U3; | 494 | flags |= MPIC_BROKEN_U3; |
555 | 495 | ||
556 | mpic = mpic_alloc(r.start, flags, 0, offset, count, master ? 252 : 0, | 496 | mpic = mpic_alloc(np, r.start, flags, 0, 0, name); |
557 | senses, count, name); | ||
558 | if (mpic == NULL) | 497 | if (mpic == NULL) |
559 | return NULL; | 498 | return NULL; |
560 | 499 | ||
@@ -567,6 +506,7 @@ static int __init pmac_pic_probe_mpic(void) | |||
567 | { | 506 | { |
568 | struct mpic *mpic1, *mpic2; | 507 | struct mpic *mpic1, *mpic2; |
569 | struct device_node *np, *master = NULL, *slave = NULL; | 508 | struct device_node *np, *master = NULL, *slave = NULL; |
509 | unsigned int cascade; | ||
570 | 510 | ||
571 | /* We can have up to 2 MPICs cascaded */ | 511 | /* We can have up to 2 MPICs cascaded */ |
572 | for (np = NULL; (np = of_find_node_by_type(np, "open-pic")) | 512 | for (np = NULL; (np = of_find_node_by_type(np, "open-pic")) |
@@ -603,8 +543,15 @@ static int __init pmac_pic_probe_mpic(void) | |||
603 | of_node_put(master); | 543 | of_node_put(master); |
604 | 544 | ||
605 | /* No slave, let's go out */ | 545 | /* No slave, let's go out */ |
606 | if (slave == NULL || slave->n_intrs < 1) | 546 | if (slave == NULL) |
547 | return 0; | ||
548 | |||
549 | /* Get/Map slave interrupt */ | ||
550 | cascade = irq_of_parse_and_map(slave, 0); | ||
551 | if (cascade == NO_IRQ) { | ||
552 | printk(KERN_ERR "Failed to map cascade IRQ\n"); | ||
607 | return 0; | 553 | return 0; |
554 | } | ||
608 | 555 | ||
609 | mpic2 = pmac_setup_one_mpic(slave, 0); | 556 | mpic2 = pmac_setup_one_mpic(slave, 0); |
610 | if (mpic2 == NULL) { | 557 | if (mpic2 == NULL) { |
@@ -612,7 +559,8 @@ static int __init pmac_pic_probe_mpic(void) | |||
612 | of_node_put(slave); | 559 | of_node_put(slave); |
613 | return 0; | 560 | return 0; |
614 | } | 561 | } |
615 | mpic_setup_cascade(slave->intrs[0].line, pmac_u3_cascade, mpic2); | 562 | set_irq_data(cascade, mpic2); |
563 | set_irq_chained_handler(cascade, pmac_u3_cascade); | ||
616 | 564 | ||
617 | of_node_put(slave); | 565 | of_node_put(slave); |
618 | return 0; | 566 | return 0; |
@@ -621,6 +569,19 @@ static int __init pmac_pic_probe_mpic(void) | |||
621 | 569 | ||
622 | void __init pmac_pic_init(void) | 570 | void __init pmac_pic_init(void) |
623 | { | 571 | { |
572 | unsigned int flags = 0; | ||
573 | |||
574 | /* We configure the OF parsing based on our oldworld vs. newworld | ||
575 | * platform type and whether we were booted by BootX. | ||
576 | */ | ||
577 | #ifdef CONFIG_PPC32 | ||
578 | if (!pmac_newworld) | ||
579 | flags |= OF_IMAP_OLDWORLD_MAC; | ||
580 | if (get_property(of_chosen, "linux,bootx", NULL) != NULL) | ||
581 | flags |= OF_IMAP_NO_PHANDLE; | ||
582 | of_irq_map_init(flags); | ||
583 | #endif /* CONFIG_PPC32 */ | ||
584 | |||
624 | /* We first try to detect Apple's new Core99 chipset, since mac-io | 585 | /* We first try to detect Apple's new Core99 chipset, since mac-io |
625 | * is quite different on those machines and contains an IBM MPIC2. | 586 | * is quite different on those machines and contains an IBM MPIC2. |
626 | */ | 587 | */ |
@@ -643,6 +604,7 @@ unsigned long sleep_save_mask[2]; | |||
643 | 604 | ||
644 | /* This used to be passed by the PMU driver but that link got | 605 | /* This used to be passed by the PMU driver but that link got |
645 | * broken with the new driver model. We use this tweak for now... | 606 | * broken with the new driver model. We use this tweak for now... |
607 | * We really want to do things differently though... | ||
646 | */ | 608 | */ |
647 | static int pmacpic_find_viaint(void) | 609 | static int pmacpic_find_viaint(void) |
648 | { | 610 | { |
@@ -656,7 +618,7 @@ static int pmacpic_find_viaint(void) | |||
656 | np = of_find_node_by_name(NULL, "via-pmu"); | 618 | np = of_find_node_by_name(NULL, "via-pmu"); |
657 | if (np == NULL) | 619 | if (np == NULL) |
658 | goto not_found; | 620 | goto not_found; |
659 | viaint = np->intrs[0].line; | 621 | viaint = irq_of_parse_and_map(np, 0); |
660 | #endif /* CONFIG_ADB_PMU */ | 622 | #endif /* CONFIG_ADB_PMU */ |
661 | 623 | ||
662 | not_found: | 624 | not_found: |
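
The conversion in pic.c above replaces direct reads of the flattened device-tree interrupt fields (np->intrs[0].line) with irq_of_parse_and_map(), which walks the interrupt tree and hands back a virtual irq number backed by the new irq_host layer. A minimal sketch of the consumer-side pattern, assuming the usual includes; the helper name and handler are illustrative only and not part of this diff:

	#include <linux/init.h>
	#include <linux/errno.h>
	#include <linux/interrupt.h>
	#include <asm/prom.h>	/* irq_of_parse_and_map(), NO_IRQ */

	/* Hypothetical helper: map and request a node's first interrupt */
	static int __init example_hook_irq(struct device_node *np,
			irqreturn_t (*handler)(int, void *, struct pt_regs *))
	{
		/* Parse "interrupts"/"interrupt-parent" and create a virq */
		unsigned int virq = irq_of_parse_and_map(np, 0);

		if (virq == NO_IRQ)
			return -ENODEV;
		/* From here on only the virq is handed to the generic IRQ layer */
		return request_irq(virq, handler, 0, np->name, NULL);
	}
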
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h index 21c7b0f8f329..94e7b24b840b 100644 --- a/arch/powerpc/platforms/powermac/pmac.h +++ b/arch/powerpc/platforms/powermac/pmac.h | |||
@@ -12,6 +12,8 @@ | |||
12 | 12 | ||
13 | struct rtc_time; | 13 | struct rtc_time; |
14 | 14 | ||
15 | extern int pmac_newworld; | ||
16 | |||
15 | extern long pmac_time_init(void); | 17 | extern long pmac_time_init(void); |
16 | extern unsigned long pmac_get_boot_time(void); | 18 | extern unsigned long pmac_get_boot_time(void); |
17 | extern void pmac_get_rtc_time(struct rtc_time *); | 19 | extern void pmac_get_rtc_time(struct rtc_time *); |
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 8654b5f07836..31a9da769fa2 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c | |||
@@ -613,9 +613,6 @@ static void __init pmac_init_early(void) | |||
613 | udbg_adb_init(!!strstr(cmd_line, "btextdbg")); | 613 | udbg_adb_init(!!strstr(cmd_line, "btextdbg")); |
614 | 614 | ||
615 | #ifdef CONFIG_PPC64 | 615 | #ifdef CONFIG_PPC64 |
616 | /* Setup interrupt mapping options */ | ||
617 | ppc64_interrupt_controller = IC_OPEN_PIC; | ||
618 | |||
619 | iommu_init_early_dart(); | 616 | iommu_init_early_dart(); |
620 | #endif | 617 | #endif |
621 | } | 618 | } |
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index 9639c66b453d..9df783088b61 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c | |||
@@ -72,32 +72,62 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id, | |||
72 | 72 | ||
73 | /* #define DEBUG */ | 73 | /* #define DEBUG */ |
74 | 74 | ||
75 | static void request_ras_irqs(struct device_node *np, char *propname, | 75 | |
76 | static void request_ras_irqs(struct device_node *np, | ||
76 | irqreturn_t (*handler)(int, void *, struct pt_regs *), | 77 | irqreturn_t (*handler)(int, void *, struct pt_regs *), |
77 | const char *name) | 78 | const char *name) |
78 | { | 79 | { |
79 | unsigned int *ireg, len, i; | 80 | int i, index, count = 0; |
80 | int virq, n_intr; | 81 | struct of_irq oirq; |
81 | 82 | u32 *opicprop; | |
82 | ireg = (unsigned int *)get_property(np, propname, &len); | 83 | unsigned int opicplen; |
83 | if (ireg == NULL) | 84 | unsigned int virqs[16]; |
84 | return; | 85 | |
85 | n_intr = prom_n_intr_cells(np); | 86 | /* Check for obsolete "open-pic-interrupt" property. If present, then |
86 | len /= n_intr * sizeof(*ireg); | 87 | * map those interrupts using the default interrupt host and default |
87 | 88 | * trigger | |
88 | for (i = 0; i < len; i++) { | 89 | */ |
89 | virq = virt_irq_create_mapping(*ireg); | 90 | opicprop = (u32 *)get_property(np, "open-pic-interrupt", &opicplen); |
90 | if (virq == NO_IRQ) { | 91 | if (opicprop) { |
91 | printk(KERN_ERR "Unable to allocate interrupt " | 92 | opicplen /= sizeof(u32); |
92 | "number for %s\n", np->full_name); | 93 | for (i = 0; i < opicplen; i++) { |
93 | return; | 94 | if (count > 15) |
95 | break; | ||
96 | virqs[count] = irq_create_mapping(NULL, *(opicprop++), | ||
97 | IRQ_TYPE_NONE); | ||
98 | if (virqs[count] == NO_IRQ) | ||
99 | printk(KERN_ERR "Unable to allocate interrupt " | ||
100 | "number for %s\n", np->full_name); | ||
101 | else | ||
102 | count++; | ||
103 | |||
94 | } | 104 | } |
95 | if (request_irq(irq_offset_up(virq), handler, 0, name, NULL)) { | 105 | } |
106 | /* Else use normal interrupt tree parsing */ | ||
107 | else { | ||
108 | /* First try to do a proper OF tree parsing */ | ||
109 | for (index = 0; of_irq_map_one(np, index, &oirq) == 0; | ||
110 | index++) { | ||
111 | if (count > 15) | ||
112 | break; | ||
113 | virqs[count] = irq_create_of_mapping(oirq.controller, | ||
114 | oirq.specifier, | ||
115 | oirq.size); | ||
116 | if (virqs[count] == NO_IRQ) | ||
117 | printk(KERN_ERR "Unable to allocate interrupt " | ||
118 | "number for %s\n", np->full_name); | ||
119 | else | ||
120 | count++; | ||
121 | } | ||
122 | } | ||
123 | |||
124 | /* Now request them */ | ||
125 | for (i = 0; i < count; i++) { | ||
126 | if (request_irq(virqs[i], handler, 0, name, NULL)) { | ||
96 | printk(KERN_ERR "Unable to request interrupt %d for " | 127 | printk(KERN_ERR "Unable to request interrupt %d for " |
97 | "%s\n", irq_offset_up(virq), np->full_name); | 128 | "%s\n", virqs[i], np->full_name); |
98 | return; | 129 | return; |
99 | } | 130 | } |
100 | ireg += n_intr; | ||
101 | } | 131 | } |
102 | } | 132 | } |
103 | 133 | ||
@@ -115,20 +145,14 @@ static int __init init_ras_IRQ(void) | |||
115 | /* Internal Errors */ | 145 | /* Internal Errors */ |
116 | np = of_find_node_by_path("/event-sources/internal-errors"); | 146 | np = of_find_node_by_path("/event-sources/internal-errors"); |
117 | if (np != NULL) { | 147 | if (np != NULL) { |
118 | request_ras_irqs(np, "open-pic-interrupt", ras_error_interrupt, | 148 | request_ras_irqs(np, ras_error_interrupt, "RAS_ERROR"); |
119 | "RAS_ERROR"); | ||
120 | request_ras_irqs(np, "interrupts", ras_error_interrupt, | ||
121 | "RAS_ERROR"); | ||
122 | of_node_put(np); | 149 | of_node_put(np); |
123 | } | 150 | } |
124 | 151 | ||
125 | /* EPOW Events */ | 152 | /* EPOW Events */ |
126 | np = of_find_node_by_path("/event-sources/epow-events"); | 153 | np = of_find_node_by_path("/event-sources/epow-events"); |
127 | if (np != NULL) { | 154 | if (np != NULL) { |
128 | request_ras_irqs(np, "open-pic-interrupt", ras_epow_interrupt, | 155 | request_ras_irqs(np, ras_epow_interrupt, "RAS_EPOW"); |
129 | "RAS_EPOW"); | ||
130 | request_ras_irqs(np, "interrupts", ras_epow_interrupt, | ||
131 | "RAS_EPOW"); | ||
132 | of_node_put(np); | 156 | of_node_put(np); |
133 | } | 157 | } |
134 | 158 | ||
@@ -162,7 +186,7 @@ ras_epow_interrupt(int irq, void *dev_id, struct pt_regs * regs) | |||
162 | 186 | ||
163 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, | 187 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, |
164 | RAS_VECTOR_OFFSET, | 188 | RAS_VECTOR_OFFSET, |
165 | virt_irq_to_real(irq_offset_down(irq)), | 189 | irq_map[irq].hwirq, |
166 | RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS, | 190 | RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS, |
167 | critical, __pa(&ras_log_buf), | 191 | critical, __pa(&ras_log_buf), |
168 | rtas_get_error_log_max()); | 192 | rtas_get_error_log_max()); |
@@ -198,7 +222,7 @@ ras_error_interrupt(int irq, void *dev_id, struct pt_regs * regs) | |||
198 | 222 | ||
199 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, | 223 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, |
200 | RAS_VECTOR_OFFSET, | 224 | RAS_VECTOR_OFFSET, |
201 | virt_irq_to_real(irq_offset_down(irq)), | 225 | irq_map[irq].hwirq, |
202 | RTAS_INTERNAL_ERROR, 1 /*Time Critical */, | 226 | RTAS_INTERNAL_ERROR, 1 /*Time Critical */, |
203 | __pa(&ras_log_buf), | 227 | __pa(&ras_log_buf), |
204 | rtas_get_error_log_max()); | 228 | rtas_get_error_log_max()); |
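
The rewritten request_ras_irqs() above is the general recipe for turning every interrupt specifier of a device-tree node into a virtual irq: iterate of_irq_map_one() until it returns non-zero, and feed each specifier to irq_create_of_mapping(). A condensed, illustrative sketch of that loop (the helper name and the caller-supplied virqs[] array are assumptions, not part of this commit):

	/* Mirrors the parsing loop in request_ras_irqs() above */
	static int example_map_node_irqs(struct device_node *np,
					 unsigned int *virqs, int max)
	{
		struct of_irq oirq;
		int index, count = 0;

		for (index = 0;
		     count < max && of_irq_map_one(np, index, &oirq) == 0;
		     index++) {
			unsigned int virq =
				irq_create_of_mapping(oirq.controller,
						      oirq.specifier, oirq.size);
			if (virq != NO_IRQ)
				virqs[count++] = virq;	/* ready for request_irq() */
		}
		return count;
	}
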
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 999509d28af8..54a52437265c 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -76,6 +76,9 @@ | |||
76 | #define DBG(fmt...) | 76 | #define DBG(fmt...) |
77 | #endif | 77 | #endif |
78 | 78 | ||
79 | /* move those away to a .h */ | ||
80 | extern void smp_init_pseries_mpic(void); | ||
81 | extern void smp_init_pseries_xics(void); | ||
79 | extern void find_udbg_vterm(void); | 82 | extern void find_udbg_vterm(void); |
80 | 83 | ||
81 | int fwnmi_active; /* TRUE if an FWNMI handler is present */ | 84 | int fwnmi_active; /* TRUE if an FWNMI handler is present */ |
@@ -83,7 +86,7 @@ int fwnmi_active; /* TRUE if an FWNMI handler is present */ | |||
83 | static void pseries_shared_idle_sleep(void); | 86 | static void pseries_shared_idle_sleep(void); |
84 | static void pseries_dedicated_idle_sleep(void); | 87 | static void pseries_dedicated_idle_sleep(void); |
85 | 88 | ||
86 | struct mpic *pSeries_mpic; | 89 | static struct device_node *pSeries_mpic_node; |
87 | 90 | ||
88 | static void pSeries_show_cpuinfo(struct seq_file *m) | 91 | static void pSeries_show_cpuinfo(struct seq_file *m) |
89 | { | 92 | { |
@@ -118,63 +121,92 @@ static void __init fwnmi_init(void) | |||
118 | fwnmi_active = 1; | 121 | fwnmi_active = 1; |
119 | } | 122 | } |
120 | 123 | ||
121 | static void __init pSeries_init_mpic(void) | 124 | void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc, |
125 | struct pt_regs *regs) | ||
122 | { | 126 | { |
123 | unsigned int *addrp; | 127 | unsigned int cascade_irq = i8259_irq(regs); |
124 | struct device_node *np; | 128 | if (cascade_irq != NO_IRQ) |
125 | unsigned long intack = 0; | 129 | generic_handle_irq(cascade_irq, regs); |
126 | 130 | desc->chip->eoi(irq); | |
127 | /* All ISUs are setup, complete initialization */ | ||
128 | mpic_init(pSeries_mpic); | ||
129 | |||
130 | /* Check what kind of cascade ACK we have */ | ||
131 | if (!(np = of_find_node_by_name(NULL, "pci")) | ||
132 | || !(addrp = (unsigned int *) | ||
133 | get_property(np, "8259-interrupt-acknowledge", NULL))) | ||
134 | printk(KERN_ERR "Cannot find pci to get ack address\n"); | ||
135 | else | ||
136 | intack = addrp[prom_n_addr_cells(np)-1]; | ||
137 | of_node_put(np); | ||
138 | |||
139 | /* Setup the legacy interrupts & controller */ | ||
140 | i8259_init(intack, 0); | ||
141 | |||
142 | /* Hook cascade to mpic */ | ||
143 | mpic_setup_cascade(NUM_ISA_INTERRUPTS, i8259_irq_cascade, NULL); | ||
144 | } | 131 | } |
145 | 132 | ||
146 | static void __init pSeries_setup_mpic(void) | 133 | static void __init pseries_mpic_init_IRQ(void) |
147 | { | 134 | { |
135 | struct device_node *np, *old, *cascade = NULL; | ||
136 | unsigned int *addrp; | ||
137 | unsigned long intack = 0; | ||
148 | unsigned int *opprop; | 138 | unsigned int *opprop; |
149 | unsigned long openpic_addr = 0; | 139 | unsigned long openpic_addr = 0; |
150 | unsigned char senses[NR_IRQS - NUM_ISA_INTERRUPTS]; | 140 | unsigned int cascade_irq; |
151 | struct device_node *root; | 141 | int naddr, n, i, opplen; |
152 | int irq_count; | 142 | struct mpic *mpic; |
153 | 143 | ||
154 | /* Find the Open PIC if present */ | 144 | np = of_find_node_by_path("/"); |
155 | root = of_find_node_by_path("/"); | 145 | naddr = prom_n_addr_cells(np); |
156 | opprop = (unsigned int *) get_property(root, "platform-open-pic", NULL); | 146 | opprop = (unsigned int *) get_property(np, "platform-open-pic", &opplen); |
157 | if (opprop != 0) { | 147 | if (opprop != 0) { |
158 | int n = prom_n_addr_cells(root); | 148 | openpic_addr = of_read_number(opprop, naddr); |
159 | |||
160 | for (openpic_addr = 0; n > 0; --n) | ||
161 | openpic_addr = (openpic_addr << 32) + *opprop++; | ||
162 | printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr); | 149 | printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr); |
163 | } | 150 | } |
164 | of_node_put(root); | 151 | of_node_put(np); |
165 | 152 | ||
166 | BUG_ON(openpic_addr == 0); | 153 | BUG_ON(openpic_addr == 0); |
167 | 154 | ||
168 | /* Get the sense values from OF */ | ||
169 | prom_get_irq_senses(senses, NUM_ISA_INTERRUPTS, NR_IRQS); | ||
170 | |||
171 | /* Setup the openpic driver */ | 155 | /* Setup the openpic driver */ |
172 | irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */ | 156 | mpic = mpic_alloc(pSeries_mpic_node, openpic_addr, |
173 | pSeries_mpic = mpic_alloc(openpic_addr, MPIC_PRIMARY, | 157 | MPIC_PRIMARY, |
174 | 16, 16, irq_count, /* isu size, irq offset, irq count */ | 158 | 16, 250, /* isu size, irq count */ |
175 | NR_IRQS - 4, /* ipi offset */ | 159 | " MPIC "); |
176 | senses, irq_count, /* sense & sense size */ | 160 | BUG_ON(mpic == NULL); |
177 | " MPIC "); | 161 | |
162 | /* Add ISUs */ | ||
163 | opplen /= sizeof(u32); | ||
164 | for (n = 0, i = naddr; i < opplen; i += naddr, n++) { | ||
165 | unsigned long isuaddr = of_read_number(opprop + i, naddr); | ||
166 | mpic_assign_isu(mpic, n, isuaddr); | ||
167 | } | ||
168 | |||
169 | /* All ISUs are setup, complete initialization */ | ||
170 | mpic_init(mpic); | ||
171 | |||
172 | /* Look for cascade */ | ||
173 | for_each_node_by_type(np, "interrupt-controller") | ||
174 | if (device_is_compatible(np, "chrp,iic")) { | ||
175 | cascade = np; | ||
176 | break; | ||
177 | } | ||
178 | if (cascade == NULL) | ||
179 | return; | ||
180 | |||
181 | cascade_irq = irq_of_parse_and_map(cascade, 0); | ||
182 | if (cascade_irq == NO_IRQ) { | ||
183 | printk(KERN_ERR "mpic: failed to map cascade interrupt\n"); | ||
184 | return; | ||
185 | } | ||
186 | |||
187 | /* Check ACK type */ | ||
188 | for (old = of_node_get(cascade); old != NULL ; old = np) { | ||
189 | np = of_get_parent(old); | ||
190 | of_node_put(old); | ||
191 | if (np == NULL) | ||
192 | break; | ||
193 | if (strcmp(np->name, "pci") != 0) | ||
194 | continue; | ||
195 | addrp = (u32 *)get_property(np, "8259-interrupt-acknowledge", | ||
196 | NULL); | ||
197 | if (addrp == NULL) | ||
198 | continue; | ||
199 | naddr = prom_n_addr_cells(np); | ||
200 | intack = addrp[naddr-1]; | ||
201 | if (naddr > 1) | ||
202 | intack |= ((unsigned long)addrp[naddr-2]) << 32; | ||
203 | } | ||
204 | if (intack) | ||
205 | printk(KERN_DEBUG "mpic: PCI 8259 intack at 0x%016lx\n", | ||
206 | intack); | ||
207 | i8259_init(cascade, intack); | ||
208 | of_node_put(cascade); | ||
209 | set_irq_chained_handler(cascade_irq, pseries_8259_cascade); | ||
178 | } | 210 | } |
179 | 211 | ||
180 | static void pseries_lpar_enable_pmcs(void) | 212 | static void pseries_lpar_enable_pmcs(void) |
@@ -192,23 +224,67 @@ static void pseries_lpar_enable_pmcs(void) | |||
192 | get_lppaca()->pmcregs_in_use = 1; | 224 | get_lppaca()->pmcregs_in_use = 1; |
193 | } | 225 | } |
194 | 226 | ||
195 | static void __init pSeries_setup_arch(void) | 227 | #ifdef CONFIG_KEXEC |
228 | static void pseries_kexec_cpu_down_mpic(int crash_shutdown, int secondary) | ||
196 | { | 229 | { |
197 | /* Fixup ppc_md depending on the type of interrupt controller */ | 230 | mpic_teardown_this_cpu(secondary); |
198 | if (ppc64_interrupt_controller == IC_OPEN_PIC) { | 231 | } |
199 | ppc_md.init_IRQ = pSeries_init_mpic; | 232 | |
200 | ppc_md.get_irq = mpic_get_irq; | 233 | static void pseries_kexec_cpu_down_xics(int crash_shutdown, int secondary) |
201 | /* Allocate the mpic now, so that find_and_init_phbs() can | 234 | { |
202 | * fill the ISUs */ | 235 | /* Don't risk a hypervisor call if we're crashing */ |
203 | pSeries_setup_mpic(); | 236 | if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) { |
204 | } else { | 237 | unsigned long vpa = __pa(get_lppaca()); |
205 | ppc_md.init_IRQ = xics_init_IRQ; | 238 | |
206 | ppc_md.get_irq = xics_get_irq; | 239 | if (unregister_vpa(hard_smp_processor_id(), vpa)) { |
240 | printk("VPA deregistration of cpu %u (hw_cpu_id %d) " | ||
241 | "failed\n", smp_processor_id(), | ||
242 | hard_smp_processor_id()); | ||
243 | } | ||
207 | } | 244 | } |
245 | xics_teardown_cpu(secondary); | ||
246 | } | ||
247 | #endif /* CONFIG_KEXEC */ | ||
208 | 248 | ||
249 | static void __init pseries_discover_pic(void) | ||
250 | { | ||
251 | struct device_node *np; | ||
252 | char *typep; | ||
253 | |||
254 | for (np = NULL; (np = of_find_node_by_name(np, | ||
255 | "interrupt-controller"));) { | ||
256 | typep = (char *)get_property(np, "compatible", NULL); | ||
257 | if (strstr(typep, "open-pic")) { | ||
258 | pSeries_mpic_node = of_node_get(np); | ||
259 | ppc_md.init_IRQ = pseries_mpic_init_IRQ; | ||
260 | ppc_md.get_irq = mpic_get_irq; | ||
261 | #ifdef CONFIG_KEXEC | ||
262 | ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_mpic; | ||
263 | #endif | ||
264 | #ifdef CONFIG_SMP | ||
265 | smp_init_pseries_mpic(); | ||
266 | #endif | ||
267 | return; | ||
268 | } else if (strstr(typep, "ppc-xicp")) { | ||
269 | ppc_md.init_IRQ = xics_init_IRQ; | ||
270 | #ifdef CONFIG_KEXEC | ||
271 | ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_xics; | ||
272 | #endif | ||
209 | #ifdef CONFIG_SMP | 273 | #ifdef CONFIG_SMP |
210 | smp_init_pSeries(); | 274 | smp_init_pseries_xics(); |
211 | #endif | 275 | #endif |
276 | return; | ||
277 | } | ||
278 | } | ||
279 | printk(KERN_ERR "pSeries_discover_pic: failed to recognize" | ||
280 | " interrupt-controller\n"); | ||
281 | } | ||
282 | |||
283 | static void __init pSeries_setup_arch(void) | ||
284 | { | ||
285 | /* Discover PIC type and setup ppc_md accordingly */ | ||
286 | pseries_discover_pic(); | ||
287 | |||
212 | /* openpic global configuration register (64-bit format). */ | 288 | /* openpic global configuration register (64-bit format). */ |
213 | /* openpic Interrupt Source Unit pointer (64-bit format). */ | 289 | /* openpic Interrupt Source Unit pointer (64-bit format). */ |
214 | /* python0 facility area (mmio) (64-bit format) REAL address. */ | 290 | /* python0 facility area (mmio) (64-bit format) REAL address. */ |
@@ -260,41 +336,11 @@ static int __init pSeries_init_panel(void) | |||
260 | } | 336 | } |
261 | arch_initcall(pSeries_init_panel); | 337 | arch_initcall(pSeries_init_panel); |
262 | 338 | ||
263 | static void __init pSeries_discover_pic(void) | ||
264 | { | ||
265 | struct device_node *np; | ||
266 | char *typep; | ||
267 | |||
268 | /* | ||
269 | * Setup interrupt mapping options that are needed for finish_device_tree | ||
270 | * to properly parse the OF interrupt tree & do the virtual irq mapping | ||
271 | */ | ||
272 | __irq_offset_value = NUM_ISA_INTERRUPTS; | ||
273 | ppc64_interrupt_controller = IC_INVALID; | ||
274 | for (np = NULL; (np = of_find_node_by_name(np, "interrupt-controller"));) { | ||
275 | typep = (char *)get_property(np, "compatible", NULL); | ||
276 | if (strstr(typep, "open-pic")) { | ||
277 | ppc64_interrupt_controller = IC_OPEN_PIC; | ||
278 | break; | ||
279 | } else if (strstr(typep, "ppc-xicp")) { | ||
280 | ppc64_interrupt_controller = IC_PPC_XIC; | ||
281 | break; | ||
282 | } | ||
283 | } | ||
284 | if (ppc64_interrupt_controller == IC_INVALID) | ||
285 | printk("pSeries_discover_pic: failed to recognize" | ||
286 | " interrupt-controller\n"); | ||
287 | |||
288 | } | ||
289 | |||
290 | static void pSeries_mach_cpu_die(void) | 339 | static void pSeries_mach_cpu_die(void) |
291 | { | 340 | { |
292 | local_irq_disable(); | 341 | local_irq_disable(); |
293 | idle_task_exit(); | 342 | idle_task_exit(); |
294 | /* Some hardware requires clearing the CPPR, while other hardware does not | 343 | xics_teardown_cpu(0); |
295 | * it is safe either way | ||
296 | */ | ||
297 | pSeriesLP_cppr_info(0, 0); | ||
298 | rtas_stop_self(); | 344 | rtas_stop_self(); |
299 | /* Should never get here... */ | 345 | /* Should never get here... */ |
300 | BUG(); | 346 | BUG(); |
@@ -332,8 +378,6 @@ static void __init pSeries_init_early(void) | |||
332 | 378 | ||
333 | iommu_init_early_pSeries(); | 379 | iommu_init_early_pSeries(); |
334 | 380 | ||
335 | pSeries_discover_pic(); | ||
336 | |||
337 | DBG(" <- pSeries_init_early()\n"); | 381 | DBG(" <- pSeries_init_early()\n"); |
338 | } | 382 | } |
339 | 383 | ||
@@ -505,27 +549,6 @@ static int pSeries_pci_probe_mode(struct pci_bus *bus) | |||
505 | return PCI_PROBE_NORMAL; | 549 | return PCI_PROBE_NORMAL; |
506 | } | 550 | } |
507 | 551 | ||
508 | #ifdef CONFIG_KEXEC | ||
509 | static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) | ||
510 | { | ||
511 | /* Don't risk a hypervisor call if we're crashing */ | ||
512 | if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) { | ||
513 | unsigned long vpa = __pa(get_lppaca()); | ||
514 | |||
515 | if (unregister_vpa(hard_smp_processor_id(), vpa)) { | ||
516 | printk("VPA deregistration of cpu %u (hw_cpu_id %d) " | ||
517 | "failed\n", smp_processor_id(), | ||
518 | hard_smp_processor_id()); | ||
519 | } | ||
520 | } | ||
521 | |||
522 | if (ppc64_interrupt_controller == IC_OPEN_PIC) | ||
523 | mpic_teardown_this_cpu(secondary); | ||
524 | else | ||
525 | xics_teardown_cpu(secondary); | ||
526 | } | ||
527 | #endif | ||
528 | |||
529 | define_machine(pseries) { | 552 | define_machine(pseries) { |
530 | .name = "pSeries", | 553 | .name = "pSeries", |
531 | .probe = pSeries_probe, | 554 | .probe = pSeries_probe, |
@@ -550,7 +573,6 @@ define_machine(pseries) { | |||
550 | .system_reset_exception = pSeries_system_reset_exception, | 573 | .system_reset_exception = pSeries_system_reset_exception, |
551 | .machine_check_exception = pSeries_machine_check_exception, | 574 | .machine_check_exception = pSeries_machine_check_exception, |
552 | #ifdef CONFIG_KEXEC | 575 | #ifdef CONFIG_KEXEC |
553 | .kexec_cpu_down = pseries_kexec_cpu_down, | ||
554 | .machine_kexec = default_machine_kexec, | 576 | .machine_kexec = default_machine_kexec, |
555 | .machine_kexec_prepare = default_machine_kexec_prepare, | 577 | .machine_kexec_prepare = default_machine_kexec_prepare, |
556 | .machine_crash_shutdown = default_machine_crash_shutdown, | 578 | .machine_crash_shutdown = default_machine_crash_shutdown, |
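
Both the powermac U3/MPIC path and the pseries 8259 path above use the same chained-handler idiom for a secondary interrupt controller: the flow handler asks the child PIC which interrupt fired, dispatches it through generic_handle_irq(), and only then issues the parent EOI. A generic sketch of the idiom; child_get_irq() and child_pic are stand-ins (for i8259_irq() or mpic_get_one_irq() and the child PIC data), not real symbols:

	/* Same shape as pmac_u3_cascade() and pseries_8259_cascade() above */
	static void example_cascade(unsigned int irq, struct irq_desc *desc,
				    struct pt_regs *regs)
	{
		unsigned int child_irq = child_get_irq(regs);	/* stand-in */

		if (child_irq != NO_IRQ)
			generic_handle_irq(child_irq, regs);
		desc->chip->eoi(irq);	/* EOI the parent only afterwards */
	}

	/* Wired up once at init time, e.g.:
	 *	set_irq_data(cascade_virq, child_pic);
	 *	set_irq_chained_handler(cascade_virq, example_cascade);
	 */
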
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 4ad144df49c2..ac61098ff401 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c | |||
@@ -416,27 +416,12 @@ static struct smp_ops_t pSeries_xics_smp_ops = { | |||
416 | #endif | 416 | #endif |
417 | 417 | ||
418 | /* This is called very early */ | 418 | /* This is called very early */ |
419 | void __init smp_init_pSeries(void) | 419 | static void __init smp_init_pseries(void) |
420 | { | 420 | { |
421 | int i; | 421 | int i; |
422 | 422 | ||
423 | DBG(" -> smp_init_pSeries()\n"); | 423 | DBG(" -> smp_init_pSeries()\n"); |
424 | 424 | ||
425 | switch (ppc64_interrupt_controller) { | ||
426 | #ifdef CONFIG_MPIC | ||
427 | case IC_OPEN_PIC: | ||
428 | smp_ops = &pSeries_mpic_smp_ops; | ||
429 | break; | ||
430 | #endif | ||
431 | #ifdef CONFIG_XICS | ||
432 | case IC_PPC_XIC: | ||
433 | smp_ops = &pSeries_xics_smp_ops; | ||
434 | break; | ||
435 | #endif | ||
436 | default: | ||
437 | panic("Invalid interrupt controller"); | ||
438 | } | ||
439 | |||
440 | #ifdef CONFIG_HOTPLUG_CPU | 425 | #ifdef CONFIG_HOTPLUG_CPU |
441 | smp_ops->cpu_disable = pSeries_cpu_disable; | 426 | smp_ops->cpu_disable = pSeries_cpu_disable; |
442 | smp_ops->cpu_die = pSeries_cpu_die; | 427 | smp_ops->cpu_die = pSeries_cpu_die; |
@@ -471,3 +456,18 @@ void __init smp_init_pSeries(void) | |||
471 | DBG(" <- smp_init_pSeries()\n"); | 456 | DBG(" <- smp_init_pSeries()\n"); |
472 | } | 457 | } |
473 | 458 | ||
459 | #ifdef CONFIG_MPIC | ||
460 | void __init smp_init_pseries_mpic(void) | ||
461 | { | ||
462 | smp_ops = &pSeries_mpic_smp_ops; | ||
463 | |||
464 | smp_init_pseries(); | ||
465 | } | ||
466 | #endif | ||
467 | |||
468 | void __init smp_init_pseries_xics(void) | ||
469 | { | ||
470 | smp_ops = &pSeries_xics_smp_ops; | ||
471 | |||
472 | smp_init_pseries(); | ||
473 | } | ||
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 2ffebe31cb2d..716972aa9777 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | |||
12 | #undef DEBUG | ||
13 | |||
11 | #include <linux/types.h> | 14 | #include <linux/types.h> |
12 | #include <linux/threads.h> | 15 | #include <linux/threads.h> |
13 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
@@ -19,6 +22,7 @@ | |||
19 | #include <linux/gfp.h> | 22 | #include <linux/gfp.h> |
20 | #include <linux/radix-tree.h> | 23 | #include <linux/radix-tree.h> |
21 | #include <linux/cpu.h> | 24 | #include <linux/cpu.h> |
25 | |||
22 | #include <asm/firmware.h> | 26 | #include <asm/firmware.h> |
23 | #include <asm/prom.h> | 27 | #include <asm/prom.h> |
24 | #include <asm/io.h> | 28 | #include <asm/io.h> |
@@ -31,26 +35,6 @@ | |||
31 | 35 | ||
32 | #include "xics.h" | 36 | #include "xics.h" |
33 | 37 | ||
34 | static unsigned int xics_startup(unsigned int irq); | ||
35 | static void xics_enable_irq(unsigned int irq); | ||
36 | static void xics_disable_irq(unsigned int irq); | ||
37 | static void xics_mask_and_ack_irq(unsigned int irq); | ||
38 | static void xics_end_irq(unsigned int irq); | ||
39 | static void xics_set_affinity(unsigned int irq_nr, cpumask_t cpumask); | ||
40 | |||
41 | static struct hw_interrupt_type xics_pic = { | ||
42 | .typename = " XICS ", | ||
43 | .startup = xics_startup, | ||
44 | .enable = xics_enable_irq, | ||
45 | .disable = xics_disable_irq, | ||
46 | .ack = xics_mask_and_ack_irq, | ||
47 | .end = xics_end_irq, | ||
48 | .set_affinity = xics_set_affinity | ||
49 | }; | ||
50 | |||
51 | /* This is used to map real irq numbers to virtual */ | ||
52 | static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC); | ||
53 | |||
54 | #define XICS_IPI 2 | 38 | #define XICS_IPI 2 |
55 | #define XICS_IRQ_SPURIOUS 0 | 39 | #define XICS_IRQ_SPURIOUS 0 |
56 | 40 | ||
@@ -81,12 +65,12 @@ struct xics_ipl { | |||
81 | 65 | ||
82 | static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS]; | 66 | static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS]; |
83 | 67 | ||
84 | static int xics_irq_8259_cascade = 0; | ||
85 | static int xics_irq_8259_cascade_real = 0; | ||
86 | static unsigned int default_server = 0xFF; | 68 | static unsigned int default_server = 0xFF; |
87 | static unsigned int default_distrib_server = 0; | 69 | static unsigned int default_distrib_server = 0; |
88 | static unsigned int interrupt_server_size = 8; | 70 | static unsigned int interrupt_server_size = 8; |
89 | 71 | ||
72 | static struct irq_host *xics_host; | ||
73 | |||
90 | /* | 74 | /* |
91 | * XICS only has a single IPI, so encode the messages per CPU | 75 | * XICS only has a single IPI, so encode the messages per CPU |
92 | */ | 76 | */ |
@@ -98,48 +82,34 @@ static int ibm_set_xive; | |||
98 | static int ibm_int_on; | 82 | static int ibm_int_on; |
99 | static int ibm_int_off; | 83 | static int ibm_int_off; |
100 | 84 | ||
101 | typedef struct { | ||
102 | int (*xirr_info_get)(int cpu); | ||
103 | void (*xirr_info_set)(int cpu, int val); | ||
104 | void (*cppr_info)(int cpu, u8 val); | ||
105 | void (*qirr_info)(int cpu, u8 val); | ||
106 | } xics_ops; | ||
107 | 85 | ||
86 | /* Direct HW low level accessors */ | ||
108 | 87 | ||
109 | /* SMP */ | ||
110 | 88 | ||
111 | static int pSeries_xirr_info_get(int n_cpu) | 89 | static inline unsigned int direct_xirr_info_get(int n_cpu) |
112 | { | 90 | { |
113 | return in_be32(&xics_per_cpu[n_cpu]->xirr.word); | 91 | return in_be32(&xics_per_cpu[n_cpu]->xirr.word); |
114 | } | 92 | } |
115 | 93 | ||
116 | static void pSeries_xirr_info_set(int n_cpu, int value) | 94 | static inline void direct_xirr_info_set(int n_cpu, int value) |
117 | { | 95 | { |
118 | out_be32(&xics_per_cpu[n_cpu]->xirr.word, value); | 96 | out_be32(&xics_per_cpu[n_cpu]->xirr.word, value); |
119 | } | 97 | } |
120 | 98 | ||
121 | static void pSeries_cppr_info(int n_cpu, u8 value) | 99 | static inline void direct_cppr_info(int n_cpu, u8 value) |
122 | { | 100 | { |
123 | out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value); | 101 | out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value); |
124 | } | 102 | } |
125 | 103 | ||
126 | static void pSeries_qirr_info(int n_cpu, u8 value) | 104 | static inline void direct_qirr_info(int n_cpu, u8 value) |
127 | { | 105 | { |
128 | out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value); | 106 | out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value); |
129 | } | 107 | } |
130 | 108 | ||
131 | static xics_ops pSeries_ops = { | ||
132 | pSeries_xirr_info_get, | ||
133 | pSeries_xirr_info_set, | ||
134 | pSeries_cppr_info, | ||
135 | pSeries_qirr_info | ||
136 | }; | ||
137 | 109 | ||
138 | static xics_ops *ops = &pSeries_ops; | 110 | /* LPAR low level accessors */ |
139 | 111 | ||
140 | 112 | ||
141 | /* LPAR */ | ||
142 | |||
143 | static inline long plpar_eoi(unsigned long xirr) | 113 | static inline long plpar_eoi(unsigned long xirr) |
144 | { | 114 | { |
145 | return plpar_hcall_norets(H_EOI, xirr); | 115 | return plpar_hcall_norets(H_EOI, xirr); |
@@ -161,7 +131,7 @@ static inline long plpar_xirr(unsigned long *xirr_ret) | |||
161 | return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy); | 131 | return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy); |
162 | } | 132 | } |
163 | 133 | ||
164 | static int pSeriesLP_xirr_info_get(int n_cpu) | 134 | static inline unsigned int lpar_xirr_info_get(int n_cpu) |
165 | { | 135 | { |
166 | unsigned long lpar_rc; | 136 | unsigned long lpar_rc; |
167 | unsigned long return_value; | 137 | unsigned long return_value; |
@@ -169,10 +139,10 @@ static int pSeriesLP_xirr_info_get(int n_cpu) | |||
169 | lpar_rc = plpar_xirr(&return_value); | 139 | lpar_rc = plpar_xirr(&return_value); |
170 | if (lpar_rc != H_SUCCESS) | 140 | if (lpar_rc != H_SUCCESS) |
171 | panic(" bad return code xirr - rc = %lx \n", lpar_rc); | 141 | panic(" bad return code xirr - rc = %lx \n", lpar_rc); |
172 | return (int)return_value; | 142 | return (unsigned int)return_value; |
173 | } | 143 | } |
174 | 144 | ||
175 | static void pSeriesLP_xirr_info_set(int n_cpu, int value) | 145 | static inline void lpar_xirr_info_set(int n_cpu, int value) |
176 | { | 146 | { |
177 | unsigned long lpar_rc; | 147 | unsigned long lpar_rc; |
178 | unsigned long val64 = value & 0xffffffff; | 148 | unsigned long val64 = value & 0xffffffff; |
@@ -183,7 +153,7 @@ static void pSeriesLP_xirr_info_set(int n_cpu, int value) | |||
183 | val64); | 153 | val64); |
184 | } | 154 | } |
185 | 155 | ||
186 | void pSeriesLP_cppr_info(int n_cpu, u8 value) | 156 | static inline void lpar_cppr_info(int n_cpu, u8 value) |
187 | { | 157 | { |
188 | unsigned long lpar_rc; | 158 | unsigned long lpar_rc; |
189 | 159 | ||
@@ -192,7 +162,7 @@ void pSeriesLP_cppr_info(int n_cpu, u8 value) | |||
192 | panic("bad return code cppr - rc = %lx\n", lpar_rc); | 162 | panic("bad return code cppr - rc = %lx\n", lpar_rc); |
193 | } | 163 | } |
194 | 164 | ||
195 | static void pSeriesLP_qirr_info(int n_cpu , u8 value) | 165 | static inline void lpar_qirr_info(int n_cpu, u8 value) |
196 | { | 166 | { |
197 | unsigned long lpar_rc; | 167 | unsigned long lpar_rc; |
198 | 168 | ||
@@ -201,43 +171,16 @@ static void pSeriesLP_qirr_info(int n_cpu , u8 value) | |||
201 | panic("bad return code qirr - rc = %lx\n", lpar_rc); | 171 | panic("bad return code qirr - rc = %lx\n", lpar_rc); |
202 | } | 172 | } |
203 | 173 | ||
204 | xics_ops pSeriesLP_ops = { | ||
205 | pSeriesLP_xirr_info_get, | ||
206 | pSeriesLP_xirr_info_set, | ||
207 | pSeriesLP_cppr_info, | ||
208 | pSeriesLP_qirr_info | ||
209 | }; | ||
210 | |||
211 | static unsigned int xics_startup(unsigned int virq) | ||
212 | { | ||
213 | unsigned int irq; | ||
214 | |||
215 | irq = irq_offset_down(virq); | ||
216 | if (radix_tree_insert(&irq_map, virt_irq_to_real(irq), | ||
217 | &virt_irq_to_real_map[irq]) == -ENOMEM) | ||
218 | printk(KERN_CRIT "Out of memory creating real -> virtual" | ||
219 | " IRQ mapping for irq %u (real 0x%x)\n", | ||
220 | virq, virt_irq_to_real(irq)); | ||
221 | xics_enable_irq(virq); | ||
222 | return 0; /* return value is ignored */ | ||
223 | } | ||
224 | 174 | ||
225 | static unsigned int real_irq_to_virt(unsigned int real_irq) | 175 | /* High level handlers and init code */ |
226 | { | ||
227 | unsigned int *ptr; | ||
228 | 176 | ||
229 | ptr = radix_tree_lookup(&irq_map, real_irq); | ||
230 | if (ptr == NULL) | ||
231 | return NO_IRQ; | ||
232 | return ptr - virt_irq_to_real_map; | ||
233 | } | ||
234 | 177 | ||
235 | #ifdef CONFIG_SMP | 178 | #ifdef CONFIG_SMP |
236 | static int get_irq_server(unsigned int irq) | 179 | static int get_irq_server(unsigned int virq) |
237 | { | 180 | { |
238 | unsigned int server; | 181 | unsigned int server; |
239 | /* For the moment only implement delivery to all cpus or one cpu */ | 182 | /* For the moment only implement delivery to all cpus or one cpu */ |
240 | cpumask_t cpumask = irq_desc[irq].affinity; | 183 | cpumask_t cpumask = irq_desc[virq].affinity; |
241 | cpumask_t tmp = CPU_MASK_NONE; | 184 | cpumask_t tmp = CPU_MASK_NONE; |
242 | 185 | ||
243 | if (!distribute_irqs) | 186 | if (!distribute_irqs) |
@@ -258,23 +201,28 @@ static int get_irq_server(unsigned int irq) | |||
258 | 201 | ||
259 | } | 202 | } |
260 | #else | 203 | #else |
261 | static int get_irq_server(unsigned int irq) | 204 | static int get_irq_server(unsigned int virq) |
262 | { | 205 | { |
263 | return default_server; | 206 | return default_server; |
264 | } | 207 | } |
265 | #endif | 208 | #endif |
266 | 209 | ||
267 | static void xics_enable_irq(unsigned int virq) | 210 | |
211 | static void xics_unmask_irq(unsigned int virq) | ||
268 | { | 212 | { |
269 | unsigned int irq; | 213 | unsigned int irq; |
270 | int call_status; | 214 | int call_status; |
271 | unsigned int server; | 215 | unsigned int server; |
272 | 216 | ||
273 | irq = virt_irq_to_real(irq_offset_down(virq)); | 217 | pr_debug("xics: unmask virq %d\n", virq); |
274 | if (irq == XICS_IPI) | 218 | |
219 | irq = (unsigned int)irq_map[virq].hwirq; | ||
220 | pr_debug(" -> map to hwirq 0x%x\n", irq); | ||
221 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) | ||
275 | return; | 222 | return; |
276 | 223 | ||
277 | server = get_irq_server(virq); | 224 | server = get_irq_server(virq); |
225 | |||
278 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, | 226 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, |
279 | DEFAULT_PRIORITY); | 227 | DEFAULT_PRIORITY); |
280 | if (call_status != 0) { | 228 | if (call_status != 0) { |
@@ -293,7 +241,7 @@ static void xics_enable_irq(unsigned int virq) | |||
293 | } | 241 | } |
294 | } | 242 | } |
295 | 243 | ||
296 | static void xics_disable_real_irq(unsigned int irq) | 244 | static void xics_mask_real_irq(unsigned int irq) |
297 | { | 245 | { |
298 | int call_status; | 246 | int call_status; |
299 | unsigned int server; | 247 | unsigned int server; |
@@ -318,75 +266,86 @@ static void xics_disable_real_irq(unsigned int irq) | |||
318 | } | 266 | } |
319 | } | 267 | } |
320 | 268 | ||
321 | static void xics_disable_irq(unsigned int virq) | 269 | static void xics_mask_irq(unsigned int virq) |
322 | { | 270 | { |
323 | unsigned int irq; | 271 | unsigned int irq; |
324 | 272 | ||
325 | irq = virt_irq_to_real(irq_offset_down(virq)); | 273 | pr_debug("xics: mask virq %d\n", virq); |
326 | xics_disable_real_irq(irq); | 274 | |
275 | irq = (unsigned int)irq_map[virq].hwirq; | ||
276 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) | ||
277 | return; | ||
278 | xics_mask_real_irq(irq); | ||
279 | } | ||
280 | |||
281 | static unsigned int xics_startup(unsigned int virq) | ||
282 | { | ||
283 | unsigned int irq; | ||
284 | |||
285 | /* force a reverse mapping of the interrupt so it gets in the cache */ | ||
286 | irq = (unsigned int)irq_map[virq].hwirq; | ||
287 | irq_radix_revmap(xics_host, irq); | ||
288 | |||
289 | /* unmask it */ | ||
290 | xics_unmask_irq(virq); | ||
291 | return 0; | ||
327 | } | 292 | } |
328 | 293 | ||
329 | static void xics_end_irq(unsigned int irq) | 294 | static void xics_eoi_direct(unsigned int virq) |
330 | { | 295 | { |
331 | int cpu = smp_processor_id(); | 296 | int cpu = smp_processor_id(); |
297 | unsigned int irq = (unsigned int)irq_map[virq].hwirq; | ||
332 | 298 | ||
333 | iosync(); | 299 | iosync(); |
334 | ops->xirr_info_set(cpu, ((0xff << 24) | | 300 | direct_xirr_info_set(cpu, (0xff << 24) | irq); |
335 | (virt_irq_to_real(irq_offset_down(irq))))); | ||
336 | |||
337 | } | 301 | } |
338 | 302 | ||
339 | static void xics_mask_and_ack_irq(unsigned int irq) | 303 | |
304 | static void xics_eoi_lpar(unsigned int virq) | ||
340 | { | 305 | { |
341 | int cpu = smp_processor_id(); | 306 | int cpu = smp_processor_id(); |
307 | unsigned int irq = (unsigned int)irq_map[virq].hwirq; | ||
342 | 308 | ||
343 | if (irq < irq_offset_value()) { | 309 | iosync(); |
344 | i8259_pic.ack(irq); | 310 | lpar_xirr_info_set(cpu, (0xff << 24) | irq); |
345 | iosync(); | ||
346 | ops->xirr_info_set(cpu, ((0xff<<24) | | ||
347 | xics_irq_8259_cascade_real)); | ||
348 | iosync(); | ||
349 | } | ||
350 | } | 311 | } |
351 | 312 | ||
352 | int xics_get_irq(struct pt_regs *regs) | 313 | static inline unsigned int xics_remap_irq(unsigned int vec) |
353 | { | 314 | { |
354 | unsigned int cpu = smp_processor_id(); | 315 | unsigned int irq; |
355 | unsigned int vec; | ||
356 | int irq; | ||
357 | 316 | ||
358 | vec = ops->xirr_info_get(cpu); | ||
359 | /* (vec >> 24) == old priority */ | ||
360 | vec &= 0x00ffffff; | 317 | vec &= 0x00ffffff; |
361 | 318 | ||
362 | /* for sanity, this had better be < NR_IRQS - 16 */ | 319 | if (vec == XICS_IRQ_SPURIOUS) |
363 | if (vec == xics_irq_8259_cascade_real) { | 320 | return NO_IRQ; |
364 | irq = i8259_irq(regs); | 321 | irq = irq_radix_revmap(xics_host, vec); |
365 | xics_end_irq(irq_offset_up(xics_irq_8259_cascade)); | 322 | if (likely(irq != NO_IRQ)) |
366 | } else if (vec == XICS_IRQ_SPURIOUS) { | 323 | return irq; |
367 | irq = -1; | 324 | |
368 | } else { | 325 | printk(KERN_ERR "Interrupt %u (real) is invalid," |
369 | irq = real_irq_to_virt(vec); | 326 | " disabling it.\n", vec); |
370 | if (irq == NO_IRQ) | 327 | xics_mask_real_irq(vec); |
371 | irq = real_irq_to_virt_slowpath(vec); | 328 | return NO_IRQ; |
372 | if (irq == NO_IRQ) { | ||
373 | printk(KERN_ERR "Interrupt %u (real) is invalid," | ||
374 | " disabling it.\n", vec); | ||
375 | xics_disable_real_irq(vec); | ||
376 | } else | ||
377 | irq = irq_offset_up(irq); | ||
378 | } | ||
379 | return irq; | ||
380 | } | 329 | } |
381 | 330 | ||
382 | #ifdef CONFIG_SMP | 331 | static unsigned int xics_get_irq_direct(struct pt_regs *regs) |
332 | { | ||
333 | unsigned int cpu = smp_processor_id(); | ||
383 | 334 | ||
384 | static irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs) | 335 | return xics_remap_irq(direct_xirr_info_get(cpu)); |
336 | } | ||
337 | |||
338 | static unsigned int xics_get_irq_lpar(struct pt_regs *regs) | ||
385 | { | 339 | { |
386 | int cpu = smp_processor_id(); | 340 | unsigned int cpu = smp_processor_id(); |
341 | |||
342 | return xics_remap_irq(lpar_xirr_info_get(cpu)); | ||
343 | } | ||
387 | 344 | ||
388 | ops->qirr_info(cpu, 0xff); | 345 | #ifdef CONFIG_SMP |
389 | 346 | ||
347 | static irqreturn_t xics_ipi_dispatch(int cpu, struct pt_regs *regs) | ||
348 | { | ||
390 | WARN_ON(cpu_is_offline(cpu)); | 349 | WARN_ON(cpu_is_offline(cpu)); |
391 | 350 | ||
392 | while (xics_ipi_message[cpu].value) { | 351 | while (xics_ipi_message[cpu].value) { |
@@ -418,18 +377,88 @@ static irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs) | |||
418 | return IRQ_HANDLED; | 377 | return IRQ_HANDLED; |
419 | } | 378 | } |
420 | 379 | ||
380 | static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id, struct pt_regs *regs) | ||
381 | { | ||
382 | int cpu = smp_processor_id(); | ||
383 | |||
384 | direct_qirr_info(cpu, 0xff); | ||
385 | |||
386 | return xics_ipi_dispatch(cpu, regs); | ||
387 | } | ||
388 | |||
389 | static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id, struct pt_regs *regs) | ||
390 | { | ||
391 | int cpu = smp_processor_id(); | ||
392 | |||
393 | lpar_qirr_info(cpu, 0xff); | ||
394 | |||
395 | return xics_ipi_dispatch(cpu, regs); | ||
396 | } | ||
397 | |||
421 | void xics_cause_IPI(int cpu) | 398 | void xics_cause_IPI(int cpu) |
422 | { | 399 | { |
423 | ops->qirr_info(cpu, IPI_PRIORITY); | 400 | if (firmware_has_feature(FW_FEATURE_LPAR)) |
401 | lpar_qirr_info(cpu, IPI_PRIORITY); | ||
402 | else | ||
403 | direct_qirr_info(cpu, IPI_PRIORITY); | ||
424 | } | 404 | } |
405 | |||
425 | #endif /* CONFIG_SMP */ | 406 | #endif /* CONFIG_SMP */ |
426 | 407 | ||
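The IPI plumbing is likewise split per mode: xics_cause_IPI() pokes the target cpu's QIRR with IPI_PRIORITY, and the receiving action handler acks it by writing 0xff before draining xics_ipi_message[] in xics_ipi_dispatch(). The sending side lives outside this file (in the pseries SMP code, not shown here); the sketch below shows the usual pattern only, it is not a quote of that code:

    /* Sketch only; assumes xics_ipi_message[].value is an unsigned long
     * bitmask of pending message types, as the dispatch loop suggests.
     */
    static void example_message_pass(int cpu, int msg)
    {
    	set_bit(msg, &xics_ipi_message[cpu].value);
    	mb();			/* publish the flag before poking the QIRR */
    	xics_cause_IPI(cpu);	/* raises IPI_PRIORITY on the target cpu */
    }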
408 | static void xics_set_cpu_priority(int cpu, unsigned char cppr) | ||
409 | { | ||
410 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
411 | lpar_cppr_info(cpu, cppr); | ||
412 | else | ||
413 | direct_cppr_info(cpu, cppr); | ||
414 | iosync(); | ||
415 | } | ||
416 | |||
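xics_set_cpu_priority() is the piece both modes share: it sets the per-cpu CPPR, presumably via a hypervisor call in the LPAR flavour and via the ioremapped per-cpu area (set up further below) in the direct flavour. The call sites later in this section use it as summarised in the comment below, which restates the code in this patch under the usual XICS convention that a numerically lower value is a more favored priority:

    /*
     * xics_set_cpu_priority(cpu, 0xff);              setup: accept all sources
     * xics_set_cpu_priority(cpu, 0);                 teardown / migrate: reject
     *                                                anything queued to this cpu
     * xics_set_cpu_priority(cpu, DEFAULT_PRIORITY);  re-open so IPIs still work
     */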
417 | static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) | ||
418 | { | ||
419 | unsigned int irq; | ||
420 | int status; | ||
421 | int xics_status[2]; | ||
422 | unsigned long newmask; | ||
423 | cpumask_t tmp = CPU_MASK_NONE; | ||
424 | |||
425 | irq = (unsigned int)irq_map[virq].hwirq; | ||
426 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) | ||
427 | return; | ||
428 | |||
429 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); | ||
430 | |||
431 | if (status) { | ||
432 | printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive " | ||
433 | "returns %d\n", irq, status); | ||
434 | return; | ||
435 | } | ||
436 | |||
437 | /* For the moment only implement delivery to all cpus or one cpu */ | ||
438 | if (cpus_equal(cpumask, CPU_MASK_ALL)) { | ||
439 | newmask = default_distrib_server; | ||
440 | } else { | ||
441 | cpus_and(tmp, cpu_online_map, cpumask); | ||
442 | if (cpus_empty(tmp)) | ||
443 | return; | ||
444 | newmask = get_hard_smp_processor_id(first_cpu(tmp)); | ||
445 | } | ||
446 | |||
447 | status = rtas_call(ibm_set_xive, 3, 1, NULL, | ||
448 | irq, newmask, xics_status[1]); | ||
449 | |||
450 | if (status) { | ||
451 | printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive " | ||
452 | "returns %d\n", irq, status); | ||
453 | return; | ||
454 | } | ||
455 | } | ||
456 | |||
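xics_set_affinity() only ever re-targets a source's server and deliberately keeps its priority: ibm,get-xive returns the current (server, priority) pair in xics_status, and xics_status[1] is passed straight back to ibm,set-xive along with the new server value. Stripped of the error handling and the IPI/spurious filtering above, the round-trip looks roughly like the helper below, which is illustrative and not part of the patch:

    static int example_retarget(unsigned int hwirq, int server)
    {
    	int xive[2];	/* [0] = current server, [1] = current priority */
    	int rc;

    	rc = rtas_call(ibm_get_xive, 1, 3, xive, hwirq);
    	if (rc)
    		return rc;
    	/* new server, same priority the firmware reported */
    	return rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, server, xive[1]);
    }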
427 | void xics_setup_cpu(void) | 457 | void xics_setup_cpu(void) |
428 | { | 458 | { |
429 | int cpu = smp_processor_id(); | 459 | int cpu = smp_processor_id(); |
430 | 460 | ||
431 | ops->cppr_info(cpu, 0xff); | 461 | xics_set_cpu_priority(cpu, 0xff); |
432 | iosync(); | ||
433 | 462 | ||
434 | /* | 463 | /* |
435 | * Put the calling processor into the GIQ. This is really only | 464 | * Put the calling processor into the GIQ. This is really only |
@@ -442,72 +471,266 @@ void xics_setup_cpu(void) | |||
442 | (1UL << interrupt_server_size) - 1 - default_distrib_server, 1); | 471 | (1UL << interrupt_server_size) - 1 - default_distrib_server, 1); |
443 | } | 472 | } |
444 | 473 | ||
445 | void xics_init_IRQ(void) | 474 | |
475 | static struct irq_chip xics_pic_direct = { | ||
476 | .typename = " XICS ", | ||
477 | .startup = xics_startup, | ||
478 | .mask = xics_mask_irq, | ||
479 | .unmask = xics_unmask_irq, | ||
480 | .eoi = xics_eoi_direct, | ||
481 | .set_affinity = xics_set_affinity | ||
482 | }; | ||
483 | |||
484 | |||
485 | static struct irq_chip xics_pic_lpar = { | ||
486 | .typename = " XICS ", | ||
487 | .startup = xics_startup, | ||
488 | .mask = xics_mask_irq, | ||
489 | .unmask = xics_unmask_irq, | ||
490 | .eoi = xics_eoi_lpar, | ||
491 | .set_affinity = xics_set_affinity | ||
492 | }; | ||
493 | |||
494 | |||
495 | static int xics_host_match(struct irq_host *h, struct device_node *node) | ||
496 | { | ||
497 | /* IBM machines have interrupt parents of various funky types for things | ||
498 | * like vdevices, events, etc...  The trick we use here is to match | ||
499 | * everything except the legacy 8259, which is compatible with "chrp,iic". | ||
500 | */ | ||
501 | return !device_is_compatible(node, "chrp,iic"); | ||
502 | } | ||
503 | |||
504 | static int xics_host_map_direct(struct irq_host *h, unsigned int virq, | ||
505 | irq_hw_number_t hw, unsigned int flags) | ||
506 | { | ||
507 | unsigned int sense = flags & IRQ_TYPE_SENSE_MASK; | ||
508 | |||
509 | pr_debug("xics: map_direct virq %d, hwirq 0x%lx, flags: 0x%x\n", | ||
510 | virq, hw, flags); | ||
511 | |||
512 | if (sense && sense != IRQ_TYPE_LEVEL_LOW) | ||
513 | printk(KERN_WARNING "xics: using unsupported sense 0x%x" | ||
514 | " for irq %d (h: 0x%lx)\n", flags, virq, hw); | ||
515 | |||
516 | get_irq_desc(virq)->status |= IRQ_LEVEL; | ||
517 | set_irq_chip_and_handler(virq, &xics_pic_direct, handle_fasteoi_irq); | ||
518 | return 0; | ||
519 | } | ||
520 | |||
521 | static int xics_host_map_lpar(struct irq_host *h, unsigned int virq, | ||
522 | irq_hw_number_t hw, unsigned int flags) | ||
523 | { | ||
524 | unsigned int sense = flags & IRQ_TYPE_SENSE_MASK; | ||
525 | |||
526 | pr_debug("xics: map_lpar virq %d, hwirq 0x%lx, flags: 0x%x\n", | ||
527 | virq, hw, flags); | ||
528 | |||
529 | if (sense && sense != IRQ_TYPE_LEVEL_LOW) | ||
530 | printk(KERN_WARNING "xics: using unsupported sense 0x%x" | ||
531 | " for irq %d (h: 0x%lx)\n", flags, virq, hw); | ||
532 | |||
533 | get_irq_desc(virq)->status |= IRQ_LEVEL; | ||
534 | set_irq_chip_and_handler(virq, &xics_pic_lpar, handle_fasteoi_irq); | ||
535 | return 0; | ||
536 | } | ||
537 | |||
538 | static int xics_host_xlate(struct irq_host *h, struct device_node *ct, | ||
539 | u32 *intspec, unsigned int intsize, | ||
540 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | ||
541 | |||
542 | { | ||
543 | /* The current xics implementation translates everything | ||
544 | * to level.  This is not technically right for MSIs, but it is | ||
545 | * irrelevant at this point; we might get smarter in the future. | ||
546 | */ | ||
547 | *out_hwirq = intspec[0]; | ||
548 | *out_flags = IRQ_TYPE_LEVEL_LOW; | ||
549 | |||
550 | return 0; | ||
551 | } | ||
552 | |||
553 | static struct irq_host_ops xics_host_direct_ops = { | ||
554 | .match = xics_host_match, | ||
555 | .map = xics_host_map_direct, | ||
556 | .xlate = xics_host_xlate, | ||
557 | }; | ||
558 | |||
559 | static struct irq_host_ops xics_host_lpar_ops = { | ||
560 | .match = xics_host_match, | ||
561 | .map = xics_host_map_lpar, | ||
562 | .xlate = xics_host_xlate, | ||
563 | }; | ||
564 | |||
565 | static void __init xics_init_host(void) | ||
566 | { | ||
567 | struct irq_host_ops *ops; | ||
568 | |||
569 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
570 | ops = &xics_host_lpar_ops; | ||
571 | else | ||
572 | ops = &xics_host_direct_ops; | ||
573 | xics_host = irq_alloc_host(IRQ_HOST_MAP_TREE, 0, ops, | ||
574 | XICS_IRQ_SPURIOUS); | ||
575 | BUG_ON(xics_host == NULL); | ||
576 | irq_set_default_host(xics_host); | ||
577 | } | ||
578 | |||
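With the host registered as the default irq_host, nothing outside this file picks xics_pic_direct or xics_pic_lpar by hand: .match claims every interrupt parent except the legacy 8259, .xlate extracts the hardware source number from the device-tree specifier, and .map installs the right chip with the fasteoi flow the first time a source is mapped. A hypothetical consumer-side view, shown only to illustrate that flow (the function and names are invented, not from the patch):

    static int example_request(struct device_node *np, void *dev_id,
    			irqreturn_t (*handler)(int, void *, struct pt_regs *))
    {
    	/* .xlate and .map run underneath and install the fasteoi handler */
    	unsigned int virq = irq_of_parse_and_map(np, 0);

    	if (virq == NO_IRQ)
    		return -ENODEV;
    	return request_irq(virq, handler, 0, "example", dev_id);
    }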
579 | static void __init xics_map_one_cpu(int hw_id, unsigned long addr, | ||
580 | unsigned long size) | ||
446 | { | 581 | { |
582 | #ifdef CONFIG_SMP | ||
447 | int i; | 583 | int i; |
448 | unsigned long intr_size = 0; | ||
449 | struct device_node *np; | ||
450 | uint *ireg, ilen, indx = 0; | ||
451 | unsigned long intr_base = 0; | ||
452 | struct xics_interrupt_node { | ||
453 | unsigned long addr; | ||
454 | unsigned long size; | ||
455 | } intnodes[NR_CPUS]; | ||
456 | 584 | ||
457 | ppc64_boot_msg(0x20, "XICS Init"); | 585 | /* This may look gross, but it's good enough for now; we don't quite |
586 | * have a hard -> linux processor id mapping. |
587 | */ | ||
588 | for_each_possible_cpu(i) { | ||
589 | if (!cpu_present(i)) | ||
590 | continue; | ||
591 | if (hw_id == get_hard_smp_processor_id(i)) { | ||
592 | xics_per_cpu[i] = ioremap(addr, size); | ||
593 | return; | ||
594 | } | ||
595 | } | ||
596 | #else | ||
597 | if (hw_id != 0) | ||
598 | return; | ||
599 | xics_per_cpu[0] = ioremap(addr, size); | ||
600 | #endif /* CONFIG_SMP */ | ||
601 | } | ||
458 | 602 | ||
459 | ibm_get_xive = rtas_token("ibm,get-xive"); | 603 | static void __init xics_init_one_node(struct device_node *np, |
460 | ibm_set_xive = rtas_token("ibm,set-xive"); | 604 | unsigned int *indx) |
461 | ibm_int_on = rtas_token("ibm,int-on"); | 605 | { |
462 | ibm_int_off = rtas_token("ibm,int-off"); | 606 | unsigned int ilen; |
607 | u32 *ireg; | ||
463 | 608 | ||
464 | np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation"); | 609 | /* This code makes the theoretically broken assumption that the interrupt |
465 | if (!np) | 610 | * server numbers are the same as the hard CPU numbers. |
466 | panic("xics_init_IRQ: can't find interrupt presentation"); | 611 | * This happens to be the case so far, but we are playing with fire... |
612 | * it should be fixed one of these days. -BenH. |
613 | */ | ||
614 | ireg = (u32 *)get_property(np, "ibm,interrupt-server-ranges", NULL); | ||
467 | 615 | ||
468 | nextnode: | 616 | /* Does that ever happen? We'll know soon enough... but even good old |
469 | ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL); | 617 | * f80 does have that property. |
618 | */ | ||
619 | WARN_ON(ireg == NULL); | ||
470 | if (ireg) { | 620 | if (ireg) { |
471 | /* | 621 | /* |
472 | * set node starting index for this node | 622 | * set node starting index for this node |
473 | */ | 623 | */ |
474 | indx = *ireg; | 624 | *indx = *ireg; |
475 | } | 625 | } |
476 | 626 | ireg = (u32 *)get_property(np, "reg", &ilen); | |
477 | ireg = (uint *)get_property(np, "reg", &ilen); | ||
478 | if (!ireg) | 627 | if (!ireg) |
479 | panic("xics_init_IRQ: can't find interrupt reg property"); | 628 | panic("xics_init_IRQ: can't find interrupt reg property"); |
480 | 629 | ||
481 | while (ilen) { | 630 | while (ilen >= (4 * sizeof(u32))) { |
482 | intnodes[indx].addr = (unsigned long)*ireg++ << 32; | 631 | unsigned long addr, size; |
483 | ilen -= sizeof(uint); | 632 | |
484 | intnodes[indx].addr |= *ireg++; | 633 | /* XXX Use proper OF parsing code here !!! */ |
485 | ilen -= sizeof(uint); | 634 | addr = (unsigned long)*ireg++ << 32; |
486 | intnodes[indx].size = (unsigned long)*ireg++ << 32; | 635 | ilen -= sizeof(u32); |
487 | ilen -= sizeof(uint); | 636 | addr |= *ireg++; |
488 | intnodes[indx].size |= *ireg++; | 637 | ilen -= sizeof(u32); |
489 | ilen -= sizeof(uint); | 638 | size = (unsigned long)*ireg++ << 32; |
490 | indx++; | 639 | ilen -= sizeof(u32); |
491 | if (indx >= NR_CPUS) break; | 640 | size |= *ireg++; |
641 | ilen -= sizeof(u32); | ||
642 | xics_map_one_cpu(*indx, addr, size); | ||
643 | (*indx)++; | ||
644 | } | ||
645 | } | ||
646 | |||
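xics_init_one_node() consumes the presentation node's "reg" property as a flat run of 32-bit cells, four per interrupt server (address high, address low, size high, size low), and hands each pair to xics_map_one_cpu() for ioremapping into xics_per_cpu[]. Under that assumed two-cell/two-cell layout, a property describing two servers would look like the invented example below (the values are illustrative only, not from any real machine):

    /*
     *   reg = <0x0 0xf8040000  0x0 0x1000     // server N:   addr, size
     *          0x0 0xf8041000  0x0 0x1000>;   // server N+1: addr, size
     *
     * ireg[0..1] form the 64-bit address and ireg[2..3] the 64-bit size
     * consumed by one iteration of the while loop above.
     */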
647 | |||
648 | static void __init xics_setup_8259_cascade(void) | ||
649 | { | ||
650 | struct device_node *np, *old, *found = NULL; | ||
651 | int cascade, naddr; | ||
652 | u32 *addrp; | ||
653 | unsigned long intack = 0; | ||
654 | |||
655 | for_each_node_by_type(np, "interrupt-controller") | ||
656 | if (device_is_compatible(np, "chrp,iic")) { | ||
657 | found = np; | ||
658 | break; | ||
659 | } | ||
660 | if (found == NULL) { | ||
661 | printk(KERN_DEBUG "xics: no ISA interrupt controller\n"); | ||
662 | return; | ||
663 | } | ||
664 | cascade = irq_of_parse_and_map(found, 0); | ||
665 | if (cascade == NO_IRQ) { | ||
666 | printk(KERN_ERR "xics: failed to map cascade interrupt\n"); | ||
667 | return; | ||
668 | } | ||
669 | pr_debug("xics: cascade mapped to irq %d\n", cascade); | ||
670 | |||
671 | for (old = of_node_get(found); old != NULL ; old = np) { | ||
672 | np = of_get_parent(old); | ||
673 | of_node_put(old); | ||
674 | if (np == NULL) | ||
675 | break; | ||
676 | if (strcmp(np->name, "pci") != 0) | ||
677 | continue; | ||
678 | addrp = (u32 *)get_property(np, "8259-interrupt-acknowledge", NULL); | ||
679 | if (addrp == NULL) | ||
680 | continue; | ||
681 | naddr = prom_n_addr_cells(np); | ||
682 | intack = addrp[naddr-1]; | ||
683 | if (naddr > 1) | ||
684 | intack |= ((unsigned long)addrp[naddr-2]) << 32; | ||
685 | } | ||
686 | if (intack) | ||
687 | printk(KERN_DEBUG "xics: PCI 8259 intack at 0x%016lx\n", intack); | ||
688 | i8259_init(found, intack); | ||
689 | of_node_put(found); | ||
690 | set_irq_chained_handler(cascade, pseries_8259_cascade); | ||
691 | } | ||
692 | |||
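set_irq_chained_handler() points the cascade virq at pseries_8259_cascade, which this file only declares (see the xics.h hunk below); the definition lives elsewhere in the patch. The sketch below shows the usual shape of such a chained handler in this kernel generation, not the real implementation: poll the 8259 for the pending legacy source, feed it to the generic layer, then EOI the cascade on the XICS side.

    /* Illustrative only; the name and body are assumptions. */
    static void example_8259_cascade(unsigned int irq, struct irq_desc *desc,
    				struct pt_regs *regs)
    {
    	unsigned int cascade_irq = i8259_irq(regs);

    	if (cascade_irq != NO_IRQ)
    		generic_handle_irq(cascade_irq, regs);	/* run the ISA handler */
    	desc->chip->eoi(irq);				/* EOI the XICS cascade */
    }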
693 | void __init xics_init_IRQ(void) | ||
694 | { | ||
695 | int i; | ||
696 | struct device_node *np; | ||
697 | u32 *ireg, ilen, indx = 0; | ||
698 | int found = 0; | ||
699 | |||
700 | ppc64_boot_msg(0x20, "XICS Init"); | ||
701 | |||
702 | ibm_get_xive = rtas_token("ibm,get-xive"); | ||
703 | ibm_set_xive = rtas_token("ibm,set-xive"); | ||
704 | ibm_int_on = rtas_token("ibm,int-on"); | ||
705 | ibm_int_off = rtas_token("ibm,int-off"); | ||
706 | |||
707 | for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") { | ||
708 | found = 1; | ||
709 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
710 | break; | ||
711 | xics_init_one_node(np, &indx); | ||
492 | } | 712 | } |
713 | if (found == 0) | ||
714 | return; | ||
493 | 715 | ||
494 | np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation"); | 716 | xics_init_host(); |
495 | if ((indx < NR_CPUS) && np) goto nextnode; | ||
496 | 717 | ||
497 | /* Find the server numbers for the boot cpu. */ | 718 | /* Find the server numbers for the boot cpu. */ |
498 | for (np = of_find_node_by_type(NULL, "cpu"); | 719 | for (np = of_find_node_by_type(NULL, "cpu"); |
499 | np; | 720 | np; |
500 | np = of_find_node_by_type(np, "cpu")) { | 721 | np = of_find_node_by_type(np, "cpu")) { |
501 | ireg = (uint *)get_property(np, "reg", &ilen); | 722 | ireg = (u32 *)get_property(np, "reg", &ilen); |
502 | if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) { | 723 | if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) { |
503 | ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s", | 724 | ireg = (u32 *)get_property(np, |
504 | &ilen); | 725 | "ibm,ppc-interrupt-gserver#s", |
726 | &ilen); | ||
505 | i = ilen / sizeof(int); | 727 | i = ilen / sizeof(int); |
506 | if (ireg && i > 0) { | 728 | if (ireg && i > 0) { |
507 | default_server = ireg[0]; | 729 | default_server = ireg[0]; |
508 | default_distrib_server = ireg[i-1]; /* take last element */ | 730 | /* take last element */ |
731 | default_distrib_server = ireg[i-1]; | ||
509 | } | 732 | } |
510 | ireg = (uint *)get_property(np, | 733 | ireg = (u32 *)get_property(np, |
511 | "ibm,interrupt-server#-size", NULL); | 734 | "ibm,interrupt-server#-size", NULL); |
512 | if (ireg) | 735 | if (ireg) |
513 | interrupt_server_size = *ireg; | 736 | interrupt_server_size = *ireg; |
@@ -516,135 +739,48 @@ nextnode: | |||
516 | } | 739 | } |
517 | of_node_put(np); | 740 | of_node_put(np); |
518 | 741 | ||
519 | intr_base = intnodes[0].addr; | ||
520 | intr_size = intnodes[0].size; | ||
521 | |||
522 | np = of_find_node_by_type(NULL, "interrupt-controller"); | ||
523 | if (!np) { | ||
524 | printk(KERN_DEBUG "xics: no ISA interrupt controller\n"); | ||
525 | xics_irq_8259_cascade_real = -1; | ||
526 | xics_irq_8259_cascade = -1; | ||
527 | } else { | ||
528 | ireg = (uint *) get_property(np, "interrupts", NULL); | ||
529 | if (!ireg) | ||
530 | panic("xics_init_IRQ: can't find ISA interrupts property"); | ||
531 | |||
532 | xics_irq_8259_cascade_real = *ireg; | ||
533 | xics_irq_8259_cascade | ||
534 | = virt_irq_create_mapping(xics_irq_8259_cascade_real); | ||
535 | i8259_init(0, 0); | ||
536 | of_node_put(np); | ||
537 | } | ||
538 | |||
539 | if (firmware_has_feature(FW_FEATURE_LPAR)) | 742 | if (firmware_has_feature(FW_FEATURE_LPAR)) |
540 | ops = &pSeriesLP_ops; | 743 | ppc_md.get_irq = xics_get_irq_lpar; |
541 | else { | 744 | else |
542 | #ifdef CONFIG_SMP | 745 | ppc_md.get_irq = xics_get_irq_direct; |
543 | for_each_possible_cpu(i) { | ||
544 | int hard_id; | ||
545 | |||
546 | /* FIXME: Do this dynamically! --RR */ | ||
547 | if (!cpu_present(i)) | ||
548 | continue; | ||
549 | |||
550 | hard_id = get_hard_smp_processor_id(i); | ||
551 | xics_per_cpu[i] = ioremap(intnodes[hard_id].addr, | ||
552 | intnodes[hard_id].size); | ||
553 | } | ||
554 | #else | ||
555 | xics_per_cpu[0] = ioremap(intr_base, intr_size); | ||
556 | #endif /* CONFIG_SMP */ | ||
557 | } | ||
558 | |||
559 | for (i = irq_offset_value(); i < NR_IRQS; ++i) | ||
560 | get_irq_desc(i)->chip = &xics_pic; | ||
561 | 746 | ||
562 | xics_setup_cpu(); | 747 | xics_setup_cpu(); |
563 | 748 | ||
749 | xics_setup_8259_cascade(); | ||
750 | |||
564 | ppc64_boot_msg(0x21, "XICS Done"); | 751 | ppc64_boot_msg(0x21, "XICS Done"); |
565 | } | 752 | } |
566 | 753 | ||
567 | /* | ||
568 | * We cant do this in init_IRQ because we need the memory subsystem up for | ||
569 | * request_irq() | ||
570 | */ | ||
571 | static int __init xics_setup_i8259(void) | ||
572 | { | ||
573 | if (ppc64_interrupt_controller == IC_PPC_XIC && | ||
574 | xics_irq_8259_cascade != -1) { | ||
575 | if (request_irq(irq_offset_up(xics_irq_8259_cascade), | ||
576 | no_action, 0, "8259 cascade", NULL)) | ||
577 | printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 " | ||
578 | "cascade\n"); | ||
579 | } | ||
580 | return 0; | ||
581 | } | ||
582 | arch_initcall(xics_setup_i8259); | ||
583 | 754 | ||
584 | #ifdef CONFIG_SMP | 755 | #ifdef CONFIG_SMP |
585 | void xics_request_IPIs(void) | 756 | void xics_request_IPIs(void) |
586 | { | 757 | { |
587 | virt_irq_to_real_map[XICS_IPI] = XICS_IPI; | 758 | unsigned int ipi; |
759 | |||
760 | ipi = irq_create_mapping(xics_host, XICS_IPI, 0); | ||
761 | BUG_ON(ipi == NO_IRQ); | ||
588 | 762 | ||
589 | /* | 763 | /* |
590 | * IPIs are marked IRQF_DISABLED as they must run with irqs | 764 | * IPIs are marked IRQF_DISABLED as they must run with irqs |
591 | * disabled | 765 | * disabled |
592 | */ | 766 | */ |
593 | request_irq(irq_offset_up(XICS_IPI), xics_ipi_action, | 767 | set_irq_handler(ipi, handle_percpu_irq); |
594 | IRQF_DISABLED, "IPI", NULL); | 768 | if (firmware_has_feature(FW_FEATURE_LPAR)) |
595 | get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU; | 769 | request_irq(ipi, xics_ipi_action_lpar, IRQF_DISABLED, |
596 | } | 770 | "IPI", NULL); |
597 | #endif | 771 | else |
598 | 772 | request_irq(ipi, xics_ipi_action_direct, IRQF_DISABLED, | |
599 | static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) | 773 | "IPI", NULL); |
600 | { | ||
601 | unsigned int irq; | ||
602 | int status; | ||
603 | int xics_status[2]; | ||
604 | unsigned long newmask; | ||
605 | cpumask_t tmp = CPU_MASK_NONE; | ||
606 | |||
607 | irq = virt_irq_to_real(irq_offset_down(virq)); | ||
608 | if (irq == XICS_IPI || irq == NO_IRQ) | ||
609 | return; | ||
610 | |||
611 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); | ||
612 | |||
613 | if (status) { | ||
614 | printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive " | ||
615 | "returns %d\n", irq, status); | ||
616 | return; | ||
617 | } | ||
618 | |||
619 | /* For the moment only implement delivery to all cpus or one cpu */ | ||
620 | if (cpus_equal(cpumask, CPU_MASK_ALL)) { | ||
621 | newmask = default_distrib_server; | ||
622 | } else { | ||
623 | cpus_and(tmp, cpu_online_map, cpumask); | ||
624 | if (cpus_empty(tmp)) | ||
625 | return; | ||
626 | newmask = get_hard_smp_processor_id(first_cpu(tmp)); | ||
627 | } | ||
628 | |||
629 | status = rtas_call(ibm_set_xive, 3, 1, NULL, | ||
630 | irq, newmask, xics_status[1]); | ||
631 | |||
632 | if (status) { | ||
633 | printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive " | ||
634 | "returns %d\n", irq, status); | ||
635 | return; | ||
636 | } | ||
637 | } | 774 | } |
775 | #endif /* CONFIG_SMP */ | ||
638 | 776 | ||
639 | void xics_teardown_cpu(int secondary) | 777 | void xics_teardown_cpu(int secondary) |
640 | { | 778 | { |
641 | int cpu = smp_processor_id(); | 779 | int cpu = smp_processor_id(); |
780 | unsigned int ipi; | ||
781 | struct irq_desc *desc; | ||
642 | 782 | ||
643 | ops->cppr_info(cpu, 0x00); | 783 | xics_set_cpu_priority(cpu, 0); |
644 | iosync(); | ||
645 | |||
646 | /* Clear IPI */ | ||
647 | ops->qirr_info(cpu, 0xff); | ||
648 | 784 | ||
649 | /* | 785 | /* |
650 | * we need to EOI the IPI if we got here from kexec down IPI | 786 | * we need to EOI the IPI if we got here from kexec down IPI |
@@ -653,7 +789,13 @@ void xics_teardown_cpu(int secondary) | |||
653 | * should we be flagging idle loop instead? | 789 | * should we be flagging idle loop instead? |
654 | * or creating some task to be scheduled? | 790 | * or creating some task to be scheduled? |
655 | */ | 791 | */ |
656 | ops->xirr_info_set(cpu, XICS_IPI); | 792 | |
793 | ipi = irq_find_mapping(xics_host, XICS_IPI); | ||
794 | if (ipi == XICS_IRQ_SPURIOUS) | ||
795 | return; | ||
796 | desc = get_irq_desc(ipi); | ||
797 | if (desc->chip && desc->chip->eoi) | ||
798 | desc->chip->eoi(XICS_IPI); | ||
657 | 799 | ||
658 | /* | 800 | /* |
659 | * Some machines need to have at least one cpu in the GIQ, | 801 | * Some machines need to have at least one cpu in the GIQ, |
@@ -661,8 +803,8 @@ void xics_teardown_cpu(int secondary) | |||
661 | */ | 803 | */ |
662 | if (secondary) | 804 | if (secondary) |
663 | rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, | 805 | rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, |
664 | (1UL << interrupt_server_size) - 1 - | 806 | (1UL << interrupt_server_size) - 1 - |
665 | default_distrib_server, 0); | 807 | default_distrib_server, 0); |
666 | } | 808 | } |
667 | 809 | ||
668 | #ifdef CONFIG_HOTPLUG_CPU | 810 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -674,8 +816,7 @@ void xics_migrate_irqs_away(void) | |||
674 | unsigned int irq, virq, cpu = smp_processor_id(); | 816 | unsigned int irq, virq, cpu = smp_processor_id(); |
675 | 817 | ||
676 | /* Reject any interrupt that was queued to us... */ | 818 | /* Reject any interrupt that was queued to us... */ |
677 | ops->cppr_info(cpu, 0); | 819 | xics_set_cpu_priority(cpu, 0); |
678 | iosync(); | ||
679 | 820 | ||
680 | /* remove ourselves from the global interrupt queue */ | 821 | /* remove ourselves from the global interrupt queue */ |
681 | status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, | 822 | status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, |
@@ -683,24 +824,23 @@ void xics_migrate_irqs_away(void) | |||
683 | WARN_ON(status < 0); | 824 | WARN_ON(status < 0); |
684 | 825 | ||
685 | /* Allow IPIs again... */ | 826 | /* Allow IPIs again... */ |
686 | ops->cppr_info(cpu, DEFAULT_PRIORITY); | 827 | xics_set_cpu_priority(cpu, DEFAULT_PRIORITY); |
687 | iosync(); | ||
688 | 828 | ||
689 | for_each_irq(virq) { | 829 | for_each_irq(virq) { |
690 | irq_desc_t *desc; | 830 | struct irq_desc *desc; |
691 | int xics_status[2]; | 831 | int xics_status[2]; |
692 | unsigned long flags; | 832 | unsigned long flags; |
693 | 833 | ||
694 | /* We can't set affinity on ISA interrupts */ | 834 | /* We can't set affinity on ISA interrupts */ |
695 | if (virq < irq_offset_value()) | 835 | if (virq < NUM_ISA_INTERRUPTS) |
696 | continue; | 836 | continue; |
697 | 837 | if (irq_map[virq].host != xics_host) | |
698 | desc = get_irq_desc(virq); | 838 | continue; |
699 | irq = virt_irq_to_real(irq_offset_down(virq)); | 839 | irq = (unsigned int)irq_map[virq].hwirq; |
700 | |||
701 | /* We need to get IPIs still. */ | 840 | /* We need to get IPIs still. */ |
702 | if (irq == XICS_IPI || irq == NO_IRQ) | 841 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) |
703 | continue; | 842 | continue; |
843 | desc = get_irq_desc(virq); | ||
704 | 844 | ||
705 | /* We only need to migrate enabled IRQS */ | 845 | /* We only need to migrate enabled IRQS */ |
706 | if (desc == NULL || desc->chip == NULL | 846 | if (desc == NULL || desc->chip == NULL |
diff --git a/arch/powerpc/platforms/pseries/xics.h b/arch/powerpc/platforms/pseries/xics.h index e14c70868f1d..6ee1055b0ffb 100644 --- a/arch/powerpc/platforms/pseries/xics.h +++ b/arch/powerpc/platforms/pseries/xics.h | |||
@@ -14,13 +14,12 @@ | |||
14 | 14 | ||
15 | #include <linux/cache.h> | 15 | #include <linux/cache.h> |
16 | 16 | ||
17 | void xics_init_IRQ(void); | 17 | extern void xics_init_IRQ(void); |
18 | int xics_get_irq(struct pt_regs *); | 18 | extern void xics_setup_cpu(void); |
19 | void xics_setup_cpu(void); | 19 | extern void xics_teardown_cpu(int secondary); |
20 | void xics_teardown_cpu(int secondary); | 20 | extern void xics_cause_IPI(int cpu); |
21 | void xics_cause_IPI(int cpu); | 21 | extern void xics_request_IPIs(void); |
22 | void xics_request_IPIs(void); | 22 | extern void xics_migrate_irqs_away(void); |
23 | void xics_migrate_irqs_away(void); | ||
24 | 23 | ||
25 | /* first argument is ignored for now*/ | 24 | /* first argument is ignored for now*/ |
26 | void pSeriesLP_cppr_info(int n_cpu, u8 value); | 25 | void pSeriesLP_cppr_info(int n_cpu, u8 value); |
@@ -31,4 +30,8 @@ struct xics_ipi_struct { | |||
31 | 30 | ||
32 | extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned; | 31 | extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned; |
33 | 32 | ||
33 | struct irq_desc; | ||
34 | extern void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc, | ||
35 | struct pt_regs *regs); | ||
36 | |||
34 | #endif /* _POWERPC_KERNEL_XICS_H */ | 37 | #endif /* _POWERPC_KERNEL_XICS_H */ |