author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2011-04-03 23:46:58 -0400
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2011-04-19 21:02:35 -0400
commit | 0b05ac6e24807f0c26f763b3a546c0bcbf84125f (patch)
tree | 9c1a113a050583e564dcd78a7aa80fde6320d8e1 /arch
parent | f0e615c3cb72b42191b558c130409335812621d8 (diff)
powerpc/xics: Rewrite XICS driver
This is a significant rework of the XICS driver, too significant, to be
honest, to break up conveniently into a series of smaller patches.
The driver is moved to a more generic location to allow new platforms
to use it, and is broken up into separate ICP and ICS "backends". For
now we have the native and "hypervisor" ICP backends and one common
RTAS ICS backend.
The driver supports one ICP backend instantiation, and many ICS ones,
in order to accommodate future platforms with multiple, possibly
different, interrupt "source" mechanisms.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
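A note on the new layout before the diff: the common layer owns the irq host and the per-cpu CPPR state (both declared in the new asm/xics.h below) and dispatches through a single global icp_ops pointer, while each interrupt-source controller registers a struct ics of its own. The following is a minimal sketch of how a hypothetical platform could plug into these hooks; struct ics, xics_init() and xics_register_ics() appear in the patch, while everything prefixed my_ is invented for illustration and the callback semantics are inferred, not confirmed by this excerpt.

/* Sketch only: a hypothetical platform wiring into the new XICS split.
 * struct ics, xics_init() and xics_register_ics() come from this patch
 * (see the new asm/xics.h below); the my_* names are invented.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <asm/xics.h>

static int my_ics_map(struct ics *ics, unsigned int virq)
{
	/* Presumed contract: return 0 once this source controller has
	 * claimed the interrupt and set up its chip/handler; non-zero
	 * lets other registered ICS instances have a look. */
	return -EINVAL;
}

static void my_ics_mask_unknown(struct ics *ics, unsigned long vec)
{
	/* Mask a vector that has no Linux mapping yet. */
}

static long my_ics_get_server(struct ics *ics, unsigned long vec)
{
	return -1;	/* presumed: -1 means "not one of ours" */
}

static struct ics my_ics = {
	.map		= my_ics_map,
	.mask_unknown	= my_ics_mask_unknown,
	.get_server	= my_ics_get_server,
};

static void __init my_platform_init_IRQ(void)
{
	/* The common layer brings up exactly one ICP backend... */
	xics_init();
	/* ...after which any number of ICS instances may register. */
	xics_register_ics(&my_ics);
}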
Diffstat (limited to 'arch')
-rw-r--r-- | arch/powerpc/include/asm/irq.h | 6
-rw-r--r-- | arch/powerpc/include/asm/xics.h | 139
-rw-r--r-- | arch/powerpc/platforms/pseries/Kconfig | 5
-rw-r--r-- | arch/powerpc/platforms/pseries/Makefile | 1
-rw-r--r-- | arch/powerpc/platforms/pseries/hotplug-cpu.c | 3
-rw-r--r-- | arch/powerpc/platforms/pseries/kexec.c | 5
-rw-r--r-- | arch/powerpc/platforms/pseries/plpar_wrappers.h | 27
-rw-r--r-- | arch/powerpc/platforms/pseries/setup.c | 8
-rw-r--r-- | arch/powerpc/platforms/pseries/smp.c | 17
-rw-r--r-- | arch/powerpc/platforms/pseries/xics.c | 949
-rw-r--r-- | arch/powerpc/platforms/pseries/xics.h | 23
-rw-r--r-- | arch/powerpc/sysdev/Kconfig | 3
-rw-r--r-- | arch/powerpc/sysdev/Makefile | 4
-rw-r--r-- | arch/powerpc/sysdev/xics/Kconfig | 12
-rw-r--r-- | arch/powerpc/sysdev/xics/Makefile | 6
-rw-r--r-- | arch/powerpc/sysdev/xics/icp-hv.c | 184
-rw-r--r-- | arch/powerpc/sysdev/xics/icp-native.c | 312
-rw-r--r-- | arch/powerpc/sysdev/xics/ics-rtas.c | 229
-rw-r--r-- | arch/powerpc/sysdev/xics/xics-common.c | 461
19 files changed, 1377 insertions, 1017 deletions
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 67ab5fb7d153..47b7905a6369 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -142,6 +142,12 @@ extern struct irq_map_entry irq_map[NR_IRQS];
142 | 142 | ||
143 | extern irq_hw_number_t virq_to_hw(unsigned int virq); | 143 | extern irq_hw_number_t virq_to_hw(unsigned int virq); |
144 | 144 | ||
145 | /* This will eventually -replace- virq_to_hw if/when we stash the | ||
146 | * HW number in the irq_data itself. We use a macro so we can inline | ||
147 | * it as irq_data isn't defined yet | ||
148 | */ | ||
149 | #define irq_data_to_hw(d) (irq_map[(d)->irq].hwirq) | ||
150 | |||
145 | /** | 151 | /** |
146 | * irq_alloc_host - Allocate a new irq_host data structure | 152 | * irq_alloc_host - Allocate a new irq_host data structure |
147 | * @of_node: optional device-tree node of the interrupt controller | 153 | * @of_node: optional device-tree node of the interrupt controller |
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
new file mode 100644
index 000000000000..146aad8534de
--- /dev/null
+++ b/arch/powerpc/include/asm/xics.h
@@ -0,0 +1,139 @@
1 | /* | ||
2 | * Common definitions across all variants of ICP and ICS interrupt | ||
3 | * controllers. | ||
4 | */ | ||
5 | |||
6 | #ifndef _XICS_H | ||
7 | #define _XICS_H | ||
8 | |||
9 | #define XICS_IPI 2 | ||
10 | #define XICS_IRQ_SPURIOUS 0 | ||
11 | |||
12 | /* Want a priority other than 0. Various HW issues require this. */ | ||
13 | #define DEFAULT_PRIORITY 5 | ||
14 | |||
15 | /* | ||
16 | * Mark IPIs as higher priority so we can take them inside interrupts that | ||
17 | * aren't marked IRQF_DISABLED | ||
18 | */ | ||
19 | #define IPI_PRIORITY 4 | ||
20 | |||
21 | /* The least favored priority */ | ||
22 | #define LOWEST_PRIORITY 0xFF | ||
23 | |||
24 | /* The number of priorities defined above */ | ||
25 | #define MAX_NUM_PRIORITIES 3 | ||
26 | |||
27 | /* Native ICP */ | ||
28 | extern int icp_native_init(void); | ||
29 | |||
30 | /* PAPR ICP */ | ||
31 | extern int icp_hv_init(void); | ||
32 | |||
33 | /* ICP ops */ | ||
34 | struct icp_ops { | ||
35 | unsigned int (*get_irq)(void); | ||
36 | void (*eoi)(struct irq_data *d); | ||
37 | void (*set_priority)(unsigned char prio); | ||
38 | void (*teardown_cpu)(void); | ||
39 | void (*flush_ipi)(void); | ||
40 | #ifdef CONFIG_SMP | ||
41 | void (*message_pass)(int target, int msg); | ||
42 | irq_handler_t ipi_action; | ||
43 | #endif | ||
44 | }; | ||
45 | |||
46 | extern const struct icp_ops *icp_ops; | ||
47 | |||
48 | /* Native ICS */ | ||
49 | extern int ics_native_init(void); | ||
50 | |||
51 | /* RTAS ICS */ | ||
52 | extern int ics_rtas_init(void); | ||
53 | |||
54 | /* ICS instance, hooked up to chip_data of an irq */ | ||
55 | struct ics { | ||
56 | struct list_head link; | ||
57 | int (*map)(struct ics *ics, unsigned int virq); | ||
58 | void (*mask_unknown)(struct ics *ics, unsigned long vec); | ||
59 | long (*get_server)(struct ics *ics, unsigned long vec); | ||
60 | char data[]; | ||
61 | }; | ||
62 | |||
63 | /* Commons */ | ||
64 | extern unsigned int xics_default_server; | ||
65 | extern unsigned int xics_default_distrib_server; | ||
66 | extern unsigned int xics_interrupt_server_size; | ||
67 | extern struct irq_host *xics_host; | ||
68 | |||
69 | struct xics_cppr { | ||
70 | unsigned char stack[MAX_NUM_PRIORITIES]; | ||
71 | int index; | ||
72 | }; | ||
73 | |||
74 | DECLARE_PER_CPU(struct xics_cppr, xics_cppr); | ||
75 | |||
76 | static inline void xics_push_cppr(unsigned int vec) | ||
77 | { | ||
78 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
79 | |||
80 | if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1)) | ||
81 | return; | ||
82 | |||
83 | if (vec == XICS_IPI) | ||
84 | os_cppr->stack[++os_cppr->index] = IPI_PRIORITY; | ||
85 | else | ||
86 | os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY; | ||
87 | } | ||
88 | |||
89 | static inline unsigned char xics_pop_cppr(void) | ||
90 | { | ||
91 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
92 | |||
93 | if (WARN_ON(os_cppr->index < 1)) | ||
94 | return LOWEST_PRIORITY; | ||
95 | |||
96 | return os_cppr->stack[--os_cppr->index]; | ||
97 | } | ||
98 | |||
99 | static inline void xics_set_base_cppr(unsigned char cppr) | ||
100 | { | ||
101 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
102 | |||
103 | /* we only really want to set the priority when there's | ||
104 | * just one cppr value on the stack | ||
105 | */ | ||
106 | WARN_ON(os_cppr->index != 0); | ||
107 | |||
108 | os_cppr->stack[0] = cppr; | ||
109 | } | ||
110 | |||
111 | static inline unsigned char xics_cppr_top(void) | ||
112 | { | ||
113 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
114 | |||
115 | return os_cppr->stack[os_cppr->index]; | ||
116 | } | ||
117 | |||
118 | DECLARE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message); | ||
119 | |||
120 | extern void xics_init(void); | ||
121 | extern void xics_setup_cpu(void); | ||
122 | extern void xics_update_irq_servers(void); | ||
123 | extern void xics_set_cpu_giq(unsigned int gserver, unsigned int join); | ||
124 | extern void xics_mask_unknown_vec(unsigned int vec); | ||
125 | extern irqreturn_t xics_ipi_dispatch(int cpu); | ||
126 | extern int xics_smp_probe(void); | ||
127 | extern void xics_register_ics(struct ics *ics); | ||
128 | extern void xics_teardown_cpu(void); | ||
129 | extern void xics_kexec_teardown_cpu(int secondary); | ||
130 | extern void xics_migrate_irqs_away(void); | ||
131 | #ifdef CONFIG_SMP | ||
132 | extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask, | ||
133 | unsigned int strict_check); | ||
134 | #else | ||
135 | #define xics_get_irq_server(virq, cpumask, strict_check) (xics_default_server) | ||
136 | #endif | ||
137 | |||
138 | |||
139 | #endif /* _XICS_H */ | ||
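The CPPR helpers just added implement a small per-cpu priority stack: slot 0 holds the base priority installed through xics_set_base_cppr(), each accepted interrupt pushes the priority it implies, and each EOI pops back to the previous value (the pop << 24 | hwirq EOI form appears in the backends below). A worked trace of a nested IPI inside a device interrupt, as an illustration rather than code from the patch:

/*
 * Illustrative trace of the CPPR stack (not code from the patch),
 * using only the constants and helpers defined above.
 *
 * Base state after setup:      stack[0] = LOWEST_PRIORITY (0xff), index = 0
 *
 * Device interrupt taken:
 *	xics_push_cppr(vec);        stack = { 0xff, 5 },    index = 1
 *	(5 == DEFAULT_PRIORITY)
 *
 * IPI taken while the device handler runs (4 is more favored than 5):
 *	xics_push_cppr(XICS_IPI);   stack = { 0xff, 5, 4 }, index = 2
 *	(4 == IPI_PRIORITY)
 *
 * IPI handler EOIs; the pop restores the device priority into the
 * top byte of the XIRR write:
 *	xirr = (xics_pop_cppr() << 24) | XICS_IPI;   CPPR back to 5
 *
 * Device handler EOIs; the pop restores the base priority:
 *	xirr = (xics_pop_cppr() << 24) | hw_irq;     CPPR back to 0xff
 *
 * MAX_NUM_PRIORITIES == 3 is exactly this worst case: the base
 * priority, one device interrupt, and one nested IPI.
 */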
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 5b3da4b4ea79..b0449229836e 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -3,7 +3,10 @@ config PPC_PSERIES
3 | bool "IBM pSeries & new (POWER5-based) iSeries" | 3 | bool "IBM pSeries & new (POWER5-based) iSeries" |
4 | select MPIC | 4 | select MPIC |
5 | select PCI_MSI | 5 | select PCI_MSI |
6 | select XICS | 6 | select PPC_XICS |
7 | select PPC_ICP_NATIVE | ||
8 | select PPC_ICP_HV | ||
9 | select PPC_ICS_RTAS | ||
7 | select PPC_I8259 | 10 | select PPC_I8259 |
8 | select PPC_RTAS | 11 | select PPC_RTAS |
9 | select PPC_RTAS_DAEMON | 12 | select PPC_RTAS_DAEMON |
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index fc5237810ece..4cfefbaccd5f 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -5,7 +5,6 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \
5 | setup.o iommu.o event_sources.o ras.o \ | 5 | setup.o iommu.o event_sources.o ras.o \ |
6 | firmware.o power.o dlpar.o mobility.o | 6 | firmware.o power.o dlpar.o mobility.o |
7 | obj-$(CONFIG_SMP) += smp.o | 7 | obj-$(CONFIG_SMP) += smp.o |
8 | obj-$(CONFIG_XICS) += xics.o | ||
9 | obj-$(CONFIG_SCANLOG) += scanlog.o | 8 | obj-$(CONFIG_SCANLOG) += scanlog.o |
10 | obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o eeh_sysfs.o | 9 | obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o eeh_sysfs.o |
11 | obj-$(CONFIG_KEXEC) += kexec.o | 10 | obj-$(CONFIG_KEXEC) += kexec.o |
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index ef8c45489e20..ae6c27df4dc4 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -19,6 +19,7 @@
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/interrupt.h> | ||
22 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
23 | #include <linux/cpu.h> | 24 | #include <linux/cpu.h> |
24 | #include <asm/system.h> | 25 | #include <asm/system.h> |
@@ -28,7 +29,7 @@
28 | #include <asm/machdep.h> | 29 | #include <asm/machdep.h> |
29 | #include <asm/vdso_datapage.h> | 30 | #include <asm/vdso_datapage.h> |
30 | #include <asm/pSeries_reconfig.h> | 31 | #include <asm/pSeries_reconfig.h> |
31 | #include "xics.h" | 32 | #include <asm/xics.h> |
32 | #include "plpar_wrappers.h" | 33 | #include "plpar_wrappers.h" |
33 | #include "offline_states.h" | 34 | #include "offline_states.h" |
34 | 35 | ||
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 77d38a5e2ff9..54cf3a4aa16b 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -7,15 +7,18 @@
7 | * 2 of the License, or (at your option) any later version. | 7 | * 2 of the License, or (at your option) any later version. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | |||
10 | #include <asm/machdep.h> | 13 | #include <asm/machdep.h> |
11 | #include <asm/page.h> | 14 | #include <asm/page.h> |
12 | #include <asm/firmware.h> | 15 | #include <asm/firmware.h> |
13 | #include <asm/kexec.h> | 16 | #include <asm/kexec.h> |
14 | #include <asm/mpic.h> | 17 | #include <asm/mpic.h> |
18 | #include <asm/xics.h> | ||
15 | #include <asm/smp.h> | 19 | #include <asm/smp.h> |
16 | 20 | ||
17 | #include "pseries.h" | 21 | #include "pseries.h" |
18 | #include "xics.h" | ||
19 | #include "plpar_wrappers.h" | 22 | #include "plpar_wrappers.h" |
20 | 23 | ||
21 | static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) | 24 | static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) |
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index d9801117124b..4bf21207d7d3 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -270,31 +270,4 @@ static inline long plpar_put_term_char(unsigned long termno, unsigned long len,
270 | lbuf[1]); | 270 | lbuf[1]); |
271 | } | 271 | } |
272 | 272 | ||
273 | static inline long plpar_eoi(unsigned long xirr) | ||
274 | { | ||
275 | return plpar_hcall_norets(H_EOI, xirr); | ||
276 | } | ||
277 | |||
278 | static inline long plpar_cppr(unsigned long cppr) | ||
279 | { | ||
280 | return plpar_hcall_norets(H_CPPR, cppr); | ||
281 | } | ||
282 | |||
283 | static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr) | ||
284 | { | ||
285 | return plpar_hcall_norets(H_IPI, servernum, mfrr); | ||
286 | } | ||
287 | |||
288 | static inline long plpar_xirr(unsigned long *xirr_ret, unsigned char cppr) | ||
289 | { | ||
290 | long rc; | ||
291 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | ||
292 | |||
293 | rc = plpar_hcall(H_XIRR, retbuf, cppr); | ||
294 | |||
295 | *xirr_ret = retbuf[0]; | ||
296 | |||
297 | return rc; | ||
298 | } | ||
299 | |||
300 | #endif /* _PSERIES_PLPAR_WRAPPERS_H */ | 273 | #endif /* _PSERIES_PLPAR_WRAPPERS_H */ |
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 6c42cfde8415..ab73ad2ff59d 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -53,9 +53,9 @@
53 | #include <asm/irq.h> | 53 | #include <asm/irq.h> |
54 | #include <asm/time.h> | 54 | #include <asm/time.h> |
55 | #include <asm/nvram.h> | 55 | #include <asm/nvram.h> |
56 | #include "xics.h" | ||
57 | #include <asm/pmc.h> | 56 | #include <asm/pmc.h> |
58 | #include <asm/mpic.h> | 57 | #include <asm/mpic.h> |
58 | #include <asm/xics.h> | ||
59 | #include <asm/ppc-pci.h> | 59 | #include <asm/ppc-pci.h> |
60 | #include <asm/i8259.h> | 60 | #include <asm/i8259.h> |
61 | #include <asm/udbg.h> | 61 | #include <asm/udbg.h> |
@@ -205,6 +205,9 @@ static void __init pseries_mpic_init_IRQ(void)
205 | mpic_assign_isu(mpic, n, isuaddr); | 205 | mpic_assign_isu(mpic, n, isuaddr); |
206 | } | 206 | } |
207 | 207 | ||
208 | /* Setup top-level get_irq */ | ||
209 | ppc_md.get_irq = mpic_get_irq; | ||
210 | |||
208 | /* All ISUs are setup, complete initialization */ | 211 | /* All ISUs are setup, complete initialization */ |
209 | mpic_init(mpic); | 212 | mpic_init(mpic); |
210 | 213 | ||
@@ -214,7 +217,7 @@
214 | 217 | ||
215 | static void __init pseries_xics_init_IRQ(void) | 218 | static void __init pseries_xics_init_IRQ(void) |
216 | { | 219 | { |
217 | xics_init_IRQ(); | 220 | xics_init(); |
218 | pseries_setup_i8259_cascade(); | 221 | pseries_setup_i8259_cascade(); |
219 | } | 222 | } |
220 | 223 | ||
@@ -238,7 +241,6 @@ static void __init pseries_discover_pic(void)
238 | if (strstr(typep, "open-pic")) { | 241 | if (strstr(typep, "open-pic")) { |
239 | pSeries_mpic_node = of_node_get(np); | 242 | pSeries_mpic_node = of_node_get(np); |
240 | ppc_md.init_IRQ = pseries_mpic_init_IRQ; | 243 | ppc_md.init_IRQ = pseries_mpic_init_IRQ; |
241 | ppc_md.get_irq = mpic_get_irq; | ||
242 | setup_kexec_cpu_down_mpic(); | 244 | setup_kexec_cpu_down_mpic(); |
243 | smp_init_pseries_mpic(); | 245 | smp_init_pseries_mpic(); |
244 | return; | 246 | return; |
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index a509c5292a67..fc72bfce7320 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -44,10 +44,11 @@
44 | #include <asm/mpic.h> | 44 | #include <asm/mpic.h> |
45 | #include <asm/vdso_datapage.h> | 45 | #include <asm/vdso_datapage.h> |
46 | #include <asm/cputhreads.h> | 46 | #include <asm/cputhreads.h> |
47 | #include <asm/mpic.h> | ||
48 | #include <asm/xics.h> | ||
47 | 49 | ||
48 | #include "plpar_wrappers.h" | 50 | #include "plpar_wrappers.h" |
49 | #include "pseries.h" | 51 | #include "pseries.h" |
50 | #include "xics.h" | ||
51 | #include "offline_states.h" | 52 | #include "offline_states.h" |
52 | 53 | ||
53 | 54 | ||
@@ -136,7 +137,6 @@ out:
136 | return 1; | 137 | return 1; |
137 | } | 138 | } |
138 | 139 | ||
139 | #ifdef CONFIG_XICS | ||
140 | static void __devinit smp_xics_setup_cpu(int cpu) | 140 | static void __devinit smp_xics_setup_cpu(int cpu) |
141 | { | 141 | { |
142 | if (cpu != boot_cpuid) | 142 | if (cpu != boot_cpuid) |
@@ -151,7 +151,6 @@ static void __devinit smp_xics_setup_cpu(int cpu)
151 | set_default_offline_state(cpu); | 151 | set_default_offline_state(cpu); |
152 | #endif | 152 | #endif |
153 | } | 153 | } |
154 | #endif /* CONFIG_XICS */ | ||
155 | 154 | ||
156 | static void __devinit smp_pSeries_kick_cpu(int nr) | 155 | static void __devinit smp_pSeries_kick_cpu(int nr) |
157 | { | 156 | { |
@@ -197,23 +196,21 @@ static int smp_pSeries_cpu_bootable(unsigned int nr)
197 | 196 | ||
198 | return 1; | 197 | return 1; |
199 | } | 198 | } |
200 | #ifdef CONFIG_MPIC | 199 | |
201 | static struct smp_ops_t pSeries_mpic_smp_ops = { | 200 | static struct smp_ops_t pSeries_mpic_smp_ops = { |
202 | .message_pass = smp_mpic_message_pass, | 201 | .message_pass = smp_mpic_message_pass, |
203 | .probe = smp_mpic_probe, | 202 | .probe = smp_mpic_probe, |
204 | .kick_cpu = smp_pSeries_kick_cpu, | 203 | .kick_cpu = smp_pSeries_kick_cpu, |
205 | .setup_cpu = smp_mpic_setup_cpu, | 204 | .setup_cpu = smp_mpic_setup_cpu, |
206 | }; | 205 | }; |
207 | #endif | 206 | |
208 | #ifdef CONFIG_XICS | ||
209 | static struct smp_ops_t pSeries_xics_smp_ops = { | 207 | static struct smp_ops_t pSeries_xics_smp_ops = { |
210 | .message_pass = smp_xics_message_pass, | 208 | .message_pass = NULL, /* Filled at runtime by xics_smp_probe() */ |
211 | .probe = smp_xics_probe, | 209 | .probe = xics_smp_probe, |
212 | .kick_cpu = smp_pSeries_kick_cpu, | 210 | .kick_cpu = smp_pSeries_kick_cpu, |
213 | .setup_cpu = smp_xics_setup_cpu, | 211 | .setup_cpu = smp_xics_setup_cpu, |
214 | .cpu_bootable = smp_pSeries_cpu_bootable, | 212 | .cpu_bootable = smp_pSeries_cpu_bootable, |
215 | }; | 213 | }; |
216 | #endif | ||
217 | 214 | ||
218 | /* This is called very early */ | 215 | /* This is called very early */ |
219 | static void __init smp_init_pseries(void) | 216 | static void __init smp_init_pseries(void) |
@@ -245,14 +242,12 @@ static void __init smp_init_pseries(void)
245 | pr_debug(" <- smp_init_pSeries()\n"); | 242 | pr_debug(" <- smp_init_pSeries()\n"); |
246 | } | 243 | } |
247 | 244 | ||
248 | #ifdef CONFIG_MPIC | ||
249 | void __init smp_init_pseries_mpic(void) | 245 | void __init smp_init_pseries_mpic(void) |
250 | { | 246 | { |
251 | smp_ops = &pSeries_mpic_smp_ops; | 247 | smp_ops = &pSeries_mpic_smp_ops; |
252 | 248 | ||
253 | smp_init_pseries(); | 249 | smp_init_pseries(); |
254 | } | 250 | } |
255 | #endif | ||
256 | 251 | ||
257 | void __init smp_init_pseries_xics(void) | 252 | void __init smp_init_pseries_xics(void) |
258 | { | 253 | { |
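The smp.c hunk above deliberately leaves .message_pass NULL: the right IPI mechanism is only known once an ICP backend has been chosen, so the probe hook fills it in at runtime. xics-common.c is not included in this excerpt, so the following sketch of that probe is inferred from the icp_ops fields declared in asm/xics.h and from the old smp_xics_probe() being deleted below; treat it as a reading aid, not the actual implementation.

/* Inferred sketch of xics_smp_probe(); the real body lives in
 * arch/powerpc/sysdev/xics/xics-common.c, which this excerpt omits. */
#include <linux/cpumask.h>
#include <asm/smp.h>
#include <asm/xics.h>

int __init xics_smp_probe(void)
{
	/* Route generic SMP message sends through the active ICP backend */
	smp_ops->message_pass = icp_ops->message_pass;

	/* Map and request the single XICS IPI; the handler is presumably
	 * icp_ops->ipi_action now, rather than a hardcoded function as in
	 * the old xics_request_ipi() deleted below. */
	xics_request_ipi();

	return cpumask_weight(cpu_possible_mask);
}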
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
deleted file mode 100644
index d6901334d66e..000000000000
--- a/arch/powerpc/platforms/pseries/xics.c
+++ /dev/null
@@ -1,949 +0,0 @@
1 | /* | ||
2 | * arch/powerpc/platforms/pseries/xics.c | ||
3 | * | ||
4 | * Copyright 2000 IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/types.h> | ||
13 | #include <linux/threads.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/irq.h> | ||
16 | #include <linux/smp.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/radix-tree.h> | ||
20 | #include <linux/cpu.h> | ||
21 | #include <linux/msi.h> | ||
22 | #include <linux/of.h> | ||
23 | #include <linux/percpu.h> | ||
24 | |||
25 | #include <asm/firmware.h> | ||
26 | #include <asm/io.h> | ||
27 | #include <asm/pgtable.h> | ||
28 | #include <asm/smp.h> | ||
29 | #include <asm/rtas.h> | ||
30 | #include <asm/hvcall.h> | ||
31 | #include <asm/machdep.h> | ||
32 | |||
33 | #include "xics.h" | ||
34 | #include "plpar_wrappers.h" | ||
35 | |||
36 | static struct irq_host *xics_host; | ||
37 | |||
38 | #define XICS_IPI 2 | ||
39 | #define XICS_IRQ_SPURIOUS 0 | ||
40 | |||
41 | /* Want a priority other than 0. Various HW issues require this. */ | ||
42 | #define DEFAULT_PRIORITY 5 | ||
43 | |||
44 | /* | ||
45 | * Mark IPIs as higher priority so we can take them inside interrupts that | ||
46 | * aren't marked IRQF_DISABLED | ||
47 | */ | ||
48 | #define IPI_PRIORITY 4 | ||
49 | |||
50 | /* The least favored priority */ | ||
51 | #define LOWEST_PRIORITY 0xFF | ||
52 | |||
53 | /* The number of priorities defined above */ | ||
54 | #define MAX_NUM_PRIORITIES 3 | ||
55 | |||
56 | static unsigned int default_server = 0xFF; | ||
57 | static unsigned int default_distrib_server = 0; | ||
58 | static unsigned int interrupt_server_size = 8; | ||
59 | |||
60 | /* RTAS service tokens */ | ||
61 | static int ibm_get_xive; | ||
62 | static int ibm_set_xive; | ||
63 | static int ibm_int_on; | ||
64 | static int ibm_int_off; | ||
65 | |||
66 | struct xics_cppr { | ||
67 | unsigned char stack[MAX_NUM_PRIORITIES]; | ||
68 | int index; | ||
69 | }; | ||
70 | |||
71 | static DEFINE_PER_CPU(struct xics_cppr, xics_cppr); | ||
72 | |||
73 | /* Direct hardware low level accessors */ | ||
74 | |||
75 | /* The part of the interrupt presentation layer that we care about */ | ||
76 | struct xics_ipl { | ||
77 | union { | ||
78 | u32 word; | ||
79 | u8 bytes[4]; | ||
80 | } xirr_poll; | ||
81 | union { | ||
82 | u32 word; | ||
83 | u8 bytes[4]; | ||
84 | } xirr; | ||
85 | u32 dummy; | ||
86 | union { | ||
87 | u32 word; | ||
88 | u8 bytes[4]; | ||
89 | } qirr; | ||
90 | }; | ||
91 | |||
92 | static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS]; | ||
93 | |||
94 | static inline unsigned int direct_xirr_info_get(void) | ||
95 | { | ||
96 | int cpu = smp_processor_id(); | ||
97 | |||
98 | return in_be32(&xics_per_cpu[cpu]->xirr.word); | ||
99 | } | ||
100 | |||
101 | static inline void direct_xirr_info_set(unsigned int value) | ||
102 | { | ||
103 | int cpu = smp_processor_id(); | ||
104 | |||
105 | out_be32(&xics_per_cpu[cpu]->xirr.word, value); | ||
106 | } | ||
107 | |||
108 | static inline void direct_cppr_info(u8 value) | ||
109 | { | ||
110 | int cpu = smp_processor_id(); | ||
111 | |||
112 | out_8(&xics_per_cpu[cpu]->xirr.bytes[0], value); | ||
113 | } | ||
114 | |||
115 | static inline void direct_qirr_info(int n_cpu, u8 value) | ||
116 | { | ||
117 | out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value); | ||
118 | } | ||
119 | |||
120 | |||
121 | /* LPAR low level accessors */ | ||
122 | |||
123 | static inline unsigned int lpar_xirr_info_get(unsigned char cppr) | ||
124 | { | ||
125 | unsigned long lpar_rc; | ||
126 | unsigned long return_value; | ||
127 | |||
128 | lpar_rc = plpar_xirr(&return_value, cppr); | ||
129 | if (lpar_rc != H_SUCCESS) | ||
130 | panic(" bad return code xirr - rc = %lx\n", lpar_rc); | ||
131 | return (unsigned int)return_value; | ||
132 | } | ||
133 | |||
134 | static inline void lpar_xirr_info_set(unsigned int value) | ||
135 | { | ||
136 | unsigned long lpar_rc; | ||
137 | |||
138 | lpar_rc = plpar_eoi(value); | ||
139 | if (lpar_rc != H_SUCCESS) | ||
140 | panic("bad return code EOI - rc = %ld, value=%x\n", lpar_rc, | ||
141 | value); | ||
142 | } | ||
143 | |||
144 | static inline void lpar_cppr_info(u8 value) | ||
145 | { | ||
146 | unsigned long lpar_rc; | ||
147 | |||
148 | lpar_rc = plpar_cppr(value); | ||
149 | if (lpar_rc != H_SUCCESS) | ||
150 | panic("bad return code cppr - rc = %lx\n", lpar_rc); | ||
151 | } | ||
152 | |||
153 | static inline void lpar_qirr_info(int n_cpu, u8 value) | ||
154 | { | ||
155 | unsigned long lpar_rc; | ||
156 | |||
157 | lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value); | ||
158 | if (lpar_rc != H_SUCCESS) | ||
159 | panic("bad return code qirr - rc = %lx\n", lpar_rc); | ||
160 | } | ||
161 | |||
162 | |||
163 | /* Interface to generic irq subsystem */ | ||
164 | |||
165 | #ifdef CONFIG_SMP | ||
166 | /* | ||
167 | * For the moment we only implement delivery to all cpus or one cpu. | ||
168 | * | ||
169 | * If the requested affinity is cpu_all_mask, we set global affinity. | ||
170 | * If not we set it to the first cpu in the mask, even if multiple cpus | ||
171 | * are set. This is so things like irqbalance (which set core and package | ||
172 | * wide affinities) do the right thing. | ||
173 | */ | ||
174 | static int get_irq_server(unsigned int virq, const struct cpumask *cpumask, | ||
175 | unsigned int strict_check) | ||
176 | { | ||
177 | |||
178 | if (!distribute_irqs) | ||
179 | return default_server; | ||
180 | |||
181 | if (!cpumask_subset(cpu_possible_mask, cpumask)) { | ||
182 | int server = cpumask_first_and(cpu_online_mask, cpumask); | ||
183 | |||
184 | if (server < nr_cpu_ids) | ||
185 | return get_hard_smp_processor_id(server); | ||
186 | |||
187 | if (strict_check) | ||
188 | return -1; | ||
189 | } | ||
190 | |||
191 | /* | ||
192 | * Workaround issue with some versions of JS20 firmware that | ||
193 | * deliver interrupts to cpus which haven't been started. This | ||
194 | * happens when using the maxcpus= boot option. | ||
195 | */ | ||
196 | if (cpumask_equal(cpu_online_mask, cpu_present_mask)) | ||
197 | return default_distrib_server; | ||
198 | |||
199 | return default_server; | ||
200 | } | ||
201 | #else | ||
202 | #define get_irq_server(virq, cpumask, strict_check) (default_server) | ||
203 | #endif | ||
204 | |||
205 | static void xics_unmask_irq(struct irq_data *d) | ||
206 | { | ||
207 | unsigned int hwirq; | ||
208 | int call_status; | ||
209 | int server; | ||
210 | |||
211 | pr_devel("xics: unmask virq %d\n", d->irq); | ||
212 | |||
213 | hwirq = (unsigned int)irq_map[d->irq].hwirq; | ||
214 | pr_devel(" -> map to hwirq 0x%x\n", hwirq); | ||
215 | if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) | ||
216 | return; | ||
217 | |||
218 | server = get_irq_server(d->irq, d->affinity, 0); | ||
219 | |||
220 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, server, | ||
221 | DEFAULT_PRIORITY); | ||
222 | if (call_status != 0) { | ||
223 | printk(KERN_ERR | ||
224 | "%s: ibm_set_xive irq %u server %x returned %d\n", | ||
225 | __func__, hwirq, server, call_status); | ||
226 | return; | ||
227 | } | ||
228 | |||
229 | /* Now unmask the interrupt (often a no-op) */ | ||
230 | call_status = rtas_call(ibm_int_on, 1, 1, NULL, hwirq); | ||
231 | if (call_status != 0) { | ||
232 | printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", | ||
233 | __func__, hwirq, call_status); | ||
234 | return; | ||
235 | } | ||
236 | } | ||
237 | |||
238 | static unsigned int xics_startup(struct irq_data *d) | ||
239 | { | ||
240 | /* | ||
241 | * The generic MSI code returns with the interrupt disabled on the | ||
242 | * card, using the MSI mask bits. Firmware doesn't appear to unmask | ||
243 | * at that level, so we do it here by hand. | ||
244 | */ | ||
245 | if (d->msi_desc) | ||
246 | unmask_msi_irq(d); | ||
247 | |||
248 | /* unmask it */ | ||
249 | xics_unmask_irq(d); | ||
250 | return 0; | ||
251 | } | ||
252 | |||
253 | static void xics_mask_real_irq(unsigned int hwirq) | ||
254 | { | ||
255 | int call_status; | ||
256 | |||
257 | if (hwirq == XICS_IPI) | ||
258 | return; | ||
259 | |||
260 | call_status = rtas_call(ibm_int_off, 1, 1, NULL, hwirq); | ||
261 | if (call_status != 0) { | ||
262 | printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", | ||
263 | __func__, hwirq, call_status); | ||
264 | return; | ||
265 | } | ||
266 | |||
267 | /* Have to set XIVE to 0xff to be able to remove a slot */ | ||
268 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, | ||
269 | default_server, 0xff); | ||
270 | if (call_status != 0) { | ||
271 | printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", | ||
272 | __func__, hwirq, call_status); | ||
273 | return; | ||
274 | } | ||
275 | } | ||
276 | |||
277 | static void xics_mask_irq(struct irq_data *d) | ||
278 | { | ||
279 | unsigned int hwirq; | ||
280 | |||
281 | pr_devel("xics: mask virq %d\n", d->irq); | ||
282 | |||
283 | hwirq = (unsigned int)irq_map[d->irq].hwirq; | ||
284 | if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) | ||
285 | return; | ||
286 | xics_mask_real_irq(hwirq); | ||
287 | } | ||
288 | |||
289 | static void xics_mask_unknown_vec(unsigned int vec) | ||
290 | { | ||
291 | printk(KERN_ERR "Interrupt %u (real) is invalid, disabling it.\n", vec); | ||
292 | xics_mask_real_irq(vec); | ||
293 | } | ||
294 | |||
295 | static inline unsigned int xics_xirr_vector(unsigned int xirr) | ||
296 | { | ||
297 | /* | ||
298 | * The top byte is the old cppr, to be restored on EOI. | ||
299 | * The remaining 24 bits are the vector. | ||
300 | */ | ||
301 | return xirr & 0x00ffffff; | ||
302 | } | ||
303 | |||
304 | static void push_cppr(unsigned int vec) | ||
305 | { | ||
306 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
307 | |||
308 | if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1)) | ||
309 | return; | ||
310 | |||
311 | if (vec == XICS_IPI) | ||
312 | os_cppr->stack[++os_cppr->index] = IPI_PRIORITY; | ||
313 | else | ||
314 | os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY; | ||
315 | } | ||
316 | |||
317 | static unsigned int xics_get_irq_direct(void) | ||
318 | { | ||
319 | unsigned int xirr = direct_xirr_info_get(); | ||
320 | unsigned int vec = xics_xirr_vector(xirr); | ||
321 | unsigned int irq; | ||
322 | |||
323 | if (vec == XICS_IRQ_SPURIOUS) | ||
324 | return NO_IRQ; | ||
325 | |||
326 | irq = irq_radix_revmap_lookup(xics_host, vec); | ||
327 | if (likely(irq != NO_IRQ)) { | ||
328 | push_cppr(vec); | ||
329 | return irq; | ||
330 | } | ||
331 | |||
332 | /* We don't have a linux mapping, so have rtas mask it. */ | ||
333 | xics_mask_unknown_vec(vec); | ||
334 | |||
335 | /* We might learn about it later, so EOI it */ | ||
336 | direct_xirr_info_set(xirr); | ||
337 | return NO_IRQ; | ||
338 | } | ||
339 | |||
340 | static unsigned int xics_get_irq_lpar(void) | ||
341 | { | ||
342 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
343 | unsigned int xirr = lpar_xirr_info_get(os_cppr->stack[os_cppr->index]); | ||
344 | unsigned int vec = xics_xirr_vector(xirr); | ||
345 | unsigned int irq; | ||
346 | |||
347 | if (vec == XICS_IRQ_SPURIOUS) | ||
348 | return NO_IRQ; | ||
349 | |||
350 | irq = irq_radix_revmap_lookup(xics_host, vec); | ||
351 | if (likely(irq != NO_IRQ)) { | ||
352 | push_cppr(vec); | ||
353 | return irq; | ||
354 | } | ||
355 | |||
356 | /* We don't have a linux mapping, so have RTAS mask it. */ | ||
357 | xics_mask_unknown_vec(vec); | ||
358 | |||
359 | /* We might learn about it later, so EOI it */ | ||
360 | lpar_xirr_info_set(xirr); | ||
361 | return NO_IRQ; | ||
362 | } | ||
363 | |||
364 | static unsigned char pop_cppr(void) | ||
365 | { | ||
366 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
367 | |||
368 | if (WARN_ON(os_cppr->index < 1)) | ||
369 | return LOWEST_PRIORITY; | ||
370 | |||
371 | return os_cppr->stack[--os_cppr->index]; | ||
372 | } | ||
373 | |||
374 | static void xics_eoi_direct(struct irq_data *d) | ||
375 | { | ||
376 | unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq; | ||
377 | |||
378 | iosync(); | ||
379 | direct_xirr_info_set((pop_cppr() << 24) | hwirq); | ||
380 | } | ||
381 | |||
382 | static void xics_eoi_lpar(struct irq_data *d) | ||
383 | { | ||
384 | unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq; | ||
385 | |||
386 | iosync(); | ||
387 | lpar_xirr_info_set((pop_cppr() << 24) | hwirq); | ||
388 | } | ||
389 | |||
390 | static int | ||
391 | xics_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) | ||
392 | { | ||
393 | unsigned int hwirq; | ||
394 | int status; | ||
395 | int xics_status[2]; | ||
396 | int irq_server; | ||
397 | |||
398 | hwirq = (unsigned int)irq_map[d->irq].hwirq; | ||
399 | if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) | ||
400 | return -1; | ||
401 | |||
402 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq); | ||
403 | |||
404 | if (status) { | ||
405 | printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", | ||
406 | __func__, hwirq, status); | ||
407 | return -1; | ||
408 | } | ||
409 | |||
410 | irq_server = get_irq_server(d->irq, cpumask, 1); | ||
411 | if (irq_server == -1) { | ||
412 | char cpulist[128]; | ||
413 | cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); | ||
414 | printk(KERN_WARNING | ||
415 | "%s: No online cpus in the mask %s for irq %d\n", | ||
416 | __func__, cpulist, d->irq); | ||
417 | return -1; | ||
418 | } | ||
419 | |||
420 | status = rtas_call(ibm_set_xive, 3, 1, NULL, | ||
421 | hwirq, irq_server, xics_status[1]); | ||
422 | |||
423 | if (status) { | ||
424 | printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n", | ||
425 | __func__, hwirq, status); | ||
426 | return -1; | ||
427 | } | ||
428 | |||
429 | return 0; | ||
430 | } | ||
431 | |||
432 | static struct irq_chip xics_pic_direct = { | ||
433 | .name = "XICS", | ||
434 | .irq_startup = xics_startup, | ||
435 | .irq_mask = xics_mask_irq, | ||
436 | .irq_unmask = xics_unmask_irq, | ||
437 | .irq_eoi = xics_eoi_direct, | ||
438 | .irq_set_affinity = xics_set_affinity | ||
439 | }; | ||
440 | |||
441 | static struct irq_chip xics_pic_lpar = { | ||
442 | .name = "XICS", | ||
443 | .irq_startup = xics_startup, | ||
444 | .irq_mask = xics_mask_irq, | ||
445 | .irq_unmask = xics_unmask_irq, | ||
446 | .irq_eoi = xics_eoi_lpar, | ||
447 | .irq_set_affinity = xics_set_affinity | ||
448 | }; | ||
449 | |||
450 | |||
451 | /* Interface to arch irq controller subsystem layer */ | ||
452 | |||
453 | /* Points to the irq_chip we're actually using */ | ||
454 | static struct irq_chip *xics_irq_chip; | ||
455 | |||
456 | static int xics_host_match(struct irq_host *h, struct device_node *node) | ||
457 | { | ||
458 | /* IBM machines have interrupt parents of various funky types for things | ||
459 | * like vdevices, events, etc... The trick we use here is to match | ||
460 | * everything except the legacy 8259, which is compatible with "chrp,iic" | ||
461 | */ | ||
462 | return !of_device_is_compatible(node, "chrp,iic"); | ||
463 | } | ||
464 | |||
465 | static int xics_host_map(struct irq_host *h, unsigned int virq, | ||
466 | irq_hw_number_t hw) | ||
467 | { | ||
468 | pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw); | ||
469 | |||
470 | /* Insert the interrupt mapping into the radix tree for fast lookup */ | ||
471 | irq_radix_revmap_insert(xics_host, virq, hw); | ||
472 | |||
473 | irq_set_status_flags(virq, IRQ_LEVEL); | ||
474 | irq_set_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq); | ||
475 | return 0; | ||
476 | } | ||
477 | |||
478 | static int xics_host_xlate(struct irq_host *h, struct device_node *ct, | ||
479 | const u32 *intspec, unsigned int intsize, | ||
480 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | ||
481 | |||
482 | { | ||
483 | /* Current xics implementation translates everything | ||
484 | * to level. It is not technically right for MSIs but this | ||
485 | * is irrelevant at this point. We might get smarter in the future | ||
486 | */ | ||
487 | *out_hwirq = intspec[0]; | ||
488 | *out_flags = IRQ_TYPE_LEVEL_LOW; | ||
489 | |||
490 | return 0; | ||
491 | } | ||
492 | |||
493 | static struct irq_host_ops xics_host_ops = { | ||
494 | .match = xics_host_match, | ||
495 | .map = xics_host_map, | ||
496 | .xlate = xics_host_xlate, | ||
497 | }; | ||
498 | |||
499 | static void __init xics_init_host(void) | ||
500 | { | ||
501 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
502 | xics_irq_chip = &xics_pic_lpar; | ||
503 | else | ||
504 | xics_irq_chip = &xics_pic_direct; | ||
505 | |||
506 | xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops, | ||
507 | XICS_IRQ_SPURIOUS); | ||
508 | BUG_ON(xics_host == NULL); | ||
509 | irq_set_default_host(xics_host); | ||
510 | } | ||
511 | |||
512 | |||
513 | /* Inter-processor interrupt support */ | ||
514 | |||
515 | #ifdef CONFIG_SMP | ||
516 | /* | ||
517 | * XICS only has a single IPI, so encode the messages per CPU | ||
518 | */ | ||
519 | static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message); | ||
520 | |||
521 | static inline void smp_xics_do_message(int cpu, int msg) | ||
522 | { | ||
523 | unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); | ||
524 | |||
525 | set_bit(msg, tgt); | ||
526 | mb(); | ||
527 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
528 | lpar_qirr_info(cpu, IPI_PRIORITY); | ||
529 | else | ||
530 | direct_qirr_info(cpu, IPI_PRIORITY); | ||
531 | } | ||
532 | |||
533 | void smp_xics_message_pass(int target, int msg) | ||
534 | { | ||
535 | unsigned int i; | ||
536 | |||
537 | if (target < NR_CPUS) { | ||
538 | smp_xics_do_message(target, msg); | ||
539 | } else { | ||
540 | for_each_online_cpu(i) { | ||
541 | if (target == MSG_ALL_BUT_SELF | ||
542 | && i == smp_processor_id()) | ||
543 | continue; | ||
544 | smp_xics_do_message(i, msg); | ||
545 | } | ||
546 | } | ||
547 | } | ||
548 | |||
549 | static irqreturn_t xics_ipi_dispatch(int cpu) | ||
550 | { | ||
551 | unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); | ||
552 | |||
553 | mb(); /* order mmio clearing qirr */ | ||
554 | while (*tgt) { | ||
555 | if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) { | ||
556 | smp_message_recv(PPC_MSG_CALL_FUNCTION); | ||
557 | } | ||
558 | if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) { | ||
559 | smp_message_recv(PPC_MSG_RESCHEDULE); | ||
560 | } | ||
561 | if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) { | ||
562 | smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE); | ||
563 | } | ||
564 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) | ||
565 | if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) { | ||
566 | smp_message_recv(PPC_MSG_DEBUGGER_BREAK); | ||
567 | } | ||
568 | #endif | ||
569 | } | ||
570 | return IRQ_HANDLED; | ||
571 | } | ||
572 | |||
573 | static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id) | ||
574 | { | ||
575 | int cpu = smp_processor_id(); | ||
576 | |||
577 | direct_qirr_info(cpu, 0xff); | ||
578 | |||
579 | return xics_ipi_dispatch(cpu); | ||
580 | } | ||
581 | |||
582 | static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id) | ||
583 | { | ||
584 | int cpu = smp_processor_id(); | ||
585 | |||
586 | lpar_qirr_info(cpu, 0xff); | ||
587 | |||
588 | return xics_ipi_dispatch(cpu); | ||
589 | } | ||
590 | |||
591 | static void xics_request_ipi(void) | ||
592 | { | ||
593 | unsigned int ipi; | ||
594 | int rc; | ||
595 | |||
596 | ipi = irq_create_mapping(xics_host, XICS_IPI); | ||
597 | BUG_ON(ipi == NO_IRQ); | ||
598 | |||
599 | /* | ||
600 | * IPIs are marked IRQF_DISABLED as they must run with irqs | ||
601 | * disabled | ||
602 | */ | ||
603 | irq_set_handler(ipi, handle_percpu_irq); | ||
604 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
605 | rc = request_irq(ipi, xics_ipi_action_lpar, | ||
606 | IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); | ||
607 | else | ||
608 | rc = request_irq(ipi, xics_ipi_action_direct, | ||
609 | IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); | ||
610 | BUG_ON(rc); | ||
611 | } | ||
612 | |||
613 | int __init smp_xics_probe(void) | ||
614 | { | ||
615 | xics_request_ipi(); | ||
616 | |||
617 | return cpumask_weight(cpu_possible_mask); | ||
618 | } | ||
619 | |||
620 | #endif /* CONFIG_SMP */ | ||
621 | |||
622 | |||
623 | /* Initialization */ | ||
624 | |||
625 | static void xics_update_irq_servers(void) | ||
626 | { | ||
627 | int i, j; | ||
628 | struct device_node *np; | ||
629 | u32 ilen; | ||
630 | const u32 *ireg; | ||
631 | u32 hcpuid; | ||
632 | |||
633 | /* Find the server numbers for the boot cpu. */ | ||
634 | np = of_get_cpu_node(boot_cpuid, NULL); | ||
635 | BUG_ON(!np); | ||
636 | |||
637 | ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen); | ||
638 | if (!ireg) { | ||
639 | of_node_put(np); | ||
640 | return; | ||
641 | } | ||
642 | |||
643 | i = ilen / sizeof(int); | ||
644 | hcpuid = get_hard_smp_processor_id(boot_cpuid); | ||
645 | |||
646 | /* Global interrupt distribution server is specified in the last | ||
647 | * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last | ||
648 | * entry from this property for the current boot cpu id and use it as | ||
649 | * default distribution server | ||
650 | */ | ||
651 | for (j = 0; j < i; j += 2) { | ||
652 | if (ireg[j] == hcpuid) { | ||
653 | default_server = hcpuid; | ||
654 | default_distrib_server = ireg[j+1]; | ||
655 | } | ||
656 | } | ||
657 | |||
658 | of_node_put(np); | ||
659 | } | ||
660 | |||
661 | static void __init xics_map_one_cpu(int hw_id, unsigned long addr, | ||
662 | unsigned long size) | ||
663 | { | ||
664 | int i; | ||
665 | |||
666 | /* This may look gross, but it's good enough for now; we don't quite | ||
667 | * have a hard -> linux processor id mapping. | ||
668 | */ | ||
669 | for_each_possible_cpu(i) { | ||
670 | if (!cpu_present(i)) | ||
671 | continue; | ||
672 | if (hw_id == get_hard_smp_processor_id(i)) { | ||
673 | xics_per_cpu[i] = ioremap(addr, size); | ||
674 | return; | ||
675 | } | ||
676 | } | ||
677 | } | ||
678 | |||
679 | static void __init xics_init_one_node(struct device_node *np, | ||
680 | unsigned int *indx) | ||
681 | { | ||
682 | unsigned int ilen; | ||
683 | const u32 *ireg; | ||
684 | |||
685 | /* This code makes the theoretically broken assumption that the interrupt | ||
686 | * server numbers are the same as the hard CPU numbers. | ||
687 | * This happens to be the case so far but we are playing with fire... | ||
688 | * should be fixed one of these days. -BenH. | ||
689 | */ | ||
690 | ireg = of_get_property(np, "ibm,interrupt-server-ranges", NULL); | ||
691 | |||
692 | /* Does that ever happen? We'll know soon enough... but even good old | ||
693 | * f80 does have that property. | ||
694 | */ | ||
695 | WARN_ON(ireg == NULL); | ||
696 | if (ireg) { | ||
697 | /* | ||
698 | * set node starting index for this node | ||
699 | */ | ||
700 | *indx = *ireg; | ||
701 | } | ||
702 | ireg = of_get_property(np, "reg", &ilen); | ||
703 | if (!ireg) | ||
704 | panic("xics_init_IRQ: can't find interrupt reg property"); | ||
705 | |||
706 | while (ilen >= (4 * sizeof(u32))) { | ||
707 | unsigned long addr, size; | ||
708 | |||
709 | /* XXX Use proper OF parsing code here !!! */ | ||
710 | addr = (unsigned long)*ireg++ << 32; | ||
711 | ilen -= sizeof(u32); | ||
712 | addr |= *ireg++; | ||
713 | ilen -= sizeof(u32); | ||
714 | size = (unsigned long)*ireg++ << 32; | ||
715 | ilen -= sizeof(u32); | ||
716 | size |= *ireg++; | ||
717 | ilen -= sizeof(u32); | ||
718 | xics_map_one_cpu(*indx, addr, size); | ||
719 | (*indx)++; | ||
720 | } | ||
721 | } | ||
722 | |||
723 | void __init xics_init_IRQ(void) | ||
724 | { | ||
725 | struct device_node *np; | ||
726 | u32 indx = 0; | ||
727 | int found = 0; | ||
728 | const u32 *isize; | ||
729 | |||
730 | ppc64_boot_msg(0x20, "XICS Init"); | ||
731 | |||
732 | ibm_get_xive = rtas_token("ibm,get-xive"); | ||
733 | ibm_set_xive = rtas_token("ibm,set-xive"); | ||
734 | ibm_int_on = rtas_token("ibm,int-on"); | ||
735 | ibm_int_off = rtas_token("ibm,int-off"); | ||
736 | |||
737 | for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") { | ||
738 | found = 1; | ||
739 | if (firmware_has_feature(FW_FEATURE_LPAR)) { | ||
740 | of_node_put(np); | ||
741 | break; | ||
742 | } | ||
743 | xics_init_one_node(np, &indx); | ||
744 | } | ||
745 | if (found == 0) | ||
746 | return; | ||
747 | |||
748 | /* get the bit size of server numbers */ | ||
749 | found = 0; | ||
750 | |||
751 | for_each_compatible_node(np, NULL, "ibm,ppc-xics") { | ||
752 | isize = of_get_property(np, "ibm,interrupt-server#-size", NULL); | ||
753 | |||
754 | if (!isize) | ||
755 | continue; | ||
756 | |||
757 | if (!found) { | ||
758 | interrupt_server_size = *isize; | ||
759 | found = 1; | ||
760 | } else if (*isize != interrupt_server_size) { | ||
761 | printk(KERN_WARNING "XICS: " | ||
762 | "mismatched ibm,interrupt-server#-size\n"); | ||
763 | interrupt_server_size = max(*isize, | ||
764 | interrupt_server_size); | ||
765 | } | ||
766 | } | ||
767 | |||
768 | xics_update_irq_servers(); | ||
769 | xics_init_host(); | ||
770 | |||
771 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
772 | ppc_md.get_irq = xics_get_irq_lpar; | ||
773 | else | ||
774 | ppc_md.get_irq = xics_get_irq_direct; | ||
775 | |||
776 | xics_setup_cpu(); | ||
777 | |||
778 | ppc64_boot_msg(0x21, "XICS Done"); | ||
779 | } | ||
780 | |||
781 | /* Cpu startup, shutdown, and hotplug */ | ||
782 | |||
783 | static void xics_set_cpu_priority(unsigned char cppr) | ||
784 | { | ||
785 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
786 | |||
787 | /* | ||
788 | * we only really want to set the priority when there's | ||
789 | * just one cppr value on the stack | ||
790 | */ | ||
791 | WARN_ON(os_cppr->index != 0); | ||
792 | |||
793 | os_cppr->stack[0] = cppr; | ||
794 | |||
795 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
796 | lpar_cppr_info(cppr); | ||
797 | else | ||
798 | direct_cppr_info(cppr); | ||
799 | iosync(); | ||
800 | } | ||
801 | |||
802 | /* Have the calling processor join or leave the specified global queue */ | ||
803 | static void xics_set_cpu_giq(unsigned int gserver, unsigned int join) | ||
804 | { | ||
805 | int index; | ||
806 | int status; | ||
807 | |||
808 | if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL)) | ||
809 | return; | ||
810 | |||
811 | index = (1UL << interrupt_server_size) - 1 - gserver; | ||
812 | |||
813 | status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join); | ||
814 | |||
815 | WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n", | ||
816 | GLOBAL_INTERRUPT_QUEUE, index, join, status); | ||
817 | } | ||
818 | |||
819 | void xics_setup_cpu(void) | ||
820 | { | ||
821 | xics_set_cpu_priority(LOWEST_PRIORITY); | ||
822 | |||
823 | xics_set_cpu_giq(default_distrib_server, 1); | ||
824 | } | ||
825 | |||
826 | void xics_teardown_cpu(void) | ||
827 | { | ||
828 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
829 | int cpu = smp_processor_id(); | ||
830 | |||
831 | /* | ||
832 | * we have to reset the cppr index to 0 because we're | ||
833 | * not going to return from the IPI | ||
834 | */ | ||
835 | os_cppr->index = 0; | ||
836 | xics_set_cpu_priority(0); | ||
837 | |||
838 | /* Clear any pending IPI request */ | ||
839 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
840 | lpar_qirr_info(cpu, 0xff); | ||
841 | else | ||
842 | direct_qirr_info(cpu, 0xff); | ||
843 | } | ||
844 | |||
845 | void xics_kexec_teardown_cpu(int secondary) | ||
846 | { | ||
847 | xics_teardown_cpu(); | ||
848 | |||
849 | /* | ||
850 | * we take the IPI irq and never return, so we | ||
851 | * need to EOI the IPI, but want to leave our priority 0 | ||
852 | * | ||
853 | * should we check all the other interrupts too? | ||
854 | * should we be flagging idle loop instead? | ||
855 | * or creating some task to be scheduled? | ||
856 | */ | ||
857 | |||
858 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
859 | lpar_xirr_info_set((0x00 << 24) | XICS_IPI); | ||
860 | else | ||
861 | direct_xirr_info_set((0x00 << 24) | XICS_IPI); | ||
862 | |||
863 | /* | ||
864 | * Some machines need to have at least one cpu in the GIQ, | ||
865 | * so leave the master cpu in the group. | ||
866 | */ | ||
867 | if (secondary) | ||
868 | xics_set_cpu_giq(default_distrib_server, 0); | ||
869 | } | ||
870 | |||
871 | #ifdef CONFIG_HOTPLUG_CPU | ||
872 | |||
873 | /* Interrupts are disabled. */ | ||
874 | void xics_migrate_irqs_away(void) | ||
875 | { | ||
876 | int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id(); | ||
877 | int virq; | ||
878 | |||
879 | /* If we used to be the default server, move to the new "boot_cpuid" */ | ||
880 | if (hw_cpu == default_server) | ||
881 | xics_update_irq_servers(); | ||
882 | |||
883 | /* Reject any interrupt that was queued to us... */ | ||
884 | xics_set_cpu_priority(0); | ||
885 | |||
886 | /* Remove ourselves from the global interrupt queue */ | ||
887 | xics_set_cpu_giq(default_distrib_server, 0); | ||
888 | |||
889 | /* Allow IPIs again... */ | ||
890 | xics_set_cpu_priority(DEFAULT_PRIORITY); | ||
891 | |||
892 | for_each_irq(virq) { | ||
893 | struct irq_desc *desc; | ||
894 | struct irq_chip *chip; | ||
895 | unsigned int hwirq; | ||
896 | int xics_status[2]; | ||
897 | int status; | ||
898 | unsigned long flags; | ||
899 | |||
900 | /* We can't set affinity on ISA interrupts */ | ||
901 | if (virq < NUM_ISA_INTERRUPTS) | ||
902 | continue; | ||
903 | if (irq_map[virq].host != xics_host) | ||
904 | continue; | ||
905 | hwirq = (unsigned int)irq_map[virq].hwirq; | ||
906 | /* We need to get IPIs still. */ | ||
907 | if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) | ||
908 | continue; | ||
909 | |||
910 | desc = irq_to_desc(virq); | ||
911 | |||
912 | /* We only need to migrate enabled IRQS */ | ||
913 | if (desc == NULL || desc->action == NULL) | ||
914 | continue; | ||
915 | |||
916 | chip = irq_desc_get_chip(desc); | ||
917 | if (chip == NULL || chip->irq_set_affinity == NULL) | ||
918 | continue; | ||
919 | |||
920 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
921 | |||
922 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq); | ||
923 | if (status) { | ||
924 | printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", | ||
925 | __func__, hwirq, status); | ||
926 | goto unlock; | ||
927 | } | ||
928 | |||
929 | /* | ||
930 | * We only support delivery to all cpus or to one cpu. | ||
931 | * The irq has to be migrated only in the single cpu | ||
932 | * case. | ||
933 | */ | ||
934 | if (xics_status[0] != hw_cpu) | ||
935 | goto unlock; | ||
936 | |||
937 | /* This is expected during cpu offline. */ | ||
938 | if (cpu_online(cpu)) | ||
939 | printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n", | ||
940 | virq, cpu); | ||
941 | |||
942 | /* Reset affinity to all cpus */ | ||
943 | cpumask_setall(desc->irq_data.affinity); | ||
944 | chip->irq_set_affinity(&desc->irq_data, cpu_all_mask, true); | ||
945 | unlock: | ||
946 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
947 | } | ||
948 | } | ||
949 | #endif | ||
diff --git a/arch/powerpc/platforms/pseries/xics.h b/arch/powerpc/platforms/pseries/xics.h
deleted file mode 100644
index d1d5a83039ae..000000000000
--- a/arch/powerpc/platforms/pseries/xics.h
+++ /dev/null
@@ -1,23 +0,0 @@
1 | /* | ||
2 | * arch/powerpc/platforms/pseries/xics.h | ||
3 | * | ||
4 | * Copyright 2000 IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _POWERPC_KERNEL_XICS_H | ||
13 | #define _POWERPC_KERNEL_XICS_H | ||
14 | |||
15 | extern void xics_init_IRQ(void); | ||
16 | extern void xics_setup_cpu(void); | ||
17 | extern void xics_teardown_cpu(void); | ||
18 | extern void xics_kexec_teardown_cpu(int secondary); | ||
19 | extern void xics_migrate_irqs_away(void); | ||
20 | extern int smp_xics_probe(void); | ||
21 | extern void smp_xics_message_pass(int target, int msg); | ||
22 | |||
23 | #endif /* _POWERPC_KERNEL_XICS_H */ | ||
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index 396582835cb5..cfc18770af79 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -12,3 +12,6 @@ config PPC_MSI_BITMAP
12 | depends on PCI_MSI | 12 | depends on PCI_MSI |
13 | default y if MPIC | 13 | default y if MPIC |
14 | default y if FSL_PCI | 14 | default y if FSL_PCI |
15 | |||
16 | source "arch/powerpc/sysdev/xics/Kconfig" | ||
17 | |||
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 1e0c933ef772..9516e7598573 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -57,3 +57,7 @@ obj-$(CONFIG_PPC_MPC52xx) += mpc5xxx_clocks.o
57 | ifeq ($(CONFIG_SUSPEND),y) | 57 | ifeq ($(CONFIG_SUSPEND),y) |
58 | obj-$(CONFIG_6xx) += 6xx-suspend.o | 58 | obj-$(CONFIG_6xx) += 6xx-suspend.o |
59 | endif | 59 | endif |
60 | |||
61 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror | ||
62 | |||
63 | obj-$(CONFIG_PPC_XICS) += xics/ | ||
diff --git a/arch/powerpc/sysdev/xics/Kconfig b/arch/powerpc/sysdev/xics/Kconfig
new file mode 100644
index 000000000000..123b8ddf2816
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/Kconfig
@@ -0,0 +1,12 @@
1 | config PPC_XICS | ||
2 | def_bool n | ||
3 | |||
4 | config PPC_ICP_NATIVE | ||
5 | def_bool n | ||
6 | |||
7 | config PPC_ICP_HV | ||
8 | def_bool n | ||
9 | |||
10 | config PPC_ICS_RTAS | ||
11 | def_bool n | ||
12 | |||
diff --git a/arch/powerpc/sysdev/xics/Makefile b/arch/powerpc/sysdev/xics/Makefile
new file mode 100644
index 000000000000..b75a6059337f
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/Makefile
@@ -0,0 +1,6 @@
1 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror | ||
2 | |||
3 | obj-y += xics-common.o | ||
4 | obj-$(CONFIG_PPC_ICP_NATIVE) += icp-native.o | ||
5 | obj-$(CONFIG_PPC_ICP_HV) += icp-hv.o | ||
6 | obj-$(CONFIG_PPC_ICS_RTAS) += ics-rtas.o | ||
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
new file mode 100644
index 000000000000..b03d348b19a5
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -0,0 +1,184 @@
1 | /* | ||
2 | * Copyright 2011 IBM Corporation. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | * | ||
9 | */ | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/irq.h> | ||
13 | #include <linux/smp.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/cpu.h> | ||
17 | #include <linux/of.h> | ||
18 | |||
19 | #include <asm/smp.h> | ||
20 | #include <asm/irq.h> | ||
21 | #include <asm/errno.h> | ||
22 | #include <asm/xics.h> | ||
23 | #include <asm/io.h> | ||
24 | #include <asm/hvcall.h> | ||
25 | |||
26 | static inline unsigned int icp_hv_get_xirr(unsigned char cppr) | ||
27 | { | ||
28 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | ||
29 | long rc; | ||
30 | |||
31 | rc = plpar_hcall(H_XIRR, retbuf, cppr); | ||
32 | if (rc != H_SUCCESS) | ||
33 | panic(" bad return code xirr - rc = %lx\n", rc); | ||
34 | return (unsigned int)retbuf[0]; | ||
35 | } | ||
36 | |||
37 | static inline void icp_hv_set_xirr(unsigned int value) | ||
38 | { | ||
39 | long rc = plpar_hcall_norets(H_EOI, value); | ||
40 | if (rc != H_SUCCESS) | ||
41 | panic("bad return code EOI - rc = %ld, value=%x\n", rc, value); | ||
42 | } | ||
43 | |||
44 | static inline void icp_hv_set_cppr(u8 value) | ||
45 | { | ||
46 | long rc = plpar_hcall_norets(H_CPPR, value); | ||
47 | if (rc != H_SUCCESS) | ||
48 | panic("bad return code cppr - rc = %lx\n", rc); | ||
49 | } | ||
50 | |||
51 | static inline void icp_hv_set_qirr(int n_cpu, u8 value) | ||
52 | { | ||
53 | long rc = plpar_hcall_norets(H_IPI, get_hard_smp_processor_id(n_cpu), | ||
54 | value); | ||
55 | if (rc != H_SUCCESS) | ||
56 | panic("bad return code qirr - rc = %lx\n", rc); | ||
57 | } | ||
58 | |||
59 | static void icp_hv_eoi(struct irq_data *d) | ||
60 | { | ||
61 | unsigned int hw_irq = (unsigned int)irq_data_to_hw(d); | ||
62 | |||
63 | iosync(); | ||
64 | icp_hv_set_xirr((xics_pop_cppr() << 24) | hw_irq); | ||
65 | } | ||
66 | |||
67 | static void icp_hv_teardown_cpu(void) | ||
68 | { | ||
69 | int cpu = smp_processor_id(); | ||
70 | |||
71 | /* Clear any pending IPI */ | ||
72 | icp_hv_set_qirr(cpu, 0xff); | ||
73 | } | ||
74 | |||
75 | static void icp_hv_flush_ipi(void) | ||
76 | { | ||
77 | /* We take the IPI irq and never return, so we | ||
78 | * need to EOI the IPI, but want to leave our priority 0 | ||
79 | * | ||
80 | * should we check all the other interrupts too? | ||
81 | * should we be flagging idle loop instead? | ||
82 | * or creating some task to be scheduled? | ||
83 | */ | ||
84 | |||
85 | icp_hv_set_xirr((0x00 << 24) | XICS_IPI); | ||
86 | } | ||
87 | |||
88 | static unsigned int icp_hv_get_irq(void) | ||
89 | { | ||
90 | unsigned int xirr = icp_hv_get_xirr(xics_cppr_top()); | ||
91 | unsigned int vec = xirr & 0x00ffffff; | ||
92 | unsigned int irq; | ||
93 | |||
94 | if (vec == XICS_IRQ_SPURIOUS) | ||
95 | return NO_IRQ; | ||
96 | |||
97 | irq = irq_radix_revmap_lookup(xics_host, vec); | ||
98 | if (likely(irq != NO_IRQ)) { | ||
99 | xics_push_cppr(vec); | ||
100 | return irq; | ||
101 | } | ||
102 | |||
103 | /* We don't have a linux mapping, so have rtas mask it. */ | ||
104 | xics_mask_unknown_vec(vec); | ||
105 | |||
106 | /* We might learn about it later, so EOI it */ | ||
107 | icp_hv_set_xirr(xirr); | ||
108 | |||
109 | return NO_IRQ; | ||
110 | } | ||
111 | |||
112 | static void icp_hv_set_cpu_priority(unsigned char cppr) | ||
113 | { | ||
114 | xics_set_base_cppr(cppr); | ||
115 | icp_hv_set_cppr(cppr); | ||
116 | iosync(); | ||
117 | } | ||
118 | |||
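Note: both the hcall and MMIO ICPs manipulate the same 32-bit XIRR layout, which icp_hv_get_irq() and icp_hv_eoi() above rely on: the top byte carries the CPPR, the low 24 bits the interrupt source (XISR). Hypothetical helpers, for illustration only (not part of the patch):

static inline unsigned int xirr_to_vec(unsigned int xirr)
{
	return xirr & 0x00ffffff;	/* XISR: interrupt source number */
}

static inline unsigned char xirr_to_cppr(unsigned int xirr)
{
	return xirr >> 24;		/* CPPR: priority byte */
}

static inline unsigned int xirr_pack(unsigned char cppr, unsigned int vec)
{
	return ((unsigned int)cppr << 24) | vec;	/* written back on EOI */
}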
119 | #ifdef CONFIG_SMP | ||
120 | |||
121 | static inline void icp_hv_do_message(int cpu, int msg) | ||
122 | { | ||
123 | unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); | ||
124 | |||
125 | set_bit(msg, tgt); | ||
126 | mb(); | ||
127 | icp_hv_set_qirr(cpu, IPI_PRIORITY); | ||
128 | } | ||
129 | |||
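Note: the set_bit()/mb() pair above is the sending half of a doorbell protocol: the message bit must be globally visible before the QIRR write (here an H_IPI hcall) raises the IPI, and the receiver, xics_ipi_dispatch() in xics-common.c, issues a matching mb() after acking the QIRR and before scanning the bits. A minimal sketch of that contract, with hypothetical names (the real sender is icp_hv_do_message() above):

static unsigned long doorbell_msgs;	/* per-cpu in the real driver */

static void doorbell_send(unsigned int msg)
{
	set_bit(msg, &doorbell_msgs);	/* 1: publish the message bit */
	mb();				/* 2: make it visible before... */
	/* 3: ...raising the IPI (QIRR store or H_IPI hcall) */
}

static void doorbell_receive(unsigned int msg)
{
	/* 1: clear the QIRR (ack) so a new IPI can be raised, then... */
	mb();				/* 2: ...order the ack vs. reading bits */
	if (test_and_clear_bit(msg, &doorbell_msgs))
		;			/* 3: handle the message */
}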
130 | static void icp_hv_message_pass(int target, int msg) | ||
131 | { | ||
132 | unsigned int i; | ||
133 | |||
134 | if (target < NR_CPUS) { | ||
135 | icp_hv_do_message(target, msg); | ||
136 | } else { | ||
137 | for_each_online_cpu(i) { | ||
138 | if (target == MSG_ALL_BUT_SELF | ||
139 | && i == smp_processor_id()) | ||
140 | continue; | ||
141 | icp_hv_do_message(i, msg); | ||
142 | } | ||
143 | } | ||
144 | } | ||
145 | |||
146 | static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id) | ||
147 | { | ||
148 | int cpu = smp_processor_id(); | ||
149 | |||
150 | icp_hv_set_qirr(cpu, 0xff); | ||
151 | |||
152 | return xics_ipi_dispatch(cpu); | ||
153 | } | ||
154 | |||
155 | #endif /* CONFIG_SMP */ | ||
156 | |||
157 | static const struct icp_ops icp_hv_ops = { | ||
158 | .get_irq = icp_hv_get_irq, | ||
159 | .eoi = icp_hv_eoi, | ||
160 | .set_priority = icp_hv_set_cpu_priority, | ||
161 | .teardown_cpu = icp_hv_teardown_cpu, | ||
162 | .flush_ipi = icp_hv_flush_ipi, | ||
163 | #ifdef CONFIG_SMP | ||
164 | .ipi_action = icp_hv_ipi_action, | ||
165 | .message_pass = icp_hv_message_pass, | ||
166 | #endif | ||
167 | }; | ||
168 | |||
169 | int icp_hv_init(void) | ||
170 | { | ||
171 | struct device_node *np; | ||
172 | |||
173 | np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xicp"); | ||
174 | if (!np) | ||
175 | np = of_find_node_by_type(NULL, | ||
176 | "PowerPC-External-Interrupt-Presentation"); | ||
177 | if (!np) | ||
178 | return -ENODEV; | ||
179 | |||
180 | icp_ops = &icp_hv_ops; | ||
181 | of_node_put(np); | ||
182 | return 0; | ||
183 | } | ||
184 | |||
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c new file mode 100644 index 000000000000..be5e3d748edb --- /dev/null +++ b/arch/powerpc/sysdev/xics/icp-native.c | |||
@@ -0,0 +1,312 @@ | |||
1 | /* | ||
2 | * Copyright 2011 IBM Corporation. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | * | ||
9 | */ | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/irq.h> | ||
13 | #include <linux/smp.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/cpu.h> | ||
17 | #include <linux/of.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | |||
20 | #include <asm/prom.h> | ||
21 | #include <asm/io.h> | ||
22 | #include <asm/smp.h> | ||
23 | #include <asm/irq.h> | ||
24 | #include <asm/errno.h> | ||
25 | #include <asm/xics.h> | ||
26 | |||
27 | struct icp_ipl { | ||
28 | union { | ||
29 | u32 word; | ||
30 | u8 bytes[4]; | ||
31 | } xirr_poll; | ||
32 | union { | ||
33 | u32 word; | ||
34 | u8 bytes[4]; | ||
35 | } xirr; | ||
36 | u32 dummy; | ||
37 | union { | ||
38 | u32 word; | ||
39 | u8 bytes[4]; | ||
40 | } qirr; | ||
41 | u32 link_a; | ||
42 | u32 link_b; | ||
43 | u32 link_c; | ||
44 | }; | ||
45 | |||
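Note: struct icp_ipl mirrors the memory-mapped ICP register page. Assuming the usual layout (XIRR poll at 0x0, XIRR at 0x4, QIRR at 0xc), compile-time checks along these lines would pin it down; a sketch, not in the patch, and it would need to be called from an init path to be instantiated:

static inline void icp_ipl_check_layout(void)
{
	BUILD_BUG_ON(offsetof(struct icp_ipl, xirr_poll) != 0x0);
	BUILD_BUG_ON(offsetof(struct icp_ipl, xirr) != 0x4);
	BUILD_BUG_ON(offsetof(struct icp_ipl, qirr) != 0xc);
}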
46 | static struct icp_ipl __iomem *icp_native_regs[NR_CPUS]; | ||
47 | |||
48 | static inline unsigned int icp_native_get_xirr(void) | ||
49 | { | ||
50 | int cpu = smp_processor_id(); | ||
51 | |||
52 | return in_be32(&icp_native_regs[cpu]->xirr.word); | ||
53 | } | ||
54 | |||
55 | static inline void icp_native_set_xirr(unsigned int value) | ||
56 | { | ||
57 | int cpu = smp_processor_id(); | ||
58 | |||
59 | out_be32(&icp_native_regs[cpu]->xirr.word, value); | ||
60 | } | ||
61 | |||
62 | static inline void icp_native_set_cppr(u8 value) | ||
63 | { | ||
64 | int cpu = smp_processor_id(); | ||
65 | |||
66 | out_8(&icp_native_regs[cpu]->xirr.bytes[0], value); | ||
67 | } | ||
68 | |||
69 | static inline void icp_native_set_qirr(int n_cpu, u8 value) | ||
70 | { | ||
71 | out_8(&icp_native_regs[n_cpu]->qirr.bytes[0], value); | ||
72 | } | ||
73 | |||
74 | static void icp_native_set_cpu_priority(unsigned char cppr) | ||
75 | { | ||
76 | xics_set_base_cppr(cppr); | ||
77 | icp_native_set_cppr(cppr); | ||
78 | iosync(); | ||
79 | } | ||
80 | |||
81 | static void icp_native_eoi(struct irq_data *d) | ||
82 | { | ||
83 | unsigned int hw_irq = (unsigned int)irq_data_to_hw(d); | ||
84 | |||
85 | iosync(); | ||
86 | icp_native_set_xirr((xics_pop_cppr() << 24) | hw_irq); | ||
87 | } | ||
88 | |||
89 | static void icp_native_teardown_cpu(void) | ||
90 | { | ||
91 | int cpu = smp_processor_id(); | ||
92 | |||
93 | /* Clear any pending IPI */ | ||
94 | icp_native_set_qirr(cpu, 0xff); | ||
95 | } | ||
96 | |||
97 | static void icp_native_flush_ipi(void) | ||
98 | { | ||
99 | /* We take the IPI irq and never return, so we | ||
100 | * need to EOI the IPI, but want to leave our priority 0 | ||
101 | * | ||
102 | * should we check all the other interrupts too? | ||
103 | * should we be flagging idle loop instead? | ||
104 | * or creating some task to be scheduled? | ||
105 | */ | ||
106 | |||
107 | icp_native_set_xirr((0x00 << 24) | XICS_IPI); | ||
108 | } | ||
109 | |||
110 | static unsigned int icp_native_get_irq(void) | ||
111 | { | ||
112 | unsigned int xirr = icp_native_get_xirr(); | ||
113 | unsigned int vec = xirr & 0x00ffffff; | ||
114 | unsigned int irq; | ||
115 | |||
116 | if (vec == XICS_IRQ_SPURIOUS) | ||
117 | return NO_IRQ; | ||
118 | |||
119 | irq = irq_radix_revmap_lookup(xics_host, vec); | ||
120 | if (likely(irq != NO_IRQ)) { | ||
121 | xics_push_cppr(vec); | ||
122 | return irq; | ||
123 | } | ||
124 | |||
125 | /* We don't have a linux mapping, so have rtas mask it. */ | ||
126 | xics_mask_unknown_vec(vec); | ||
127 | |||
128 | /* We might learn about it later, so EOI it */ | ||
129 | icp_native_set_xirr(xirr); | ||
130 | |||
131 | return NO_IRQ; | ||
132 | } | ||
133 | |||
134 | #ifdef CONFIG_SMP | ||
135 | |||
136 | static inline void icp_native_do_message(int cpu, int msg) | ||
137 | { | ||
138 | unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); | ||
139 | |||
140 | set_bit(msg, tgt); | ||
141 | mb(); | ||
142 | icp_native_set_qirr(cpu, IPI_PRIORITY); | ||
143 | } | ||
144 | |||
145 | static void icp_native_message_pass(int target, int msg) | ||
146 | { | ||
147 | unsigned int i; | ||
148 | |||
149 | if (target < NR_CPUS) { | ||
150 | icp_native_do_message(target, msg); | ||
151 | } else { | ||
152 | for_each_online_cpu(i) { | ||
153 | if (target == MSG_ALL_BUT_SELF | ||
154 | && i == smp_processor_id()) | ||
155 | continue; | ||
156 | icp_native_do_message(i, msg); | ||
157 | } | ||
158 | } | ||
159 | } | ||
160 | |||
161 | static irqreturn_t icp_native_ipi_action(int irq, void *dev_id) | ||
162 | { | ||
163 | int cpu = smp_processor_id(); | ||
164 | |||
165 | icp_native_set_qirr(cpu, 0xff); | ||
166 | |||
167 | return xics_ipi_dispatch(cpu); | ||
168 | } | ||
169 | |||
170 | #endif /* CONFIG_SMP */ | ||
171 | |||
172 | static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr, | ||
173 | unsigned long size) | ||
174 | { | ||
175 | char *rname; | ||
176 | int i, cpu = -1; | ||
177 | |||
178 | /* This may look gross, but it's good enough for now: we don't quite | ||
179 | * have a hard -> linux processor id mapping. | ||
180 | */ | ||
181 | for_each_possible_cpu(i) { | ||
182 | if (!cpu_present(i)) | ||
183 | continue; | ||
184 | if (hw_id == get_hard_smp_processor_id(i)) { | ||
185 | cpu = i; | ||
186 | break; | ||
187 | } | ||
188 | } | ||
189 | |||
190 | /* No match, skip that CPU. Don't print an error, it's normal: some | ||
191 | * XICS come up with way more entries in there than you have CPUs | ||
192 | */ | ||
193 | if (cpu == -1) | ||
194 | return 0; | ||
195 | |||
196 | rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation", | ||
197 | cpu, hw_id); | ||
198 | |||
199 | if (!request_mem_region(addr, size, rname)) { | ||
200 | pr_warning("icp_native: Could not reserve ICP MMIO" | ||
201 | " for CPU %d, interrupt server #0x%x\n", | ||
202 | cpu, hw_id); | ||
203 | return -EBUSY; | ||
204 | } | ||
205 | |||
206 | icp_native_regs[cpu] = ioremap(addr, size); | ||
207 | if (!icp_native_regs[cpu]) { | ||
208 | pr_warning("icp_native: Failed ioremap for CPU %d, " | ||
209 | "interrupt server #0x%x, addr %#lx\n", | ||
210 | cpu, hw_id, addr); | ||
211 | release_mem_region(addr, size); | ||
212 | return -ENOMEM; | ||
213 | } | ||
214 | return 0; | ||
215 | } | ||
216 | |||
217 | static int __init icp_native_init_one_node(struct device_node *np, | ||
218 | unsigned int *indx) | ||
219 | { | ||
220 | unsigned int ilen; | ||
221 | const u32 *ireg; | ||
222 | int i; | ||
223 | int reg_tuple_size; | ||
224 | int num_servers = 0; | ||
225 | |||
226 | /* This code makes the theoretically broken assumption that the | ||
227 | * interrupt server numbers are the same as the hard CPU numbers. | ||
228 | * This happens to be the case so far, but we are playing with fire... | ||
229 | * it should be fixed one of these days. -BenH. | ||
230 | */ | ||
231 | ireg = of_get_property(np, "ibm,interrupt-server-ranges", &ilen); | ||
232 | |||
233 | /* Does that ever happen? We'll know soon enough... but even good old | ||
234 | * f80 does have that property. | ||
235 | */ | ||
236 | WARN_ON((ireg == NULL) || (ilen != 2*sizeof(u32))); | ||
237 | |||
238 | if (ireg) { | ||
239 | *indx = of_read_number(ireg, 1); | ||
240 | if (ilen >= 2*sizeof(u32)) | ||
241 | num_servers = of_read_number(ireg + 1, 1); | ||
242 | } | ||
243 | |||
244 | ireg = of_get_property(np, "reg", &ilen); | ||
245 | if (!ireg) { | ||
246 | pr_err("icp_native: Can't find interrupt reg property"); | ||
247 | return -1; | ||
248 | } | ||
249 | |||
250 | reg_tuple_size = (of_n_addr_cells(np) + of_n_size_cells(np)) * 4; | ||
251 | if (((ilen % reg_tuple_size) != 0) | ||
252 | || (num_servers && (num_servers != (ilen / reg_tuple_size)))) { | ||
253 | pr_err("icp_native: ICP reg len (%d) != num servers (%d)", | ||
254 | ilen / reg_tuple_size, num_servers); | ||
255 | return -1; | ||
256 | } | ||
257 | |||
258 | for (i = 0; i < (ilen / reg_tuple_size); i++) { | ||
259 | struct resource r; | ||
260 | int err; | ||
261 | |||
262 | err = of_address_to_resource(np, i, &r); | ||
263 | if (err) { | ||
264 | pr_err("icp_native: Could not translate ICP MMIO" | ||
265 | " for interrupt server 0x%x (%d)\n", *indx, err); | ||
266 | return -1; | ||
267 | } | ||
268 | |||
269 | if (icp_native_map_one_cpu(*indx, r.start, resource_size(&r))) | ||
270 | return -1; | ||
271 | |||
272 | (*indx)++; | ||
273 | } | ||
274 | return 0; | ||
275 | } | ||
276 | |||
277 | static const struct icp_ops icp_native_ops = { | ||
278 | .get_irq = icp_native_get_irq, | ||
279 | .eoi = icp_native_eoi, | ||
280 | .set_priority = icp_native_set_cpu_priority, | ||
281 | .teardown_cpu = icp_native_teardown_cpu, | ||
282 | .flush_ipi = icp_native_flush_ipi, | ||
283 | #ifdef CONFIG_SMP | ||
284 | .ipi_action = icp_native_ipi_action, | ||
285 | .message_pass = icp_native_message_pass, | ||
286 | #endif | ||
287 | }; | ||
288 | |||
289 | int icp_native_init(void) | ||
290 | { | ||
291 | struct device_node *np; | ||
292 | u32 indx = 0; | ||
293 | int found = 0; | ||
294 | |||
295 | for_each_compatible_node(np, NULL, "ibm,ppc-xicp") | ||
296 | if (icp_native_init_one_node(np, &indx) == 0) | ||
297 | found = 1; | ||
298 | if (!found) { | ||
299 | for_each_node_by_type(np, | ||
300 | "PowerPC-External-Interrupt-Presentation") { | ||
301 | if (icp_native_init_one_node(np, &indx) == 0) | ||
302 | found = 1; | ||
303 | } | ||
304 | } | ||
305 | |||
306 | if (found == 0) | ||
307 | return -ENODEV; | ||
308 | |||
309 | icp_ops = &icp_native_ops; | ||
310 | |||
311 | return 0; | ||
312 | } | ||
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c new file mode 100644 index 000000000000..5b3ee387e89d --- /dev/null +++ b/arch/powerpc/sysdev/xics/ics-rtas.c | |||
@@ -0,0 +1,229 @@ | |||
1 | #include <linux/types.h> | ||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/irq.h> | ||
4 | #include <linux/smp.h> | ||
5 | #include <linux/interrupt.h> | ||
6 | #include <linux/init.h> | ||
7 | #include <linux/cpu.h> | ||
8 | #include <linux/of.h> | ||
9 | #include <linux/spinlock.h> | ||
10 | #include <linux/msi.h> | ||
11 | |||
12 | #include <asm/prom.h> | ||
13 | #include <asm/smp.h> | ||
14 | #include <asm/machdep.h> | ||
15 | #include <asm/irq.h> | ||
16 | #include <asm/errno.h> | ||
17 | #include <asm/xics.h> | ||
18 | #include <asm/rtas.h> | ||
19 | |||
20 | /* RTAS service tokens */ | ||
21 | static int ibm_get_xive; | ||
22 | static int ibm_set_xive; | ||
23 | static int ibm_int_on; | ||
24 | static int ibm_int_off; | ||
25 | |||
26 | static int ics_rtas_map(struct ics *ics, unsigned int virq); | ||
27 | static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec); | ||
28 | static long ics_rtas_get_server(struct ics *ics, unsigned long vec); | ||
29 | |||
30 | /* Only one global & state struct ics */ | ||
31 | static struct ics ics_rtas = { | ||
32 | .map = ics_rtas_map, | ||
33 | .mask_unknown = ics_rtas_mask_unknown, | ||
34 | .get_server = ics_rtas_get_server, | ||
35 | }; | ||
36 | |||
37 | static void ics_rtas_unmask_irq(struct irq_data *d) | ||
38 | { | ||
39 | unsigned int hw_irq = (unsigned int)irq_data_to_hw(d); | ||
40 | int call_status; | ||
41 | int server; | ||
42 | |||
43 | pr_devel("xics: unmask virq %d [hw 0x%x]\n", d->irq, hw_irq); | ||
44 | |||
45 | if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) | ||
46 | return; | ||
47 | |||
48 | server = xics_get_irq_server(d->irq, d->affinity, 0); | ||
49 | |||
50 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server, | ||
51 | DEFAULT_PRIORITY); | ||
52 | if (call_status != 0) { | ||
53 | printk(KERN_ERR | ||
54 | "%s: ibm_set_xive irq %u server %x returned %d\n", | ||
55 | __func__, hw_irq, server, call_status); | ||
56 | return; | ||
57 | } | ||
58 | |||
59 | /* Now unmask the interrupt (often a no-op) */ | ||
60 | call_status = rtas_call(ibm_int_on, 1, 1, NULL, hw_irq); | ||
61 | if (call_status != 0) { | ||
62 | printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", | ||
63 | __func__, hw_irq, call_status); | ||
64 | return; | ||
65 | } | ||
66 | } | ||
67 | |||
68 | static unsigned int ics_rtas_startup(struct irq_data *d) | ||
69 | { | ||
70 | #ifdef CONFIG_PCI_MSI | ||
71 | /* | ||
72 | * The generic MSI code returns with the interrupt disabled on the | ||
73 | * card, using the MSI mask bits. Firmware doesn't appear to unmask | ||
74 | * at that level, so we do it here by hand. | ||
75 | */ | ||
76 | if (d->msi_desc) | ||
77 | unmask_msi_irq(d); | ||
78 | #endif | ||
79 | /* unmask it */ | ||
80 | ics_rtas_unmask_irq(d); | ||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | static void ics_rtas_mask_real_irq(unsigned int hw_irq) | ||
85 | { | ||
86 | int call_status; | ||
87 | |||
88 | if (hw_irq == XICS_IPI) | ||
89 | return; | ||
90 | |||
91 | call_status = rtas_call(ibm_int_off, 1, 1, NULL, hw_irq); | ||
92 | if (call_status != 0) { | ||
93 | printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", | ||
94 | __func__, hw_irq, call_status); | ||
95 | return; | ||
96 | } | ||
97 | |||
98 | /* Have to set XIVE to 0xff to be able to remove a slot */ | ||
99 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, | ||
100 | xics_default_server, 0xff); | ||
101 | if (call_status != 0) { | ||
102 | printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", | ||
103 | __func__, hw_irq, call_status); | ||
104 | return; | ||
105 | } | ||
106 | } | ||
107 | |||
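Note: all four RTAS services in this file follow the same convention: rtas_call(token, nargs, nret, outputs, inputs...), where nret counts the status word plus any extra outputs. For ibm,get-xive that means one input and two extra outputs (current server and priority). A hedged helper illustrating the convention (the helper itself is hypothetical, not part of the patch):

static int ics_rtas_query_xive(unsigned int hw_irq, int *server, int *prio)
{
	int xive[2];
	int rc = rtas_call(ibm_get_xive, 1, 3, xive, hw_irq);

	if (rc)
		return rc;	/* non-zero RTAS status means failure */
	*server = xive[0];	/* current interrupt server */
	*prio = xive[1];	/* current priority, 0xff == masked */
	return 0;
}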
108 | static void ics_rtas_mask_irq(struct irq_data *d) | ||
109 | { | ||
110 | unsigned int hw_irq = (unsigned int)irq_data_to_hw(d); | ||
111 | |||
112 | pr_devel("xics: mask virq %d [hw 0x%x]\n", d->irq, hw_irq); | ||
113 | |||
114 | if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) | ||
115 | return; | ||
116 | ics_rtas_mask_real_irq(hw_irq); | ||
117 | } | ||
118 | |||
119 | static int ics_rtas_set_affinity(struct irq_data *d, | ||
120 | const struct cpumask *cpumask, | ||
121 | bool force) | ||
122 | { | ||
123 | unsigned int hw_irq = (unsigned int)irq_data_to_hw(d); | ||
124 | int status; | ||
125 | int xics_status[2]; | ||
126 | int irq_server; | ||
127 | |||
128 | if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) | ||
129 | return -1; | ||
130 | |||
131 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, hw_irq); | ||
132 | |||
133 | if (status) { | ||
134 | printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", | ||
135 | __func__, hw_irq, status); | ||
136 | return -1; | ||
137 | } | ||
138 | |||
139 | irq_server = xics_get_irq_server(d->irq, cpumask, 1); | ||
140 | if (irq_server == -1) { | ||
141 | char cpulist[128]; | ||
142 | cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); | ||
143 | printk(KERN_WARNING | ||
144 | "%s: No online cpus in the mask %s for irq %d\n", | ||
145 | __func__, cpulist, d->irq); | ||
146 | return -1; | ||
147 | } | ||
148 | |||
149 | status = rtas_call(ibm_set_xive, 3, 1, NULL, | ||
150 | hw_irq, irq_server, xics_status[1]); | ||
151 | |||
152 | if (status) { | ||
153 | printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n", | ||
154 | __func__, hw_irq, status); | ||
155 | return -1; | ||
156 | } | ||
157 | |||
158 | return IRQ_SET_MASK_OK; | ||
159 | } | ||
160 | |||
161 | static struct irq_chip ics_rtas_irq_chip = { | ||
162 | .name = "XICS", | ||
163 | .irq_startup = ics_rtas_startup, | ||
164 | .irq_mask = ics_rtas_mask_irq, | ||
165 | .irq_unmask = ics_rtas_unmask_irq, | ||
166 | .irq_eoi = NULL, /* Patched at init time */ | ||
167 | .irq_set_affinity = ics_rtas_set_affinity | ||
168 | }; | ||
169 | |||
170 | static int ics_rtas_map(struct ics *ics, unsigned int virq) | ||
171 | { | ||
172 | unsigned int hw_irq = (unsigned int)irq_map[virq].hwirq; | ||
173 | int status[2]; | ||
174 | int rc; | ||
175 | |||
176 | if (WARN_ON(hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)) | ||
177 | return -EINVAL; | ||
178 | |||
179 | /* Check if RTAS knows about this interrupt */ | ||
180 | rc = rtas_call(ibm_get_xive, 1, 3, status, hw_irq); | ||
181 | if (rc) | ||
182 | return -ENXIO; | ||
183 | |||
184 | irq_set_chip_and_handler(virq, &ics_rtas_irq_chip, handle_fasteoi_irq); | ||
185 | irq_set_chip_data(virq, &ics_rtas); | ||
186 | |||
187 | return 0; | ||
188 | } | ||
189 | |||
190 | static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec) | ||
191 | { | ||
192 | ics_rtas_mask_real_irq(vec); | ||
193 | } | ||
194 | |||
195 | static long ics_rtas_get_server(struct ics *ics, unsigned long vec) | ||
196 | { | ||
197 | int rc, status[2]; | ||
198 | |||
199 | rc = rtas_call(ibm_get_xive, 1, 3, status, vec); | ||
200 | if (rc) | ||
201 | return -1; | ||
202 | return status[0]; | ||
203 | } | ||
204 | |||
205 | int ics_rtas_init(void) | ||
206 | { | ||
207 | ibm_get_xive = rtas_token("ibm,get-xive"); | ||
208 | ibm_set_xive = rtas_token("ibm,set-xive"); | ||
209 | ibm_int_on = rtas_token("ibm,int-on"); | ||
210 | ibm_int_off = rtas_token("ibm,int-off"); | ||
211 | |||
212 | /* We enable the RTAS "ICS" if RTAS is present with the | ||
213 | * appropriate tokens | ||
214 | */ | ||
215 | if (ibm_get_xive == RTAS_UNKNOWN_SERVICE || | ||
216 | ibm_set_xive == RTAS_UNKNOWN_SERVICE) | ||
217 | return -ENODEV; | ||
218 | |||
219 | /* We need to patch our irq chip's EOI to point to the | ||
220 | * right ICP | ||
221 | */ | ||
222 | ics_rtas_irq_chip.irq_eoi = icp_ops->eoi; | ||
223 | |||
224 | /* Register ourselves */ | ||
225 | xics_register_ics(&ics_rtas); | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
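Note: the commit message anticipates multiple ICS backends, and registering one is just filling the three callbacks and calling xics_register_ics(). A skeleton a future backend might follow, mirroring ics_rtas_init() above (all "foo" names are invented for illustration):

static int ics_foo_map(struct ics *ics, unsigned int virq);
static void ics_foo_mask_unknown(struct ics *ics, unsigned long vec);
static long ics_foo_get_server(struct ics *ics, unsigned long vec);

static struct ics ics_foo = {
	.map		= ics_foo_map,
	.mask_unknown	= ics_foo_mask_unknown,
	.get_server	= ics_foo_get_server,
};

int __init ics_foo_init(void)
{
	/* like ics-rtas: borrow the ICP's EOI for our irq_chip, then
	 * hook into the common list of interrupt sources */
	xics_register_ics(&ics_foo);
	return 0;
}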
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c new file mode 100644 index 000000000000..a2be84de5237 --- /dev/null +++ b/arch/powerpc/sysdev/xics/xics-common.c | |||
@@ -0,0 +1,461 @@ | |||
1 | /* | ||
2 | * Copyright 2011 IBM Corporation. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | * | ||
9 | */ | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/threads.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/irq.h> | ||
14 | #include <linux/debugfs.h> | ||
15 | #include <linux/smp.h> | ||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/seq_file.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/cpu.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/spinlock.h> | ||
23 | |||
24 | #include <asm/prom.h> | ||
25 | #include <asm/io.h> | ||
26 | #include <asm/smp.h> | ||
27 | #include <asm/machdep.h> | ||
28 | #include <asm/irq.h> | ||
29 | #include <asm/errno.h> | ||
30 | #include <asm/rtas.h> | ||
31 | #include <asm/xics.h> | ||
32 | #include <asm/firmware.h> | ||
33 | |||
34 | /* Globals common to all ICP/ICS implementations */ | ||
35 | const struct icp_ops *icp_ops; | ||
36 | |||
37 | unsigned int xics_default_server = 0xff; | ||
38 | unsigned int xics_default_distrib_server = 0; | ||
39 | unsigned int xics_interrupt_server_size = 8; | ||
40 | |||
41 | DEFINE_PER_CPU(struct xics_cppr, xics_cppr); | ||
42 | |||
43 | struct irq_host *xics_host; | ||
44 | |||
45 | static LIST_HEAD(ics_list); | ||
46 | |||
47 | void xics_update_irq_servers(void) | ||
48 | { | ||
49 | int i, j; | ||
50 | struct device_node *np; | ||
51 | u32 ilen; | ||
52 | const u32 *ireg; | ||
53 | u32 hcpuid; | ||
54 | |||
55 | /* Find the server numbers for the boot cpu. */ | ||
56 | np = of_get_cpu_node(boot_cpuid, NULL); | ||
57 | BUG_ON(!np); | ||
58 | |||
59 | hcpuid = get_hard_smp_processor_id(boot_cpuid); | ||
60 | xics_default_server = hcpuid; | ||
61 | |||
62 | ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen); | ||
63 | if (!ireg) { | ||
64 | of_node_put(np); | ||
65 | return; | ||
66 | } | ||
67 | |||
68 | i = ilen / sizeof(int); | ||
69 | |||
70 | /* The global interrupt distribution server is specified in the last | ||
71 | * entry of the "ibm,ppc-interrupt-gserver#s" property. Get the last | ||
72 | * entry from this property for the current boot cpu id and use it | ||
73 | * as the default distribution server | ||
74 | */ | ||
75 | for (j = 0; j < i; j += 2) { | ||
76 | if (ireg[j] == hcpuid) { | ||
77 | xics_default_distrib_server = ireg[j+1]; | ||
78 | } | ||
79 | } | ||
80 | |||
81 | of_node_put(np); | ||
82 | } | ||
83 | |||
84 | /* GIQ handling is currently only supported on RTAS setups; it will | ||
85 | * have to be sorted out properly for bare metal | ||
86 | */ | ||
87 | void xics_set_cpu_giq(unsigned int gserver, unsigned int join) | ||
88 | { | ||
89 | #ifdef CONFIG_PPC_RTAS | ||
90 | int index; | ||
91 | int status; | ||
92 | |||
93 | if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL)) | ||
94 | return; | ||
95 | |||
96 | index = (1UL << xics_interrupt_server_size) - 1 - gserver; | ||
97 | |||
98 | status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join); | ||
99 | |||
100 | WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n", | ||
101 | GLOBAL_INTERRUPT_QUEUE, index, join, status); | ||
102 | #endif | ||
103 | } | ||
104 | |||
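Note: the index passed to set-indicator reflects the global server number within the interrupt-server-number space, so with the default 8-bit size, gserver 0 becomes index 255, gserver 1 becomes 254, and so on. A restatement of the arithmetic above (helper is hypothetical, for illustration only):

static inline int giq_indicator_index(unsigned int gserver)
{
	return (1UL << xics_interrupt_server_size) - 1 - gserver;
}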
105 | void xics_setup_cpu(void) | ||
106 | { | ||
107 | icp_ops->set_priority(LOWEST_PRIORITY); | ||
108 | |||
109 | xics_set_cpu_giq(xics_default_distrib_server, 1); | ||
110 | } | ||
111 | |||
112 | void xics_mask_unknown_vec(unsigned int vec) | ||
113 | { | ||
114 | struct ics *ics; | ||
115 | |||
116 | pr_err("Interrupt %u (real) is invalid, disabling it.\n", vec); | ||
117 | |||
118 | list_for_each_entry(ics, &ics_list, link) | ||
119 | ics->mask_unknown(ics, vec); | ||
120 | } | ||
121 | |||
122 | |||
123 | #ifdef CONFIG_SMP | ||
124 | |||
125 | DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message); | ||
126 | |||
127 | irqreturn_t xics_ipi_dispatch(int cpu) | ||
128 | { | ||
129 | unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); | ||
130 | |||
131 | mb(); /* order mmio clearing qirr */ | ||
132 | while (*tgt) { | ||
133 | if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) { | ||
134 | smp_message_recv(PPC_MSG_CALL_FUNCTION); | ||
135 | } | ||
136 | if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) { | ||
137 | smp_message_recv(PPC_MSG_RESCHEDULE); | ||
138 | } | ||
139 | if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) { | ||
140 | smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE); | ||
141 | } | ||
142 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) | ||
143 | if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) { | ||
144 | smp_message_recv(PPC_MSG_DEBUGGER_BREAK); | ||
145 | } | ||
146 | #endif | ||
147 | } | ||
148 | return IRQ_HANDLED; | ||
149 | } | ||
150 | |||
151 | static void xics_request_ipi(void) | ||
152 | { | ||
153 | unsigned int ipi; | ||
154 | |||
155 | ipi = irq_create_mapping(xics_host, XICS_IPI); | ||
156 | BUG_ON(ipi == NO_IRQ); | ||
157 | |||
158 | /* | ||
159 | * IPIs are marked IRQF_DISABLED as they must run with irqs | ||
160 | * disabled | ||
161 | */ | ||
162 | irq_set_handler(ipi, handle_percpu_irq); | ||
163 | BUG_ON(request_irq(ipi, icp_ops->ipi_action, | ||
164 | IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL)); | ||
165 | } | ||
166 | |||
167 | int __init xics_smp_probe(void) | ||
168 | { | ||
169 | /* Setup message_pass callback based on which ICP is used */ | ||
170 | smp_ops->message_pass = icp_ops->message_pass; | ||
171 | |||
172 | /* Register all the IPIs */ | ||
173 | xics_request_ipi(); | ||
174 | |||
175 | return cpumask_weight(cpu_possible_mask); | ||
176 | } | ||
177 | |||
178 | #endif /* CONFIG_SMP */ | ||
179 | |||
180 | void xics_teardown_cpu(void) | ||
181 | { | ||
182 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
183 | |||
184 | /* | ||
185 | * we have to reset the cppr index to 0 because we're | ||
186 | * not going to return from the IPI | ||
187 | */ | ||
188 | os_cppr->index = 0; | ||
189 | icp_ops->set_priority(0); | ||
190 | icp_ops->teardown_cpu(); | ||
191 | } | ||
192 | |||
193 | void xics_kexec_teardown_cpu(int secondary) | ||
194 | { | ||
195 | xics_teardown_cpu(); | ||
196 | |||
197 | icp_ops->flush_ipi(); | ||
198 | |||
199 | /* | ||
200 | * Some machines need to have at least one cpu in the GIQ, | ||
201 | * so leave the master cpu in the group. | ||
202 | */ | ||
203 | if (secondary) | ||
204 | xics_set_cpu_giq(xics_default_distrib_server, 0); | ||
205 | } | ||
206 | |||
207 | |||
208 | #ifdef CONFIG_HOTPLUG_CPU | ||
209 | |||
210 | /* Interrupts are disabled. */ | ||
211 | void xics_migrate_irqs_away(void) | ||
212 | { | ||
213 | int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id(); | ||
214 | unsigned int irq, virq; | ||
215 | |||
216 | /* If we used to be the default server, move to the new "boot_cpuid" */ | ||
217 | if (hw_cpu == xics_default_server) | ||
218 | xics_update_irq_servers(); | ||
219 | |||
220 | /* Reject any interrupt that was queued to us... */ | ||
221 | icp_ops->set_priority(0); | ||
222 | |||
223 | /* Remove ourselves from the global interrupt queue */ | ||
224 | xics_set_cpu_giq(xics_default_distrib_server, 0); | ||
225 | |||
226 | /* Allow IPIs again... */ | ||
227 | icp_ops->set_priority(DEFAULT_PRIORITY); | ||
228 | |||
229 | for_each_irq(virq) { | ||
230 | struct irq_desc *desc; | ||
231 | struct irq_chip *chip; | ||
232 | long server; | ||
233 | unsigned long flags; | ||
234 | struct ics *ics; | ||
235 | |||
236 | /* We can't set affinity on ISA interrupts */ | ||
237 | if (virq < NUM_ISA_INTERRUPTS) | ||
238 | continue; | ||
239 | if (irq_map[virq].host != xics_host) | ||
240 | continue; | ||
241 | irq = (unsigned int)irq_map[virq].hwirq; | ||
242 | /* We need to get IPIs still. */ | ||
243 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) | ||
244 | continue; | ||
245 | desc = irq_to_desc(virq); | ||
246 | /* We only need to migrate enabled IRQS */ | ||
247 | if (!desc || !desc->action) | ||
248 | continue; | ||
249 | chip = irq_desc_get_chip(desc); | ||
250 | if (!chip || !chip->irq_set_affinity) | ||
251 | continue; | ||
252 | |||
253 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
254 | |||
255 | /* Locate interrupt server */ | ||
256 | server = -1; | ||
257 | ics = irq_get_chip_data(virq); | ||
258 | if (ics) | ||
259 | server = ics->get_server(ics, irq); | ||
260 | if (server < 0) { | ||
261 | printk(KERN_ERR "%s: Can't find server for irq %d\n", | ||
262 | __func__, irq); | ||
263 | goto unlock; | ||
264 | } | ||
265 | |||
266 | /* We only support delivery to all cpus or to one cpu. | ||
267 | * The irq has to be migrated only in the single cpu | ||
268 | * case. | ||
269 | */ | ||
270 | if (server != hw_cpu) | ||
271 | goto unlock; | ||
272 | |||
273 | /* This is expected during cpu offline. */ | ||
274 | if (cpu_online(cpu)) | ||
275 | pr_warning("IRQ %u affinity broken off cpu %u\n", | ||
276 | virq, cpu); | ||
277 | |||
278 | /* Reset affinity to all cpus */ | ||
279 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
280 | irq_set_affinity(virq, cpu_all_mask); | ||
281 | continue; | ||
282 | unlock: | ||
283 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
284 | } | ||
285 | } | ||
286 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
287 | |||
288 | #ifdef CONFIG_SMP | ||
289 | /* | ||
290 | * For the moment we only implement delivery to all cpus or one cpu. | ||
291 | * | ||
292 | * If the requested affinity is cpu_all_mask, we set global affinity. | ||
293 | * If not we set it to the first cpu in the mask, even if multiple cpus | ||
294 | * are set. This is so things like irqbalance (which set core and package | ||
295 | * wide affinities) do the right thing. | ||
296 | */ | ||
297 | int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask, | ||
298 | unsigned int strict_check) | ||
299 | { | ||
300 | |||
301 | if (!distribute_irqs) | ||
302 | return xics_default_server; | ||
303 | |||
304 | if (!cpumask_subset(cpu_possible_mask, cpumask)) { | ||
305 | int server = cpumask_first_and(cpu_online_mask, cpumask); | ||
306 | |||
307 | if (server < nr_cpu_ids) | ||
308 | return get_hard_smp_processor_id(server); | ||
309 | |||
310 | if (strict_check) | ||
311 | return -1; | ||
312 | } | ||
313 | |||
314 | /* | ||
315 | * Workaround issue with some versions of JS20 firmware that | ||
316 | * deliver interrupts to cpus which haven't been started. This | ||
317 | * happens when using the maxcpus= boot option. | ||
318 | */ | ||
319 | if (cpumask_equal(cpu_online_mask, cpu_present_mask)) | ||
320 | return xics_default_distrib_server; | ||
321 | |||
322 | return xics_default_server; | ||
323 | } | ||
324 | #endif /* CONFIG_SMP */ | ||
325 | |||
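Note: a comment-style summary of how the policy above resolves, with representative cases (illustrative, not part of the patch):

/*
 * Representative outcomes of xics_get_irq_server():
 *   distribute_irqs off           -> xics_default_server
 *   mask narrower than possible   -> hard id of the first online cpu in
 *                                    the mask, or -1 under strict_check
 *                                    if none is online
 *   mask covers all possible cpus -> xics_default_distrib_server (global
 *                                    distribution), unless some present
 *                                    cpus are offline (JS20/maxcpus
 *                                    workaround) -> xics_default_server
 */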
326 | static int xics_host_match(struct irq_host *h, struct device_node *node) | ||
327 | { | ||
328 | /* IBM machines have interrupt parents of various funky types for things | ||
329 | * like vdevices, events, etc... The trick we use here is to match | ||
330 | * everything except the legacy 8259, which is compatible with "chrp,iic" | ||
331 | */ | ||
332 | return !of_device_is_compatible(node, "chrp,iic"); | ||
333 | } | ||
334 | |||
335 | /* Dummies */ | ||
336 | static void xics_ipi_unmask(struct irq_data *d) { } | ||
337 | static void xics_ipi_mask(struct irq_data *d) { } | ||
338 | |||
339 | static struct irq_chip xics_ipi_chip = { | ||
340 | .name = "XICS", | ||
341 | .irq_eoi = NULL, /* Patched at init time */ | ||
342 | .irq_mask = xics_ipi_mask, | ||
343 | .irq_unmask = xics_ipi_unmask, | ||
344 | }; | ||
345 | |||
346 | static int xics_host_map(struct irq_host *h, unsigned int virq, | ||
347 | irq_hw_number_t hw) | ||
348 | { | ||
349 | struct ics *ics; | ||
350 | |||
351 | pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw); | ||
352 | |||
353 | /* Insert the interrupt mapping into the radix tree for fast lookup */ | ||
354 | irq_radix_revmap_insert(xics_host, virq, hw); | ||
355 | |||
356 | /* They aren't all level sensitive but we just don't really know */ | ||
357 | irq_set_status_flags(virq, IRQ_LEVEL); | ||
358 | |||
359 | /* Don't call into ICS for IPIs */ | ||
360 | if (hw == XICS_IPI) { | ||
361 | irq_set_chip_and_handler(virq, &xics_ipi_chip, | ||
362 | handle_fasteoi_irq); | ||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | /* Let the ICS setup the chip data */ | ||
367 | list_for_each_entry(ics, &ics_list, link) | ||
368 | if (ics->map(ics, virq) == 0) | ||
369 | break; | ||
370 | return 0; | ||
371 | } | ||
372 | |||
373 | static int xics_host_xlate(struct irq_host *h, struct device_node *ct, | ||
374 | const u32 *intspec, unsigned int intsize, | ||
375 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | ||
376 | |||
377 | { | ||
378 | /* The current xics implementation translates everything | ||
379 | * to level. That is not technically right for MSIs, but it | ||
380 | * is irrelevant at this point. We might get smarter in the future | ||
381 | */ | ||
382 | *out_hwirq = intspec[0]; | ||
383 | *out_flags = IRQ_TYPE_LEVEL_LOW; | ||
384 | |||
385 | return 0; | ||
386 | } | ||
387 | |||
388 | static struct irq_host_ops xics_host_ops = { | ||
389 | .match = xics_host_match, | ||
390 | .map = xics_host_map, | ||
391 | .xlate = xics_host_xlate, | ||
392 | }; | ||
393 | |||
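Note: to see the three callbacks in action, a driver resolving an interrupt from the device tree goes through xlate() to decode the specifier, then map() installs handle_fasteoi_irq and the owning ICS chip. A hedged consumer sketch (mydev_interrupt and dev are hypothetical):

static int mydev_setup_irq(struct device_node *np, void *dev)
{
	/* triggers xics_host_xlate() then xics_host_map() for this host */
	unsigned int virq = irq_of_parse_and_map(np, 0);

	if (virq == NO_IRQ)
		return -ENODEV;
	return request_irq(virq, mydev_interrupt, 0, "mydev", dev);
}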
394 | static void __init xics_init_host(void) | ||
395 | { | ||
396 | xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops, | ||
397 | XICS_IRQ_SPURIOUS); | ||
398 | BUG_ON(xics_host == NULL); | ||
399 | irq_set_default_host(xics_host); | ||
400 | } | ||
401 | |||
402 | void __init xics_register_ics(struct ics *ics) | ||
403 | { | ||
404 | list_add(&ics->link, &ics_list); | ||
405 | } | ||
406 | |||
407 | static void __init xics_get_server_size(void) | ||
408 | { | ||
409 | struct device_node *np; | ||
410 | const u32 *isize; | ||
411 | |||
412 | /* We fetch the interrupt server size from the first ICS node | ||
413 | * we find, if any | ||
414 | */ | ||
415 | np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xics"); | ||
416 | if (!np) | ||
417 | return; | ||
418 | isize = of_get_property(np, "ibm,interrupt-server#-size", NULL); | ||
419 | if (isize) | ||
420 | xics_interrupt_server_size = *isize; | ||
421 | |||
422 | of_node_put(np); | ||
423 | } | ||
424 | |||
425 | void __init xics_init(void) | ||
426 | { | ||
427 | int rc = -1; | ||
428 | |||
429 | /* First, locate the ICP */ | ||
430 | #ifdef CONFIG_PPC_ICP_HV | ||
431 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
432 | rc = icp_hv_init(); | ||
433 | #endif | ||
434 | #ifdef CONFIG_PPC_ICP_NATIVE | ||
435 | if (rc < 0) | ||
436 | rc = icp_native_init(); | ||
437 | #endif | ||
438 | if (rc < 0) { | ||
439 | pr_warning("XICS: Cannot find a Presentation Controller !\n"); | ||
440 | return; | ||
441 | } | ||
442 | |||
443 | /* Copy get_irq callback over to ppc_md */ | ||
444 | ppc_md.get_irq = icp_ops->get_irq; | ||
445 | |||
446 | /* Patch up IPI chip EOI */ | ||
447 | xics_ipi_chip.irq_eoi = icp_ops->eoi; | ||
448 | |||
449 | /* Now locate ICS */ | ||
450 | #ifdef CONFIG_PPC_ICS_RTAS | ||
451 | rc = ics_rtas_init(); | ||
452 | #endif | ||
453 | if (rc < 0) | ||
454 | pr_warning("XICS: Cannot find a Source Controller !\n"); | ||
455 | |||
456 | /* Initialize common bits */ | ||
457 | xics_get_server_size(); | ||
458 | xics_update_irq_servers(); | ||
459 | xics_init_host(); | ||
460 | xics_setup_cpu(); | ||
461 | } | ||