Diffstat (limited to 'arch/powerpc/sysdev')
-rw-r--r--  arch/powerpc/sysdev/Kconfig            |   3
-rw-r--r--  arch/powerpc/sysdev/Makefile           |   4
-rw-r--r--  arch/powerpc/sysdev/xics/Kconfig       |  12
-rw-r--r--  arch/powerpc/sysdev/xics/Makefile      |   6
-rw-r--r--  arch/powerpc/sysdev/xics/icp-hv.c      | 184
-rw-r--r--  arch/powerpc/sysdev/xics/icp-native.c  | 312
-rw-r--r--  arch/powerpc/sysdev/xics/ics-rtas.c    | 229
-rw-r--r--  arch/powerpc/sysdev/xics/xics-common.c | 461
8 files changed, 1211 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index 396582835cb5..cfc18770af79 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -12,3 +12,6 @@ config PPC_MSI_BITMAP
 	depends on PCI_MSI
 	default y if MPIC
 	default y if FSL_PCI
+
+source "arch/powerpc/sysdev/xics/Kconfig"
+
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 1e0c933ef772..9516e7598573 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -57,3 +57,7 @@ obj-$(CONFIG_PPC_MPC52xx) += mpc5xxx_clocks.o
 ifeq ($(CONFIG_SUSPEND),y)
 obj-$(CONFIG_6xx)	+= 6xx-suspend.o
 endif
+
+subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
+
+obj-$(CONFIG_PPC_XICS) += xics/
diff --git a/arch/powerpc/sysdev/xics/Kconfig b/arch/powerpc/sysdev/xics/Kconfig
new file mode 100644
index 000000000000..123b8ddf2816
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/Kconfig
@@ -0,0 +1,12 @@
+config PPC_XICS
+	def_bool n
+
+config PPC_ICP_NATIVE
+	def_bool n
+
+config PPC_ICP_HV
+	def_bool n
+
+config PPC_ICS_RTAS
+	def_bool n
+
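All four symbols are def_bool n, so they are meant to be selected by platform
Kconfig entries rather than set by the user. A sketch of how a platform would
pull them in (the platform-side hunk is outside this diffstat, so the exact
entry shown is illustrative):

    config PPC_PSERIES
    	select PPC_XICS
    	select PPC_ICP_NATIVE
    	select PPC_ICP_HV
    	select PPC_ICS_RTAS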
diff --git a/arch/powerpc/sysdev/xics/Makefile b/arch/powerpc/sysdev/xics/Makefile
new file mode 100644
index 000000000000..b75a6059337f
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/Makefile
@@ -0,0 +1,6 @@
+subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
+
+obj-y				+= xics-common.o
+obj-$(CONFIG_PPC_ICP_NATIVE)	+= icp-native.o
+obj-$(CONFIG_PPC_ICP_HV)	+= icp-hv.o
+obj-$(CONFIG_PPC_ICS_RTAS)	+= ics-rtas.o
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
new file mode 100644
index 000000000000..b03d348b19a5
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2011 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+
+#include <asm/smp.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/xics.h>
+#include <asm/io.h>
+#include <asm/hvcall.h>
+
+static inline unsigned int icp_hv_get_xirr(unsigned char cppr)
+{
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+	long rc;
+
+	rc = plpar_hcall(H_XIRR, retbuf, cppr);
+	if (rc != H_SUCCESS)
+		panic("bad return code xirr - rc = %lx\n", rc);
+	return (unsigned int)retbuf[0];
+}
+
+static inline void icp_hv_set_xirr(unsigned int value)
+{
+	long rc = plpar_hcall_norets(H_EOI, value);
+	if (rc != H_SUCCESS)
+		panic("bad return code EOI - rc = %ld, value=%x\n", rc, value);
+}
+
+static inline void icp_hv_set_cppr(u8 value)
+{
+	long rc = plpar_hcall_norets(H_CPPR, value);
+	if (rc != H_SUCCESS)
+		panic("bad return code cppr - rc = %lx\n", rc);
+}
+
+static inline void icp_hv_set_qirr(int n_cpu, u8 value)
+{
+	long rc = plpar_hcall_norets(H_IPI, get_hard_smp_processor_id(n_cpu),
+				     value);
+	if (rc != H_SUCCESS)
+		panic("bad return code qirr - rc = %lx\n", rc);
+}
+
+static void icp_hv_eoi(struct irq_data *d)
+{
+	unsigned int hw_irq = (unsigned int)irq_data_to_hw(d);
+
+	iosync();
+	icp_hv_set_xirr((xics_pop_cppr() << 24) | hw_irq);
+}
+
+static void icp_hv_teardown_cpu(void)
+{
+	int cpu = smp_processor_id();
+
+	/* Clear any pending IPI */
+	icp_hv_set_qirr(cpu, 0xff);
+}
+
+static void icp_hv_flush_ipi(void)
+{
+	/* We took the IPI irq and never returned, so we need to EOI the
+	 * IPI, but we want to leave our priority 0.
+	 *
+	 * Should we check all the other interrupts too?
+	 * Should we be flagging the idle loop instead?
+	 * Or creating some task to be scheduled?
+	 */
+
+	icp_hv_set_xirr((0x00 << 24) | XICS_IPI);
+}
+
+static unsigned int icp_hv_get_irq(void)
+{
+	unsigned int xirr = icp_hv_get_xirr(xics_cppr_top());
+	unsigned int vec = xirr & 0x00ffffff;
+	unsigned int irq;
+
+	if (vec == XICS_IRQ_SPURIOUS)
+		return NO_IRQ;
+
+	irq = irq_radix_revmap_lookup(xics_host, vec);
+	if (likely(irq != NO_IRQ)) {
+		xics_push_cppr(vec);
+		return irq;
+	}
+
+	/* We don't have a linux mapping, so have rtas mask it. */
+	xics_mask_unknown_vec(vec);
+
+	/* We might learn about it later, so EOI it */
+	icp_hv_set_xirr(xirr);
+
+	return NO_IRQ;
+}
+
+static void icp_hv_set_cpu_priority(unsigned char cppr)
+{
+	xics_set_base_cppr(cppr);
+	icp_hv_set_cppr(cppr);
+	iosync();
+}
+
+#ifdef CONFIG_SMP
+
+static inline void icp_hv_do_message(int cpu, int msg)
+{
+	unsigned long *tgt = &per_cpu(xics_ipi_message, cpu);
+
+	set_bit(msg, tgt);
+	mb();
+	icp_hv_set_qirr(cpu, IPI_PRIORITY);
+}
+
+static void icp_hv_message_pass(int target, int msg)
+{
+	unsigned int i;
+
+	if (target < NR_CPUS) {
+		icp_hv_do_message(target, msg);
+	} else {
+		for_each_online_cpu(i) {
+			if (target == MSG_ALL_BUT_SELF
+			    && i == smp_processor_id())
+				continue;
+			icp_hv_do_message(i, msg);
+		}
+	}
+}
+
+static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id)
+{
+	int cpu = smp_processor_id();
+
+	icp_hv_set_qirr(cpu, 0xff);
+
+	return xics_ipi_dispatch(cpu);
+}
+
+#endif /* CONFIG_SMP */
+
+static const struct icp_ops icp_hv_ops = {
+	.get_irq	= icp_hv_get_irq,
+	.eoi		= icp_hv_eoi,
+	.set_priority	= icp_hv_set_cpu_priority,
+	.teardown_cpu	= icp_hv_teardown_cpu,
+	.flush_ipi	= icp_hv_flush_ipi,
+#ifdef CONFIG_SMP
+	.ipi_action	= icp_hv_ipi_action,
+	.message_pass	= icp_hv_message_pass,
+#endif
+};
+
+int icp_hv_init(void)
+{
+	struct device_node *np;
+
+	np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xicp");
+	if (!np)
+		np = of_find_node_by_type(NULL,
+			"PowerPC-External-Interrupt-Presentation");
+	if (!np)
+		return -ENODEV;
+
+	icp_ops = &icp_hv_ops;
+
+	return 0;
+}
+
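Both ICP backends encode the XIRR the same way: the top byte is the CPPR
(current processor priority) and the low 24 bits are the interrupt vector
(XISR), which is why EOI writes back (xics_pop_cppr() << 24) | hw_irq. A
minimal standalone sketch of that packing (helper names are illustrative,
not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* XIRR layout: | 8-bit CPPR | 24-bit vector (XISR) | */
    static uint32_t xirr_pack(uint8_t cppr, uint32_t vec)
    {
        return ((uint32_t)cppr << 24) | (vec & 0x00ffffff);
    }

    static uint8_t xirr_cppr(uint32_t xirr)
    {
        return xirr >> 24;
    }

    static uint32_t xirr_vec(uint32_t xirr)
    {
        return xirr & 0x00ffffff;
    }

    int main(void)
    {
        /* An EOI restores the pre-interrupt priority and names the vector */
        uint32_t xirr = xirr_pack(0x05, 0x42);
        printf("cppr=0x%02x vec=0x%06x\n", xirr_cppr(xirr), xirr_vec(xirr));
        return 0;
    }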
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
new file mode 100644
index 000000000000..be5e3d748edb
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright 2011 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/xics.h>
+
+struct icp_ipl {
+	union {
+		u32 word;
+		u8 bytes[4];
+	} xirr_poll;
+	union {
+		u32 word;
+		u8 bytes[4];
+	} xirr;
+	u32 dummy;
+	union {
+		u32 word;
+		u8 bytes[4];
+	} qirr;
+	u32 link_a;
+	u32 link_b;
+	u32 link_c;
+};
+
+static struct icp_ipl __iomem *icp_native_regs[NR_CPUS];
+
+static inline unsigned int icp_native_get_xirr(void)
+{
+	int cpu = smp_processor_id();
+
+	return in_be32(&icp_native_regs[cpu]->xirr.word);
+}
+
+static inline void icp_native_set_xirr(unsigned int value)
+{
+	int cpu = smp_processor_id();
+
+	out_be32(&icp_native_regs[cpu]->xirr.word, value);
+}
+
+static inline void icp_native_set_cppr(u8 value)
+{
+	int cpu = smp_processor_id();
+
+	out_8(&icp_native_regs[cpu]->xirr.bytes[0], value);
+}
+
+static inline void icp_native_set_qirr(int n_cpu, u8 value)
+{
+	out_8(&icp_native_regs[n_cpu]->qirr.bytes[0], value);
+}
+
+static void icp_native_set_cpu_priority(unsigned char cppr)
+{
+	xics_set_base_cppr(cppr);
+	icp_native_set_cppr(cppr);
+	iosync();
+}
+
+static void icp_native_eoi(struct irq_data *d)
+{
+	unsigned int hw_irq = (unsigned int)irq_data_to_hw(d);
+
+	iosync();
+	icp_native_set_xirr((xics_pop_cppr() << 24) | hw_irq);
+}
+
+static void icp_native_teardown_cpu(void)
+{
+	int cpu = smp_processor_id();
+
+	/* Clear any pending IPI */
+	icp_native_set_qirr(cpu, 0xff);
+}
+
+static void icp_native_flush_ipi(void)
+{
+	/* We took the IPI irq and never returned, so we need to EOI the
+	 * IPI, but we want to leave our priority 0.
+	 *
+	 * Should we check all the other interrupts too?
+	 * Should we be flagging the idle loop instead?
+	 * Or creating some task to be scheduled?
+	 */
+
+	icp_native_set_xirr((0x00 << 24) | XICS_IPI);
+}
+
+static unsigned int icp_native_get_irq(void)
+{
+	unsigned int xirr = icp_native_get_xirr();
+	unsigned int vec = xirr & 0x00ffffff;
+	unsigned int irq;
+
+	if (vec == XICS_IRQ_SPURIOUS)
+		return NO_IRQ;
+
+	irq = irq_radix_revmap_lookup(xics_host, vec);
+	if (likely(irq != NO_IRQ)) {
+		xics_push_cppr(vec);
+		return irq;
+	}
+
+	/* We don't have a linux mapping, so have rtas mask it. */
+	xics_mask_unknown_vec(vec);
+
+	/* We might learn about it later, so EOI it */
+	icp_native_set_xirr(xirr);
+
+	return NO_IRQ;
+}
+
+#ifdef CONFIG_SMP
+
+static inline void icp_native_do_message(int cpu, int msg)
+{
+	unsigned long *tgt = &per_cpu(xics_ipi_message, cpu);
+
+	set_bit(msg, tgt);
+	mb();
+	icp_native_set_qirr(cpu, IPI_PRIORITY);
+}
+
+static void icp_native_message_pass(int target, int msg)
+{
+	unsigned int i;
+
+	if (target < NR_CPUS) {
+		icp_native_do_message(target, msg);
+	} else {
+		for_each_online_cpu(i) {
+			if (target == MSG_ALL_BUT_SELF
+			    && i == smp_processor_id())
+				continue;
+			icp_native_do_message(i, msg);
+		}
+	}
+}
+
+static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
+{
+	int cpu = smp_processor_id();
+
+	icp_native_set_qirr(cpu, 0xff);
+
+	return xics_ipi_dispatch(cpu);
+}
+
+#endif /* CONFIG_SMP */
+
+static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
+					 unsigned long size)
+{
+	char *rname;
+	int i, cpu = -1;
+
+	/* This may look gross but it's good enough for now, we don't quite
+	 * have a hard -> linux processor id mapping.
+	 */
+	for_each_possible_cpu(i) {
+		if (!cpu_present(i))
+			continue;
+		if (hw_id == get_hard_smp_processor_id(i)) {
+			cpu = i;
+			break;
+		}
+	}
+
+	/* No match: skip that CPU. Don't print an error, it's normal, some
+	 * XICS come up with way more entries in there than you have CPUs.
+	 */
+	if (cpu == -1)
+		return 0;
+
+	rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation",
+			  cpu, hw_id);
+
+	if (!request_mem_region(addr, size, rname)) {
+		pr_warning("icp_native: Could not reserve ICP MMIO"
+			   " for CPU %d, interrupt server #0x%x\n",
+			   cpu, hw_id);
+		return -EBUSY;
+	}
+
+	icp_native_regs[cpu] = ioremap(addr, size);
+	if (!icp_native_regs[cpu]) {
+		pr_warning("icp_native: Failed ioremap for CPU %d, "
+			   "interrupt server #0x%x, addr %#lx\n",
+			   cpu, hw_id, addr);
+		release_mem_region(addr, size);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int __init icp_native_init_one_node(struct device_node *np,
+					   unsigned int *indx)
+{
+	unsigned int ilen;
+	const u32 *ireg;
+	int i;
+	int reg_tuple_size;
+	int num_servers = 0;
+
+	/* This code makes the theoretically broken assumption that the
+	 * interrupt server numbers are the same as the hard CPU numbers.
+	 * This happens to be the case so far but we are playing with fire...
+	 * should be fixed one of these days. -BenH.
+	 */
+	ireg = of_get_property(np, "ibm,interrupt-server-ranges", &ilen);
+
+	/* Does that ever happen? We'll know soon enough... but even good
+	 * old f80 does have that property...
+	 */
+	WARN_ON((ireg == NULL) || (ilen != 2*sizeof(u32)));
+
+	if (ireg) {
+		*indx = of_read_number(ireg, 1);
+		if (ilen >= 2*sizeof(u32))
+			num_servers = of_read_number(ireg + 1, 1);
+	}
+
+	ireg = of_get_property(np, "reg", &ilen);
+	if (!ireg) {
+		pr_err("icp_native: Can't find interrupt reg property\n");
+		return -1;
+	}
+
+	reg_tuple_size = (of_n_addr_cells(np) + of_n_size_cells(np)) * 4;
+	if (((ilen % reg_tuple_size) != 0)
+	    || (num_servers && (num_servers != (ilen / reg_tuple_size)))) {
+		pr_err("icp_native: ICP reg len (%d) != num servers (%d)\n",
+		       ilen / reg_tuple_size, num_servers);
+		return -1;
+	}
+
+	for (i = 0; i < (ilen / reg_tuple_size); i++) {
+		struct resource r;
+		int err;
+
+		err = of_address_to_resource(np, i, &r);
+		if (err) {
+			pr_err("icp_native: Could not translate ICP MMIO"
+			       " for interrupt server 0x%x (%d)\n", *indx, err);
+			return -1;
+		}
+
+		if (icp_native_map_one_cpu(*indx, r.start, r.end - r.start))
+			return -1;
+
+		(*indx)++;
+	}
+	return 0;
+}
+
+static const struct icp_ops icp_native_ops = {
+	.get_irq	= icp_native_get_irq,
+	.eoi		= icp_native_eoi,
+	.set_priority	= icp_native_set_cpu_priority,
+	.teardown_cpu	= icp_native_teardown_cpu,
+	.flush_ipi	= icp_native_flush_ipi,
+#ifdef CONFIG_SMP
+	.ipi_action	= icp_native_ipi_action,
+	.message_pass	= icp_native_message_pass,
+#endif
+};
+
+int icp_native_init(void)
+{
+	struct device_node *np;
+	u32 indx = 0;
+	int found = 0;
+
+	for_each_compatible_node(np, NULL, "ibm,ppc-xicp")
+		if (icp_native_init_one_node(np, &indx) == 0)
+			found = 1;
+	if (!found) {
+		for_each_node_by_type(np,
+			"PowerPC-External-Interrupt-Presentation") {
+			if (icp_native_init_one_node(np, &indx) == 0)
+				found = 1;
+		}
+	}
+
+	if (found == 0)
+		return -ENODEV;
+
+	icp_ops = &icp_native_ops;
+
+	return 0;
+}
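A note on the byte-wise accessors above: struct icp_ipl overlays each 32-bit
register with a u8[4] so that out_8() on bytes[0] can update just the CPPR
field of the big-endian XIRR register without disturbing the pending vector.
A standalone illustration of why bytes[0] is the CPPR (assuming a big-endian
CPU, as on these machines):

    #include <stdint.h>
    #include <stdio.h>

    union xirr_reg {
        uint32_t word;
        uint8_t bytes[4];
    };

    int main(void)
    {
        union xirr_reg r;

        /* CPPR 0xff, vector 0x000042 */
        r.word = 0xff000042;

        /* On a big-endian CPU this prints 0xff: bytes[0] aliases the
         * most significant byte, i.e. the CPPR field. On a little-endian
         * host it would print 0x42, which is exactly why the driver uses
         * byte offsets against a big-endian device register. */
        printf("bytes[0] = 0x%02x\n", r.bytes[0]);
        return 0;
    }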
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c
new file mode 100644
index 000000000000..5b3ee387e89d
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/ics-rtas.c
@@ -0,0 +1,229 @@
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+#include <linux/msi.h>
+
+#include <asm/prom.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/xics.h>
+#include <asm/rtas.h>
+
+/* RTAS service tokens */
+static int ibm_get_xive;
+static int ibm_set_xive;
+static int ibm_int_on;
+static int ibm_int_off;
+
+static int ics_rtas_map(struct ics *ics, unsigned int virq);
+static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec);
+static long ics_rtas_get_server(struct ics *ics, unsigned long vec);
+
+/* Only one global ICS state struct */
+static struct ics ics_rtas = {
+	.map		= ics_rtas_map,
+	.mask_unknown	= ics_rtas_mask_unknown,
+	.get_server	= ics_rtas_get_server,
+};
+
+static void ics_rtas_unmask_irq(struct irq_data *d)
+{
+	unsigned int hw_irq = (unsigned int)irq_data_to_hw(d);
+	int call_status;
+	int server;
+
+	pr_devel("xics: unmask virq %d [hw 0x%x]\n", d->irq, hw_irq);
+
+	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
+		return;
+
+	server = xics_get_irq_server(d->irq, d->affinity, 0);
+
+	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server,
+				DEFAULT_PRIORITY);
+	if (call_status != 0) {
+		printk(KERN_ERR
+			"%s: ibm_set_xive irq %u server %x returned %d\n",
+			__func__, hw_irq, server, call_status);
+		return;
+	}
+
+	/* Now unmask the interrupt (often a no-op) */
+	call_status = rtas_call(ibm_int_on, 1, 1, NULL, hw_irq);
+	if (call_status != 0) {
+		printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n",
+			__func__, hw_irq, call_status);
+		return;
+	}
+}
+
+static unsigned int ics_rtas_startup(struct irq_data *d)
+{
+#ifdef CONFIG_PCI_MSI
+	/*
+	 * The generic MSI code returns with the interrupt disabled on the
+	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
+	 * at that level, so we do it here by hand.
+	 */
+	if (d->msi_desc)
+		unmask_msi_irq(d);
+#endif
+	/* unmask it */
+	ics_rtas_unmask_irq(d);
+	return 0;
+}
+
+static void ics_rtas_mask_real_irq(unsigned int hw_irq)
+{
+	int call_status;
+
+	if (hw_irq == XICS_IPI)
+		return;
+
+	call_status = rtas_call(ibm_int_off, 1, 1, NULL, hw_irq);
+	if (call_status != 0) {
+		printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n",
+			__func__, hw_irq, call_status);
+		return;
+	}
+
+	/* Have to set XIVE to 0xff to be able to remove a slot */
+	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq,
+				xics_default_server, 0xff);
+	if (call_status != 0) {
+		printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n",
+			__func__, hw_irq, call_status);
+		return;
+	}
+}
+
+static void ics_rtas_mask_irq(struct irq_data *d)
+{
+	unsigned int hw_irq = (unsigned int)irq_data_to_hw(d);
+
+	pr_devel("xics: mask virq %d [hw 0x%x]\n", d->irq, hw_irq);
+
+	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
+		return;
+	ics_rtas_mask_real_irq(hw_irq);
+}
+
+static int ics_rtas_set_affinity(struct irq_data *d,
+				 const struct cpumask *cpumask,
+				 bool force)
+{
+	unsigned int hw_irq = (unsigned int)irq_data_to_hw(d);
+	int status;
+	int xics_status[2];
+	int irq_server;
+
+	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
+		return -1;
+
+	status = rtas_call(ibm_get_xive, 1, 3, xics_status, hw_irq);
+
+	if (status) {
+		printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
+			__func__, hw_irq, status);
+		return -1;
+	}
+
+	irq_server = xics_get_irq_server(d->irq, cpumask, 1);
+	if (irq_server == -1) {
+		char cpulist[128];
+		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
+		printk(KERN_WARNING
+			"%s: No online cpus in the mask %s for irq %d\n",
+			__func__, cpulist, d->irq);
+		return -1;
+	}
+
+	status = rtas_call(ibm_set_xive, 3, 1, NULL,
+			   hw_irq, irq_server, xics_status[1]);
+
+	if (status) {
+		printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n",
+			__func__, hw_irq, status);
+		return -1;
+	}
+
+	return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip ics_rtas_irq_chip = {
+	.name = "XICS",
+	.irq_startup = ics_rtas_startup,
+	.irq_mask = ics_rtas_mask_irq,
+	.irq_unmask = ics_rtas_unmask_irq,
+	.irq_eoi = NULL, /* Patched at init time */
+	.irq_set_affinity = ics_rtas_set_affinity
+};
+
+static int ics_rtas_map(struct ics *ics, unsigned int virq)
+{
+	unsigned int hw_irq = (unsigned int)irq_map[virq].hwirq;
+	int status[2];
+	int rc;
+
+	if (WARN_ON(hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS))
+		return -EINVAL;
+
+	/* Check if RTAS knows about this interrupt */
+	rc = rtas_call(ibm_get_xive, 1, 3, status, hw_irq);
+	if (rc)
+		return -ENXIO;
+
+	irq_set_chip_and_handler(virq, &ics_rtas_irq_chip, handle_fasteoi_irq);
+	irq_set_chip_data(virq, &ics_rtas);
+
+	return 0;
+}
+
+static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec)
+{
+	ics_rtas_mask_real_irq(vec);
+}
+
+static long ics_rtas_get_server(struct ics *ics, unsigned long vec)
+{
+	int rc, status[2];
+
+	rc = rtas_call(ibm_get_xive, 1, 3, status, vec);
+	if (rc)
+		return -1;
+	return status[0];
+}
+
+int ics_rtas_init(void)
+{
+	ibm_get_xive = rtas_token("ibm,get-xive");
+	ibm_set_xive = rtas_token("ibm,set-xive");
+	ibm_int_on = rtas_token("ibm,int-on");
+	ibm_int_off = rtas_token("ibm,int-off");
+
+	/* We enable the RTAS "ICS" if RTAS is present with the
+	 * appropriate tokens
+	 */
+	if (ibm_get_xive == RTAS_UNKNOWN_SERVICE ||
+	    ibm_set_xive == RTAS_UNKNOWN_SERVICE)
+		return -ENODEV;
+
+	/* We need to patch our irq chip's EOI to point to the
+	 * right ICP
+	 */
+	ics_rtas_irq_chip.irq_eoi = icp_ops->eoi;
+
+	/* Register ourselves */
+	xics_register_ics(&ics_rtas);
+
+	return 0;
+}
+
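The three callbacks above (map, mask_unknown, get_server) are the whole
contract between an ICS backend and xics-common.c, which simply offers each
new interrupt to every registered ICS in turn. A standalone sketch of that
dispatch pattern in plain C (names and the claim rule are illustrative):

    #include <stdio.h>

    struct ics {
        const char *name;
        /* Return 0 to claim the interrupt, non-zero to pass. */
        int (*map)(struct ics *ics, unsigned int virq);
        struct ics *next;
    };

    static int rtas_like_map(struct ics *ics, unsigned int virq)
    {
        return virq >= 16 ? 0 : -1; /* pretend low vectors aren't ours */
    }

    static struct ics ics_rtas_like = { "rtas", rtas_like_map, NULL };
    static struct ics *ics_list = &ics_rtas_like;

    static void map_irq(unsigned int virq)
    {
        struct ics *ics;

        for (ics = ics_list; ics; ics = ics->next)
            if (ics->map(ics, virq) == 0) {
                printf("virq %u claimed by %s\n", virq, ics->name);
                return;
            }
        printf("virq %u unclaimed\n", virq);
    }

    int main(void)
    {
        map_irq(3);
        map_irq(42);
        return 0;
    }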
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
new file mode 100644
index 000000000000..a2be84de5237
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -0,0 +1,461 @@
+/*
+ * Copyright 2011 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include <linux/types.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/debugfs.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/rtas.h>
+#include <asm/xics.h>
+#include <asm/firmware.h>
+
+/* Globals common to all ICP/ICS implementations */
+const struct icp_ops *icp_ops;
+
+unsigned int xics_default_server = 0xff;
+unsigned int xics_default_distrib_server = 0;
+unsigned int xics_interrupt_server_size = 8;
+
+DEFINE_PER_CPU(struct xics_cppr, xics_cppr);
+
+struct irq_host *xics_host;
+
+static LIST_HEAD(ics_list);
+
+void xics_update_irq_servers(void)
+{
+	int i, j;
+	struct device_node *np;
+	u32 ilen;
+	const u32 *ireg;
+	u32 hcpuid;
+
+	/* Find the server numbers for the boot cpu. */
+	np = of_get_cpu_node(boot_cpuid, NULL);
+	BUG_ON(!np);
+
+	hcpuid = get_hard_smp_processor_id(boot_cpuid);
+	xics_default_server = hcpuid;
+
+	ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
+	if (!ireg) {
+		of_node_put(np);
+		return;
+	}
+
+	i = ilen / sizeof(int);
+
+	/* The global interrupt distribution server is specified in the last
+	 * entry of the "ibm,ppc-interrupt-gserver#s" property. Get the last
+	 * entry from this property for the current boot cpu id and use it
+	 * as the default distribution server.
+	 */
+	for (j = 0; j < i; j += 2) {
+		if (ireg[j] == hcpuid) {
+			xics_default_distrib_server = ireg[j+1];
+		}
+	}
+
+	of_node_put(np);
+}
+
+/* GIQ stuff, currently only supported on RTAS setups, will have
+ * to be sorted properly for bare metal
+ */
+void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
+{
+#ifdef CONFIG_PPC_RTAS
+	int index;
+	int status;
+
+	if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL))
+		return;
+
+	index = (1UL << xics_interrupt_server_size) - 1 - gserver;
+
+	status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join);
+
+	WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n",
+	     GLOBAL_INTERRUPT_QUEUE, index, join, status);
+#endif
+}
+
+void xics_setup_cpu(void)
+{
+	icp_ops->set_priority(LOWEST_PRIORITY);
+
+	xics_set_cpu_giq(xics_default_distrib_server, 1);
+}
+
+void xics_mask_unknown_vec(unsigned int vec)
+{
+	struct ics *ics;
+
+	pr_err("Interrupt %u (real) is invalid, disabling it.\n", vec);
+
+	list_for_each_entry(ics, &ics_list, link)
+		ics->mask_unknown(ics, vec);
+}
+
+
+#ifdef CONFIG_SMP
+
+DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message);
+
+irqreturn_t xics_ipi_dispatch(int cpu)
+{
+	unsigned long *tgt = &per_cpu(xics_ipi_message, cpu);
+
+	mb();	/* order mmio clearing qirr */
+	while (*tgt) {
+		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) {
+			smp_message_recv(PPC_MSG_CALL_FUNCTION);
+		}
+		if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) {
+			smp_message_recv(PPC_MSG_RESCHEDULE);
+		}
+		if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) {
+			smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE);
+		}
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
+		if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) {
+			smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
+		}
+#endif
+	}
+	return IRQ_HANDLED;
+}
+
+static void xics_request_ipi(void)
+{
+	unsigned int ipi;
+
+	ipi = irq_create_mapping(xics_host, XICS_IPI);
+	BUG_ON(ipi == NO_IRQ);
+
+	/*
+	 * IPIs are marked IRQF_DISABLED as they must run with irqs
+	 * disabled
+	 */
+	irq_set_handler(ipi, handle_percpu_irq);
+	BUG_ON(request_irq(ipi, icp_ops->ipi_action,
+			   IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL));
+}
+
+int __init xics_smp_probe(void)
+{
+	/* Setup message_pass callback based on which ICP is used */
+	smp_ops->message_pass = icp_ops->message_pass;
+
+	/* Register all the IPIs */
+	xics_request_ipi();
+
+	return cpumask_weight(cpu_possible_mask);
+}
+
+#endif /* CONFIG_SMP */
+
+void xics_teardown_cpu(void)
+{
+	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+
+	/*
+	 * we have to reset the cppr index to 0 because we're
+	 * not going to return from the IPI
+	 */
+	os_cppr->index = 0;
+	icp_ops->set_priority(0);
+	icp_ops->teardown_cpu();
+}
+
+void xics_kexec_teardown_cpu(int secondary)
+{
+	xics_teardown_cpu();
+
+	icp_ops->flush_ipi();
+
+	/*
+	 * Some machines need to have at least one cpu in the GIQ,
+	 * so leave the master cpu in the group.
+	 */
+	if (secondary)
+		xics_set_cpu_giq(xics_default_distrib_server, 0);
+}
+
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* Interrupts are disabled. */
+void xics_migrate_irqs_away(void)
+{
+	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
+	unsigned int irq, virq;
+
+	/* If we used to be the default server, move to the new "boot_cpuid" */
+	if (hw_cpu == xics_default_server)
+		xics_update_irq_servers();
+
+	/* Reject any interrupt that was queued to us... */
+	icp_ops->set_priority(0);
+
+	/* Remove ourselves from the global interrupt queue */
+	xics_set_cpu_giq(xics_default_distrib_server, 0);
+
+	/* Allow IPIs again... */
+	icp_ops->set_priority(DEFAULT_PRIORITY);
+
+	for_each_irq(virq) {
+		struct irq_desc *desc;
+		struct irq_chip *chip;
+		long server;
+		unsigned long flags;
+		struct ics *ics;
+
+		/* We can't set affinity on ISA interrupts */
+		if (virq < NUM_ISA_INTERRUPTS)
+			continue;
+		if (irq_map[virq].host != xics_host)
+			continue;
+		irq = (unsigned int)irq_map[virq].hwirq;
+		/* We need to get IPIs still. */
+		if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
+			continue;
+		desc = irq_to_desc(virq);
+		/* We only need to migrate enabled IRQS */
+		if (!desc || !desc->action)
+			continue;
+		chip = irq_desc_get_chip(desc);
+		if (!chip || !chip->irq_set_affinity)
+			continue;
+
+		raw_spin_lock_irqsave(&desc->lock, flags);
+
+		/* Locate interrupt server */
+		server = -1;
+		ics = irq_get_chip_data(virq);
+		if (ics)
+			server = ics->get_server(ics, irq);
+		if (server < 0) {
+			printk(KERN_ERR "%s: Can't find server for irq %d\n",
+			       __func__, irq);
+			goto unlock;
+		}
+
+		/* We only support delivery to all cpus or to one cpu.
+		 * The irq has to be migrated only in the single cpu
+		 * case.
+		 */
+		if (server != hw_cpu)
+			goto unlock;
+
+		/* This is expected during cpu offline. */
+		if (cpu_online(cpu))
+			pr_warning("IRQ %u affinity broken off cpu %u\n",
+				   virq, cpu);
+
+		/* Reset affinity to all cpus */
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
+		irq_set_affinity(virq, cpu_all_mask);
+		continue;
+unlock:
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
+	}
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#ifdef CONFIG_SMP
+/*
+ * For the moment we only implement delivery to all cpus or one cpu.
+ *
+ * If the requested affinity is cpu_all_mask, we set global affinity.
+ * If not we set it to the first cpu in the mask, even if multiple cpus
+ * are set. This is so things like irqbalance (which set core and package
+ * wide affinities) do the right thing.
+ */
+int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
+			unsigned int strict_check)
+{
+
+	if (!distribute_irqs)
+		return xics_default_server;
+
+	if (!cpumask_subset(cpu_possible_mask, cpumask)) {
+		int server = cpumask_first_and(cpu_online_mask, cpumask);
+
+		if (server < nr_cpu_ids)
+			return get_hard_smp_processor_id(server);
+
+		if (strict_check)
+			return -1;
+	}
+
+	/*
+	 * Workaround issue with some versions of JS20 firmware that
+	 * deliver interrupts to cpus which haven't been started. This
+	 * happens when using the maxcpus= boot option.
+	 */
+	if (cpumask_equal(cpu_online_mask, cpu_present_mask))
+		return xics_default_distrib_server;
+
+	return xics_default_server;
+}
+#endif /* CONFIG_SMP */
+
+static int xics_host_match(struct irq_host *h, struct device_node *node)
+{
+	/* IBM machines have interrupt parents of various funky types for
+	 * things like vdevices, events, etc... The trick we use is to match
+	 * everything except the legacy 8259, which is compatible "chrp,iic".
+	 */
+	return !of_device_is_compatible(node, "chrp,iic");
+}
+
+/* Dummies */
+static void xics_ipi_unmask(struct irq_data *d) { }
+static void xics_ipi_mask(struct irq_data *d) { }
+
+static struct irq_chip xics_ipi_chip = {
+	.name = "XICS",
+	.irq_eoi = NULL, /* Patched at init time */
+	.irq_mask = xics_ipi_mask,
+	.irq_unmask = xics_ipi_unmask,
+};
+
+static int xics_host_map(struct irq_host *h, unsigned int virq,
+			 irq_hw_number_t hw)
+{
+	struct ics *ics;
+
+	pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
+
+	/* Insert the interrupt mapping into the radix tree for fast lookup */
+	irq_radix_revmap_insert(xics_host, virq, hw);
+
+	/* They aren't all level sensitive but we just don't really know */
+	irq_set_status_flags(virq, IRQ_LEVEL);
+
+	/* Don't call into ICS for IPIs */
+	if (hw == XICS_IPI) {
+		irq_set_chip_and_handler(virq, &xics_ipi_chip,
+					 handle_fasteoi_irq);
+		return 0;
+	}
+
+	/* Let the ICS setup the chip data */
+	list_for_each_entry(ics, &ics_list, link)
+		if (ics->map(ics, virq) == 0)
+			break;
+	return 0;
+}
+
+static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
+			   const u32 *intspec, unsigned int intsize,
+			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)
+
+{
+	/* Current xics implementation translates everything
+	 * to level. It is not technically right for MSIs but this
+	 * is irrelevant at this point. We might get smarter in the future
+	 */
+	*out_hwirq = intspec[0];
+	*out_flags = IRQ_TYPE_LEVEL_LOW;
+
+	return 0;
+}
+
+static struct irq_host_ops xics_host_ops = {
+	.match = xics_host_match,
+	.map = xics_host_map,
+	.xlate = xics_host_xlate,
+};
+
+static void __init xics_init_host(void)
+{
+	xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops,
+				   XICS_IRQ_SPURIOUS);
+	BUG_ON(xics_host == NULL);
+	irq_set_default_host(xics_host);
+}
+
+void __init xics_register_ics(struct ics *ics)
+{
+	list_add(&ics->link, &ics_list);
+}
+
+static void __init xics_get_server_size(void)
+{
+	struct device_node *np;
+	const u32 *isize;
+
+	/* We fetch the interrupt server size from the first ICS node
+	 * we find if any
+	 */
+	np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xics");
+	if (!np)
+		return;
+	isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
+	if (!isize)
+		return;
+	xics_interrupt_server_size = *isize;
+	of_node_put(np);
+}
+
+void __init xics_init(void)
+{
+	int rc = -1;
+
+	/* First, locate the ICP */
+#ifdef CONFIG_PPC_ICP_HV
+	if (firmware_has_feature(FW_FEATURE_LPAR))
+		rc = icp_hv_init();
+#endif
+#ifdef CONFIG_PPC_ICP_NATIVE
+	if (rc < 0)
+		rc = icp_native_init();
+#endif
+	if (rc < 0) {
+		pr_warning("XICS: Cannot find a Presentation Controller!\n");
+		return;
+	}
+
+	/* Copy get_irq callback over to ppc_md */
+	ppc_md.get_irq = icp_ops->get_irq;
+
+	/* Patch up IPI chip EOI */
+	xics_ipi_chip.irq_eoi = icp_ops->eoi;
+
+	/* Now locate ICS */
+#ifdef CONFIG_PPC_ICS_RTAS
+	rc = ics_rtas_init();
+#endif
+	if (rc < 0)
+		pr_warning("XICS: Cannot find a Source Controller!\n");
+
+	/* Initialize common bits */
+	xics_get_server_size();
+	xics_update_irq_servers();
+	xics_init_host();
+	xics_setup_cpu();
+}
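The message_pass/xics_ipi_dispatch() pair above is a small lock-free mailbox:
the sender sets a per-CPU message bit, orders the store with mb(), then raises
the IPI via the qirr; the receiver acks the qirr, orders with mb(), then
drains the bits. A userspace analogue of that pattern with C11 atomics (an
illustrative sketch, not kernel code):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong ipi_message;

    /* Sender side: mirrors set_bit(msg, tgt); mb(); set_qirr(...). */
    static void send_msg(int msg)
    {
        atomic_fetch_or_explicit(&ipi_message, 1UL << msg,
                                 memory_order_release);
        /* ...the real code now raises the interrupt on the target... */
    }

    /* Receiver side: mirrors the test_and_clear_bit() drain loop. */
    static void dispatch(void)
    {
        unsigned long pending =
            atomic_exchange_explicit(&ipi_message, 0,
                                     memory_order_acquire);
        int msg;

        for (msg = 0; pending; msg++, pending >>= 1)
            if (pending & 1)
                printf("handle message %d\n", msg);
    }

    int main(void)
    {
        send_msg(2);    /* e.g. a PPC_MSG_CALL_FUNC_SINGLE-like slot */
        dispatch();
        return 0;
    }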