Diffstat (limited to 'arch/powerpc/platforms/pseries')
 arch/powerpc/platforms/pseries/Makefile         |  2
 arch/powerpc/platforms/pseries/dlpar.c          |  8
 arch/powerpc/platforms/pseries/eeh.c            |  2
 arch/powerpc/platforms/pseries/event_sources.c  | 79
 arch/powerpc/platforms/pseries/hotplug-cpu.c    | 67
 arch/powerpc/platforms/pseries/hvCall.S         | 38
 arch/powerpc/platforms/pseries/iommu.c          |  2
 arch/powerpc/platforms/pseries/lpar.c           | 33
 arch/powerpc/platforms/pseries/plpar_wrappers.h | 26
 arch/powerpc/platforms/pseries/pseries.h        |  7
 arch/powerpc/platforms/pseries/ras.c            | 62
 arch/powerpc/platforms/pseries/setup.c          | 10
 arch/powerpc/platforms/pseries/smp.c            | 42
 arch/powerpc/platforms/pseries/xics.c           | 38
 14 files changed, 266 insertions(+), 150 deletions(-)
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 0ff5174ae4f5..3dbef309bc8d 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -7,7 +7,7 @@ EXTRA_CFLAGS += -DDEBUG
 endif
 
 obj-y                   := lpar.o hvCall.o nvram.o reconfig.o \
-                           setup.o iommu.o ras.o \
+                           setup.o iommu.o event_sources.o ras.o \
                            firmware.o power.o dlpar.o
 obj-$(CONFIG_SMP)       += smp.o
 obj-$(CONFIG_XICS)      += xics.o
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index e1682bc168a3..d71e58584086 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -79,13 +79,12 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
         * prepend this to the full_name.
         */
        name = (char *)ccwa + ccwa->name_offset;
-       dn->full_name = kmalloc(strlen(name) + 2, GFP_KERNEL);
+       dn->full_name = kasprintf(GFP_KERNEL, "/%s", name);
        if (!dn->full_name) {
                kfree(dn);
                return NULL;
        }
 
-       sprintf(dn->full_name, "/%s", name);
        return dn;
 }
 
@@ -410,15 +409,13 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
         * directory of the device tree. CPUs actually live in the
         * cpus directory so we need to fixup the full_name.
         */
-       cpu_name = kzalloc(strlen(dn->full_name) + strlen("/cpus") + 1,
-                          GFP_KERNEL);
+       cpu_name = kasprintf(GFP_KERNEL, "/cpus%s", dn->full_name);
        if (!cpu_name) {
                dlpar_free_cc_nodes(dn);
                rc = -ENOMEM;
                goto out;
        }
 
-       sprintf(cpu_name, "/cpus%s", dn->full_name);
        kfree(dn->full_name);
        dn->full_name = cpu_name;
 
@@ -433,6 +430,7 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
        if (rc) {
                dlpar_release_drc(drc_index);
                dlpar_free_cc_nodes(dn);
+               goto out;
        }
 
        rc = dlpar_online_cpu(dn);
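Note on the dlpar.c hunks above: the first two conversions replace the
open-coded "size the buffer, kmalloc, then sprintf" pattern with kasprintf(),
which computes the length, allocates, and formats in a single call, returning
NULL on allocation failure; the third hunk adds a missing goto so a failed
attach no longer falls through to onlining the freed node. A minimal sketch
of the kasprintf equivalence (the helper names here are hypothetical, not
from the patch):

    #include <linux/slab.h>
    #include <linux/string.h>

    /* Before: the caller sizes the buffer by hand ("/" + name + NUL). */
    static char *full_name_manual(const char *name)
    {
            char *p = kmalloc(strlen(name) + 2, GFP_KERNEL);
            if (p)
                    sprintf(p, "/%s", name);
            return p;
    }

    /* After: kasprintf() sizes, allocates, and formats in one step. */
    static char *full_name_kasprintf(const char *name)
    {
            return kasprintf(GFP_KERNEL, "/%s", name);
    }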
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 7df7fbb7cacb..34b7dc12e731 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -749,7 +749,7 @@ static void __rtas_set_slot_reset(struct pci_dn *pdn)
        /* Determine type of EEH reset required by device,
         * default hot reset or fundamental reset
         */
-       if (dev->needs_freset)
+       if (dev && dev->needs_freset)
                rtas_pci_slot_reset(pdn, 3);
        else
                rtas_pci_slot_reset(pdn, 1);
diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c
new file mode 100644
index 000000000000..e889c9d9586a
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/event_sources.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2001 Dave Engebretsen IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <asm/prom.h>
+
+#include "pseries.h"
+
+void request_event_sources_irqs(struct device_node *np,
+                               irq_handler_t handler,
+                               const char *name)
+{
+       int i, index, count = 0;
+       struct of_irq oirq;
+       const u32 *opicprop;
+       unsigned int opicplen;
+       unsigned int virqs[16];
+
+       /* Check for obsolete "open-pic-interrupt" property. If present, then
+        * map those interrupts using the default interrupt host and default
+        * trigger
+        */
+       opicprop = of_get_property(np, "open-pic-interrupt", &opicplen);
+       if (opicprop) {
+               opicplen /= sizeof(u32);
+               for (i = 0; i < opicplen; i++) {
+                       if (count > 15)
+                               break;
+                       virqs[count] = irq_create_mapping(NULL, *(opicprop++));
+                       if (virqs[count] == NO_IRQ)
+                               printk(KERN_ERR "Unable to allocate interrupt "
+                                      "number for %s\n", np->full_name);
+                       else
+                               count++;
+
+               }
+       }
+       /* Else use normal interrupt tree parsing */
+       else {
+               /* First try to do a proper OF tree parsing */
+               for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
+                    index++) {
+                       if (count > 15)
+                               break;
+                       virqs[count] = irq_create_of_mapping(oirq.controller,
+                                                            oirq.specifier,
+                                                            oirq.size);
+                       if (virqs[count] == NO_IRQ)
+                               printk(KERN_ERR "Unable to allocate interrupt "
+                                      "number for %s\n", np->full_name);
+                       else
+                               count++;
+               }
+       }
+
+       /* Now request them */
+       for (i = 0; i < count; i++) {
+               if (request_irq(virqs[i], handler, 0, name, NULL)) {
+                       printk(KERN_ERR "Unable to request interrupt %d for "
+                              "%s\n", virqs[i], np->full_name);
+                       return;
+               }
+       }
+}
+
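Note: request_event_sources_irqs() is the old request_ras_irqs() from ras.c
(removed later in this diff), moved verbatim so other event-sources users can
share it. A minimal usage sketch, mirroring the init_ras_IRQ() hunk below:

    struct device_node *np;

    np = of_find_node_by_path("/event-sources/internal-errors");
    if (np != NULL) {
            request_event_sources_irqs(np, ras_error_interrupt, "RAS_ERROR");
            of_node_put(np);
    }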
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index a8e1d5d17a28..8f85f399ab9f 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -154,30 +154,6 @@ static void pseries_mach_cpu_die(void)
        for(;;);
 }
 
-static int qcss_tok;   /* query-cpu-stopped-state token */
-
-/* Get state of physical CPU.
- * Return codes:
- *     0 - The processor is in the RTAS stopped state
- *     1 - stop-self is in progress
- *     2 - The processor is not in the RTAS stopped state
- *     -1 - Hardware Error
- *     -2 - Hardware Busy, Try again later.
- */
-static int query_cpu_stopped(unsigned int pcpu)
-{
-       int cpu_status, status;
-
-       status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
-       if (status != 0) {
-               printk(KERN_ERR
-                      "RTAS query-cpu-stopped-state failed: %i\n", status);
-               return status;
-       }
-
-       return cpu_status;
-}
-
 static int pseries_cpu_disable(void)
 {
        int cpu = smp_processor_id();
@@ -187,7 +163,7 @@ static int pseries_cpu_disable(void)
 
        /*fix boot_cpuid here*/
        if (cpu == boot_cpuid)
-               boot_cpuid = any_online_cpu(cpu_online_map);
+               boot_cpuid = cpumask_any(cpu_online_mask);
 
        /* FIXME: abstract this to not be platform specific later on */
        xics_migrate_irqs_away();
@@ -224,8 +200,9 @@ static void pseries_cpu_die(unsigned int cpu)
        } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
 
                for (tries = 0; tries < 25; tries++) {
-                       cpu_status = query_cpu_stopped(pcpu);
-                       if (cpu_status == 0 || cpu_status == -1)
+                       cpu_status = smp_query_cpu_stopped(pcpu);
+                       if (cpu_status == QCSS_STOPPED ||
+                           cpu_status == QCSS_HARDWARE_ERROR)
                                break;
                        cpu_relax();
                }
@@ -245,7 +222,7 @@ static void pseries_cpu_die(unsigned int cpu)
 }
 
 /*
- * Update cpu_present_map and paca(s) for a new cpu node.  The wrinkle
+ * Update cpu_present_mask and paca(s) for a new cpu node.  The wrinkle
  * here is that a cpu device node may represent up to two logical cpus
  * in the SMT case.  We must honor the assumption in other code that
  * the logical ids for sibling SMT threads x and y are adjacent, such
@@ -254,7 +231,7 @@ static void pseries_cpu_die(unsigned int cpu)
 static int pseries_add_processor(struct device_node *np)
 {
        unsigned int cpu;
-       cpumask_t candidate_map, tmp = CPU_MASK_NONE;
+       cpumask_var_t candidate_mask, tmp;
        int err = -ENOSPC, len, nthreads, i;
        const u32 *intserv;
 
@@ -262,48 +239,53 @@ static int pseries_add_processor(struct device_node *np)
        if (!intserv)
                return 0;
 
+       zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
+       zalloc_cpumask_var(&tmp, GFP_KERNEL);
+
        nthreads = len / sizeof(u32);
        for (i = 0; i < nthreads; i++)
-               cpu_set(i, tmp);
+               cpumask_set_cpu(i, tmp);
 
        cpu_maps_update_begin();
 
-       BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
+       BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
 
        /* Get a bitmap of unoccupied slots. */
-       cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
-       if (cpus_empty(candidate_map)) {
+       cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
+       if (cpumask_empty(candidate_mask)) {
                /* If we get here, it most likely means that NR_CPUS is
                 * less than the partition's max processors setting.
                 */
                printk(KERN_ERR "Cannot add cpu %s; this system configuration"
                       " supports %d logical cpus.\n", np->full_name,
-                      cpus_weight(cpu_possible_map));
+                      cpumask_weight(cpu_possible_mask));
                goto out_unlock;
        }
 
-       while (!cpus_empty(tmp))
-               if (cpus_subset(tmp, candidate_map))
+       while (!cpumask_empty(tmp))
+               if (cpumask_subset(tmp, candidate_mask))
                        /* Found a range where we can insert the new cpu(s) */
                        break;
                else
-                       cpus_shift_left(tmp, tmp, nthreads);
+                       cpumask_shift_left(tmp, tmp, nthreads);
 
-       if (cpus_empty(tmp)) {
-               printk(KERN_ERR "Unable to find space in cpu_present_map for"
+       if (cpumask_empty(tmp)) {
+               printk(KERN_ERR "Unable to find space in cpu_present_mask for"
                       " processor %s with %d thread(s)\n", np->name,
                       nthreads);
                goto out_unlock;
        }
 
-       for_each_cpu_mask(cpu, tmp) {
-               BUG_ON(cpu_isset(cpu, cpu_present_map));
+       for_each_cpu(cpu, tmp) {
+               BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask));
                set_cpu_present(cpu, true);
                set_hard_smp_processor_id(cpu, *intserv++);
        }
        err = 0;
 out_unlock:
        cpu_maps_update_done();
+       free_cpumask_var(candidate_mask);
+       free_cpumask_var(tmp);
        return err;
 }
 
@@ -334,7 +316,7 @@ static void pseries_remove_processor(struct device_node *np)
                        set_hard_smp_processor_id(cpu, -1);
                        break;
                }
-               if (cpu == NR_CPUS)
+               if (cpu >= nr_cpu_ids)
                        printk(KERN_WARNING "Could not find cpu to remove "
                               "with physical id 0x%x\n", intserv[i]);
        }
@@ -388,6 +370,7 @@ static int __init pseries_cpu_hotplug_init(void)
        struct device_node *np;
        const char *typep;
        int cpu;
+       int qcss_tok;
 
        for_each_node_by_name(np, "interrupt-controller") {
                typep = of_get_property(np, "compatible", NULL);
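Note: pseries_add_processor() now uses the cpumask_var_t lifecycle so the
masks can live off-stack when CONFIG_CPUMASK_OFFSTACK is set. A minimal
sketch of the pattern (hypothetical function; unlike the hunk above, this
one also checks the allocation result, which is the usual idiom):

    static int offstack_mask_example(void)
    {
            cpumask_var_t mask;

            if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                    return -ENOMEM;         /* may allocate when off-stack */

            cpumask_set_cpu(0, mask);
            /* ... use the mask ... */

            free_cpumask_var(mask);         /* needed on every exit path */
            return 0;
    }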
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 383a5d0e9818..48d20573e4de 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -228,3 +228,41 @@ _GLOBAL(plpar_hcall9)
        mtcrf   0xff,r0
 
        blr                     /* return r3 = status */
+
+/* See plpar_hcall_raw to see why this is needed */
+_GLOBAL(plpar_hcall9_raw)
+       HMT_MEDIUM
+
+       mfcr    r0
+       stw     r0,8(r1)
+
+       std     r4,STK_PARM(r4)(r1)     /* Save ret buffer */
+
+       mr      r4,r5
+       mr      r5,r6
+       mr      r6,r7
+       mr      r7,r8
+       mr      r8,r9
+       mr      r9,r10
+       ld      r10,STK_PARM(r11)(r1)   /* put arg7 in R10 */
+       ld      r11,STK_PARM(r12)(r1)   /* put arg8 in R11 */
+       ld      r12,STK_PARM(r13)(r1)   /* put arg9 in R12 */
+
+       HVSC                            /* invoke the hypervisor */
+
+       mr      r0,r12
+       ld      r12,STK_PARM(r4)(r1)
+       std     r4,  0(r12)
+       std     r5,  8(r12)
+       std     r6, 16(r12)
+       std     r7, 24(r12)
+       std     r8, 32(r12)
+       std     r9, 40(r12)
+       std     r10,48(r12)
+       std     r11,56(r12)
+       std     r0, 64(r12)
+
+       lwz     r0,8(r1)
+       mtcrf   0xff,r0
+
+       blr                             /* return r3 = status */
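Note: plpar_hcall9_raw uses the same register convention as plpar_hcall9
(r3 = hcall opcode, r4 = pointer to a 9-slot return buffer, r5..r13 = the
nine arguments), skipping only the extra bookkeeping of the non-raw entry
point so it stays safe to call in real mode. The C-side declaration
presumably mirrors plpar_hcall9's, along the lines of:

    /* retbuf must hold PLPAR_HCALL9_BUFSIZE (9) unsigned longs */
    long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...);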
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 1a0000a4b6d6..d26182d42cbf 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -468,7 +468,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
 
        pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));
 
-       dn = dev->dev.archdata.of_node;
+       dn = dev->dev.of_node;
 
        /* If we're the direct child of a root bus, then we need to allocate
         * an iommu table ourselves. The bus setup code should have setup
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 0707653612ba..cf79b46d8f88 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -367,21 +367,28 @@ static void pSeries_lpar_hptab_clear(void)
 {
        unsigned long size_bytes = 1UL << ppc64_pft_size;
        unsigned long hpte_count = size_bytes >> 4;
-       unsigned long dummy1, dummy2, dword0;
+       struct {
+               unsigned long pteh;
+               unsigned long ptel;
+       } ptes[4];
        long lpar_rc;
-       int i;
+       int i, j;
 
-       /* TODO: Use bulk call */
-       for (i = 0; i < hpte_count; i++) {
-               /* dont remove HPTEs with VRMA mappings */
-               lpar_rc = plpar_pte_remove_raw(H_ANDCOND, i, HPTE_V_1TB_SEG,
-                                              &dummy1, &dummy2);
-               if (lpar_rc == H_NOT_FOUND) {
-                       lpar_rc = plpar_pte_read_raw(0, i, &dword0, &dummy1);
-                       if (!lpar_rc && ((dword0 & HPTE_V_VRMA_MASK)
-                               != HPTE_V_VRMA_MASK))
-                               /* Can be hpte for 1TB Seg. So remove it */
-                               plpar_pte_remove_raw(0, i, 0, &dummy1, &dummy2);
+       /* Read in batches of 4,
+        * invalidate only valid entries not in the VRMA
+        * hpte_count will be a multiple of 4
+        */
+       for (i = 0; i < hpte_count; i += 4) {
+               lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
+               if (lpar_rc != H_SUCCESS)
+                       continue;
+               for (j = 0; j < 4; j++) {
+                       if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
+                               HPTE_V_VRMA_MASK)
+                               continue;
+                       if (ptes[j].pteh & HPTE_V_VALID)
+                               plpar_pte_remove_raw(0, i + j, 0,
+                                       &(ptes[j].pteh), &(ptes[j].ptel));
                }
        }
 }
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index a05f8d427856..d9801117124b 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -4,6 +4,14 @@
 #include <asm/hvcall.h>
 #include <asm/page.h>
 
+/* Get state of physical CPU from query_cpu_stopped */
+int smp_query_cpu_stopped(unsigned int pcpu);
+#define QCSS_STOPPED 0
+#define QCSS_STOPPING 1
+#define QCSS_NOT_STOPPED 2
+#define QCSS_HARDWARE_ERROR -1
+#define QCSS_HARDWARE_BUSY -2
+
 static inline long poll_pending(void)
 {
        return plpar_hcall_norets(H_POLL_PENDING);
@@ -183,6 +191,24 @@ static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
        return rc;
 }
 
+/*
+ * plpar_pte_read_4_raw can be called in real mode.
+ * ptes must be 8*sizeof(unsigned long)
+ */
+static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
+                                       unsigned long *ptes)
+
+{
+       long rc;
+       unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+       rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
+
+       memcpy(ptes, retbuf, 8*sizeof(unsigned long));
+
+       return rc;
+}
+
 static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
                                     unsigned long avpn)
 {
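Note: with the QCSS_* values exported here, callers compare firmware CPU
state symbolically rather than against bare integers. A fragment based on
the hotplug-cpu.c and smp.c hunks elsewhere in this diff (pcpu is assumed
to be a hardware cpu id in scope):

    int status = smp_query_cpu_stopped(pcpu);

    if (status == QCSS_STOPPED || status == QCSS_HARDWARE_ERROR)
            ;       /* cpu stopped (or unqueryable): teardown may proceed */
    else if (status == QCSS_NOT_STOPPED)
            ;       /* cpu still live in firmware, e.g. across kexec */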
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 9e17c0d2a0c8..40c93cad91d2 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -10,6 +10,13 @@
 #ifndef _PSERIES_PSERIES_H
 #define _PSERIES_PSERIES_H
 
+#include <linux/interrupt.h>
+
+struct device_node;
+
+extern void request_event_sources_irqs(struct device_node *np,
+                       irq_handler_t handler, const char *name);
+
 extern void __init fw_feature_init(const char *hypertas, unsigned long len);
 
 struct pt_regs;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index db940d2c39a0..41a3e9a039ed 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -67,63 +67,6 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
 static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
 
 
-static void request_ras_irqs(struct device_node *np,
-                            irq_handler_t handler,
-                            const char *name)
-{
-       int i, index, count = 0;
-       struct of_irq oirq;
-       const u32 *opicprop;
-       unsigned int opicplen;
-       unsigned int virqs[16];
-
-       /* Check for obsolete "open-pic-interrupt" property. If present, then
-        * map those interrupts using the default interrupt host and default
-        * trigger
-        */
-       opicprop = of_get_property(np, "open-pic-interrupt", &opicplen);
-       if (opicprop) {
-               opicplen /= sizeof(u32);
-               for (i = 0; i < opicplen; i++) {
-                       if (count > 15)
-                               break;
-                       virqs[count] = irq_create_mapping(NULL, *(opicprop++));
-                       if (virqs[count] == NO_IRQ)
-                               printk(KERN_ERR "Unable to allocate interrupt "
-                                      "number for %s\n", np->full_name);
-                       else
-                               count++;
-
-               }
-       }
-       /* Else use normal interrupt tree parsing */
-       else {
-               /* First try to do a proper OF tree parsing */
-               for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
-                    index++) {
-                       if (count > 15)
-                               break;
-                       virqs[count] = irq_create_of_mapping(oirq.controller,
-                                                            oirq.specifier,
-                                                            oirq.size);
-                       if (virqs[count] == NO_IRQ)
-                               printk(KERN_ERR "Unable to allocate interrupt "
-                                      "number for %s\n", np->full_name);
-                       else
-                               count++;
-               }
-       }
-
-       /* Now request them */
-       for (i = 0; i < count; i++) {
-               if (request_irq(virqs[i], handler, 0, name, NULL)) {
-                       printk(KERN_ERR "Unable to request interrupt %d for "
-                              "%s\n", virqs[i], np->full_name);
-                       return;
-               }
-       }
-}
-
 /*
  * Initialize handlers for the set of interrupts caused by hardware errors
  * and power system events.
@@ -138,14 +81,15 @@ static int __init init_ras_IRQ(void)
        /* Internal Errors */
        np = of_find_node_by_path("/event-sources/internal-errors");
        if (np != NULL) {
-               request_ras_irqs(np, ras_error_interrupt, "RAS_ERROR");
+               request_event_sources_irqs(np, ras_error_interrupt,
+                                          "RAS_ERROR");
                of_node_put(np);
        }
 
        /* EPOW Events */
        np = of_find_node_by_path("/event-sources/epow-events");
        if (np != NULL) {
-               request_ras_irqs(np, ras_epow_interrupt, "RAS_EPOW");
+               request_event_sources_irqs(np, ras_epow_interrupt, "RAS_EPOW");
                of_node_put(np);
        }
 
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 6710761bf60f..a6d19e3a505e 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -496,13 +496,14 @@ static int __init pSeries_probe(void)
 }
 
 
-DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
+DECLARE_PER_CPU(long, smt_snooze_delay);
 
 static void pseries_dedicated_idle_sleep(void)
 {
        unsigned int cpu = smp_processor_id();
        unsigned long start_snooze;
        unsigned long in_purr, out_purr;
+       long snooze = __get_cpu_var(smt_snooze_delay);
 
        /*
         * Indicate to the HV that we are idle. Now would be
@@ -517,13 +518,12 @@ static void pseries_dedicated_idle_sleep(void)
         * has been checked recently.  If we should poll for a little
         * while, do so.
         */
-       if (__get_cpu_var(smt_snooze_delay)) {
-               start_snooze = get_tb() +
-                       __get_cpu_var(smt_snooze_delay) * tb_ticks_per_usec;
+       if (snooze) {
+               start_snooze = get_tb() + snooze * tb_ticks_per_usec;
                local_irq_enable();
                set_thread_flag(TIF_POLLING_NRFLAG);
 
-               while (get_tb() < start_snooze) {
+               while ((snooze < 0) || (get_tb() < start_snooze)) {
                        if (need_resched() || cpu_is_offline(cpu))
                                goto out;
                        ppc64_runlatch_off();
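Note: smt_snooze_delay becomes signed so that a negative value can mean
"poll indefinitely": with snooze < 0 the rewritten while condition above
short-circuits before the timebase comparison, so the cpu keeps polling
until need_resched() or offlining. A condensed sketch of the loop's decision
logic (simplified from the hunk above; deadline is ignored when snooze < 0):

    long snooze = __get_cpu_var(smt_snooze_delay);
    u64 deadline = get_tb() + snooze * tb_ticks_per_usec;

    /* snooze < 0: poll forever; snooze > 0: poll until the deadline */
    while (snooze < 0 || get_tb() < deadline) {
            if (need_resched() || cpu_is_offline(cpu))
                    break;
            cpu_relax();
    }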
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 4e7f89a84561..3b1bf61c45be 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -55,7 +55,29 @@
  * The Primary thread of each non-boot processor was started from the OF client
  * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
  */
-static cpumask_t of_spin_map;
+static cpumask_var_t of_spin_mask;
+
+/* Query where a cpu is now.  Return codes #defined in plpar_wrappers.h */
+int smp_query_cpu_stopped(unsigned int pcpu)
+{
+       int cpu_status, status;
+       int qcss_tok = rtas_token("query-cpu-stopped-state");
+
+       if (qcss_tok == RTAS_UNKNOWN_SERVICE) {
+               printk(KERN_INFO "Firmware doesn't support "
+                      "query-cpu-stopped-state\n");
+               return QCSS_HARDWARE_ERROR;
+       }
+
+       status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
+       if (status != 0) {
+               printk(KERN_ERR
+                      "RTAS query-cpu-stopped-state failed: %i\n", status);
+               return status;
+       }
+
+       return cpu_status;
+}
 
 /**
  * smp_startup_cpu() - start the given cpu
@@ -76,12 +98,18 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
        unsigned int pcpu;
        int start_cpu;
 
-       if (cpu_isset(lcpu, of_spin_map))
+       if (cpumask_test_cpu(lcpu, of_spin_mask))
                /* Already started by OF and sitting in spin loop */
                return 1;
 
        pcpu = get_hard_smp_processor_id(lcpu);
 
+       /* Check to see if the CPU out of FW already for kexec */
+       if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED) {
+               cpumask_set_cpu(lcpu, of_spin_mask);
+               return 1;
+       }
+
        /* Fixup atomic count: it exited inside IRQ handler. */
        task_thread_info(paca[lcpu].__current)->preempt_count = 0;
 
@@ -115,7 +143,7 @@ static void __devinit smp_xics_setup_cpu(int cpu)
        if (firmware_has_feature(FW_FEATURE_SPLPAR))
                vpa_init(cpu);
 
-       cpu_clear(cpu, of_spin_map);
+       cpumask_clear_cpu(cpu, of_spin_mask);
        set_cpu_current_state(cpu, CPU_STATE_ONLINE);
        set_default_offline_state(cpu);
 
@@ -186,17 +214,19 @@ static void __init smp_init_pseries(void)
 
        pr_debug(" -> smp_init_pSeries()\n");
 
+       alloc_bootmem_cpumask_var(&of_spin_mask);
+
        /* Mark threads which are still spinning in hold loops. */
        if (cpu_has_feature(CPU_FTR_SMT)) {
                for_each_present_cpu(i) {
                        if (cpu_thread_in_core(i) == 0)
-                               cpu_set(i, of_spin_map);
+                               cpumask_set_cpu(i, of_spin_mask);
                }
        } else {
-               of_spin_map = cpu_present_map;
+               cpumask_copy(of_spin_mask, cpu_present_mask);
        }
 
-       cpu_clear(boot_cpuid, of_spin_map);
+       cpumask_clear_cpu(boot_cpuid, of_spin_mask);
 
        /* Non-lpar has additional take/give timebase */
        if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
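Note: the new check in smp_startup_cpu() covers kexec, where a secondary cpu
may still be running in the kernel rather than sitting in the firmware hold
loop; such a cpu is recorded as started instead of being sent start-cpu
again. The decision flow, condensed from the hunk above:

    if (cpumask_test_cpu(lcpu, of_spin_mask))
            return 1;       /* spinning in the OF hold loop */

    if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED) {
            cpumask_set_cpu(lcpu, of_spin_mask);
            return 1;       /* already out of firmware, e.g. after kexec */
    }

    /* otherwise fall through to the RTAS start-cpu call */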
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 1bcedd8b4616..f19d19468393 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -163,29 +163,37 @@ static inline void lpar_qirr_info(int n_cpu , u8 value)
 /* Interface to generic irq subsystem */
 
 #ifdef CONFIG_SMP
-static int get_irq_server(unsigned int virq, cpumask_t cpumask,
+/*
+ * For the moment we only implement delivery to all cpus or one cpu.
+ *
+ * If the requested affinity is cpu_all_mask, we set global affinity.
+ * If not we set it to the first cpu in the mask, even if multiple cpus
+ * are set. This is so things like irqbalance (which set core and package
+ * wide affinities) do the right thing.
+ */
+static int get_irq_server(unsigned int virq, const struct cpumask *cpumask,
                          unsigned int strict_check)
 {
-       int server;
-       /* For the moment only implement delivery to all cpus or one cpu */
-       cpumask_t tmp = CPU_MASK_NONE;
 
        if (!distribute_irqs)
                return default_server;
 
-       if (!cpus_equal(cpumask, CPU_MASK_ALL)) {
-               cpus_and(tmp, cpu_online_map, cpumask);
-
-               server = first_cpu(tmp);
+       if (!cpumask_equal(cpumask, cpu_all_mask)) {
+               int server = cpumask_first_and(cpu_online_mask, cpumask);
 
-               if (server < NR_CPUS)
+               if (server < nr_cpu_ids)
                        return get_hard_smp_processor_id(server);
 
                if (strict_check)
                        return -1;
        }
 
-       if (cpus_equal(cpu_online_map, cpu_present_map))
+       /*
+        * Workaround issue with some versions of JS20 firmware that
+        * deliver interrupts to cpus which haven't been started. This
+        * happens when using the maxcpus= boot option.
+        */
+       if (cpumask_equal(cpu_online_mask, cpu_present_mask))
                return default_distrib_server;
 
        return default_server;
@@ -207,7 +215,7 @@ static void xics_unmask_irq(unsigned int virq)
        if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
                return;
 
-       server = get_irq_server(virq, *(irq_to_desc(virq)->affinity), 0);
+       server = get_irq_server(virq, irq_to_desc(virq)->affinity, 0);
 
        call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
                                DEFAULT_PRIORITY);
@@ -398,11 +406,7 @@ static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
                return -1;
        }
 
-       /*
-        * For the moment only implement delivery to all cpus or one cpu.
-        * Get current irq_server for the given irq
-        */
-       irq_server = get_irq_server(virq, *cpumask, 1);
+       irq_server = get_irq_server(virq, cpumask, 1);
        if (irq_server == -1) {
                char cpulist[128];
                cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
@@ -611,7 +615,7 @@ int __init smp_xics_probe(void)
 {
        xics_request_ipi();
 
-       return cpus_weight(cpu_possible_map);
+       return cpumask_weight(cpu_possible_mask);
 }
 
 #endif /* CONFIG_SMP */
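Note: the reworked get_irq_server() takes the affinity mask by pointer and
collapses it to one XICS server: cpu_all_mask keeps global distribution,
anything narrower is delivered to the first online cpu in the mask (so core-
or package-wide masks from irqbalance still behave sensibly). A simplified
sketch of the selection, with the distribute_irqs and JS20 special cases
elided:

    if (!cpumask_equal(cpumask, cpu_all_mask)) {
            int server = cpumask_first_and(cpu_online_mask, cpumask);

            if (server < nr_cpu_ids)
                    return get_hard_smp_processor_id(server);
            /* no online cpu in the mask: -1 if strict, else fall back */
    }
    return default_distrib_server;      /* global delivery */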